src/runtime/stack.go
package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
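
// Stack layout notes (a summary of the scheme the constants below encode).
//
// The per-goroutine g.stackguard0 is set to point _StackGuard bytes above
// the bottom of the stack. Each function's prologue compares the stack
// pointer against g.stackguard0 to detect overflow. To cut one instruction
// from the check sequence for functions with tiny frames, the stack is
// allowed to protrude _StackSmall bytes below the guard; functions with
// frames larger than _StackBig need a longer check sequence.
//
// The space below the guard leaves room for NOSPLIT functions, which run
// with no overflow check at all: the linker verifies that every chain of
// NOSPLIT calls fits within _StackLimit bytes.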
const (
	// _StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code.
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, after checking that
	// the frame is big enough.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 880*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
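
// The _FixedStack chain above is the standard bit-smearing trick for
// rounding up to a power of two: subtract one, OR every lower bit into
// place, then add one back. For example, where _StackSystem is 0,
// _FixedStack0 is 2048 (already a power of two) and _FixedStack stays
// 2048; on Windows/amd64, where _StackSystem is 512*8 = 4096,
// _FixedStack0 is 6144 and _FixedStack rounds up to 8192.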

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
	stackNoCache     = 0 // disable per-P small stack caches

	// check the BP links during traceback.
	debugCheckBP = false
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)
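
// The preemption scheme piggybacks on the stack check: storing
// stackPreempt into g.stackguard0 makes the next function prologue's
// SP-versus-guard comparison fail (the sentinel is larger than any real
// stack pointer), routing the goroutine into morestack and then
// newstack, which notices the sentinel and preempts the goroutine
// instead of growing its stack.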

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌈log_2(n)⌉.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.manualFreeList.ptr() != nil {
			throw("bad manualFreeList")
		}
		s.elemsize = _FixedStack << order
		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.manualFreeList
			s.manualFreeList = x
		}
		list.insert(s)
	}
	x := s.manualFreeList
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.manualFreeList = x.ptr().next
	s.allocCount++
	if s.manualFreeList.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanManual {
		throw("freeing stack not in a stack span")
	}
	if s.manualFreeList.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.manualFreeList
	s.manualFreeList = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.manualFreeList = 0
		mheap_.freeManual(s, &memstats.stacks_inuse)
	}
}

// stackcacherefill/stackcacherelease implement a global pool
// of stack segments. The pool is required to prevent unlimited
// growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) stack {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	if debug.efence != 0 || stackFromSystem != 0 {
		n = uint32(round(uintptr(n), physPageSize))
		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		return stack{uintptr(v), uintptr(v) + uintptr(n)}
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if stackNoCache != 0 || c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently by releaseall
			// called from mark termination.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocManual(npage, &memstats.stacks_inuse)
			if s == nil {
				throw("out of memory")
			}
			s.elemsize = uintptr(n)
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print(" allocated ", v, "\n")
	}
	return stack{uintptr(v), uintptr(v) + uintptr(n)}
}
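
// To make the size classes concrete: assuming _NumStackOrders = 4 and
// _StackCacheSize = 32 KiB (as on 64-bit Linux), the small-stack orders
// are order 0 = 2 KiB, order 1 = 4 KiB, order 2 = 8 KiB, and
// order 3 = 16 KiB; any request of 32 KiB or more takes the
// dedicated-span path above.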

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	n := stk.hi - stk.lo
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclrNoHeapPointers(v, n)
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if stackNoCache != 0 || c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanManual {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeManual(s, &memstats.stacks_inuse)
		} else {
			// If the GC is running, we can't return the span
			// to the heap yet (see the comment in
			// stackpoolfree). Stash it in the large stack
			// cache so it can be reused or freed after GC.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print(" ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print(" adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
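
// A frame's stack map is one bit per pointer-sized word: bit i%8 of
// byte i/8 in bv.bytedata says whether word i of the described region
// holds a live pointer ("ptr") or not ("scalar"); ptrnames above maps
// that bit to a name for debug printing.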

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f funcInfo) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n
	// If this region might contain channel receive slots (it lies at or
	// below sghi), use CAS to avoid racing with concurrent channel
	// sends writing to those slots.
	useCAS := uintptr(scanp) < adjinfo.sghi
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) != 1 {
			continue
		}
		pp := (*uintptr)(add(scanp, i*sys.PtrSize))
	retry:
		p := *pp
		if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
			// Looks like a junk value in a pointer slot.
			// Crash with a useful report rather than adjust it.
			getg().m.traceback = 2
			print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
			throw("invalid pointer found on stack")
		}
		if minp <= p && p < maxp {
			if stackDebug >= 3 {
				print("adjust ptr ", hex(p), " ", funcname(f), "\n")
			}
			if useCAS {
				ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
				if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
					goto retry
				}
			} else {
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print(" adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print(" locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print(" saved bp\n")
		}
		if debugCheckBP {
			// Frame pointers should always point to the next higher frame on
			// the Go stack (or be nil, for the top frame on the stack).
			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
				println("runtime: found invalid frame pointer")
				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
				throw("bad frame pointer")
			}
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print(" args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, funcInfo{})
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
	if !framepointer_enabled {
		return
	}
	if debugCheckBP {
		bp := gp.sched.bp
		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
			println("runtime: found invalid top frame pointer")
			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
			throw("bad top frame pointer")
		}
	}
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in g.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// (gp.waiting is sorted by channel address, so sudogs on the
	// same channel are adjacent and each channel is locked once.)
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the channel locks, so concurrent channel
	// operations cannot observe the old stack.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little cost
		// in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location.
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// Swap out old stack for new one.
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old)
}
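
// To make the adjustment concrete: if the old stack were
// [0xc000100000, 0xc000102000) and the new one [0xc000200000,
// 0xc000204000) (hypothetical addresses), then delta = new.hi - old.hi
// = 0x102000. A pointer p into the old range moves to p + delta, which
// preserves its offset from the top of the stack, matching the
// top-aligned copy (memmove targets new.hi-ncopy).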

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}

	gp := thisg.m.curg

	if thisg.m.curg.throwsplit {
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		pcname, pcoff := "(unknown)", uintptr(0)
		f := findfunc(gp.sched.pc)
		if f.valid() {
			pcname = funcname(f)
			pcoff = gp.sched.pc - f.entry
		}
		print("runtime: newstack at ", pcname, "+", hex(pcoff),
			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		thisg.m.traceback = 2
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize * 2
	if newsize > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, newsize, true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
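
// Stack growth is geometric: a goroutine that starts at the 2 KiB
// _StackMin and keeps recursing gets 4 KiB, 8 KiB, ... stacks, so a
// stack that ends up n bytes deep costs only O(n) bytes of copying in
// total across all growth steps (the per-step copies form a geometric
// series), which is the "constant amortized cost" noted above.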

// nilfunc faults by dereferencing nil; gostartcallfn uses its entry PC
// when a goroutine is started with a nil function value.
//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack)
			gp.stack.lo = 0
			gp.stack.hi = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}
	if gp.startpc == gcBgMarkWorkerPC {
		// We're not allowed to shrink the gcBgMarkWorker
		// stack (see gcBgMarkWorker for explanation).
		return
	}

	oldsize := gp.stack.hi - gp.stack.lo
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}
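
// For example, a goroutine sitting on a 32 KiB stack with its SP only
// 2 KiB below the top is "using" 2 KiB plus _StackLimit bytes, well
// under a quarter of 32 KiB, so the GC shrinks the stack to 16 KiB.
// The shrink passes sync=false because gp is merely stopped, not
// cooperating, so copystack must guard against concurrent channel
// operations touching the stack.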

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.manualFreeList = 0
				mheap_.freeManual(s, &memstats.stacks_inuse)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans left over from copying stacks.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeManual(s, &memstats.stacks_inuse)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute system stack code on user stack")
	})
}