Source file src/runtime/proc.go (Documentation: runtime)
5 package runtime
6
7 import (
8 "runtime/internal/atomic"
9 "runtime/internal/sys"
10 "unsafe"
11 )
12
13 var buildVersion = sys.TheVersion
14
// Goroutine scheduler: it distributes ready-to-run goroutines (G) over
// worker threads (M); an M must hold a processor (P) to execute Go code.
// Design doc: https://golang.org/s/go11sched
78 var (
79 m0 m
80 g0 g
81 raceprocctx0 uintptr
82 )
83
84
85 func runtime_init()
86
87
88 func main_init()
89
90
91
92
93
94 var main_init_done chan bool
95
96
97 func main_main()
98
99
100 var mainStarted bool
101
102
103 var runtimeInitTime int64
104
105
106 var initSigmask sigset
107
108
109 func main() {
110 g := getg()
111
112
113
114 g.m.g0.racectx = 0
115
116
117
118
119 if sys.PtrSize == 8 {
120 maxstacksize = 1000000000
121 } else {
122 maxstacksize = 250000000
123 }
124
125
126 mainStarted = true
127
128 systemstack(func() {
129 newm(sysmon, nil)
130 })
131
132
133
134
135
136
137
138 lockOSThread()
139
140 if g.m != &m0 {
141 throw("runtime.main not on m0")
142 }
143
144 runtime_init()
145 if nanotime() == 0 {
146 throw("nanotime returning zero")
147 }
148
149
150 needUnlock := true
151 defer func() {
152 if needUnlock {
153 unlockOSThread()
154 }
155 }()
156
157
158
159 runtimeInitTime = nanotime()
160
161 gcenable()
162
163 main_init_done = make(chan bool)
164 if iscgo {
165 if _cgo_thread_start == nil {
166 throw("_cgo_thread_start missing")
167 }
168 if GOOS != "windows" {
169 if _cgo_setenv == nil {
170 throw("_cgo_setenv missing")
171 }
172 if _cgo_unsetenv == nil {
173 throw("_cgo_unsetenv missing")
174 }
175 }
176 if _cgo_notify_runtime_init_done == nil {
177 throw("_cgo_notify_runtime_init_done missing")
178 }
179
180
181 startTemplateThread()
182 cgocall(_cgo_notify_runtime_init_done, nil)
183 }
184
185 fn := main_init
186 fn()
187 close(main_init_done)
188
189 needUnlock = false
190 unlockOSThread()
191
192 if isarchive || islibrary {
193
194
195 return
196 }
197 fn = main_main
198 fn()
199 if raceenabled {
200 racefini()
201 }
202
203
204
205
206
207 if atomic.Load(&runningPanicDefers) != 0 {
208
209 for c := 0; c < 1000; c++ {
210 if atomic.Load(&runningPanicDefers) == 0 {
211 break
212 }
213 Gosched()
214 }
215 }
216 if atomic.Load(&panicking) != 0 {
217 gopark(nil, nil, "panicwait", traceEvGoStop, 1)
218 }
219
220 exit(0)
221 for {
222 var x *int32
223 *x = 0
224 }
225 }
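
runtime.main above is the bootstrap goroutine: it runs the runtime's own init (runtime_init), starts sysmon, runs every package init function (main_init), closes main_init_done, and only then calls the program's main.main (main_main) before exiting. A minimal user-level sketch of that ordering, assuming an ordinary package main program:

// Package-level initialization (main_init) always runs before func main (main_main).
package main

import "fmt"

var answer = compute() // package-level variables are initialized first

func compute() int { return 42 }

func init() { // gathered into main_init by the compiler
	fmt.Println("init: answer =", answer)
}

func main() { // invoked by runtime.main as main_main
	fmt.Println("main: runs after all init functions")
}
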
226
227
228
229 func os_beforeExit() {
230 if raceenabled {
231 racefini()
232 }
233 }
234
235
236 func init() {
237 go forcegchelper()
238 }
239
240 func forcegchelper() {
241 forcegc.g = getg()
242 for {
243 lock(&forcegc.lock)
244 if forcegc.idle != 0 {
245 throw("forcegc: phase error")
246 }
247 atomic.Store(&forcegc.idle, 1)
248 goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
249
250 if debug.gctrace > 0 {
251 println("GC forced")
252 }
253
254 gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()})
255 }
256 }
257
258
259
260
261
262 func Gosched() {
263 mcall(gosched_m)
264 }
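
Gosched hands control back to the scheduler via mcall(gosched_m), which places the calling goroutine on the global run queue. A small, hedged usage sketch from user code; the worker and channel names are illustrative only:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	done := make(chan struct{})
	go func() {
		fmt.Println("worker ran")
		close(done)
	}()
	for i := 0; i < 3; i++ {
		runtime.Gosched() // yield so the worker can run even with GOMAXPROCS=1
	}
	<-done
}
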
265
266
267
268
269 func goschedguarded() {
270 mcall(goschedguarded_m)
271 }
272
273
274
275
276
277 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
278 mp := acquirem()
279 gp := mp.curg
280 status := readgstatus(gp)
281 if status != _Grunning && status != _Gscanrunning {
282 throw("gopark: bad g status")
283 }
284 mp.waitlock = lock
285 mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
286 gp.waitreason = reason
287 mp.waittraceev = traceEv
288 mp.waittraceskip = traceskip
289 releasem(mp)
290
291 mcall(park_m)
292 }
293
294
295
296 func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
297 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
298 }
299
300 func goready(gp *g, traceskip int) {
301 systemstack(func() {
302 ready(gp, traceskip, true)
303 })
304 }
305
306
307 func acquireSudog() *sudog {
308
309
310
311
312
313
314
315
316 mp := acquirem()
317 pp := mp.p.ptr()
318 if len(pp.sudogcache) == 0 {
319 lock(&sched.sudoglock)
320
321 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
322 s := sched.sudogcache
323 sched.sudogcache = s.next
324 s.next = nil
325 pp.sudogcache = append(pp.sudogcache, s)
326 }
327 unlock(&sched.sudoglock)
328
329 if len(pp.sudogcache) == 0 {
330 pp.sudogcache = append(pp.sudogcache, new(sudog))
331 }
332 }
333 n := len(pp.sudogcache)
334 s := pp.sudogcache[n-1]
335 pp.sudogcache[n-1] = nil
336 pp.sudogcache = pp.sudogcache[:n-1]
337 if s.elem != nil {
338 throw("acquireSudog: found s.elem != nil in cache")
339 }
340 releasem(mp)
341 return s
342 }
343
344
345 func releaseSudog(s *sudog) {
346 if s.elem != nil {
347 throw("runtime: sudog with non-nil elem")
348 }
349 if s.isSelect {
350 throw("runtime: sudog with non-false isSelect")
351 }
352 if s.next != nil {
353 throw("runtime: sudog with non-nil next")
354 }
355 if s.prev != nil {
356 throw("runtime: sudog with non-nil prev")
357 }
358 if s.waitlink != nil {
359 throw("runtime: sudog with non-nil waitlink")
360 }
361 if s.c != nil {
362 throw("runtime: sudog with non-nil c")
363 }
364 gp := getg()
365 if gp.param != nil {
366 throw("runtime: releaseSudog with non-nil gp.param")
367 }
368 mp := acquirem()
369 pp := mp.p.ptr()
370 if len(pp.sudogcache) == cap(pp.sudogcache) {
371
372 var first, last *sudog
373 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
374 n := len(pp.sudogcache)
375 p := pp.sudogcache[n-1]
376 pp.sudogcache[n-1] = nil
377 pp.sudogcache = pp.sudogcache[:n-1]
378 if first == nil {
379 first = p
380 } else {
381 last.next = p
382 }
383 last = p
384 }
385 lock(&sched.sudoglock)
386 last.next = sched.sudogcache
387 sched.sudogcache = first
388 unlock(&sched.sudoglock)
389 }
390 pp.sudogcache = append(pp.sudogcache, s)
391 releasem(mp)
392 }
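
acquireSudog and releaseSudog implement a two-level free list: a per-P slice cache that is refilled from, and spilled back to, a central linked list guarded by sched.sudoglock. The following standalone sketch reproduces the same caching pattern with hypothetical node and localCache types; it is an analogy, not the runtime's API:

package main

import (
	"fmt"
	"sync"
)

// node plays the role of sudog: a small, frequently reused object.
type node struct{ next *node }

// Central free list shared by all workers (analogue of sched.sudogcache).
var (
	centralMu   sync.Mutex
	centralHead *node
)

// localCache is the per-worker cache (analogue of p.sudogcache).
type localCache struct{ items []*node }

func (c *localCache) acquire() *node {
	if len(c.items) == 0 {
		// Refill up to half the local capacity from the central list.
		centralMu.Lock()
		for len(c.items) < cap(c.items)/2 && centralHead != nil {
			n := centralHead
			centralHead = n.next
			n.next = nil
			c.items = append(c.items, n)
		}
		centralMu.Unlock()
		if len(c.items) == 0 {
			c.items = append(c.items, new(node))
		}
	}
	n := c.items[len(c.items)-1]
	c.items = c.items[:len(c.items)-1]
	return n
}

func (c *localCache) release(n *node) {
	if len(c.items) == cap(c.items) {
		// Spill half of the local cache back to the central list.
		var first, last *node
		for len(c.items) > cap(c.items)/2 {
			p := c.items[len(c.items)-1]
			c.items = c.items[:len(c.items)-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		centralMu.Lock()
		last.next = centralHead
		centralHead = first
		centralMu.Unlock()
	}
	c.items = append(c.items, n)
}

func main() {
	c := &localCache{items: make([]*node, 0, 8)}
	n := c.acquire()
	c.release(n)
	fmt.Println("cached objects:", len(c.items))
}
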
393
394
395
396
397 func funcPC(f interface{}) uintptr {
398 return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
399 }
400
401
402 func badmcall(fn func(*g)) {
403 throw("runtime: mcall called on m->g0 stack")
404 }
405
406 func badmcall2(fn func(*g)) {
407 throw("runtime: mcall function returned")
408 }
409
410 func badreflectcall() {
411 panic(plainError("arg size to reflect.call more than 1GB"))
412 }
413
414 var badmorestackg0Msg = "fatal: morestack on g0\n"
415
416
417
418 func badmorestackg0() {
419 sp := stringStructOf(&badmorestackg0Msg)
420 write(2, sp.str, int32(sp.len))
421 }
422
423 var badmorestackgsignalMsg = "fatal: morestack on gsignal\n"
424
425
426
427 func badmorestackgsignal() {
428 sp := stringStructOf(&badmorestackgsignalMsg)
429 write(2, sp.str, int32(sp.len))
430 }
431
432
433 func badctxt() {
434 throw("ctxt != 0")
435 }
436
437 func lockedOSThread() bool {
438 gp := getg()
439 return gp.lockedm != 0 && gp.m.lockedg != 0
440 }
441
442 var (
443 allgs []*g
444 allglock mutex
445 )
446
447 func allgadd(gp *g) {
448 if readgstatus(gp) == _Gidle {
449 throw("allgadd: bad status Gidle")
450 }
451
452 lock(&allglock)
453 allgs = append(allgs, gp)
454 allglen = uintptr(len(allgs))
455 unlock(&allglock)
456 }
457
458 const (
459
460
461 _GoidCacheBatch = 16
462 )
463
464
465
466
467
468
469
470
471
472 func schedinit() {
473
474
475 _g_ := getg()
476 if raceenabled {
477 _g_.racectx, raceprocctx0 = raceinit()
478 }
479
480 sched.maxmcount = 10000
481
482 tracebackinit()
483 moduledataverify()
484 stackinit()
485 mallocinit()
486 mcommoninit(_g_.m)
487 alginit()
488 modulesinit()
489 typelinksinit()
490 itabsinit()
491
492 msigsave(_g_.m)
493 initSigmask = _g_.m.sigmask
494
495 goargs()
496 goenvs()
497 parsedebugvars()
498 gcinit()
499
500 sched.lastpoll = uint64(nanotime())
501 procs := ncpu
502 if n, ok := atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
503 procs = n
504 }
505 if procresize(procs) != nil {
506 throw("unknown runnable goroutine during bootstrap")
507 }
508
509
510
511
512 if debug.cgocheck > 1 {
513 writeBarrier.cgo = true
514 writeBarrier.enabled = true
515 for _, p := range allp {
516 p.wbBuf.reset()
517 }
518 }
519
520 if buildVersion == "" {
521
522
523 buildVersion = "unknown"
524 }
525 }
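
schedinit sizes the set of Ps from ncpu and lets the GOMAXPROCS environment variable override it via procresize. From user code the same knob is exposed as runtime.GOMAXPROCS; a brief sketch (the printed values depend on the machine):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// GOMAXPROCS(0) queries the current setting without changing it;
	// it defaults to the number of CPUs unless the GOMAXPROCS env var is set.
	fmt.Println("current GOMAXPROCS:", runtime.GOMAXPROCS(0))
	fmt.Println("logical CPUs:      ", runtime.NumCPU())

	prev := runtime.GOMAXPROCS(2) // resizes the P set, ultimately via procresize
	fmt.Println("previous setting:  ", prev)
}
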
526
527 func dumpgstatus(gp *g) {
528 _g_ := getg()
529 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
530 print("runtime: g: g=", _g_, ", goid=", _g_.goid, ", g->atomicstatus=", readgstatus(_g_), "\n")
531 }
532
533 func checkmcount() {
534
535 if mcount() > sched.maxmcount {
536 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
537 throw("thread exhaustion")
538 }
539 }
540
541 func mcommoninit(mp *m) {
542 _g_ := getg()
543
544
545 if _g_ != _g_.m.g0 {
546 callers(1, mp.createstack[:])
547 }
548
549 lock(&sched.lock)
550 if sched.mnext+1 < sched.mnext {
551 throw("runtime: thread ID overflow")
552 }
553 mp.id = sched.mnext
554 sched.mnext++
555 checkmcount()
556
557 mp.fastrand[0] = 1597334677 * uint32(mp.id)
558 mp.fastrand[1] = uint32(cputicks())
559 if mp.fastrand[0]|mp.fastrand[1] == 0 {
560 mp.fastrand[1] = 1
561 }
562
563 mpreinit(mp)
564 if mp.gsignal != nil {
565 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
566 }
567
568
569
570 mp.alllink = allm
571
572
573
574 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
575 unlock(&sched.lock)
576
577
578 if iscgo || GOOS == "solaris" || GOOS == "windows" {
579 mp.cgoCallers = new(cgoCallers)
580 }
581 }
582
583
584 func ready(gp *g, traceskip int, next bool) {
585 if trace.enabled {
586 traceGoUnpark(gp, traceskip)
587 }
588
589 status := readgstatus(gp)
590
591
592 _g_ := getg()
593 _g_.m.locks++
594 if status&^_Gscan != _Gwaiting {
595 dumpgstatus(gp)
596 throw("bad g->status in ready")
597 }
598
599
600 casgstatus(gp, _Gwaiting, _Grunnable)
601 runqput(_g_.m.p.ptr(), gp, next)
602 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
603 wakep()
604 }
605 _g_.m.locks--
606 if _g_.m.locks == 0 && _g_.preempt {
607 _g_.stackguard0 = stackPreempt
608 }
609 }
610
611 func gcprocs() int32 {
612
613
614 lock(&sched.lock)
615 n := gomaxprocs
616 if n > ncpu {
617 n = ncpu
618 }
619 if n > _MaxGcproc {
620 n = _MaxGcproc
621 }
622 if n > sched.nmidle+1 {
623 n = sched.nmidle + 1
624 }
625 unlock(&sched.lock)
626 return n
627 }
628
629 func needaddgcproc() bool {
630 lock(&sched.lock)
631 n := gomaxprocs
632 if n > ncpu {
633 n = ncpu
634 }
635 if n > _MaxGcproc {
636 n = _MaxGcproc
637 }
638 n -= sched.nmidle + 1
639 unlock(&sched.lock)
640 return n > 0
641 }
642
643 func helpgc(nproc int32) {
644 _g_ := getg()
645 lock(&sched.lock)
646 pos := 0
647 for n := int32(1); n < nproc; n++ {
648 if allp[pos].mcache == _g_.m.mcache {
649 pos++
650 }
651 mp := mget()
652 if mp == nil {
653 throw("gcprocs inconsistency")
654 }
655 mp.helpgc = n
656 mp.p.set(allp[pos])
657 mp.mcache = allp[pos].mcache
658 pos++
659 notewakeup(&mp.park)
660 }
661 unlock(&sched.lock)
662 }
663
664
665
666 const freezeStopWait = 0x7fffffff
667
668
669
670 var freezing uint32
671
672
673
674
675 func freezetheworld() {
676 atomic.Store(&freezing, 1)
677
678
679
680 for i := 0; i < 5; i++ {
681
682 sched.stopwait = freezeStopWait
683 atomic.Store(&sched.gcwaiting, 1)
684
685 if !preemptall() {
686 break
687 }
688 usleep(1000)
689 }
690
691 usleep(1000)
692 preemptall()
693 usleep(1000)
694 }
695
696 func isscanstatus(status uint32) bool {
697 if status == _Gscan {
698 throw("isscanstatus: Bad status Gscan")
699 }
700 return status&_Gscan == _Gscan
701 }
702
703
704
705
706 func readgstatus(gp *g) uint32 {
707 return atomic.Load(&gp.atomicstatus)
708 }
709
// The _Gscan bit is combined with the other status values while a
// goroutine's stack is being scanned. castogscanstatus acquires the scan
// bit and casfrom_Gscanstatus releases it when the owner is done.
726 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
727 success := false
728
729
730 switch oldval {
731 default:
732 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
733 dumpgstatus(gp)
734 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
735 case _Gscanrunnable,
736 _Gscanwaiting,
737 _Gscanrunning,
738 _Gscansyscall:
739 if newval == oldval&^_Gscan {
740 success = atomic.Cas(&gp.atomicstatus, oldval, newval)
741 }
742 }
743 if !success {
744 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
745 dumpgstatus(gp)
746 throw("casfrom_Gscanstatus: gp->status is not in scan state")
747 }
748 }
749
750
751
752 func castogscanstatus(gp *g, oldval, newval uint32) bool {
753 switch oldval {
754 case _Grunnable,
755 _Grunning,
756 _Gwaiting,
757 _Gsyscall:
758 if newval == oldval|_Gscan {
759 return atomic.Cas(&gp.atomicstatus, oldval, newval)
760 }
761 }
762 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
763 throw("castogscanstatus")
764 panic("not reached")
765 }
766
767
768
769
770
771
772 func casgstatus(gp *g, oldval, newval uint32) {
773 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
774 systemstack(func() {
775 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
776 throw("casgstatus: bad incoming values")
777 })
778 }
779
780 if oldval == _Grunning && gp.gcscanvalid {
781
782
783
784
785 systemstack(func() {
786 print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
787 throw("casgstatus")
788 })
789 }
790
791
792 const yieldDelay = 5 * 1000
793 var nextYield int64
794
795
796
797 for i := 0; !atomic.Cas(&gp.atomicstatus, oldval, newval); i++ {
798 if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
799 systemstack(func() {
800 throw("casgstatus: waiting for Gwaiting but is Grunnable")
801 })
802 }
803
804
805
806
807
808
809
810
811 if i == 0 {
812 nextYield = nanotime() + yieldDelay
813 }
814 if nanotime() < nextYield {
815 for x := 0; x < 10 && gp.atomicstatus != oldval; x++ {
816 procyield(1)
817 }
818 } else {
819 osyield()
820 nextYield = nanotime() + yieldDelay/2
821 }
822 }
823 if newval == _Grunning {
824 gp.gcscanvalid = false
825 }
826 }
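
casgstatus loops on a compare-and-swap until the transition succeeds, busy-waiting briefly with procyield and falling back to osyield once a deadline passes. A standalone sketch of that spin-then-yield pattern using sync/atomic; the state constants and timings below are illustrative, not the runtime's:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
	"time"
)

const (
	stateIdle uint32 = iota
	stateRunning
)

// casState spins until state can be moved from old to newv,
// busy-waiting briefly and then yielding, as casgstatus does.
func casState(state *uint32, old, newv uint32) {
	const yieldDelay = 5 * time.Microsecond
	var nextYield time.Time
	for i := 0; !atomic.CompareAndSwapUint32(state, old, newv); i++ {
		if i == 0 {
			nextYield = time.Now().Add(yieldDelay)
		}
		if time.Now().Before(nextYield) {
			// Brief busy wait while the value is not yet what we expect.
			for x := 0; x < 10 && atomic.LoadUint32(state) != old; x++ {
			}
		} else {
			runtime.Gosched() // stand-in for osyield
			nextYield = time.Now().Add(yieldDelay / 2)
		}
	}
}

func main() {
	state := stateRunning
	go func() {
		time.Sleep(time.Millisecond)
		atomic.StoreUint32(&state, stateIdle) // another party releases the state
	}()
	casState(&state, stateIdle, stateRunning) // spins until the transition succeeds
	fmt.Println("acquired:", atomic.LoadUint32(&state) == stateRunning)
}
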
827
828
829
830
831
832
833
834 func casgcopystack(gp *g) uint32 {
835 for {
836 oldstatus := readgstatus(gp) &^ _Gscan
837 if oldstatus != _Gwaiting && oldstatus != _Grunnable {
838 throw("copystack: bad status, not Gwaiting or Grunnable")
839 }
840 if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
841 return oldstatus
842 }
843 }
844 }
845
846
847
848
849 func scang(gp *g, gcw *gcWork) {
850
851
852
853
854
855 gp.gcscandone = false
856
857
858 const yieldDelay = 10 * 1000
859 var nextYield int64
860
861
862
863
864
865
866 loop:
867 for i := 0; !gp.gcscandone; i++ {
868 switch s := readgstatus(gp); s {
869 default:
870 dumpgstatus(gp)
871 throw("stopg: invalid status")
872
873 case _Gdead:
874
875 gp.gcscandone = true
876 break loop
877
878 case _Gcopystack:
879
880
881 case _Grunnable, _Gsyscall, _Gwaiting:
882
883
884
885
886 if castogscanstatus(gp, s, s|_Gscan) {
887 if !gp.gcscandone {
888 scanstack(gp, gcw)
889 gp.gcscandone = true
890 }
891 restartg(gp)
892 break loop
893 }
894
895 case _Gscanwaiting:
896
897
898 case _Grunning:
899
900
901
902
903
904 if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
905 break
906 }
907
908
909 if castogscanstatus(gp, _Grunning, _Gscanrunning) {
910 if !gp.gcscandone {
911 gp.preemptscan = true
912 gp.preempt = true
913 gp.stackguard0 = stackPreempt
914 }
915 casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
916 }
917 }
918
919 if i == 0 {
920 nextYield = nanotime() + yieldDelay
921 }
922 if nanotime() < nextYield {
923 procyield(10)
924 } else {
925 osyield()
926 nextYield = nanotime() + yieldDelay/2
927 }
928 }
929
930 gp.preemptscan = false
931 }
932
933
934 func restartg(gp *g) {
935 s := readgstatus(gp)
936 switch s {
937 default:
938 dumpgstatus(gp)
939 throw("restartg: unexpected status")
940
941 case _Gdead:
942
943
944 case _Gscanrunnable,
945 _Gscanwaiting,
946 _Gscansyscall:
947 casfrom_Gscanstatus(gp, s, s&^_Gscan)
948 }
949 }
950
// stopTheWorld stops all Ps from executing goroutines. The caller owns the
// world while it is stopped and must call startTheWorld to resume scheduling.
965 func stopTheWorld(reason string) {
966 semacquire(&worldsema)
967 getg().m.preemptoff = reason
968 systemstack(stopTheWorldWithSema)
969 }
970
971
972 func startTheWorld() {
973 systemstack(func() { startTheWorldWithSema(false) })
974
975
976 semrelease(&worldsema)
977 getg().m.preemptoff = ""
978 }
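
stopTheWorld and startTheWorld bracket work that needs every P quiescent; user-visible operations such as runtime.ReadMemStats take this path. A hedged sketch that observes the pause from ordinary code (the measured duration is only indicative):

package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	var ms runtime.MemStats
	start := time.Now()
	runtime.ReadMemStats(&ms) // briefly stops the world while the stats are collected
	fmt.Printf("heap in use: %d bytes (snapshot took %v)\n", ms.HeapInuse, time.Since(start))
}
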
979
980
981
982 var worldsema uint32 = 1
983
// stopTheWorldWithSema is the core implementation of stopTheWorld. The
// caller must hold worldsema and must run this on the system stack; it is
// paired with startTheWorldWithSema.
1006 func stopTheWorldWithSema() {
1007 _g_ := getg()
1008
1009
1010
1011 if _g_.m.locks > 0 {
1012 throw("stopTheWorld: holding locks")
1013 }
1014
1015 lock(&sched.lock)
1016 sched.stopwait = gomaxprocs
1017 atomic.Store(&sched.gcwaiting, 1)
1018 preemptall()
1019
1020 _g_.m.p.ptr().status = _Pgcstop
1021 sched.stopwait--
1022
1023 for _, p := range allp {
1024 s := p.status
1025 if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
1026 if trace.enabled {
1027 traceGoSysBlock(p)
1028 traceProcStop(p)
1029 }
1030 p.syscalltick++
1031 sched.stopwait--
1032 }
1033 }
1034
1035 for {
1036 p := pidleget()
1037 if p == nil {
1038 break
1039 }
1040 p.status = _Pgcstop
1041 sched.stopwait--
1042 }
1043 wait := sched.stopwait > 0
1044 unlock(&sched.lock)
1045
1046
1047 if wait {
1048 for {
1049
1050 if notetsleep(&sched.stopnote, 100*1000) {
1051 noteclear(&sched.stopnote)
1052 break
1053 }
1054 preemptall()
1055 }
1056 }
1057
1058
1059 bad := ""
1060 if sched.stopwait != 0 {
1061 bad = "stopTheWorld: not stopped (stopwait != 0)"
1062 } else {
1063 for _, p := range allp {
1064 if p.status != _Pgcstop {
1065 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1066 }
1067 }
1068 }
1069 if atomic.Load(&freezing) != 0 {
1070
1071
1072
1073
1074 lock(&deadlock)
1075 lock(&deadlock)
1076 }
1077 if bad != "" {
1078 throw(bad)
1079 }
1080 }
1081
1082 func mhelpgc() {
1083 _g_ := getg()
1084 _g_.m.helpgc = -1
1085 }
1086
1087 func startTheWorldWithSema(emitTraceEvent bool) int64 {
1088 _g_ := getg()
1089
1090 _g_.m.locks++
1091 if netpollinited() {
1092 gp := netpoll(false)
1093 injectglist(gp)
1094 }
1095 add := needaddgcproc()
1096 lock(&sched.lock)
1097
1098 procs := gomaxprocs
1099 if newprocs != 0 {
1100 procs = newprocs
1101 newprocs = 0
1102 }
1103 p1 := procresize(procs)
1104 sched.gcwaiting = 0
1105 if sched.sysmonwait != 0 {
1106 sched.sysmonwait = 0
1107 notewakeup(&sched.sysmonnote)
1108 }
1109 unlock(&sched.lock)
1110
1111 for p1 != nil {
1112 p := p1
1113 p1 = p1.link.ptr()
1114 if p.m != 0 {
1115 mp := p.m.ptr()
1116 p.m = 0
1117 if mp.nextp != 0 {
1118 throw("startTheWorld: inconsistent mp->nextp")
1119 }
1120 mp.nextp.set(p)
1121 notewakeup(&mp.park)
1122 } else {
1123
1124 newm(nil, p)
1125 add = false
1126 }
1127 }
1128
1129
1130 startTime := nanotime()
1131 if emitTraceEvent {
1132 traceGCSTWDone()
1133 }
1134
1135
1136
1137
1138 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
1139 wakep()
1140 }
1141
1142 if add {
1143
1144
1145
1146
1147
1148
1149
1150 newm(mhelpgc, nil)
1151 }
1152 _g_.m.locks--
1153 if _g_.m.locks == 0 && _g_.preempt {
1154 _g_.stackguard0 = stackPreempt
1155 }
1156
1157 return startTime
1158 }
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170 func mstart() {
1171 _g_ := getg()
1172
1173 osStack := _g_.stack.lo == 0
1174 if osStack {
1175
1176
1177 size := _g_.stack.hi
1178 if size == 0 {
1179 size = 8192 * sys.StackGuardMultiplier
1180 }
1181 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1182 _g_.stack.lo = _g_.stack.hi - size + 1024
1183 }
1184
1185
1186 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1187 _g_.stackguard1 = _g_.stackguard0
1188 mstart1(0)
1189
1190
1191 if GOOS == "windows" || GOOS == "solaris" || GOOS == "plan9" {
1192
1193
1194
1195 osStack = true
1196 }
1197 mexit(osStack)
1198 }
1199
1200 func mstart1(dummy int32) {
1201 _g_ := getg()
1202
1203 if _g_ != _g_.m.g0 {
1204 throw("bad runtime·mstart")
1205 }
1206
1207
1208
1209
1210
1211 save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
1212 asminit()
1213 minit()
1214
1215
1216
1217 if _g_.m == &m0 {
1218 mstartm0()
1219 }
1220
1221 if fn := _g_.m.mstartfn; fn != nil {
1222 fn()
1223 }
1224
1225 if _g_.m.helpgc != 0 {
1226 _g_.m.helpgc = 0
1227 stopm()
1228 } else if _g_.m != &m0 {
1229 acquirep(_g_.m.nextp.ptr())
1230 _g_.m.nextp = 0
1231 }
1232 schedule()
1233 }
1234
1235
1236
1237
1238
1239
1240
1241 func mstartm0() {
1242
1243 if iscgo && !cgoHasExtraM {
1244 cgoHasExtraM = true
1245 newextram()
1246 }
1247 initsig(false)
1248 }
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260 func mexit(osStack bool) {
1261 g := getg()
1262 m := g.m
1263
1264 if m == &m0 {
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276 handoffp(releasep())
1277 lock(&sched.lock)
1278 sched.nmfreed++
1279 checkdead()
1280 unlock(&sched.lock)
1281 notesleep(&m.park)
1282 throw("locked m0 woke up")
1283 }
1284
1285 sigblock()
1286 unminit()
1287
1288
1289 if m.gsignal != nil {
1290 stackfree(m.gsignal.stack)
1291 }
1292
1293
1294 lock(&sched.lock)
1295 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
1296 if *pprev == m {
1297 *pprev = m.alllink
1298 goto found
1299 }
1300 }
1301 throw("m not found in allm")
1302 found:
1303 if !osStack {
1304
1305
1306
1307
1308 atomic.Store(&m.freeWait, 1)
1309
1310
1311
1312
1313 m.freelink = sched.freem
1314 sched.freem = m
1315 }
1316 unlock(&sched.lock)
1317
1318
1319 handoffp(releasep())
1320
1321
1322
1323
1324
1325 lock(&sched.lock)
1326 sched.nmfreed++
1327 checkdead()
1328 unlock(&sched.lock)
1329
1330 if osStack {
1331
1332
1333 return
1334 }
1335
1336
1337
1338
1339
1340 exitThread(&m.freeWait)
1341 }
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354 func forEachP(fn func(*p)) {
1355 mp := acquirem()
1356 _p_ := getg().m.p.ptr()
1357
1358 lock(&sched.lock)
1359 if sched.safePointWait != 0 {
1360 throw("forEachP: sched.safePointWait != 0")
1361 }
1362 sched.safePointWait = gomaxprocs - 1
1363 sched.safePointFn = fn
1364
1365
1366 for _, p := range allp {
1367 if p != _p_ {
1368 atomic.Store(&p.runSafePointFn, 1)
1369 }
1370 }
1371 preemptall()
1372
1373
1374
1375
1376
1377
1378
1379 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
1380 if atomic.Cas(&p.runSafePointFn, 1, 0) {
1381 fn(p)
1382 sched.safePointWait--
1383 }
1384 }
1385
1386 wait := sched.safePointWait > 0
1387 unlock(&sched.lock)
1388
1389
1390 fn(_p_)
1391
1392
1393
1394 for _, p := range allp {
1395 s := p.status
1396 if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
1397 if trace.enabled {
1398 traceGoSysBlock(p)
1399 traceProcStop(p)
1400 }
1401 p.syscalltick++
1402 handoffp(p)
1403 }
1404 }
1405
1406
1407 if wait {
1408 for {
1409
1410
1411
1412
1413 if notetsleep(&sched.safePointNote, 100*1000) {
1414 noteclear(&sched.safePointNote)
1415 break
1416 }
1417 preemptall()
1418 }
1419 }
1420 if sched.safePointWait != 0 {
1421 throw("forEachP: not done")
1422 }
1423 for _, p := range allp {
1424 if p.runSafePointFn != 0 {
1425 throw("forEachP: P did not run fn")
1426 }
1427 }
1428
1429 lock(&sched.lock)
1430 sched.safePointFn = nil
1431 unlock(&sched.lock)
1432 releasem(mp)
1433 }
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446 func runSafePointFn() {
1447 p := getg().m.p.ptr()
1448
1449
1450
1451 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
1452 return
1453 }
1454 sched.safePointFn(p)
1455 lock(&sched.lock)
1456 sched.safePointWait--
1457 if sched.safePointWait == 0 {
1458 notewakeup(&sched.safePointNote)
1459 }
1460 unlock(&sched.lock)
1461 }
1462
1463
1464
1465
1466 var cgoThreadStart unsafe.Pointer
1467
1468 type cgothreadstart struct {
1469 g guintptr
1470 tls *uint64
1471 fn unsafe.Pointer
1472 }
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482 func allocm(_p_ *p, fn func()) *m {
1483 _g_ := getg()
1484 _g_.m.locks++
1485 if _g_.m.p == 0 {
1486 acquirep(_p_)
1487 }
1488
1489
1490
1491 if sched.freem != nil {
1492 lock(&sched.lock)
1493 var newList *m
1494 for freem := sched.freem; freem != nil; {
1495 if freem.freeWait != 0 {
1496 next := freem.freelink
1497 freem.freelink = newList
1498 newList = freem
1499 freem = next
1500 continue
1501 }
1502 stackfree(freem.g0.stack)
1503 freem = freem.freelink
1504 }
1505 sched.freem = newList
1506 unlock(&sched.lock)
1507 }
1508
1509 mp := new(m)
1510 mp.mstartfn = fn
1511 mcommoninit(mp)
1512
1513
1514
1515 if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
1516 mp.g0 = malg(-1)
1517 } else {
1518 mp.g0 = malg(8192 * sys.StackGuardMultiplier)
1519 }
1520 mp.g0.m = mp
1521
1522 if _p_ == _g_.m.p.ptr() {
1523 releasep()
1524 }
1525 _g_.m.locks--
1526 if _g_.m.locks == 0 && _g_.preempt {
1527 _g_.stackguard0 = stackPreempt
1528 }
1529
1530 return mp
1531 }
1532
// needm is called on a thread that has no Go state (no m) when a cgo
// callback needs to run Go code. It takes an m from the extra-m list;
// dropm returns it when the callback is finished.
1567 func needm(x byte) {
1568 if iscgo && !cgoHasExtraM {
1569
1570
1571 write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
1572 exit(1)
1573 }
1574
1575
1576
1577
1578
1579 mp := lockextra(false)
1580
1581
1582
1583
1584
1585
1586
1587
1588 mp.needextram = mp.schedlink == 0
1589 extraMCount--
1590 unlockextra(mp.schedlink.ptr())
1591
1592
1593
1594
1595
1596
1597
1598 msigsave(mp)
1599 sigblock()
1600
1601
1602
1603
1604
1605
1606 setg(mp.g0)
1607 _g_ := getg()
1608 _g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
1609 _g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
1610 _g_.stackguard0 = _g_.stack.lo + _StackGuard
1611
1612
1613 asminit()
1614 minit()
1615
1616
1617 casgstatus(mp.curg, _Gdead, _Gsyscall)
1618 atomic.Xadd(&sched.ngsys, -1)
1619 }
1620
1621 var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")
1622
1623
1624
1625
1626 func newextram() {
1627 c := atomic.Xchg(&extraMWaiters, 0)
1628 if c > 0 {
1629 for i := uint32(0); i < c; i++ {
1630 oneNewExtraM()
1631 }
1632 } else {
1633
1634 mp := lockextra(true)
1635 unlockextra(mp)
1636 if mp == nil {
1637 oneNewExtraM()
1638 }
1639 }
1640 }
1641
1642
1643 func oneNewExtraM() {
1644
1645
1646
1647
1648
1649 mp := allocm(nil, nil)
1650 gp := malg(4096)
1651 gp.sched.pc = funcPC(goexit) + sys.PCQuantum
1652 gp.sched.sp = gp.stack.hi
1653 gp.sched.sp -= 4 * sys.RegSize
1654 gp.sched.lr = 0
1655 gp.sched.g = guintptr(unsafe.Pointer(gp))
1656 gp.syscallpc = gp.sched.pc
1657 gp.syscallsp = gp.sched.sp
1658 gp.stktopsp = gp.sched.sp
1659 gp.gcscanvalid = true
1660 gp.gcscandone = true
1661
1662
1663
1664
1665 casgstatus(gp, _Gidle, _Gdead)
1666 gp.m = mp
1667 mp.curg = gp
1668 mp.lockedInt++
1669 mp.lockedg.set(gp)
1670 gp.lockedm.set(mp)
1671 gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
1672 if raceenabled {
1673 gp.racectx = racegostart(funcPC(newextram) + sys.PCQuantum)
1674 }
1675
1676 allgadd(gp)
1677
1678
1679
1680
1681
1682 atomic.Xadd(&sched.ngsys, +1)
1683
1684
1685 mnext := lockextra(true)
1686 mp.schedlink.set(mnext)
1687 extraMCount++
1688 unlockextra(mp)
1689 }
1690
// dropm puts the m acquired by needm back on the extra-m list once the
// cgo callback is done, since the thread is returning to non-Go code.
1714 func dropm() {
1715
1716
1717
1718 mp := getg().m
1719
1720
1721 casgstatus(mp.curg, _Gsyscall, _Gdead)
1722 atomic.Xadd(&sched.ngsys, +1)
1723
1724
1725
1726
1727
1728 sigmask := mp.sigmask
1729 sigblock()
1730 unminit()
1731
1732 mnext := lockextra(true)
1733 extraMCount++
1734 mp.schedlink.set(mnext)
1735
1736 setg(nil)
1737
1738
1739 unlockextra(mp)
1740
1741 msigrestore(sigmask)
1742 }
1743
1744
1745 func getm() uintptr {
1746 return uintptr(unsafe.Pointer(getg().m))
1747 }
1748
1749 var extram uintptr
1750 var extraMCount uint32
1751 var extraMWaiters uint32
1752
1753
1754
1755
1756
1757
1758
1759 func lockextra(nilokay bool) *m {
1760 const locked = 1
1761
1762 incr := false
1763 for {
1764 old := atomic.Loaduintptr(&extram)
1765 if old == locked {
1766 yield := osyield
1767 yield()
1768 continue
1769 }
1770 if old == 0 && !nilokay {
1771 if !incr {
1772
1773
1774
1775 atomic.Xadd(&extraMWaiters, 1)
1776 incr = true
1777 }
1778 usleep(1)
1779 continue
1780 }
1781 if atomic.Casuintptr(&extram, old, locked) {
1782 return (*m)(unsafe.Pointer(old))
1783 }
1784 yield := osyield
1785 yield()
1786 continue
1787 }
1788 }
1789
1790
1791 func unlockextra(mp *m) {
1792 atomic.Storeuintptr(&extram, uintptr(unsafe.Pointer(mp)))
1793 }
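
lockextra and unlockextra use a single word, extram, as both a lock and a list head: 1 means locked, 0 means empty, and any other value is the head pointer, claimed with a compare-and-swap. A standalone sketch of that sentinel-locked word, using index+1 values instead of real pointers; the names and encoding are hypothetical, not the runtime's:

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// A sentinel-locked atomic word: one word is simultaneously the lock and
// the list head, as with extram.
const locked = ^uint32(0) // sentinel meaning "someone holds the word"

var head uint32 // 0 = empty list, locked = held, otherwise index+1 of the first item

type item struct{ next uint32 } // next uses the same index+1 encoding

var items []item

// lockList claims the word and returns the current head (0 if the list is empty).
func lockList() uint32 {
	for {
		old := atomic.LoadUint32(&head)
		if old == locked {
			runtime.Gosched() // someone else holds it; yield and retry
			continue
		}
		if atomic.CompareAndSwapUint32(&head, old, locked) {
			return old
		}
	}
}

// unlockList publishes a new head, releasing the word.
func unlockList(newHead uint32) {
	atomic.StoreUint32(&head, newHead)
}

func main() {
	items = make([]item, 3)
	// Push items 0, 1, 2 onto the list.
	for i := uint32(0); i < 3; i++ {
		h := lockList()
		items[i].next = h
		unlockList(i + 1)
	}
	// Pop one item.
	h := lockList()
	unlockList(items[h-1].next)
	fmt.Println("popped item index:", h-1)
}
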
1794
1795
1796
1797 var execLock rwmutex
1798
1799
1800
1801
1802 var newmHandoff struct {
1803 lock mutex
1804
1805
1806
1807 newm muintptr
1808
1809
1810
1811 waiting bool
1812 wake note
1813
1814
1815
1816
1817 haveTemplateThread uint32
1818 }
1819
1820
1821
1822
1823
1824 func newm(fn func(), _p_ *p) {
1825 mp := allocm(_p_, fn)
1826 mp.nextp.set(_p_)
1827 mp.sigmask = initSigmask
1828 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840 lock(&newmHandoff.lock)
1841 if newmHandoff.haveTemplateThread == 0 {
1842 throw("on a locked thread with no template thread")
1843 }
1844 mp.schedlink = newmHandoff.newm
1845 newmHandoff.newm.set(mp)
1846 if newmHandoff.waiting {
1847 newmHandoff.waiting = false
1848 notewakeup(&newmHandoff.wake)
1849 }
1850 unlock(&newmHandoff.lock)
1851 return
1852 }
1853 newm1(mp)
1854 }
1855
1856 func newm1(mp *m) {
1857 if iscgo {
1858 var ts cgothreadstart
1859 if _cgo_thread_start == nil {
1860 throw("_cgo_thread_start missing")
1861 }
1862 ts.g.set(mp.g0)
1863 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
1864 ts.fn = unsafe.Pointer(funcPC(mstart))
1865 if msanenabled {
1866 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
1867 }
1868 execLock.rlock()
1869 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
1870 execLock.runlock()
1871 return
1872 }
1873 execLock.rlock()
1874 newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
1875 execLock.runlock()
1876 }
1877
1878
1879
1880
1881
1882 func startTemplateThread() {
1883 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
1884 return
1885 }
1886 newm(templateThread, nil)
1887 }
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901 func templateThread() {
1902 lock(&sched.lock)
1903 sched.nmsys++
1904 checkdead()
1905 unlock(&sched.lock)
1906
1907 for {
1908 lock(&newmHandoff.lock)
1909 for newmHandoff.newm != 0 {
1910 newm := newmHandoff.newm.ptr()
1911 newmHandoff.newm = 0
1912 unlock(&newmHandoff.lock)
1913 for newm != nil {
1914 next := newm.schedlink.ptr()
1915 newm.schedlink = 0
1916 newm1(newm)
1917 newm = next
1918 }
1919 lock(&newmHandoff.lock)
1920 }
1921 newmHandoff.waiting = true
1922 noteclear(&newmHandoff.wake)
1923 unlock(&newmHandoff.lock)
1924 notesleep(&newmHandoff.wake)
1925 }
1926 }
1927
1928
1929
1930 func stopm() {
1931 _g_ := getg()
1932
1933 if _g_.m.locks != 0 {
1934 throw("stopm holding locks")
1935 }
1936 if _g_.m.p != 0 {
1937 throw("stopm holding p")
1938 }
1939 if _g_.m.spinning {
1940 throw("stopm spinning")
1941 }
1942
1943 retry:
1944 lock(&sched.lock)
1945 mput(_g_.m)
1946 unlock(&sched.lock)
1947 notesleep(&_g_.m.park)
1948 noteclear(&_g_.m.park)
1949 if _g_.m.helpgc != 0 {
1950
1951 gchelper()
1952
1953 _g_.m.helpgc = 0
1954 _g_.m.mcache = nil
1955 _g_.m.p = 0
1956 goto retry
1957 }
1958 acquirep(_g_.m.nextp.ptr())
1959 _g_.m.nextp = 0
1960 }
1961
1962 func mspinning() {
1963
1964 getg().m.spinning = true
1965 }
1966
1967
1968
1969
1970
1971
1972
1973 func startm(_p_ *p, spinning bool) {
1974 lock(&sched.lock)
1975 if _p_ == nil {
1976 _p_ = pidleget()
1977 if _p_ == nil {
1978 unlock(&sched.lock)
1979 if spinning {
1980
1981
1982 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
1983 throw("startm: negative nmspinning")
1984 }
1985 }
1986 return
1987 }
1988 }
1989 mp := mget()
1990 unlock(&sched.lock)
1991 if mp == nil {
1992 var fn func()
1993 if spinning {
1994
1995 fn = mspinning
1996 }
1997 newm(fn, _p_)
1998 return
1999 }
2000 if mp.spinning {
2001 throw("startm: m is spinning")
2002 }
2003 if mp.nextp != 0 {
2004 throw("startm: m has p")
2005 }
2006 if spinning && !runqempty(_p_) {
2007 throw("startm: p has runnable gs")
2008 }
2009
2010 mp.spinning = spinning
2011 mp.nextp.set(_p_)
2012 notewakeup(&mp.park)
2013 }
2014
2015
2016
2017
2018 func handoffp(_p_ *p) {
2019
2020
2021
2022
2023 if !runqempty(_p_) || sched.runqsize != 0 {
2024 startm(_p_, false)
2025 return
2026 }
2027
2028 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
2029 startm(_p_, false)
2030 return
2031 }
2032
2033
2034 if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) {
2035 startm(_p_, true)
2036 return
2037 }
2038 lock(&sched.lock)
2039 if sched.gcwaiting != 0 {
2040 _p_.status = _Pgcstop
2041 sched.stopwait--
2042 if sched.stopwait == 0 {
2043 notewakeup(&sched.stopnote)
2044 }
2045 unlock(&sched.lock)
2046 return
2047 }
2048 if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
2049 sched.safePointFn(_p_)
2050 sched.safePointWait--
2051 if sched.safePointWait == 0 {
2052 notewakeup(&sched.safePointNote)
2053 }
2054 }
2055 if sched.runqsize != 0 {
2056 unlock(&sched.lock)
2057 startm(_p_, false)
2058 return
2059 }
2060
2061
2062 if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
2063 unlock(&sched.lock)
2064 startm(_p_, false)
2065 return
2066 }
2067 pidleput(_p_)
2068 unlock(&sched.lock)
2069 }
2070
2071
2072
2073 func wakep() {
2074
2075 if !atomic.Cas(&sched.nmspinning, 0, 1) {
2076 return
2077 }
2078 startm(nil, true)
2079 }
2080
2081
2082
2083 func stoplockedm() {
2084 _g_ := getg()
2085
2086 if _g_.m.lockedg == 0 || _g_.m.lockedg.ptr().lockedm.ptr() != _g_.m {
2087 throw("stoplockedm: inconsistent locking")
2088 }
2089 if _g_.m.p != 0 {
2090
2091 _p_ := releasep()
2092 handoffp(_p_)
2093 }
2094 incidlelocked(1)
2095
2096 notesleep(&_g_.m.park)
2097 noteclear(&_g_.m.park)
2098 status := readgstatus(_g_.m.lockedg.ptr())
2099 if status&^_Gscan != _Grunnable {
2100 print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
2101 dumpgstatus(_g_)
2102 throw("stoplockedm: not runnable")
2103 }
2104 acquirep(_g_.m.nextp.ptr())
2105 _g_.m.nextp = 0
2106 }
2107
2108
2109
2110
2111 func startlockedm(gp *g) {
2112 _g_ := getg()
2113
2114 mp := gp.lockedm.ptr()
2115 if mp == _g_.m {
2116 throw("startlockedm: locked to me")
2117 }
2118 if mp.nextp != 0 {
2119 throw("startlockedm: m has p")
2120 }
2121
2122 incidlelocked(-1)
2123 _p_ := releasep()
2124 mp.nextp.set(_p_)
2125 notewakeup(&mp.park)
2126 stopm()
2127 }
2128
2129
2130
2131 func gcstopm() {
2132 _g_ := getg()
2133
2134 if sched.gcwaiting == 0 {
2135 throw("gcstopm: not waiting for gc")
2136 }
2137 if _g_.m.spinning {
2138 _g_.m.spinning = false
2139
2140
2141 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2142 throw("gcstopm: negative nmspinning")
2143 }
2144 }
2145 _p_ := releasep()
2146 lock(&sched.lock)
2147 _p_.status = _Pgcstop
2148 sched.stopwait--
2149 if sched.stopwait == 0 {
2150 notewakeup(&sched.stopnote)
2151 }
2152 unlock(&sched.lock)
2153 stopm()
2154 }
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165 func execute(gp *g, inheritTime bool) {
2166 _g_ := getg()
2167
2168 casgstatus(gp, _Grunnable, _Grunning)
2169 gp.waitsince = 0
2170 gp.preempt = false
2171 gp.stackguard0 = gp.stack.lo + _StackGuard
2172 if !inheritTime {
2173 _g_.m.p.ptr().schedtick++
2174 }
2175 _g_.m.curg = gp
2176 gp.m = _g_.m
2177
2178
2179 hz := sched.profilehz
2180 if _g_.m.profilehz != hz {
2181 setThreadCPUProfiler(hz)
2182 }
2183
2184 if trace.enabled {
2185
2186
2187 if gp.syscallsp != 0 && gp.sysblocktraced {
2188 traceGoSysExit(gp.sysexitticks)
2189 }
2190 traceGoStart()
2191 }
2192
2193 gogo(&gp.sched)
2194 }
2195
2196
2197
2198 func findrunnable() (gp *g, inheritTime bool) {
2199 _g_ := getg()
2200
2201
2202
2203
2204
2205 top:
2206 _p_ := _g_.m.p.ptr()
2207 if sched.gcwaiting != 0 {
2208 gcstopm()
2209 goto top
2210 }
2211 if _p_.runSafePointFn != 0 {
2212 runSafePointFn()
2213 }
2214 if fingwait && fingwake {
2215 if gp := wakefing(); gp != nil {
2216 ready(gp, 0, true)
2217 }
2218 }
2219 if *cgo_yield != nil {
2220 asmcgocall(*cgo_yield, nil)
2221 }
2222
2223
2224 if gp, inheritTime := runqget(_p_); gp != nil {
2225 return gp, inheritTime
2226 }
2227
2228
2229 if sched.runqsize != 0 {
2230 lock(&sched.lock)
2231 gp := globrunqget(_p_, 0)
2232 unlock(&sched.lock)
2233 if gp != nil {
2234 return gp, false
2235 }
2236 }
2237
2238
2239
2240
2241
2242
2243
2244
2245 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Load64(&sched.lastpoll) != 0 {
2246 if gp := netpoll(false); gp != nil {
2247
2248 injectglist(gp.schedlink.ptr())
2249 casgstatus(gp, _Gwaiting, _Grunnable)
2250 if trace.enabled {
2251 traceGoUnpark(gp, 0)
2252 }
2253 return gp, false
2254 }
2255 }
2256
2257
2258 procs := uint32(gomaxprocs)
2259 if atomic.Load(&sched.npidle) == procs-1 {
2260
2261
2262
2263 goto stop
2264 }
2265
2266
2267
2268 if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= procs-atomic.Load(&sched.npidle) {
2269 goto stop
2270 }
2271 if !_g_.m.spinning {
2272 _g_.m.spinning = true
2273 atomic.Xadd(&sched.nmspinning, 1)
2274 }
2275 for i := 0; i < 4; i++ {
2276 for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() {
2277 if sched.gcwaiting != 0 {
2278 goto top
2279 }
2280 stealRunNextG := i > 2
2281 if gp := runqsteal(_p_, allp[enum.position()], stealRunNextG); gp != nil {
2282 return gp, false
2283 }
2284 }
2285 }
2286
2287 stop:
2288
2289
2290
2291
2292 if gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
2293 _p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
2294 gp := _p_.gcBgMarkWorker.ptr()
2295 casgstatus(gp, _Gwaiting, _Grunnable)
2296 if trace.enabled {
2297 traceGoUnpark(gp, 0)
2298 }
2299 return gp, false
2300 }
2301
2302
2303
2304
2305
2306 allpSnapshot := allp
2307
2308
2309 lock(&sched.lock)
2310 if sched.gcwaiting != 0 || _p_.runSafePointFn != 0 {
2311 unlock(&sched.lock)
2312 goto top
2313 }
2314 if sched.runqsize != 0 {
2315 gp := globrunqget(_p_, 0)
2316 unlock(&sched.lock)
2317 return gp, false
2318 }
2319 if releasep() != _p_ {
2320 throw("findrunnable: wrong p")
2321 }
2322 pidleput(_p_)
2323 unlock(&sched.lock)
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338 wasSpinning := _g_.m.spinning
2339 if _g_.m.spinning {
2340 _g_.m.spinning = false
2341 if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
2342 throw("findrunnable: negative nmspinning")
2343 }
2344 }
2345
2346
2347 for _, _p_ := range allpSnapshot {
2348 if !runqempty(_p_) {
2349 lock(&sched.lock)
2350 _p_ = pidleget()
2351 unlock(&sched.lock)
2352 if _p_ != nil {
2353 acquirep(_p_)
2354 if wasSpinning {
2355 _g_.m.spinning = true
2356 atomic.Xadd(&sched.nmspinning, 1)
2357 }
2358 goto top
2359 }
2360 break
2361 }
2362 }
2363
2364
2365 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(nil) {
2366 lock(&sched.lock)
2367 _p_ = pidleget()
2368 if _p_ != nil && _p_.gcBgMarkWorker == 0 {
2369 pidleput(_p_)
2370 _p_ = nil
2371 }
2372 unlock(&sched.lock)
2373 if _p_ != nil {
2374 acquirep(_p_)
2375 if wasSpinning {
2376 _g_.m.spinning = true
2377 atomic.Xadd(&sched.nmspinning, 1)
2378 }
2379
2380 goto stop
2381 }
2382 }
2383
2384
2385 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
2386 if _g_.m.p != 0 {
2387 throw("findrunnable: netpoll with p")
2388 }
2389 if _g_.m.spinning {
2390 throw("findrunnable: netpoll with spinning")
2391 }
2392 gp := netpoll(true)
2393 atomic.Store64(&sched.lastpoll, uint64(nanotime()))
2394 if gp != nil {
2395 lock(&sched.lock)
2396 _p_ = pidleget()
2397 unlock(&sched.lock)
2398 if _p_ != nil {
2399 acquirep(_p_)
2400 injectglist(gp.schedlink.ptr())
2401 casgstatus(gp, _Gwaiting, _Grunnable)
2402 if trace.enabled {
2403 traceGoUnpark(gp, 0)
2404 }
2405 return gp, false
2406 }
2407 injectglist(gp)
2408 }
2409 }
2410 stopm()
2411 goto top
2412 }
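
findrunnable checks the local run queue, then the global queue, then a non-blocking netpoll, and finally makes up to four passes stealing from other Ps in a randomized order before parking the M. A self-contained sketch of just the random-victim stealing step, with hypothetical worker and task types:

package main

import (
	"fmt"
	"math/rand"
	"sync"
)

// worker has a private queue of tasks; idle workers steal from random victims.
type worker struct {
	mu    sync.Mutex
	tasks []int
}

func (w *worker) pop() (int, bool) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if len(w.tasks) == 0 {
		return 0, false
	}
	t := w.tasks[len(w.tasks)-1]
	w.tasks = w.tasks[:len(w.tasks)-1]
	return t, true
}

func (w *worker) steal(victim *worker) (int, bool) {
	victim.mu.Lock()
	defer victim.mu.Unlock()
	if len(victim.tasks) == 0 {
		return 0, false
	}
	// Take from the front, leaving the victim its most recent work.
	t := victim.tasks[0]
	victim.tasks = victim.tasks[1:]
	return t, true
}

// findTask mirrors the stealing loop: try locally, then a few rounds over
// the other workers starting at a random position.
func findTask(self int, workers []*worker) (int, bool) {
	if t, ok := workers[self].pop(); ok {
		return t, true
	}
	for round := 0; round < 4; round++ {
		start := rand.Intn(len(workers))
		for i := 0; i < len(workers); i++ {
			v := (start + i) % len(workers)
			if v == self {
				continue
			}
			if t, ok := workers[self].steal(workers[v]); ok {
				return t, true
			}
		}
	}
	return 0, false // no work anywhere; a real scheduler would park here
}

func main() {
	workers := []*worker{{}, {tasks: []int{1, 2, 3}}, {}}
	t, ok := findTask(0, workers)
	fmt.Println("stole task:", t, ok)
}
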
2413
2414
2415
2416
2417
2418 func pollWork() bool {
2419 if sched.runqsize != 0 {
2420 return true
2421 }
2422 p := getg().m.p.ptr()
2423 if !runqempty(p) {
2424 return true
2425 }
2426 if netpollinited() && atomic.Load(&netpollWaiters) > 0 && sched.lastpoll != 0 {
2427 if gp := netpoll(false); gp != nil {
2428 injectglist(gp)
2429 return true
2430 }
2431 }
2432 return false
2433 }
2434
2435 func resetspinning() {
2436 _g_ := getg()
2437 if !_g_.m.spinning {
2438 throw("resetspinning: not a spinning m")
2439 }
2440 _g_.m.spinning = false
2441 nmspinning := atomic.Xadd(&sched.nmspinning, -1)
2442 if int32(nmspinning) < 0 {
2443 throw("findrunnable: negative nmspinning")
2444 }
2445
2446
2447
2448 if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
2449 wakep()
2450 }
2451 }
2452
2453
2454
2455 func injectglist(glist *g) {
2456 if glist == nil {
2457 return
2458 }
2459 if trace.enabled {
2460 for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
2461 traceGoUnpark(gp, 0)
2462 }
2463 }
2464 lock(&sched.lock)
2465 var n int
2466 for n = 0; glist != nil; n++ {
2467 gp := glist
2468 glist = gp.schedlink.ptr()
2469 casgstatus(gp, _Gwaiting, _Grunnable)
2470 globrunqput(gp)
2471 }
2472 unlock(&sched.lock)
2473 for ; n != 0 && sched.npidle != 0; n-- {
2474 startm(nil, false)
2475 }
2476 }
2477
2478
2479
2480 func schedule() {
2481 _g_ := getg()
2482
2483 if _g_.m.locks != 0 {
2484 throw("schedule: holding locks")
2485 }
2486
2487 if _g_.m.lockedg != 0 {
2488 stoplockedm()
2489 execute(_g_.m.lockedg.ptr(), false)
2490 }
2491
2492
2493
2494 if _g_.m.incgo {
2495 throw("schedule: in cgo")
2496 }
2497
2498 top:
2499 if sched.gcwaiting != 0 {
2500 gcstopm()
2501 goto top
2502 }
2503 if _g_.m.p.ptr().runSafePointFn != 0 {
2504 runSafePointFn()
2505 }
2506
2507 var gp *g
2508 var inheritTime bool
2509 if trace.enabled || trace.shutdown {
2510 gp = traceReader()
2511 if gp != nil {
2512 casgstatus(gp, _Gwaiting, _Grunnable)
2513 traceGoUnpark(gp, 0)
2514 }
2515 }
2516 if gp == nil && gcBlackenEnabled != 0 {
2517 gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
2518 }
2519 if gp == nil {
2520
2521
2522
2523 if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
2524 lock(&sched.lock)
2525 gp = globrunqget(_g_.m.p.ptr(), 1)
2526 unlock(&sched.lock)
2527 }
2528 }
2529 if gp == nil {
2530 gp, inheritTime = runqget(_g_.m.p.ptr())
2531 if gp != nil && _g_.m.spinning {
2532 throw("schedule: spinning with local work")
2533 }
2534 }
2535 if gp == nil {
2536 gp, inheritTime = findrunnable()
2537 }
2538
2539
2540
2541
2542 if _g_.m.spinning {
2543 resetspinning()
2544 }
2545
2546 if gp.lockedm != 0 {
2547
2548
2549 startlockedm(gp)
2550 goto top
2551 }
2552
2553 execute(gp, inheritTime)
2554 }
2555
2556
2557
2558
2559
2560
2561
2562
2563 func dropg() {
2564 _g_ := getg()
2565
2566 setMNoWB(&_g_.m.curg.m, nil)
2567 setGNoWB(&_g_.m.curg, nil)
2568 }
2569
2570 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
2571 unlock((*mutex)(lock))
2572 return true
2573 }
2574
2575
2576 func park_m(gp *g) {
2577 _g_ := getg()
2578
2579 if trace.enabled {
2580 traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip)
2581 }
2582
2583 casgstatus(gp, _Grunning, _Gwaiting)
2584 dropg()
2585
2586 if _g_.m.waitunlockf != nil {
2587 fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
2588 ok := fn(gp, _g_.m.waitlock)
2589 _g_.m.waitunlockf = nil
2590 _g_.m.waitlock = nil
2591 if !ok {
2592 if trace.enabled {
2593 traceGoUnpark(gp, 2)
2594 }
2595 casgstatus(gp, _Gwaiting, _Grunnable)
2596 execute(gp, true)
2597 }
2598 }
2599 schedule()
2600 }
2601
2602 func goschedImpl(gp *g) {
2603 status := readgstatus(gp)
2604 if status&^_Gscan != _Grunning {
2605 dumpgstatus(gp)
2606 throw("bad g status")
2607 }
2608 casgstatus(gp, _Grunning, _Grunnable)
2609 dropg()
2610 lock(&sched.lock)
2611 globrunqput(gp)
2612 unlock(&sched.lock)
2613
2614 schedule()
2615 }
2616
2617
2618 func gosched_m(gp *g) {
2619 if trace.enabled {
2620 traceGoSched()
2621 }
2622 goschedImpl(gp)
2623 }
2624
2625
2626 func goschedguarded_m(gp *g) {
2627
2628 if gp.m.locks != 0 || gp.m.mallocing != 0 || gp.m.preemptoff != "" || gp.m.p.ptr().status != _Prunning {
2629 gogo(&gp.sched)
2630 }
2631
2632 if trace.enabled {
2633 traceGoSched()
2634 }
2635 goschedImpl(gp)
2636 }
2637
2638 func gopreempt_m(gp *g) {
2639 if trace.enabled {
2640 traceGoPreempt()
2641 }
2642 goschedImpl(gp)
2643 }
2644
2645
2646 func goexit1() {
2647 if raceenabled {
2648 racegoend()
2649 }
2650 if trace.enabled {
2651 traceGoEnd()
2652 }
2653 mcall(goexit0)
2654 }
2655
2656
2657 func goexit0(gp *g) {
2658 _g_ := getg()
2659
2660 casgstatus(gp, _Grunning, _Gdead)
2661 if isSystemGoroutine(gp) {
2662 atomic.Xadd(&sched.ngsys, -1)
2663 }
2664 gp.m = nil
2665 locked := gp.lockedm != 0
2666 gp.lockedm = 0
2667 _g_.m.lockedg = 0
2668 gp.paniconfault = false
2669 gp._defer = nil
2670 gp._panic = nil
2671 gp.writebuf = nil
2672 gp.waitreason = ""
2673 gp.param = nil
2674 gp.labels = nil
2675 gp.timer = nil
2676
2677 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
2678
2679
2680
2681 scanCredit := int64(gcController.assistWorkPerByte * float64(gp.gcAssistBytes))
2682 atomic.Xaddint64(&gcController.bgScanCredit, scanCredit)
2683 gp.gcAssistBytes = 0
2684 }
2685
2686
2687
2688 gp.gcscanvalid = true
2689 dropg()
2690
2691 if _g_.m.lockedInt != 0 {
2692 print("invalid m->lockedInt = ", _g_.m.lockedInt, "\n")
2693 throw("internal lockOSThread error")
2694 }
2695 _g_.m.lockedExt = 0
2696 gfput(_g_.m.p.ptr(), gp)
2697 if locked {
2698
2699
2700
2701
2702
2703
2704 if GOOS != "plan9" {
2705 gogo(&_g_.m.g0.sched)
2706 }
2707 }
2708 schedule()
2709 }
2710
2711
2712
2713
2714
2715
2716
2717
2718
2719 func save(pc, sp uintptr) {
2720 _g_ := getg()
2721
2722 _g_.sched.pc = pc
2723 _g_.sched.sp = sp
2724 _g_.sched.lr = 0
2725 _g_.sched.ret = 0
2726 _g_.sched.g = guintptr(unsafe.Pointer(_g_))
2727
2728
2729
2730 if _g_.sched.ctxt != nil {
2731 badctxt()
2732 }
2733 }
2734
// reentersyscall is the common entry path for a goroutine making a system
// call. It saves the caller's PC and SP so the stack can be traced while
// the g is in _Gsyscall, and leaves the P in _Psyscall so it can be
// retaken if the call blocks for too long.
2772 func reentersyscall(pc, sp uintptr) {
2773 _g_ := getg()
2774
2775
2776
2777 _g_.m.locks++
2778
2779
2780
2781
2782
2783 _g_.stackguard0 = stackPreempt
2784 _g_.throwsplit = true
2785
2786
2787 save(pc, sp)
2788 _g_.syscallsp = sp
2789 _g_.syscallpc = pc
2790 casgstatus(_g_, _Grunning, _Gsyscall)
2791 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2792 systemstack(func() {
2793 print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2794 throw("entersyscall")
2795 })
2796 }
2797
2798 if trace.enabled {
2799 systemstack(traceGoSysCall)
2800
2801
2802
2803 save(pc, sp)
2804 }
2805
2806 if atomic.Load(&sched.sysmonwait) != 0 {
2807 systemstack(entersyscall_sysmon)
2808 save(pc, sp)
2809 }
2810
2811 if _g_.m.p.ptr().runSafePointFn != 0 {
2812
2813 systemstack(runSafePointFn)
2814 save(pc, sp)
2815 }
2816
2817 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2818 _g_.sysblocktraced = true
2819 _g_.m.mcache = nil
2820 _g_.m.p.ptr().m = 0
2821 atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
2822 if sched.gcwaiting != 0 {
2823 systemstack(entersyscall_gcwait)
2824 save(pc, sp)
2825 }
2826
2827
2828
2829
2830 _g_.stackguard0 = stackPreempt
2831 _g_.m.locks--
2832 }
2833
2834
2835
2836 func entersyscall(dummy int32) {
2837 reentersyscall(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
2838 }
2839
2840 func entersyscall_sysmon() {
2841 lock(&sched.lock)
2842 if atomic.Load(&sched.sysmonwait) != 0 {
2843 atomic.Store(&sched.sysmonwait, 0)
2844 notewakeup(&sched.sysmonnote)
2845 }
2846 unlock(&sched.lock)
2847 }
2848
2849 func entersyscall_gcwait() {
2850 _g_ := getg()
2851 _p_ := _g_.m.p.ptr()
2852
2853 lock(&sched.lock)
2854 if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
2855 if trace.enabled {
2856 traceGoSysBlock(_p_)
2857 traceProcStop(_p_)
2858 }
2859 _p_.syscalltick++
2860 if sched.stopwait--; sched.stopwait == 0 {
2861 notewakeup(&sched.stopnote)
2862 }
2863 }
2864 unlock(&sched.lock)
2865 }
2866
2867
2868
2869 func entersyscallblock(dummy int32) {
2870 _g_ := getg()
2871
2872 _g_.m.locks++
2873 _g_.throwsplit = true
2874 _g_.stackguard0 = stackPreempt
2875 _g_.m.syscalltick = _g_.m.p.ptr().syscalltick
2876 _g_.sysblocktraced = true
2877 _g_.m.p.ptr().syscalltick++
2878
2879
2880 pc := getcallerpc()
2881 sp := getcallersp(unsafe.Pointer(&dummy))
2882 save(pc, sp)
2883 _g_.syscallsp = _g_.sched.sp
2884 _g_.syscallpc = _g_.sched.pc
2885 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2886 sp1 := sp
2887 sp2 := _g_.sched.sp
2888 sp3 := _g_.syscallsp
2889 systemstack(func() {
2890 print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2891 throw("entersyscallblock")
2892 })
2893 }
2894 casgstatus(_g_, _Grunning, _Gsyscall)
2895 if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
2896 systemstack(func() {
2897 print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
2898 throw("entersyscallblock")
2899 })
2900 }
2901
2902 systemstack(entersyscallblock_handoff)
2903
2904
2905 save(getcallerpc(), getcallersp(unsafe.Pointer(&dummy)))
2906
2907 _g_.m.locks--
2908 }
2909
2910 func entersyscallblock_handoff() {
2911 if trace.enabled {
2912 traceGoSysCall()
2913 traceGoSysBlock(getg().m.p.ptr())
2914 }
2915 handoffp(releasep())
2916 }
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927 func exitsyscall(dummy int32) {
2928 _g_ := getg()
2929
2930 _g_.m.locks++
2931 if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
2932
2933
2934
2935 systemstack(func() {
2936 throw("exitsyscall: syscall frame is no longer valid")
2937 })
2938 }
2939
2940 _g_.waitsince = 0
2941 oldp := _g_.m.p.ptr()
2942 if exitsyscallfast() {
2943 if _g_.m.mcache == nil {
2944 systemstack(func() {
2945 throw("lost mcache")
2946 })
2947 }
2948 if trace.enabled {
2949 if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
2950 systemstack(traceGoStart)
2951 }
2952 }
2953
2954 _g_.m.p.ptr().syscalltick++
2955
2956 casgstatus(_g_, _Gsyscall, _Grunning)
2957
2958
2959
2960 _g_.syscallsp = 0
2961 _g_.m.locks--
2962 if _g_.preempt {
2963
2964 _g_.stackguard0 = stackPreempt
2965 } else {
2966
2967 _g_.stackguard0 = _g_.stack.lo + _StackGuard
2968 }
2969 _g_.throwsplit = false
2970 return
2971 }
2972
2973 _g_.sysexitticks = 0
2974 if trace.enabled {
2975
2976
2977 for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
2978 osyield()
2979 }
2980
2981
2982
2983
2984 _g_.sysexitticks = cputicks()
2985 }
2986
2987 _g_.m.locks--
2988
2989
2990 mcall(exitsyscall0)
2991
2992 if _g_.m.mcache == nil {
2993 systemstack(func() {
2994 throw("lost mcache")
2995 })
2996 }
2997
2998
2999
3000
3001
3002
3003
3004 _g_.syscallsp = 0
3005 _g_.m.p.ptr().syscalltick++
3006 _g_.throwsplit = false
3007 }
3008
3009
3010 func exitsyscallfast() bool {
3011 _g_ := getg()
3012
3013
3014 if sched.stopwait == freezeStopWait {
3015 _g_.m.mcache = nil
3016 _g_.m.p = 0
3017 return false
3018 }
3019
3020
3021 if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
3022
3023 exitsyscallfast_reacquired()
3024 return true
3025 }
3026
3027
3028 oldp := _g_.m.p.ptr()
3029 _g_.m.mcache = nil
3030 _g_.m.p = 0
3031 if sched.pidle != 0 {
3032 var ok bool
3033 systemstack(func() {
3034 ok = exitsyscallfast_pidle()
3035 if ok && trace.enabled {
3036 if oldp != nil {
3037
3038
3039 for oldp.syscalltick == _g_.m.syscalltick {
3040 osyield()
3041 }
3042 }
3043 traceGoSysExit(0)
3044 }
3045 })
3046 if ok {
3047 return true
3048 }
3049 }
3050 return false
3051 }
3052
3053
3054
3055
3056
3057
3058
3059
3060
3061
3062 func exitsyscallfast_reacquired() {
3063 _g_ := getg()
3064 _g_.m.mcache = _g_.m.p.ptr().mcache
3065 _g_.m.p.ptr().m.set(_g_.m)
3066 if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
3067 if trace.enabled {
3068
3069
3070
3071 systemstack(func() {
3072
3073 traceGoSysBlock(_g_.m.p.ptr())
3074
3075 traceGoSysExit(0)
3076 })
3077 }
3078 _g_.m.p.ptr().syscalltick++
3079 }
3080 }
3081
3082 func exitsyscallfast_pidle() bool {
3083 lock(&sched.lock)
3084 _p_ := pidleget()
3085 if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
3086 atomic.Store(&sched.sysmonwait, 0)
3087 notewakeup(&sched.sysmonnote)
3088 }
3089 unlock(&sched.lock)
3090 if _p_ != nil {
3091 acquirep(_p_)
3092 return true
3093 }
3094 return false
3095 }
3096
3097
3098
3099
3100
3101 func exitsyscall0(gp *g) {
3102 _g_ := getg()
3103
3104 casgstatus(gp, _Gsyscall, _Grunnable)
3105 dropg()
3106 lock(&sched.lock)
3107 _p_ := pidleget()
3108 if _p_ == nil {
3109 globrunqput(gp)
3110 } else if atomic.Load(&sched.sysmonwait) != 0 {
3111 atomic.Store(&sched.sysmonwait, 0)
3112 notewakeup(&sched.sysmonnote)
3113 }
3114 unlock(&sched.lock)
3115 if _p_ != nil {
3116 acquirep(_p_)
3117 execute(gp, false)
3118 }
3119 if _g_.m.lockedg != 0 {
3120
3121 stoplockedm()
3122 execute(gp, false)
3123 }
3124 stopm()
3125 schedule()
3126 }
3127
3128 func beforefork() {
3129 gp := getg().m.curg
3130
3131
3132
3133
3134 gp.m.locks++
3135 msigsave(gp.m)
3136 sigblock()
3137
3138
3139
3140
3141
3142 gp.stackguard0 = stackFork
3143 }
3144
3145
3146
3147
3148 func syscall_runtime_BeforeFork() {
3149 systemstack(beforefork)
3150 }
3151
3152 func afterfork() {
3153 gp := getg().m.curg
3154
3155
3156 gp.stackguard0 = gp.stack.lo + _StackGuard
3157
3158 msigrestore(gp.m.sigmask)
3159
3160 gp.m.locks--
3161 }
3162
3163
3164
3165
3166 func syscall_runtime_AfterFork() {
3167 systemstack(afterfork)
3168 }
3169
3170
3171
3172 var inForkedChild bool
3173
3174
3175
3176
3177
3178
3179
3180
3181
3182
3183
3184
3185 func syscall_runtime_AfterForkInChild() {
3186
3187
3188
3189
3190 inForkedChild = true
3191
3192 clearSignalHandlers()
3193
3194
3195
3196 msigrestore(getg().m.sigmask)
3197
3198 inForkedChild = false
3199 }
3200
3201
3202
3203 func syscall_runtime_BeforeExec() {
3204
3205 execLock.lock()
3206 }
3207
3208
3209
3210 func syscall_runtime_AfterExec() {
3211 execLock.unlock()
3212 }
3213
3214
3215 func malg(stacksize int32) *g {
3216 newg := new(g)
3217 if stacksize >= 0 {
3218 stacksize = round2(_StackSystem + stacksize)
3219 systemstack(func() {
3220 newg.stack = stackalloc(uint32(stacksize))
3221 })
3222 newg.stackguard0 = newg.stack.lo + _StackGuard
3223 newg.stackguard1 = ^uintptr(0)
3224 }
3225 return newg
3226 }
3227
3228
3229
3230
3231
3232
3233
3234
3235 func newproc(siz int32, fn *funcval) {
3236 argp := add(unsafe.Pointer(&fn), sys.PtrSize)
3237 pc := getcallerpc()
3238 systemstack(func() {
3239 newproc1(fn, (*uint8)(argp), siz, pc)
3240 })
3241 }
3242
3243
3244
3245
3246 func newproc1(fn *funcval, argp *uint8, narg int32, callerpc uintptr) {
3247 _g_ := getg()
3248
3249 if fn == nil {
3250 _g_.m.throwing = -1
3251 throw("go of nil func value")
3252 }
3253 _g_.m.locks++
3254 siz := narg
3255 siz = (siz + 7) &^ 7
3256
3257
3258
3259
3260
3261 if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
3262 throw("newproc: function arguments too large for new goroutine")
3263 }
3264
3265 _p_ := _g_.m.p.ptr()
3266 newg := gfget(_p_)
3267 if newg == nil {
3268 newg = malg(_StackMin)
3269 casgstatus(newg, _Gidle, _Gdead)
3270 allgadd(newg)
3271 }
3272 if newg.stack.hi == 0 {
3273 throw("newproc1: newg missing stack")
3274 }
3275
3276 if readgstatus(newg) != _Gdead {
3277 throw("newproc1: new g is not Gdead")
3278 }
3279
3280 totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize
3281 totalSize += -totalSize & (sys.SpAlign - 1)
3282 sp := newg.stack.hi - totalSize
3283 spArg := sp
3284 if usesLR {
3285
3286 *(*uintptr)(unsafe.Pointer(sp)) = 0
3287 prepGoExitFrame(sp)
3288 spArg += sys.MinFrameSize
3289 }
3290 if narg > 0 {
3291 memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))
3292
3293
3294
3295
3296
3297
3298 if writeBarrier.needed && !_g_.m.curg.gcscandone {
3299 f := findfunc(fn.fn)
3300 stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
3301
3302 bv := stackmapdata(stkmap, 0)
3303 bulkBarrierBitmap(spArg, spArg, uintptr(narg), 0, bv.bytedata)
3304 }
3305 }
3306
3307 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
3308 newg.sched.sp = sp
3309 newg.stktopsp = sp
3310 newg.sched.pc = funcPC(goexit) + sys.PCQuantum
3311 newg.sched.g = guintptr(unsafe.Pointer(newg))
3312 gostartcallfn(&newg.sched, fn)
3313 newg.gopc = callerpc
3314 newg.startpc = fn.fn
3315 if _g_.m.curg != nil {
3316 newg.labels = _g_.m.curg.labels
3317 }
3318 if isSystemGoroutine(newg) {
3319 atomic.Xadd(&sched.ngsys, +1)
3320 }
3321 newg.gcscanvalid = false
3322 casgstatus(newg, _Gdead, _Grunnable)
3323
3324 if _p_.goidcache == _p_.goidcacheend {
3325 // Sched.goidgen is the last allocated id,
3326 // this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
3327 // At startup sched.goidgen=0, so main goroutine receives goid=1.
3328 _p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
3329 _p_.goidcache -= _GoidCacheBatch - 1
3330 _p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
3331 }
3332 newg.goid = int64(_p_.goidcache)
3333 _p_.goidcache++
3334 if raceenabled {
3335 newg.racectx = racegostart(callerpc)
3336 }
3337 if trace.enabled {
3338 traceGoCreate(newg, newg.startpc)
3339 }
3340 runqput(_p_, newg, true)
3341
3342 if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted {
3343 wakep()
3344 }
3345 _g_.m.locks--
3346 if _g_.m.locks == 0 && _g_.preempt {
3347 _g_.stackguard0 = stackPreempt
3348 }
3349 }
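// newproc/newproc1 copy the go statement's arguments onto the new goroutine's
// stack (the memmove above) at creation time. A small standalone sketch (not
// part of proc.go) showing the user-visible consequence: arguments are
// evaluated and captured when the go statement executes, not when the
// goroutine runs.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	x := 1
	wg.Add(1)
	go func(v int) { // v is a copy of x made when the goroutine is created
		defer wg.Done()
		fmt.Println("argument copy:", v) // prints 1
	}(x)
	x = 2 // does not affect the copy already made for the goroutine
	wg.Wait()
	fmt.Println("x is now:", x) // prints 2
}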
3350
3351 // Put on gfree list.
3352 // If local list is too long, transfer a batch to the global list.
3353 func gfput(_p_ *p, gp *g) {
3354 if readgstatus(gp) != _Gdead {
3355 throw("gfput: bad status (not Gdead)")
3356 }
3357
3358 stksize := gp.stack.hi - gp.stack.lo
3359
3360 if stksize != _FixedStack {
3361
3362 stackfree(gp.stack)
3363 gp.stack.lo = 0
3364 gp.stack.hi = 0
3365 gp.stackguard0 = 0
3366 }
3367
3368 gp.schedlink.set(_p_.gfree)
3369 _p_.gfree = gp
3370 _p_.gfreecnt++
3371 if _p_.gfreecnt >= 64 {
3372 lock(&sched.gflock)
3373 for _p_.gfreecnt >= 32 {
3374 _p_.gfreecnt--
3375 gp = _p_.gfree
3376 _p_.gfree = gp.schedlink.ptr()
3377 if gp.stack.lo == 0 {
3378 gp.schedlink.set(sched.gfreeNoStack)
3379 sched.gfreeNoStack = gp
3380 } else {
3381 gp.schedlink.set(sched.gfreeStack)
3382 sched.gfreeStack = gp
3383 }
3384 sched.ngfree++
3385 }
3386 unlock(&sched.gflock)
3387 }
3388 }
3389
3390 // Get from gfree list.
3391 // If local list is empty, grab a batch from global list.
3392 func gfget(_p_ *p) *g {
3393 retry:
3394 gp := _p_.gfree
3395 if gp == nil && (sched.gfreeStack != nil || sched.gfreeNoStack != nil) {
3396 lock(&sched.gflock)
3397 for _p_.gfreecnt < 32 {
3398 if sched.gfreeStack != nil {
3399
3400 gp = sched.gfreeStack
3401 sched.gfreeStack = gp.schedlink.ptr()
3402 } else if sched.gfreeNoStack != nil {
3403 gp = sched.gfreeNoStack
3404 sched.gfreeNoStack = gp.schedlink.ptr()
3405 } else {
3406 break
3407 }
3408 _p_.gfreecnt++
3409 sched.ngfree--
3410 gp.schedlink.set(_p_.gfree)
3411 _p_.gfree = gp
3412 }
3413 unlock(&sched.gflock)
3414 goto retry
3415 }
3416 if gp != nil {
3417 _p_.gfree = gp.schedlink.ptr()
3418 _p_.gfreecnt--
3419 if gp.stack.lo == 0 {
3420
3421 systemstack(func() {
3422 gp.stack = stackalloc(_FixedStack)
3423 })
3424 gp.stackguard0 = gp.stack.lo + _StackGuard
3425 } else {
3426 if raceenabled {
3427 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
3428 }
3429 if msanenabled {
3430 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
3431 }
3432 }
3433 }
3434 return gp
3435 }
3436
3437 // Purge all cached G's from gfree list to the global list.
3438 func gfpurge(_p_ *p) {
3439 lock(&sched.gflock)
3440 for _p_.gfreecnt != 0 {
3441 _p_.gfreecnt--
3442 gp := _p_.gfree
3443 _p_.gfree = gp.schedlink.ptr()
3444 if gp.stack.lo == 0 {
3445 gp.schedlink.set(sched.gfreeNoStack)
3446 sched.gfreeNoStack = gp
3447 } else {
3448 gp.schedlink.set(sched.gfreeStack)
3449 sched.gfreeStack = gp
3450 }
3451 sched.ngfree++
3452 }
3453 unlock(&sched.gflock)
3454 }
3455
3456 // Breakpoint executes a breakpoint trap.
3457 func Breakpoint() {
3458 breakpoint()
3459 }
3460
3461 // dolockOSThread is called by LockOSThread and lockOSThread below
3462 // after they modify m.locked. Do not allow preemption during this call,
3463 // or else the m might be different in this function than in the caller.
3464 //go:nosplit
3465 func dolockOSThread() {
3466 _g_ := getg()
3467 _g_.m.lockedg.set(_g_)
3468 _g_.lockedm.set(_g_.m)
3469 }
3470
3471 //go:nosplit
3472
3473 // LockOSThread wires the calling goroutine to its current operating system thread.
3474 // The calling goroutine will always execute in that thread,
3475 // and no other goroutine will execute in it,
3476 // until the calling goroutine has made as many calls to
3477 // UnlockOSThread as to LockOSThread.
3478 // If the calling goroutine exits without unlocking the thread,
3479 // the thread will be terminated.
3480 //
3481 // A goroutine should call LockOSThread before calling OS services or
3482 // non-Go library functions that depend on per-thread state.
3483 func LockOSThread() {
3484 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
3485
3486
3487
3488 startTemplateThread()
3489 }
3490 _g_ := getg()
3491 _g_.m.lockedExt++
3492 if _g_.m.lockedExt == 0 {
3493 _g_.m.lockedExt--
3494 panic("LockOSThread nesting overflow")
3495 }
3496 dolockOSThread()
3497 }
3498
3499
3500 func lockOSThread() {
3501 getg().m.lockedInt++
3502 dolockOSThread()
3503 }
3504
3505 // dounlockOSThread is called by UnlockOSThread and unlockOSThread below
3506 // after they update m->locked. Do not allow preemption during this call,
3507 // or else the m might be different in this function than in the caller.
3508 //go:nosplit
3509 func dounlockOSThread() {
3510 _g_ := getg()
3511 if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 {
3512 return
3513 }
3514 _g_.m.lockedg = 0
3515 _g_.lockedm = 0
3516 }
3517
3518 //go:nosplit
3519
3520 // UnlockOSThread undoes an earlier call to LockOSThread.
3521 // If this drops the number of active LockOSThread calls on the
3522 // calling goroutine to zero, it unwires the calling goroutine from
3523 // its fixed operating system thread.
3524 // If there are no active LockOSThread calls, this is a no-op.
3525 //
3526 // Before calling UnlockOSThread, the caller must ensure that the OS
3527 // thread is suitable for running other goroutines. If the caller made
3528 // any permanent changes to the state of the thread that would affect
3529 // other goroutines, it should not call this function and thus leave
3530 // the goroutine locked to the OS thread until the goroutine (and
3531 // hence the thread) exits.
3532 func UnlockOSThread() {
3533 _g_ := getg()
3534 if _g_.m.lockedExt == 0 {
3535 return
3536 }
3537 _g_.m.lockedExt--
3538 dounlockOSThread()
3539 }
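// LockOSThread/UnlockOSThread are typically wrapped around code that depends
// on per-thread state, such as C libraries using thread-local storage or GUI
// main loops. A minimal sketch (not part of proc.go); the thread-affine work
// here is only a placeholder.
package main

import (
	"fmt"
	"runtime"
)

func doThreadAffineWork() {
	// Placeholder for calls that must all happen on the same OS thread.
	fmt.Println("running on a wired OS thread")
}

func main() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread() // unwire the goroutine once the work is done

	doThreadAffineWork()
}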
3540
3541
3542 func unlockOSThread() {
3543 _g_ := getg()
3544 if _g_.m.lockedInt == 0 {
3545 systemstack(badunlockosthread)
3546 }
3547 _g_.m.lockedInt--
3548 dounlockOSThread()
3549 }
3550
3551 func badunlockosthread() {
3552 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
3553 }
3554
3555 func gcount() int32 {
3556 n := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
3557 for _, _p_ := range allp {
3558 n -= _p_.gfreecnt
3559 }
3560
3561 // All these variables can be changed concurrently, so the result can be inconsistent.
3562 // But at least the current goroutine is running.
3563 if n < 1 {
3564 n = 1
3565 }
3566 return n
3567 }
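// gcount backs the exported runtime.NumGoroutine. A tiny sketch (not part of
// proc.go) showing the count moving as goroutines are created; the exact
// numbers depend on timing.
package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	fmt.Println("before:", runtime.NumGoroutine())

	var wg sync.WaitGroup
	stop := make(chan struct{})
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-stop // park here so the goroutine stays alive for the second count
		}()
	}
	fmt.Println("after spawning 10:", runtime.NumGoroutine())

	close(stop)
	wg.Wait()
}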
3568
3569 func mcount() int32 {
3570 return int32(sched.mnext - sched.nmfreed)
3571 }
3572
3573 var prof struct {
3574 signalLock uint32
3575 hz int32
3576 }
3577
3578 func _System() { _System() }
3579 func _ExternalCode() { _ExternalCode() }
3580 func _LostExternalCode() { _LostExternalCode() }
3581 func _GC() { _GC() }
3582 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
3583
3584 // lostAtomic64Count counts SIGPROF signals received while in an atomic64 critical section, on mips{,le}.
3585 var lostAtomic64Count uint64
3586
3587 // Called if we receive a SIGPROF signal.
3588 // Called by the signal handler, may run during STW.
3589 //go:nowritebarrierrec
3590 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
3591 if prof.hz == 0 {
3592 return
3593 }
3594
3595
3596
3597
3598
3599
3600
3601 if GOARCH == "mips" || GOARCH == "mipsle" {
3602 if f := findfunc(pc); f.valid() {
3603 if hasprefix(funcname(f), "runtime/internal/atomic") {
3604 lostAtomic64Count++
3605 return
3606 }
3607 }
3608 }
3609
3610
3611
3612
3613
3614
3615
3616 getg().m.mallocing++
3617
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659
3660
3661
3662
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683 traceback := true
3684 if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
3685 traceback = false
3686 }
3687 var stk [maxCPUProfStack]uintptr
3688 n := 0
3689 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
3690 cgoOff := 0
3691
3692
3693
3694
3695
3696 if atomic.Load(&mp.cgoCallersUse) == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
3697 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
3698 cgoOff++
3699 }
3700 copy(stk[:], mp.cgoCallers[:cgoOff])
3701 mp.cgoCallers[0] = 0
3702 }
3703
3704
3705 n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
3706 } else if traceback {
3707 n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
3708 }
3709
3710 if n <= 0 {
3711
3712
3713 n = 0
3714 if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
3715
3716
3717 n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
3718 }
3719 if n == 0 {
3720
3721 n = 2
3722
3723 if pc > firstmoduledata.etext {
3724 pc = funcPC(_ExternalCode) + sys.PCQuantum
3725 }
3726 stk[0] = pc
3727 if mp.preemptoff != "" || mp.helpgc != 0 {
3728 stk[1] = funcPC(_GC) + sys.PCQuantum
3729 } else {
3730 stk[1] = funcPC(_System) + sys.PCQuantum
3731 }
3732 }
3733 }
3734
3735 if prof.hz != 0 {
3736 if (GOARCH == "mips" || GOARCH == "mipsle") && lostAtomic64Count > 0 {
3737 cpuprof.addLostAtomic64(lostAtomic64Count)
3738 lostAtomic64Count = 0
3739 }
3740 cpuprof.add(gp, stk[:n])
3741 }
3742 getg().m.mallocing--
3743 }
3744
3745 // If the signal handler receives a SIGPROF signal on a non-Go thread,
3746 // it tries to collect a traceback into sigprofCallers.
3747 // sigprofCallersUse is set to non-zero while sigprofCallers holds a traceback.
3748 var sigprofCallers cgoCallers
3749 var sigprofCallersUse uint32
3750
3751 // sigprofNonGo is called if we receive a SIGPROF signal on a non-Go thread,
3752 // and the signal handler collected a stack trace in sigprofCallers.
3753 // When this is called, sigprofCallersUse will be non-zero.
3754 // g is nil, and what we can do is very limited.
3755 //go:nosplit
3756 //go:nowritebarrierrec
3757 func sigprofNonGo() {
3758 if prof.hz != 0 {
3759 n := 0
3760 for n < len(sigprofCallers) && sigprofCallers[n] != 0 {
3761 n++
3762 }
3763 cpuprof.addNonGo(sigprofCallers[:n])
3764 }
3765
3766 atomic.Store(&sigprofCallersUse, 0)
3767 }
3768
3769 // sigprofNonGoPC is called when a profiling signal arrived on a
3770 // non-Go thread and we have a single PC value, not a stack trace.
3771 // g is nil, and what we can do is very limited.
3772 //go:nosplit
3773 //go:nowritebarrierrec
3774 func sigprofNonGoPC(pc uintptr) {
3775 if prof.hz != 0 {
3776 stk := []uintptr{
3777 pc,
3778 funcPC(_ExternalCode) + sys.PCQuantum,
3779 }
3780 cpuprof.addNonGo(stk)
3781 }
3782 }
3783
3784 // Reports whether a function will set the SP
3785 // to an absolute value. Important that
3786 // we don't traceback when these are at the bottom
3787 // of the stack since we can't be sure that we will
3788 // find the caller.
3789 //
3790 // If the function is not on the bottom of the stack
3791 // we assume that it will have set it up so that traceback will be consistent,
3792 // either by being a traceback terminating function
3793 // or that it has set up the SP value.
3794 func setsSP(pc uintptr) bool {
3795 f := findfunc(pc)
3796 if !f.valid() {
3797
3798
3799 return true
3800 }
3801 switch f.entry {
3802 case gogoPC, systemstackPC, mcallPC, morestackPC:
3803 return true
3804 }
3805 return false
3806 }
3807
3808 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
3809 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
3810 func setcpuprofilerate(hz int32) {
3811
3812 if hz < 0 {
3813 hz = 0
3814 }
3815
3816
3817
3818 _g_ := getg()
3819 _g_.m.locks++
3820
3821
3822
3823
3824 setThreadCPUProfiler(0)
3825
3826 for !atomic.Cas(&prof.signalLock, 0, 1) {
3827 osyield()
3828 }
3829 if prof.hz != hz {
3830 setProcessCPUProfiler(hz)
3831 prof.hz = hz
3832 }
3833 atomic.Store(&prof.signalLock, 0)
3834
3835 lock(&sched.lock)
3836 sched.profilehz = hz
3837 unlock(&sched.lock)
3838
3839 if hz != 0 {
3840 setThreadCPUProfiler(hz)
3841 }
3842
3843 _g_.m.locks--
3844 }
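// setcpuprofilerate is reached from runtime.SetCPUProfileRate, which
// runtime/pprof uses when a CPU profile is started. A minimal sketch (not part
// of proc.go) that profiles some busy work; the output file name is arbitrary.
package main

import (
	"log"
	"os"
	"runtime/pprof"
)

func busyWork() int {
	sum := 0
	for i := 0; i < 1e8; i++ {
		sum += i
	}
	return sum
}

func main() {
	f, err := os.Create("cpu.prof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// StartCPUProfile turns on SIGPROF delivery (100 Hz by default);
	// StopCPUProfile sets the rate back to zero.
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	_ = busyWork()
}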
3845
3846 // Change number of processors. The world is stopped, sched is locked.
3847 // gcworkbufs are not being modified by either the GC or
3848 // the write barrier code.
3849 // Returns list of Ps with local work, they need to be scheduled by the caller.
3850 func procresize(nprocs int32) *p {
3851 old := gomaxprocs
3852 if old < 0 || nprocs <= 0 {
3853 throw("procresize: invalid arg")
3854 }
3855 if trace.enabled {
3856 traceGomaxprocs(nprocs)
3857 }
3858
3859
3860 now := nanotime()
3861 if sched.procresizetime != 0 {
3862 sched.totaltime += int64(old) * (now - sched.procresizetime)
3863 }
3864 sched.procresizetime = now
3865
3866
3867 if nprocs > int32(len(allp)) {
3868
3869
3870 lock(&allpLock)
3871 if nprocs <= int32(cap(allp)) {
3872 allp = allp[:nprocs]
3873 } else {
3874 nallp := make([]*p, nprocs)
3875
3876
3877 copy(nallp, allp[:cap(allp)])
3878 allp = nallp
3879 }
3880 unlock(&allpLock)
3881 }
3882
3883
3884 for i := int32(0); i < nprocs; i++ {
3885 pp := allp[i]
3886 if pp == nil {
3887 pp = new(p)
3888 pp.id = i
3889 pp.status = _Pgcstop
3890 pp.sudogcache = pp.sudogbuf[:0]
3891 for i := range pp.deferpool {
3892 pp.deferpool[i] = pp.deferpoolbuf[i][:0]
3893 }
3894 pp.wbBuf.reset()
3895 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
3896 }
3897 if pp.mcache == nil {
3898 if old == 0 && i == 0 {
3899 if getg().m.mcache == nil {
3900 throw("missing mcache?")
3901 }
3902 pp.mcache = getg().m.mcache
3903 } else {
3904 pp.mcache = allocmcache()
3905 }
3906 }
3907 if raceenabled && pp.racectx == 0 {
3908 if old == 0 && i == 0 {
3909 pp.racectx = raceprocctx0
3910 raceprocctx0 = 0
3911 } else {
3912 pp.racectx = raceproccreate()
3913 }
3914 }
3915 }
3916
3917
3918 for i := nprocs; i < old; i++ {
3919 p := allp[i]
3920 if trace.enabled && p == getg().m.p.ptr() {
3921
3922
3923 traceGoSched()
3924 traceProcStop(p)
3925 }
3926
3927 for p.runqhead != p.runqtail {
3928
3929 p.runqtail--
3930 gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
3931
3932 globrunqputhead(gp)
3933 }
3934 if p.runnext != 0 {
3935 globrunqputhead(p.runnext.ptr())
3936 p.runnext = 0
3937 }
3938
3939
3940 if gp := p.gcBgMarkWorker.ptr(); gp != nil {
3941 casgstatus(gp, _Gwaiting, _Grunnable)
3942 if trace.enabled {
3943 traceGoUnpark(gp, 0)
3944 }
3945 globrunqput(gp)
3946
3947
3948 p.gcBgMarkWorker.set(nil)
3949 }
3950
3951 if gcphase != _GCoff {
3952 wbBufFlush1(p)
3953 p.gcw.dispose()
3954 }
3955 for i := range p.sudogbuf {
3956 p.sudogbuf[i] = nil
3957 }
3958 p.sudogcache = p.sudogbuf[:0]
3959 for i := range p.deferpool {
3960 for j := range p.deferpoolbuf[i] {
3961 p.deferpoolbuf[i][j] = nil
3962 }
3963 p.deferpool[i] = p.deferpoolbuf[i][:0]
3964 }
3965 freemcache(p.mcache)
3966 p.mcache = nil
3967 gfpurge(p)
3968 traceProcFree(p)
3969 if raceenabled {
3970 raceprocdestroy(p.racectx)
3971 p.racectx = 0
3972 }
3973 p.gcAssistTime = 0
3974 p.status = _Pdead
3975
3976 }
3977
3978
3979 if int32(len(allp)) != nprocs {
3980 lock(&allpLock)
3981 allp = allp[:nprocs]
3982 unlock(&allpLock)
3983 }
3984
3985 _g_ := getg()
3986 if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
3987
3988 _g_.m.p.ptr().status = _Prunning
3989 } else {
3990
3991 if _g_.m.p != 0 {
3992 _g_.m.p.ptr().m = 0
3993 }
3994 _g_.m.p = 0
3995 _g_.m.mcache = nil
3996 p := allp[0]
3997 p.m = 0
3998 p.status = _Pidle
3999 acquirep(p)
4000 if trace.enabled {
4001 traceGoStart()
4002 }
4003 }
4004 var runnablePs *p
4005 for i := nprocs - 1; i >= 0; i-- {
4006 p := allp[i]
4007 if _g_.m.p.ptr() == p {
4008 continue
4009 }
4010 p.status = _Pidle
4011 if runqempty(p) {
4012 pidleput(p)
4013 } else {
4014 p.m.set(mget())
4015 p.link.set(runnablePs)
4016 runnablePs = p
4017 }
4018 }
4019 stealOrder.reset(uint32(nprocs))
4020 var int32p *int32 = &gomaxprocs
4021 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
4022 return runnablePs
4023 }
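// procresize is the mechanism behind runtime.GOMAXPROCS: the world is stopped,
// allp is grown or shrunk, and Ps that still have local work are returned to
// the caller to be rescheduled. A small sketch (not part of proc.go) of the
// user-visible knob.
package main

import (
	"fmt"
	"runtime"
)

func main() {
	prev := runtime.GOMAXPROCS(0) // 0 queries the setting without changing it
	fmt.Println("GOMAXPROCS was:", prev)

	runtime.GOMAXPROCS(2) // stops the world, calls procresize(2), restarts the world
	fmt.Println("GOMAXPROCS now:", runtime.GOMAXPROCS(0))

	runtime.GOMAXPROCS(prev) // restore the previous setting
}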
4024
4025 // Associate p and the current m.
4026 //
4027 // This function is allowed to have write barriers even if the caller
4028 // isn't because it immediately acquires _p_.
4029 //
4030 //go:yeswritebarrierrec
4031 func acquirep(_p_ *p) {
4032
4033 acquirep1(_p_)
4034
4035
4036 _g_ := getg()
4037 _g_.m.mcache = _p_.mcache
4038
4039 if trace.enabled {
4040 traceProcStart()
4041 }
4042 }
4043
4044 // acquirep1 is the first step of acquirep, which actually associates the
4045 // current m with _p_. This is broken out so we can disallow write
4046 // barriers for this part, since we don't yet have a P.
4047 //
4048 //go:nowritebarrierrec
4049 func acquirep1(_p_ *p) {
4050 _g_ := getg()
4051
4052 if _g_.m.p != 0 || _g_.m.mcache != nil {
4053 throw("acquirep: already in go")
4054 }
4055 if _p_.m != 0 || _p_.status != _Pidle {
4056 id := int64(0)
4057 if _p_.m != 0 {
4058 id = _p_.m.ptr().id
4059 }
4060 print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
4061 throw("acquirep: invalid p state")
4062 }
4063 _g_.m.p.set(_p_)
4064 _p_.m.set(_g_.m)
4065 _p_.status = _Prunning
4066 }
4067
4068 // Disassociate p and the current m.
4069 func releasep() *p {
4070 _g_ := getg()
4071
4072 if _g_.m.p == 0 || _g_.m.mcache == nil {
4073 throw("releasep: invalid arg")
4074 }
4075 _p_ := _g_.m.p.ptr()
4076 if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
4077 print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
4078 throw("releasep: invalid p state")
4079 }
4080 if trace.enabled {
4081 traceProcStop(_g_.m.p.ptr())
4082 }
4083 _g_.m.p = 0
4084 _g_.m.mcache = nil
4085 _p_.m = 0
4086 _p_.status = _Pidle
4087 return _p_
4088 }
4089
4090 func incidlelocked(v int32) {
4091 lock(&sched.lock)
4092 sched.nmidlelocked += v
4093 if v > 0 {
4094 checkdead()
4095 }
4096 unlock(&sched.lock)
4097 }
4098
4099 // Check for deadlock situation.
4100 // The check is based on number of running M's, if 0 -> deadlock.
4101 // sched.lock must be held.
4102 func checkdead() {
4103 // For -buildmode=c-shared or -buildmode=c-archive it's OK if
4104 // there are no running goroutines. The calling program is
4105 // assumed to be running.
4106 if islibrary || isarchive {
4107 return
4108 }
4109
4110
4111
4112
4113
4114 if panicking > 0 {
4115 return
4116 }
4117
4118 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
4119 if run > 0 {
4120 return
4121 }
4122 if run < 0 {
4123 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
4124 throw("checkdead: inconsistent counts")
4125 }
4126
4127 grunning := 0
4128 lock(&allglock)
4129 for i := 0; i < len(allgs); i++ {
4130 gp := allgs[i]
4131 if isSystemGoroutine(gp) {
4132 continue
4133 }
4134 s := readgstatus(gp)
4135 switch s &^ _Gscan {
4136 case _Gwaiting:
4137 grunning++
4138 case _Grunnable,
4139 _Grunning,
4140 _Gsyscall:
4141 unlock(&allglock)
4142 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
4143 throw("checkdead: runnable g")
4144 }
4145 }
4146 unlock(&allglock)
4147 if grunning == 0 {
4148 throw("no goroutines (main called runtime.Goexit) - deadlock!")
4149 }
4150
4151 // Maybe jump time forward for playground.
4152 gp := timejump()
4153 if gp != nil {
4154 casgstatus(gp, _Gwaiting, _Grunnable)
4155 globrunqput(gp)
4156 _p_ := pidleget()
4157 if _p_ == nil {
4158 throw("checkdead: no p for timer")
4159 }
4160 mp := mget()
4161 if mp == nil {
4162
4163
4164 throw("checkdead: no m for timer")
4165 }
4166 mp.nextp.set(_p_)
4167 notewakeup(&mp.park)
4168 return
4169 }
4170
4171 getg().m.throwing = -1
4172 throw("all goroutines are asleep - deadlock!")
4173 }
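// checkdead is what produces the familiar "all goroutines are asleep -
// deadlock!" crash: every goroutine is blocked, no timer can wake one up, so
// the runtime throws instead of hanging silently. A minimal sketch (not part
// of proc.go) that triggers it.
package main

func main() {
	ch := make(chan int)
	<-ch // no goroutine will ever send: fatal error: all goroutines are asleep - deadlock!
}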
4174
4175 // forcegcperiod is the maximum time in nanoseconds between garbage
4176 // collections. If we go this long without a garbage collection, one
4177 // is forced to run.
4178 //
4179 // This is a variable for testing purposes. It normally doesn't change.
4180 var forcegcperiod int64 = 2 * 60 * 1e9
4181
4182 // Always runs without a P, so write barriers are not allowed.
4183 //
4184 //go:nowritebarrierrec
4185 func sysmon() {
4186 lock(&sched.lock)
4187 sched.nmsys++
4188 checkdead()
4189 unlock(&sched.lock)
4190
4191 // If a heap span goes unused for 5 minutes after a garbage collection,
4192 // we hand it back to the operating system.
4193 scavengelimit := int64(5 * 60 * 1e9)
4194
4195 if debug.scavenge > 0 {
4196
4197 forcegcperiod = 10 * 1e6
4198 scavengelimit = 20 * 1e6
4199 }
4200
4201 lastscavenge := nanotime()
4202 nscavenge := 0
4203
4204 lasttrace := int64(0)
4205 idle := 0
4206 delay := uint32(0)
4207 for {
4208 if idle == 0 {
4209 delay = 20
4210 } else if idle > 50 {
4211 delay *= 2
4212 }
4213 if delay > 10*1000 {
4214 delay = 10 * 1000
4215 }
4216 usleep(delay)
4217 if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) {
4218 lock(&sched.lock)
4219 if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
4220 atomic.Store(&sched.sysmonwait, 1)
4221 unlock(&sched.lock)
4222
4223
4224 maxsleep := forcegcperiod / 2
4225 if scavengelimit < forcegcperiod {
4226 maxsleep = scavengelimit / 2
4227 }
4228 shouldRelax := true
4229 if osRelaxMinNS > 0 {
4230 next := timeSleepUntil()
4231 now := nanotime()
4232 if next-now < osRelaxMinNS {
4233 shouldRelax = false
4234 }
4235 }
4236 if shouldRelax {
4237 osRelax(true)
4238 }
4239 notetsleep(&sched.sysmonnote, maxsleep)
4240 if shouldRelax {
4241 osRelax(false)
4242 }
4243 lock(&sched.lock)
4244 atomic.Store(&sched.sysmonwait, 0)
4245 noteclear(&sched.sysmonnote)
4246 idle = 0
4247 delay = 20
4248 }
4249 unlock(&sched.lock)
4250 }
4251
4252 if *cgo_yield != nil {
4253 asmcgocall(*cgo_yield, nil)
4254 }
4255
4256 lastpoll := int64(atomic.Load64(&sched.lastpoll))
4257 now := nanotime()
4258 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
4259 atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
4260 gp := netpoll(false)
4261 if gp != nil {
4262
4263
4264
4265
4266
4267
4268
4269 incidlelocked(-1)
4270 injectglist(gp)
4271 incidlelocked(1)
4272 }
4273 }
4274
4275
4276 if retake(now) != 0 {
4277 idle = 0
4278 } else {
4279 idle++
4280 }
4281
4282 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && atomic.Load(&forcegc.idle) != 0 {
4283 lock(&forcegc.lock)
4284 forcegc.idle = 0
4285 forcegc.g.schedlink = 0
4286 injectglist(forcegc.g)
4287 unlock(&forcegc.lock)
4288 }
4289
4290 if lastscavenge+scavengelimit/2 < now {
4291 mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
4292 lastscavenge = now
4293 nscavenge++
4294 }
4295 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
4296 lasttrace = now
4297 schedtrace(debug.scheddetail > 0)
4298 }
4299 }
4300 }
4301
4302 type sysmontick struct {
4303 schedtick uint32
4304 schedwhen int64
4305 syscalltick uint32
4306 syscallwhen int64
4307 }
4308
4309 // forcePreemptNS is the time slice given to a G before it is
4310 // preempted.
4311 const forcePreemptNS = 10 * 1000 * 1000
4312
4313 func retake(now int64) uint32 {
4314 n := 0
4315
4316
4317 lock(&allpLock)
4318
4319
4320
4321 for i := 0; i < len(allp); i++ {
4322 _p_ := allp[i]
4323 if _p_ == nil {
4324
4325
4326 continue
4327 }
4328 pd := &_p_.sysmontick
4329 s := _p_.status
4330 if s == _Psyscall {
4331
4332 t := int64(_p_.syscalltick)
4333 if int64(pd.syscalltick) != t {
4334 pd.syscalltick = uint32(t)
4335 pd.syscallwhen = now
4336 continue
4337 }
4338
4339
4340
4341 if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
4342 continue
4343 }
4344
4345 unlock(&allpLock)
4346
4347
4348
4349
4350 incidlelocked(-1)
4351 if atomic.Cas(&_p_.status, s, _Pidle) {
4352 if trace.enabled {
4353 traceGoSysBlock(_p_)
4354 traceProcStop(_p_)
4355 }
4356 n++
4357 _p_.syscalltick++
4358 handoffp(_p_)
4359 }
4360 incidlelocked(1)
4361 lock(&allpLock)
4362 } else if s == _Prunning {
4363
4364 t := int64(_p_.schedtick)
4365 if int64(pd.schedtick) != t {
4366 pd.schedtick = uint32(t)
4367 pd.schedwhen = now
4368 continue
4369 }
4370 if pd.schedwhen+forcePreemptNS > now {
4371 continue
4372 }
4373 preemptone(_p_)
4374 }
4375 }
4376 unlock(&allpLock)
4377 return uint32(n)
4378 }
4379
4380 // Tell all goroutines that they have been preempted and they should stop.
4381 // This function is purely best-effort. It can fail to inform a goroutine if a
4382 // processor just started running it.
4383 // No locks need to be held.
4384 // Returns true if preemption request was issued to at least one goroutine.
4385 func preemptall() bool {
4386 res := false
4387 for _, _p_ := range allp {
4388 if _p_.status != _Prunning {
4389 continue
4390 }
4391 if preemptone(_p_) {
4392 res = true
4393 }
4394 }
4395 return res
4396 }
4397
4398 // Tell the goroutine running on processor P to stop.
4399 // This function is purely best-effort. It can incorrectly fail to inform the
4400 // goroutine. It can inform the wrong goroutine. Even if it informs the
4401 // correct goroutine, that goroutine might ignore the request if it is
4402 // simultaneously executing newstack.
4403 // No lock needs to be held.
4404 // Returns true if preemption request was issued.
4405 // The actual preemption will happen at some point in the future
4406 // and will be indicated by the gp->status no longer being
4407 // Grunning.
4408 func preemptone(_p_ *p) bool {
4409 mp := _p_.m.ptr()
4410 if mp == nil || mp == getg().m {
4411 return false
4412 }
4413 gp := mp.curg
4414 if gp == nil || gp == mp.g0 {
4415 return false
4416 }
4417
4418 gp.preempt = true
4419
4420 // Every call in a go routine checks for stack overflow by
4421 // comparing the current stack pointer to gp->stackguard0.
4422 // Setting gp->stackguard0 to StackPreempt folds
4423 // preemption into the normal stack overflow check.
4424 gp.stackguard0 = stackPreempt
4425 return true
4426 }
4427
4428 var starttime int64
4429
4430 func schedtrace(detailed bool) {
4431 now := nanotime()
4432 if starttime == 0 {
4433 starttime = now
4434 }
4435
4436 lock(&sched.lock)
4437 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", mcount(), " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
4438 if detailed {
4439 print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
4440 }
4441
4442
4443
4444 for i, _p_ := range allp {
4445 mp := _p_.m.ptr()
4446 h := atomic.Load(&_p_.runqhead)
4447 t := atomic.Load(&_p_.runqtail)
4448 if detailed {
4449 id := int64(-1)
4450 if mp != nil {
4451 id = mp.id
4452 }
4453 print(" P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
4454 } else {
4455
4456
4457 print(" ")
4458 if i == 0 {
4459 print("[")
4460 }
4461 print(t - h)
4462 if i == len(allp)-1 {
4463 print("]\n")
4464 }
4465 }
4466 }
4467
4468 if !detailed {
4469 unlock(&sched.lock)
4470 return
4471 }
4472
4473 for mp := allm; mp != nil; mp = mp.alllink {
4474 _p_ := mp.p.ptr()
4475 gp := mp.curg
4476 lockedg := mp.lockedg.ptr()
4477 id1 := int32(-1)
4478 if _p_ != nil {
4479 id1 = _p_.id
4480 }
4481 id2 := int64(-1)
4482 if gp != nil {
4483 id2 = gp.goid
4484 }
4485 id3 := int64(-1)
4486 if lockedg != nil {
4487 id3 = lockedg.goid
4488 }
4489 print(" M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=", id3, "\n")
4490 }
4491
4492 lock(&allglock)
4493 for gi := 0; gi < len(allgs); gi++ {
4494 gp := allgs[gi]
4495 mp := gp.m
4496 lockedm := gp.lockedm.ptr()
4497 id1 := int64(-1)
4498 if mp != nil {
4499 id1 = mp.id
4500 }
4501 id2 := int64(-1)
4502 if lockedm != nil {
4503 id2 = lockedm.id
4504 }
4505 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
4506 }
4507 unlock(&allglock)
4508 unlock(&sched.lock)
4509 }
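// schedtrace prints the periodic scheduler summary enabled through the GODEBUG
// environment variable. A small sketch (not part of proc.go); run it with
// GODEBUG=schedtrace=1000 for a summary line every second, or with
// GODEBUG=schedtrace=1000,scheddetail=1 for the per-P, per-M and per-G detail.
package main

import (
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			time.Sleep(3 * time.Second) // keep goroutines alive while traces print
		}()
	}
	wg.Wait()
}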
4510
4511 // Put mp on midle list.
4512 // Sched must be locked.
4513 // May run during STW, so write barriers are not allowed.
4514 //go:nowritebarrierrec
4515 func mput(mp *m) {
4516 mp.schedlink = sched.midle
4517 sched.midle.set(mp)
4518 sched.nmidle++
4519 checkdead()
4520 }
4521
4522 // Try to get an m from midle list.
4523 // Sched must be locked.
4524 // May run during STW, so write barriers are not allowed.
4525 //go:nowritebarrierrec
4526 func mget() *m {
4527 mp := sched.midle.ptr()
4528 if mp != nil {
4529 sched.midle = mp.schedlink
4530 sched.nmidle--
4531 }
4532 return mp
4533 }
4534
4535 // Put gp on the global runnable queue.
4536 // Sched must be locked.
4537 // May run during STW, so write barriers are not allowed.
4538 //go:nowritebarrierrec
4539 func globrunqput(gp *g) {
4540 gp.schedlink = 0
4541 if sched.runqtail != 0 {
4542 sched.runqtail.ptr().schedlink.set(gp)
4543 } else {
4544 sched.runqhead.set(gp)
4545 }
4546 sched.runqtail.set(gp)
4547 sched.runqsize++
4548 }
4549
4550 // Put gp at the head of the global runnable queue.
4551 // Sched must be locked.
4552 // May run during STW, so write barriers are not allowed.
4553 //go:nowritebarrierrec
4554 func globrunqputhead(gp *g) {
4555 gp.schedlink = sched.runqhead
4556 sched.runqhead.set(gp)
4557 if sched.runqtail == 0 {
4558 sched.runqtail.set(gp)
4559 }
4560 sched.runqsize++
4561 }
4562
4563 // Put a batch of runnable goroutines on the global runnable queue.
4564 // Sched must be locked.
4565 func globrunqputbatch(ghead *g, gtail *g, n int32) {
4566 gtail.schedlink = 0
4567 if sched.runqtail != 0 {
4568 sched.runqtail.ptr().schedlink.set(ghead)
4569 } else {
4570 sched.runqhead.set(ghead)
4571 }
4572 sched.runqtail.set(gtail)
4573 sched.runqsize += n
4574 }
4575
4576 // Try to get a batch of G's from the global runnable queue.
4577 // Sched must be locked.
4578 func globrunqget(_p_ *p, max int32) *g {
4579 if sched.runqsize == 0 {
4580 return nil
4581 }
4582
4583 n := sched.runqsize/gomaxprocs + 1
4584 if n > sched.runqsize {
4585 n = sched.runqsize
4586 }
4587 if max > 0 && n > max {
4588 n = max
4589 }
4590 if n > int32(len(_p_.runq))/2 {
4591 n = int32(len(_p_.runq)) / 2
4592 }
4593
4594 sched.runqsize -= n
4595 if sched.runqsize == 0 {
4596 sched.runqtail = 0
4597 }
4598
4599 gp := sched.runqhead.ptr()
4600 sched.runqhead = gp.schedlink
4601 n--
4602 for ; n > 0; n-- {
4603 gp1 := sched.runqhead.ptr()
4604 sched.runqhead = gp1.schedlink
4605 runqput(_p_, gp1, false)
4606 }
4607 return gp
4608 }
4609
4610 // Put p on the _Pidle list.
4611 // Sched must be locked.
4612 // May run during STW, so write barriers are not allowed.
4613 //go:nowritebarrierrec
4614 func pidleput(_p_ *p) {
4615 if !runqempty(_p_) {
4616 throw("pidleput: P has non-empty run queue")
4617 }
4618 _p_.link = sched.pidle
4619 sched.pidle.set(_p_)
4620 atomic.Xadd(&sched.npidle, 1)
4621 }
4622
4623 // Try to get a p from the _Pidle list.
4624 // Sched must be locked.
4625 // May run during STW, so write barriers are not allowed.
4626 //go:nowritebarrierrec
4627 func pidleget() *p {
4628 _p_ := sched.pidle.ptr()
4629 if _p_ != nil {
4630 sched.pidle = _p_.link
4631 atomic.Xadd(&sched.npidle, -1)
4632 }
4633 return _p_
4634 }
4635
4636 // runqempty reports whether _p_ has no Gs on its local run queue.
4637 // It never returns true spuriously.
4638 func runqempty(_p_ *p) bool {
4639 // Defend against a race where 1) _p_ has G1 in runnext but runqhead == runqtail,
4640 // 2) runqput on _p_ kicks G1 to the runq, 3) runqget on _p_ empties runnext.
4641 // Simply observing that runqhead == runqtail and then observing that runnext == nil
4642 // does not mean the queue is empty.
4643 for {
4644 head := atomic.Load(&_p_.runqhead)
4645 tail := atomic.Load(&_p_.runqtail)
4646 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&_p_.runnext)))
4647 if tail == atomic.Load(&_p_.runqtail) {
4648 return head == tail && runnext == 0
4649 }
4650 }
4651 }
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662 const randomizeScheduler = raceenabled
4663
4664 // runqput tries to put g on the local runnable queue.
4665 // If next is false, runqput adds g to the tail of the runnable queue.
4666 // If next is true, runqput puts g in the _p_.runnext slot.
4667 // If the run queue is full, runnext puts g on the global queue.
4668 // Executed only by the owner P.
4669 func runqput(_p_ *p, gp *g, next bool) {
4670 if randomizeScheduler && next && fastrand()%2 == 0 {
4671 next = false
4672 }
4673
4674 if next {
4675 retryNext:
4676 oldnext := _p_.runnext
4677 if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
4678 goto retryNext
4679 }
4680 if oldnext == 0 {
4681 return
4682 }
4683
4684 gp = oldnext.ptr()
4685 }
4686
4687 retry:
4688 h := atomic.Load(&_p_.runqhead)
4689 t := _p_.runqtail
4690 if t-h < uint32(len(_p_.runq)) {
4691 _p_.runq[t%uint32(len(_p_.runq))].set(gp)
4692 atomic.Store(&_p_.runqtail, t+1)
4693 return
4694 }
4695 if runqputslow(_p_, gp, h, t) {
4696 return
4697 }
4698
4699 goto retry
4700 }
4701
4702 // Put g and a batch of work from local runnable queue on global queue.
4703 // Executed only by the owner P.
4704 func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
4705 var batch [len(_p_.runq)/2 + 1]*g
4706
4707
4708 n := t - h
4709 n = n / 2
4710 if n != uint32(len(_p_.runq)/2) {
4711 throw("runqputslow: queue is not full")
4712 }
4713 for i := uint32(0); i < n; i++ {
4714 batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
4715 }
4716 if !atomic.Cas(&_p_.runqhead, h, h+n) {
4717 return false
4718 }
4719 batch[n] = gp
4720
4721 if randomizeScheduler {
4722 for i := uint32(1); i <= n; i++ {
4723 j := fastrandn(i + 1)
4724 batch[i], batch[j] = batch[j], batch[i]
4725 }
4726 }
4727
4728
4729 for i := uint32(0); i < n; i++ {
4730 batch[i].schedlink.set(batch[i+1])
4731 }
4732
4733
4734 lock(&sched.lock)
4735 globrunqputbatch(batch[0], batch[n], int32(n+1))
4736 unlock(&sched.lock)
4737 return true
4738 }
4739
4740 // Get g from local runnable queue.
4741 // If inheritTime is true, gp should inherit the remaining time in the
4742 // current time slice. Otherwise, it should start a new time slice.
4743 // Executed only by the owner P.
4744 func runqget(_p_ *p) (gp *g, inheritTime bool) {
4745
4746 for {
4747 next := _p_.runnext
4748 if next == 0 {
4749 break
4750 }
4751 if _p_.runnext.cas(next, 0) {
4752 return next.ptr(), true
4753 }
4754 }
4755
4756 for {
4757 h := atomic.Load(&_p_.runqhead)
4758 t := _p_.runqtail
4759 if t == h {
4760 return nil, false
4761 }
4762 gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
4763 if atomic.Cas(&_p_.runqhead, h, h+1) {
4764 return gp, false
4765 }
4766 }
4767 }
4768
4769 // Grabs a batch of goroutines from _p_'s runnable queue into batch.
4770 // Batch is a ring buffer starting at batchHead.
4771 // Returns number of grabbed goroutines.
4772 // Can be executed by any P.
4773 func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
4774 for {
4775 h := atomic.Load(&_p_.runqhead)
4776 t := atomic.Load(&_p_.runqtail)
4777 n := t - h
4778 n = n - n/2
4779 if n == 0 {
4780 if stealRunNextG {
4781
4782 if next := _p_.runnext; next != 0 {
4783 if _p_.status == _Prunning {
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794 if GOOS != "windows" {
4795 usleep(3)
4796 } else {
4797
4798
4799
4800 osyield()
4801 }
4802 }
4803 if !_p_.runnext.cas(next, 0) {
4804 continue
4805 }
4806 batch[batchHead%uint32(len(batch))] = next
4807 return 1
4808 }
4809 }
4810 return 0
4811 }
4812 if n > uint32(len(_p_.runq)/2) {
4813 continue
4814 }
4815 for i := uint32(0); i < n; i++ {
4816 g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
4817 batch[(batchHead+i)%uint32(len(batch))] = g
4818 }
4819 if atomic.Cas(&_p_.runqhead, h, h+n) {
4820 return n
4821 }
4822 }
4823 }
4824
4825 // Steal half of elements from local runnable queue of p2
4826 // and put onto local runnable queue of p.
4827 // Returns one of the stolen elements (or nil if failed).
4828 func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
4829 t := _p_.runqtail
4830 n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
4831 if n == 0 {
4832 return nil
4833 }
4834 n--
4835 gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
4836 if n == 0 {
4837 return gp
4838 }
4839 h := atomic.Load(&_p_.runqhead)
4840 if t-h+n >= uint32(len(_p_.runq)) {
4841 throw("runqsteal: runq overflow")
4842 }
4843 atomic.Store(&_p_.runqtail, t+n)
4844 return gp
4845 }
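// runqput, runqget, runqgrab and runqsteal implement per-P run queues with
// work stealing: each P pushes and pops its own queue cheaply, and an idle P
// steals half of a victim's queue. The runtime version is lock-free; the
// sketch below (not part of proc.go) shows the same shape with a mutex so the
// "steal half" policy is easy to see.
package main

import (
	"fmt"
	"sync"
)

type task int

type workQueue struct {
	mu    sync.Mutex
	tasks []task
}

func (q *workQueue) put(t task) {
	q.mu.Lock()
	q.tasks = append(q.tasks, t)
	q.mu.Unlock()
}

func (q *workQueue) get() (task, bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.tasks) == 0 {
		return 0, false
	}
	t := q.tasks[0]
	q.tasks = q.tasks[1:]
	return t, true
}

// stealHalf moves half of the victim's tasks into q, mirroring runqsteal.
func (q *workQueue) stealHalf(victim *workQueue) int {
	victim.mu.Lock()
	n := len(victim.tasks) / 2
	stolen := append([]task(nil), victim.tasks[len(victim.tasks)-n:]...)
	victim.tasks = victim.tasks[:len(victim.tasks)-n]
	victim.mu.Unlock()

	q.mu.Lock()
	q.tasks = append(q.tasks, stolen...)
	q.mu.Unlock()
	return n
}

func main() {
	var busy, idle workQueue
	for i := 0; i < 8; i++ {
		busy.put(task(i))
	}
	fmt.Println("stolen:", idle.stealHalf(&busy)) // 4
	if t, ok := idle.get(); ok {
		fmt.Println("idle worker now runs task", t)
	}
}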
4846
4847 //go:linkname setMaxThreads runtime/debug.setMaxThreads
4848 func setMaxThreads(in int) (out int) {
4849 lock(&sched.lock)
4850 out = int(sched.maxmcount)
4851 if in > 0x7fffffff {
4852 sched.maxmcount = 0x7fffffff
4853 } else {
4854 sched.maxmcount = int32(in)
4855 }
4856 checkmcount()
4857 unlock(&sched.lock)
4858 return
4859 }
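// setMaxThreads backs runtime/debug.SetMaxThreads, the limit that checkmcount
// enforces. A small sketch (not part of proc.go); the limit chosen here is
// arbitrary.
package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	prev := debug.SetMaxThreads(5000) // returns the previous limit (10000 by default)
	fmt.Println("previous thread limit:", prev)

	// If the program ever needs more OS threads than the limit, for example
	// because many goroutines are blocked in cgo or syscalls at once, the
	// runtime crashes instead of consuming unbounded kernel resources.
	debug.SetMaxThreads(prev) // restore the previous limit
}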
4860
4861 func haveexperiment(name string) bool {
4862 if name == "framepointer" {
4863 return framepointer_enabled
4864 }
4865 x := sys.Goexperiment
4866 for x != "" {
4867 xname := ""
4868 i := index(x, ",")
4869 if i < 0 {
4870 xname, x = x, ""
4871 } else {
4872 xname, x = x[:i], x[i+1:]
4873 }
4874 if xname == name {
4875 return true
4876 }
4877 if len(xname) > 2 && xname[:2] == "no" && xname[2:] == name {
4878 return false
4879 }
4880 }
4881 return false
4882 }
4883
4884
4885 func procPin() int {
4886 _g_ := getg()
4887 mp := _g_.m
4888
4889 mp.locks++
4890 return int(mp.p.ptr().id)
4891 }
4892
4893
4894 func procUnpin() {
4895 _g_ := getg()
4896 _g_.m.locks--
4897 }
4898
4899
4900
4901 func sync_runtime_procPin() int {
4902 return procPin()
4903 }
4904
4905
4906
4907 func sync_runtime_procUnpin() {
4908 procUnpin()
4909 }
4910
4911
4912
4913 func sync_atomic_runtime_procPin() int {
4914 return procPin()
4915 }
4916
4917
4918
4919 func sync_atomic_runtime_procUnpin() {
4920 procUnpin()
4921 }
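// procPin/procUnpin are exposed to the sync and sync/atomic packages through
// the wrapper functions above; sync.Pool, for instance, pins the goroutine to
// its P while it works on the per-P pool-local caches. A minimal sketch (not
// part of proc.go) of the consumer side.
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer) // Pool pins the P while choosing a per-P shard
	buf.Reset()
	buf.WriteString("hello")
	fmt.Println(buf.String())
	bufPool.Put(buf) // return the buffer to the current P's shard
}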
4922
4923 // Active spinning for sync.Mutex.
4924 //go:linkname sync_runtime_canSpin sync.runtime_canSpin
4925 //go:nosplit
4926 func sync_runtime_canSpin(i int) bool {
4927
4928
4929
4930
4931
4932 if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
4933 return false
4934 }
4935 if p := getg().m.p.ptr(); !runqempty(p) {
4936 return false
4937 }
4938 return true
4939 }
4940
4941
4942
4943 func sync_runtime_doSpin() {
4944 procyield(active_spin_cnt)
4945 }
4946
4947 var stealOrder randomOrder
4948
4949 // randomOrder/randomEnum are helper types for randomized work stealing.
4950 // They allow to enumerate all Ps in different pseudo-random orders without repetitions.
4951 // The algorithm is based on the fact that if we have X such that X and GOMAXPROCS
4952 // are coprime, then the sequence (i + X) % GOMAXPROCS gives the required enumeration.
4953 type randomOrder struct {
4954 count uint32
4955 coprimes []uint32
4956 }
4957
4958 type randomEnum struct {
4959 i uint32
4960 count uint32
4961 pos uint32
4962 inc uint32
4963 }
4964
4965 func (ord *randomOrder) reset(count uint32) {
4966 ord.count = count
4967 ord.coprimes = ord.coprimes[:0]
4968 for i := uint32(1); i <= count; i++ {
4969 if gcd(i, count) == 1 {
4970 ord.coprimes = append(ord.coprimes, i)
4971 }
4972 }
4973 }
4974
4975 func (ord *randomOrder) start(i uint32) randomEnum {
4976 return randomEnum{
4977 count: ord.count,
4978 pos: i % ord.count,
4979 inc: ord.coprimes[i%uint32(len(ord.coprimes))],
4980 }
4981 }
4982
4983 func (enum *randomEnum) done() bool {
4984 return enum.i == enum.count
4985 }
4986
4987 func (enum *randomEnum) next() {
4988 enum.i++
4989 enum.pos = (enum.pos + enum.inc) % enum.count
4990 }
4991
4992 func (enum *randomEnum) position() uint32 {
4993 return enum.pos
4994 }
4995
4996 func gcd(a, b uint32) uint32 {
4997 for b != 0 {
4998 a, b = b, a%b
4999 }
5000 return a
5001 }
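// randomOrder relies on a number-theory fact: stepping through 0..count-1 with
// an increment that is coprime to count visits every index exactly once. A
// standalone sketch (not part of proc.go) checking that property for a small
// count.
package main

import "fmt"

func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	const count = 8
	for inc := uint32(1); inc <= count; inc++ {
		if gcd(inc, count) != 1 {
			continue // not coprime: the walk would revisit some indices early
		}
		seen := make(map[uint32]bool)
		pos := uint32(0)
		for i := 0; i < count; i++ {
			seen[pos] = true
			pos = (pos + inc) % count
		}
		fmt.Printf("inc=%d visits %d distinct positions\n", inc, len(seen)) // always 8
	}
}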
5002