src/runtime/mgcmark.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: marking and scanning

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	fixedRootFinalizers = iota
	fixedRootFreeGStacks
	fixedRootCount

	// rootBlockBytes is the number of bytes to scan per data or
	// BSS root.
	rootBlockBytes = 256 << 10

	// rootBlockSpans is the number of spans to scan per span
	// root.
	rootBlockSpans = 8 * 1024

	// maxObletBytes is the maximum bytes of an object to scan at
	// once. Larger objects will be split up into "oblets" of at
	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
	// scan preemption at ~100 µs.
	//
	// This must be > _MaxSmallSize so that the object base is the
	// span base.
	maxObletBytes = 128 << 10

	// drainCheckThreshold specifies how many units of scan work
	// to do between self-preemption checks in gcDrain. Assuming a
	// scan rate of 1 MB/ms, this is ~100 µs. Lower values have
	// higher overhead in the scan loop (the scheduler check may
	// perform a syscall, so its overhead is nontrivial). Higher
	// values make the system less responsive to incoming work.
	drainCheckThreshold = 100000
)

// gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
// some miscellany) and initializes scanning-related state.
//
// The world must be stopped.
//
//go:nowritebarrier
func gcMarkRootPrepare() {
	if gcphase == _GCmarktermination {
		work.nFlushCacheRoots = int(gomaxprocs)
	} else {
		work.nFlushCacheRoots = 0
	}

	// Compute how many data and BSS root blocks there are.
	nBlocks := func(bytes uintptr) int {
		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
	}

	work.nDataRoots = 0
	work.nBSSRoots = 0

	// Only scan globals once per cycle; preferably concurrently.
	if !work.markrootDone {
		for _, datap := range activeModules() {
			nDataRoots := nBlocks(datap.edata - datap.data)
			if nDataRoots > work.nDataRoots {
				work.nDataRoots = nDataRoots
			}
		}

		for _, datap := range activeModules() {
			nBSSRoots := nBlocks(datap.ebss - datap.bss)
			if nBSSRoots > work.nBSSRoots {
				work.nBSSRoots = nBSSRoots
			}
		}
	}

	if !work.markrootDone {
		// On the first markroot, we need to scan span roots.
		// In concurrent GC, this happens during concurrent
		// mark and we depend on addfinalizer to ensure the
		// finalizer invariants (see markrootSpans) for objects
		// that get finalizers after concurrent mark. In STW
		// GC, this will happen during mark termination.
		//
		// We're only interested in scanning the in-use spans,
		// which will all be swept at this point. More spans
		// may be added to this list during concurrent GC, but
		// we only care about spans that were allocated before
		// this mark phase.
		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()

		// On the first markroot, we need to scan all Gs. Gs
		// may be created after this point, but it's okay that
		// we ignore them because they begin life without any
		// roots, so there's nothing to scan, and any roots
		// they create during the concurrent phase will be
		// scanned during mark termination. During mark
		// termination, allglen isn't changing, so we'll scan
		// all Gs.
		work.nStackRoots = int(atomic.Loaduintptr(&allglen))
	} else {
		// We've already scanned span roots and kept the scan
		// up-to-date during concurrent mark.
		work.nSpanRoots = 0

		// The hybrid barrier ensures that stacks can't
		// contain pointers to unmarked objects, so on the
		// second markroot, there's no need to scan stacks.
		work.nStackRoots = 0

		if debug.gcrescanstacks > 0 {
			// Scan stacks anyway for debugging.
			work.nStackRoots = int(atomic.Loaduintptr(&allglen))
		}
	}

	work.markrootNext = 0
	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
}
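
// A worked example of the job accounting above (illustrative numbers
// only, not taken from the source): during concurrent mark
// (nFlushCacheRoots = 0), one module with a 1 MiB data segment and a
// 300 KiB BSS, 16 in-use span-set blocks, and 100 Gs give
//
//	nDataRoots  = nBlocks(1<<20)   = 4
//	nBSSRoots   = nBlocks(300<<10) = 2
//	nSpanRoots  = 16
//	nStackRoots = 100
//
// so markrootJobs = fixedRootCount (2) + 0 + 4 + 2 + 16 + 100 = 124.
// markroot below decodes each job index back into one of these ranges.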

// gcMarkRootCheck checks that all roots have been scanned. It is
// purely for debugging.
func gcMarkRootCheck() {
	if work.markrootNext < work.markrootJobs {
		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		throw("left over markroot jobs")
	}

	lock(&allglock)
	// Check that stacks have been scanned.
	var gp *g
	if gcphase == _GCmarktermination && debug.gcrescanstacks > 0 {
		for i := 0; i < len(allgs); i++ {
			gp = allgs[i]
			if !(gp.gcscandone && gp.gcscanvalid) && readgstatus(gp) != _Gdead {
				goto fail
			}
		}
	} else {
		for i := 0; i < work.nStackRoots; i++ {
			gp = allgs[i]
			if !gp.gcscandone {
				goto fail
			}
		}
	}
	unlock(&allglock)
	return

fail:
	println("gp", gp, "goid", gp.goid,
		"status", readgstatus(gp),
		"gcscandone", gp.gcscandone,
		"gcscanvalid", gp.gcscanvalid)
	unlock(&allglock)
	throw("scan missed a g")
}

// ptrmask for an allocation containing a single pointer.
var oneptrmask = [...]uint8{1}

// markroot scans the i'th root.
//
// Preemption must be disabled (because this uses a gcWork).
//
// nowritebarrier is only advisory here.
//
//go:nowritebarrier
func markroot(gcw *gcWork, i uint32) {
	// TODO(austin): This is a bit ridiculous. Compute and store
	// the bases in gcMarkRootPrepare instead of the counts.
	baseFlushCache := uint32(fixedRootCount)
	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
	baseBSS := baseData + uint32(work.nDataRoots)
	baseSpans := baseBSS + uint32(work.nBSSRoots)
	baseStacks := baseSpans + uint32(work.nSpanRoots)
	end := baseStacks + uint32(work.nStackRoots)

	// Note: if you add a case here, please also update heapdump.go:dumproots.
	switch {
	case baseFlushCache <= i && i < baseData:
		flushmcache(int(i - baseFlushCache))

	case baseData <= i && i < baseBSS:
		for _, datap := range activeModules() {
			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
		}

	case baseBSS <= i && i < baseSpans:
		for _, datap := range activeModules() {
			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
		}

	case i == fixedRootFinalizers:
		// Only do this once per GC cycle since we don't call
		// queuefinalizer during marking.
		if work.markrootDone {
			break
		}
		for fb := allfin; fb != nil; fb = fb.alllink {
			cnt := uintptr(atomic.Load(&fb.cnt))
			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
		}

	case i == fixedRootFreeGStacks:
		// Only do this once per GC cycle; preferably
		// concurrently.
		if !work.markrootDone {
			// Switch to the system stack so we can call
			// stackfree.
			systemstack(markrootFreeGStacks)
		}

	case baseSpans <= i && i < baseStacks:
		// mark MSpan.specials
		markrootSpans(gcw, int(i-baseSpans))

	default:
		// the rest is scanning goroutine stacks
		var gp *g
		if baseStacks <= i && i < end {
			gp = allgs[i-baseStacks]
		} else {
			throw("markroot: bad index")
		}

		// Remember when we've first observed the G blocked.
		// Needed only for output in tracebacks.
		status := readgstatus(gp) // We are not in a scan state
		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
			gp.waitsince = work.tstart
		}

		// scang must be done on the system stack in case
		// we're trying to scan our own stack.
		systemstack(func() {
			// If this is a self-scan, put the user G in
			// _Gwaiting to prevent self-deadlock. It may
			// already be in _Gwaiting if this is a mark
			// worker or we're in mark termination.
			userG := getg().m.curg
			selfScan := gp == userG && readgstatus(userG) == _Grunning
			if selfScan {
				casgstatus(userG, _Grunning, _Gwaiting)
				userG.waitreason = "garbage collection scan"
			}

			// TODO: scang blocks until gp's stack has
			// been scanned, which may take a while for
			// running goroutines. Consider doing this in
			// two phases where the first is non-blocking:
			// we scan the stacks we can and ask running
			// goroutines to scan themselves; and the
			// second blocks.
			scang(gp, gcw)

			if selfScan {
				casgstatus(userG, _Gwaiting, _Grunning)
			}
		})
	}
}

// markrootBlock scans the shard'th shard of the block of memory [b0,
// b0+n0), with the given pointer mask.
//
//go:nowritebarrier
func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
	if rootBlockBytes%(8*sys.PtrSize) != 0 {
		// This is necessary to pick byte offsets in ptrmask0.
		throw("rootBlockBytes must be a multiple of 8*ptrSize")
	}

	b := b0 + uintptr(shard)*rootBlockBytes
	if b >= b0+n0 {
		return
	}
	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
	n := uintptr(rootBlockBytes)
	if b+n > b0+n0 {
		n = b0 + n0 - b
	}

	// Scan this shard.
	scanblock(b, n, ptrmask, gcw)
}
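
// Continuing the example under gcMarkRootPrepare (illustrative
// numbers): for a 300 KiB BSS, shard 0 scans the first 256 KiB and
// shard 1 scans the remaining 44 KiB, since the final block is
// truncated to the end of the segment. On a 64-bit system, each
// shard's ptrmask is offset by rootBlockBytes/(8*sys.PtrSize) = 4096
// bytes from the previous one: one mask bit per pointer-sized word.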

// markrootFreeGStacks frees stacks of dead Gs.
//
// This does not free stacks of dead Gs cached on Ps, but having a few
// cached stacks around isn't a problem.
func markrootFreeGStacks() {
	// Take list of dead Gs with stacks.
	lock(&sched.gflock)
	list := sched.gfreeStack
	sched.gfreeStack = nil
	unlock(&sched.gflock)
	if list == nil {
		return
	}

	// Free stacks.
	tail := list
	for gp := list; gp != nil; gp = gp.schedlink.ptr() {
		shrinkstack(gp)
		tail = gp
	}

	// Put Gs back on the free list.
	lock(&sched.gflock)
	tail.schedlink.set(sched.gfreeNoStack)
	sched.gfreeNoStack = list
	unlock(&sched.gflock)
}

// markrootSpans marks roots for one shard of work.spans.
//
//go:nowritebarrier
func markrootSpans(gcw *gcWork, shard int) {
	// Objects with finalizers have two GC-related invariants:
	//
	// 1) Everything reachable from the object must be marked.
	// This ensures that when we pass the object to its finalizer,
	// everything the finalizer can reach will be retained.
	//
	// 2) Finalizer specials (which are not in the garbage
	// collected heap) are roots. In practice, this means the fn
	// field must be scanned.
	//
	// TODO(austin): There are several ideas for making this more
	// efficient in issue #11485.

	if work.markrootDone {
		throw("markrootSpans during second markroot")
	}

	sg := mheap_.sweepgen
	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
	// Note that work.spans may not include spans that were
	// allocated between entering the scan phase and now. This is
	// okay because any objects with finalizers in those spans
	// must have been allocated and given finalizers after we
	// entered the scan phase, so addfinalizer will have ensured
	// the above invariants for them.
	for _, s := range spans {
		if s.state != mSpanInUse {
			continue
		}
		if !useCheckmark && s.sweepgen != sg {
			// sweepgen was updated (+2) during non-checkmark GC pass
			print("sweep ", s.sweepgen, " ", sg, "\n")
			throw("gc: unswept span")
		}

		// Speculatively check if there are any specials
		// without acquiring the span lock. This may race with
		// adding the first special to a span, but in that
		// case addfinalizer will observe that the GC is
		// active (which is globally synchronized) and ensure
		// the above invariants. We may also ensure the
		// invariants, but it's okay to do that twice.
		if s.specials == nil {
			continue
		}

		// Lock the specials to prevent a special from being
		// removed from the list while we're traversing it.
		lock(&s.speciallock)

		for sp := s.specials; sp != nil; sp = sp.next {
			if sp.kind != _KindSpecialFinalizer {
				continue
			}
			// don't mark finalized object, but scan it so we
			// retain everything it points to.
			spf := (*specialfinalizer)(unsafe.Pointer(sp))
			// A finalizer can be set for an inner byte of an object, find object beginning.
			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize

			// Mark everything that can be reached from
			// the object (but *not* the object itself or
			// we'll never collect it).
			scanobject(p, gcw)

			// The special itself is a root.
			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw)
		}

		unlock(&s.speciallock)
	}
}

// gcAssistAlloc performs GC work to make gp's assist debt positive.
// gp must be the calling user goroutine.
//
// This must be called with preemption enabled.
func gcAssistAlloc(gp *g) {
	// Don't assist in non-preemptible contexts. These are
	// generally fragile and won't allow the assist to block.
	if getg() == gp.m.g0 {
		return
	}
	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
		return
	}

	traced := false
retry:
	// Compute the amount of scan work we need to do to make the
	// balance positive. When the required amount of work is low,
	// we over-assist to build up credit for future allocations
	// and amortize the cost of assisting.
	debtBytes := -gp.gcAssistBytes
	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
	if scanWork < gcOverAssistWork {
		scanWork = gcOverAssistWork
		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
	}

	// Steal as much credit as we can from the background GC's
	// scan credit. This is racy and may drop the credit below
	// zero if two mutators steal at the same time. This will
	// just cause steals to fail until credit is accumulated
	// again, so in the long run it doesn't really matter, but we
	// do have to handle the negative credit case.
	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
	stolen := int64(0)
	if bgScanCredit > 0 {
		if bgScanCredit < scanWork {
			stolen = bgScanCredit
			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
		} else {
			stolen = scanWork
			gp.gcAssistBytes += debtBytes
		}
		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)

		scanWork -= stolen

		if scanWork == 0 {
			// We were able to steal all of the credit we
			// needed.
			if traced {
				traceGCMarkAssistDone()
			}
			return
		}
	}

	if trace.enabled && !traced {
		traced = true
		traceGCMarkAssistStart()
	}

	// Perform assist work.
	systemstack(func() {
		gcAssistAlloc1(gp, scanWork)
		// The user stack may have moved, so this can't touch
		// anything on it until it returns from systemstack.
	})

	completed := gp.param != nil
	gp.param = nil
	if completed {
		gcMarkDone()
	}

	if gp.gcAssistBytes < 0 {
		// We were unable to steal enough credit or perform
		// enough scan work to pay off the assist debt. We
		// need to do one of these before letting the mutator
		// allocate more to prevent over-allocation.
		//
		// If this is because we were preempted, reschedule
		// and try again.
		if gp.preempt {
			Gosched()
			goto retry
		}

		// Add this G to an assist queue and park. When the GC
		// has more background credit, it will satisfy queued
		// assists before flushing to the global credit pool.
		//
		// Note that this does *not* get woken up when more
		// work is added to the work queue. The theory is that
		// there wasn't enough work to do anyway, so we might
		// as well let background marking take care of the
		// work that is available.
		if !gcParkAssist() {
			goto retry
		}

		// At this point either background GC has satisfied
		// this G's assist debt, or the GC cycle is over.
	}
	if traced {
		traceGCMarkAssistDone()
	}
}
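
// A sketch of the debt arithmetic above (illustrative values, not
// taken from the source): if assistWorkPerByte = 0.5 and
// gp.gcAssistBytes = -4096, the goroutine owes scanWork = 0.5*4096 =
// 2048 units. If that is below gcOverAssistWork, it is rounded up to
// gcOverAssistWork and debtBytes is recomputed from it, so a
// goroutine making many small allocations enters the assist path
// once, builds up extra credit, and spends it on later allocations.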

// gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
// stack. This is a separate function to make it easier to see that
// we're not capturing anything from the user stack, since the user
// stack may move while we're in this function.
//
// gcAssistAlloc1 indicates whether this assist completed the mark
// phase by setting gp.param to non-nil. This can't be communicated on
// the stack since it may move.
//
//go:systemstack
func gcAssistAlloc1(gp *g, scanWork int64) {
	// Clear the flag indicating that this assist completed the
	// mark phase.
	gp.param = nil

	if atomic.Load(&gcBlackenEnabled) == 0 {
		// The gcBlackenEnabled check in malloc races with the
		// store that clears it but an atomic check in every malloc
		// would be a performance hit.
		// Instead we recheck it here on the non-preemptible system
		// stack to determine if we should perform an assist.

		// GC is done, so ignore any remaining debt.
		gp.gcAssistBytes = 0
		return
	}
	// Track time spent in this assist. Since we're on the
	// system stack, this is non-preemptible, so we can
	// just measure start and end time.
	startTime := nanotime()

	decnwait := atomic.Xadd(&work.nwait, -1)
	if decnwait == work.nproc {
		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		throw("nwait > work.nprocs")
	}

	// gcDrainN requires the caller to be preemptible.
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "GC assist marking"

	// drain own cached work first in the hopes that it
	// will be more cache friendly.
	gcw := &getg().m.p.ptr().gcw
	workDone := gcDrainN(gcw, scanWork)
	// If we are near the end of the mark phase
	// dispose of the gcw.
	if gcBlackenPromptly {
		gcw.dispose()
	}

	casgstatus(gp, _Gwaiting, _Grunning)

	// Record that we did this much scan work.
	//
	// Back out the number of bytes of assist credit that
	// this scan work counts for. The "1+" is a poor man's
	// round-up, to ensure this adds credit even if
	// assistBytesPerWork is very low.
	gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))

	// If this is the last worker and we ran out of work,
	// signal a completion point.
	incnwait := atomic.Xadd(&work.nwait, +1)
	if incnwait > work.nproc {
		println("runtime: work.nwait=", incnwait,
			"work.nproc=", work.nproc,
			"gcBlackenPromptly=", gcBlackenPromptly)
		throw("work.nwait > work.nproc")
	}

	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		// This has reached a background completion point. Set
		// gp.param to a non-nil value to indicate this. It
		// doesn't matter what we set it to (it just has to be
		// a valid pointer).
		gp.param = unsafe.Pointer(gp)
	}
	duration := nanotime() - startTime
	_p_ := gp.m.p.ptr()
	_p_.gcAssistTime += duration
	if _p_.gcAssistTime > gcAssistTimeSlack {
		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
		_p_.gcAssistTime = 0
	}
}

// gcWakeAllAssists wakes all currently blocked assists. This is used
// at the end of a GC cycle. gcBlackenEnabled must be false to prevent
// new assists from going to sleep after this point.
func gcWakeAllAssists() {
	lock(&work.assistQueue.lock)
	injectglist(work.assistQueue.head.ptr())
	work.assistQueue.head.set(nil)
	work.assistQueue.tail.set(nil)
	unlock(&work.assistQueue.lock)
}

// gcParkAssist puts the current goroutine on the assist queue and parks.
//
// gcParkAssist reports whether the assist is now satisfied. If it
// returns false, the caller must retry the assist.
//
//go:nowritebarrier
func gcParkAssist() bool {
	lock(&work.assistQueue.lock)
	// If the GC cycle finished while we were getting the lock,
	// exit the assist. The cycle can't finish while we hold the
	// lock.
	if atomic.Load(&gcBlackenEnabled) == 0 {
		unlock(&work.assistQueue.lock)
		return true
	}

	gp := getg()
	oldHead, oldTail := work.assistQueue.head, work.assistQueue.tail
	if oldHead == 0 {
		work.assistQueue.head.set(gp)
	} else {
		oldTail.ptr().schedlink.set(gp)
	}
	work.assistQueue.tail.set(gp)
	gp.schedlink.set(nil)

	// Recheck for background credit now that this G is in
	// the queue, but can still back out. This avoids a
	// race in case background marking has flushed more
	// credit since we checked above.
	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
		work.assistQueue.head = oldHead
		work.assistQueue.tail = oldTail
		if oldTail != 0 {
			oldTail.ptr().schedlink.set(nil)
		}
		unlock(&work.assistQueue.lock)
		return false
	}
	// Park.
	goparkunlock(&work.assistQueue.lock, "GC assist wait", traceEvGoBlockGC, 2)
	return true
}

// gcFlushBgCredit flushes scanWork units of background scan work
// credit. This first satisfies blocked assists on the
// work.assistQueue and then flushes any remaining credit to
// gcController.bgScanCredit.
//
// Write barriers are disallowed because this is used by gcDrain after
// it has ensured that all work is drained and this must preserve that
// condition.
//
//go:nowritebarrierrec
func gcFlushBgCredit(scanWork int64) {
	if work.assistQueue.head == 0 {
		// Fast path; there are no blocked assists. There's a
		// small window here where an assist may add itself to
		// the blocked queue and park. If that happens, we'll
		// just get it on the next flush.
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
		return
	}

	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)

	lock(&work.assistQueue.lock)
	gp := work.assistQueue.head.ptr()
	for gp != nil && scanBytes > 0 {
		// Note that gp.gcAssistBytes is negative because gp
		// is in debt. Think carefully about the signs below.
		if scanBytes+gp.gcAssistBytes >= 0 {
			// Satisfy this entire assist debt.
			scanBytes += gp.gcAssistBytes
			gp.gcAssistBytes = 0
			xgp := gp
			gp = gp.schedlink.ptr()
			// It's important that we *not* put xgp in
			// runnext. Otherwise, it's possible for user
			// code to exploit the GC as a sort of
			// credit-based flow control system where it
			// would always be able to run.
			ready(xgp, 0, false)
		} else {
			// Partially satisfy this assist.
			gp.gcAssistBytes += scanBytes
			scanBytes = 0
			// As a heuristic, we move this assist to the
			// back of the queue so that large assists
			// can't clog up the assist queue and
			// substantially delay small assists.
			xgp := gp
			gp = gp.schedlink.ptr()
			if gp == nil {
				// gp is the only assist in the queue.
				gp = xgp
			} else {
				xgp.schedlink = 0
				work.assistQueue.tail.ptr().schedlink.set(xgp)
				work.assistQueue.tail.set(xgp)
			}
			break
		}
	}
	work.assistQueue.head.set(gp)
	if gp == nil {
		work.assistQueue.tail.set(nil)
	}

	if scanBytes > 0 {
		// Convert from scan bytes back to work.
		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
	}
	unlock(&work.assistQueue.lock)
}

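// A sketch of one flush (illustrative values, not from the source):
// suppose assistBytesPerWork = 2 and a background worker flushes
// scanWork = 1000, i.e. scanBytes = 2000. If the queue holds g1 with
// gcAssistBytes = -1500 followed by g2 with -3000, then g1 is made
// whole and readied (leaving 500 bytes of credit), and g2 absorbs the
// remaining 500 (leaving -2500) and stays parked; had there been
// assists queued behind g2, it would have been rotated to the tail.
// scanBytes is now 0, so nothing is flushed to bgScanCredit.
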
// scanstack scans gp's stack, greying all pointers found on the stack.
//
// scanstack is marked go:systemstack because it must not be preempted
// while using a workbuf.
//
//go:nowritebarrier
//go:systemstack
func scanstack(gp *g, gcw *gcWork) {
	if gp.gcscanvalid {
		return
	}

	if readgstatus(gp)&_Gscan == 0 {
		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
		throw("scanstack - bad status")
	}

	switch readgstatus(gp) &^ _Gscan {
	default:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("mark - bad status")
	case _Gdead:
		return
	case _Grunning:
		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
		throw("scanstack: goroutine not stopped")
	case _Grunnable, _Gsyscall, _Gwaiting:
		// ok
	}

	if gp == getg() {
		throw("can't scan our own stack")
	}
	mp := gp.m
	if mp != nil && mp.helpgc != 0 {
		throw("can't scan gchelper stack")
	}

	// Shrink the stack if not much of it is being used. During
	// concurrent GC, we can do this during concurrent mark.
	if !work.markrootDone {
		shrinkstack(gp)
	}

	// Scan the saved context register. This is effectively a live
	// register that gets moved back and forth between the
	// register and sched.ctxt without a write barrier.
	if gp.sched.ctxt != nil {
		scanblock(uintptr(unsafe.Pointer(&gp.sched.ctxt)), sys.PtrSize, &oneptrmask[0], gcw)
	}

	// Scan the stack.
	var cache pcvalueCache
	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
		scanframeworker(frame, &cache, gcw)
		return true
	}
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
	tracebackdefers(gp, scanframe, nil)
	gp.gcscanvalid = true
}

// Scan a stack frame: local variables and function arguments/results.
//go:nowritebarrier
func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {

	f := frame.fn
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return
	}
	if _DebugGC > 1 {
		print("scanframe ", funcname(f), "\n")
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
	if pcdata == -1 {
		// We do not have a valid pcdata value but there might be a
		// stackmap for this function. It is likely that we are looking
		// at the function prologue, assume so and hope for the best.
		pcdata = 0
	}

	// Scan local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stkmap == nil || stkmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}

		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stkmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("scanframe: bad symbol table")
		}
		bv := stackmapdata(stkmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		scanblock(frame.varp-size, size, bv.bytedata, gcw)
	}

	// Scan arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stkmap == nil || stkmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stkmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("scanframe: bad symbol table")
			}
			bv = stackmapdata(stkmap, pcdata)
		}
		scanblock(frame.argp, uintptr(bv.n)*sys.PtrSize, bv.bytedata, gcw)
	}
}

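// Frame-scanning example (illustrative numbers): on amd64, a frame
// with frame.varp-frame.sp = 64 bytes whose locals stack map has
// bv.n = 3 rescopes size to 3*8 = 24 bytes, so only [varp-24, varp),
// the region the compiler's stack map describes as possibly holding
// pointers, is passed to scanblock; the rest of the frame is skipped.
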
type gcDrainFlags int

const (
	gcDrainUntilPreempt gcDrainFlags = 1 << iota
	gcDrainNoBlock
	gcDrainFlushBgCredit
	gcDrainIdle
	gcDrainFractional

	// gcDrainBlock means neither gcDrainUntilPreempt nor
	// gcDrainNoBlock. It is the default, but callers should use
	// the constant for documentation purposes.
	gcDrainBlock gcDrainFlags = 0
)
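
// Callers combine these flags; for example, an idle-priority drain
// that also flushes background credit and yields on preemption would
// pass gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit (see the
// background mark worker in mgc.go for the combinations actually
// used).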

// gcDrain scans roots and objects in work buffers, blackening grey
// objects until all roots and work buffers have been drained.
//
// If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
// is set. This implies gcDrainNoBlock.
//
// If flags&gcDrainIdle != 0, gcDrain returns when there is other work
// to do. This implies gcDrainNoBlock.
//
// If flags&gcDrainFractional != 0, gcDrain self-preempts when
// pollFractionalWorkerExit() returns true. This implies
// gcDrainNoBlock.
//
// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
// unable to get more work. Otherwise, it will block until all
// blocking calls are blocked in gcDrain.
//
// If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
// credit to gcController.bgScanCredit every gcCreditSlack units of
// scan work.
//
//go:nowritebarrier
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
	if !writeBarrier.needed {
		throw("gcDrain phase incorrect")
	}

	gp := getg().m.curg
	preemptible := flags&gcDrainUntilPreempt != 0
	blocking := flags&(gcDrainUntilPreempt|gcDrainIdle|gcDrainFractional|gcDrainNoBlock) == 0
	flushBgCredit := flags&gcDrainFlushBgCredit != 0
	idle := flags&gcDrainIdle != 0

	initScanWork := gcw.scanWork

	// checkWork is the scan work before performing the next
	// self-preempt check.
	checkWork := int64(1<<63 - 1)
	var check func() bool
	if flags&(gcDrainIdle|gcDrainFractional) != 0 {
		checkWork = initScanWork + drainCheckThreshold
		if idle {
			check = pollWork
		} else if flags&gcDrainFractional != 0 {
			check = pollFractionalWorkerExit
		}
	}

	// Drain root marking jobs.
	if work.markrootNext < work.markrootJobs {
		for !(preemptible && gp.preempt) {
			job := atomic.Xadd(&work.markrootNext, +1) - 1
			if job >= work.markrootJobs {
				break
			}
			markroot(gcw, job)
			if check != nil && check() {
				goto done
			}
		}
	}

	// Drain heap marking jobs.
	for !(preemptible && gp.preempt) {
		// Try to keep work available on the global queue. We used to
		// check if there were waiting workers, but it's better to
		// just keep work available than to make workers wait. In the
		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
		// balances.
		if work.full == 0 {
			gcw.balance()
		}

		var b uintptr
		if blocking {
			b = gcw.get()
		} else {
			b = gcw.tryGetFast()
			if b == 0 {
				b = gcw.tryGet()
			}
		}
		if b == 0 {
			// work barrier reached or tryGet failed.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit to the global
		// account if we've accumulated enough locally so
		// mutator assists can draw on it.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			if flushBgCredit {
				gcFlushBgCredit(gcw.scanWork - initScanWork)
				initScanWork = 0
			}
			checkWork -= gcw.scanWork
			gcw.scanWork = 0

			if checkWork <= 0 {
				checkWork += drainCheckThreshold
				if check != nil && check() {
					break
				}
			}
		}
	}

	// In blocking mode, write barriers are not allowed after this
	// point because we must preserve the condition that the work
	// buffers are empty.

done:
	// Flush remaining scan work credit.
	if gcw.scanWork > 0 {
		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
		if flushBgCredit {
			gcFlushBgCredit(gcw.scanWork - initScanWork)
		}
		gcw.scanWork = 0
	}
}

// gcDrainN blackens grey objects until it has performed roughly
// scanWork units of scan work or the G is preempted. This is
// best-effort, so it may perform less work if it fails to get a work
// buffer. Otherwise, it will perform at least n units of work, but
// may perform more because scanning is always done in whole object
// increments. It returns the amount of scan work performed.
//
// The caller goroutine must be in a preemptible state (e.g.,
// _Gwaiting) to prevent deadlocks during stack scanning. As a
// consequence, this must be called on the system stack.
//
//go:nowritebarrier
//go:systemstack
func gcDrainN(gcw *gcWork, scanWork int64) int64 {
	if !writeBarrier.needed {
		throw("gcDrainN phase incorrect")
	}

	// There may already be scan work on the gcw, which we don't
	// want to claim was done by this call.
	workFlushed := -gcw.scanWork

	gp := getg().m.curg
	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
		// See gcDrain comment.
		if work.full == 0 {
			gcw.balance()
		}

		// This might be a good place to add prefetch code:
		//
		//	if wbuf.nobj > 4 {
		//		PREFETCH(wbuf->obj[wbuf.nobj - 3])
		//	}
		//
		b := gcw.tryGetFast()
		if b == 0 {
			b = gcw.tryGet()
		}

		if b == 0 {
			// Try to do a root job.
			//
			// TODO: Assists should get credit for this
			// work.
			if work.markrootNext < work.markrootJobs {
				job := atomic.Xadd(&work.markrootNext, +1) - 1
				if job < work.markrootJobs {
					markroot(gcw, job)
					continue
				}
			}
			// No heap or root jobs.
			break
		}
		scanobject(b, gcw)

		// Flush background scan work credit.
		if gcw.scanWork >= gcCreditSlack {
			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
			workFlushed += gcw.scanWork
			gcw.scanWork = 0
		}
	}

	// Unlike gcDrain, there's no need to flush remaining work
	// here because this never flushes to bgScanCredit and
	// gcw.dispose will flush any remaining work to scanWork.

	return workFlushed + gcw.scanWork
}

// scanblock scans b as scanobject would, but using an explicit
// pointer bitmap instead of the heap bitmap.
//
// This is used to scan non-heap roots, so it does not update
// gcw.bytesMarked or gcw.scanWork.
//
//go:nowritebarrier
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
	// Use local copies of original parameters, so that a stack trace
	// due to one of the throws below shows the original block
	// base and extent.
	b := b0
	n := n0

	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	for i := uintptr(0); i < n; {
		// Find bits for the next word.
		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
		if bits == 0 {
			i += sys.PtrSize * 8
			continue
		}
		for j := 0; j < 8 && i < n; j++ {
			if bits&1 != 0 {
				// Same work as in scanobject; see comments there.
				obj := *(*uintptr)(unsafe.Pointer(b + i))
				if obj != 0 && arena_start <= obj && obj < arena_used {
					if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
						greyobject(obj, b, i, hbits, span, gcw, objIndex)
					}
				}
			}
			bits >>= 1
			i += sys.PtrSize
		}
	}
}

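// Mask layout example: ptrmask holds one bit per pointer-sized word
// of [b0, b0+n0), so byte i/(8*sys.PtrSize) of the mask covers words
// i/sys.PtrSize through i/sys.PtrSize+7. A mask byte of 0 therefore
// lets the outer loop skip 8 words (64 bytes on a 64-bit system) at
// a time; oneptrmask earlier in this file is the degenerate
// single-word case.
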
// scanobject scans the object starting at b, adding pointers to gcw.
// b must point to the beginning of a heap object or an oblet.
// scanobject consults the GC bitmap for the pointer mask and the
// spans for the size of the object.
//
//go:nowritebarrier
func scanobject(b uintptr, gcw *gcWork) {
	// Note that arena_used may change concurrently during
	// scanobject and hence scanobject may encounter a pointer to
	// a newly allocated heap object that is *not* in
	// [start, used). It will not mark this object; however, we
	// know that it was just installed by a mutator, which means
	// that mutator will execute a write barrier and take care of
	// marking it. This is even more pronounced on relaxed memory
	// architectures since we access arena_used without barriers
	// or synchronization, but the same logic applies.
	arena_start := mheap_.arena_start
	arena_used := mheap_.arena_used

	// Find the bits for b and the size of the object at b.
	//
	// b is either the beginning of an object, in which case this
	// is the size of the object to scan, or it points to an
	// oblet, in which case we compute the size to scan below.
	hbits := heapBitsForAddr(b)
	s := spanOfUnchecked(b)
	n := s.elemsize
	if n == 0 {
		throw("scanobject n == 0")
	}

	if n > maxObletBytes {
		// Large object. Break into oblets for better
		// parallelism and lower latency.
		if b == s.base() {
			// It's possible this is a noscan object (not
			// from greyobject, but from other code
			// paths), in which case we must *not* enqueue
			// oblets since their bitmaps will be
			// uninitialized.
			if s.spanclass.noscan() {
				// Bypass the whole scan.
				gcw.bytesMarked += uint64(n)
				return
			}

			// Enqueue the other oblets to scan later.
			// Some oblets may be in b's scalar tail, but
			// these will be marked as "no more pointers",
			// so we'll drop out immediately when we go to
			// scan those.
			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
				if !gcw.putFast(oblet) {
					gcw.put(oblet)
				}
			}
		}

		// Compute the size of the oblet. Since this object
		// must be a large object, s.base() is the beginning
		// of the object.
		n = s.base() + s.elemsize - b
		if n > maxObletBytes {
			n = maxObletBytes
		}
	}

	var i uintptr
	for i = 0; i < n; i += sys.PtrSize {
		// Find bits for this word.
		if i != 0 {
			// Avoid needless hbits.next() on last iteration.
			hbits = hbits.next()
		}
		// Load bits once. See CL 22712 and issue 16973 for discussion.
		bits := hbits.bits()
		// During checkmarking, 1-word objects store the checkmark
		// in the type bit for the one word. The only one-word objects
		// are pointers, or else they'd be merged with other non-pointer
		// data into larger allocations.
		if i != 1*sys.PtrSize && bits&bitScan == 0 {
			break // no more pointers in this object
		}
		if bits&bitPointer == 0 {
			continue // not a pointer
		}

		// Work here is duplicated in scanblock and above.
		// If you make changes here, make changes there too.
		obj := *(*uintptr)(unsafe.Pointer(b + i))

		// At this point we have extracted the next potential pointer.
		// Check if it points into heap and not back at the current object.
		if obj != 0 && arena_start <= obj && obj < arena_used && obj-b >= n {
			// Mark the object.
			if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
				greyobject(obj, b, i, hbits, span, gcw, objIndex)
			}
		}
	}
	gcw.bytesMarked += uint64(n)
	gcw.scanWork += int64(i)
}

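// Oblet example: a 1 MiB pointer-bearing object is scanned as 8
// oblets of maxObletBytes = 128 KiB each. The first call scans
// [base, base+128 KiB) and enqueues the 7 remaining oblet start
// addresses; each later call computes
// n = min(s.base()+s.elemsize-b, maxObletBytes), so a short trailing
// oblet is handled naturally.
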
// Shade the object if it isn't already.
// The object is not nil and known to be in the heap.
// Preemption must be disabled.
//go:nowritebarrier
func shade(b uintptr) {
	if obj, hbits, span, objIndex := heapBitsForObject(b, 0, 0); obj != 0 {
		gcw := &getg().m.p.ptr().gcw
		greyobject(obj, 0, 0, hbits, span, gcw, objIndex)
		if gcphase == _GCmarktermination || gcBlackenPromptly {
			// Ps aren't allowed to cache work during mark
			// termination.
			gcw.dispose()
		}
	}
}

// obj is the start of an object with mark mbits.
// If it isn't already marked, mark it and enqueue into gcw.
// base and off are for debugging only and could be removed.
//
// See also wbBufFlush1, which partially duplicates this logic.
//
//go:nowritebarrierrec
func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork, objIndex uintptr) {
	// obj should be start of allocation, and so must be at least pointer-aligned.
	if obj&(sys.PtrSize-1) != 0 {
		throw("greyobject: obj not pointer-aligned")
	}
	mbits := span.markBitsForIndex(objIndex)

	if useCheckmark {
		if !mbits.isMarked() {
			printlock()
			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")

			// Dump the source (base) object.
			gcDumpObject("base", base, off)

			// Dump the object.
			gcDumpObject("obj", obj, ^uintptr(0))

			getg().m.traceback = 2
			throw("checkmark found unmarked object")
		}
		if hbits.isCheckmarked(span.elemsize) {
			return
		}
		hbits.setCheckmarked(span.elemsize)
		if !hbits.isCheckmarked(span.elemsize) {
			throw("setCheckmarked and isCheckmarked disagree")
		}
	} else {
		if debug.gccheckmark > 0 && span.isFree(objIndex) {
			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
			gcDumpObject("base", base, off)
			gcDumpObject("obj", obj, ^uintptr(0))
			getg().m.traceback = 2
			throw("marking free object")
		}

		// If marked we have nothing to do.
		if mbits.isMarked() {
			return
		}
		// mbits.setMarked() // Avoid extra call overhead with manual inlining.
		atomic.Or8(mbits.bytep, mbits.mask)
		// If this is a noscan object, fast-track it to black
		// instead of greying it.
		if span.spanclass.noscan() {
			gcw.bytesMarked += uint64(span.elemsize)
			return
		}
	}

	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
	// seems like a nice optimization that can be added back in.
	// There needs to be time between the PREFETCH and the use.
	// Previously we put the obj in an 8 element buffer that is drained at a rate
	// to give the PREFETCH time to do its work.
	// Use of PREFETCHNTA might be more appropriate than PREFETCH.
	if !gcw.putFast(obj) {
		gcw.put(obj)
	}
}

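// Mark-bit example: markBitsForIndex(objIndex) addresses a single bit
// in the span's mark bitmap; for instance, objIndex 11 maps to bit
// 11%8 = 3 of byte 11/8 = 1, and atomic.Or8(mbits.bytep, mbits.mask)
// sets just that bit without disturbing neighboring objects' bits.
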
// gcDumpObject dumps the contents of obj for debugging and marks the
// field at byte offset off in obj.
func gcDumpObject(label string, obj, off uintptr) {
	if obj < mheap_.arena_start || obj >= mheap_.arena_used {
		print(label, "=", hex(obj), " is not in the Go heap\n")
		return
	}
	k := obj >> _PageShift
	x := k
	x -= mheap_.arena_start >> _PageShift
	s := mheap_.spans[x]
	print(label, "=", hex(obj), " k=", hex(k))
	if s == nil {
		print(" s=nil\n")
		return
	}
	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.spanclass=", s.spanclass, " s.elemsize=", s.elemsize, " s.state=")
	if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
		print(mSpanStateNames[s.state], "\n")
	} else {
		print("unknown(", s.state, ")\n")
	}

	skipped := false
	size := s.elemsize
	if s.state == _MSpanManual && size == 0 {
		// We're printing something from a stack frame. We
		// don't know how big it is, so just show up to and
		// including off.
		size = off + sys.PtrSize
	}
	for i := uintptr(0); i < size; i += sys.PtrSize {
		// For big objects, just print the beginning (because
		// that usually hints at the object's type) and the
		// fields around off.
		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
			skipped = true
			continue
		}
		if skipped {
			print(" ...\n")
			skipped = false
		}
		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
		if i == off {
			print(" <==")
		}
		print("\n")
	}
	if skipped {
		print(" ...\n")
	}
}

// gcmarknewobject marks a newly allocated object black. obj must
// not contain any non-nil pointers.
//
// This is nosplit so it can manipulate a gcWork without preemption.
//
//go:nowritebarrier
//go:nosplit
func gcmarknewobject(obj, size, scanSize uintptr) {
	if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen.
		throw("gcmarknewobject called while doing checkmark")
	}
	markBitsForAddr(obj).setMarked()
	gcw := &getg().m.p.ptr().gcw
	gcw.bytesMarked += uint64(size)
	gcw.scanWork += int64(scanSize)
	if gcBlackenPromptly {
		// There shouldn't be anything in the work queue, but
		// we still need to flush stats.
		gcw.dispose()
	}
}

// gcMarkTinyAllocs greys all active tiny alloc blocks.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
	for _, p := range allp {
		c := p.mcache
		if c == nil || c.tiny == 0 {
			continue
		}
		_, hbits, span, objIndex := heapBitsForObject(c.tiny, 0, 0)
		gcw := &p.gcw
		greyobject(c.tiny, 0, 0, hbits, span, gcw, objIndex)
		if gcBlackenPromptly {
			gcw.dispose()
		}
	}
}

// Checkmarking
//
// Checkmark mode is a debugging mode (enabled with
// GODEBUG=gccheckmark=1) that verifies the concurrent mark phase.
// After concurrent marking, the GC stops the world and retraverses
// the reachable object graph, checking that everything that should
// have been marked actually was. If it finds a reachable but
// unmarked object, it throws (see greyobject above), which indicates
// either a missed write barrier or a bug in the collector itself.
//
// Checkmarking uses a separate set of mark bits (the checkmark bits
// in the heap bitmap), so the ordinary mark bits are left intact for
// comparison.
//
// If useCheckmark is true, marking of an object uses the checkmark
// bits instead of the standard mark bits.
var useCheckmark = false

func initCheckmarks() {
	useCheckmark = true
	for _, s := range mheap_.allspans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
		}
	}
}

func clearCheckmarks() {
	useCheckmark = false
	for _, s := range mheap_.allspans {
		if s.state == _MSpanInUse {
			heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
		}
	}
}