Source file
src/runtime/mheap.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.go for overview.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// minPhysPageSize is a lower bound on the physical page size. The true
// physical page size may be larger than this.
const minPhysPageSize = 4096

// mheap is the main malloc heap: free and busy lists of spans keyed by
// page count (up to _MaxMHeapList pages), a treap of larger free spans,
// and the rest of the heap-global bookkeeping.
type mheap struct {
	lock      mutex
	free      [_MaxMHeapList]mSpanList // free lists of given length
	freelarge mTreap                   // free treap of spans with length >= _MaxMHeapList
	busy      [_MaxMHeapList]mSpanList // busy lists of spans of given length
	busylarge mSpanList                // busy list of spans with length >= _MaxMHeapList
	sweepgen  uint32                   // sweep generation, see comment in mspan
	sweepdone uint32                   // all spans are swept
	sweepers  uint32                   // number of active sweepone calls

	// allspans is a slice of all mspans ever created. Each mspan
	// appears exactly once.
	allspans []*mspan

	// spans is a lookup table mapping virtual address page IDs to *mspan.
	// For allocated spans, their pages map to the span itself.
	// For free spans, only the lowest and highest pages map to the span itself;
	// internal pages map to an arbitrary span.
	// For pages that have never been allocated, spans entries are nil.
	spans []*mspan

	// sweepSpans contains two mspan stacks: one of swept in-use
	// spans and one of unswept in-use spans. These two trade
	// roles on each GC cycle. Since the sweepgen increases by 2
	// on each cycle, sweepSpans[sweepgen/2%2] holds the swept
	// spans and sweepSpans[1-sweepgen/2%2] holds the unswept spans.
	sweepSpans [2]gcSweepBuf

	_ uint32 // align uint64 fields on 32-bit for atomics

	// Proportional sweep
	pagesInUse         uint64  // pages of spans in state _MSpanInUse; R/W with mheap.lock
	pagesSwept         uint64  // pages swept this cycle; updated atomically
	pagesSweptBasis    uint64  // pagesSwept to use as the origin of sweep ratio; updated atomically
	sweepHeapLiveBasis uint64  // value of heap_live to use as the origin of sweep ratio; written with lock, read without
	sweepPagesPerByte  float64 // proportional sweep ratio; written with lock, read without

	// Malloc stats.
	largealloc  uint64                  // bytes allocated for large objects
	nlargealloc uint64                  // number of large object allocations
	largefree   uint64                  // bytes freed for large objects
	nlargefree  uint64                  // number of frees for large objects
	nsmallfree  [_NumSizeClasses]uint64 // number of frees for small objects, by size class

	// range of addresses we might see in the heap
	bitmap        uintptr // points one byte past the end of the heap bitmap
	bitmap_mapped uintptr

	// The arena_* fields indicate the addresses of the Go heap.
	//
	// The maximum range of the Go heap is
	// [arena_start, arena_start+_MaxMem+1).
	arena_start uintptr
	arena_used  uintptr // Set with setArenaUsed.

	// The heap is grown using a linear allocator that allocates
	// from the block [arena_alloc, arena_end).
	arena_alloc uintptr
	arena_end   uintptr

	// arena_reserved indicates that the memory [arena_alloc,
	// arena_end) is reserved (e.g., mapped PROT_NONE).
	arena_reserved bool

	_ uint32 // ensure 64-bit alignment

	// central free lists for small size classes.
	// The padding makes sure that the mcentrals are spaced
	// CacheLineSize bytes apart, so that each mcentral.lock
	// gets its own cache line.
	// central is indexed by spanClass.
	central [numSpanClasses]struct {
		mcentral mcentral
		pad      [sys.CacheLineSize - unsafe.Sizeof(mcentral{})%sys.CacheLineSize]byte
	}

	spanalloc             fixalloc // allocator for span*
	cachealloc            fixalloc // allocator for mcache*
	treapalloc            fixalloc // allocator for treapNodes* used by large objects
	specialfinalizeralloc fixalloc // allocator for specialfinalizer*
	specialprofilealloc   fixalloc // allocator for specialprofile*
	speciallock           mutex    // lock for special record allocators

	unused *specialfinalizer // never set, just here to force the specialfinalizer type into DWARF
}

var mheap_ mheap

// An mspan representing actual memory has state _MSpanInUse, _MSpanManual,
// or _MSpanFree. A span may move from free to in-use or manual at any
// time, but it may only return to free while sweeping (gcphase == _GCoff):
// concurrent GC may read a pointer and then look up its span, so the
// state of a span that is in use must not change underneath it.
type mSpanState uint8

const (
	_MSpanDead   mSpanState = iota
	_MSpanInUse             // allocated for garbage collected heap
	_MSpanManual            // allocated for manual management (e.g., stack allocator)
	_MSpanFree
)

// mSpanStateNames are the names of the span states, indexed by mSpanState.
var mSpanStateNames = []string{
	"_MSpanDead",
	"_MSpanInUse",
	"_MSpanManual",
	"_MSpanFree",
}

// mSpanList heads a doubly-linked list of spans.
type mSpanList struct {
	first *mspan // first span in list, or nil if none
	last  *mspan // last span in list, or nil if none
}

// An mspan is a run of pages.
type mspan struct {
	next *mspan     // next span in list, or nil if none
	prev *mspan     // previous span in list, or nil if none
	list *mSpanList // the list this span is on, if any

	startAddr uintptr // address of first byte of span aka s.base()
	npages    uintptr // number of pages in span

	manualFreeList gclinkptr // list of free objects in _MSpanManual spans

	// freeindex is the slot index between 0 and nelems at which to begin
	// scanning for the next free object in this span. Each allocation
	// scans allocBits starting at freeindex until it finds a 0 bit
	// (a free object), then advances freeindex past it.
	// If freeindex == nelems, this span has no free objects.
	freeindex uintptr

	nelems uintptr // number of objects in the span

	// allocCache caches the complement of allocBits starting at
	// freeindex, shifted so that its lowest bit corresponds to
	// freeindex. Holding the complement allows ctz (count trailing
	// zeros) to find the next free object directly.
	allocCache uint64

	// allocBits and gcmarkBits point to the span's allocation and mark
	// bitmaps. The pointers are 8 byte aligned.
	allocBits  *gcBits
	gcmarkBits *gcBits

	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC
	sweepgen    uint32
	divMul      uint16     // for divide by elemsize - divMagic.mul
	baseMask    uint16     // if non-0, elemsize is a power of 2 and & this gives the allocation base
	allocCount  uint16     // number of allocated objects
	spanclass   spanClass  // size class and noscan (uint8)
	incache     bool       // being used by an mcache
	state       mSpanState // _MSpanInUse etc.
	needzero    uint8      // needs to be zeroed before allocation
	divShift    uint8      // for divide by elemsize - divMagic.shift
	divShift2   uint8      // for divide by elemsize - divMagic.shift2
	elemsize    uintptr    // computed from sizeclass or from npages
	unusedsince int64      // first time spotted by gc in mspanfree state
	npreleased  uintptr    // number of pages released to the os
	limit       uintptr    // end of data in span
	speciallock mutex      // guards specials list
	specials    *special   // linked list of special records sorted by offset
}

func (s *mspan) base() uintptr {
	return s.startAddr
}

func (s *mspan) layout() (size, n, total uintptr) {
	total = s.npages << _PageShift
	size = s.elemsize
	if size > 0 {
		n = total / size
	}
	return
}
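
// The following is an illustrative sketch added for this edit; it is not
// part of the original source. It spells out the arithmetic layout
// performs: a span of s.npages pages covers total bytes and, when it has
// a nonzero element size, holds total/elemsize objects (for example, a
// one-page, 8 KB span of 48-byte elements holds 170 objects).
func layoutSketch(s *mspan) (objects uintptr) {
	size, n, total := s.layout()
	if size > 0 && n*size > total {
		throw("layoutSketch: objects overflow the span")
	}
	return n
}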

// recordspan adds a newly allocated span to h.allspans.
//
// This only happens the first time a span is allocated from
// mheap.spanalloc (it is not called when a span is reused).
func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
	h := (*mheap)(vh)
	s := (*mspan)(p)
	if len(h.allspans) >= cap(h.allspans) {
		n := 64 * 1024 / sys.PtrSize
		if n < cap(h.allspans)*3/2 {
			n = cap(h.allspans) * 3 / 2
		}
		var new []*mspan
		sp := (*slice)(unsafe.Pointer(&new))
		sp.array = sysAlloc(uintptr(n)*sys.PtrSize, &memstats.other_sys)
		if sp.array == nil {
			throw("runtime: cannot allocate memory")
		}
		sp.len = len(h.allspans)
		sp.cap = n
		if len(h.allspans) > 0 {
			copy(new, h.allspans)
		}
		oldAllspans := h.allspans
		*(*notInHeapSlice)(unsafe.Pointer(&h.allspans)) = *(*notInHeapSlice)(unsafe.Pointer(&new))
		if len(oldAllspans) != 0 {
			sysFree(unsafe.Pointer(&oldAllspans[0]), uintptr(cap(oldAllspans))*unsafe.Sizeof(oldAllspans[0]), &memstats.other_sys)
		}
	}
	h.allspans = h.allspans[:len(h.allspans)+1]
	h.allspans[len(h.allspans)-1] = s
}

// A spanClass represents the size class and noscan-ness of a span.
//
// Each size class has a noscan spanClass and a scan spanClass. The
// noscan spanClass contains only noscan objects, which do not contain
// pointers and thus do not need to be scanned by the garbage collector.
type spanClass uint8

const (
	numSpanClasses = _NumSizeClasses << 1
	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
)

func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan))
}

func (sc spanClass) sizeclass() int8 {
	return int8(sc >> 1)
}

func (sc spanClass) noscan() bool {
	return sc&1 != 0
}
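
// The following is an illustrative sketch added for this edit; it is not
// part of the original source. It shows how the spanClass encoding packs
// a size class and a noscan bit into one byte: the size class occupies
// the upper seven bits and the low bit is set for pointer-free spans, so
// the encoding round-trips through sizeclass and noscan.
func spanClassSketch() {
	sc := makeSpanClass(5, true) // size class 5, noscan (pointer-free) objects
	if sc.sizeclass() != 5 || !sc.noscan() {
		throw("spanClassSketch: encoding did not round-trip")
	}
}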

// inheap reports whether b is a pointer into a (potentially dead) heap object.
// It returns false for pointers into _MSpanManual spans.
// Non-preemptible because it is used by write barriers.
func inheap(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() || b >= s.limit || s.state != _MSpanInUse {
		return false
	}
	return true
}

// inHeapOrStack is a variant of inheap that returns true for pointers
// into any allocated heap span.
func inHeapOrStack(b uintptr) bool {
	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
		return false
	}
	// Not a beginning of a block, consult span table to find the block beginning.
	s := mheap_.spans[(b-mheap_.arena_start)>>_PageShift]
	if s == nil || b < s.base() {
		return false
	}
	switch s.state {
	case _MSpanInUse, _MSpanManual:
		return b < s.limit
	default:
		return false
	}
}

// spanOf returns the span of p. If p does not point into the heap or
// no span contains p, spanOf returns nil.
func spanOf(p uintptr) *mspan {
	if p == 0 || p < mheap_.arena_start || p >= mheap_.arena_used {
		return nil
	}
	return spanOfUnchecked(p)
}

// spanOfUnchecked is equivalent to spanOf, but the caller must ensure
// that p points into the heap (that is, mheap_.arena_start <= p <
// mheap_.arena_used).
func spanOfUnchecked(p uintptr) *mspan {
	return mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
}
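
// The following is an illustrative sketch added for this edit; it is not
// part of the original source. It spells out the lookup spanOfUnchecked
// performs: the page index of p within the arena selects the owning
// *mspan from mheap_.spans, which has one entry per heap page.
func spanIndexSketch(p uintptr) *mspan {
	pageIdx := (p - mheap_.arena_start) >> _PageShift // page number within the arena
	return mheap_.spans[pageIdx]                      // entries are written by allocSpanLocked and grow
}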

// mlookup looks up the heap object containing v. If found, it returns 1
// and, for each non-nil out parameter, stores the object's base address,
// size, and owning span. Otherwise it returns 0.
func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
	_g_ := getg()

	_g_.m.mcache.local_nlookup++
	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
		// purge cache stats to prevent overflow
		lock(&mheap_.lock)
		purgecachedstats(_g_.m.mcache)
		unlock(&mheap_.lock)
	}

	s := mheap_.lookupMaybe(unsafe.Pointer(v))
	if sp != nil {
		*sp = s
	}
	if s == nil {
		if base != nil {
			*base = 0
		}
		if size != nil {
			*size = 0
		}
		return 0
	}

	p := s.base()
	if s.spanclass.sizeclass() == 0 {
		// Large object.
		if base != nil {
			*base = p
		}
		if size != nil {
			*size = s.npages << _PageShift
		}
		return 1
	}

	n := s.elemsize
	if base != nil {
		i := (v - p) / n
		*base = p + i*n
	}
	if size != nil {
		*size = n
	}

	return 1
}

// Initialize the heap.
func (h *mheap) init(spansStart, spansBytes uintptr) {
	h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys)
	h.spanalloc.init(unsafe.Sizeof(mspan{}), recordspan, unsafe.Pointer(h), &memstats.mspan_sys)
	h.cachealloc.init(unsafe.Sizeof(mcache{}), nil, nil, &memstats.mcache_sys)
	h.specialfinalizeralloc.init(unsafe.Sizeof(specialfinalizer{}), nil, nil, &memstats.other_sys)
	h.specialprofilealloc.init(unsafe.Sizeof(specialprofile{}), nil, nil, &memstats.other_sys)

	// Don't zero mspan allocations. Background sweeping can
	// inspect a span concurrently with allocating it, so it's
	// important that the span's sweepgen survive across freeing
	// and re-allocating a span to prevent background sweeping
	// from improperly cas'ing it from 0.
	h.spanalloc.zero = false

	for i := range h.free {
		h.free[i].init()
		h.busy[i].init()
	}

	h.busylarge.init()
	for i := range h.central {
		h.central[i].mcentral.init(spanClass(i))
	}

	sp := (*slice)(unsafe.Pointer(&h.spans))
	sp.array = unsafe.Pointer(spansStart)
	sp.len = 0
	sp.cap = int(spansBytes / sys.PtrSize)

	// Map metadata structures. But don't map race detector memory
	// since we're not actually growing the arena here (and TSAN
	// gets mad if you map 0 bytes).
	h.setArenaUsed(h.arena_used, false)
}

// setArenaUsed extends the usable arena to address arena_used and
// maps auxiliary VM regions for any newly usable arena space.
//
// racemap indicates that this memory should be managed by the race
// detector. racemap should be true unless this is covering a VM hole.
func (h *mheap) setArenaUsed(arena_used uintptr, racemap bool) {
	// Map auxiliary structures *before* h.arena_used is updated.
	// Waiting to update arena_used until after the memory has been mapped
	// avoids faults when other threads try to access these regions
	// immediately after observing the change to arena_used.

	// Map the bitmap.
	h.mapBits(arena_used)

	// Map spans array.
	h.mapSpans(arena_used)

	// Tell the race detector about the new heap memory.
	if racemap && raceenabled {
		racemapshadow(unsafe.Pointer(h.arena_used), arena_used-h.arena_used)
	}

	h.arena_used = arena_used
}

// mapSpans makes sure that the spans are mapped
// up to the new value of arena_used.
//
// Don't call this directly. Call mheap.setArenaUsed.
func (h *mheap) mapSpans(arena_used uintptr) {
	// Map spans array, PageSize at a time.
	n := arena_used
	n -= h.arena_start
	n = n / _PageSize * sys.PtrSize
	n = round(n, physPageSize)
	need := n / unsafe.Sizeof(h.spans[0])
	have := uintptr(len(h.spans))
	if have >= need {
		return
	}
	h.spans = h.spans[:need]
	sysMap(unsafe.Pointer(&h.spans[have]), (need-have)*unsafe.Sizeof(h.spans[0]), h.arena_reserved, &memstats.other_sys)
}

// reclaimList sweeps spans in list until it reclaims at least npages
// into the heap. It returns the actual number of pages reclaimed.
func (h *mheap) reclaimList(list *mSpanList, npages uintptr) uintptr {
	n := uintptr(0)
	sg := mheap_.sweepgen
retry:
	for s := list.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			list.remove(s)
			// swept spans are at the end of the list
			list.insertBack(s)
			unlock(&h.lock)
			snpages := s.npages
			if s.sweep(false) {
				n += snpages
			}
			lock(&h.lock)
			if n >= npages {
				return n
			}
			// the span could have been moved elsewhere
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	return n
}

// reclaim sweeps and reclaims at least npage pages into the heap.
// It is called before allocating npage pages.
func (h *mheap) reclaim(npage uintptr) {
	// First try to sweep busy spans with large objects of size >= npage;
	// this has good chances of reclaiming the necessary space.
	for i := int(npage); i < len(h.busy); i++ {
		if h.reclaimList(&h.busy[i], npage) != 0 {
			return // Bingo!
		}
	}

	// Then -- even larger objects.
	if h.reclaimList(&h.busylarge, npage) != 0 {
		return // Bingo!
	}

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed := uintptr(0)
	for i := 0; i < int(npage) && i < len(h.busy); i++ {
		reclaimed += h.reclaimList(&h.busy[i], npage-reclaimed)
		if reclaimed >= npage {
			return
		}
	}

	// Now sweep everything that is not yet swept.
	unlock(&h.lock)
	for {
		n := sweepone()
		if n == ^uintptr(0) { // all spans are swept
			break
		}
		reclaimed += n
		if reclaimed >= npage {
			break
		}
	}
	lock(&h.lock)
}

// alloc_m allocates a new span of npage pages from the heap for GC'd
// memory and records its size class in the heap bitmap and span map.
func (h *mheap) alloc_m(npage uintptr, spanclass spanClass, large bool) *mspan {
	_g_ := getg()
	if _g_ != _g_.m.g0 {
		throw("_mheap_alloc not on g0 stack")
	}
	lock(&h.lock)

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if h.sweepdone == 0 {
		if trace.enabled {
			traceGCSweepStart()
		}
		h.reclaim(npage)
		if trace.enabled {
			traceGCSweepDone()
		}
	}

	// transfer stats from cache to global
	memstats.heap_scan += uint64(_g_.m.mcache.local_scan)
	_g_.m.mcache.local_scan = 0
	memstats.tinyallocs += uint64(_g_.m.mcache.local_tinyallocs)
	_g_.m.mcache.local_tinyallocs = 0

	s := h.allocSpanLocked(npage, &memstats.heap_inuse)
	if s != nil {
		// Record span info, because gc needs to be
		// able to map interior pointer to containing span.
		atomic.Store(&s.sweepgen, h.sweepgen)
		h.sweepSpans[h.sweepgen/2%2].push(s) // Add to swept in-use list.
		s.state = _MSpanInUse
		s.allocCount = 0
		s.spanclass = spanclass
		if sizeclass := spanclass.sizeclass(); sizeclass == 0 {
			s.elemsize = s.npages << _PageShift
			s.divShift = 0
			s.divMul = 0
			s.divShift2 = 0
			s.baseMask = 0
		} else {
			s.elemsize = uintptr(class_to_size[sizeclass])
			m := &class_to_divmagic[sizeclass]
			s.divShift = m.shift
			s.divMul = m.mul
			s.divShift2 = m.shift2
			s.baseMask = m.baseMask
		}

		// update stats, sweep lists
		h.pagesInUse += uint64(npage)
		if large {
			memstats.heap_objects++
			mheap_.largealloc += uint64(s.elemsize)
			mheap_.nlargealloc++
			atomic.Xadd64(&memstats.heap_live, int64(npage<<_PageShift))
			// Swept spans are at the end of lists.
			if s.npages < uintptr(len(h.busy)) {
				h.busy[s.npages].insertBack(s)
			} else {
				h.busylarge.insertBack(s)
			}
		}
	}
	// heap_scan and heap_live were updated.
	if gcBlackenEnabled != 0 {
		gcController.revise()
	}

	if trace.enabled {
		traceHeapAlloc()
	}

	unlock(&h.lock)
	return s
}

// alloc allocates a new span of npage pages from the GC'd heap.
//
// If needzero is true, the memory for the returned span will be zeroed.
func (h *mheap) alloc(npage uintptr, spanclass spanClass, large bool, needzero bool) *mspan {
	// Don't do any operations that lock the heap on the G stack.
	// It might trigger stack growth, and the stack growth code needs
	// to be able to allocate heap.
	var s *mspan
	systemstack(func() {
		s = h.alloc_m(npage, spanclass, large)
	})

	if s != nil {
		if needzero && s.needzero != 0 {
			memclrNoHeapPointers(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		}
		s.needzero = 0
	}
	return s
}

// allocManual allocates a manually-managed span of npage pages and adds
// the bytes used to *stat, which should be a memstats in-use field. It
// returns nil if allocation fails. Unlike allocations in the GC'd heap,
// the allocation does not count toward heap_inuse or heap_sys, and the
// returned memory may not be zeroed if span.needzero is set.
//
// allocManual must be called on the system stack to prevent stack growth.
func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
	lock(&h.lock)
	s := h.allocSpanLocked(npage, stat)
	if s != nil {
		s.state = _MSpanManual
		s.manualFreeList = 0
		s.allocCount = 0
		s.spanclass = 0
		s.nelems = 0
		s.elemsize = 0
		s.limit = s.base() + s.npages<<_PageShift
		// Manually managed memory doesn't count toward heap_sys.
		memstats.heap_sys -= uint64(s.npages << _PageShift)
	}

	unlock(&h.lock)

	return s
}

// allocSpanLocked allocates a span of exactly npage pages from the heap.
// h must be locked. The returned span has been removed from the free
// structures, but its state is still _MSpanFree.
func (h *mheap) allocSpanLocked(npage uintptr, stat *uint64) *mspan {
	var list *mSpanList
	var s *mspan

	// Try in fixed-size lists up to max.
	for i := int(npage); i < len(h.free); i++ {
		list = &h.free[i]
		if !list.isEmpty() {
			s = list.first
			list.remove(s)
			goto HaveSpan
		}
	}
	// Best fit in the treap of large spans.
	s = h.allocLarge(npage) // allocLarge removed s from h.freelarge for us
	if s == nil {
		if !h.grow(npage) {
			return nil
		}
		s = h.allocLarge(npage)
		if s == nil {
			return nil
		}
	}

HaveSpan:
	// Mark span in use.
	if s.state != _MSpanFree {
		throw("MHeap_AllocLocked - MSpan not free")
	}
	if s.npages < npage {
		throw("MHeap_AllocLocked - bad npages")
	}
	if s.npreleased > 0 {
		sysUsed(unsafe.Pointer(s.base()), s.npages<<_PageShift)
		memstats.heap_released -= uint64(s.npreleased << _PageShift)
		s.npreleased = 0
	}

	if s.npages > npage {
		// Trim extra and put it back in the heap.
		t := (*mspan)(h.spanalloc.alloc())
		t.init(s.base()+npage<<_PageShift, s.npages-npage)
		s.npages = npage
		p := (t.base() - h.arena_start) >> _PageShift
		if p > 0 {
			h.spans[p-1] = s
		}
		h.spans[p] = t
		h.spans[p+t.npages-1] = t
		t.needzero = s.needzero
		s.state = _MSpanManual // prevent coalescing with s
		t.state = _MSpanManual
		h.freeSpanLocked(t, false, false, s.unusedsince)
		s.state = _MSpanFree
	}
	s.unusedsince = 0

	p := (s.base() - h.arena_start) >> _PageShift
	for n := uintptr(0); n < npage; n++ {
		h.spans[p+n] = s
	}

	*stat += uint64(npage << _PageShift)
	memstats.heap_idle -= uint64(npage << _PageShift)

	if s.inList() {
		throw("still in list")
	}
	return s
}

// isLargeSpan reports whether a span of npages pages is too large for
// the fixed-size free lists and is tracked in the freelarge treap instead.
func (h *mheap) isLargeSpan(npages uintptr) bool {
	return npages >= uintptr(len(h.free))
}

// allocLarge allocates a span of at least npage pages from the treap of
// large spans. It returns nil if no suitable span is available.
func (h *mheap) allocLarge(npage uintptr) *mspan {
	// Search treap for smallest span with >= npage pages.
	return h.freelarge.remove(npage)
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
//
// h must be locked.
func (h *mheap) grow(npage uintptr) bool {
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB.
	npage = round(npage, (64<<10)/_PageSize)
	ask := npage << _PageShift
	if ask < _HeapAllocChunk {
		ask = _HeapAllocChunk
	}

	v := h.sysAlloc(ask)
	if v == nil {
		if ask > npage<<_PageShift {
			ask = npage << _PageShift
			v = h.sysAlloc(ask)
		}
		if v == nil {
			print("runtime: out of memory: cannot allocate ", ask, "-byte block (", memstats.heap_sys, " in use)\n")
			return false
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s := (*mspan)(h.spanalloc.alloc())
	s.init(uintptr(v), ask>>_PageShift)
	p := (s.base() - h.arena_start) >> _PageShift
	for i := p; i < p+s.npages; i++ {
		h.spans[i] = s
	}
	atomic.Store(&s.sweepgen, h.sweepgen)
	s.state = _MSpanInUse
	h.pagesInUse += uint64(s.npages)
	h.freeSpanLocked(s, false, true, 0)
	return true
}

// lookup looks up the span at the given address. The address must be a
// mapped heap address; use lookupMaybe for arbitrary addresses.
func (h *mheap) lookup(v unsafe.Pointer) *mspan {
	p := uintptr(v)
	p -= h.arena_start
	return h.spans[p>>_PageShift]
}

// lookupMaybe looks up the span at the given address, which may or may
// not be in the heap. It returns the span only if v falls within an
// in-use span; otherwise it returns nil.
func (h *mheap) lookupMaybe(v unsafe.Pointer) *mspan {
	if uintptr(v) < h.arena_start || uintptr(v) >= h.arena_used {
		return nil
	}
	s := h.spans[(uintptr(v)-h.arena_start)>>_PageShift]
	if s == nil || uintptr(v) < s.base() || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != _MSpanInUse {
		return nil
	}
	return s
}

// Free the span back into the heap.
func (h *mheap) freeSpan(s *mspan, acct int32) {
	systemstack(func() {
		mp := getg().m
		lock(&h.lock)
		memstats.heap_scan += uint64(mp.mcache.local_scan)
		mp.mcache.local_scan = 0
		memstats.tinyallocs += uint64(mp.mcache.local_tinyallocs)
		mp.mcache.local_tinyallocs = 0
		if msanenabled {
			// Tell msan that this entire span is no longer in use.
			base := unsafe.Pointer(s.base())
			bytes := s.npages << _PageShift
			msanfree(base, bytes)
		}
		if acct != 0 {
			memstats.heap_objects--
		}
		if gcBlackenEnabled != 0 {
			// heap_scan changed.
			gcController.revise()
		}
		h.freeSpanLocked(s, true, true, 0)
		unlock(&h.lock)
	})
}

// freeManual frees a manually-managed span returned by allocManual.
// stat must be the same as the stat passed to the allocManual that
// allocated s.
//
// This must only be called when gcphase == _GCoff. See mSpanState for
// an explanation.
//
// freeManual must be called on the system stack to prevent stack
// growth, just like allocManual.
func (h *mheap) freeManual(s *mspan, stat *uint64) {
	s.needzero = 1
	lock(&h.lock)
	*stat -= uint64(s.npages << _PageShift)
	memstats.heap_sys += uint64(s.npages << _PageShift)
	h.freeSpanLocked(s, false, true, 0)
	unlock(&h.lock)
}

// s must be on a busy list (h.busy or h.busylarge) or unlinked.
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince int64) {
	switch s.state {
	case _MSpanManual:
		if s.allocCount != 0 {
			throw("MHeap_FreeSpanLocked - invalid stack free")
		}
	case _MSpanInUse:
		if s.allocCount != 0 || s.sweepgen != h.sweepgen {
			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
			throw("MHeap_FreeSpanLocked - invalid free")
		}
		h.pagesInUse -= uint64(s.npages)
	default:
		throw("MHeap_FreeSpanLocked - invalid span state")
	}

	if acctinuse {
		memstats.heap_inuse -= uint64(s.npages << _PageShift)
	}
	if acctidle {
		memstats.heap_idle += uint64(s.npages << _PageShift)
	}
	s.state = _MSpanFree
	if s.inList() {
		h.busyList(s.npages).remove(s)
	}

	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s.unusedsince = unusedsince
	if unusedsince == 0 {
		s.unusedsince = nanotime()
	}
	s.npreleased = 0

	// Coalesce with earlier, later spans.
	p := (s.base() - h.arena_start) >> _PageShift
	if p > 0 {
		before := h.spans[p-1]
		if before != nil && before.state == _MSpanFree {
			// Now adjust s.
			s.startAddr = before.startAddr
			s.npages += before.npages
			s.npreleased = before.npreleased // absorb released pages
			s.needzero |= before.needzero
			p -= before.npages
			h.spans[p] = s
			// Remove the old predecessor from its free list or treap.
			if h.isLargeSpan(before.npages) {
				h.freelarge.removeSpan(before)
			} else {
				h.freeList(before.npages).remove(before)
			}
			before.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(before))
		}
	}

	// Now check to see if the next (greater addresses) span is free and can be coalesced.
	if (p + s.npages) < uintptr(len(h.spans)) {
		after := h.spans[p+s.npages]
		if after != nil && after.state == _MSpanFree {
			s.npages += after.npages
			s.npreleased += after.npreleased
			s.needzero |= after.needzero
			h.spans[p+s.npages-1] = s
			if h.isLargeSpan(after.npages) {
				h.freelarge.removeSpan(after)
			} else {
				h.freeList(after.npages).remove(after)
			}
			after.state = _MSpanDead
			h.spanalloc.free(unsafe.Pointer(after))
		}
	}

	// Insert s into the appropriate free list or treap.
	if h.isLargeSpan(s.npages) {
		h.freelarge.insert(s)
	} else {
		h.freeList(s.npages).insert(s)
	}
}

func (h *mheap) freeList(npages uintptr) *mSpanList {
	return &h.free[npages]
}

func (h *mheap) busyList(npages uintptr) *mSpanList {
	if npages < uintptr(len(h.busy)) {
		return &h.busy[npages]
	}
	return &h.busylarge
}
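
// The following is an illustrative sketch added for this edit; it is not
// part of the original source. It shows how a free span's page count
// selects where it is kept: spans of fewer than len(h.free) pages go in
// the exact-size free lists, anything larger in the freelarge treap, the
// same choice freeSpanLocked makes above.
func freeHomeSketch(h *mheap, s *mspan) {
	if h.isLargeSpan(s.npages) {
		h.freelarge.insert(s) // large spans: treap keyed by page count
	} else {
		h.freeList(s.npages).insert(s) // small spans: list for exactly s.npages pages
	}
}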

// scavengeTreapNode releases the unused pages of the span held by treap
// node t back to the OS if the span has been unused for longer than
// limit nanoseconds. It returns the number of bytes released.
func scavengeTreapNode(t *treapNode, now, limit uint64) uintptr {
	s := t.spanKey
	var sumreleased uintptr
	if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
		start := s.base()
		end := start + s.npages<<_PageShift
		if physPageSize > _PageSize {
			// We can only release pages in physPageSize blocks, so
			// round start and end in. (Otherwise, madvise will
			// release the base address of a physical page
			// containing still-live data.)
			start = (start + physPageSize - 1) &^ (physPageSize - 1)
			end &^= physPageSize - 1
			if end <= start {
				// start and end don't span a whole physical page.
				return sumreleased
			}
		}
		len := end - start
		released := len - (s.npreleased << _PageShift)
		if physPageSize > _PageSize && released == 0 {
			return sumreleased
		}
		memstats.heap_released += uint64(released)
		sumreleased += released
		s.npreleased = len >> _PageShift
		sysUnused(unsafe.Pointer(start), len)
	}
	return sumreleased
}

// scavengelist releases the unused pages of the free spans in list that
// have been unused for longer than limit, returning the number of bytes
// released.
func scavengelist(list *mSpanList, now, limit uint64) uintptr {
	if list.isEmpty() {
		return 0
	}

	var sumreleased uintptr
	for s := list.first; s != nil; s = s.next {
		if (now-uint64(s.unusedsince)) <= limit || s.npreleased == s.npages {
			continue
		}
		start := s.base()
		end := start + s.npages<<_PageShift
		if physPageSize > _PageSize {
			// We can only release pages in physPageSize blocks, so
			// round start and end in. (Otherwise, madvise will
			// release the base address of a physical page
			// containing still-live data.)
			start = (start + physPageSize - 1) &^ (physPageSize - 1)
			end &^= physPageSize - 1
			if end <= start {
				// start and end don't span a whole physical page.
				continue
			}
		}
		len := end - start

		released := len - (s.npreleased << _PageShift)
		if physPageSize > _PageSize && released == 0 {
			continue
		}
		memstats.heap_released += uint64(released)
		sumreleased += released
		s.npreleased = len >> _PageShift
		sysUnused(unsafe.Pointer(start), len)
	}
	return sumreleased
}

func (h *mheap) scavenge(k int32, now, limit uint64) {
	// Disallow malloc or panic while holding the heap lock. We do
	// this here because this is a non-mallocgc entry-point to
	// the mheap API.
	gp := getg()
	gp.m.mallocing++
	lock(&h.lock)
	var sumreleased uintptr
	for i := 0; i < len(h.free); i++ {
		sumreleased += scavengelist(&h.free[i], now, limit)
	}
	sumreleased += scavengetreap(h.freelarge.treap, now, limit)
	unlock(&h.lock)
	gp.m.mallocing--

	if debug.gctrace > 0 {
		if sumreleased > 0 {
			print("scvg", k, ": ", sumreleased>>20, " MB released\n")
		}
		print("scvg", k, ": inuse: ", memstats.heap_inuse>>20, ", idle: ", memstats.heap_idle>>20, ", sys: ", memstats.heap_sys>>20, ", released: ", memstats.heap_released>>20, ", consumed: ", (memstats.heap_sys-memstats.heap_released)>>20, " (MB)\n")
	}
}

//go:linkname runtime_debug_freeOSMemory runtime/debug.freeOSMemory
func runtime_debug_freeOSMemory() {
	GC()
	systemstack(func() { mheap_.scavenge(-1, ^uint64(0), 0) })
}

// Initialize a new span with the given start and npages.
func (span *mspan) init(base uintptr, npages uintptr) {
	span.next = nil
	span.prev = nil
	span.list = nil
	span.startAddr = base
	span.npages = npages
	span.allocCount = 0
	span.spanclass = 0
	span.incache = false
	span.elemsize = 0
	span.state = _MSpanDead
	span.unusedsince = 0
	span.npreleased = 0
	span.speciallock.key = 0
	span.specials = nil
	span.needzero = 0
	span.freeindex = 0
	span.allocBits = nil
	span.gcmarkBits = nil
}

func (span *mspan) inList() bool {
	return span.list != nil
}

// Initialize an empty doubly-linked list.
func (list *mSpanList) init() {
	list.first = nil
	list.last = nil
}

func (list *mSpanList) remove(span *mspan) {
	if span.list != list {
		print("runtime: failed MSpanList_Remove span.npages=", span.npages,
			" span=", span, " prev=", span.prev, " span.list=", span.list, " list=", list, "\n")
		throw("MSpanList_Remove")
	}
	if list.first == span {
		list.first = span.next
	} else {
		span.prev.next = span.next
	}
	if list.last == span {
		list.last = span.prev
	} else {
		span.next.prev = span.prev
	}
	span.next = nil
	span.prev = nil
	span.list = nil
}

func (list *mSpanList) isEmpty() bool {
	return list.first == nil
}

func (list *mSpanList) insert(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_Insert", span, span.next, span.prev, span.list)
		throw("MSpanList_Insert")
	}
	span.next = list.first
	if list.first != nil {
		// The list contains at least one span; shortcut it here.
		// The last span in the list doesn't change.
		list.first.prev = span
	} else {
		// The list contains no spans, so this is also the last span.
		list.last = span
	}
	list.first = span
	span.list = list
}

func (list *mSpanList) insertBack(span *mspan) {
	if span.next != nil || span.prev != nil || span.list != nil {
		println("runtime: failed MSpanList_InsertBack", span, span.next, span.prev, span.list)
		throw("MSpanList_InsertBack")
	}
	span.prev = list.last
	if list.last != nil {
		// The list contains at least one span.
		list.last.next = span
	} else {
		// The list contains no spans, so this is also the first span.
		list.first = span
	}
	list.last = span
	span.list = list
}

// takeAll removes all spans from other and inserts them at the front
// of list.
func (list *mSpanList) takeAll(other *mSpanList) {
	if other.isEmpty() {
		return
	}

	// Reparent everything in other to list.
	for s := other.first; s != nil; s = s.next {
		s.list = list
	}

	// Concatenate the lists.
	if list.isEmpty() {
		*list = *other
	} else {
		// Neither list is empty. Put other before list.
		other.last.next = list.first
		list.first.prev = other.last
		list.first = other.first
	}

	other.first, other.last = nil, nil
}

const (
	_KindSpecialFinalizer = 1
	_KindSpecialProfile   = 2
)

// A special holds extra information (such as a registered finalizer or a
// heap profile record) for an object at a given offset within a span.
// Specials are kept in a per-span list sorted by offset.
type special struct {
	next   *special // linked list in span
	offset uint16   // span offset of object
	kind   byte     // kind of special
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s.kind
// already exists.)
func addspecial(p unsafe.Pointer, s *special) bool {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("addspecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()
	kind := s.kind

	lock(&span.speciallock)

	// Find splice point, check for existing record.
	t := &span.specials
	for {
		x := *t
		if x == nil {
			break
		}
		if offset == uintptr(x.offset) && kind == x.kind {
			unlock(&span.speciallock)
			releasem(mp)
			return false // already exists
		}
		if offset < uintptr(x.offset) || (offset == uintptr(x.offset) && kind < x.kind) {
			break
		}
		t = &x.next
	}

	// Splice in record, fill in offset.
	s.offset = uint16(offset)
	s.next = *t
	*t = s
	unlock(&span.speciallock)
	releasem(mp)

	return true
}

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
func removespecial(p unsafe.Pointer, kind uint8) *special {
	span := mheap_.lookupMaybe(p)
	if span == nil {
		throw("removespecial on invalid pointer")
	}

	// Ensure that the span is swept.
	// Sweeping accesses the specials list w/o locks, so we have
	// to synchronize with it. And it's just much safer.
	mp := acquirem()
	span.ensureSwept()

	offset := uintptr(p) - span.base()

	lock(&span.speciallock)
	t := &span.specials
	for {
		s := *t
		if s == nil {
			break
		}
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s.offset).
		if offset == uintptr(s.offset) && kind == s.kind {
			*t = s.next
			unlock(&span.speciallock)
			releasem(mp)
			return s
		}
		t = &s.next
	}
	unlock(&span.speciallock)
	releasem(mp)
	return nil
}

// The described object has a finalizer set for it.
//
// specialfinalizer is allocated from non-GC'd memory, so any heap
// pointers must be specially handled.
type specialfinalizer struct {
	special special
	fn      *funcval
	nret    uintptr
	fint    *_type
	ot      *ptrtype
}

// Adds a finalizer to the object p. Returns true if it succeeded.
func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool {
	lock(&mheap_.speciallock)
	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialFinalizer
	s.fn = f
	s.nret = nret
	s.fint = fint
	s.ot = ot
	if addspecial(p, &s.special) {
		// This is responsible for maintaining the same
		// GC-related invariants as markrootSpans in any
		// situation where it's possible that markrootSpans
		// has already run but mark termination hasn't yet.
		if gcphase != _GCoff {
			_, base, _ := findObject(p)
			mp := acquirem()
			gcw := &mp.p.ptr().gcw
			// Mark everything reachable from the object
			// so it's retained for the finalizer.
			scanobject(uintptr(base), gcw)
			// Mark the finalizer itself, since the
			// special isn't part of the GC'd heap.
			scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
			if gcBlackenPromptly {
				gcw.dispose()
			}
			releasem(mp)
		}
		return true
	}

	// There was an old finalizer.
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
	return false
}

// Removes the finalizer (if any) from the object p.
func removefinalizer(p unsafe.Pointer) {
	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer)))
	if s == nil {
		return // there wasn't a finalizer to remove
	}
	lock(&mheap_.speciallock)
	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
	unlock(&mheap_.speciallock)
}

// The described object is being heap profiled.
type specialprofile struct {
	special special
	b       *bucket
}

// Set the heap profile bucket associated with addr to b.
func setprofilebucket(p unsafe.Pointer, b *bucket) {
	lock(&mheap_.speciallock)
	s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
	unlock(&mheap_.speciallock)
	s.special.kind = _KindSpecialProfile
	s.b = b
	if !addspecial(p, &s.special) {
		throw("setprofilebucket: profile already set")
	}
}

// Do whatever cleanup needs to be done to deallocate s. It has already
// been unlinked from the mspan specials list.
func freespecial(s *special, p unsafe.Pointer, size uintptr) {
	switch s.kind {
	case _KindSpecialFinalizer:
		sf := (*specialfinalizer)(unsafe.Pointer(s))
		queuefinalizer(p, sf.fn, sf.nret, sf.fint, sf.ot)
		lock(&mheap_.speciallock)
		mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
		unlock(&mheap_.speciallock)
	case _KindSpecialProfile:
		sp := (*specialprofile)(unsafe.Pointer(s))
		mProf_Free(sp.b, size)
		lock(&mheap_.speciallock)
		mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
		unlock(&mheap_.speciallock)
	default:
		throw("bad special kind")
		panic("not reached")
	}
}

// gcBits is an alloc/mark bitmap. This is always used as *gcBits.
type gcBits uint8

// bytep returns a pointer to the n'th byte of b.
func (b *gcBits) bytep(n uintptr) *uint8 {
	return addb((*uint8)(b), n)
}

// bitp returns a pointer to the byte containing bit n and a mask for
// selecting that bit from *bytep.
func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) {
	return b.bytep(n / 8), 1 << (n % 8)
}
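
// The following is an illustrative sketch added for this edit; it is not
// part of the original source. It shows how bytep and bitp address one
// object's bit: object n maps to bit n%8 of byte n/8 of the bitmap.
func markBitSketch(bits *gcBits, n uintptr) bool {
	bytep, mask := bits.bitp(n)
	return *bytep&mask != 0 // true if object n's bit is set
}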

const gcBitsChunkBytes = uintptr(64 << 10)
const gcBitsHeaderBytes = unsafe.Sizeof(gcBitsHeader{})

type gcBitsHeader struct {
	free uintptr // index into bits of the next free byte
	next uintptr
}

// gcBitsArena is a 64 KB chunk carved up into gcBits allocations for
// span mark/alloc bitmaps. Its leading fields mirror gcBitsHeader.
type gcBitsArena struct {
	free uintptr // index into bits of the next free byte; read/written atomically
	next *gcBitsArena
	bits [gcBitsChunkBytes - gcBitsHeaderBytes]gcBits
}

var gcBitsArenas struct {
	lock     mutex
	free     *gcBitsArena // arenas available for reuse
	next     *gcBitsArena // arena receiving this cycle's new mark bits; read atomically
	current  *gcBitsArena
	previous *gcBitsArena
}

// tryAlloc allocates from b or returns nil if b does not have enough room.
// This is safe to call concurrently.
func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits {
	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {
		return nil
	}
	// Try to allocate from this block.
	end := atomic.Xadduintptr(&b.free, bytes)
	if end > uintptr(len(b.bits)) {
		return nil
	}
	// There was enough room.
	start := end - bytes
	return &b.bits[start]
}

// newMarkBits returns a pointer to 8 byte aligned bytes
// to be used for a span's mark bits.
func newMarkBits(nelems uintptr) *gcBits {
	blocksNeeded := uintptr((nelems + 63) / 64)
	bytesNeeded := blocksNeeded * 8

	// Try directly allocating from the current head arena.
	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next)))
	if p := head.tryAlloc(bytesNeeded); p != nil {
		return p
	}

	// There's not enough room in the head arena. We may need to
	// allocate a new arena.
	lock(&gcBitsArenas.lock)
	// Try the head arena again, since it may have changed. Now
	// that we hold the lock, the list head can't change, but its
	// free position still can.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate a new arena. This may temporarily drop the lock.
	fresh := newArenaMayUnlock()
	// If newArenaMayUnlock dropped the lock, another thread may
	// have put a fresh arena on the "next" list. Try allocating
	// from next again.
	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil {
		// Put fresh back on the free list.
		fresh.next = gcBitsArenas.free
		gcBitsArenas.free = fresh
		unlock(&gcBitsArenas.lock)
		return p
	}

	// Allocate from the fresh arena. We haven't linked it in yet, so
	// this cannot race and is guaranteed to succeed.
	p := fresh.tryAlloc(bytesNeeded)
	if p == nil {
		throw("markBits overflow")
	}

	// Add the fresh arena to the "next" list.
	fresh.next = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh))

	unlock(&gcBitsArenas.lock)
	return p
}
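
// The following is an illustrative sketch added for this edit; it is not
// part of the original source. newMarkBits rounds each request up to
// whole 64-bit blocks so a span's bits can be consumed a uint64 at a
// time (see allocCache): a span with 100 objects gets ceil(100/64) = 2
// blocks, i.e. 16 bytes of bits.
func markBitsSizeSketch(nelems uintptr) uintptr {
	blocks := (nelems + 63) / 64
	return blocks * 8 // bytes newMarkBits reserves for this span
}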

// newAllocBits returns a pointer to 8 byte aligned bytes
// to be used for this span's alloc bits.
// newAllocBits is used to provide newly initialized spans
// allocation bits. For spans not being initialized the
// mark bits are repurposed as allocation bits when
// the span is swept.
func newAllocBits(nelems uintptr) *gcBits {
	return newMarkBits(nelems)
}

// nextMarkBitArenaEpoch establishes a new epoch for the arenas
// holding the mark bits. All current spans have been swept, and
// during that sweep each span allocated room for its gcmarkBits in
// the gcBitsArenas.next block. gcBitsArenas.next now becomes
// gcBitsArenas.current, where this GC cycle is going to put its
// mark bits; the previous arenas are recycled onto the free list.
func nextMarkBitArenaEpoch() {
	lock(&gcBitsArenas.lock)
	if gcBitsArenas.previous != nil {
		if gcBitsArenas.free == nil {
			gcBitsArenas.free = gcBitsArenas.previous
		} else {
			// Find end of previous arenas.
			last := gcBitsArenas.previous
			for last = gcBitsArenas.previous; last.next != nil; last = last.next {
			}
			last.next = gcBitsArenas.free
			gcBitsArenas.free = gcBitsArenas.previous
		}
	}
	gcBitsArenas.previous = gcBitsArenas.current
	gcBitsArenas.current = gcBitsArenas.next
	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), nil) // newMarkBits calls newArenaMayUnlock when needed
	unlock(&gcBitsArenas.lock)
}

// newArenaMayUnlock allocates and zeroes a gcBits arena.
// The caller must hold gcBitsArenas.lock. This may temporarily release it.
func newArenaMayUnlock() *gcBitsArena {
	var result *gcBitsArena
	if gcBitsArenas.free == nil {
		unlock(&gcBitsArenas.lock)
		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
		if result == nil {
			throw("runtime: cannot allocate memory")
		}
		lock(&gcBitsArenas.lock)
	} else {
		result = gcBitsArenas.free
		gcBitsArenas.free = gcBitsArenas.free.next
		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes)
	}
	result.next = nil
	// If result.bits is not 8 byte aligned adjust index so
	// that &result.bits[result.free] is 8 byte aligned.
	if uintptr(unsafe.Offsetof(gcBitsArena{}.bits))&7 == 0 {
		result.free = 0
	} else {
		result.free = 8 - (uintptr(unsafe.Pointer(&result.bits[0])) & 7)
	}
	return result
}