...
Source file
src/runtime/mgcsweepbuf.go
Documentation: runtime
1
2
3
4
5 package runtime
6
7 import (
8 "runtime/internal/atomic"
9 "runtime/internal/sys"
10 "unsafe"
11 )
12
13
14
15
16
// A gcSweepBuf is a concurrent set of *mspans, organized as a
// two-level structure: a growable "spine" of pointers to fixed-size
// blocks of span pointers.
//
// The spine is accessed with atomic loads by readers; writers must
// hold spineLock to append or grow it. Both the spine and the blocks
// are allocated with persistentalloc (off-heap), which is why stores
// into them use StorepNoWB — there is no write barrier for this
// memory. NOTE(review): this matches how push/pop below use the
// fields; safety of concurrent push vs. pop is constrained there.
type gcSweepBuf struct {
	// spineLock guards all modifications of the spine: appending
	// a new block and reallocating (growing) the spine array.
	spineLock mutex
	// spine points to an off-heap array of *gcSweepBlock. It is
	// read with atomic.Loadp and replaced with atomic.StorepNoWB
	// so readers never see a torn pointer.
	spine unsafe.Pointer
	// spineLen is the number of block pointers currently in the
	// spine. Accessed atomically; only grows while spineLock is
	// held.
	spineLen uintptr
	// spineCap is the allocated capacity of the spine array.
	// Accessed only under spineLock.
	spineCap uintptr

	// index is the logical number of spans in the buffer. It is
	// updated with atomic.Xadd: push increments it to reserve a
	// slot, pop decrements it to claim one. Slot cursor/512 is
	// the spine index; cursor%512 is the offset within a block.
	index uint32
}
43
const (
	// gcSweepBlockEntries is the number of span slots per block.
	gcSweepBlockEntries = 512
	// gcSweepBufInitSpineCap is the initial capacity of the
	// spine the first time it is allocated (see push).
	gcSweepBufInitSpineCap = 256
)
48
// A gcSweepBlock is one fixed-size leaf of a gcSweepBuf: an array of
// span pointers. Blocks are allocated off-heap with persistentalloc,
// so stores of block pointers into the spine need no write barrier.
type gcSweepBlock struct {
	spans [gcSweepBlockEntries]*mspan
}
52
53
54
// push adds span s to buffer b. push is safe to call concurrently
// with other push operations, but the deliberately non-atomic final
// store into the block means it is NOT safe to call concurrently
// with pop (pop reads b.spine without atomics).
func (b *gcSweepBuf) push(s *mspan) {
	// Reserve our slot: the pre-increment value of index is ours.
	cursor := uintptr(atomic.Xadd(&b.index, +1) - 1)
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries

	// Fast path: the block for our slot already exists.
	spineLen := atomic.Loaduintptr(&b.spineLen)
	var block *gcSweepBlock
retry:
	if top < spineLen {
		spine := atomic.Loadp(unsafe.Pointer(&b.spine))
		blockp := add(spine, sys.PtrSize*top)
		block = (*gcSweepBlock)(atomic.Loadp(blockp))
	} else {
		// Slow path: add a new block to the spine, possibly
		// growing the spine itself first.
		lock(&b.spineLock)
		// spineLen cannot change while we hold the lock, but
		// it may have changed while we were waiting for it:
		// another push may have added our block already.
		spineLen = atomic.Loaduintptr(&b.spineLen)
		if top < spineLen {
			unlock(&b.spineLock)
			goto retry
		}

		if spineLen == b.spineCap {
			// Grow the spine (doubling; first allocation
			// uses gcSweepBufInitSpineCap).
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = gcSweepBufInitSpineCap
			}
			newSpine := persistentalloc(newCap*sys.PtrSize, sys.CacheLineSize, &memstats.gc_sys)
			if b.spineCap != 0 {
				// Blocks are allocated off-heap, so
				// no write barriers are needed here.
				memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
			}
			// Publish the new spine before updating the
			// cap; spine itself is off-heap, so no write
			// barrier (StorepNoWB).
			atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
			b.spineCap = newCap
			// Note: the old spine is intentionally never
			// freed — a concurrent push with a lower
			// index could still be reading it through the
			// pointer it loaded earlier. The leak is
			// bounded because the spine doubles, so old
			// spines sum to less than the current one.
			// NOTE(review): presumably acceptable given
			// runtime heap/spine size ratios — confirm.
		}

		// Allocate the new block and publish it: first the
		// block pointer (no write barrier, off-heap), then
		// the length — readers who see the new spineLen are
		// thereby guaranteed to see the block pointer.
		block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), sys.CacheLineSize, &memstats.gc_sys))
		blockp := add(b.spine, sys.PtrSize*top)
		// Blocks are allocated off-heap, so no write barrier.
		atomic.StorepNoWB(blockp, unsafe.Pointer(block))
		atomic.Storeuintptr(&b.spineLen, spineLen+1)
		unlock(&b.spineLock)
	}

	// We have the block; store the span into our reserved slot.
	// Plain (non-atomic) store: concurrent pushes target distinct
	// slots, and pop must not run concurrently with push.
	block.spans[bottom] = s
}
116
117
118
119
// pop removes and returns a span from buffer b, or nil if b is empty.
// pop is safe to call concurrently with other pop operations, but NOT
// with push (it reads spine and block contents non-atomically).
func (b *gcSweepBuf) pop() *mspan {
	// Claim the last slot by decrementing index. If the result
	// went negative (interpreted as signed), the buffer was
	// empty: undo the decrement and report empty.
	cursor := atomic.Xadd(&b.index, -1)
	if int32(cursor) < 0 {
		atomic.Xadd(&b.index, +1)
		return nil
	}

	// There are no concurrent spine or block modifications during
	// pop (push is excluded), so plain loads suffice here.
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	blockp := (**gcSweepBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
	block := *blockp
	s := block.spans[bottom]
	// Clear the slot so block(i)'s trailing-nil trim and the GC
	// don't see a stale span pointer.
	block.spans[bottom] = nil
	return s
}
137
138
139
140
141
142
143
144 func (b *gcSweepBuf) numBlocks() int {
145 return int((atomic.Load(&b.index) + gcSweepBlockEntries - 1) / gcSweepBlockEntries)
146 }
147
148
149
// block returns the spans in the i'th block of buffer b. block is
// safe to call concurrently with push, but may return nil entries or
// a short slice for the block push is currently filling.
func (b *gcSweepBuf) block(i int) []*mspan {
	// Perform the bounds check against spineLen before touching
	// the spine: slots at index >= spineLen may not have a
	// published block pointer yet (see push's store ordering).
	if i < 0 || uintptr(i) >= atomic.Loaduintptr(&b.spineLen) {
		throw("block index out of range")
	}

	// Atomically load block i through the current spine.
	spine := atomic.Loadp(unsafe.Pointer(&b.spine))
	blockp := add(spine, sys.PtrSize*uintptr(i))
	block := (*gcSweepBlock)(atomic.Loadp(blockp))

	// Snapshot index to decide how much of the block is in use:
	// blocks before `top` are full; block `top` has `bottom`
	// reserved entries.
	cursor := uintptr(atomic.Load(&b.index))
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	var spans []*mspan
	if uintptr(i) < top {
		spans = block.spans[:]
	} else {
		spans = block.spans[:bottom]
	}

	// A concurrent push may have reserved a slot (bumped index)
	// but not yet stored its span, so trim trailing nils rather
	// than returning them.
	for len(spans) > 0 && spans[len(spans)-1] == nil {
		spans = spans[:len(spans)-1]
	}
	return spans
}
179
View as plain text