src/runtime/mgcsweep.go

// Garbage collector: sweeping.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

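// State of background sweep.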
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	nbgsweep    uint32
	npausesweep uint32
}

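// finishsweep_m ensures that all spans have been swept.
//
// The world must be stopped. This ensures that there are no sweeps in
// progress.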
func finishsweep_m() {
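	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.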
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	nextMarkBitArenaEpoch()
}

func bgsweep(c chan int) {
	sweep.g = getg()

	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)

	for {
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !gosweepdone() {
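			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.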
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
	}
}

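// sweepone sweeps one span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there was nothing to sweep.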
func sweepone() uintptr {
	_g_ := getg()
	sweepRatio := mheap_.sweepPagesPerByte // For debugging.

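	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep, which would leave the span in an
	// inconsistent state for the next GC.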
	_g_.m.locks++
	if atomic.Load(&mheap_.sweepdone) != 0 {
		_g_.m.locks--
		return ^uintptr(0)
	}
	atomic.Xadd(&mheap_.sweepers, +1)

	npages := ^uintptr(0)
	sg := mheap_.sweepgen
	for {
		s := mheap_.sweepSpans[1-sg/2%2].pop()
		if s == nil {
			atomic.Store(&mheap_.sweepdone, 1)
			break
		}
		if s.state != mSpanInUse {
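			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.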
			if s.sweepgen != sg {
				print("runtime: bad span s.state=", s.state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			continue
		}
		npages = s.npages
		if !s.sweep(false) {
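			// Span is still in-use, so this returned no
			// pages to the heap and the span needs to move
			// to the swept in-use list.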
			npages = 0
		}
		break
	}

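	// Decrement the number of active sweepers and if this is the
	// last one print trace information.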
	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
		if debug.gcpacertrace > 0 {
			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
		}
	}
	_g_.m.locks--
	return npages
}

// gosweepone switches to the system stack, calls sweepone, and returns
// the number of pages returned to the heap, or ^uintptr(0) if there was
// nothing to sweep.
func gosweepone() uintptr {
	var ret uintptr
	systemstack(func() {
		ret = sweepone()
	})
	return ret
}

// gosweepdone reports whether the sweep phase is done for the current cycle.
func gosweepdone() bool {
	return mheap_.sweepdone != 0
}

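// ensureSwept returns only once span s has been swept.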
func (s *mspan) ensureSwept() {
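	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).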
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_EnsureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	if atomic.Load(&s.sweepgen) == sg {
		return
	}
	// The caller must be sure that the span is a mSpanInUse span.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
		s.sweep(false)
		return
	}
	// Unfortunate condition: someone else is sweeping this span and
	// we have no efficient means to wait, so spin until it finishes.
	for atomic.Load(&s.sweepgen) != sg {
		osyield()
	}
}

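// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in mcentral lists;
// the caller takes care of it.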
func (s *mspan) sweep(preserve bool) bool {
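	// It's critical that we enter this function with preemption disabled;
	// GC must not start while we are in the middle of this function.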
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_Sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("MSpan_Sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	spc := s.spanclass
	size := s.elemsize
	res := false

	c := _g_.m.mcache
	freeToHeap := false

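	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such case we need to queue the finalizer for execution,
	//    mark the object as live and preserve the profile special.
	// 2. A tiny object can have several finalizers setup for different offsets.
	//    If such an object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.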
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was setup
				// (as opposed to object beginning).
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is a profile record, but the object has finalizers (so it is kept alive).
					// Keep the special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// Object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}

	if debug.allocfreetrace != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to
		// be efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Count the number of free objects in this span.
	nalloc := uint16(s.countAlloc())
	if spc.sizeclass() == 0 && nalloc == 0 {
		s.needzero = 1
		freeToHeap = true
	}
	nfreed := s.allocCount - nalloc
	if nalloc > s.allocCount {
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	wasempty := s.nextFreeIndex() == s.nelems
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// Get a fresh cleared gcmarkBits in preparation for the next GC.
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
	s.refillAllocCache(0)

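	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.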
	if freeToHeap || nfreed == 0 {
		// The span must be in our exclusive ownership until we update sweepgen,
		// check for potential races.
		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("MSpan_Sweep: bad span state after sweep")
		}
		// Serialization point.
		// At this point the mark bits are cleared and allocation is ready
		// to go, so release the span.
		atomic.Store(&s.sweepgen, sweepgen)
	}

	if nfreed > 0 && spc.sizeclass() != 0 {
		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
		// mcentral.freeSpan updates sweepgen.
	} else if freeToHeap {
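		// Free large span to heap.
		//
		// In efence mode the span is made inaccessible with sysFault
		// instead of being returned to the heap, so later use of the
		// freed memory faults instead of silently corrupting data.
		// This makes an efence program run out of memory sooner, but
		// avoids mysterious crashes from confused memory reuse.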
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span.
			sysFault(unsafe.Pointer(s.base()), size)
		} else {
			mheap_.freeSpan(s, 1)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if !res {
		// The span has been swept and is still in-use, so put
		// it on the swept in-use list.
		mheap_.sweepSpans[sweepgen/2%2].push(s)
	}
	return res
}

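// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.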
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		if gosweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}