src/runtime/mcentral.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass
	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully-allocated. Written atomically, read under STW.
	nmalloc uint64
}
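
// For context: each size class has its own mcentral. They live in the
// mheap_.central array, indexed by spanClass, and are reached as
// mheap_.central[spc].mcentral (see mheap.go).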

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
	sg := mheap_.sweepgen
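	// Sweep generation encoding (documented on mspan in mheap.go):
	// sweepgen == sg-2 means the span needs sweeping;
	// sweepgen == sg-1 means the span is currently being swept;
	// sweepgen == sg means the span has been swept this cycle.
	// mheap_.sweepgen advances by 2 after every GC.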
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	s.incache = true
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
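
// The allocCache arithmetic at the end of cacheSpan, illustrated with a
// hypothetical freeindex (this sketch is not runtime code): suppose
// s.freeindex is 70.
//
//	freeByteBase := 70 &^ (64 - 1) // = 64, start of the 64-slot chunk containing index 70
//	whichByte := freeByteBase / 8  // = 8, byte offset of that chunk in allocBits
//
// refillAllocCache then loads 64 bits of allocBits starting at byte 8,
// and allocCache >>= 70 % 64 shifts the cache right by 6 so that bit 0
// of s.allocCache reports whether slot 70 itself is free.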

// Return span from an MCache.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
		// mCentral_CacheSpan conservatively counted
		// unallocated slots in heap_live. Undo this.
		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't.
		atomic.Xadd64(&c.nmalloc, -int64(n))
	}
	unlock(&c.lock)
}
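
// Worked example of the accounting above, with hypothetical numbers
// rather than a real size class: for a one-page span (assuming 8192-byte
// pages) of 1024-byte objects, cap = 8192/1024 = 8. If the span comes
// back with allocCount == 5, then n = 3: heap_live drops by 3*1024
// bytes and c.nmalloc by 3, undoing cacheSpan's assumption that all 8
// slots would be allocated from the mcache.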

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan returns true if s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
	if s.incache {
		throw("freeSpan given cached span")
	}
	s.needzero = 1

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, 0)
	return true
}
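
// Summary of freeSpan's outcomes, as implemented above:
//
//	preserve == true  -> sweepgen updated, span left where it is, returns false
//	s.allocCount != 0 -> span kept on c.nonempty, returns false
//	s.allocCount == 0 -> span removed from c and returned to mheap_, returns true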

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])
	n := (npages << _PageShift) / size

	s := mheap_.alloc(npages, c.spanclass, false, true)
	if s == nil {
		return nil
	}

	p := s.base()
	s.limit = p + size*n

	heapBitsForSpan(s.base()).initSpan(s)
	return s
}
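
// Illustrative arithmetic for grow, with hypothetical table values: if
// class_to_allocnpages yields 1 page and class_to_size yields 512
// bytes, then with 8KB pages n = 8192/512 = 16 objects and s.limit
// lands exactly on the page boundary. For sizes that don't divide the
// span evenly (say 48 bytes: 8192/48 = 170 objects, 170*48 = 8160),
// limit falls short of the boundary, marking the end of the last
// whole object.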