// libgo/runtime/mcentral.c
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.h for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
//
// TODO(rsc): tcmalloc uses a "transfer cache" to split the list
// into sections of class_to_transfercount[sizeclass] objects
// so that it is faster to move those lists between MCaches and MCentrals.
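//
// For orientation only: the fields used in this file suggest an MCentral of
// roughly the shape sketched below. This is an inference from the accesses
// made here, not the definitive layout; see malloc.h for the real definition.
//
//	struct MCentral
//	{
//		Lock;			// taken by runtime_lock(c) / runtime_unlock(c) below
//		int32	sizeclass;	// size class this central list serves
//		MSpan	nonempty;	// list head: spans with free objects
//		MSpan	empty;		// list head: fully allocated or cached spans
//		int32	nfree;		// free objects available in nonempty spans
//	};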

#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static bool MCentral_Grow(MCentral *c);
static void MCentral_Free(MCentral *c, MLink *v);
static void MCentral_ReturnToHeap(MCentral *c, MSpan *s);

// Initialize a single central free list.
void
runtime_MCentral_Init(MCentral *c, int32 sizeclass)
{
	c->sizeclass = sizeclass;
	runtime_MSpanList_Init(&c->nonempty);
	runtime_MSpanList_Init(&c->empty);
}

// Allocate a span to use in an MCache.
MSpan*
runtime_MCentral_CacheSpan(MCentral *c)
{
	MSpan *s;
	int32 cap, n;
	uint32 sg;

	runtime_lock(c);
	sg = runtime_mheap.sweepgen;
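	// Sweep generations, as used throughout the runtime:
	//   s->sweepgen == sg-2: the span needs sweeping;
	//   s->sweepgen == sg-1: the span is being swept;
	//   s->sweepgen == sg:   the span has been swept and is ready to use.
	// The CAS below claims an unswept span for sweeping on this path.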
retry:
	for(s = c->nonempty.next; s != &c->nonempty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_unlock(c);
			runtime_MSpan_Sweep(s);
			runtime_lock(c);
			// the span could have been moved to heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		goto havespan;
	}

	for(s = c->empty.next; s != &c->empty; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(&c->empty, s);
			runtime_unlock(c);
			runtime_MSpan_Sweep(s);
			runtime_lock(c);
			// the span could be moved to nonempty or heap, retry
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}

	// Replenish central list if empty.
	if(!MCentral_Grow(c)) {
		runtime_unlock(c);
		return nil;
	}
	goto retry;

havespan:
	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n == 0)
		runtime_throw("empty span");
	if(s->freelist == nil)
		runtime_throw("freelist empty");
	c->nfree -= n;
	runtime_MSpanList_Remove(s);
	runtime_MSpanList_InsertBack(&c->empty, s);
	s->incache = true;
	runtime_unlock(c);
	return s;
}
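
// A rough sketch of the intended pairing, for reference (the actual caller
// lives in the MCache code): a cache refill calls runtime_MCentral_CacheSpan(c)
// on the MCentral for its size class, allocates from s->freelist without
// holding c's lock (safe because s->incache is set above), and later hands
// the span back with runtime_MCentral_UncacheSpan(c, s).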

// Return span from an MCache.
void
runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
	MLink *v;
	int32 cap, n;

	runtime_lock(c);

	s->incache = false;

	// Move any explicitly freed items from the freebuf to the freelist.
	while((v = s->freebuf) != nil) {
		s->freebuf = v->next;
		runtime_markfreed(v);
		v->next = s->freelist;
		s->freelist = v;
		s->ref--;
	}

	if(s->ref == 0) {
		// Free back to heap. Unlikely, but possible.
		MCentral_ReturnToHeap(c, s); // unlocks c
		return;
	}

	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n > 0) {
		c->nfree += n;
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}
	runtime_unlock(c);
}

// Free the list of objects back into the central free list c.
// Called from runtime_free.
void
runtime_MCentral_FreeList(MCentral *c, MLink *start)
{
	MLink *next;

	runtime_lock(c);
	for(; start != nil; start = next) {
		next = start->next;
		MCentral_Free(c, start);
	}
	runtime_unlock(c);
}

// Helper: free one object back into the central free list.
// Caller must hold lock on c on entry. Holds lock on exit.
static void
MCentral_Free(MCentral *c, MLink *v)
{
	MSpan *s;

	// Find span for v.
	s = runtime_MHeap_Lookup(&runtime_mheap, v);
	if(s == nil || s->ref == 0)
		runtime_throw("invalid free");
	if(s->sweepgen != runtime_mheap.sweepgen)
		runtime_throw("free into unswept span");

	// If the span is currently being used unsynchronized by an MCache,
	// we can't modify the freelist. Add to the freebuf instead. The
	// items will get moved to the freelist when the span is returned
	// by the MCache.
	if(s->incache) {
		v->next = s->freebuf;
		s->freebuf = v;
		return;
	}

	// Move span to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the object to span's free list.
	runtime_markfreed(v);
	v->next = s->freelist;
	s->freelist = v;
	s->ref--;
	c->nfree++;

	// If s is completely freed, return it to the heap.
	if(s->ref == 0) {
		MCentral_ReturnToHeap(c, s); // unlocks c
		runtime_lock(c);
	}
}

// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap. Sets sweepgen to
// the latest generation.
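// The caller is the span sweeper, runtime_MSpan_Sweep, which batches every
// object it frees from one span into a single start..end list before handing
// them back here.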
bool
runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	if(s->incache)
		runtime_throw("freespan into cached span");
	runtime_lock(c);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);

	if(s->ref != 0) {
		runtime_unlock(c);
		return false;
	}

	// s is completely freed, return it to the heap.
	MCentral_ReturnToHeap(c, s); // unlocks c
	return true;
}

void
runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *sizep, int32 *npagesp, int32 *nobj)
{
	int32 size;
	int32 npages;

	npages = runtime_class_to_allocnpages[sizeclass];
	size = runtime_class_to_size[sizeclass];
	*npagesp = npages;
	*sizep = size;
	*nobj = (npages << PageShift) / size;
}
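
// Note on the object count above: a span of npages pages holds
// (npages << PageShift) / size objects; the integer division means any
// remainder at the end of the span is left over, which is why MCentral_Grow
// below passes a "leftover" flag to runtime_markspan when size*n does not
// fill the whole span.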

// Fetch a new span from the heap and
// carve into objects for the free list.
static bool
MCentral_Grow(MCentral *c)
{
	int32 i, n, npages;
	uintptr size;
	MLink **tailp, *v;
	byte *p;
	MSpan *s;
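
	// Drop c's lock across the heap allocation: runtime_MHeap_Alloc takes the
	// heap lock and may be slow. Only c->sizeclass, which never changes after
	// Init, is read while the lock is dropped.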
	runtime_unlock(c);
	runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n);
	s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0, 1);
	if(s == nil) {
		// TODO(rsc): Log out of memory
		runtime_lock(c);
		return false;
	}

	// Carve span into sequence of blocks.
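	// tailp always points at the link field that should receive the next
	// block (initially s->freelist itself), so the blocks end up chained
	// in increasing address order.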
	tailp = &s->freelist;
	p = (byte*)(s->start << PageShift);
	s->limit = p + size*n;
	for(i=0; i<n; i++) {
		v = (MLink*)p;
		*tailp = v;
		tailp = &v->next;
		p += size;
	}
	*tailp = nil;
	runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift));

	runtime_lock(c);
	c->nfree += n;
	runtime_MSpanList_Insert(&c->nonempty, s);
	return true;
}

// Return s to the heap. s must be unused (s->ref == 0). Unlocks c.
static void
MCentral_ReturnToHeap(MCentral *c, MSpan *s)
{
	int32 size;

	size = runtime_class_to_size[c->sizeclass];
	runtime_MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	if(s->ref != 0)
		runtime_throw("ref wrong");
	c->nfree -= (s->npages << PageShift) / size;
	runtime_unlock(c);
	runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime_MHeap_Free(&runtime_mheap, s, 0);
}