[official-gcc.git] / libgo / runtime / mheap.c
blob 52c6d8c1baa696667870eea0f38b61d1d68dd226
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
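
// A minimal sketch of what the in-use invariant buys (obj and h here are
// hypothetical): any interior pointer into an allocated span can be mapped
// back to its MSpan with a single heap-map lookup.
//
//	PageID p = (uintptr)obj >> PageShift;        // page holding obj
//	MSpan *s = runtime_MHeap_LookupMaybe(h, p);  // nil unless p is in an in-use span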

#include "runtime.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);

// Callback passed to FixAlloc for the span allocator: called for each new
// MSpan it carves out, so every span gets threaded onto h->allspans.
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;

	h = vh;
	s = (MSpan*)p;
	s->allnext = h->allspans;
	h->allspans = s;
}

// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
	uint32 i;

	runtime_initlock(h);
	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
	runtime_MHeapMap_Init(&h->map, alloc);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime_MSpanList_Init(&h->free[i]);
	runtime_MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}
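
// A sketch of how this initializer is meant to be driven (the call site
// lives in the malloc setup code, not in this file; the global heap name
// below is assumed, while runtime_SysAlloc is the obvious allocator to pass):
//
//	runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);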

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += m->mcache->local_alloc;
	m->mcache->local_alloc = 0;
	mstats.heap_objects += m->mcache->local_objects;
	m->mcache->local_objects = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	return s;
}

static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	s->state = MSpanInUse;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		runtime_MHeapMap_Set(&h->map, t->start - 1, s);
		runtime_MHeapMap_Set(&h->map, t->start, t);
		runtime_MHeapMap_Set(&h->map, t->start + t->npages - 1, t);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	for(n=0; n<npage; n++)
		runtime_MHeapMap_Set(&h->map, s->start+n, s);
	return s;
}

// Allocate a span of at least npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}
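
// Worked example of the policy above (illustrative numbers only): with
// free large spans of 3, 5, and 5 pages and a request for 4 pages, the
// 3-page span is skipped, and of the two 5-page candidates the one with
// the lower start address is returned.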

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;
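
	// Worked example of the sizing above (illustrative numbers only):
	// a request for 5 pages rounds up to 16 pages (64kB); if that is
	// still smaller than HeapAllocChunk, the ask grows to a full chunk,
	// so small growths still map whole chunks rather than tiny slivers.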

	v = runtime_SysAlloc(ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_SysAlloc(ask);
		}
		if(v == nil)
			return false;
	}
	mstats.heap_sys += ask;

	if((byte*)v < h->min || h->min == nil)
		h->min = v;
	if((byte*)v+ask > h->max)
		h->max = (byte*)v+ask;

	// NOTE(rsc): In tcmalloc, if we've accumulated enough
	// system allocations, the heap map gets entirely allocated
	// in 32-bit mode. (In 64-bit mode that's not practical.)
	if(!runtime_MHeapMap_Preallocate(&h->map, ((uintptr)v>>PageShift) - 1, (ask>>PageShift) + 2)) {
		runtime_SysFree(v, ask);
		return false;
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	mstats.mspan_inuse = h->spanalloc.inuse;
	mstats.mspan_sys = h->spanalloc.sys;
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	runtime_MHeapMap_Set(&h->map, s->start, s);
	runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}

// Look up the span at the given page number.
// Page number is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, PageID p)
{
	return runtime_MHeapMap_Get(&h->map, p);
}

// Look up the span at the given page number.
// Page number is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, PageID p)
{
	MSpan *s;

	s = runtime_MHeapMap_GetMaybe(&h->map, p);
	if(s == nil || p < s->start || p - s->start >= s->npages)
		return nil;
	if(s->state != MSpanInUse)
		return nil;
	return s;
}
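
// A sketch of how the two lookups divide the work (h, p, and s here are
// hypothetical): callers that hold a page known to be a span boundary can
// use the unchecked form, while code probing arbitrary candidate pointers
// (such as the garbage collector) must use the checked form.
//
//	s = runtime_MHeap_Lookup(h, p);       // p known to be mapped (span start or end)
//	s = runtime_MHeap_LookupMaybe(h, p);  // p arbitrary; may return nil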

// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	mstats.heap_alloc += m->mcache->local_alloc;
	m->mcache->local_alloc = 0;
	mstats.heap_objects += m->mcache->local_objects;
	m->mcache->local_objects = 0;
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}

static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	MSpan *t;

	if(s->state != MSpanInUse || s->ref != 0) {
		// runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);

	// Coalesce with earlier, later spans.
	if((t = runtime_MHeapMap_Get(&h->map, s->start - 1)) != nil && t->state != MSpanInUse) {
		s->start = t->start;
		s->npages += t->npages;
		runtime_MHeapMap_Set(&h->map, s->start, s);
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}
	if((t = runtime_MHeapMap_Get(&h->map, s->start + s->npages)) != nil && t->state != MSpanInUse) {
		s->npages += t->npages;
		runtime_MHeapMap_Set(&h->map, s->start + s->npages - 1, s);
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->large, s);

	// TODO(rsc): IncrementalScavenge() to return memory to OS.
}
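
// Worked example of the coalescing above (illustrative page numbers only):
// freeing a span covering pages [10,12) while [8,10) and [12,16) are
// already free yields one free span covering [8,16), which is then
// inserted into h->free[8] (or h->large, had it grown past nelem(h->free)).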

// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->state = 0;
}

// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}

// Remove a span from its list, leaving both links nil.
// Removing an already-detached span is a no-op.
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}

// Report whether the list has no spans on it.
bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}

// Insert a detached span at the front of the list.
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil)
		runtime_throw("MSpanList_Insert");
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}
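
// A minimal sketch of the list discipline above (list and s here are
// hypothetical): spans are inserted only while detached, and removal
// leaves both links nil, so removing an already-detached span is harmless.
//
//	runtime_MSpanList_Init(&list);
//	runtime_MSpanList_Insert(&list, s);   // s->next and s->prev must be nil
//	runtime_MSpanList_Remove(s);          // s may now be inserted elsewhere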