// libgo/runtime/mheap.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);
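
// RecordSpan is the callback FixAlloc invokes for each MSpan it carves
// out: it appends the span to h->allspans, growing that array (by 1.5x,
// with a 64kB floor) whenever it fills up.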
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]));
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]));
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}

// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
	uint32 i;

	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime_MSpanList_Init(&h->free[i]);
	runtime_MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m()->mcache);
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
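	// A nonzero first word marks a span that may still hold old data
	// (see the note on npreleased in MHeap_AllocLocked), so clear the
	// memory only when the caller asked for zeroed pages.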
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}

static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0) {
		// We have called runtime_SysUnused with these pages, and on
		// Unix systems it called madvise. At this point at least
		// some BSD-based kernels will return these pages either as
		// zeros or with the old data. For our caller, the first word
		// in the page indicates whether the span contains zeros or
		// not (this word was set when the span was freed by
		// MCentral_Free or runtime_MCentral_FreeSpan). If the first
		// page in the span is returned as zeros, and some subsequent
		// page is returned with the old data, then we will be
		// returning a span that is assumed to be all zeros, but the
		// actual data will not be all zeros. Avoid that problem by
		// explicitly marking the span as not being zeroed, just in
		// case. The beadbead constant we use here means nothing; it
		// is just a unique constant not seen elsewhere in the
		// runtime, as a clue in case it turns up unexpectedly in
		// memory or in a stack trace.
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
	}
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
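		// On 64-bit systems the page map is indexed relative to
		// arena_start, since the arena does not begin at address 0.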
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->map[p+n] = s;
	return s;
}

// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
			return false;
		}
	}
	mstats.heap_sys += ask;

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	mstats.mspan_inuse = h->spanalloc.inuse;
	mstats.mspan_sys = h->spanalloc.sys;
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	h->map[p] = s;
	h->map[p + s->npages - 1] = s;
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start;
	return h->map[p >> PageShift];
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	if(sizeof(void*) == 8)
		q -= (uintptr)h->arena_start >> PageShift;
	s = h->map[q];
	if(s == nil || p < s->start || p - s->start >= s->npages)
		return nil;
	if(s->state != MSpanInUse)
		return nil;
	return s;
}

// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	runtime_purgecachedstats(runtime_m()->mcache);
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}
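
// Free the span, coalescing it with any adjacent free spans.
// h must be locked.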
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	uintptr *sp, *tp;
	MSpan *t;
	PageID p;

	if(s->types.sysalloc)
		runtime_settype_sysfree(s);
	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	sp = (uintptr*)(s->start<<PageShift);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*tp |= *sp;	// propagate "needs zeroing" mark
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		p -= t->npages;
		h->map[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}
	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*sp |= *tp;	// propagate "needs zeroing" mark
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		h->map[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->large, s);
}
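
// Run a forced garbage collection on behalf of the scavenger,
// then wake the scavenger, which is sleeping on the note.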
static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}
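
// Release the pages of every span on list that has been idle for
// longer than limit nanoseconds; returns the total bytes released.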
static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased;
	MSpan *s;

	if(runtime_MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats.heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;
			runtime_SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
		}
	}
	return sumreleased;
}
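
// Scavenge every fixed-size free list and the large-span list.
// The heap must be locked.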
static uintptr
scavenge(uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = runtime_mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->large, now, limit);
	return sumreleased;
}

// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime_MHeap_Scavenger(void* dummy)
{
	G *g;
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	uint32 k;
	uintptr sumreleased;
	const byte *env;
	bool trace;
	Note note, *notep;

	USED(dummy);

	g = runtime_g();
	g->issystem = true;
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;
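
	// Scavenger trace output is enabled when GOGCTRACE, the same
	// variable the collector uses, is set to a positive value.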
	trace = false;
	env = runtime_getenv("GOGCTRACE");
	if(env != nil)
		trace = runtime_atoi(env) > 0;

	h = runtime_mheap;
	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_entersyscallblock();
		runtime_notetsleep(&note, tick);
		runtime_exitsyscall();

		runtime_lock(h);
		now = runtime_nanotime();
		if(now - mstats.last_gc > forcegc) {
			runtime_unlock(h);
			// The scavenger cannot block other goroutines;
			// otherwise the deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			notep = &note;
			__go_go(forcegchelper, (void*)notep);
			runtime_entersyscallblock();
			runtime_notesleep(&note);
			runtime_exitsyscall();
			if(trace)
				runtime_printf("scvg%d: GC forced\n", k);
			runtime_lock(h);
			now = runtime_nanotime();
		}
		sumreleased = scavenge(now, limit);
		runtime_unlock(h);

		if(trace) {
			if(sumreleased > 0)
				runtime_printf("scvg%d: %p MB released\n", k, sumreleased>>20);
			runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
				k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
				mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
		}
	}
}
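
// Called as runtime/debug.FreeOSMemory: force a collection, then
// release everything possible by scavenging with a zero idle limit.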
void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");

void
runtime_debug_freeOSMemory(void)
{
	runtime_gc(1);
	runtime_lock(runtime_mheap);
	scavenge(~(uintptr)0, 0);
	runtime_unlock(runtime_mheap);
}

// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->elemsize = 0;
	span->state = 0;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
}

// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}
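
// Unlink span from whatever list it is on; a no-op if it is not linked.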
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}
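
// Report whether list has no spans on it.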
bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}
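
// Insert span at the front of list.
// The span must not already be on a list.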
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}