// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h)
{
	uint32 i;

	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime_MSpanList_Init(&h->free[i]);
	runtime_MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}
void
runtime_MHeap_MapSpans(MHeap *h)
{
	uintptr pagesize;
	uintptr n;

	// Map spans array, PageSize at a time.
	n = (uintptr)h->arena_used;
	n -= (uintptr)h->arena_start;
	n = n / PageSize * sizeof(h->spans[0]);
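	// Rough sizing sketch (assuming the usual 4 KB PageSize and 8-byte
	// span pointers on a 64-bit system): each arena page needs one
	// h->spans entry, so every 4 KB of arena adds 8 bytes here, i.e.
	// 1 GB of arena maps to about a 2 MB spans array.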
	n = ROUND(n, PageSize);
	pagesize = getpagesize();
	n = ROUND(n, pagesize);
	if(h->spans_mapped >= n)
		return;
	runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, &mstats.other_sys);
	h->spans_mapped = n;
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
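	// The first word of a span's memory is used as a "may contain
	// non-zero data" marker (set when the span is freed). Only bother
	// clearing the memory when that marker is set and the caller asked
	// for zeroed pages.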
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0) {
		// We have called runtime_SysUnused with these pages, and on
		// Unix systems it called madvise. At this point at least
		// some BSD-based kernels will return these pages either as
		// zeros or with the old data. For our caller, the first word
		// in the page indicates whether the span contains zeros or
		// not (this word was set when the span was freed by
		// MCentral_Free or runtime_MCentral_FreeSpan). If the first
		// page in the span is returned as zeros, and some subsequent
		// page is returned with the old data, then we will be
		// returning a span that is assumed to be all zeros, but the
		// actual data will not be all zeros. Avoid that problem by
		// explicitly marking the span as not being zeroed, just in
		// case. The beadbead constant we use here means nothing, it
		// is just a unique constant not seen elsewhere in the
		// runtime, as a clue in case it turns up unexpectedly in
		// memory or in a stack trace.
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
	}
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);	// copy "needs zeroing" mark
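		// MHeap_FreeLocked below only accepts spans that look in use
		// (it throws on anything else), so mark the trimmed tail in
		// use before handing it back; FreeLocked then re-inserts it
		// and, where possible, coalesces it with neighbouring free
		// spans.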
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince;	// preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->spans[p+n] = s;
	return s;
}
// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}
// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
			|| s->npages < best->npages
			|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}
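// Illustrative example (not from the original source): with large spans
// of 7, 4 and 4 pages on the list, a 3-page request prefers a 4-page
// span over the 7-page one, and between the two 4-page spans picks the
// one with the lower start address.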
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
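	// (npage+15)&~15 rounds up to the next multiple of 16 pages; with
	// the usual 4 KB pages, e.g. a 3-page request becomes 16 pages
	// (64 kB), before HeapAllocChunk raises small requests further.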
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
			return false;
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	h->spans[p] = s;
	h->spans[p + s->npages - 1] = s;
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	p -= (uintptr)h->arena_start;
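	// h->spans is indexed by page number within the arena, so the byte
	// offset from arena_start shifted right by PageShift selects the
	// entry for the page containing v.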
	return h->spans[p >> PageShift];
}
// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	q -= (uintptr)h->arena_start >> PageShift;
	s = h->spans[q];
	if(s == nil || p < s->start || (byte*)v >= s->limit || s->state != MSpanInUse)
		return nil;
	return s;
}
// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	uintptr *sp, *tp;
	MSpan *t;
	PageID p;

	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	sp = (uintptr*)(s->start<<PageShift);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
		if(t->npreleased == 0) { // can't touch this otherwise
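			// If pages of t were released to the OS (madvised away),
			// its first word may no longer hold a valid "needs
			// zeroing" mark, so only read and propagate it when
			// npreleased == 0.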
			tp = (uintptr*)(t->start<<PageShift);
			*tp |= *sp;	// propagate "needs zeroing" mark
		}
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		p -= t->npages;
		h->spans[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}
	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
		if(t->npreleased == 0) { // can't touch this otherwise
			tp = (uintptr*)(t->start<<PageShift);
			*sp |= *tp;	// propagate "needs zeroing" mark
		}
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		h->spans[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->large, s);
}
static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}
static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased, start, end, pagesize;
	MSpan *s;

	if(runtime_MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats.heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;

			start = s->start << PageShift;
			end = start + (s->npages << PageShift);

			// Round start up and end down to ensure we
			// are acting on entire pages.
			pagesize = getpagesize();
			start = ROUND(start, pagesize);
			end &= ~(pagesize - 1);
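			// For example, on a system whose physical page size is
			// 64 kB, a span smaller than one system page rounds to an
			// empty [start, end) range and nothing is handed back.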
			if(end > start)
				runtime_SysUnused((void*)start, end - start);
		}
	}

	return sumreleased;
}
static void
scavenge(int32 k, uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = &runtime_mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->large, now, limit);

	if(runtime_debug.gctrace > 0) {
		if(sumreleased > 0)
			runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
		runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
			k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
			mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
	}
}
// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime_MHeap_Scavenger(void* dummy)
{
	G *g;
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	uint32 k;
	Note note, *notep;

	USED(dummy);

	g = runtime_g();
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;

	h = &runtime_mheap;
	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_notetsleepg(&note, tick);

		runtime_lock(h);
		now = runtime_nanotime();
		if(now - mstats.last_gc > forcegc) {
			runtime_unlock(h);
			// The scavenger can not block other goroutines,
			// otherwise deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			notep = &note;
			__go_go(forcegchelper, (void*)notep);
			runtime_notetsleepg(&note, -1);
			if(runtime_debug.gctrace > 0)
				runtime_printf("scvg%d: GC forced\n", k);
			runtime_lock(h);
			now = runtime_nanotime();
		}
		scavenge(k, now, limit);
		runtime_unlock(h);
	}
}
void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");

void
runtime_debug_freeOSMemory(void)
{
	runtime_lock(&runtime_mheap);
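	// Passing now = ~(uintptr)0 and limit = 0 makes every span that is
	// not already fully released look old enough to release, so the
	// scavenge pass below hands all idle pages back to the OS.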
	scavenge(-1, ~(uintptr)0, 0);
	runtime_unlock(&runtime_mheap);
}
// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->elemsize = 0;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
}
// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}
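// The list header acts as a sentinel in a circular doubly-linked list:
// an empty list simply points back at itself.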
bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}