// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
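//
// Example (illustrative, not from the original comment): an allocated
// 3-page span s starting at page p has heapmap(p) == heapmap(p+1) ==
// heapmap(p+2) == s, while a free span only guarantees the map entries
// for its first and last pages.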
static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);
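// RecordSpan (below) is the per-allocation hook handed to
// runtime_FixAlloc_Init in runtime_MHeap_Init: each MSpan that
// spanalloc hands out is threaded onto h->allspans so the runtime can
// enumerate every span ever created. (This description of the hook
// argument's role is inferred from the FixAlloc_Init call below.)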
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;

	h = vh;
	s = (MSpan*)p;
	s->allnext = h->allspans;
	h->allspans = s;
}
// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
	uint32 i;

	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime_MSpanList_Init(&h->free[i]);
	runtime_MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}
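// Example (illustrative): after Init, h->free[4] is an empty circular
// list that will hold free spans of exactly 4 pages; free spans of
// nelem(h->free) pages or more live on h->large instead (see
// MHeap_FreeLocked).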
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m());
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	return s;
}
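// Usage sketch (hypothetical caller, not from this file): passing
// acct=0 skips the heap_objects/heap_alloc accounting, so only
// heap_inuse changes:
//
//	MSpan *s = runtime_MHeap_Alloc(h, npage, 0, 0);
//	if(s == nil)
//		runtime_throw("out of memory");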
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);  // copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
	}

	if(*(uintptr*)(s->start<<PageShift) != 0)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->map[p+n] = s;
	return s;
}
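// Worked example (illustrative, assuming PageShift == 12, i.e. 4kB
// pages): a request for npage=2 that finds an 8-page free span s trims
// a 6-page tail t starting at s->start+2, frees t back through
// MHeap_FreeLocked, and returns the 2-page front half with all of its
// map entries pointing at s.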
// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}
// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}
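// Example (illustrative): with npage=3 and a list holding spans of 5,
// 4, and 4 pages starting at pages 90, 70, and 50, the two 4-page
// spans tie on size and the one starting at page 50 wins the
// tie-break, so BestFit returns it.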
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %llu-byte block (%llu in use)\n", (unsigned long long)ask, (unsigned long long)mstats.heap_sys);
			return false;
		}
	}
	mstats.heap_sys += ask;

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	mstats.mspan_inuse = h->spanalloc.inuse;
	mstats.mspan_sys = h->spanalloc.sys;
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	h->map[p] = s;
	h->map[p + s->npages - 1] = s;
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}
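// Worked example (illustrative, assuming PageShift == 12 and
// HeapAllocChunk == 1<<20): npage=3 rounds up to 16 pages
// ((3+15)&~15 == 16), so ask is 16<<12 = 64kB, which is then raised to
// the 1MB chunk minimum; the resulting 256-page span is freed into the
// heap so later allocations can carve it up.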
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start;
	return h->map[p >> PageShift];
}
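// Example (illustrative, assuming PageShift == 12): on 64-bit systems
// the map is indexed by page offset from arena_start, so looking up
// v == h->arena_start + 0x5123 reads h->map[5].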
// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans.  Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	if(sizeof(void*) == 8)
		q -= (uintptr)h->arena_start >> PageShift;
	s = h->map[q];
	if(s == nil || p < s->start || p - s->start >= s->npages)
		return nil;
	if(s->state != MSpanInUse)
		return nil;
	return s;
}
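// Example (illustrative): a pointer into the middle of a *free* span
// can hit a stale map entry, which is why LookupMaybe re-checks that
// the page lies within [s->start, s->start+s->npages) and that s is
// still MSpanInUse before trusting the entry.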
// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	runtime_purgecachedstats(runtime_m());
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	uintptr *sp, *tp;
	MSpan *t;
	PageID p;

	if(s->state != MSpanInUse || s->ref != 0) {
		// runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	sp = (uintptr*)(s->start<<PageShift);

	// Coalesce with earlier, later spans.
	p = s->start;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*tp |= *sp;	// propagate "needs zeroing" mark
		s->start = t->start;
		s->npages += t->npages;
		p -= t->npages;
		h->map[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}
	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*sp |= *tp;	// propagate "needs zeroing" mark
		s->npages += t->npages;
		h->map[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->large, s);

	// TODO(rsc): IncrementalScavenge() to return memory to OS.
}
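// Worked example (illustrative): freeing a 2-page span covering pages
// 10-11 while pages 8-9 and 12-15 hold free spans merges all three
// into a single 8-page span starting at page 8, which then lands on
// h->free[8] (assuming nelem(h->free) > 8).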
// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->state = 0;
}
// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}
bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		// runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}