// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.

// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
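//
// For example, an allocated 3-page span starting at page p has
// heapmap(p) == heapmap(p+1) == heapmap(p+2) == span; once it is returned
// to a free list, only the first and last page entries are guaranteed to
// point back at the span.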
static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc0.c.
			if(h->allspans != runtime_mheap.sweepspans)
				runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h)
{
	uint32 i;

	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
	runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
	runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++) {
		runtime_MSpanList_Init(&h->free[i]);
		runtime_MSpanList_Init(&h->busy[i]);
	}
	runtime_MSpanList_Init(&h->freelarge);
	runtime_MSpanList_Init(&h->busylarge);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}
void
runtime_MHeap_MapSpans(MHeap *h)
{
	uintptr n, pagesize;

	// Map spans array, PageSize at a time.
	n = (uintptr)h->arena_used;
	n -= (uintptr)h->arena_start;
	n = n / PageSize * sizeof(h->spans[0]);
	n = ROUND(n, PageSize);
	pagesize = getpagesize();
	n = ROUND(n, pagesize);
	if(h->spans_mapped >= n)
		return;
	runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
	h->spans_mapped = n;
}
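
// For illustration (values assumed, not taken from malloc.h): with 4kB pages
// (PageShift == 12) and 8-byte h->spans entries, growing arena_used by 1GB
// adds 1GB/4kB == 262144 entries, i.e. 2MB of newly mapped spans metadata,
// rounded up to both PageSize and the OS page size.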
// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
static uintptr
MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
{
	MSpan *s;
	uintptr n;
	uint32 sg;

	n = 0;
	sg = runtime_mheap.sweepgen;
retry:
	for(s = list->next; s != list; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(list, s);
			n += runtime_MSpan_Sweep(s);
			if(n >= npages)
				return n;
			// the span could have been moved elsewhere
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}
	return n;
}
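
// Note on the sweepgen comparisons above: relative to the heap's current
// sweepgen sg, a span whose sweepgen equals sg-2 still needs sweeping,
// sg-1 means it is currently being swept, and sg means it has already been
// swept this cycle; the runtime_cas transition from sg-2 to sg-1 is how a
// sweeper claims a span.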
// Sweeps and reclaims at least npage pages into heap.
// Called before allocating npage pages.
static void
MHeap_Reclaim(MHeap *h, uintptr npage)
{
	uintptr reclaimed, n;

	// First try to sweep busy spans with large objects of size >= npage,
	// this has good chances of reclaiming the necessary space.
	for(n=npage; n < nelem(h->busy); n++) {
		if(MHeap_ReclaimList(h, &h->busy[n], npage))
			return;
	}

	// Then -- even larger objects.
	if(MHeap_ReclaimList(h, &h->busylarge, npage))
		return;

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed = 0;
	for(n=0; n < npage && n < nelem(h->busy); n++) {
		reclaimed += MHeap_ReclaimList(h, &h->busy[n], npage-reclaimed);
		if(reclaimed >= npage)
			return;
	}

	// Now sweep everything that is not yet swept.
	for(;;) {
		n = runtime_sweepone();
		if(n == (uintptr)-1) // all spans are swept
			break;
		reclaimed += n;
		if(reclaimed >= npage)
			break;
	}
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->spans[p+n] = s;
	return s;
}
// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->freelarge, npage, nil);
}
// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;
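
	// Example of the rounding above (4kB pages, PageShift == 12, assumed for
	// illustration): a request for npage == 5 becomes (5+15)&~15 == 16 pages,
	// so ask == 64kB, and ask is then raised to HeapAllocChunk if that
	// constant is larger; any surplus pages come back onto the free lists
	// when the fake "in use" span created below is freed.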
	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
			return false;
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	h->spans[p] = s;
	h->spans[p + s->npages - 1] = s;
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	p -= (uintptr)h->arena_start;
	return h->spans[p >> PageShift];
}
// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	q -= (uintptr)h->arena_start >> PageShift;
	s = h->spans[q];
	if(s == nil || p < s->start || (byte*)v >= s->limit || s->state != MSpanInUse)
		return nil;
	return s;
}
// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	MSpan *t;
	PageID p;

	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0 || s->sweepgen != h->sweepgen) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d sweepgen %d/%d\n",
			s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		s->needzero |= t->needzero;
		p -= t->npages;
		h->spans[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}
	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		s->needzero |= t->needzero;
		h->spans[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->freelarge, s);
}
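
// Coalescing example for MHeap_FreeLocked above: freeing a 2-page span whose
// right neighbour in h->spans is already a free 3-page span produces a single
// 5-page span, the neighbour's MSpan is returned to h->spanalloc, and the
// merged span is inserted into h->free[5] (or h->freelarge for big spans).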
static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}
static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased, start, end, pagesize;
	MSpan *s;

	if(runtime_MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats.heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;

			start = s->start << PageShift;
			end = start + (s->npages << PageShift);

			// Round start up and end down to ensure we
			// are acting on entire pages.
			pagesize = getpagesize();
			start = ROUND(start, pagesize);
			end &= ~(pagesize - 1);
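
			// Example of the rounding above: if the OS page size is 64kB
			// while span boundaries are only PageSize-aligned (page sizes
			// assumed for illustration), start is rounded up to the next
			// 64kB boundary and end is rounded down, so runtime_SysUnused
			// below only ever covers OS pages that lie entirely inside
			// the span.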
			if(end > start)
				runtime_SysUnused((void*)start, end - start);
		}
	}
	return sumreleased;
}
static void
scavenge(int32 k, uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = &runtime_mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->freelarge, now, limit);
	if(runtime_debug.gctrace > 0) {
		runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
		runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
			k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
			mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
	}
}
// Release (part of) unused memory to OS.
// Goroutine created at startup.
runtime_MHeap_Scavenger(void* dummy)
{
	uint64 tick, now, forcegc, limit;

	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	// Make wake-up period small enough for the sampling to be correct.

	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_notetsleepg(&note, tick);

		unixnow = runtime_unixnanotime();
		if(unixnow - mstats.last_gc > forcegc) {
			// The scavenger cannot block other goroutines,
			// otherwise the deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			__go_go(forcegchelper, (void*)notep);
			runtime_notetsleepg(&note, -1);
			if(runtime_debug.gctrace > 0)
				runtime_printf("scvg%d: GC forced\n", k);
		}
		now = runtime_nanotime();
		scavenge(k, now, limit);
	}
}
void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");

void
runtime_debug_freeOSMemory(void)
{
	runtime_gc(2); // force GC and do eager sweep
	runtime_lock(&runtime_mheap);
	scavenge(-1, ~(uintptr)0, 0);
	runtime_unlock(&runtime_mheap);
}
// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->incache = false;
	span->state = MSpanDead;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
	span->specialLock.key = 0;
	span->specials = nil;
}
// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}
bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}
void
runtime_MSpanList_InsertBack(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list;
	span->prev = list->prev;
	span->next->prev = span;
	span->prev->next = span;
}
// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
static bool
addspecial(void *p, Special *s)
{
	MSpan *span;
	Special **t, *x;
	uintptr offset;
	byte kind;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("addspecial on invalid pointer");

	// Ensure that the span is swept.
	// GC accesses specials list w/o locks. And it's just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);
	kind = s->kind;

	runtime_lock(&span->specialLock);

	// Find splice point, check for existing record.
	t = &span->specials;
	while((x = *t) != nil) {
		if(offset == x->offset && kind == x->kind) {
			runtime_unlock(&span->specialLock);
			runtime_m()->locks--;
			return false; // already exists
		}
		if(offset < x->offset || (offset == x->offset && kind < x->kind))
			break;
		t = &x->next;
	}
	// Splice in record, fill in offset.
	s->offset = offset;
	s->next = x;
	*t = s;
	runtime_unlock(&span->specialLock);
	runtime_m()->locks--;
	return true;
}
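
// The specials list of a span is therefore kept sorted by offset and then by
// kind, so the scans in removespecial and freeallspecials below can walk it
// in address order (freeallspecials stops once it has passed the object's
// extent).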
// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
static Special*
removespecial(void *p, byte kind)
{
	MSpan *span;
	Special *s, **t;
	uintptr offset;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("removespecial on invalid pointer");

	// Ensure that the span is swept.
	// GC accesses specials list w/o locks. And it's just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);

	runtime_lock(&span->specialLock);
	t = &span->specials;
	while((s = *t) != nil) {
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if(offset == s->offset && kind == s->kind) {
			*t = s->next;
			runtime_unlock(&span->specialLock);
			runtime_m()->locks--;
			return s;
		}
		t = &s->next;
	}
	runtime_unlock(&span->specialLock);
	runtime_m()->locks--;
	return nil;
}
// Adds a finalizer to the object p. Returns true if it succeeded.
bool
runtime_addfinalizer(void *p, FuncVal *f, const FuncType *ft, const PtrType *ot)
{
	SpecialFinalizer *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialFinalizer;
	s->fn = f;
	s->ft = ft;
	s->ot = ot;
	if(addspecial(p, s))
		return true;

	// There was an old finalizer
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
	return false;
}
// Removes the finalizer (if any) from the object p.
void
runtime_removefinalizer(void *p)
{
	SpecialFinalizer *s;

	s = (SpecialFinalizer*)removespecial(p, KindSpecialFinalizer);
	if(s == nil)
		return; // there wasn't a finalizer to remove
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
}
// Set the heap profile bucket associated with addr to b.
void
runtime_setprofilebucket(void *p, Bucket *b)
{
	SpecialProfile *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialProfile;
	s->b = b;
	if(!addspecial(p, s))
		runtime_throw("setprofilebucket: profile already set");
}
// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
// Returns true if we should keep working on deallocating p.
bool
runtime_freespecial(Special *s, void *p, uintptr size, bool freed)
{
	SpecialFinalizer *sf;
	SpecialProfile *sp;

	switch(s->kind) {
	case KindSpecialFinalizer:
		sf = (SpecialFinalizer*)s;
		runtime_queuefinalizer(p, sf->fn, sf->ft, sf->ot);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, sf);
		runtime_unlock(&runtime_mheap.speciallock);
		return false; // don't free p until finalizer is done
	case KindSpecialProfile:
		sp = (SpecialProfile*)s;
		runtime_MProf_Free(sp->b, size, freed);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialprofilealloc, sp);
		runtime_unlock(&runtime_mheap.speciallock);
		return true;
	default:
		runtime_throw("bad special kind");
		return true;
	}
}
// Free all special records for p.
void
runtime_freeallspecials(MSpan *span, void *p, uintptr size)
{
	Special *s, **t, *list;
	uintptr offset;

	if(span->sweepgen != runtime_mheap.sweepgen)
		runtime_throw("runtime: freeallspecials: unswept span");
	// first, collect all specials into the list; then, free them
	// this is required to not cause deadlock between span->specialLock and proflock
	list = nil;
	offset = (uintptr)p - (span->start << PageShift);
	runtime_lock(&span->specialLock);
	t = &span->specials;
	while((s = *t) != nil) {
		if(offset + size <= s->offset)
			break;
		if(offset <= s->offset) {
			*t = s->next;
			s->next = list;
			list = s;
		} else
			t = &s->next;
	}
	runtime_unlock(&span->specialLock);

	while(list != nil) {
		s = list;
		list = s->next;
		if(!runtime_freespecial(s, p, size, true))
			runtime_throw("can't explicitly free an object with a finalizer");
	}
}
// Split an allocated span into two equal parts.
void
runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
{
	MSpan *t;
	MCentral *c;
	uintptr i, npages;
	PageID p;

	if(s->state != MSpanInUse)
		runtime_throw("MHeap_SplitSpan on a free span");
	if(s->sizeclass != 0 && s->ref != 1)
		runtime_throw("MHeap_SplitSpan doesn't have an allocated object");
	npages = s->npages;

	// remove the span from whatever list it is in now
	if(s->sizeclass > 0) {
		// must be in h->central[x].empty
		c = &h->central[s->sizeclass];
		runtime_MSpanList_Remove(s);
	} else {
		// must be in h->busy/busylarge
		runtime_MSpanList_Remove(s);
	}
	// heap is locked now

	if(npages == 1) {
		// convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
		s->sizeclass = runtime_SizeToClass(PageSize/2);
		s->elemsize = PageSize/2;
	} else {
		// convert span of n>1 pages into two spans of n/2 pages each.
		if((s->npages & 1) != 0)
			runtime_throw("MHeap_SplitSpan on an odd size span");

		// compute position in h->spans
		p = s->start;
		p -= (uintptr)h->arena_start >> PageShift;

		// Allocate a new span for the first half.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start, npages/2);
		t->limit = (byte*)((t->start + npages/2) << PageShift);
		t->state = MSpanInUse;
		t->elemsize = npages << (PageShift - 1);
		t->sweepgen = s->sweepgen;
		if(t->elemsize <= MaxSmallSize) {
			t->sizeclass = runtime_SizeToClass(t->elemsize);
		}

		// the old span holds the second half.
		s->start += npages/2;
		s->npages = npages/2;
		s->elemsize = npages << (PageShift - 1);
		if(s->elemsize <= MaxSmallSize) {
			s->sizeclass = runtime_SizeToClass(s->elemsize);
		}

		// update span lookup table
		for(i = p; i < p + npages/2; i++)
			h->spans[i] = t;
	}

	// place the span into a new list
	if(s->sizeclass > 0) {
		c = &h->central[s->sizeclass];
		// swept spans are at the end of the list
		runtime_MSpanList_InsertBack(&c->empty, s);
	} else {
		// Swept spans are at the end of lists.
		if(s->npages < nelem(h->free))
			runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
		else
			runtime_MSpanList_InsertBack(&h->busylarge, s);
	}
}
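
// Worked example for MHeap_SplitSpan above (4kB pages assumed, PageShift == 12):
// splitting a 4-page span leaves s and the new span t with 2 pages each, and
// both get elemsize == 4 << (PageShift - 1) == 8kB, exactly half of the
// original 16kB object; a 1-page span instead stays whole and is retyped to
// hold two PageSize/2 objects.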