// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.

// When an MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When an MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats()->other_sys);
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc0.c.
			if(h->allspans != runtime_mheap.sweepspans)
				runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats()->other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}
// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h)
{
	MStats *pmstats;
	uint32 i;

	pmstats = mstats();
	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &pmstats->mspan_sys);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &pmstats->mcache_sys);
	runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &pmstats->other_sys);
	runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &pmstats->other_sys);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++) {
		runtime_MSpanList_Init(&h->free[i]);
		runtime_MSpanList_Init(&h->busy[i]);
	}
	runtime_MSpanList_Init(&h->freelarge);
	runtime_MSpanList_Init(&h->busylarge);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}
void
runtime_MHeap_MapSpans(MHeap *h)
{
	uintptr pagesize;
	uintptr n;

	// Map spans array, PageSize at a time.
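	// The spans table needs one MSpan* entry per arena page, so the size
	// needed is (arena_used - arena_start) / PageSize * sizeof(h->spans[0]).
	// For example, assuming 4 kB pages and 8-byte pointers, each 1 MB of
	// arena in use needs 256 entries, i.e. 2 kB of spans table.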
	n = (uintptr)h->arena_used;
	n -= (uintptr)h->arena_start;
	n = n / PageSize * sizeof(h->spans[0]);
	n = ROUND(n, PageSize);
	pagesize = getpagesize();
	n = ROUND(n, pagesize);
	if(h->spans_mapped >= n)
		return;
	runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats()->other_sys);
	h->spans_mapped = n;
}
// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
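// A span's sweepgen, relative to the heap's sweepgen (sg below), encodes its
// sweep state: sg-2 means the span needs sweeping, sg-1 means it is currently
// being swept, and sg means it is swept and ready to use. The CAS from sg-2
// to sg-1 below is how this function claims a span for sweeping.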
static uintptr
MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
{
	MSpan *s;
	uintptr n;
	uint32 sg;

	n = 0;
	sg = runtime_mheap.sweepgen;
retry:
	for(s = list->next; s != list; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(list, s);
			runtime_unlock(h);
			n += runtime_MSpan_Sweep(s);
			runtime_lock(h);
			if(n >= npages)
				return n;
			// the span could have been moved elsewhere
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by the background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}
	return n;
}
// Sweeps and reclaims at least npage pages into the heap.
// Called before allocating npage pages.
static void
MHeap_Reclaim(MHeap *h, uintptr npage)
{
	uintptr reclaimed, n;

	// First try to sweep busy spans with large objects of size >= npage,
	// this has good chances of reclaiming the necessary space.
	for(n=npage; n < nelem(h->busy); n++) {
		if(MHeap_ReclaimList(h, &h->busy[n], npage))
			return;  // Bingo!
	}

	// Then -- even larger objects.
	if(MHeap_ReclaimList(h, &h->busylarge, npage))
		return;  // Bingo!

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed = 0;
	for(n=0; n < npage && n < nelem(h->busy); n++) {
		reclaimed += MHeap_ReclaimList(h, &h->busy[n], npage-reclaimed);
		if(reclaimed >= npage)
			return;
	}

	// Now sweep everything that is not yet swept.
	runtime_unlock(h);
	for(;;) {
		n = runtime_sweepone();
		if(n == (uintptr)-1)  // all spans are swept
			break;
		reclaimed += n;
		if(reclaimed >= npage)
			break;
	}
	runtime_lock(h);
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MSpan *s;
	MStats *pmstats;

	runtime_lock(h);
	pmstats = mstats();
	pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		pmstats->heap_inuse += npage<<PageShift;
		if(large) {
			pmstats->heap_objects++;
			pmstats->heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats()->heap_idle -= s->npages<<PageShift;
	mstats()->heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->spans[p+n] = s;
	return s;
}
// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->freelarge, npage, nil);
}
// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
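// Only spans too large for the fixed-size h->free[] lists end up on the
// freelarge list scanned here, so this linear best-fit walk is the slow path;
// smaller requests are normally satisfied directly from h->free[n] above.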
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
			|| s->npages < best->npages
			|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}
// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
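	// (npage+15)&~15 rounds npage up to the next multiple of 16; for example,
	// a 5-page request becomes 16 pages. The byte size ask = npage<<PageShift
	// is then raised to at least HeapAllocChunk, so the operating system sees
	// fewer, larger mappings.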
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats()->heap_sys);
			return false;
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	h->spans[p] = s;
	h->spans[p + s->npages - 1] = s;
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	p -= (uintptr)h->arena_start;
	return h->spans[p >> PageShift];
}
// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	q -= (uintptr)h->arena_start >> PageShift;
	s = h->spans[q];
	if(s == nil || p < s->start || (uintptr)v >= s->limit || s->state != MSpanInUse)
		return nil;
	return s;
}
// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	MStats *pmstats;

	runtime_lock(h);
	pmstats = mstats();
	pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	pmstats->heap_inuse -= s->npages<<PageShift;
	if(acct) {
		pmstats->heap_alloc -= s->npages<<PageShift;
		pmstats->heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	MSpan *t;
	PageID p;

	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0 || s->sweepgen != h->sweepgen) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d sweepgen %d/%d\n",
			s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats()->heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
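	// The lookups below rely on the invariant stated at the top of this file:
	// for a free span, h->spans[] is valid at its first and last page, so the
	// neighbors of s can be found in O(1) via h->spans[p-1] and
	// h->spans[p+s->npages].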
	p = s->start;
	p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		s->needzero |= t->needzero;
		p -= t->npages;
		h->spans[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}
	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		s->needzero |= t->needzero;
		h->spans[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->freelarge, s);
}
static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}
static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased, start, end, pagesize;
	MSpan *s;

	if(runtime_MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats()->heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;

			start = s->start << PageShift;
			end = start + (s->npages << PageShift);

			// Round start up and end down to ensure we
			// are acting on entire pages.
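			// The heap's pages need not match the operating system's page
			// size; for example, if the system page is 64 kB, a span aligned
			// to smaller heap pages can begin or end in the middle of a
			// system page. ROUND(start, pagesize) moves start up and
			// end &= ~(pagesize-1) moves end down so that only whole system
			// pages are handed to runtime_SysUnused.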
			pagesize = getpagesize();
			start = ROUND(start, pagesize);
			end &= ~(pagesize - 1);
			if(end > start)
				runtime_SysUnused((void*)start, end - start);
		}
	}
	return sumreleased;
}
static void
scavenge(int32 k, uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = &runtime_mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->freelarge, now, limit);

	if(runtime_debug.gctrace > 0) {
		if(sumreleased > 0)
			runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
		runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
			k, mstats()->heap_inuse>>20, mstats()->heap_idle>>20, mstats()->heap_sys>>20,
			mstats()->heap_released>>20, (mstats()->heap_sys - mstats()->heap_released)>>20);
	}
}
// Release (part of) unused memory to OS.
// Goroutine created at startup.
void
runtime_MHeap_Scavenger(void* dummy)
{
	G *g;
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	int64 unixnow;
	uint32 k;
	Note note, *notep;

	USED(dummy);

	g = runtime_g();
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
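	// With the intervals above (forcegc = 2 minutes, limit = 5 minutes) the
	// scavenger wakes roughly every minute: half the smaller of the two
	// intervals, so neither deadline is overshot by more than half its length.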
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;

	h = &runtime_mheap;
	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_notetsleepg(&note, tick);

		runtime_lock(h);
		unixnow = runtime_unixnanotime();
		if(unixnow - mstats()->last_gc > forcegc) {
			runtime_unlock(h);
			// The scavenger cannot block other goroutines,
			// otherwise the deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			notep = &note;
			__go_go(forcegchelper, (void*)notep);
			runtime_notetsleepg(&note, -1);
			if(runtime_debug.gctrace > 0)
				runtime_printf("scvg%d: GC forced\n", k);
			runtime_lock(h);
		}
		now = runtime_nanotime();
		scavenge(k, now, limit);
		runtime_unlock(h);
	}
}
void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");

void
runtime_debug_freeOSMemory(void)
{
	runtime_gc(2);  // force GC and do eager sweep
	runtime_lock(&runtime_mheap);
	scavenge(-1, ~(uintptr)0, 0);
	runtime_unlock(&runtime_mheap);
}
// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->incache = false;
	span->elemsize = 0;
	span->state = MSpanDead;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
	span->speciallock.key = 0;
	span->specials = nil;
	span->needzero = 0;
}
// Initialize an empty doubly-linked list.
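// The list header is itself an MSpan used as a sentinel: in an empty list the
// header's next and prev point back at the header, which is exactly what
// runtime_MSpanList_IsEmpty checks for.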
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}
bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}
void
runtime_MSpanList_InsertBack(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list;
	span->prev = list->prev;
	span->next->prev = span;
	span->prev->next = span;
}
// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
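// The specials list of a span is kept sorted by offset within the span and,
// for equal offsets, by kind; the walk below stops at the first entry that
// should follow s, which is also where a duplicate (same offset and kind)
// would already be sitting.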
static bool
addspecial(void *p, Special *s)
{
	MSpan *span;
	Special **t, *x;
	uintptr offset;
	byte kind;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("addspecial on invalid pointer");

	// Ensure that the span is swept.
	// GC accesses specials list w/o locks. And it's just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);
	kind = s->kind;

	runtime_lock(&span->speciallock);

	// Find splice point, check for existing record.
	t = &span->specials;
	while((x = *t) != nil) {
		if(offset == x->offset && kind == x->kind) {
			runtime_unlock(&span->speciallock);
			runtime_m()->locks--;
			return false; // already exists
		}
		if(offset < x->offset || (offset == x->offset && kind < x->kind))
			break;
		t = &x->next;
	}
	// Splice in record, fill in offset.
	s->offset = offset;
	s->next = x;
	*t = s;
	runtime_unlock(&span->speciallock);
	runtime_m()->locks--;
	return true;
}
// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
static Special*
removespecial(void *p, byte kind)
{
	MSpan *span;
	Special *s, **t;
	uintptr offset;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("removespecial on invalid pointer");

	// Ensure that the span is swept.
	// GC accesses specials list w/o locks. And it's just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);

	runtime_lock(&span->speciallock);
	t = &span->specials;
	while((s = *t) != nil) {
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if(offset == s->offset && kind == s->kind) {
			*t = s->next;
			runtime_unlock(&span->speciallock);
			runtime_m()->locks--;
			return s;
		}
		t = &s->next;
	}
	runtime_unlock(&span->speciallock);
	runtime_m()->locks--;
	return nil;
}
// Adds a finalizer to the object p. Returns true if it succeeded.
bool
runtime_addfinalizer(void *p, FuncVal *f, const FuncType *ft, const PtrType *ot)
{
	SpecialFinalizer *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialFinalizer;
	s->fn = f;
	s->ft = ft;
	s->ot = ot;
	if(addspecial(p, s))
		return true;

	// There was an old finalizer
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
	return false;
}
// Removes the finalizer (if any) from the object p.
void
runtime_removefinalizer(void *p)
{
	SpecialFinalizer *s;

	s = (SpecialFinalizer*)removespecial(p, KindSpecialFinalizer);
	if(s == nil)
		return; // there wasn't a finalizer to remove
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
}
// Set the heap profile bucket associated with addr to b.
void
runtime_setprofilebucket(void *p, Bucket *b)
{
	SpecialProfile *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialProfile;
	s->b = b;
	if(!addspecial(p, s))
		runtime_throw("setprofilebucket: profile already set");
}
// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
// Returns true if we should keep working on deallocating p.
bool
runtime_freespecial(Special *s, void *p, uintptr size, bool freed)
{
	SpecialFinalizer *sf;
	SpecialProfile *sp;

	switch(s->kind) {
	case KindSpecialFinalizer:
		sf = (SpecialFinalizer*)s;
		runtime_queuefinalizer(p, sf->fn, sf->ft, sf->ot);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, sf);
		runtime_unlock(&runtime_mheap.speciallock);
		return false; // don't free p until finalizer is done
	case KindSpecialProfile:
		sp = (SpecialProfile*)s;
		runtime_MProf_Free(sp->b, size, freed);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialprofilealloc, sp);
		runtime_unlock(&runtime_mheap.speciallock);
		return true;
	default:
		runtime_throw("bad special kind");
		return true;
	}
}
// Free all special records for p.
void
runtime_freeallspecials(MSpan *span, void *p, uintptr size)
{
	Special *s, **t, *list;
	uintptr offset;

	if(span->sweepgen != runtime_mheap.sweepgen)
		runtime_throw("runtime: freeallspecials: unswept span");
	// first, collect all specials into the list; then, free them
	// this is required to avoid deadlock between span->speciallock and proflock
	list = nil;
	offset = (uintptr)p - (span->start << PageShift);
	runtime_lock(&span->speciallock);
	t = &span->specials;
	while((s = *t) != nil) {
		if(offset + size <= s->offset)
			break;
		if(offset <= s->offset) {
			*t = s->next;
			s->next = list;
			list = s;
		} else
			t = &s->next;
	}
	runtime_unlock(&span->speciallock);

	while(list != nil) {
		s = list;
		list = s->next;
		if(!runtime_freespecial(s, p, size, true))
			runtime_throw("can't explicitly free an object with a finalizer");
	}
}
// Split an allocated span into two equal parts.
void
runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
{
	MSpan *t;
	MCentral *c;
	uintptr i;
	uintptr npages;
	PageID p;

	if(s->state != MSpanInUse)
		runtime_throw("MHeap_SplitSpan on a free span");
	if(s->sizeclass != 0 && s->ref != 1)
		runtime_throw("MHeap_SplitSpan doesn't have an allocated object");
	npages = s->npages;

	// remove the span from whatever list it is in now
	if(s->sizeclass > 0) {
		// must be in h->central[x].mempty
		c = &h->central[s->sizeclass];
		runtime_lock(c);
		runtime_MSpanList_Remove(s);
		runtime_unlock(c);
		runtime_lock(h);
	} else {
		// must be in h->busy/busylarge
		runtime_lock(h);
		runtime_MSpanList_Remove(s);
	}
	// heap is locked now

	if(npages == 1) {
		// convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
		s->ref = 2;
		s->sizeclass = runtime_SizeToClass(PageSize/2);
		s->elemsize = PageSize/2;
	} else {
		// convert span of n>1 pages into two spans of n/2 pages each.
		if((s->npages & 1) != 0)
			runtime_throw("MHeap_SplitSpan on an odd size span");

		// compute position in h->spans
		p = s->start;
		p -= (uintptr)h->arena_start >> PageShift;

		// Allocate a new span for the first half.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start, npages/2);
		t->limit = (uintptr)((t->start + npages/2) << PageShift);
		t->state = MSpanInUse;
		t->elemsize = npages << (PageShift - 1);
		t->sweepgen = s->sweepgen;
		if(t->elemsize <= MaxSmallSize) {
			t->sizeclass = runtime_SizeToClass(t->elemsize);
			t->ref = 1;
		}

		// the old span holds the second half.
		s->start += npages/2;
		s->npages = npages/2;
		s->elemsize = npages << (PageShift - 1);
		if(s->elemsize <= MaxSmallSize) {
			s->sizeclass = runtime_SizeToClass(s->elemsize);
			s->ref = 1;
		}

		// update span lookup table
		for(i = p; i < p + npages/2; i++)
			h->spans[i] = t;
	}

	// place the span into a new list
	if(s->sizeclass > 0) {
		runtime_unlock(h);
		c = &h->central[s->sizeclass];
		runtime_lock(c);
		// swept spans are at the end of the list
		runtime_MSpanList_InsertBack(&c->mempty, s);
		runtime_unlock(c);
	} else {
		// Swept spans are at the end of lists.
		if(s->npages < nelem(h->free))
			runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
		else
			runtime_MSpanList_InsertBack(&h->busylarge, s);
		runtime_unlock(h);
	}
}