// libgo/runtime/mheap.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.

#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);

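// RecordSpan is registered below (in runtime_MHeap_Init) as the FixAlloc
// callback for h->spanalloc: it runs once for every MSpan the heap
// allocates, appending the span to h->allspans and growing that array
// (to at least 64 kB worth of entries, or 1.5x the old capacity) when full.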
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats.other_sys);
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc0.c.
			if(h->allspans != runtime_mheap.sweepspans)
				runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats.other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}

// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h)
{
	uint32 i;

	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &mstats.mspan_sys);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &mstats.mcache_sys);
	runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &mstats.other_sys);
	runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &mstats.other_sys);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++) {
		runtime_MSpanList_Init(&h->free[i]);
		runtime_MSpanList_Init(&h->busy[i]);
	}
	runtime_MSpanList_Init(&h->freelarge);
	runtime_MSpanList_Init(&h->busylarge);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}

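// Ensure that h->spans has a mapped entry for every page between
// arena_start and arena_used. The required size is rounded up to whole
// system pages, so repeated calls only map the newly needed tail.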
void
runtime_MHeap_MapSpans(MHeap *h)
{
	uintptr pagesize;
	uintptr n;

	// Map spans array, PageSize at a time.
	n = (uintptr)h->arena_used;
	n -= (uintptr)h->arena_start;
	n = n / PageSize * sizeof(h->spans[0]);
	n = ROUND(n, PageSize);
	pagesize = getpagesize();
	n = ROUND(n, pagesize);
	if(h->spans_mapped >= n)
		return;
	runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats.other_sys);
	h->spans_mapped = n;
}

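// Sweep-generation convention used by the reclaim code below:
//   s->sweepgen == h->sweepgen - 2: the span needs sweeping
//   s->sweepgen == h->sweepgen - 1: the span is being swept
//   s->sweepgen == h->sweepgen:     the span has been swept and is ready to use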
// Sweeps spans in list until it reclaims at least npages into the heap.
// Returns the actual number of pages reclaimed.
static uintptr
MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
{
	MSpan *s;
	uintptr n;
	uint32 sg;

	n = 0;
	sg = runtime_mheap.sweepgen;
retry:
	for(s = list->next; s != list; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(list, s);
			runtime_unlock(h);
			n += runtime_MSpan_Sweep(s);
			runtime_lock(h);
			if(n >= npages)
				return n;
			// the span could have been moved elsewhere
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by the background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in the process of sweeping
		break;
	}
	return n;
}

// Sweeps and reclaims at least npage pages into the heap.
// Called before allocating npage pages.
static void
MHeap_Reclaim(MHeap *h, uintptr npage)
{
	uintptr reclaimed, n;

	// First try to sweep busy spans with large objects of size >= npage;
	// this has a good chance of reclaiming the necessary space.
	for(n=npage; n < nelem(h->busy); n++) {
		if(MHeap_ReclaimList(h, &h->busy[n], npage))
			return;  // Bingo!
	}

	// Then -- even larger objects.
	if(MHeap_ReclaimList(h, &h->busylarge, npage))
		return;  // Bingo!

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed = 0;
	for(n=0; n < npage && n < nelem(h->busy); n++) {
		reclaimed += MHeap_ReclaimList(h, &h->busy[n], npage-reclaimed);
		if(reclaimed >= npage)
			return;
	}

	// Now sweep everything that is not yet swept.
	runtime_unlock(h);
	for(;;) {
		n = runtime_sweepone();
		if(n == (uintptr)-1)  // all spans are swept
			break;
		reclaimed += n;
		if(reclaimed >= npage)
			break;
	}
	runtime_lock(h);
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}

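// Allocate a span of npage pages with the heap locked: reclaim pages by
// sweeping if needed, take the first span from the smallest fixed-size
// free list that fits, otherwise best-fit from the large-span list
// (growing the heap on demand), and trim any excess pages back onto the
// free lists before returning.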
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->spans[p+n] = s;
	return s;
}

// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->freelarge, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
			|| s->npages < best->npages
			|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
			return false;
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	h->spans[p] = s;
	h->spans[p + s->npages - 1] = s;
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}

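// h->spans is indexed by page number relative to arena_start:
// h->spans[((uintptr)addr - (uintptr)h->arena_start) >> PageShift] is the
// MSpan covering addr. The lookups below rely on that layout.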
// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	p -= (uintptr)h->arena_start;
	return h->spans[p >> PageShift];
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	q -= (uintptr)h->arena_start >> PageShift;
	s = h->spans[q];
	if(s == nil || p < s->start || (byte*)v >= s->limit || s->state != MSpanInUse)
		return nil;
	return s;
}

// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	mstats.heap_alloc += runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}

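// Return a span to the free lists with the heap locked: stamp its idle
// time for the scavenger, coalesce it with any free neighbors on either
// side, and insert it into h->free or h->freelarge by size.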
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	MSpan *t;
	PageID p;

	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0 || s->sweepgen != h->sweepgen) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d sweepgen %d/%d\n",
			s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		s->needzero |= t->needzero;
		p -= t->npages;
		h->spans[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}
	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		s->needzero |= t->needzero;
		h->spans[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->freelarge, s);
}

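// Run a garbage collection on behalf of the scavenger goroutine and wake
// the note that the scavenger is sleeping on when the collection is done.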
static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}

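// Release the physical pages of spans in list that have been unused for
// longer than limit nanoseconds. Addresses are rounded inward to whole
// system pages before calling SysUnused, so partial pages are never
// returned. Returns the number of bytes newly accounted as released.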
static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased, start, end, pagesize;
	MSpan *s;

	if(runtime_MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats.heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;

			start = s->start << PageShift;
			end = start + (s->npages << PageShift);

			// Round start up and end down to ensure we
			// are acting on entire pages.
			pagesize = getpagesize();
			start = ROUND(start, pagesize);
			end &= ~(pagesize - 1);
			if(end > start)
				runtime_SysUnused((void*)start, end - start);
		}
	}
	return sumreleased;
}

static void
scavenge(int32 k, uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = &runtime_mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->freelarge, now, limit);

	if(runtime_debug.gctrace > 0) {
		if(sumreleased > 0)
			runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
		runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
			k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
			mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
	}
}

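// The scavenger below wakes up every tick nanoseconds, forces a garbage
// collection if none has run for forcegc nanoseconds, and then returns
// spans that have been idle for longer than limit nanoseconds to the OS.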
// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime_MHeap_Scavenger(void* dummy)
{
	G *g;
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	int64 unixnow;
	uint32 k;
	Note note, *notep;

	USED(dummy);

	g = runtime_g();
	g->issystem = true;
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;

	h = &runtime_mheap;
	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_notetsleepg(&note, tick);

		runtime_lock(h);
		unixnow = runtime_unixnanotime();
		if(unixnow - mstats.last_gc > forcegc) {
			runtime_unlock(h);
			// The scavenger cannot block other goroutines,
			// otherwise the deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			notep = &note;
			__go_go(forcegchelper, (void*)notep);
			runtime_notetsleepg(&note, -1);
			if(runtime_debug.gctrace > 0)
				runtime_printf("scvg%d: GC forced\n", k);
			runtime_lock(h);
		}
		now = runtime_nanotime();
		scavenge(k, now, limit);
		runtime_unlock(h);
	}
}

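// The __asm__ name below binds this function to runtime/debug.FreeOSMemory:
// it forces a full GC with eager sweep and then scavenges with now = ~0
// and limit = 0, so every idle span is released immediately.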
void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");

void
runtime_debug_freeOSMemory(void)
{
	runtime_gc(2); // force GC and do eager sweep
	runtime_lock(&runtime_mheap);
	scavenge(-1, ~(uintptr)0, 0);
	runtime_unlock(&runtime_mheap);
}

// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->incache = false;
	span->elemsize = 0;
	span->state = MSpanDead;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
	span->specialLock.key = 0;
	span->specials = nil;
	span->needzero = 0;
	span->freebuf = nil;
}

// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}

void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}

bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}

void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}

void
runtime_MSpanList_InsertBack(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_InsertBack %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_InsertBack");
	}
	span->next = list;
	span->prev = list->prev;
	span->next->prev = span;
	span->prev->next = span;
}

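// Specials (finalizers and heap profile records) are kept per span in a
// singly linked list sorted by offset within the span and then by kind;
// the insert, remove, and free routines below all rely on that ordering.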
// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
static bool
addspecial(void *p, Special *s)
{
	MSpan *span;
	Special **t, *x;
	uintptr offset;
	byte kind;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("addspecial on invalid pointer");

	// Ensure that the span is swept.
	// The GC accesses the specials list without locks, and it is just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);
	kind = s->kind;

	runtime_lock(&span->specialLock);

	// Find splice point, check for existing record.
	t = &span->specials;
	while((x = *t) != nil) {
		if(offset == x->offset && kind == x->kind) {
			runtime_unlock(&span->specialLock);
			runtime_m()->locks--;
			return false; // already exists
		}
		if(offset < x->offset || (offset == x->offset && kind < x->kind))
			break;
		t = &x->next;
	}
	// Splice in record, fill in offset.
	s->offset = offset;
	s->next = x;
	*t = s;
	runtime_unlock(&span->specialLock);
	runtime_m()->locks--;
	return true;
}

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
static Special*
removespecial(void *p, byte kind)
{
	MSpan *span;
	Special *s, **t;
	uintptr offset;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("removespecial on invalid pointer");

	// Ensure that the span is swept.
	// The GC accesses the specials list without locks, and it is just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);

	runtime_lock(&span->specialLock);
	t = &span->specials;
	while((s = *t) != nil) {
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if(offset == s->offset && kind == s->kind) {
			*t = s->next;
			runtime_unlock(&span->specialLock);
			runtime_m()->locks--;
			return s;
		}
		t = &s->next;
	}
	runtime_unlock(&span->specialLock);
	runtime_m()->locks--;
	return nil;
}

// Adds a finalizer to the object p. Returns true if it succeeded.
bool
runtime_addfinalizer(void *p, FuncVal *f, const FuncType *ft, const PtrType *ot)
{
	SpecialFinalizer *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialFinalizer;
	s->fn = f;
	s->ft = ft;
	s->ot = ot;
	if(addspecial(p, s))
		return true;

	// There was an old finalizer
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
	return false;
}

// Removes the finalizer (if any) from the object p.
void
runtime_removefinalizer(void *p)
{
	SpecialFinalizer *s;

	s = (SpecialFinalizer*)removespecial(p, KindSpecialFinalizer);
	if(s == nil)
		return; // there wasn't a finalizer to remove
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
}

// Set the heap profile bucket associated with addr to b.
void
runtime_setprofilebucket(void *p, Bucket *b)
{
	SpecialProfile *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialProfile;
	s->b = b;
	if(!addspecial(p, s))
		runtime_throw("setprofilebucket: profile already set");
}

// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
// Returns true if we should keep working on deallocating p.
bool
runtime_freespecial(Special *s, void *p, uintptr size, bool freed)
{
	SpecialFinalizer *sf;
	SpecialProfile *sp;

	switch(s->kind) {
	case KindSpecialFinalizer:
		sf = (SpecialFinalizer*)s;
		runtime_queuefinalizer(p, sf->fn, sf->ft, sf->ot);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, sf);
		runtime_unlock(&runtime_mheap.speciallock);
		return false; // don't free p until finalizer is done
	case KindSpecialProfile:
		sp = (SpecialProfile*)s;
		runtime_MProf_Free(sp->b, size, freed);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialprofilealloc, sp);
		runtime_unlock(&runtime_mheap.speciallock);
		return true;
	default:
		runtime_throw("bad special kind");
		return true;
	}
}

// Free all special records for p.
void
runtime_freeallspecials(MSpan *span, void *p, uintptr size)
{
	Special *s, **t, *list;
	uintptr offset;

	if(span->sweepgen != runtime_mheap.sweepgen)
		runtime_throw("runtime: freeallspecials: unswept span");
	// first, collect all specials into the list; then, free them.
	// this is required to avoid a deadlock between span->specialLock and proflock.
	list = nil;
	offset = (uintptr)p - (span->start << PageShift);
	runtime_lock(&span->specialLock);
	t = &span->specials;
	while((s = *t) != nil) {
		if(offset + size <= s->offset)
			break;
		if(offset <= s->offset) {
			*t = s->next;
			s->next = list;
			list = s;
		} else
			t = &s->next;
	}
	runtime_unlock(&span->specialLock);

	while(list != nil) {
		s = list;
		list = s->next;
		if(!runtime_freespecial(s, p, size, true))
			runtime_throw("can't explicitly free an object with a finalizer");
	}
}

// Split an allocated span into two equal parts.
void
runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
{
	MSpan *t;
	MCentral *c;
	uintptr i;
	uintptr npages;
	PageID p;

	if(s->state != MSpanInUse)
		runtime_throw("MHeap_SplitSpan on a free span");
	if(s->sizeclass != 0 && s->ref != 1)
		runtime_throw("MHeap_SplitSpan doesn't have an allocated object");
	npages = s->npages;

	// remove the span from whatever list it is in now
	if(s->sizeclass > 0) {
		// must be in h->central[x].empty
		c = &h->central[s->sizeclass];
		runtime_lock(c);
		runtime_MSpanList_Remove(s);
		runtime_unlock(c);
		runtime_lock(h);
	} else {
		// must be in h->busy/busylarge
		runtime_lock(h);
		runtime_MSpanList_Remove(s);
	}
	// heap is locked now

	if(npages == 1) {
		// convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
		s->ref = 2;
		s->sizeclass = runtime_SizeToClass(PageSize/2);
		s->elemsize = PageSize/2;
	} else {
		// convert span of n>1 pages into two spans of n/2 pages each.
		if((s->npages & 1) != 0)
			runtime_throw("MHeap_SplitSpan on an odd size span");

		// compute position in h->spans
		p = s->start;
		p -= (uintptr)h->arena_start >> PageShift;

		// Allocate a new span for the first half.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start, npages/2);
		t->limit = (byte*)((t->start + npages/2) << PageShift);
		t->state = MSpanInUse;
		t->elemsize = npages << (PageShift - 1);
		t->sweepgen = s->sweepgen;
		if(t->elemsize <= MaxSmallSize) {
			t->sizeclass = runtime_SizeToClass(t->elemsize);
			t->ref = 1;
		}

		// the old span holds the second half.
		s->start += npages/2;
		s->npages = npages/2;
		s->elemsize = npages << (PageShift - 1);
		if(s->elemsize <= MaxSmallSize) {
			s->sizeclass = runtime_SizeToClass(s->elemsize);
			s->ref = 1;
		}

		// update span lookup table
		for(i = p; i < p + npages/2; i++)
			h->spans[i] = t;
	}

	// place the span into a new list
	if(s->sizeclass > 0) {
		runtime_unlock(h);
		c = &h->central[s->sizeclass];
		runtime_lock(c);
		// swept spans are at the end of the list
		runtime_MSpanList_InsertBack(&c->empty, s);
		runtime_unlock(c);
	} else {
		// Swept spans are at the end of lists.
		if(s->npages < nelem(h->free))
			runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
		else
			runtime_MSpanList_InsertBack(&h->busylarge, s);
		runtime_unlock(h);
	}
}