// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
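//
// Illustrative example (not in the original comment): for a 3-page span s
// starting at page p, while s is in use h->spans[p], h->spans[p+1] and
// h->spans[p+2] all point at s; once s is freed, only h->spans[p] and
// h->spans[p+2] are guaranteed to still point at s.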

#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);

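// RecordSpan is registered with h->spanalloc in runtime_MHeap_Init; it records
// each newly allocated MSpan in h->allspans, growing the array (to at least
// 64kB worth of entries, or 1.5x the old capacity) when it fills up.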
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]), &mstats()->other_sys);
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			// Don't free the old array if it's referenced by sweep.
			// See the comment in mgc0.c.
			if(h->allspans != runtime_mheap.sweepspans)
				runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]), &mstats()->other_sys);
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}

// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h)
{
	MStats *pmstats;
	uint32 i;

	pmstats = mstats();
	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), RecordSpan, h, &pmstats->mspan_sys);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), nil, nil, &pmstats->mcache_sys);
	runtime_FixAlloc_Init(&h->specialfinalizeralloc, sizeof(SpecialFinalizer), nil, nil, &pmstats->other_sys);
	runtime_FixAlloc_Init(&h->specialprofilealloc, sizeof(SpecialProfile), nil, nil, &pmstats->other_sys);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++) {
		runtime_MSpanList_Init(&h->free[i]);
		runtime_MSpanList_Init(&h->busy[i]);
	}
	runtime_MSpanList_Init(&h->freelarge);
	runtime_MSpanList_Init(&h->busylarge);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}

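// Map more of the spans lookup array as the arena grows, so that h->spans
// has an entry for every page between arena_start and arena_used.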
void
runtime_MHeap_MapSpans(MHeap *h)
{
	uintptr pagesize;
	uintptr n;

	// Map spans array, PageSize at a time.
	n = (uintptr)h->arena_used;
	n -= (uintptr)h->arena_start;
	n = n / PageSize * sizeof(h->spans[0]);
	n = ROUND(n, PageSize);
	pagesize = getpagesize();
	n = ROUND(n, pagesize);
	if(h->spans_mapped >= n)
		return;
	runtime_SysMap((byte*)h->spans + h->spans_mapped, n - h->spans_mapped, h->arena_reserved, &mstats()->other_sys);
	h->spans_mapped = n;
}

// Sweeps spans in list until it reclaims at least npages pages into the heap.
// Returns the actual number of pages reclaimed.
static uintptr
MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
{
	MSpan *s;
	uintptr n;
	uint32 sg;

	n = 0;
	sg = runtime_mheap.sweepgen;
retry:
	for(s = list->next; s != list; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(list, s);
			runtime_unlock(h);
			n += runtime_MSpan_Sweep(s);
			runtime_lock(h);
			if(n >= npages)
				return n;
			// the span could have been moved elsewhere
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by the background sweeper, skip it
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in the process of sweeping
		break;
	}
	return n;
}

// Sweeps and reclaims at least npage pages into the heap.
// Called before allocating npage pages.
static void
MHeap_Reclaim(MHeap *h, uintptr npage)
{
	uintptr reclaimed, n;

	// First try to sweep busy spans with large objects of size >= npage;
	// this has a good chance of reclaiming the necessary space.
	for(n=npage; n < nelem(h->busy); n++) {
		if(MHeap_ReclaimList(h, &h->busy[n], npage))
			return; // Bingo!
	}

	// Then -- even larger objects.
	if(MHeap_ReclaimList(h, &h->busylarge, npage))
		return; // Bingo!

	// Now try smaller objects.
	// One such object is not enough, so we need to reclaim several of them.
	reclaimed = 0;
	for(n=0; n < npage && n < nelem(h->busy); n++) {
		reclaimed += MHeap_ReclaimList(h, &h->busy[n], npage-reclaimed);
		if(reclaimed >= npage)
			return;
	}

	// Now sweep everything that is not yet swept.
	runtime_unlock(h);
	for(;;) {
		n = runtime_sweepone();
		if(n == (uintptr)-1) // all spans are swept
			break;
		reclaimed += n;
		if(reclaimed >= npage)
			break;
	}
	runtime_lock(h);
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MStats *pmstats;
	MSpan *s;

	runtime_lock(h);
	pmstats = mstats();
	pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		pmstats->heap_inuse += npage<<PageShift;
		if(large) {
			pmstats->heap_objects++;
			pmstats->heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}

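// MHeap_AllocLocked does the actual allocation; the caller must hold h's lock.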
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// To prevent excessive heap growth, before allocating n pages
	// we need to sweep and reclaim at least n pages.
	if(!h->sweepdone)
		MHeap_Reclaim(h, npage);

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	mstats()->heap_idle -= s->npages<<PageShift;
	mstats()->heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0)
		runtime_SysUsed((void*)(s->start<<PageShift), s->npages<<PageShift);
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->spans[p-1] = s;
		h->spans[p] = t;
		h->spans[p+t->npages-1] = t;
		t->needzero = s->needzero;
		runtime_atomicstore(&t->sweepgen, h->sweepgen);
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->spans[p+n] = s;
	return s;
}

// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->freelarge, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats()->heap_sys);
			return false;
		}
	}

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	p -= ((uintptr)h->arena_start>>PageShift);
	h->spans[p] = s;
	h->spans[p + s->npages - 1] = s;
	runtime_atomicstore(&s->sweepgen, h->sweepgen);
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	p -= (uintptr)h->arena_start;
	return h->spans[p >> PageShift];
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	q -= (uintptr)h->arena_start >> PageShift;
	s = h->spans[q];
	if(s == nil || p < s->start || (uintptr)v >= s->limit || s->state != MSpanInUse)
		return nil;
	return s;
}

// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	MStats *pmstats;

	runtime_lock(h);
	pmstats = mstats();
	pmstats->heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	pmstats->heap_inuse -= s->npages<<PageShift;
	if(acct) {
		pmstats->heap_alloc -= s->npages<<PageShift;
		pmstats->heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}

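// MHeap_FreeLocked returns a span to the heap with h already locked, coalescing
// it with free neighbors and putting it on the appropriate free list.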
static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	MSpan *t;
	PageID p;

	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0 || s->sweepgen != h->sweepgen) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d sweepgen %d/%d\n",
			s, s->start<<PageShift, s->state, s->ref, s->sweepgen, h->sweepgen);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats()->heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->spans[p-1]) != nil && t->state != MSpanInUse) {
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		s->needzero |= t->needzero;
		p -= t->npages;
		h->spans[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}
	if((p+s->npages)*sizeof(h->spans[0]) < h->spans_mapped && (t = h->spans[p+s->npages]) != nil && t->state != MSpanInUse) {
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		s->needzero |= t->needzero;
		h->spans[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->freelarge, s);
}

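// forcegchelper runs a garbage collection and then wakes the note passed in;
// the scavenger uses it to force a GC from a separate goroutine.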
static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}

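// scavengelist releases the pages of spans on list that have been unused for
// longer than limit back to the operating system via runtime_SysUnused.
// It returns the number of bytes released.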
static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased, start, end, pagesize;
	MSpan *s;

	if(runtime_MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit && s->npreleased != s->npages) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats()->heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;

			start = s->start << PageShift;
			end = start + (s->npages << PageShift);

			// Round start up and end down to ensure we
			// are acting on entire pages.
			pagesize = getpagesize();
			start = ROUND(start, pagesize);
			end &= ~(pagesize - 1);
			if(end > start)
				runtime_SysUnused((void*)start, end - start);
		}
	}
	return sumreleased;
}

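// scavenge walks every free list in the heap, releasing long-unused pages,
// and prints a summary when runtime_debug.gctrace is set.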
static void
scavenge(int32 k, uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = &runtime_mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->freelarge, now, limit);

	if(runtime_debug.gctrace > 0) {
		if(sumreleased > 0)
			runtime_printf("scvg%d: %D MB released\n", k, (uint64)sumreleased>>20);
		runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
			k, mstats()->heap_inuse>>20, mstats()->heap_idle>>20, mstats()->heap_sys>>20,
			mstats()->heap_released>>20, (mstats()->heap_sys - mstats()->heap_released)>>20);
	}
}

// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime_MHeap_Scavenger(void* dummy)
{
	G *g;
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	int64 unixnow;
	uint32 k;
	Note note, *notep;

	USED(dummy);

	g = runtime_g();
	g->issystem = true;
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;

	h = &runtime_mheap;
	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_notetsleepg(&note, tick);

		runtime_lock(h);
		unixnow = runtime_unixnanotime();
		if(unixnow - mstats()->last_gc > forcegc) {
			runtime_unlock(h);
			// The scavenger must not block other goroutines,
			// otherwise the deadlock detector may fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			notep = &note;
			__go_go(forcegchelper, (void*)notep);
			runtime_notetsleepg(&note, -1);
			if(runtime_debug.gctrace > 0)
				runtime_printf("scvg%d: GC forced\n", k);
			runtime_lock(h);
		}
		now = runtime_nanotime();
		scavenge(k, now, limit);
		runtime_unlock(h);
	}
}

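// Exported to Go as runtime/debug.freeOSMemory: force a garbage collection
// with an eager sweep, then scavenge all free spans immediately.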
void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");

void
runtime_debug_freeOSMemory(void)
{
	runtime_gc(2); // force GC and do eager sweep
	runtime_lock(&runtime_mheap);
	scavenge(-1, ~(uintptr)0, 0);
	runtime_unlock(&runtime_mheap);
}

// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->incache = false;
	span->elemsize = 0;
	span->state = MSpanDead;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
	span->speciallock.key = 0;
	span->specials = nil;
	span->needzero = 0;
	span->freebuf = nil;
}

// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}

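// Remove span from whatever list it is on; a span that is not on any list
// (next and prev both nil) is left alone.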
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}

bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}

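// Insert span at the front of list. The span must not already be on a list.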
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}

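// Insert span at the back of list. The span must not already be on a list.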
void
runtime_MSpanList_InsertBack(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list;
	span->prev = list->prev;
	span->next->prev = span;
	span->prev->next = span;
}

// Adds the special record s to the list of special records for
// the object p. All fields of s should be filled in except for
// offset & next, which this routine will fill in.
// Returns true if the special was successfully added, false otherwise.
// (The add will fail only if a record with the same p and s->kind
// already exists.)
static bool
addspecial(void *p, Special *s)
{
	MSpan *span;
	Special **t, *x;
	uintptr offset;
	byte kind;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("addspecial on invalid pointer");

	// Ensure that the span is swept.
	// GC accesses specials list w/o locks. And it's just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);
	kind = s->kind;

	runtime_lock(&span->speciallock);

	// Find splice point, check for existing record.
	t = &span->specials;
	while((x = *t) != nil) {
		if(offset == x->offset && kind == x->kind) {
			runtime_unlock(&span->speciallock);
			runtime_m()->locks--;
			return false; // already exists
		}
		if(offset < x->offset || (offset == x->offset && kind < x->kind))
			break;
		t = &x->next;
	}
	// Splice in record, fill in offset.
	s->offset = offset;
	s->next = x;
	*t = s;
	runtime_unlock(&span->speciallock);
	runtime_m()->locks--;
	return true;
}

// Removes the Special record of the given kind for the object p.
// Returns the record if the record existed, nil otherwise.
// The caller must FixAlloc_Free the result.
static Special*
removespecial(void *p, byte kind)
{
	MSpan *span;
	Special *s, **t;
	uintptr offset;

	span = runtime_MHeap_LookupMaybe(&runtime_mheap, p);
	if(span == nil)
		runtime_throw("removespecial on invalid pointer");

	// Ensure that the span is swept.
	// GC accesses specials list w/o locks. And it's just much safer.
	runtime_m()->locks++;
	runtime_MSpan_EnsureSwept(span);

	offset = (uintptr)p - (span->start << PageShift);

	runtime_lock(&span->speciallock);
	t = &span->specials;
	while((s = *t) != nil) {
		// This function is used for finalizers only, so we don't check for
		// "interior" specials (p must be exactly equal to s->offset).
		if(offset == s->offset && kind == s->kind) {
			*t = s->next;
			runtime_unlock(&span->speciallock);
			runtime_m()->locks--;
			return s;
		}
		t = &s->next;
	}
	runtime_unlock(&span->speciallock);
	runtime_m()->locks--;
	return nil;
}

// Adds a finalizer to the object p. Returns true if it succeeded.
bool
runtime_addfinalizer(void *p, FuncVal *f, const FuncType *ft, const PtrType *ot)
{
	SpecialFinalizer *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialfinalizeralloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialFinalizer;
	s->fn = f;
	s->ft = ft;
	s->ot = ot;
	if(addspecial(p, s))
		return true;

	// There was an old finalizer
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
	return false;
}

// Removes the finalizer (if any) from the object p.
void
runtime_removefinalizer(void *p)
{
	SpecialFinalizer *s;

	s = (SpecialFinalizer*)removespecial(p, KindSpecialFinalizer);
	if(s == nil)
		return; // there wasn't a finalizer to remove
	runtime_lock(&runtime_mheap.speciallock);
	runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, s);
	runtime_unlock(&runtime_mheap.speciallock);
}

// Set the heap profile bucket associated with addr to b.
void
runtime_setprofilebucket(void *p, Bucket *b)
{
	SpecialProfile *s;

	runtime_lock(&runtime_mheap.speciallock);
	s = runtime_FixAlloc_Alloc(&runtime_mheap.specialprofilealloc);
	runtime_unlock(&runtime_mheap.speciallock);
	s->kind = KindSpecialProfile;
	s->b = b;
	if(!addspecial(p, s))
		runtime_throw("setprofilebucket: profile already set");
}

// Do whatever cleanup needs to be done to deallocate s. It has
// already been unlinked from the MSpan specials list.
// Returns true if we should keep working on deallocating p.
bool
runtime_freespecial(Special *s, void *p, uintptr size, bool freed)
{
	SpecialFinalizer *sf;
	SpecialProfile *sp;

	switch(s->kind) {
	case KindSpecialFinalizer:
		sf = (SpecialFinalizer*)s;
		runtime_queuefinalizer(p, sf->fn, sf->ft, sf->ot);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialfinalizeralloc, sf);
		runtime_unlock(&runtime_mheap.speciallock);
		return false; // don't free p until finalizer is done
	case KindSpecialProfile:
		sp = (SpecialProfile*)s;
		runtime_MProf_Free(sp->b, size, freed);
		runtime_lock(&runtime_mheap.speciallock);
		runtime_FixAlloc_Free(&runtime_mheap.specialprofilealloc, sp);
		runtime_unlock(&runtime_mheap.speciallock);
		return true;
	default:
		runtime_throw("bad special kind");
		return true;
	}
}

// Free all special records for p.
void
runtime_freeallspecials(MSpan *span, void *p, uintptr size)
{
	Special *s, **t, *list;
	uintptr offset;

	if(span->sweepgen != runtime_mheap.sweepgen)
		runtime_throw("runtime: freeallspecials: unswept span");
	// First, collect all specials into the list; then free them.
	// This is required to avoid a deadlock between span->speciallock and proflock.
	list = nil;
	offset = (uintptr)p - (span->start << PageShift);
	runtime_lock(&span->speciallock);
	t = &span->specials;
	while((s = *t) != nil) {
		if(offset + size <= s->offset)
			break;
		if(offset <= s->offset) {
			*t = s->next;
			s->next = list;
			list = s;
		} else
			t = &s->next;
	}
	runtime_unlock(&span->speciallock);

	while(list != nil) {
		s = list;
		list = s->next;
		if(!runtime_freespecial(s, p, size, true))
			runtime_throw("can't explicitly free an object with a finalizer");
	}
}

// Split an allocated span into two equal parts.
void
runtime_MHeap_SplitSpan(MHeap *h, MSpan *s)
{
	MSpan *t;
	MCentral *c;
	uintptr i;
	uintptr npages;
	PageID p;

	if(s->state != MSpanInUse)
		runtime_throw("MHeap_SplitSpan on a free span");
	if(s->sizeclass != 0 && s->ref != 1)
		runtime_throw("MHeap_SplitSpan doesn't have an allocated object");
	npages = s->npages;

	// remove the span from whatever list it is in now
	if(s->sizeclass > 0) {
		// must be in h->central[x].mempty
		c = &h->central[s->sizeclass];
		runtime_lock(c);
		runtime_MSpanList_Remove(s);
		runtime_unlock(c);
		runtime_lock(h);
	} else {
		// must be in h->busy/busylarge
		runtime_lock(h);
		runtime_MSpanList_Remove(s);
	}
	// heap is locked now

	if(npages == 1) {
		// convert span of 1 PageSize object to a span of 2 PageSize/2 objects.
		s->ref = 2;
		s->sizeclass = runtime_SizeToClass(PageSize/2);
		s->elemsize = PageSize/2;
	} else {
		// convert span of n>1 pages into two spans of n/2 pages each.
		if((s->npages & 1) != 0)
			runtime_throw("MHeap_SplitSpan on an odd size span");

		// compute position in h->spans
		p = s->start;
		p -= (uintptr)h->arena_start >> PageShift;

		// Allocate a new span for the first half.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		runtime_MSpan_Init(t, s->start, npages/2);
		t->limit = (uintptr)((t->start + npages/2) << PageShift);
		t->state = MSpanInUse;
		t->elemsize = npages << (PageShift - 1);
		t->sweepgen = s->sweepgen;
		if(t->elemsize <= MaxSmallSize) {
			t->sizeclass = runtime_SizeToClass(t->elemsize);
			t->ref = 1;
		}

		// the old span holds the second half.
		s->start += npages/2;
		s->npages = npages/2;
		s->elemsize = npages << (PageShift - 1);
		if(s->elemsize <= MaxSmallSize) {
			s->sizeclass = runtime_SizeToClass(s->elemsize);
			s->ref = 1;
		}

		// update span lookup table
		for(i = p; i < p + npages/2; i++)
			h->spans[i] = t;
	}

	// place the span into a new list
	if(s->sizeclass > 0) {
		runtime_unlock(h);
		c = &h->central[s->sizeclass];
		runtime_lock(c);
		// swept spans are at the end of the list
		runtime_MSpanList_InsertBack(&c->mempty, s);
		runtime_unlock(c);
	} else {
		// Swept spans are at the end of lists.
		if(s->npages < nelem(h->free))
			runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
		else
			runtime_MSpanList_InsertBack(&h->busylarge, s);
		runtime_unlock(h);
	}
}