[official-gcc.git] / libgo / runtime / mgc0.c
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector (GC).
6 //
7 // GC is:
8 // - mark&sweep
9 // - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc)
10 // - parallel (up to MaxGcproc threads)
11 // - partially concurrent (mark is stop-the-world, while sweep is concurrent)
12 // - non-moving/non-compacting
13 // - full (non-partial)
15 // GC rate.
16 // Next GC is after we've allocated an extra amount of memory proportional to
17 // the amount already in use. The proportion is controlled by the GOGC environment variable
18 // (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
19 // (this mark is tracked in the next_gc variable). This keeps the GC cost in linear
20 // proportion to the allocation cost. Adjusting GOGC just changes the linear constant
21 // (and also the amount of extra memory used); see the illustrative sketch just after this comment.
23 // Concurrent sweep.
24 // The sweep phase proceeds concurrently with normal program execution.
25 // The heap is swept span-by-span both lazily (when a goroutine needs another span)
26 // and concurrently in a background goroutine (this helps programs that are not CPU bound).
27 // However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
28 // and so next_gc calculation is tricky and happens as follows.
29 // At the end of the stop-the-world phase next_gc is conservatively set based on total
30 // heap size; all spans are marked as "needs sweeping".
31 // Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
32 // The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
33 // closer to the target value. However, this is not enough to avoid over-allocating memory.
34 // Consider that a goroutine wants to allocate a new span for a large object and
35 // there are no free swept spans, but there are small-object unswept spans.
36 // If the goroutine naively allocates a new span, it can surpass the yet-unknown
37 // target next_gc value. In order to prevent such cases (1) when a goroutine needs
38 // to allocate a new small-object span, it sweeps small-object spans for the same
39 // object size until it frees at least one object; (2) when a goroutine needs to
40 // allocate large-object span from heap, it sweeps spans until it frees at least
41 // that many pages into heap. Together these two measures ensure that we don't surpass
42 // target next_gc value by a large margin. There is an exception: if a goroutine sweeps
43 // and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
44 // but there can still be other one-page unswept spans which could be combined into a two-page span.
45 // It's critical to ensure that no operations proceed on unswept spans (that would corrupt
46 // mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
47 // so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
48 // When a goroutine explicitly frees an object or sets a finalizer, it ensures that
49 // the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
50 // The finalizer goroutine is kicked off only when all spans are swept.
51 // When the next GC starts, it sweeps all not-yet-swept spans (if any).
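// --- Illustrative sketch (not part of the original mgc0.c) -----------------
// A minimal numeric model of the pacing rule described above, included only
// for illustration. With GOGC=100 and 4M of live heap, sketch_next_gc yields
// 8M; sweeping then refunds freed*(GOGC+100)/100 bytes of the budget, which
// is the factor runtime_MSpan_Sweep subtracts from next_gc further below.
// The function names and standalone types are invented for this sketch.
#include <stdint.h>

static uint64_t
sketch_next_gc(uint64_t live_heap, uint64_t gogc)
{
	// next_gc = live heap + live heap * GOGC/100
	return live_heap + live_heap*gogc/100;
}

static uint64_t
sketch_sweep_credit(uint64_t next_gc, uint64_t freed, uint64_t gogc)
{
	// Refund the freed memory, scaled the same way it was charged.
	return next_gc - freed*(gogc + 100)/100;
}
// ---------------------------------------------------------------------------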
53 #include <unistd.h>
55 #include "runtime.h"
56 #include "arch.h"
57 #include "malloc.h"
58 #include "mgc0.h"
59 #include "go-type.h"
61 // Map gccgo field names to gc field names.
62 // Slice aka __go_open_array.
63 #define array __values
64 #define cap __capacity
65 // Hmap aka __go_map
66 typedef struct __go_map Hmap;
67 // Type aka __go_type_descriptor
68 #define string __reflection
69 // PtrType aka __go_ptr_type
70 #define elem __element_type
72 #ifdef USING_SPLIT_STACK
74 extern void * __splitstack_find (void *, void *, size_t *, void **, void **,
75 void **);
77 extern void * __splitstack_find_context (void *context[10], size_t *, void **,
78 void **, void **);
80 #endif
82 enum {
83 Debug = 0,
84 CollectStats = 0,
85 ConcurrentSweep = 1,
87 WorkbufSize = 16*1024,
88 FinBlockSize = 4*1024,
90 handoffThreshold = 4,
91 IntermediateBufferCapacity = 64,
93 // Bits in type information
94 PRECISE = 1,
95 LOOP = 2,
96 PC_BITS = PRECISE | LOOP,
98 RootData = 0,
99 RootBss = 1,
100 RootFinalizers = 2,
101 RootSpanTypes = 3,
102 RootFlushCaches = 4,
103 RootCount = 5,
106 #define GcpercentUnknown (-2)
108 // Initialized from $GOGC. GOGC=off means no gc.
109 static int32 gcpercent = GcpercentUnknown;
111 static FuncVal* poolcleanup;
113 void sync_runtime_registerPoolCleanup(FuncVal*)
114 __asm__ (GOSYM_PREFIX "sync.runtime_registerPoolCleanup");
116 void
117 sync_runtime_registerPoolCleanup(FuncVal *f)
119 poolcleanup = f;
122 static void
123 clearpools(void)
125 P *p, **pp;
126 MCache *c;
127 Defer *d, *dlink;
129 // clear sync.Pool's
130 if(poolcleanup != nil) {
131 __builtin_call_with_static_chain(poolcleanup->fn(),
132 poolcleanup);
135 for(pp=runtime_allp; (p=*pp) != nil; pp++) {
136 // clear tinyalloc pool
137 c = p->mcache;
138 if(c != nil) {
139 c->tiny = nil;
140 c->tinysize = 0;
144 // Clear central defer pools.
145 // Leave per-P pools alone, they have strictly bounded size.
146 runtime_lock(&runtime_sched->deferlock);
147 for(d = runtime_sched->deferpool; d != nil; d = dlink) {
148 dlink = d->link;
149 d->link = nil;
151 runtime_sched->deferpool = nil;
152 runtime_unlock(&runtime_sched->deferlock);
155 typedef struct Workbuf Workbuf;
156 struct Workbuf
158 #define SIZE (WorkbufSize-sizeof(LFNode)-sizeof(uintptr))
159 LFNode node; // must be first
160 uintptr nobj;
161 Obj obj[SIZE/sizeof(Obj) - 1];
162 uint8 _padding[SIZE%sizeof(Obj) + sizeof(Obj)];
163 #undef SIZE
166 typedef struct Finalizer Finalizer;
167 struct Finalizer
169 FuncVal *fn;
170 void *arg;
171 const struct __go_func_type *ft;
172 const PtrType *ot;
175 typedef struct FinBlock FinBlock;
176 struct FinBlock
178 FinBlock *alllink;
179 FinBlock *next;
180 int32 cnt;
181 int32 cap;
182 Finalizer fin[1];
185 static Lock finlock; // protects the following variables
186 static FinBlock *finq; // list of finalizers that are to be executed
187 static FinBlock *finc; // cache of free blocks
188 static FinBlock *allfin; // list of all blocks
189 bool runtime_fingwait;
190 bool runtime_fingwake;
192 static Lock gclock;
193 static G* fing;
195 static void runfinq(void*);
196 static void bgsweep(void*);
197 static Workbuf* getempty(Workbuf*);
198 static Workbuf* getfull(Workbuf*);
199 static void putempty(Workbuf*);
200 static Workbuf* handoff(Workbuf*);
201 static void gchelperstart(void);
202 static void flushallmcaches(void);
203 static void addstackroots(G *gp, Workbuf **wbufp);
205 static struct {
206 uint64 full; // lock-free list of full blocks
207 uint64 wempty; // lock-free list of empty blocks
208 byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
209 uint32 nproc;
210 int64 tstart;
211 volatile uint32 nwait;
212 volatile uint32 ndone;
213 Note alldone;
214 ParFor *markfor;
216 Lock;
217 byte *chunk;
218 uintptr nchunk;
219 } work __attribute__((aligned(8)));
221 enum {
222 GC_DEFAULT_PTR = GC_NUM_INSTR,
223 GC_CHAN,
225 GC_NUM_INSTR2
228 static struct {
229 struct {
230 uint64 sum;
231 uint64 cnt;
232 } ptr;
233 uint64 nbytes;
234 struct {
235 uint64 sum;
236 uint64 cnt;
237 uint64 notype;
238 uint64 typelookup;
239 } obj;
240 uint64 rescan;
241 uint64 rescanbytes;
242 uint64 instr[GC_NUM_INSTR2];
243 uint64 putempty;
244 uint64 getfull;
245 struct {
246 uint64 foundbit;
247 uint64 foundword;
248 uint64 foundspan;
249 } flushptrbuf;
250 struct {
251 uint64 foundbit;
252 uint64 foundword;
253 uint64 foundspan;
254 } markonly;
255 uint32 nbgsweep;
256 uint32 npausesweep;
257 } gcstats;
259 // markonly marks an object. It returns true if the object
260 // has been marked by this function, false otherwise.
261 // This function doesn't append the object to any buffer.
262 static bool
263 markonly(const void *obj)
265 byte *p;
266 uintptr *bitp, bits, shift, x, xbits, off, j;
267 MSpan *s;
268 PageID k;
270 // Words outside the arena cannot be pointers.
271 if((const byte*)obj < runtime_mheap.arena_start || (const byte*)obj >= runtime_mheap.arena_used)
272 return false;
274 // obj may be a pointer to a live object.
275 // Try to find the beginning of the object.
277 // Round down to word boundary.
278 obj = (const void*)((uintptr)obj & ~((uintptr)PtrSize-1));
280 // Find bits for this word.
281 off = (const uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
282 bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
283 shift = off % wordsPerBitmapWord;
284 xbits = *bitp;
285 bits = xbits >> shift;
287 // Pointing at the beginning of a block?
288 if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
289 if(CollectStats)
290 runtime_xadd64(&gcstats.markonly.foundbit, 1);
291 goto found;
294 // Pointing just past the beginning?
295 // Scan backward a little to find a block boundary.
296 for(j=shift; j-->0; ) {
297 if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
298 shift = j;
299 bits = xbits>>shift;
300 if(CollectStats)
301 runtime_xadd64(&gcstats.markonly.foundword, 1);
302 goto found;
306 // Otherwise consult span table to find beginning.
307 // (Manually inlined copy of MHeap_LookupMaybe.)
308 k = (uintptr)obj>>PageShift;
309 x = k;
310 x -= (uintptr)runtime_mheap.arena_start>>PageShift;
311 s = runtime_mheap.spans[x];
312 if(s == nil || k < s->start || (uintptr)obj >= s->limit || s->state != MSpanInUse)
313 return false;
314 p = (byte*)((uintptr)s->start<<PageShift);
315 if(s->sizeclass == 0) {
316 obj = p;
317 } else {
318 uintptr size = s->elemsize;
319 int32 i = ((const byte*)obj - p)/size;
320 obj = p+i*size;
323 // Now that we know the object header, reload bits.
324 off = (const uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
325 bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
326 shift = off % wordsPerBitmapWord;
327 xbits = *bitp;
328 bits = xbits >> shift;
329 if(CollectStats)
330 runtime_xadd64(&gcstats.markonly.foundspan, 1);
332 found:
333 // Now we have bits, bitp, and shift correct for
334 // obj pointing at the base of the object.
335 // Only care about allocated and not marked.
336 if((bits & (bitAllocated|bitMarked)) != bitAllocated)
337 return false;
338 if(work.nproc == 1)
339 *bitp |= bitMarked<<shift;
340 else {
341 for(;;) {
342 x = *bitp;
343 if(x & (bitMarked<<shift))
344 return false;
345 if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))
346 break;
350 // The object is now marked
351 return true;
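// --- Illustrative sketch (not part of the original mgc0.c) -----------------
// The heap-bitmap addressing used by markonly (and again by flushptrbuf),
// pulled out on its own. The bitmap grows downward from arena_start, one
// bitmap word describing wordsPerBitmapWord heap words: for the heap word at
// index off, the describing word is arena_start[-(off/wordsPerBitmapWord)-1],
// and its flag bits are read after shifting right by off%wordsPerBitmapWord.
// wordsPerBitmapWord is a constant defined elsewhere in the runtime; it is a
// parameter here only to keep the sketch self-contained.
#include <stdint.h>

static uintptr_t*
sketch_bitmap_word(uintptr_t *arena_start, const void *obj,
                   uintptr_t wordsPerBitmapWord, uintptr_t *shift)
{
	uintptr_t off = (uintptr_t)((const uintptr_t*)obj - arena_start);

	*shift = off % wordsPerBitmapWord;
	return arena_start - off/wordsPerBitmapWord - 1;
}
// ---------------------------------------------------------------------------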
354 // PtrTarget is a structure used by intermediate buffers.
355 // The intermediate buffers hold GC data before it
356 // is moved/flushed to the work buffer (Workbuf).
357 // The size of an intermediate buffer is very small,
358 // such as 32 or 64 elements.
359 typedef struct PtrTarget PtrTarget;
360 struct PtrTarget
362 void *p;
363 uintptr ti;
366 typedef struct Scanbuf Scanbuf;
367 struct Scanbuf
369 struct {
370 PtrTarget *begin;
371 PtrTarget *end;
372 PtrTarget *pos;
373 } ptr;
374 struct {
375 Obj *begin;
376 Obj *end;
377 Obj *pos;
378 } obj;
379 Workbuf *wbuf;
380 Obj *wp;
381 uintptr nobj;
384 typedef struct BufferList BufferList;
385 struct BufferList
387 PtrTarget ptrtarget[IntermediateBufferCapacity];
388 Obj obj[IntermediateBufferCapacity];
389 uint32 busy;
390 byte pad[CacheLineSize];
392 static BufferList bufferList[MaxGcproc];
394 static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
396 // flushptrbuf moves data from the PtrTarget buffer to the work buffer.
397 // The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
398 // while the work buffer contains blocks which have been marked
399 // and are prepared to be scanned by the garbage collector.
401 // _wp, _wbuf, _nobj are input/output parameters specifying the work buffer.
403 // A simplified drawing explaining how the todo-list moves from a structure to another:
405 // scanblock
406 // (find pointers)
407 // Obj ------> PtrTarget (pointer targets)
408 // ↑ |
409 // | |
410 // `----------'
411 // flushptrbuf
412 // (find block start, mark and enqueue)
413 static void
414 flushptrbuf(Scanbuf *sbuf)
416 byte *p, *arena_start, *obj;
417 uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti, n;
418 MSpan *s;
419 PageID k;
420 Obj *wp;
421 Workbuf *wbuf;
422 PtrTarget *ptrbuf;
423 PtrTarget *ptrbuf_end;
425 arena_start = runtime_mheap.arena_start;
427 wp = sbuf->wp;
428 wbuf = sbuf->wbuf;
429 nobj = sbuf->nobj;
431 ptrbuf = sbuf->ptr.begin;
432 ptrbuf_end = sbuf->ptr.pos;
433 n = ptrbuf_end - sbuf->ptr.begin;
434 sbuf->ptr.pos = sbuf->ptr.begin;
436 if(CollectStats) {
437 runtime_xadd64(&gcstats.ptr.sum, n);
438 runtime_xadd64(&gcstats.ptr.cnt, 1);
441 // If buffer is nearly full, get a new one.
442 if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
443 if(wbuf != nil)
444 wbuf->nobj = nobj;
445 wbuf = getempty(wbuf);
446 wp = wbuf->obj;
447 nobj = 0;
449 if(n >= nelem(wbuf->obj))
450 runtime_throw("ptrbuf has to be smaller than WorkBuf");
453 while(ptrbuf < ptrbuf_end) {
454 obj = ptrbuf->p;
455 ti = ptrbuf->ti;
456 ptrbuf++;
458 // obj belongs to interval [mheap.arena_start, mheap.arena_used).
459 if(Debug > 1) {
460 if(obj < runtime_mheap.arena_start || obj >= runtime_mheap.arena_used)
461 runtime_throw("object is outside of mheap");
464 // obj may be a pointer to a live object.
465 // Try to find the beginning of the object.
467 // Round down to word boundary.
468 if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
469 obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
470 ti = 0;
473 // Find bits for this word.
474 off = (uintptr*)obj - (uintptr*)arena_start;
475 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
476 shift = off % wordsPerBitmapWord;
477 xbits = *bitp;
478 bits = xbits >> shift;
480 // Pointing at the beginning of a block?
481 if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
482 if(CollectStats)
483 runtime_xadd64(&gcstats.flushptrbuf.foundbit, 1);
484 goto found;
487 ti = 0;
489 // Pointing just past the beginning?
490 // Scan backward a little to find a block boundary.
491 for(j=shift; j-->0; ) {
492 if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
493 obj = (byte*)obj - (shift-j)*PtrSize;
494 shift = j;
495 bits = xbits>>shift;
496 if(CollectStats)
497 runtime_xadd64(&gcstats.flushptrbuf.foundword, 1);
498 goto found;
502 // Otherwise consult span table to find beginning.
503 // (Manually inlined copy of MHeap_LookupMaybe.)
504 k = (uintptr)obj>>PageShift;
505 x = k;
506 x -= (uintptr)arena_start>>PageShift;
507 s = runtime_mheap.spans[x];
508 if(s == nil || k < s->start || (uintptr)obj >= s->limit || s->state != MSpanInUse)
509 continue;
510 p = (byte*)((uintptr)s->start<<PageShift);
511 if(s->sizeclass == 0) {
512 obj = p;
513 } else {
514 size = s->elemsize;
515 int32 i = ((byte*)obj - p)/size;
516 obj = p+i*size;
519 // Now that we know the object header, reload bits.
520 off = (uintptr*)obj - (uintptr*)arena_start;
521 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
522 shift = off % wordsPerBitmapWord;
523 xbits = *bitp;
524 bits = xbits >> shift;
525 if(CollectStats)
526 runtime_xadd64(&gcstats.flushptrbuf.foundspan, 1);
528 found:
529 // Now we have bits, bitp, and shift correct for
530 // obj pointing at the base of the object.
531 // Only care about allocated and not marked.
532 if((bits & (bitAllocated|bitMarked)) != bitAllocated)
533 continue;
534 if(work.nproc == 1)
535 *bitp |= bitMarked<<shift;
536 else {
537 for(;;) {
538 x = *bitp;
539 if(x & (bitMarked<<shift))
540 goto continue_obj;
541 if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))
542 break;
546 // If object has no pointers, don't need to scan further.
547 if((bits & bitScan) == 0)
548 continue;
550 // Ask span about size class.
551 // (Manually inlined copy of MHeap_Lookup.)
552 x = (uintptr)obj >> PageShift;
553 x -= (uintptr)arena_start>>PageShift;
554 s = runtime_mheap.spans[x];
556 PREFETCH(obj);
558 *wp = (Obj){obj, s->elemsize, ti};
559 wp++;
560 nobj++;
561 continue_obj:;
564 // If another proc wants a pointer, give it some.
565 if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
566 wbuf->nobj = nobj;
567 wbuf = handoff(wbuf);
568 nobj = wbuf->nobj;
569 wp = wbuf->obj + nobj;
572 sbuf->wp = wp;
573 sbuf->wbuf = wbuf;
574 sbuf->nobj = nobj;
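// --- Illustrative sketch (not part of the original mgc0.c) -----------------
// The buffering pattern behind flushptrbuf/flushobjbuf, reduced to its
// skeleton: the scanner appends to a small fixed-size array through a moving
// position index and calls the flush routine whenever the array fills up (and
// once more when scanning finishes). All names below are invented for the
// sketch; a callback stands in for "find block start, mark, enqueue".
#include <stddef.h>

enum { SKETCH_CAP = 64 };		/* like IntermediateBufferCapacity */

struct sketch_buf {
	void	*item[SKETCH_CAP];
	size_t	pos;			/* next free slot */
};

static void
sketch_flush(struct sketch_buf *b, void (*consume)(void*))
{
	size_t i;

	for(i = 0; i < b->pos; i++)
		consume(b->item[i]);	/* e.g. mark the block and enqueue it */
	b->pos = 0;			/* buffer is empty again */
}

static void
sketch_push(struct sketch_buf *b, void *p, void (*consume)(void*))
{
	b->item[b->pos++] = p;
	if(b->pos == SKETCH_CAP)	/* full: hand the batch off in bulk */
		sketch_flush(b, consume);
}
// ---------------------------------------------------------------------------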
577 static void
578 flushobjbuf(Scanbuf *sbuf)
580 uintptr nobj, off;
581 Obj *wp, obj;
582 Workbuf *wbuf;
583 Obj *objbuf;
584 Obj *objbuf_end;
586 wp = sbuf->wp;
587 wbuf = sbuf->wbuf;
588 nobj = sbuf->nobj;
590 objbuf = sbuf->obj.begin;
591 objbuf_end = sbuf->obj.pos;
592 sbuf->obj.pos = sbuf->obj.begin;
594 while(objbuf < objbuf_end) {
595 obj = *objbuf++;
597 // Align obj.b to a word boundary.
598 off = (uintptr)obj.p & (PtrSize-1);
599 if(off != 0) {
600 obj.p += PtrSize - off;
601 obj.n -= PtrSize - off;
602 obj.ti = 0;
605 if(obj.p == nil || obj.n == 0)
606 continue;
608 // If buffer is full, get a new one.
609 if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
610 if(wbuf != nil)
611 wbuf->nobj = nobj;
612 wbuf = getempty(wbuf);
613 wp = wbuf->obj;
614 nobj = 0;
617 *wp = obj;
618 wp++;
619 nobj++;
622 // If another proc wants a pointer, give it some.
623 if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
624 wbuf->nobj = nobj;
625 wbuf = handoff(wbuf);
626 nobj = wbuf->nobj;
627 wp = wbuf->obj + nobj;
630 sbuf->wp = wp;
631 sbuf->wbuf = wbuf;
632 sbuf->nobj = nobj;
635 // Program that scans the whole block and treats every block element as a potential pointer
636 static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
638 // Hchan program
639 static uintptr chanProg[2] = {0, GC_CHAN};
641 // Local variables of a program fragment or loop
642 typedef struct GCFrame GCFrame;
643 struct GCFrame {
644 uintptr count, elemsize, b;
645 const uintptr *loop_or_ret;
648 // Sanity check for the derived type info objti.
649 static void
650 checkptr(void *obj, uintptr objti)
652 uintptr *pc1, type, tisize, i, j, x;
653 const uintptr *pc2;
654 byte *objstart;
655 Type *t;
656 MSpan *s;
658 if(!Debug)
659 runtime_throw("checkptr is debug only");
661 if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)
662 return;
663 type = runtime_gettype(obj);
664 t = (Type*)(type & ~(uintptr)(PtrSize-1));
665 if(t == nil)
666 return;
667 x = (uintptr)obj >> PageShift;
668 x -= (uintptr)(runtime_mheap.arena_start)>>PageShift;
669 s = runtime_mheap.spans[x];
670 objstart = (byte*)((uintptr)s->start<<PageShift);
671 if(s->sizeclass != 0) {
672 i = ((byte*)obj - objstart)/s->elemsize;
673 objstart += i*s->elemsize;
675 tisize = *(uintptr*)objti;
676 // Sanity check for object size: it should fit into the memory block.
677 if((byte*)obj + tisize > objstart + s->elemsize) {
678 runtime_printf("object of type '%S' at %p/%p does not fit in block %p/%p\n",
679 *t->string, obj, tisize, objstart, s->elemsize);
680 runtime_throw("invalid gc type info");
682 if(obj != objstart)
683 return;
684 // If obj points to the beginning of the memory block,
685 // check type info as well.
686 if(t->string == nil ||
687 // Gob allocates unsafe pointers for indirection.
688 (runtime_strcmp((const char *)t->string->str, (const char*)"unsafe.Pointer") &&
689 // Runtime and gc think differently about closures.
690 runtime_strstr((const char *)t->string->str, (const char*)"struct { F uintptr") != (const char *)t->string->str)) {
691 pc1 = (uintptr*)objti;
692 pc2 = (const uintptr*)t->__gc;
693 // A simple best-effort check until first GC_END.
694 for(j = 1; pc1[j] != GC_END && pc2[j] != GC_END; j++) {
695 if(pc1[j] != pc2[j]) {
696 runtime_printf("invalid gc type info for '%s', type info %p [%d]=%p, block info %p [%d]=%p\n",
697 t->string ? (const int8*)t->string->str : (const int8*)"?", pc1, (int32)j, pc1[j], pc2, (int32)j, pc2[j]);
698 runtime_throw("invalid gc type info");
704 // scanblock scans a block of n bytes starting at pointer b for references
705 // to other objects, scanning any it finds recursively until there are no
706 // unscanned objects left. Instead of using an explicit recursion, it keeps
707 // a work list in the Workbuf* structures and loops in the main function
708 // body. Keeping an explicit work list is easier on the stack allocator and
709 // more efficient.
710 static void
711 scanblock(Workbuf *wbuf, bool keepworking)
713 byte *b, *arena_start, *arena_used;
714 uintptr n, i, end_b, elemsize, size, ti, objti, count, type, nobj;
715 uintptr precise_type, nominal_size;
716 const uintptr *pc, *chan_ret;
717 uintptr chancap;
718 void *obj;
719 const Type *t, *et;
720 Slice *sliceptr;
721 String *stringptr;
722 GCFrame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
723 BufferList *scanbuffers;
724 Scanbuf sbuf;
725 Eface *eface;
726 Iface *iface;
727 Hchan *chan;
728 const ChanType *chantype;
729 Obj *wp;
731 if(sizeof(Workbuf) % WorkbufSize != 0)
732 runtime_throw("scanblock: size of Workbuf is suboptimal");
734 // Memory arena parameters.
735 arena_start = runtime_mheap.arena_start;
736 arena_used = runtime_mheap.arena_used;
738 stack_ptr = stack+nelem(stack)-1;
740 precise_type = false;
741 nominal_size = 0;
743 if(wbuf) {
744 nobj = wbuf->nobj;
745 wp = &wbuf->obj[nobj];
746 } else {
747 nobj = 0;
748 wp = nil;
751 // Initialize sbuf
752 scanbuffers = &bufferList[runtime_m()->helpgc];
754 sbuf.ptr.begin = sbuf.ptr.pos = &scanbuffers->ptrtarget[0];
755 sbuf.ptr.end = sbuf.ptr.begin + nelem(scanbuffers->ptrtarget);
757 sbuf.obj.begin = sbuf.obj.pos = &scanbuffers->obj[0];
758 sbuf.obj.end = sbuf.obj.begin + nelem(scanbuffers->obj);
760 sbuf.wbuf = wbuf;
761 sbuf.wp = wp;
762 sbuf.nobj = nobj;
764 // (Silence the compiler)
765 chan = nil;
766 chantype = nil;
767 chan_ret = nil;
769 goto next_block;
771 for(;;) {
772 // Each iteration scans the block b of length n, queueing pointers in
773 // the work buffer.
775 if(CollectStats) {
776 runtime_xadd64(&gcstats.nbytes, n);
777 runtime_xadd64(&gcstats.obj.sum, sbuf.nobj);
778 runtime_xadd64(&gcstats.obj.cnt, 1);
781 if(ti != 0) {
782 if(Debug > 1) {
783 runtime_printf("scanblock %p %D ti %p\n", b, (int64)n, ti);
785 pc = (uintptr*)(ti & ~(uintptr)PC_BITS);
786 precise_type = (ti & PRECISE);
787 stack_top.elemsize = pc[0];
788 if(!precise_type)
789 nominal_size = pc[0];
790 if(ti & LOOP) {
791 stack_top.count = 0; // 0 means an infinite number of iterations
792 stack_top.loop_or_ret = pc+1;
793 } else {
794 stack_top.count = 1;
796 if(Debug) {
797 // Simple sanity check for provided type info ti:
798 // The declared size of the object must not be larger than the actual size
799 // (it can be smaller due to inferior pointers).
800 // It's difficult to make a comprehensive check due to inferior pointers,
801 // reflection, gob, etc.
802 if(pc[0] > n) {
803 runtime_printf("invalid gc type info: type info size %p, block size %p\n", pc[0], n);
804 runtime_throw("invalid gc type info");
807 } else if(UseSpanType) {
808 if(CollectStats)
809 runtime_xadd64(&gcstats.obj.notype, 1);
811 type = runtime_gettype(b);
812 if(type != 0) {
813 if(CollectStats)
814 runtime_xadd64(&gcstats.obj.typelookup, 1);
816 t = (Type*)(type & ~(uintptr)(PtrSize-1));
817 switch(type & (PtrSize-1)) {
818 case TypeInfo_SingleObject:
819 pc = (const uintptr*)t->__gc;
820 precise_type = true; // type information about 'b' is precise
821 stack_top.count = 1;
822 stack_top.elemsize = pc[0];
823 break;
824 case TypeInfo_Array:
825 pc = (const uintptr*)t->__gc;
826 if(pc[0] == 0)
827 goto next_block;
828 precise_type = true; // type information about 'b' is precise
829 stack_top.count = 0; // 0 means an infinite number of iterations
830 stack_top.elemsize = pc[0];
831 stack_top.loop_or_ret = pc+1;
832 break;
833 case TypeInfo_Chan:
834 chan = (Hchan*)b;
835 chantype = (const ChanType*)t;
836 chan_ret = nil;
837 pc = chanProg;
838 break;
839 default:
840 if(Debug > 1)
841 runtime_printf("scanblock %p %D type %p %S\n", b, (int64)n, type, *t->string);
842 runtime_throw("scanblock: invalid type");
843 return;
845 if(Debug > 1)
846 runtime_printf("scanblock %p %D type %p %S pc=%p\n", b, (int64)n, type, *t->string, pc);
847 } else {
848 pc = defaultProg;
849 if(Debug > 1)
850 runtime_printf("scanblock %p %D unknown type\n", b, (int64)n);
852 } else {
853 pc = defaultProg;
854 if(Debug > 1)
855 runtime_printf("scanblock %p %D no span types\n", b, (int64)n);
858 if(IgnorePreciseGC)
859 pc = defaultProg;
861 pc++;
862 stack_top.b = (uintptr)b;
863 end_b = (uintptr)b + n - PtrSize;
865 for(;;) {
866 if(CollectStats)
867 runtime_xadd64(&gcstats.instr[pc[0]], 1);
869 obj = nil;
870 objti = 0;
871 switch(pc[0]) {
872 case GC_PTR:
873 obj = *(void**)(stack_top.b + pc[1]);
874 objti = pc[2];
875 if(Debug > 2)
876 runtime_printf("gc_ptr @%p: %p ti=%p\n", stack_top.b+pc[1], obj, objti);
877 pc += 3;
878 if(Debug)
879 checkptr(obj, objti);
880 break;
882 case GC_SLICE:
883 sliceptr = (Slice*)(stack_top.b + pc[1]);
884 if(Debug > 2)
885 runtime_printf("gc_slice @%p: %p/%D/%D\n", sliceptr, sliceptr->array, (int64)sliceptr->__count, (int64)sliceptr->cap);
886 if(sliceptr->cap != 0) {
887 obj = sliceptr->array;
888 // Can't use slice element type for scanning,
889 // because if it points to an array embedded
890 // in the beginning of a struct,
891 // we will scan the whole struct as the slice.
892 // So just obtain type info from heap.
894 pc += 3;
895 break;
897 case GC_APTR:
898 obj = *(void**)(stack_top.b + pc[1]);
899 if(Debug > 2)
900 runtime_printf("gc_aptr @%p: %p\n", stack_top.b+pc[1], obj);
901 pc += 2;
902 break;
904 case GC_STRING:
905 stringptr = (String*)(stack_top.b + pc[1]);
906 if(Debug > 2)
907 runtime_printf("gc_string @%p: %p/%D\n", stack_top.b+pc[1], stringptr->str, (int64)stringptr->len);
908 if(stringptr->len != 0)
909 markonly(stringptr->str);
910 pc += 2;
911 continue;
913 case GC_EFACE:
914 eface = (Eface*)(stack_top.b + pc[1]);
915 pc += 2;
916 if(Debug > 2)
917 runtime_printf("gc_eface @%p: %p %p\n", stack_top.b+pc[1], eface->_type, eface->data);
918 if(eface->_type == nil)
919 continue;
921 // eface->type
922 t = eface->_type;
923 if((const byte*)t >= arena_start && (const byte*)t < arena_used) {
924 union { const Type *tc; Type *tr; } u;
925 u.tc = t;
926 *sbuf.ptr.pos++ = (PtrTarget){u.tr, 0};
927 if(sbuf.ptr.pos == sbuf.ptr.end)
928 flushptrbuf(&sbuf);
931 // eface->data
932 if((byte*)eface->data >= arena_start && (byte*)eface->data < arena_used) {
933 if(__go_is_pointer_type(t)) {
934 if((t->__code & kindNoPointers))
935 continue;
937 obj = eface->data;
938 if((t->__code & kindMask) == kindPtr) {
939 // Only use type information if it is a pointer-containing type.
940 // This matches the GC programs written by cmd/gc/reflect.c's
941 // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
942 et = ((const PtrType*)t)->elem;
943 if(!(et->__code & kindNoPointers))
944 objti = (uintptr)((const PtrType*)t)->elem->__gc;
946 } else {
947 obj = eface->data;
948 objti = (uintptr)t->__gc;
951 break;
953 case GC_IFACE:
954 iface = (Iface*)(stack_top.b + pc[1]);
955 pc += 2;
956 if(Debug > 2)
957 runtime_printf("gc_iface @%p: %p/%p %p\n", stack_top.b+pc[1], *(Type**)iface->tab, nil, iface->data);
958 if(iface->tab == nil)
959 continue;
961 // iface->tab
962 if((byte*)iface->tab >= arena_start && (byte*)iface->tab < arena_used) {
963 *sbuf.ptr.pos++ = (PtrTarget){iface->tab, 0};
964 if(sbuf.ptr.pos == sbuf.ptr.end)
965 flushptrbuf(&sbuf);
968 // iface->data
969 if((byte*)iface->data >= arena_start && (byte*)iface->data < arena_used) {
970 t = *(Type**)iface->tab;
971 if(__go_is_pointer_type(t)) {
972 if((t->__code & kindNoPointers))
973 continue;
975 obj = iface->data;
976 if((t->__code & kindMask) == kindPtr) {
977 // Only use type information if it is a pointer-containing type.
978 // This matches the GC programs written by cmd/gc/reflect.c's
979 // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
980 et = ((const PtrType*)t)->elem;
981 if(!(et->__code & kindNoPointers))
982 objti = (uintptr)((const PtrType*)t)->elem->__gc;
984 } else {
985 obj = iface->data;
986 objti = (uintptr)t->__gc;
989 break;
991 case GC_DEFAULT_PTR:
992 while(stack_top.b <= end_b) {
993 obj = *(byte**)stack_top.b;
994 if(Debug > 2)
995 runtime_printf("gc_default_ptr @%p: %p\n", stack_top.b, obj);
996 stack_top.b += PtrSize;
997 if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
998 *sbuf.ptr.pos++ = (PtrTarget){obj, 0};
999 if(sbuf.ptr.pos == sbuf.ptr.end)
1000 flushptrbuf(&sbuf);
1003 goto next_block;
1005 case GC_END:
1006 if(--stack_top.count != 0) {
1007 // Next iteration of a loop if possible.
1008 stack_top.b += stack_top.elemsize;
1009 if(stack_top.b + stack_top.elemsize <= end_b+PtrSize) {
1010 pc = stack_top.loop_or_ret;
1011 continue;
1013 i = stack_top.b;
1014 } else {
1015 // Stack pop if possible.
1016 if(stack_ptr+1 < stack+nelem(stack)) {
1017 pc = stack_top.loop_or_ret;
1018 stack_top = *(++stack_ptr);
1019 continue;
1021 i = (uintptr)b + nominal_size;
1023 if(!precise_type) {
1024 // Quickly scan [b+i,b+n) for possible pointers.
1025 for(; i<=end_b; i+=PtrSize) {
1026 if(*(byte**)i != nil) {
1027 // Found a value that may be a pointer.
1028 // Do a rescan of the entire block.
1029 enqueue((Obj){b, n, 0}, &sbuf.wbuf, &sbuf.wp, &sbuf.nobj);
1030 if(CollectStats) {
1031 runtime_xadd64(&gcstats.rescan, 1);
1032 runtime_xadd64(&gcstats.rescanbytes, n);
1034 break;
1038 goto next_block;
1040 case GC_ARRAY_START:
1041 i = stack_top.b + pc[1];
1042 count = pc[2];
1043 elemsize = pc[3];
1044 pc += 4;
1046 // Stack push.
1047 *stack_ptr-- = stack_top;
1048 stack_top = (GCFrame){count, elemsize, i, pc};
1049 continue;
1051 case GC_ARRAY_NEXT:
1052 if(--stack_top.count != 0) {
1053 stack_top.b += stack_top.elemsize;
1054 pc = stack_top.loop_or_ret;
1055 } else {
1056 // Stack pop.
1057 stack_top = *(++stack_ptr);
1058 pc += 1;
1060 continue;
1062 case GC_CALL:
1063 // Stack push.
1064 *stack_ptr-- = stack_top;
1065 stack_top = (GCFrame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
1066 pc = (const uintptr*)((const byte*)pc + *(const int32*)(pc+2)); // target of the CALL instruction
1067 continue;
1069 case GC_REGION:
1070 obj = (void*)(stack_top.b + pc[1]);
1071 size = pc[2];
1072 objti = pc[3];
1073 pc += 4;
1075 if(Debug > 2)
1076 runtime_printf("gc_region @%p: %D %p\n", stack_top.b+pc[1], (int64)size, objti);
1077 *sbuf.obj.pos++ = (Obj){obj, size, objti};
1078 if(sbuf.obj.pos == sbuf.obj.end)
1079 flushobjbuf(&sbuf);
1080 continue;
1082 case GC_CHAN_PTR:
1083 chan = *(Hchan**)(stack_top.b + pc[1]);
1084 if(Debug > 2 && chan != nil)
1085 runtime_printf("gc_chan_ptr @%p: %p/%D/%D %p\n", stack_top.b+pc[1], chan, (int64)chan->qcount, (int64)chan->dataqsiz, pc[2]);
1086 if(chan == nil) {
1087 pc += 3;
1088 continue;
1090 if(markonly(chan)) {
1091 chantype = (ChanType*)pc[2];
1092 if(!(chantype->elem->__code & kindNoPointers)) {
1093 // Start chanProg.
1094 chan_ret = pc+3;
1095 pc = chanProg+1;
1096 continue;
1099 pc += 3;
1100 continue;
1102 case GC_CHAN:
1103 // There are no heap pointers in struct Hchan,
1104 // so we can ignore the leading sizeof(Hchan) bytes.
1105 if(!(chantype->elem->__code & kindNoPointers)) {
1106 chancap = chan->dataqsiz;
1107 if(chancap > 0 && markonly(chan->buf)) {
1108 // TODO(atom): split into two chunks so that only the
1109 // in-use part of the circular buffer is scanned.
1110 // (Channel routines zero the unused part, so the current
1111 // code does not lead to leaks, it's just a little inefficient.)
1112 *sbuf.obj.pos++ = (Obj){chan->buf, chancap*chantype->elem->__size,
1113 (uintptr)chantype->elem->__gc | PRECISE | LOOP};
1114 if(sbuf.obj.pos == sbuf.obj.end)
1115 flushobjbuf(&sbuf);
1118 if(chan_ret == nil)
1119 goto next_block;
1120 pc = chan_ret;
1121 continue;
1123 default:
1124 runtime_printf("runtime: invalid GC instruction %p at %p\n", pc[0], pc);
1125 runtime_throw("scanblock: invalid GC instruction");
1126 return;
1129 if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
1130 *sbuf.ptr.pos++ = (PtrTarget){obj, objti};
1131 if(sbuf.ptr.pos == sbuf.ptr.end)
1132 flushptrbuf(&sbuf);
1136 next_block:
1137 // Done scanning [b, b+n). Prepare for the next iteration of
1138 // the loop by setting b, n, ti to the parameters for the next block.
1140 if(sbuf.nobj == 0) {
1141 flushptrbuf(&sbuf);
1142 flushobjbuf(&sbuf);
1144 if(sbuf.nobj == 0) {
1145 if(!keepworking) {
1146 if(sbuf.wbuf)
1147 putempty(sbuf.wbuf);
1148 return;
1150 // Emptied our buffer: refill.
1151 sbuf.wbuf = getfull(sbuf.wbuf);
1152 if(sbuf.wbuf == nil)
1153 return;
1154 sbuf.nobj = sbuf.wbuf->nobj;
1155 sbuf.wp = sbuf.wbuf->obj + sbuf.wbuf->nobj;
1159 // Fetch b from the work buffer.
1160 --sbuf.wp;
1161 b = sbuf.wp->p;
1162 n = sbuf.wp->n;
1163 ti = sbuf.wp->ti;
1164 sbuf.nobj--;
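// --- Illustrative sketch (not part of the original mgc0.c) -----------------
// What the GC_DEFAULT_PTR case of the interpreter above boils down to: walk a
// block word by word and report every value that lands inside the arena as a
// potential pointer. The real scanblock pushes such candidates through the
// PtrTarget buffer and flushptrbuf; here an invented callback stands in for
// that step, and the arena bounds are plain parameters.
#include <stdint.h>
#include <stddef.h>

static void
sketch_scan_conservatively(const void *block, size_t nbytes,
                           const char *arena_start, const char *arena_used,
                           void (*found)(const void *candidate))
{
	const uintptr_t *p = (const uintptr_t*)block;
	const uintptr_t *end = p + nbytes/sizeof(uintptr_t);

	for(; p < end; p++) {
		const char *v = (const char*)*p;
		if(v >= arena_start && v < arena_used)
			found(v);	/* candidate pointer: mark/enqueue it */
	}
}
// ---------------------------------------------------------------------------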
1168 static struct root_list* roots;
1170 void
1171 __go_register_gc_roots (struct root_list* r)
1173 // FIXME: This needs locking if multiple goroutines can call
1174 // dlopen simultaneously.
1175 r->next = roots;
1176 roots = r;
1179 // Append obj to the work buffer.
1180 // _wbuf, _wp, _nobj are input/output parameters specifying the work buffer.
1181 static void
1182 enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj)
1184 uintptr nobj, off;
1185 Obj *wp;
1186 Workbuf *wbuf;
1188 if(Debug > 1)
1189 runtime_printf("append obj(%p %D %p)\n", obj.p, (int64)obj.n, obj.ti);
1191 // Align obj.b to a word boundary.
1192 off = (uintptr)obj.p & (PtrSize-1);
1193 if(off != 0) {
1194 obj.p += PtrSize - off;
1195 obj.n -= PtrSize - off;
1196 obj.ti = 0;
1199 if(obj.p == nil || obj.n == 0)
1200 return;
1202 // Load work buffer state
1203 wp = *_wp;
1204 wbuf = *_wbuf;
1205 nobj = *_nobj;
1207 // If another proc wants a pointer, give it some.
1208 if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
1209 wbuf->nobj = nobj;
1210 wbuf = handoff(wbuf);
1211 nobj = wbuf->nobj;
1212 wp = wbuf->obj + nobj;
1215 // If buffer is full, get a new one.
1216 if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
1217 if(wbuf != nil)
1218 wbuf->nobj = nobj;
1219 wbuf = getempty(wbuf);
1220 wp = wbuf->obj;
1221 nobj = 0;
1224 *wp = obj;
1225 wp++;
1226 nobj++;
1228 // Save work buffer state
1229 *_wp = wp;
1230 *_wbuf = wbuf;
1231 *_nobj = nobj;
1234 static void
1235 enqueue1(Workbuf **wbufp, Obj obj)
1237 Workbuf *wbuf;
1239 wbuf = *wbufp;
1240 if(wbuf->nobj >= nelem(wbuf->obj))
1241 *wbufp = wbuf = getempty(wbuf);
1242 wbuf->obj[wbuf->nobj++] = obj;
1245 static void
1246 markroot(ParFor *desc, uint32 i)
1248 Workbuf *wbuf;
1249 FinBlock *fb;
1250 MHeap *h;
1251 MSpan **allspans, *s;
1252 uint32 spanidx, sg;
1253 G *gp;
1254 void *p;
1256 USED(&desc);
1257 wbuf = getempty(nil);
1258 // Note: if you add a case here, please also update heapdump.c:dumproots.
1259 switch(i) {
1260 case RootData:
1261 // For gccgo this is both data and bss.
1263 struct root_list *pl;
1265 for(pl = roots; pl != nil; pl = pl->next) {
1266 struct root *pr = &pl->roots[0];
1267 while(1) {
1268 void *decl = pr->decl;
1269 if(decl == nil)
1270 break;
1271 enqueue1(&wbuf, (Obj){decl, pr->size, 0});
1272 pr++;
1276 break;
1278 case RootBss:
1279 // For gccgo we use this for all the other global roots.
1280 enqueue1(&wbuf, (Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
1281 enqueue1(&wbuf, (Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
1282 enqueue1(&wbuf, (Obj){(byte*)&runtime_allg, sizeof runtime_allg, 0});
1283 enqueue1(&wbuf, (Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
1284 enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
1285 enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
1286 runtime_proc_scan(&wbuf, enqueue1);
1287 break;
1289 case RootFinalizers:
1290 for(fb=allfin; fb; fb=fb->alllink)
1291 enqueue1(&wbuf, (Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
1292 break;
1294 case RootSpanTypes:
1295 // mark span types and MSpan.specials (to walk spans only once)
1296 h = &runtime_mheap;
1297 sg = h->sweepgen;
1298 allspans = h->allspans;
1299 for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
1300 Special *sp;
1301 SpecialFinalizer *spf;
1303 s = allspans[spanidx];
1304 if(s->sweepgen != sg) {
1305 runtime_printf("sweep %d %d\n", s->sweepgen, sg);
1306 runtime_throw("gc: unswept span");
1308 if(s->state != MSpanInUse)
1309 continue;
1310 // The garbage collector ignores type pointers stored in MSpan.types:
1311 // - Compiler-generated types are stored outside of heap.
1312 // - The reflect package has runtime-generated types cached in its data structures.
1313 // The garbage collector relies on finding the references via that cache.
1314 if(s->types.compression == MTypes_Words || s->types.compression == MTypes_Bytes)
1315 markonly((byte*)s->types.data);
1316 for(sp = s->specials; sp != nil; sp = sp->next) {
1317 if(sp->kind != KindSpecialFinalizer)
1318 continue;
1319 // don't mark finalized object, but scan it so we
1320 // retain everything it points to.
1321 spf = (SpecialFinalizer*)sp;
1322 // A finalizer can be set for an inner byte of an object, find object beginning.
1323 p = (void*)((s->start << PageShift) + spf->offset/s->elemsize*s->elemsize);
1324 enqueue1(&wbuf, (Obj){p, s->elemsize, 0});
1325 enqueue1(&wbuf, (Obj){(void*)&spf->fn, PtrSize, 0});
1326 enqueue1(&wbuf, (Obj){(void*)&spf->ft, PtrSize, 0});
1327 enqueue1(&wbuf, (Obj){(void*)&spf->ot, PtrSize, 0});
1330 break;
1332 case RootFlushCaches:
1333 flushallmcaches();
1334 break;
1336 default:
1337 // the rest is scanning goroutine stacks
1338 if(i - RootCount >= runtime_allglen)
1339 runtime_throw("markroot: bad index");
1340 gp = runtime_allg[i - RootCount];
1341 // remember when we've first observed the G blocked
1342 // needed only to output in traceback
1343 if((gp->atomicstatus == _Gwaiting || gp->atomicstatus == _Gsyscall) && gp->waitsince == 0)
1344 gp->waitsince = work.tstart;
1345 addstackroots(gp, &wbuf);
1346 break;
1350 if(wbuf)
1351 scanblock(wbuf, false);
1354 static const FuncVal markroot_funcval = { (void *) markroot };
1356 // Get an empty work buffer off the work.empty list,
1357 // allocating new buffers as needed.
1358 static Workbuf*
1359 getempty(Workbuf *b)
1361 if(b != nil)
1362 runtime_lfstackpush(&work.full, &b->node);
1363 b = (Workbuf*)runtime_lfstackpop(&work.wempty);
1364 if(b == nil) {
1365 // Need to allocate.
1366 runtime_lock(&work);
1367 if(work.nchunk < sizeof *b) {
1368 work.nchunk = 1<<20;
1369 work.chunk = runtime_SysAlloc(work.nchunk, &mstats()->gc_sys);
1370 if(work.chunk == nil)
1371 runtime_throw("runtime: cannot allocate memory");
1373 b = (Workbuf*)work.chunk;
1374 work.chunk += sizeof *b;
1375 work.nchunk -= sizeof *b;
1376 runtime_unlock(&work);
1378 b->nobj = 0;
1379 return b;
1382 static void
1383 putempty(Workbuf *b)
1385 if(CollectStats)
1386 runtime_xadd64(&gcstats.putempty, 1);
1388 runtime_lfstackpush(&work.wempty, &b->node);
1391 // Get a full work buffer off the work.full list, or return nil.
1392 static Workbuf*
1393 getfull(Workbuf *b)
1395 M *m;
1396 int32 i;
1398 if(CollectStats)
1399 runtime_xadd64(&gcstats.getfull, 1);
1401 if(b != nil)
1402 runtime_lfstackpush(&work.wempty, &b->node);
1403 b = (Workbuf*)runtime_lfstackpop(&work.full);
1404 if(b != nil || work.nproc == 1)
1405 return b;
1407 m = runtime_m();
1408 runtime_xadd(&work.nwait, +1);
1409 for(i=0;; i++) {
1410 if(work.full != 0) {
1411 runtime_xadd(&work.nwait, -1);
1412 b = (Workbuf*)runtime_lfstackpop(&work.full);
1413 if(b != nil)
1414 return b;
1415 runtime_xadd(&work.nwait, +1);
1417 if(work.nwait == work.nproc)
1418 return nil;
1419 if(i < 10) {
1420 m->gcstats.nprocyield++;
1421 runtime_procyield(20);
1422 } else if(i < 20) {
1423 m->gcstats.nosyield++;
1424 runtime_osyield();
1425 } else {
1426 m->gcstats.nsleep++;
1427 runtime_usleep(100);
1432 static Workbuf*
1433 handoff(Workbuf *b)
1435 M *m;
1436 int32 n;
1437 Workbuf *b1;
1439 m = runtime_m();
1441 // Make new buffer with half of b's pointers.
1442 b1 = getempty(nil);
1443 n = b->nobj/2;
1444 b->nobj -= n;
1445 b1->nobj = n;
1446 runtime_memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
1447 m->gcstats.nhandoff++;
1448 m->gcstats.nhandoffcnt += n;
1450 // Put b on full list - let first half of b get stolen.
1451 runtime_lfstackpush(&work.full, &b->node);
1452 return b1;
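// --- Illustrative sketch (not part of the original mgc0.c) -----------------
// The work-splitting step of handoff above, on a plain array buffer: the
// first half stays in b (which the caller then publishes on work.full for an
// idle thread to take), and the second half is copied into a fresh buffer
// that the current thread keeps draining. The struct and names are invented
// for the sketch.
#include <stddef.h>
#include <string.h>

struct sketch_workbuf {
	void	*obj[512];
	size_t	nobj;
};

static void
sketch_split(struct sketch_workbuf *b, struct sketch_workbuf *b1)
{
	size_t n = b->nobj/2;		/* give half of the pending work away */

	b->nobj -= n;
	b1->nobj = n;
	memmove(b1->obj, b->obj + b->nobj, n*sizeof b1->obj[0]);
	/* caller publishes b (remaining half) and continues with b1 */
}
// ---------------------------------------------------------------------------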
1455 static void
1456 addstackroots(G *gp, Workbuf **wbufp)
1458 switch(gp->atomicstatus){
1459 default:
1460 runtime_printf("unexpected G.status %d (goroutine %p %D)\n", gp->atomicstatus, gp, gp->goid);
1461 runtime_throw("mark - bad status");
1462 case _Gdead:
1463 return;
1464 case _Grunning:
1465 runtime_throw("mark - world not stopped");
1466 case _Grunnable:
1467 case _Gsyscall:
1468 case _Gwaiting:
1469 break;
1472 #ifdef USING_SPLIT_STACK
1473 M *mp;
1474 void* sp;
1475 size_t spsize;
1476 void* next_segment;
1477 void* next_sp;
1478 void* initial_sp;
1480 if(gp == runtime_g()) {
1481 // Scanning our own stack.
1482 sp = __splitstack_find(nil, nil, &spsize, &next_segment,
1483 &next_sp, &initial_sp);
1484 } else if((mp = gp->m) != nil && mp->helpgc) {
1485 // gchelper's stack is in active use and has no interesting pointers.
1486 return;
1487 } else {
1488 // Scanning another goroutine's stack.
1489 // The goroutine is usually asleep (the world is stopped).
1491 // The exception is that if the goroutine is about to enter or might
1492 // have just exited a system call, it may be executing code such
1493 // as schedlock and may have needed to start a new stack segment.
1494 // Use the stack segment and stack pointer at the time of
1495 // the system call instead, since that won't change underfoot.
1496 if(gp->gcstack != nil) {
1497 sp = gp->gcstack;
1498 spsize = gp->gcstacksize;
1499 next_segment = gp->gcnextsegment;
1500 next_sp = gp->gcnextsp;
1501 initial_sp = gp->gcinitialsp;
1502 } else {
1503 sp = __splitstack_find_context(&gp->stackcontext[0],
1504 &spsize, &next_segment,
1505 &next_sp, &initial_sp);
1508 if(sp != nil) {
1509 enqueue1(wbufp, (Obj){sp, spsize, 0});
1510 while((sp = __splitstack_find(next_segment, next_sp,
1511 &spsize, &next_segment,
1512 &next_sp, &initial_sp)) != nil)
1513 enqueue1(wbufp, (Obj){sp, spsize, 0});
1515 #else
1516 M *mp;
1517 byte* bottom;
1518 byte* top;
1520 if(gp == runtime_g()) {
1521 // Scanning our own stack.
1522 bottom = (byte*)&gp;
1523 } else if((mp = gp->m) != nil && mp->helpgc) {
1524 // gchelper's stack is in active use and has no interesting pointers.
1525 return;
1526 } else {
1527 // Scanning another goroutine's stack.
1528 // The goroutine is usually asleep (the world is stopped).
1529 bottom = (byte*)gp->gcnextsp;
1530 if(bottom == nil)
1531 return;
1533 top = (byte*)gp->gcinitialsp + gp->gcstacksize;
1534 if(top > bottom)
1535 enqueue1(wbufp, (Obj){bottom, top - bottom, 0});
1536 else
1537 enqueue1(wbufp, (Obj){top, bottom - top, 0});
1538 #endif
1541 void
1542 runtime_queuefinalizer(void *p, FuncVal *fn, const FuncType *ft, const PtrType *ot)
1544 FinBlock *block;
1545 Finalizer *f;
1547 runtime_lock(&finlock);
1548 if(finq == nil || finq->cnt == finq->cap) {
1549 if(finc == nil) {
1550 finc = runtime_persistentalloc(FinBlockSize, 0, &mstats()->gc_sys);
1551 finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
1552 finc->alllink = allfin;
1553 allfin = finc;
1555 block = finc;
1556 finc = block->next;
1557 block->next = finq;
1558 finq = block;
1560 f = &finq->fin[finq->cnt];
1561 finq->cnt++;
1562 f->fn = fn;
1563 f->ft = ft;
1564 f->ot = ot;
1565 f->arg = p;
1566 runtime_fingwake = true;
1567 runtime_unlock(&finlock);
1570 void
1571 runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*))
1573 FinBlock *fb;
1574 Finalizer *f;
1575 int32 i;
1577 for(fb = allfin; fb; fb = fb->alllink) {
1578 for(i = 0; i < fb->cnt; i++) {
1579 f = &fb->fin[i];
1580 callback(f->fn, f->arg, f->ft, f->ot);
1585 void
1586 runtime_MSpan_EnsureSwept(MSpan *s)
1588 M *m = runtime_m();
1589 G *g = runtime_g();
1590 uint32 sg;
1592 // Caller must disable preemption.
1593 // Otherwise when this function returns the span can become unswept again
1594 // (if GC is triggered on another goroutine).
1595 if(m->locks == 0 && m->mallocing == 0 && g != m->g0)
1596 runtime_throw("MSpan_EnsureSwept: m is not locked");
1598 sg = runtime_mheap.sweepgen;
1599 if(runtime_atomicload(&s->sweepgen) == sg)
1600 return;
1601 if(runtime_cas(&s->sweepgen, sg-2, sg-1)) {
1602 runtime_MSpan_Sweep(s);
1603 return;
1605 // Unfortunate case: another goroutine is sweeping this span and we have no efficient way to wait, so spin.
1606 while(runtime_atomicload(&s->sweepgen) != sg)
1607 runtime_osyield();
1610 // Sweep frees or collects finalizers for blocks not marked in the mark phase.
1611 // It clears the mark bits in preparation for the next GC round.
1612 // Returns true if the span was returned to heap.
1613 bool
1614 runtime_MSpan_Sweep(MSpan *s)
1616 M *m;
1617 int32 cl, n, npages, nfree;
1618 uintptr size, off, *bitp, shift, bits;
1619 uint32 sweepgen;
1620 byte *p;
1621 MCache *c;
1622 byte *arena_start;
1623 MLink head, *end;
1624 byte *type_data;
1625 byte compression;
1626 uintptr type_data_inc;
1627 MLink *x;
1628 Special *special, **specialp, *y;
1629 bool res, sweepgenset;
1631 m = runtime_m();
1633 // It's critical that we enter this function with preemption disabled,
1634 // GC must not start while we are in the middle of this function.
1635 if(m->locks == 0 && m->mallocing == 0 && runtime_g() != m->g0)
1636 runtime_throw("MSpan_Sweep: m is not locked");
1637 sweepgen = runtime_mheap.sweepgen;
1638 if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
1639 runtime_printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
1640 s->state, s->sweepgen, sweepgen);
1641 runtime_throw("MSpan_Sweep: bad span state");
1643 arena_start = runtime_mheap.arena_start;
1644 cl = s->sizeclass;
1645 size = s->elemsize;
1646 if(cl == 0) {
1647 n = 1;
1648 } else {
1649 // Chunk full of small blocks.
1650 npages = runtime_class_to_allocnpages[cl];
1651 n = (npages << PageShift) / size;
1653 res = false;
1654 nfree = 0;
1655 end = &head;
1656 c = m->mcache;
1657 sweepgenset = false;
1659 // mark any free objects in this span so we don't collect them
1660 for(x = s->freelist; x != nil; x = x->next) {
1661 // This is markonly(x) but faster because we don't need
1662 // atomic access and we're guaranteed to be pointing at
1663 // the head of a valid object.
1664 off = (uintptr*)x - (uintptr*)runtime_mheap.arena_start;
1665 bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1666 shift = off % wordsPerBitmapWord;
1667 *bitp |= bitMarked<<shift;
1670 // Unlink & free special records for any objects we're about to free.
1671 specialp = &s->specials;
1672 special = *specialp;
1673 while(special != nil) {
1674 // A finalizer can be set for an inner byte of an object, find object beginning.
1675 p = (byte*)(s->start << PageShift) + special->offset/size*size;
1676 off = (uintptr*)p - (uintptr*)arena_start;
1677 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
1678 shift = off % wordsPerBitmapWord;
1679 bits = *bitp>>shift;
1680 if((bits & (bitAllocated|bitMarked)) == bitAllocated) {
1681 // Find the exact byte for which the special was setup
1682 // (as opposed to object beginning).
1683 p = (byte*)(s->start << PageShift) + special->offset;
1684 // about to free object: splice out special record
1685 y = special;
1686 special = special->next;
1687 *specialp = special;
1688 if(!runtime_freespecial(y, p, size, false)) {
1689 // stop freeing of object if it has a finalizer
1690 *bitp |= bitMarked << shift;
1692 } else {
1693 // object is still live: keep special record
1694 specialp = &special->next;
1695 special = *specialp;
1699 type_data = (byte*)s->types.data;
1700 type_data_inc = sizeof(uintptr);
1701 compression = s->types.compression;
1702 switch(compression) {
1703 case MTypes_Bytes:
1704 type_data += 8*sizeof(uintptr);
1705 type_data_inc = 1;
1706 break;
1709 // Sweep through n objects of given size starting at p.
1710 // This thread owns the span now, so it can manipulate
1711 // the block bitmap without atomic operations.
1712 p = (byte*)(s->start << PageShift);
1713 for(; n > 0; n--, p += size, type_data+=type_data_inc) {
1714 off = (uintptr*)p - (uintptr*)arena_start;
1715 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
1716 shift = off % wordsPerBitmapWord;
1717 bits = *bitp>>shift;
1719 if((bits & bitAllocated) == 0)
1720 continue;
1722 if((bits & bitMarked) != 0) {
1723 *bitp &= ~(bitMarked<<shift);
1724 continue;
1727 if(runtime_debug.allocfreetrace)
1728 runtime_tracefree(p, size);
1730 // Clear mark and scan bits.
1731 *bitp &= ~((bitScan|bitMarked)<<shift);
1733 if(cl == 0) {
1734 // Free large span.
1735 runtime_unmarkspan(p, 1<<PageShift);
1736 s->needzero = 1;
1737 // important to set sweepgen before returning it to heap
1738 runtime_atomicstore(&s->sweepgen, sweepgen);
1739 sweepgenset = true;
1740 // See note about SysFault vs SysFree in malloc.goc.
1741 if(runtime_debug.efence)
1742 runtime_SysFault(p, size);
1743 else
1744 runtime_MHeap_Free(&runtime_mheap, s, 1);
1745 c->local_nlargefree++;
1746 c->local_largefree += size;
1747 runtime_xadd64(&mstats()->next_gc, -(uint64)(size * (gcpercent + 100)/100));
1748 res = true;
1749 } else {
1750 // Free small object.
1751 switch(compression) {
1752 case MTypes_Words:
1753 *(uintptr*)type_data = 0;
1754 break;
1755 case MTypes_Bytes:
1756 *(byte*)type_data = 0;
1757 break;
1759 if(size > 2*sizeof(uintptr))
1760 ((uintptr*)p)[1] = (uintptr)0xdeaddeaddeaddeadll; // mark as "needs to be zeroed"
1761 else if(size > sizeof(uintptr))
1762 ((uintptr*)p)[1] = 0;
1764 end->next = (MLink*)p;
1765 end = (MLink*)p;
1766 nfree++;
1770 // We need to set s->sweepgen = h->sweepgen only when all blocks are swept,
1771 // because of the potential for a concurrent free/SetFinalizer.
1772 // But we need to set it before we make the span available for allocation
1773 // (return it to heap or mcentral), because allocation code assumes that a
1774 // span is already swept if available for allocation.
1776 if(!sweepgenset && nfree == 0) {
1777 // The span must be in our exclusive ownership until we update sweepgen,
1778 // check for potential races.
1779 if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
1780 runtime_printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
1781 s->state, s->sweepgen, sweepgen);
1782 runtime_throw("MSpan_Sweep: bad span state after sweep");
1784 runtime_atomicstore(&s->sweepgen, sweepgen);
1786 if(nfree > 0) {
1787 c->local_nsmallfree[cl] += nfree;
1788 c->local_cachealloc -= nfree * size;
1789 runtime_xadd64(&mstats()->next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
1790 res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
1791 //MCentral_FreeSpan updates sweepgen
1793 return res;
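// --- Illustrative sketch (not part of the original mgc0.c) -----------------
// The per-object decision runtime_MSpan_Sweep makes, with the real bitmap
// collapsed to one flag byte per object for readability: slots that were
// never allocated are skipped, marked objects survive and lose their mark so
// the next cycle starts clean, and everything else is freed. Flag values,
// names and the release callback are invented for the sketch.
#include <stddef.h>

enum { SK_ALLOCATED = 1, SK_MARKED = 2 };

static size_t
sketch_sweep_span(unsigned char *flags, size_t nobj, void (*release)(size_t))
{
	size_t i, nfree = 0;

	for(i = 0; i < nobj; i++) {
		if((flags[i] & SK_ALLOCATED) == 0)
			continue;			/* never allocated */
		if(flags[i] & SK_MARKED) {
			flags[i] &= ~SK_MARKED;		/* survivor: clear mark */
			continue;
		}
		flags[i] = 0;				/* unmarked: garbage */
		release(i);
		nfree++;
	}
	return nfree;
}
// ---------------------------------------------------------------------------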
1796 // State of background sweep.
1797 // Protected by gclock.
1798 static struct
1800 G* g;
1801 bool parked;
1803 MSpan** spans;
1804 uint32 nspan;
1805 uint32 spanidx;
1806 } sweep;
1808 // background sweeping goroutine
1809 static void
1810 bgsweep(void* dummy __attribute__ ((unused)))
1812 runtime_g()->issystem = 1;
1813 for(;;) {
1814 while(runtime_sweepone() != (uintptr)-1) {
1815 gcstats.nbgsweep++;
1816 runtime_gosched();
1818 runtime_lock(&gclock);
1819 if(!runtime_mheap.sweepdone) {
1820 // This is possible if a GC happened between sweepone returning -1
1821 // and our acquiring gclock.
1822 runtime_unlock(&gclock);
1823 continue;
1825 sweep.parked = true;
1826 runtime_g()->isbackground = true;
1827 runtime_parkunlock(&gclock, "GC sweep wait");
1828 runtime_g()->isbackground = false;
1832 // sweeps one span
1833 // returns number of pages returned to heap, or -1 if there is nothing to sweep
1834 uintptr
1835 runtime_sweepone(void)
1837 M *m = runtime_m();
1838 MSpan *s;
1839 uint32 idx, sg;
1840 uintptr npages;
1842 // increment locks to ensure that the goroutine is not preempted
1843 // in the middle of sweep thus leaving the span in an inconsistent state for next GC
1844 m->locks++;
1845 sg = runtime_mheap.sweepgen;
1846 for(;;) {
1847 idx = runtime_xadd(&sweep.spanidx, 1) - 1;
1848 if(idx >= sweep.nspan) {
1849 runtime_mheap.sweepdone = true;
1850 m->locks--;
1851 return (uintptr)-1;
1853 s = sweep.spans[idx];
1854 if(s->state != MSpanInUse) {
1855 s->sweepgen = sg;
1856 continue;
1858 if(s->sweepgen != sg-2 || !runtime_cas(&s->sweepgen, sg-2, sg-1))
1859 continue;
1860 if(s->incache)
1861 runtime_throw("sweep of incache span");
1862 npages = s->npages;
1863 if(!runtime_MSpan_Sweep(s))
1864 npages = 0;
1865 m->locks--;
1866 return npages;
1870 static void
1871 dumpspan(uint32 idx)
1873 int32 sizeclass, n, npages, i, column;
1874 uintptr size;
1875 byte *p;
1876 byte *arena_start;
1877 MSpan *s;
1878 bool allocated;
1880 s = runtime_mheap.allspans[idx];
1881 if(s->state != MSpanInUse)
1882 return;
1883 arena_start = runtime_mheap.arena_start;
1884 p = (byte*)(s->start << PageShift);
1885 sizeclass = s->sizeclass;
1886 size = s->elemsize;
1887 if(sizeclass == 0) {
1888 n = 1;
1889 } else {
1890 npages = runtime_class_to_allocnpages[sizeclass];
1891 n = (npages << PageShift) / size;
1894 runtime_printf("%p .. %p:\n", p, p+n*size);
1895 column = 0;
1896 for(; n>0; n--, p+=size) {
1897 uintptr off, *bitp, shift, bits;
1899 off = (uintptr*)p - (uintptr*)arena_start;
1900 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
1901 shift = off % wordsPerBitmapWord;
1902 bits = *bitp>>shift;
1904 allocated = ((bits & bitAllocated) != 0);
1906 for(i=0; (uint32)i<size; i+=sizeof(void*)) {
1907 if(column == 0) {
1908 runtime_printf("\t");
1910 if(i == 0) {
1911 runtime_printf(allocated ? "(" : "[");
1912 runtime_printf("%p: ", p+i);
1913 } else {
1914 runtime_printf(" ");
1917 runtime_printf("%p", *(void**)(p+i));
1919 if(i+sizeof(void*) >= size) {
1920 runtime_printf(allocated ? ") " : "] ");
1923 column++;
1924 if(column == 8) {
1925 runtime_printf("\n");
1926 column = 0;
1930 runtime_printf("\n");
1933 // A debugging function to dump the contents of memory
1934 void
1935 runtime_memorydump(void)
1937 uint32 spanidx;
1939 for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
1940 dumpspan(spanidx);
1944 void
1945 runtime_gchelper(void)
1947 uint32 nproc;
1949 runtime_m()->traceback = 2;
1950 gchelperstart();
1952 // run the parallel for over the GC roots (mark phase)
1953 runtime_parfordo(work.markfor);
1955 // help other threads scan secondary blocks
1956 scanblock(nil, true);
1958 bufferList[runtime_m()->helpgc].busy = 0;
1959 nproc = work.nproc; // work.nproc can change right after we increment work.ndone
1960 if(runtime_xadd(&work.ndone, +1) == nproc-1)
1961 runtime_notewakeup(&work.alldone);
1962 runtime_m()->traceback = 0;
1965 static void
1966 cachestats(void)
1968 MCache *c;
1969 P *p, **pp;
1971 for(pp=runtime_allp; (p=*pp) != nil; pp++) {
1972 c = p->mcache;
1973 if(c==nil)
1974 continue;
1975 runtime_purgecachedstats(c);
1979 static void
1980 flushallmcaches(void)
1982 P *p, **pp;
1983 MCache *c;
1985 // Flush MCache's to MCentral.
1986 for(pp=runtime_allp; (p=*pp) != nil; pp++) {
1987 c = p->mcache;
1988 if(c==nil)
1989 continue;
1990 runtime_MCache_ReleaseAll(c);
1994 void
1995 runtime_updatememstats(GCStats *stats)
1997 M *mp;
1998 MSpan *s;
1999 uint32 i;
2000 uint64 stacks_inuse, smallfree;
2001 uint64 *src, *dst;
2002 MStats *pmstats;
2004 if(stats)
2005 runtime_memclr((byte*)stats, sizeof(*stats));
2006 stacks_inuse = 0;
2007 for(mp=runtime_allm; mp; mp=mp->alllink) {
2008 //stacks_inuse += mp->stackinuse*FixedStack;
2009 if(stats) {
2010 src = (uint64*)&mp->gcstats;
2011 dst = (uint64*)stats;
2012 for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
2013 dst[i] += src[i];
2014 runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
2017 pmstats = mstats();
2018 pmstats->stacks_inuse = stacks_inuse;
2019 pmstats->mcache_inuse = runtime_mheap.cachealloc.inuse;
2020 pmstats->mspan_inuse = runtime_mheap.spanalloc.inuse;
2021 pmstats->sys = pmstats->heap_sys + pmstats->stacks_sys + pmstats->mspan_sys +
2022 pmstats->mcache_sys + pmstats->buckhash_sys + pmstats->gc_sys + pmstats->other_sys;
2024 // Calculate memory allocator stats.
2025 // During program execution we only count number of frees and amount of freed memory.
2026 // Current number of alive objects in the heap and amount of alive heap memory
2027 // are calculated by scanning all spans.
2028 // Total number of mallocs is calculated as number of frees plus number of alive objects.
2029 // Similarly, total amount of allocated memory is calculated as amount of freed memory
2030 // plus amount of alive heap memory.
2031 pmstats->alloc = 0;
2032 pmstats->total_alloc = 0;
2033 pmstats->nmalloc = 0;
2034 pmstats->nfree = 0;
2035 for(i = 0; i < nelem(pmstats->by_size); i++) {
2036 pmstats->by_size[i].nmalloc = 0;
2037 pmstats->by_size[i].nfree = 0;
2040 // Flush MCaches to MCentral.
2041 flushallmcaches();
2043 // Aggregate local stats.
2044 cachestats();
2046 // Scan all spans and count number of alive objects.
2047 for(i = 0; i < runtime_mheap.nspan; i++) {
2048 s = runtime_mheap.allspans[i];
2049 if(s->state != MSpanInUse)
2050 continue;
2051 if(s->sizeclass == 0) {
2052 pmstats->nmalloc++;
2053 pmstats->alloc += s->elemsize;
2054 } else {
2055 pmstats->nmalloc += s->ref;
2056 pmstats->by_size[s->sizeclass].nmalloc += s->ref;
2057 pmstats->alloc += s->ref*s->elemsize;
2061 // Aggregate by size class.
2062 smallfree = 0;
2063 pmstats->nfree = runtime_mheap.nlargefree;
2064 for(i = 0; i < nelem(pmstats->by_size); i++) {
2065 pmstats->nfree += runtime_mheap.nsmallfree[i];
2066 pmstats->by_size[i].nfree = runtime_mheap.nsmallfree[i];
2067 pmstats->by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
2068 smallfree += runtime_mheap.nsmallfree[i] * runtime_class_to_size[i];
2070 pmstats->nmalloc += pmstats->nfree;
2072 // Calculate derived stats.
2073 pmstats->total_alloc = pmstats->alloc + runtime_mheap.largefree + smallfree;
2074 pmstats->heap_alloc = pmstats->alloc;
2075 pmstats->heap_objects = pmstats->nmalloc - pmstats->nfree;
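// Worked example of the identities above, with hypothetical numbers: if the
// span scan finds 1000 live objects occupying 64KB, and 4000 frees totalling
// 256KB were recorded since the last update, then
//   nmalloc      = 4000 + 1000  = 5000
//   total_alloc  = 64KB + 256KB = 320KB
//   heap_objects = 5000 - 4000  = 1000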
2078 // Structure of arguments passed to function gc().
2079 // This allows the arguments to be passed via runtime_mcall.
2080 struct gc_args
2082 int64 start_time; // start time of GC in ns (just before stoptheworld)
2083 bool eagersweep;
2086 static void gc(struct gc_args *args);
2087 static void mgc(G *gp);
2089 static int32
2090 readgogc(void)
2092 String s;
2093 const byte *p;
2095 s = runtime_getenv("GOGC");
2096 if(s.len == 0)
2097 return 100;
2098 p = s.str;
2099 if(s.len == 3 && runtime_strcmp((const char *)p, "off") == 0)
2100 return -1;
2101 return runtime_atoi(p, s.len);
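// The resulting setting, spelled out (this only restates the parsing above):
// GOGC unset or empty yields the default 100, GOGC=off yields -1 (collection
// disabled), and any other value is parsed as a decimal integer.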
2104 // force = 1 - do GC regardless of current heap usage
2105 // force = 2 - do GC and eager sweep
2106 void
2107 runtime_gc(int32 force)
2109 M *m;
2110 G *g;
2111 struct gc_args a;
2112 int32 i;
2113 MStats *pmstats;
2115 // The atomic operations are not atomic if the uint64s
2116 // are not aligned on uint64 boundaries. This has been
2117 // a problem in the past.
2118 if((((uintptr)&work.wempty) & 7) != 0)
2119 runtime_throw("runtime: gc work buffer is misaligned");
2120 if((((uintptr)&work.full) & 7) != 0)
2121 runtime_throw("runtime: gc work buffer is misaligned");
2123 // Make sure all registers are saved on stack so that
2124 // scanstack sees them.
2125 __builtin_unwind_init();
2127 // The gc is turned off (via enablegc) until
2128 // the bootstrap has completed.
2129 // Also, malloc gets called in the guts
2130 // of a number of libraries that might be
2131 // holding locks. To avoid priority inversion
2132 // problems, don't bother trying to run gc
2133 // while holding a lock. The next mallocgc
2134 // without a lock will do the gc instead.
2135 m = runtime_m();
2136 pmstats = mstats();
2137 if(!pmstats->enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking() || m->preemptoff.len > 0)
2138 return;
2140 if(gcpercent == GcpercentUnknown) { // first time through
2141 runtime_lock(&runtime_mheap);
2142 if(gcpercent == GcpercentUnknown)
2143 gcpercent = readgogc();
2144 runtime_unlock(&runtime_mheap);
2146 if(gcpercent < 0)
2147 return;
2149 runtime_acquireWorldsema();
2150 if(force==0 && pmstats->heap_alloc < pmstats->next_gc) {
2151 // typically threads which lost the race to grab
2152 // worldsema exit here when gc is done.
2153 runtime_releaseWorldsema();
2154 return;
2157 // Ok, we're doing it! Stop everybody else
2158 a.start_time = runtime_nanotime();
2159 a.eagersweep = force >= 2;
2160 m->gcing = 1;
2161 runtime_stopTheWorldWithSema();
2163 clearpools();
2165 // Run gc on the g0 stack. We do this so that the g stack
2166 // we're currently running on will no longer change. Cuts
2167 // the root set down a bit (g0 stacks are not scanned, and
2168 // we don't need to scan gc's internal state). Also an
2169 // enabler for copyable stacks.
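// Hedged note on the loop bound below: when GODEBUG=gctrace is greater than 1
// the collection is run twice back to back, and start_time is reset before the
// second pass so each pass is timed and traced separately.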
2170 for(i = 0; i < (runtime_debug.gctrace > 1 ? 2 : 1); i++) {
2171 if(i > 0)
2172 a.start_time = runtime_nanotime();
2173 // switch to g0, call gc(&a), then switch back
2174 g = runtime_g();
2175 g->param = &a;
2176 g->atomicstatus = _Gwaiting;
2177 g->waitreason = runtime_gostringnocopy((const byte*)"garbage collection");
2178 runtime_mcall(mgc);
2179 m = runtime_m();
2182 // all done
2183 m->gcing = 0;
2184 m->locks++;
2185 runtime_releaseWorldsema();
2186 runtime_startTheWorldWithSema();
2187 m->locks--;
2189 // now that gc is done, kick off finalizer thread if needed
2190 if(!ConcurrentSweep) {
2191 // give the queued finalizers, if any, a chance to run
2192 runtime_gosched();
2193 } else {
2194 // For gccgo, let other goroutines run.
2195 runtime_gosched();
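// mgc is the target of the runtime_mcall above: it runs on the g0 stack,
// calls gc() with the argument block stashed in gp->param, then marks the
// original goroutine runnable again and resumes it via runtime_gogo.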
2199 static void
2200 mgc(G *gp)
2202 gc(gp->param);
2203 gp->param = nil;
2204 gp->atomicstatus = _Grunning;
2205 runtime_gogo(gp);
2208 static void
2209 gc(struct gc_args *args)
2211 M *m;
2212 int64 tm0, tm1, tm2, tm3, tm4;
2213 uint64 heap0, heap1, obj, ninstr;
2214 GCStats stats;
2215 uint32 i;
2216 MStats *pmstats;
2217 // Eface eface;
2219 m = runtime_m();
2221 if(runtime_debug.allocfreetrace)
2222 runtime_tracegc();
2224 m->traceback = 2;
2225 tm0 = args->start_time;
2226 work.tstart = args->start_time;
2228 if(CollectStats)
2229 runtime_memclr((byte*)&gcstats, sizeof(gcstats));
2231 m->locks++; // disable gc during mallocs in parforalloc
2232 if(work.markfor == nil)
2233 work.markfor = runtime_parforalloc(MaxGcproc);
2234 m->locks--;
2236 tm1 = 0;
2237 if(runtime_debug.gctrace)
2238 tm1 = runtime_nanotime();
2240 // Sweep whatever was not swept by bgsweep.
2241 while(runtime_sweepone() != (uintptr)-1)
2242 gcstats.npausesweep++;
2244 work.nwait = 0;
2245 work.ndone = 0;
2246 work.nproc = runtime_gcprocs();
2247 runtime_parforsetup(work.markfor, work.nproc, RootCount + runtime_allglen, false, &markroot_funcval);
2248 if(work.nproc > 1) {
2249 runtime_noteclear(&work.alldone);
2250 runtime_helpgc(work.nproc);
2253 tm2 = 0;
2254 if(runtime_debug.gctrace)
2255 tm2 = runtime_nanotime();
2257 gchelperstart();
2258 runtime_parfordo(work.markfor);
2259 scanblock(nil, true);
2261 tm3 = 0;
2262 if(runtime_debug.gctrace)
2263 tm3 = runtime_nanotime();
2265 bufferList[m->helpgc].busy = 0;
2266 if(work.nproc > 1)
2267 runtime_notesleep(&work.alldone);
2269 cachestats();
2270 // The next_gc calculation is tricky with concurrent sweep since we don't know the size of the live heap.
2271 // Estimate what the live heap size was after the previous GC (for tracing only).
2272 pmstats = mstats();
2273 heap0 = pmstats->next_gc*100/(gcpercent+100);
2274 // Conservatively set next_gc to a high value, assuming that everything is live;
2275 // concurrent/lazy sweep will reduce this number as it discovers new garbage.
2276 pmstats->next_gc = pmstats->heap_alloc+(pmstats->heap_alloc-runtime_stacks_sys)*gcpercent/100;
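// Worked example with hypothetical numbers (gcpercent assumed 100, stacks_sys
// assumed negligible): if the previous goal was next_gc = 8MB, then
// heap0 = 8MB*100/(100+100) = 4MB of estimated live heap; if heap_alloc is now
// 6MB, the new conservative goal is next_gc = 6MB + 6MB*100/100 = 12MB, which
// sweeping will then pull back down as it discovers garbage.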
2278 tm4 = runtime_nanotime();
2279 pmstats->last_gc = runtime_unixnanotime(); // must be Unix time to make sense to user
2280 pmstats->pause_ns[pmstats->numgc%nelem(pmstats->pause_ns)] = tm4 - tm0;
2281 pmstats->pause_end[pmstats->numgc%nelem(pmstats->pause_end)] = pmstats->last_gc;
2282 pmstats->pause_total_ns += tm4 - tm0;
2283 pmstats->numgc++;
2284 if(pmstats->debuggc)
2285 runtime_printf("pause %D\n", tm4-tm0);
2287 if(runtime_debug.gctrace) {
2288 heap1 = pmstats->heap_alloc;
2289 runtime_updatememstats(&stats);
2290 if(heap1 != pmstats->heap_alloc) {
2291 runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1, pmstats->heap_alloc);
2292 runtime_throw("mstats skew");
2294 obj = pmstats->nmalloc - pmstats->nfree;
2296 stats.nprocyield += work.markfor->nprocyield;
2297 stats.nosyield += work.markfor->nosyield;
2298 stats.nsleep += work.markfor->nsleep;
2300 runtime_printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
2301 " %d/%d/%d sweeps,"
2302 " %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
2303 pmstats->numgc, work.nproc, (tm1-tm0)/1000, (tm2-tm1)/1000, (tm3-tm2)/1000, (tm4-tm3)/1000,
2304 heap0>>20, heap1>>20, obj,
2305 pmstats->nmalloc, pmstats->nfree,
2306 sweep.nspan, gcstats.nbgsweep, gcstats.npausesweep,
2307 stats.nhandoff, stats.nhandoffcnt,
2308 work.markfor->nsteal, work.markfor->nstealcnt,
2309 stats.nprocyield, stats.nosyield, stats.nsleep);
2310 gcstats.nbgsweep = gcstats.npausesweep = 0;
2311 if(CollectStats) {
2312 runtime_printf("scan: %D bytes, %D objects, %D untyped, %D types from MSpan\n",
2313 gcstats.nbytes, gcstats.obj.cnt, gcstats.obj.notype, gcstats.obj.typelookup);
2314 if(gcstats.ptr.cnt != 0)
2315 runtime_printf("avg ptrbufsize: %D (%D/%D)\n",
2316 gcstats.ptr.sum/gcstats.ptr.cnt, gcstats.ptr.sum, gcstats.ptr.cnt);
2317 if(gcstats.obj.cnt != 0)
2318 runtime_printf("avg nobj: %D (%D/%D)\n",
2319 gcstats.obj.sum/gcstats.obj.cnt, gcstats.obj.sum, gcstats.obj.cnt);
2320 runtime_printf("rescans: %D, %D bytes\n", gcstats.rescan, gcstats.rescanbytes);
2322 runtime_printf("instruction counts:\n");
2323 ninstr = 0;
2324 for(i=0; i<nelem(gcstats.instr); i++) {
2325 runtime_printf("\t%d:\t%D\n", i, gcstats.instr[i]);
2326 ninstr += gcstats.instr[i];
2328 runtime_printf("\ttotal:\t%D\n", ninstr);
2330 runtime_printf("putempty: %D, getfull: %D\n", gcstats.putempty, gcstats.getfull);
2332 runtime_printf("markonly base lookup: bit %D word %D span %D\n", gcstats.markonly.foundbit, gcstats.markonly.foundword, gcstats.markonly.foundspan);
2333 runtime_printf("flushptrbuf base lookup: bit %D word %D span %D\n", gcstats.flushptrbuf.foundbit, gcstats.flushptrbuf.foundword, gcstats.flushptrbuf.foundspan);
2337 // We cache the current runtime_mheap.allspans array in sweep.spans,
2338 // because the former can be resized and freed.
2339 // Otherwise we would need to take the heap lock every time
2340 // we want to convert a span index to a span pointer.
2342 // Free the old cached array if necessary.
2343 if(sweep.spans && sweep.spans != runtime_mheap.allspans)
2344 runtime_SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &pmstats->other_sys);
2345 // Cache the current array.
2346 runtime_mheap.sweepspans = runtime_mheap.allspans;
2347 runtime_mheap.sweepgen += 2;
2348 runtime_mheap.sweepdone = false;
2349 sweep.spans = runtime_mheap.allspans;
2350 sweep.nspan = runtime_mheap.nspan;
2351 sweep.spanidx = 0;
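// A minimal sketch (hedged; runtime_sweepone is defined earlier in this file)
// of why the snapshot matters: a sweeper claims the next span with
//   idx = runtime_xadd(&sweep.spanidx, 1) - 1;
//   s = sweep.spans[idx];
// and so never needs the heap lock, even if runtime_mheap.allspans is grown
// and its old backing array freed while sweeping is still in progress.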
2353 // Temporarily disable concurrent sweep, because we see failures on builders.
2354 if(ConcurrentSweep && !args->eagersweep) {
2355 runtime_lock(&gclock);
2356 if(sweep.g == nil)
2357 sweep.g = __go_go(bgsweep, nil);
2358 else if(sweep.parked) {
2359 sweep.parked = false;
2360 runtime_ready(sweep.g);
2362 runtime_unlock(&gclock);
2363 } else {
2364 // Sweep all spans eagerly.
2365 while(runtime_sweepone() != (uintptr)-1)
2366 gcstats.npausesweep++;
2367 // Do an additional MProf_GC, because all 'free' events are now real as well.
2368 runtime_MProf_GC();
2371 runtime_MProf_GC();
2372 m->traceback = 0;
2375 void runtime_debug_readGCStats(Slice*)
2376 __asm__("runtime_debug.readGCStats");
2378 void
2379 runtime_debug_readGCStats(Slice *pauses)
2381 uint64 *p;
2382 uint32 i, n;
2383 MStats *pmstats;
2385 // Calling code in runtime/debug should make the slice large enough.
2386 pmstats = mstats();
2387 if((size_t)pauses->cap < nelem(pmstats->pause_ns)+3)
2388 runtime_throw("runtime: short slice passed to readGCStats");
2390 // Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
2391 p = (uint64*)pauses->array;
2392 runtime_lock(&runtime_mheap);
2393 n = pmstats->numgc;
2394 if(n > nelem(pmstats->pause_ns))
2395 n = nelem(pmstats->pause_ns);
2397 // The pause buffer is circular. The most recent pause is at
2398 // pause_ns[(numgc-1)%nelem(pause_ns)]; walking backward from there
2399 // moves farther back in time. We deliver the times
2400 // most recent first (in p[0]).
2401 for(i=0; i<n; i++) {
2402 p[i] = pmstats->pause_ns[(pmstats->numgc-1-i)%nelem(pmstats->pause_ns)];
2403 p[n+i] = pmstats->pause_end[(pmstats->numgc-1-i)%nelem(pmstats->pause_ns)];
2406 p[n+n] = pmstats->last_gc;
2407 p[n+n+1] = pmstats->numgc;
2408 p[n+n+2] = pmstats->pause_total_ns;
2409 runtime_unlock(&runtime_mheap);
2410 pauses->__count = n+n+3;
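// Layout of the slice handed back above, for reference:
//   p[0 .. n-1]    most recent pause durations, newest first
//   p[n .. 2n-1]   the matching pause end times
//   p[2n]          last GC time, p[2n+1] number of GCs, p[2n+2] total pause ns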
2413 int32
2414 runtime_setgcpercent(int32 in) {
2415 int32 out;
2417 runtime_lock(&runtime_mheap);
2418 if(gcpercent == GcpercentUnknown)
2419 gcpercent = readgogc();
2420 out = gcpercent;
2421 if(in < 0)
2422 in = -1;
2423 gcpercent = in;
2424 runtime_unlock(&runtime_mheap);
2425 return out;
2428 static void
2429 gchelperstart(void)
2431 M *m;
2433 m = runtime_m();
2434 if(m->helpgc < 0 || m->helpgc >= MaxGcproc)
2435 runtime_throw("gchelperstart: bad m->helpgc");
2436 if(runtime_xchg(&bufferList[m->helpgc].busy, 1))
2437 runtime_throw("gchelperstart: already busy");
2438 if(runtime_g() != m->g0)
2439 runtime_throw("gchelper not running on g0 stack");
2442 static void
2443 runfinq(void* dummy __attribute__ ((unused)))
2445 Finalizer *f;
2446 FinBlock *fb, *next;
2447 uint32 i;
2448 Eface ef;
2449 Iface iface;
2451 // This function blocks for long periods of time, and because it is written in C
2452 // we have no liveness information. Zero everything so that uninitialized pointers
2453 // do not cause memory leaks.
2454 f = nil;
2455 fb = nil;
2456 next = nil;
2457 i = 0;
2458 ef._type = nil;
2459 ef.data = nil;
2461 // force flush to memory
2462 USED(&f);
2463 USED(&fb);
2464 USED(&next);
2465 USED(&i);
2466 USED(&ef);
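// Hedged illustration of the three finalizer-argument cases handled in the
// loop below, in terms of how the finalizer was registered (e.g. via SetFinalizer):
//   func(p *T)          - the object pointer is passed directly;
//   func(p interface{}) - the pointer is boxed into an empty interface (Eface);
//   func(p io.Closer)   - a non-empty interface needs an itab from getitab.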
2468 for(;;) {
2469 runtime_lock(&finlock);
2470 fb = finq;
2471 finq = nil;
2472 if(fb == nil) {
2473 runtime_fingwait = true;
2474 runtime_g()->isbackground = true;
2475 runtime_parkunlock(&finlock, "finalizer wait");
2476 runtime_g()->isbackground = false;
2477 continue;
2479 runtime_unlock(&finlock);
2480 for(; fb; fb=next) {
2481 next = fb->next;
2482 for(i=0; i<(uint32)fb->cnt; i++) {
2483 const Type *fint;
2484 void *param;
2486 f = &fb->fin[i];
2487 fint = ((const Type**)f->ft->__in.array)[0];
2488 if((fint->__code & kindMask) == kindPtr) {
2489 // direct use of pointer
2490 param = &f->arg;
2491 } else if(((const InterfaceType*)fint)->__methods.__count == 0) {
2492 // convert to empty interface
2493 // using memcpy as const_cast.
2494 memcpy(&ef._type, &f->ot,
2495 sizeof ef._type);
2496 ef.data = f->arg;
2497 param = &ef;
2498 } else {
2499 // convert to interface with methods
2500 iface.tab = getitab(fint,
2501 (const Type*)f->ot,
2502 true);
2503 iface.data = f->arg;
2504 if(iface.data == nil)
2505 runtime_throw("invalid type conversion in runfinq");
2506 param = &iface;
2508 reflect_call(f->ft, f->fn, 0, 0, &param, nil);
2509 f->fn = nil;
2510 f->arg = nil;
2511 f->ot = nil;
2513 fb->cnt = 0;
2514 runtime_lock(&finlock);
2515 fb->next = finc;
2516 finc = fb;
2517 runtime_unlock(&finlock);
2520 // Zero everything that's dead, to avoid memory leaks.
2521 // See comment at top of function.
2522 f = nil;
2523 fb = nil;
2524 next = nil;
2525 i = 0;
2526 ef._type = nil;
2527 ef.data = nil;
2528 runtime_gc(1); // trigger another gc to clean up the finalized objects, if possible
2532 void
2533 runtime_createfing(void)
2535 if(fing != nil)
2536 return;
2537 // Here we use gclock instead of finlock,
2538 // because newproc1 can allocate, which can cause on-demand span sweep,
2539 // which can queue finalizers, which would deadlock.
2540 runtime_lock(&gclock);
2541 if(fing == nil)
2542 fing = __go_go(runfinq, nil);
2543 runtime_unlock(&gclock);
2546 G*
2547 runtime_wakefing(void)
2549 G *res;
2551 res = nil;
2552 runtime_lock(&finlock);
2553 if(runtime_fingwait && runtime_fingwake) {
2554 runtime_fingwait = false;
2555 runtime_fingwake = false;
2556 res = fing;
2558 runtime_unlock(&finlock);
2559 return res;
2562 void
2563 runtime_marknogc(void *v)
2565 uintptr *b, off, shift;
2567 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
2568 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
2569 shift = off % wordsPerBitmapWord;
2570 *b = (*b & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift;
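// Sketch of the bitmap addressing used here and in the functions below
// (hedged; assumes a 64-bit target where wordsPerBitmapWord == 16): the
// bitmap grows downward from arena_start, one bitmap word describing 16 heap
// words. For the heap word at word offset 35, b = arena_start - 35/16 - 1 =
// arena_start - 3 and shift = 35 % 16 = 3, so flags such as bitAllocated or
// bitScan for that word are set and tested as (bitXXX << shift) within *b.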
2573 void
2574 runtime_markscan(void *v)
2576 uintptr *b, off, shift;
2578 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
2579 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
2580 shift = off % wordsPerBitmapWord;
2581 *b |= bitScan<<shift;
2584 // mark the block at v as freed.
2585 void
2586 runtime_markfreed(void *v)
2588 uintptr *b, off, shift;
2590 if(0)
2591 runtime_printf("markfreed %p\n", v);
2593 if((byte*)v > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
2594 runtime_throw("markfreed: bad pointer");
2596 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
2597 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
2598 shift = off % wordsPerBitmapWord;
2599 *b = (*b & ~(bitMask<<shift)) | (bitAllocated<<shift);
2602 // check that the block at v of size n is marked freed.
2603 void
2604 runtime_checkfreed(void *v, uintptr n)
2606 uintptr *b, bits, off, shift;
2608 if(!runtime_checking)
2609 return;
2611 if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
2612 return; // not allocated, so okay
2614 off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
2615 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
2616 shift = off % wordsPerBitmapWord;
2618 bits = *b>>shift;
2619 if((bits & bitAllocated) != 0) {
2620 runtime_printf("checkfreed %p+%p: off=%p have=%p\n",
2621 v, n, off, bits & bitMask);
2622 runtime_throw("checkfreed: not freed");
2626 // mark the span of memory at v as having n blocks of the given size.
2627 // if leftover is true, there is leftover space at the end of the span.
2628 void
2629 runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
2631 uintptr *b, *b0, off, shift, i, x;
2632 byte *p;
2634 if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
2635 runtime_throw("markspan: bad pointer");
2637 if(runtime_checking) {
2638 // bits should be all zero at the start
2639 off = (byte*)v + size - runtime_mheap.arena_start;
2640 b = (uintptr*)(runtime_mheap.arena_start - off/wordsPerBitmapWord);
2641 for(i = 0; i < size/PtrSize/wordsPerBitmapWord; i++) {
2642 if(b[i] != 0)
2643 runtime_throw("markspan: span bits not zero");
2647 p = v;
2648 if(leftover) // mark a boundary just past end of last block too
2649 n++;
2651 b0 = nil;
2652 x = 0;
2653 for(; n-- > 0; p += size) {
2654 // Okay to use non-atomic ops here, because we control
2655 // the entire span, and each bitmap word has bits for only
2656 // one span, so no other goroutines are changing these
2657 // bitmap words.
2658 off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start; // word offset
2659 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
2660 shift = off % wordsPerBitmapWord;
2661 if(b0 != b) {
2662 if(b0 != nil)
2663 *b0 = x;
2664 b0 = b;
2665 x = 0;
2667 x |= bitAllocated<<shift;
2669 *b0 = x;
2672 // unmark the span of memory at v of length n bytes.
2673 void
2674 runtime_unmarkspan(void *v, uintptr n)
2676 uintptr *p, *b, off;
2678 if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
2679 runtime_throw("markspan: bad pointer");
2681 p = v;
2682 off = p - (uintptr*)runtime_mheap.arena_start; // word offset
2683 if(off % wordsPerBitmapWord != 0)
2684 runtime_throw("markspan: unaligned pointer");
2685 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
2686 n /= PtrSize;
2687 if(n%wordsPerBitmapWord != 0)
2688 runtime_throw("unmarkspan: unaligned length");
2689 // Okay to use non-atomic ops here, because we control
2690 // the entire span, and each bitmap word has bits for only
2691 // one span, so no other goroutines are changing these
2692 // bitmap words.
2693 n /= wordsPerBitmapWord;
2694 while(n-- > 0)
2695 *b-- = 0;
2698 void
2699 runtime_MHeap_MapBits(MHeap *h)
2701 size_t page_size;
2703 // Caller has added extra mappings to the arena.
2704 // Add extra mappings of bitmap words as needed.
2705 // We allocate extra bitmap pieces in chunks of bitmapChunk.
2706 enum {
2707 bitmapChunk = 8192
2709 uintptr n;
2711 n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
2712 n = ROUND(n, bitmapChunk);
2713 n = ROUND(n, PageSize);
2714 page_size = getpagesize();
2715 n = ROUND(n, page_size);
2716 if(h->bitmap_mapped >= n)
2717 return;
2719 runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats()->gc_sys);
2720 h->bitmap_mapped = n;
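// Worked example with hypothetical numbers (64-bit, wordsPerBitmapWord == 16,
// 4KB pages): with 32MB of arena in use, n = 32MB/16 = 2MB of bitmap, already
// a multiple of bitmapChunk and of the page sizes, so the new bitmap bytes
// (2MB minus whatever is already mapped) are mapped just below arena_start.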
2723 // typedmemmove copies a value of type t to dst from src.
2725 extern void typedmemmove(const Type* td, void *dst, const void *src)
2726 __asm__ (GOSYM_PREFIX "reflect.typedmemmove");
2728 void
2729 typedmemmove(const Type* td, void *dst, const void *src)
2731 runtime_memmove(dst, src, td->__size);
2734 // typedslicecopy copies a slice of elemType values from src to dst,
2735 // returning the number of elements copied.
2737 extern intgo typedslicecopy(const Type* elem, Slice dst, Slice src)
2738 __asm__ (GOSYM_PREFIX "reflect.typedslicecopy");
2740 intgo
2741 typedslicecopy(const Type* elem, Slice dst, Slice src)
2743 intgo n;
2744 void *dstp;
2745 void *srcp;
2747 n = dst.__count;
2748 if (n > src.__count)
2749 n = src.__count;
2750 if (n == 0)
2751 return 0;
2752 dstp = dst.__values;
2753 srcp = src.__values;
2754 memmove(dstp, srcp, (uintptr_t)n * elem->__size);
2755 return n;
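// Behaviour in brief, following directly from the code above: with
// dst.__count == 3 and src.__count == 5, three elements (3 * elem->__size
// bytes) are copied and 3 is returned, matching the builtin copy(). In this
// runtime both typedmemmove and typedslicecopy reduce to a plain memmove.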