1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector (GC).
9 // - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc)
10 // - parallel (up to MaxGcproc threads)
11 // - partially concurrent (mark is stop-the-world, while sweep is concurrent)
12 // - non-moving/non-compacting
13 // - full (non-partial)
16 // Next GC is after we've allocated an extra amount of memory proportional to
17 // the amount already in use. The proportion is controlled by GOGC environment variable
18 // (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
19 // (this mark is tracked in next_gc variable). This keeps the GC cost in linear
20 // proportion to the allocation cost. Adjusting GOGC just changes the linear constant
21 // (and also the amount of extra memory used).
24 // The sweep phase proceeds concurrently with normal program execution.
25 // The heap is swept span-by-span both lazily (when a goroutine needs another span)
26 // and concurrently in a background goroutine (this helps programs that are not CPU bound).
27 // However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
28 // and so next_gc calculation is tricky and happens as follows.
29 // At the end of the stop-the-world phase next_gc is conservatively set based on total
30 // heap size; all spans are marked as "needs sweeping".
31 // Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
32 // The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
33 // closer to the target value. However, this is not enough to avoid over-allocating memory.
34 // Consider that a goroutine wants to allocate a new span for a large object and
35 // there are no free swept spans, but there are small-object unswept spans.
36 // If the goroutine naively allocates a new span, it can surpass the yet-unknown
37 // target next_gc value. In order to prevent such cases (1) when a goroutine needs
38 // to allocate a new small-object span, it sweeps small-object spans for the same
39 // object size until it frees at least one object; (2) when a goroutine needs to
40 // allocate large-object span from heap, it sweeps spans until it frees at least
41 // that many pages into heap. Together these two measures ensure that we don't surpass
42 // target next_gc value by a large margin. There is an exception: if a goroutine sweeps
43 // and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
44 // but there can still be other one-page unswept spans which could be combined into a two-page span.
45 // It's critical to ensure that no operations proceed on unswept spans (that would corrupt
46 // mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
47 // so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
48 // When a goroutine explicitly frees an object or sets a finalizer, it ensures that
49 // the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
50 // The finalizer goroutine is kicked off only when all spans are swept.
51 // When the next GC starts, it sweeps all not-yet-swept spans (if any).
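// An illustrative sketch (not part of this file) of the pacing arithmetic
// described above.  The helper names are hypothetical; the real updates are
// performed in gc() and runtime_MSpan_Sweep() further down in this file.
//
//	// Target chosen at the end of the stop-the-world phase: with GOGC=100
//	// and 4M live, collect again around 8M.
//	static uint64
//	next_gc_target(uint64 live_heap, int32 gogc)
//	{
//		return live_heap + live_heap*(uint64)gogc/100;
//	}
//
//	// Credit applied as spans are swept: each freed byte lowers next_gc by
//	// roughly (GOGC+100)/100 bytes, pulling the conservative post-GC
//	// estimate down toward the target for the heap that is actually live.
//	static void
//	sweep_credit(uint64 *next_gc, uint64 freed_bytes, int32 gogc)
//	{
//		*next_gc -= freed_bytes*(uint64)(gogc+100)/100;
//	}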
61 // Map gccgo field names to gc field names.
62 // Slice aka __go_open_array.
63 #define array __values
64 #define cap __capacity
65 // Iface aka __go_interface
68 typedef struct __go_map Hmap;
69 // Type aka __go_type_descriptor
70 #define string __reflection
71 // PtrType aka __go_ptr_type
72 #define elem __element_type
74 #ifdef USING_SPLIT_STACK
76 extern void * __splitstack_find (void *, void *, size_t *, void **, void **, void **);
79 extern void * __splitstack_find_context (void *context[10], size_t *, void **, void **, void **);
89 WorkbufSize = 16*1024,
90 FinBlockSize = 4*1024,
93 IntermediateBufferCapacity = 64,
95 // Bits in type information
98 PC_BITS = PRECISE | LOOP,
108 #define GcpercentUnknown (-2)
110 // Initialized from $GOGC. GOGC=off means no gc.
111 static int32 gcpercent = GcpercentUnknown;
113 static FuncVal* poolcleanup;
115 void sync_runtime_registerPoolCleanup(FuncVal*)
116 __asm__ (GOSYM_PREFIX "sync.runtime_registerPoolCleanup");
119 sync_runtime_registerPoolCleanup(FuncVal *f)
131 if(poolcleanup != nil) {
132 __builtin_call_with_static_chain(poolcleanup->fn(),
136 for(pp=runtime_allp; (p=*pp) != nil; pp++) {
137 // clear tinyalloc pool
148 typedef struct Workbuf Workbuf;
151 #define SIZE (WorkbufSize-sizeof(LFNode)-sizeof(uintptr))
152 LFNode node; // must be first
154 Obj obj[SIZE/sizeof(Obj) - 1];
155 uint8 _padding[SIZE%sizeof(Obj) + sizeof(Obj)];
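// Worked example of the sizing above on a 64-bit target, assuming Obj is
// three pointer-sized words (24 bytes) and LFNode plus the nobj counter take
// 24 bytes (illustrative figures, not taken from this file):
//
//	SIZE             = 16384 - 16 - 8  = 16360
//	nelem(obj)       = 16360/24 - 1    = 680
//	sizeof(_padding) = 16360%24 + 24   = 40
//	total            = 16 + 8 + 680*24 + 40 = 16384 = WorkbufSize
//
// i.e. the padding rounds the struct back up so that sizeof(Workbuf) is an
// exact multiple of WorkbufSize, which scanblock checks below.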
159 typedef struct Finalizer Finalizer;
164 const struct __go_func_type *ft;
168 typedef struct FinBlock FinBlock;
178 static Lock finlock; // protects the following variables
179 static FinBlock *finq; // list of finalizers that are to be executed
180 static FinBlock *finc; // cache of free blocks
181 static FinBlock *allfin; // list of all blocks
182 bool runtime_fingwait;
183 bool runtime_fingwake;
188 static void runfinq(void*);
189 static void bgsweep(void*);
190 static Workbuf* getempty(Workbuf*);
191 static Workbuf* getfull(Workbuf*);
192 static void putempty(Workbuf*);
193 static Workbuf* handoff(Workbuf*);
194 static void gchelperstart(void);
195 static void flushallmcaches(void);
196 static void addstackroots(G *gp, Workbuf **wbufp);
199 uint64 full; // lock-free list of full blocks
200 uint64 wempty; // lock-free list of empty blocks
201 byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
204 volatile uint32 nwait;
205 volatile uint32 ndone;
212 } work __attribute__((aligned(8)));
215 GC_DEFAULT_PTR = GC_NUM_INSTR,
235 uint64 instr[GC_NUM_INSTR2];
252 // markonly marks an object. It returns true if the object
253 // has been marked by this function, false otherwise.
254 // This function doesn't append the object to any buffer.
256 markonly(const void *obj)
259 uintptr *bitp, bits, shift, x, xbits, off, j;
263 // Words outside the arena cannot be pointers.
264 if((const byte*)obj < runtime_mheap.arena_start || (const byte*)obj >= runtime_mheap.arena_used)
267 // obj may be a pointer to a live object.
268 // Try to find the beginning of the object.
270 // Round down to word boundary.
271 obj = (const void*)((uintptr)obj & ~((uintptr)PtrSize-1));
273 // Find bits for this word.
274 off = (const uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
275 bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
276 shift = off % wordsPerBitmapWord;
278 bits = xbits >> shift;
280 // Pointing at the beginning of a block?
281 if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
283 runtime_xadd64(&gcstats.markonly.foundbit, 1);
287 // Pointing just past the beginning?
288 // Scan backward a little to find a block boundary.
289 for(j=shift; j-->0; ) {
290 if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
294 runtime_xadd64(&gcstats.markonly.foundword, 1);
299 // Otherwise consult span table to find beginning.
300 // (Manually inlined copy of MHeap_LookupMaybe.)
301 k = (uintptr)obj>>PageShift;
303 x -= (uintptr)runtime_mheap.arena_start>>PageShift;
304 s = runtime_mheap.spans[x];
305 if(s == nil || k < s->start || (uintptr)obj >= s->limit || s->state != MSpanInUse)
307 p = (byte*)((uintptr)s->start<<PageShift);
308 if(s->sizeclass == 0) {
311 uintptr size = s->elemsize;
312 int32 i = ((const byte*)obj - p)/size;
316 // Now that we know the object header, reload bits.
317 off = (const uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
318 bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
319 shift = off % wordsPerBitmapWord;
321 bits = xbits >> shift;
323 runtime_xadd64(&gcstats.markonly.foundspan, 1);
326 // Now we have bits, bitp, and shift correct for
327 // obj pointing at the base of the object.
328 // Only care about allocated and not marked.
329 if((bits & (bitAllocated|bitMarked)) != bitAllocated)
332 *bitp |= bitMarked<<shift;
336 if(x & (bitMarked<<shift))
338 if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))
343 // The object is now marked
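// The bitmap lookup open-coded above (and again in flushptrbuf and
// runtime_MSpan_Sweep below) always follows the same pattern: the per-word GC
// bitmap grows downward from arena_start, so a heap word's bits live at a
// bitmap word and shift derived from its word offset.  A hedged sketch of
// that calculation as a stand-alone helper (hypothetical; the runtime inlines
// it by hand at each use):
static void
sketch_bitmap_for_word(void *p, uintptr **bitp, uintptr *shift)
{
	uintptr off;

	// Word offset of p within the heap arena.
	off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;
	// The bitmap word for this offset sits just below arena_start.
	*bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	// Bit position of this word's flags inside that bitmap word.
	*shift = off % wordsPerBitmapWord;
}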
347 // PtrTarget is a structure used by intermediate buffers.
348 // The intermediate buffers hold GC data before it
349 // is moved/flushed to the work buffer (Workbuf).
350 // The size of an intermediate buffer is very small,
351 // such as 32 or 64 elements.
352 typedef struct PtrTarget PtrTarget;
359 typedef struct Scanbuf Scanbuf;
377 typedef struct BufferList BufferList;
380 PtrTarget ptrtarget[IntermediateBufferCapacity];
381 Obj obj[IntermediateBufferCapacity];
383 byte pad[CacheLineSize];
385 static BufferList bufferList[MaxGcproc];
387 static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
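// A hedged sketch of how scanblock (below) feeds the per-proc PtrTarget
// buffer declared above: candidate pointers are appended cheaply and only
// when the small buffer fills up is flushptrbuf called to do the expensive
// find-block-start/mark/enqueue work in a batch.  The helper is hypothetical;
// scanblock open-codes this wherever it finds a candidate pointer.
static void flushptrbuf(Scanbuf *sbuf);

static void
sketch_ptrbuf_append(Scanbuf *sbuf, void *obj, uintptr ti)
{
	// Record the candidate pointer together with its type info (0 if unknown).
	*sbuf->ptr.pos++ = (PtrTarget){obj, ti};
	// Buffer full: flush the whole batch into the work buffer.
	if(sbuf->ptr.pos == sbuf->ptr.end)
		flushptrbuf(sbuf);
}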
389 // flushptrbuf moves data from the PtrTarget buffer to the work buffer.
390 // The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
391 // while the work buffer contains blocks which have been marked
392 // and are prepared to be scanned by the garbage collector.
394 // _wp, _wbuf, _nobj are input/output parameters specifying the work buffer.
396 // A simplified drawing explaining how the todo-list moves from a structure to another:
400 // Obj ------> PtrTarget (pointer targets)
405 // (find block start, mark and enqueue)
407 flushptrbuf(Scanbuf *sbuf)
409 byte *p, *arena_start, *obj;
410 uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti, n;
416 PtrTarget *ptrbuf_end;
418 arena_start = runtime_mheap.arena_start;
424 ptrbuf
= sbuf
->ptr
.begin
;
425 ptrbuf_end
= sbuf
->ptr
.pos
;
426 n
= ptrbuf_end
- sbuf
->ptr
.begin
;
427 sbuf
->ptr
.pos
= sbuf
->ptr
.begin
;
430 runtime_xadd64(&gcstats
.ptr
.sum
, n
);
431 runtime_xadd64(&gcstats
.ptr
.cnt
, 1);
434 // If buffer is nearly full, get a new one.
435 if(wbuf
== nil
|| nobj
+n
>= nelem(wbuf
->obj
)) {
438 wbuf
= getempty(wbuf
);
442 if(n
>= nelem(wbuf
->obj
))
443 runtime_throw("ptrbuf has to be smaller than WorkBuf");
446 while(ptrbuf
< ptrbuf_end
) {
451 // obj belongs to interval [mheap.arena_start, mheap.arena_used).
453 if(obj
< runtime_mheap
.arena_start
|| obj
>= runtime_mheap
.arena_used
)
454 runtime_throw("object is outside of mheap");
457 // obj may be a pointer to a live object.
458 // Try to find the beginning of the object.
460 // Round down to word boundary.
461 if(((uintptr
)obj
& ((uintptr
)PtrSize
-1)) != 0) {
462 obj
= (void*)((uintptr
)obj
& ~((uintptr
)PtrSize
-1));
466 // Find bits for this word.
467 off
= (uintptr
*)obj
- (uintptr
*)arena_start
;
468 bitp
= (uintptr
*)arena_start
- off
/wordsPerBitmapWord
- 1;
469 shift
= off
% wordsPerBitmapWord
;
471 bits
= xbits
>> shift
;
473 // Pointing at the beginning of a block?
474 if((bits
& (bitAllocated
|bitBlockBoundary
)) != 0) {
476 runtime_xadd64(&gcstats
.flushptrbuf
.foundbit
, 1);
482 // Pointing just past the beginning?
483 // Scan backward a little to find a block boundary.
484 for(j
=shift
; j
-->0; ) {
485 if(((xbits
>>j
) & (bitAllocated
|bitBlockBoundary
)) != 0) {
486 obj
= (byte
*)obj
- (shift
-j
)*PtrSize
;
490 runtime_xadd64(&gcstats
.flushptrbuf
.foundword
, 1);
495 // Otherwise consult span table to find beginning.
496 // (Manually inlined copy of MHeap_LookupMaybe.)
497 k
= (uintptr
)obj
>>PageShift
;
499 x
-= (uintptr
)arena_start
>>PageShift
;
500 s
= runtime_mheap
.spans
[x
];
501 if(s
== nil
|| k
< s
->start
|| (uintptr
)obj
>= s
->limit
|| s
->state
!= MSpanInUse
)
503 p
= (byte
*)((uintptr
)s
->start
<<PageShift
);
504 if(s
->sizeclass
== 0) {
508 int32 i
= ((byte
*)obj
- p
)/size
;
512 // Now that we know the object header, reload bits.
513 off
= (uintptr
*)obj
- (uintptr
*)arena_start
;
514 bitp
= (uintptr
*)arena_start
- off
/wordsPerBitmapWord
- 1;
515 shift
= off
% wordsPerBitmapWord
;
517 bits
= xbits
>> shift
;
519 runtime_xadd64(&gcstats
.flushptrbuf
.foundspan
, 1);
522 // Now we have bits, bitp, and shift correct for
523 // obj pointing at the base of the object.
524 // Only care about allocated and not marked.
525 if((bits
& (bitAllocated
|bitMarked
)) != bitAllocated
)
528 *bitp
|= bitMarked
<<shift
;
532 if(x
& (bitMarked
<<shift
))
534 if(runtime_casp((void**)bitp
, (void*)x
, (void*)(x
|(bitMarked
<<shift
))))
539 // If object has no pointers, don't need to scan further.
540 if((bits
& bitScan
) == 0)
543 // Ask span about size class.
544 // (Manually inlined copy of MHeap_Lookup.)
545 x
= (uintptr
)obj
>> PageShift
;
546 x
-= (uintptr
)arena_start
>>PageShift
;
547 s
= runtime_mheap
.spans
[x
];
551 *wp
= (Obj
){obj
, s
->elemsize
, ti
};
557 // If another proc wants a pointer, give it some.
558 if(work
.nwait
> 0 && nobj
> handoffThreshold
&& work
.full
== 0) {
560 wbuf
= handoff(wbuf
);
562 wp
= wbuf
->obj
+ nobj
;
571 flushobjbuf(Scanbuf
*sbuf
)
583 objbuf
= sbuf
->obj
.begin
;
584 objbuf_end
= sbuf
->obj
.pos
;
585 sbuf
->obj
.pos
= sbuf
->obj
.begin
;
587 while(objbuf
< objbuf_end
) {
590 // Align obj.b to a word boundary.
591 off
= (uintptr
)obj
.p
& (PtrSize
-1);
593 obj
.p
+= PtrSize
- off
;
594 obj
.n
-= PtrSize
- off
;
598 if(obj
.p
== nil
|| obj
.n
== 0)
601 // If buffer is full, get a new one.
602 if(wbuf
== nil
|| nobj
>= nelem(wbuf
->obj
)) {
605 wbuf
= getempty(wbuf
);
615 // If another proc wants a pointer, give it some.
616 if(work
.nwait
> 0 && nobj
> handoffThreshold
&& work
.full
== 0) {
618 wbuf
= handoff(wbuf
);
620 wp
= wbuf
->obj
+ nobj
;
628 // Program that scans the whole block and treats every block element as a potential pointer
629 static uintptr defaultProg
[2] = {PtrSize
, GC_DEFAULT_PTR
};
632 static uintptr chanProg
[2] = {0, GC_CHAN
};
634 // Local variables of a program fragment or loop
635 typedef struct GCFrame GCFrame
;
637 uintptr count
, elemsize
, b
;
638 const uintptr
*loop_or_ret
;
641 // Sanity check for the derived type info objti.
643 checkptr(void *obj
, uintptr objti
)
645 uintptr
*pc1
, type
, tisize
, i
, j
, x
;
652 runtime_throw("checkptr is debug only");
654 if((byte
*)obj
< runtime_mheap
.arena_start
|| (byte
*)obj
>= runtime_mheap
.arena_used
)
656 type
= runtime_gettype(obj
);
657 t
= (Type
*)(type
& ~(uintptr
)(PtrSize
-1));
660 x
= (uintptr
)obj
>> PageShift
;
661 x
-= (uintptr
)(runtime_mheap
.arena_start
)>>PageShift
;
662 s
= runtime_mheap
.spans
[x
];
663 objstart
= (byte
*)((uintptr
)s
->start
<<PageShift
);
664 if(s
->sizeclass
!= 0) {
665 i
= ((byte
*)obj
- objstart
)/s
->elemsize
;
666 objstart
+= i
*s
->elemsize
;
668 tisize
= *(uintptr
*)objti
;
669 // Sanity check for object size: it should fit into the memory block.
670 if((byte
*)obj
+ tisize
> objstart
+ s
->elemsize
) {
671 runtime_printf("object of type '%S' at %p/%p does not fit in block %p/%p\n",
672 *t
->string
, obj
, tisize
, objstart
, s
->elemsize
);
673 runtime_throw("invalid gc type info");
677 // If obj points to the beginning of the memory block,
678 // check type info as well.
679 if(t
->string
== nil
||
680 // Gob allocates unsafe pointers for indirection.
681 (runtime_strcmp((const char *)t
->string
->str
, (const char*)"unsafe.Pointer") &&
682 // Runtime and gc think differently about closures.
683 runtime_strstr((const char *)t
->string
->str
, (const char*)"struct { F uintptr") != (const char *)t
->string
->str
)) {
684 pc1
= (uintptr
*)objti
;
685 pc2
= (const uintptr
*)t
->__gc
;
686 // A simple best-effort check until first GC_END.
687 for(j
= 1; pc1
[j
] != GC_END
&& pc2
[j
] != GC_END
; j
++) {
688 if(pc1
[j
] != pc2
[j
]) {
689 runtime_printf("invalid gc type info for '%s', type info %p [%d]=%p, block info %p [%d]=%p\n",
690 t
->string
? (const int8
*)t
->string
->str
: (const int8
*)"?", pc1
, (int32
)j
, pc1
[j
], pc2
, (int32
)j
, pc2
[j
]);
691 runtime_throw("invalid gc type info");
697 // scanblock scans a block of n bytes starting at pointer b for references
698 // to other objects, scanning any it finds recursively until there are no
699 // unscanned objects left. Instead of using an explicit recursion, it keeps
700 // a work list in the Workbuf* structures and loops in the main function
701 // body. Keeping an explicit work list is easier on the stack allocator and
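// A hedged outline of the explicit work-list loop implemented below (many
// details, such as the per-proc Scanbuf and the GC program interpreter, are
// omitted):
//
//	for(;;) {
//		// ... scan the current block [b, b+n), appending discovered
//		// pointers via the PtrTarget/Obj buffers instead of recursing ...
//		if(sbuf.nobj == 0) {
//			putempty(sbuf.wbuf);            // our buffer is drained
//			sbuf.wbuf = getfull(sbuf.wbuf); // steal or wait for more work
//			if(sbuf.wbuf == nil)
//				return;                 // no work left anywhere
//			sbuf.nobj = sbuf.wbuf->nobj;
//		}
//		// pop the next Obj from sbuf.wbuf and loop
//	}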
704 scanblock(Workbuf *wbuf, bool keepworking)
706 byte *b, *arena_start, *arena_used;
707 uintptr n, i, end_b, elemsize, size, ti, objti, count, type, nobj;
708 uintptr precise_type, nominal_size;
709 const uintptr *pc, *chan_ret;
715 GCFrame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
716 BufferList *scanbuffers;
721 const ChanType *chantype;
724 if(sizeof(Workbuf) % WorkbufSize != 0)
725 runtime_throw("scanblock: size of Workbuf is suboptimal");
727 // Memory arena parameters.
728 arena_start
= runtime_mheap
.arena_start
;
729 arena_used
= runtime_mheap
.arena_used
;
731 stack_ptr
= stack
+nelem(stack
)-1;
733 precise_type
= false;
738 wp
= &wbuf
->obj
[nobj
];
745 scanbuffers
= &bufferList
[runtime_m()->helpgc
];
747 sbuf
.ptr
.begin
= sbuf
.ptr
.pos
= &scanbuffers
->ptrtarget
[0];
748 sbuf
.ptr
.end
= sbuf
.ptr
.begin
+ nelem(scanbuffers
->ptrtarget
);
750 sbuf
.obj
.begin
= sbuf
.obj
.pos
= &scanbuffers
->obj
[0];
751 sbuf
.obj
.end
= sbuf
.obj
.begin
+ nelem(scanbuffers
->obj
);
757 // (Silence the compiler)
765 // Each iteration scans the block b of length n, queueing pointers in
769 runtime_xadd64(&gcstats
.nbytes
, n
);
770 runtime_xadd64(&gcstats
.obj
.sum
, sbuf
.nobj
);
771 runtime_xadd64(&gcstats
.obj
.cnt
, 1);
776 runtime_printf("scanblock %p %D ti %p\n", b
, (int64
)n
, ti
);
778 pc
= (uintptr
*)(ti
& ~(uintptr
)PC_BITS
);
779 precise_type
= (ti
& PRECISE
);
780 stack_top
.elemsize
= pc
[0];
782 nominal_size
= pc
[0];
784 stack_top
.count
= 0; // 0 means an infinite number of iterations
785 stack_top
.loop_or_ret
= pc
+1;
790 // Simple sanity check for provided type info ti:
791 // The declared size of the object must be not larger than the actual size
792 // (it can be smaller due to inferior pointers).
793 // It's difficult to make a comprehensive check due to inferior pointers,
794 // reflection, gob, etc.
796 runtime_printf("invalid gc type info: type info size %p, block size %p\n", pc
[0], n
);
797 runtime_throw("invalid gc type info");
800 } else if(UseSpanType
) {
802 runtime_xadd64(&gcstats
.obj
.notype
, 1);
804 type
= runtime_gettype(b
);
807 runtime_xadd64(&gcstats
.obj
.typelookup
, 1);
809 t
= (Type
*)(type
& ~(uintptr
)(PtrSize
-1));
810 switch(type
& (PtrSize
-1)) {
811 case TypeInfo_SingleObject
:
812 pc
= (const uintptr
*)t
->__gc
;
813 precise_type
= true; // type information about 'b' is precise
815 stack_top
.elemsize
= pc
[0];
818 pc
= (const uintptr
*)t
->__gc
;
821 precise_type
= true; // type information about 'b' is precise
822 stack_top
.count
= 0; // 0 means an infinite number of iterations
823 stack_top
.elemsize
= pc
[0];
824 stack_top
.loop_or_ret
= pc
+1;
828 chantype
= (const ChanType
*)t
;
834 runtime_printf("scanblock %p %D type %p %S\n", b
, (int64
)n
, type
, *t
->string
);
835 runtime_throw("scanblock: invalid type");
839 runtime_printf("scanblock %p %D type %p %S pc=%p\n", b
, (int64
)n
, type
, *t
->string
, pc
);
843 runtime_printf("scanblock %p %D unknown type\n", b
, (int64
)n
);
848 runtime_printf("scanblock %p %D no span types\n", b
, (int64
)n
);
855 stack_top
.b
= (uintptr
)b
;
856 end_b
= (uintptr
)b
+ n
- PtrSize
;
860 runtime_xadd64(&gcstats
.instr
[pc
[0]], 1);
866 obj
= *(void**)(stack_top
.b
+ pc
[1]);
869 runtime_printf("gc_ptr @%p: %p ti=%p\n", stack_top
.b
+pc
[1], obj
, objti
);
872 checkptr(obj
, objti
);
876 sliceptr
= (Slice
*)(stack_top
.b
+ pc
[1]);
878 runtime_printf("gc_slice @%p: %p/%D/%D\n", sliceptr
, sliceptr
->array
, (int64
)sliceptr
->__count
, (int64
)sliceptr
->cap
);
879 if(sliceptr
->cap
!= 0) {
880 obj
= sliceptr
->array
;
881 // Can't use slice element type for scanning,
882 // because if it points to an array embedded
883 // in the beginning of a struct,
884 // we will scan the whole struct as the slice.
885 // So just obtain type info from heap.
891 obj
= *(void**)(stack_top
.b
+ pc
[1]);
893 runtime_printf("gc_aptr @%p: %p\n", stack_top
.b
+pc
[1], obj
);
898 stringptr
= (String
*)(stack_top
.b
+ pc
[1]);
900 runtime_printf("gc_string @%p: %p/%D\n", stack_top
.b
+pc
[1], stringptr
->str
, (int64
)stringptr
->len
);
901 if(stringptr
->len
!= 0)
902 markonly(stringptr
->str
);
907 eface
= (Eface
*)(stack_top
.b
+ pc
[1]);
910 runtime_printf("gc_eface @%p: %p %p\n", stack_top
.b
+pc
[1], eface
->__type_descriptor
, eface
->__object
);
911 if(eface
->__type_descriptor
== nil
)
915 t
= eface
->__type_descriptor
;
916 if((const byte
*)t
>= arena_start
&& (const byte
*)t
< arena_used
) {
917 union { const Type
*tc
; Type
*tr
; } u
;
919 *sbuf
.ptr
.pos
++ = (PtrTarget
){u
.tr
, 0};
920 if(sbuf
.ptr
.pos
== sbuf
.ptr
.end
)
925 if((byte
*)eface
->__object
>= arena_start
&& (byte
*)eface
->__object
< arena_used
) {
926 if(__go_is_pointer_type(t
)) {
927 if((t
->__code
& kindNoPointers
))
930 obj
= eface
->__object
;
931 if((t
->__code
& kindMask
) == kindPtr
) {
932 // Only use type information if it is a pointer-containing type.
933 // This matches the GC programs written by cmd/gc/reflect.c's
934 // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
935 et
= ((const PtrType
*)t
)->elem
;
936 if(!(et
->__code
& kindNoPointers
))
937 objti
= (uintptr
)((const PtrType
*)t
)->elem
->__gc
;
940 obj
= eface
->__object
;
941 objti
= (uintptr
)t
->__gc
;
947 iface
= (Iface
*)(stack_top
.b
+ pc
[1]);
950 runtime_printf("gc_iface @%p: %p/%p %p\n", stack_top
.b
+pc
[1], iface
->__methods
[0], nil
, iface
->__object
);
951 if(iface
->tab
== nil
)
955 if((byte
*)iface
->tab
>= arena_start
&& (byte
*)iface
->tab
< arena_used
) {
956 *sbuf
.ptr
.pos
++ = (PtrTarget
){iface
->tab
, 0};
957 if(sbuf
.ptr
.pos
== sbuf
.ptr
.end
)
962 if((byte
*)iface
->__object
>= arena_start
&& (byte
*)iface
->__object
< arena_used
) {
963 t
= (const Type
*)iface
->tab
[0];
964 if(__go_is_pointer_type(t
)) {
965 if((t
->__code
& kindNoPointers
))
968 obj
= iface
->__object
;
969 if((t
->__code
& kindMask
) == kindPtr
) {
970 // Only use type information if it is a pointer-containing type.
971 // This matches the GC programs written by cmd/gc/reflect.c's
972 // dgcsym1 in case TPTR32/case TPTR64. See rationale there.
973 et
= ((const PtrType
*)t
)->elem
;
974 if(!(et
->__code
& kindNoPointers
))
975 objti
= (uintptr
)((const PtrType
*)t
)->elem
->__gc
;
978 obj
= iface
->__object
;
979 objti
= (uintptr
)t
->__gc
;
985 while(stack_top
.b
<= end_b
) {
986 obj
= *(byte
**)stack_top
.b
;
988 runtime_printf("gc_default_ptr @%p: %p\n", stack_top
.b
, obj
);
989 stack_top
.b
+= PtrSize
;
990 if((byte
*)obj
>= arena_start
&& (byte
*)obj
< arena_used
) {
991 *sbuf
.ptr
.pos
++ = (PtrTarget
){obj
, 0};
992 if(sbuf
.ptr
.pos
== sbuf
.ptr
.end
)
999 if(--stack_top
.count
!= 0) {
1000 // Next iteration of a loop if possible.
1001 stack_top
.b
+= stack_top
.elemsize
;
1002 if(stack_top
.b
+ stack_top
.elemsize
<= end_b
+PtrSize
) {
1003 pc
= stack_top
.loop_or_ret
;
1008 // Stack pop if possible.
1009 if(stack_ptr
+1 < stack
+nelem(stack
)) {
1010 pc
= stack_top
.loop_or_ret
;
1011 stack_top
= *(++stack_ptr
);
1014 i
= (uintptr
)b
+ nominal_size
;
1017 // Quickly scan [b+i,b+n) for possible pointers.
1018 for(; i
<=end_b
; i
+=PtrSize
) {
1019 if(*(byte
**)i
!= nil
) {
1020 // Found a value that may be a pointer.
1021 // Do a rescan of the entire block.
1022 enqueue((Obj
){b
, n
, 0}, &sbuf
.wbuf
, &sbuf
.wp
, &sbuf
.nobj
);
1024 runtime_xadd64(&gcstats
.rescan
, 1);
1025 runtime_xadd64(&gcstats
.rescanbytes
, n
);
1033 case GC_ARRAY_START
:
1034 i
= stack_top
.b
+ pc
[1];
1040 *stack_ptr
-- = stack_top
;
1041 stack_top
= (GCFrame
){count
, elemsize
, i
, pc
};
1045 if(--stack_top
.count
!= 0) {
1046 stack_top
.b
+= stack_top
.elemsize
;
1047 pc
= stack_top
.loop_or_ret
;
1050 stack_top
= *(++stack_ptr
);
1057 *stack_ptr
-- = stack_top
;
1058 stack_top
= (GCFrame
){1, 0, stack_top
.b
+ pc
[1], pc
+3 /*return address*/};
1059 pc
= (const uintptr
*)((const byte
*)pc
+ *(const int32
*)(pc
+2)); // target of the CALL instruction
1063 obj
= (void*)(stack_top
.b
+ pc
[1]);
1069 runtime_printf("gc_region @%p: %D %p\n", stack_top
.b
+pc
[1], (int64
)size
, objti
);
1070 *sbuf
.obj
.pos
++ = (Obj
){obj
, size
, objti
};
1071 if(sbuf
.obj
.pos
== sbuf
.obj
.end
)
1076 chan
= *(Hchan
**)(stack_top
.b
+ pc
[1]);
1077 if(Debug
> 2 && chan
!= nil
)
1078 runtime_printf("gc_chan_ptr @%p: %p/%D/%D %p\n", stack_top
.b
+pc
[1], chan
, (int64
)chan
->qcount
, (int64
)chan
->dataqsiz
, pc
[2]);
1083 if(markonly(chan
)) {
1084 chantype
= (ChanType
*)pc
[2];
1085 if(!(chantype
->elem
->__code
& kindNoPointers
)) {
1096 // There are no heap pointers in struct Hchan,
1097 // so we can ignore the leading sizeof(Hchan) bytes.
1098 if(!(chantype
->elem
->__code
& kindNoPointers
)) {
1099 chancap
= chan
->dataqsiz
;
1100 if(chancap
> 0 && markonly(chan
->buf
)) {
1101 // TODO(atom): split into two chunks so that only the
1102 // in-use part of the circular buffer is scanned.
1103 // (Channel routines zero the unused part, so the current
1104 // code does not lead to leaks, it's just a little inefficient.)
1105 *sbuf
.obj
.pos
++ = (Obj
){chan
->buf
, chancap
*chantype
->elem
->__size
,
1106 (uintptr
)chantype
->elem
->__gc
| PRECISE
| LOOP
};
1107 if(sbuf
.obj
.pos
== sbuf
.obj
.end
)
1117 runtime_printf("runtime: invalid GC instruction %p at %p\n", pc
[0], pc
);
1118 runtime_throw("scanblock: invalid GC instruction");
1122 if((byte
*)obj
>= arena_start
&& (byte
*)obj
< arena_used
) {
1123 *sbuf
.ptr
.pos
++ = (PtrTarget
){obj
, objti
};
1124 if(sbuf
.ptr
.pos
== sbuf
.ptr
.end
)
1130 // Done scanning [b, b+n). Prepare for the next iteration of
1131 // the loop by setting b, n, ti to the parameters for the next block.
1133 if(sbuf
.nobj
== 0) {
1137 if(sbuf
.nobj
== 0) {
1140 putempty(sbuf
.wbuf
);
1143 // Emptied our buffer: refill.
1144 sbuf
.wbuf
= getfull(sbuf
.wbuf
);
1145 if(sbuf
.wbuf
== nil
)
1147 sbuf
.nobj
= sbuf
.wbuf
->nobj
;
1148 sbuf
.wp
= sbuf
.wbuf
->obj
+ sbuf
.wbuf
->nobj
;
1152 // Fetch b from the work buffer.
1161 static struct root_list* roots;
1164 __go_register_gc_roots (struct root_list* r)
1166 // FIXME: This needs locking if multiple goroutines can call
1167 // dlopen simultaneously.
1172 // Append obj to the work buffer.
1173 // _wbuf, _wp, _nobj are input/output parameters specifying the work buffer.
1175 enqueue(Obj obj
, Workbuf
**_wbuf
, Obj
**_wp
, uintptr
*_nobj
)
1182 runtime_printf("append obj(%p %D %p)\n", obj
.p
, (int64
)obj
.n
, obj
.ti
);
1184 // Align obj.b to a word boundary.
1185 off
= (uintptr
)obj
.p
& (PtrSize
-1);
1187 obj
.p
+= PtrSize
- off
;
1188 obj
.n
-= PtrSize
- off
;
1192 if(obj
.p
== nil
|| obj
.n
== 0)
1195 // Load work buffer state
1200 // If another proc wants a pointer, give it some.
1201 if(work
.nwait
> 0 && nobj
> handoffThreshold
&& work
.full
== 0) {
1203 wbuf
= handoff(wbuf
);
1205 wp
= wbuf
->obj
+ nobj
;
1208 // If buffer is full, get a new one.
1209 if(wbuf
== nil
|| nobj
>= nelem(wbuf
->obj
)) {
1212 wbuf
= getempty(wbuf
);
1221 // Save work buffer state
1228 enqueue1(Workbuf
**wbufp
, Obj obj
)
1233 if(wbuf
->nobj
>= nelem(wbuf
->obj
))
1234 *wbufp
= wbuf
= getempty(wbuf
);
1235 wbuf
->obj
[wbuf
->nobj
++] = obj
;
1239 markroot(ParFor
*desc
, uint32 i
)
1244 MSpan
**allspans
, *s
;
1250 wbuf
= getempty(nil
);
1251 // Note: if you add a case here, please also update heapdump.c:dumproots.
1254 // For gccgo this is both data and bss.
1256 struct root_list
*pl
;
1258 for(pl
= roots
; pl
!= nil
; pl
= pl
->next
) {
1259 struct root
*pr
= &pl
->roots
[0];
1261 void *decl
= pr
->decl
;
1264 enqueue1(&wbuf
, (Obj
){decl
, pr
->size
, 0});
1272 // For gccgo we use this for all the other global roots.
1273 enqueue1(&wbuf
, (Obj
){(byte
*)&runtime_m0
, sizeof runtime_m0
, 0});
1274 enqueue1(&wbuf
, (Obj
){(byte
*)&runtime_g0
, sizeof runtime_g0
, 0});
1275 enqueue1(&wbuf
, (Obj
){(byte
*)&runtime_allg
, sizeof runtime_allg
, 0});
1276 enqueue1(&wbuf
, (Obj
){(byte
*)&runtime_allm
, sizeof runtime_allm
, 0});
1277 enqueue1(&wbuf
, (Obj
){(byte
*)&runtime_allp
, sizeof runtime_allp
, 0});
1278 enqueue1(&wbuf
, (Obj
){(byte
*)&work
, sizeof work
, 0});
1279 runtime_proc_scan(&wbuf
, enqueue1
);
1280 runtime_MProf_Mark(&wbuf
, enqueue1
);
1281 runtime_time_scan(&wbuf
, enqueue1
);
1282 runtime_netpoll_scan(&wbuf
, enqueue1
);
1285 case RootFinalizers
:
1286 for(fb
=allfin
; fb
; fb
=fb
->alllink
)
1287 enqueue1(&wbuf
, (Obj
){(byte
*)fb
->fin
, fb
->cnt
*sizeof(fb
->fin
[0]), 0});
1291 // mark span types and MSpan.specials (to walk spans only once)
1294 allspans
= h
->allspans
;
1295 for(spanidx
=0; spanidx
<runtime_mheap
.nspan
; spanidx
++) {
1297 SpecialFinalizer
*spf
;
1299 s
= allspans
[spanidx
];
1300 if(s
->sweepgen
!= sg
) {
1301 runtime_printf("sweep %d %d\n", s
->sweepgen
, sg
);
1302 runtime_throw("gc: unswept span");
1304 if(s
->state
!= MSpanInUse
)
1306 // The garbage collector ignores type pointers stored in MSpan.types:
1307 // - Compiler-generated types are stored outside of heap.
1308 // - The reflect package has runtime-generated types cached in its data structures.
1309 // The garbage collector relies on finding the references via that cache.
1310 if(s
->types
.compression
== MTypes_Words
|| s
->types
.compression
== MTypes_Bytes
)
1311 markonly((byte
*)s
->types
.data
);
1312 for(sp
= s
->specials
; sp
!= nil
; sp
= sp
->next
) {
1313 if(sp
->kind
!= KindSpecialFinalizer
)
1315 // don't mark finalized object, but scan it so we
1316 // retain everything it points to.
1317 spf
= (SpecialFinalizer
*)sp
;
1318 // A finalizer can be set for an inner byte of an object, find object beginning.
1319 p
= (void*)((s
->start
<< PageShift
) + spf
->offset
/s
->elemsize
*s
->elemsize
);
1320 enqueue1(&wbuf
, (Obj
){p
, s
->elemsize
, 0});
1321 enqueue1(&wbuf
, (Obj
){(void*)&spf
->fn
, PtrSize
, 0});
1322 enqueue1(&wbuf
, (Obj
){(void*)&spf
->ft
, PtrSize
, 0});
1323 enqueue1(&wbuf
, (Obj
){(void*)&spf
->ot
, PtrSize
, 0});
1328 case RootFlushCaches
:
1333 // the rest is scanning goroutine stacks
1334 if(i
- RootCount
>= runtime_allglen
)
1335 runtime_throw("markroot: bad index");
1336 gp
= runtime_allg
[i
- RootCount
];
1337 // remember when we've first observed the G blocked
1338 // needed only to output in traceback
1339 if((gp
->atomicstatus
== _Gwaiting
|| gp
->atomicstatus
== _Gsyscall
) && gp
->waitsince
== 0)
1340 gp
->waitsince
= work
.tstart
;
1341 addstackroots(gp
, &wbuf
);
1347 scanblock(wbuf
, false);
1350 static const FuncVal markroot_funcval = { (void *) markroot };
1352 // Get an empty work buffer off the work.empty list,
1353 // allocating new buffers as needed.
1355 getempty(Workbuf *b)
1358 runtime_lfstackpush(&work.full, &b->node);
1359 b = (Workbuf*)runtime_lfstackpop(&work.wempty);
1361 // Need to allocate.
1362 runtime_lock(&work);
1363 if(work.nchunk < sizeof *b) {
1364 work.nchunk = 1<<20;
1365 work.chunk = runtime_SysAlloc(work.nchunk, &mstats()->gc_sys);
1366 if(work.chunk == nil)
1367 runtime_throw("runtime: cannot allocate memory");
1369 b = (Workbuf*)work.chunk;
1370 work.chunk += sizeof *b;
1371 work.nchunk -= sizeof *b;
1372 runtime_unlock(&work);
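// getempty() above amortizes allocation by carving Workbuf-sized pieces out
// of a 1MB chunk (work.chunk/work.nchunk) instead of asking the OS for each
// buffer.  A hedged, stand-alone sketch of that bump-allocation idea; the
// helper and its statics are hypothetical and not used by the runtime:
static byte*	sketch_chunk;
static uintptr	sketch_nchunk;

static void*
sketch_chunkalloc(uintptr size)
{
	void *p;

	if(sketch_nchunk < size) {
		// Chunk exhausted (or first call): grab a fresh 1MB chunk.
		sketch_nchunk = 1<<20;
		sketch_chunk = runtime_SysAlloc(sketch_nchunk, &mstats()->gc_sys);
		if(sketch_chunk == nil)
			runtime_throw("sketch_chunkalloc: out of memory");
	}
	// Bump-allocate the next piece; freed buffers go back onto a free list
	// (work.wempty above), never back into the chunk.
	p = sketch_chunk;
	sketch_chunk += size;
	sketch_nchunk -= size;
	return p;
}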
1379 putempty(Workbuf
*b
)
1382 runtime_xadd64(&gcstats
.putempty
, 1);
1384 runtime_lfstackpush(&work
.wempty
, &b
->node
);
1387 // Get a full work buffer off the work.full list, or return nil.
1395 runtime_xadd64(&gcstats
.getfull
, 1);
1398 runtime_lfstackpush(&work
.wempty
, &b
->node
);
1399 b
= (Workbuf
*)runtime_lfstackpop(&work
.full
);
1400 if(b
!= nil
|| work
.nproc
== 1)
1404 runtime_xadd(&work
.nwait
, +1);
1406 if(work
.full
!= 0) {
1407 runtime_xadd(&work
.nwait
, -1);
1408 b
= (Workbuf
*)runtime_lfstackpop(&work
.full
);
1411 runtime_xadd(&work
.nwait
, +1);
1413 if(work
.nwait
== work
.nproc
)
1416 m
->gcstats
.nprocyield
++;
1417 runtime_procyield(20);
1419 m
->gcstats
.nosyield
++;
1422 m
->gcstats
.nsleep
++;
1423 runtime_usleep(100);
1437 // Make new buffer with half of b's pointers.
1442 runtime_memmove(b1
->obj
, b
->obj
+b
->nobj
, n
*sizeof b1
->obj
[0]);
1443 m
->gcstats
.nhandoff
++;
1444 m
->gcstats
.nhandoffcnt
+= n
;
1446 // Put b on full list - let first half of b get stolen.
1447 runtime_lfstackpush(&work
.full
, &b
->node
);
1452 addstackroots(G
*gp
, Workbuf
**wbufp
)
1454 switch(gp
->atomicstatus
){
1456 runtime_printf("unexpected G.status %d (goroutine %p %D)\n", gp
->atomicstatus
, gp
, gp
->goid
);
1457 runtime_throw("mark - bad status");
1461 runtime_throw("mark - world not stopped");
1468 #ifdef USING_SPLIT_STACK
1476 if(gp
== runtime_g()) {
1477 // Scanning our own stack.
1478 sp
= __splitstack_find(nil
, nil
, &spsize
, &next_segment
,
1479 &next_sp
, &initial_sp
);
1480 } else if((mp
= gp
->m
) != nil
&& mp
->helpgc
) {
1481 // gchelper's stack is in active use and has no interesting pointers.
1484 // Scanning another goroutine's stack.
1485 // The goroutine is usually asleep (the world is stopped).
1487 // The exception is that if the goroutine is about to enter or might
1488 // have just exited a system call, it may be executing code such
1489 // as schedlock and may have needed to start a new stack segment.
1490 // Use the stack segment and stack pointer at the time of
1491 // the system call instead, since that won't change underfoot.
1492 if(gp
->gcstack
!= nil
) {
1494 spsize
= gp
->gcstacksize
;
1495 next_segment
= gp
->gcnextsegment
;
1496 next_sp
= gp
->gcnextsp
;
1497 initial_sp
= gp
->gcinitialsp
;
1499 sp
= __splitstack_find_context(&gp
->stackcontext
[0],
1500 &spsize
, &next_segment
,
1501 &next_sp
, &initial_sp
);
1505 enqueue1(wbufp
, (Obj
){sp
, spsize
, 0});
1506 while((sp
= __splitstack_find(next_segment
, next_sp
,
1507 &spsize
, &next_segment
,
1508 &next_sp
, &initial_sp
)) != nil
)
1509 enqueue1(wbufp
, (Obj
){sp
, spsize
, 0});
1516 if(gp
== runtime_g()) {
1517 // Scanning our own stack.
1518 bottom
= (byte
*)&gp
;
1519 } else if((mp
= gp
->m
) != nil
&& mp
->helpgc
) {
1520 // gchelper's stack is in active use and has no interesting pointers.
1523 // Scanning another goroutine's stack.
1524 // The goroutine is usually asleep (the world is stopped).
1525 bottom
= (byte
*)gp
->gcnextsp
;
1529 top
= (byte
*)gp
->gcinitialsp
+ gp
->gcstacksize
;
1531 enqueue1(wbufp
, (Obj
){bottom
, top
- bottom
, 0});
1533 enqueue1(wbufp
, (Obj
){top
, bottom
- top
, 0});
1538 runtime_queuefinalizer(void *p
, FuncVal
*fn
, const FuncType
*ft
, const PtrType
*ot
)
1543 runtime_lock(&finlock
);
1544 if(finq
== nil
|| finq
->cnt
== finq
->cap
) {
1546 finc
= runtime_persistentalloc(FinBlockSize
, 0, &mstats()->gc_sys
);
1547 finc
->cap
= (FinBlockSize
- sizeof(FinBlock
)) / sizeof(Finalizer
) + 1;
1548 finc
->alllink
= allfin
;
1556 f
= &finq
->fin
[finq
->cnt
];
1562 runtime_fingwake
= true;
1563 runtime_unlock(&finlock
);
1567 runtime_iterate_finq(void (*callback
)(FuncVal
*, void*, const FuncType
*, const PtrType
*))
1573 for(fb
= allfin
; fb
; fb
= fb
->alllink
) {
1574 for(i
= 0; i
< fb
->cnt
; i
++) {
1576 callback(f
->fn
, f
->arg
, f
->ft
, f
->ot
);
1582 runtime_MSpan_EnsureSwept(MSpan *s)
1588 // Caller must disable preemption.
1589 // Otherwise when this function returns the span can become unswept again
1590 // (if GC is triggered on another goroutine).
1591 if(m->locks == 0 && m->mallocing == 0 && g != m->g0)
1592 runtime_throw("MSpan_EnsureSwept: m is not locked");
1594 sg = runtime_mheap.sweepgen;
1595 if(runtime_atomicload(&s->sweepgen) == sg)
1597 if(runtime_cas(&s->sweepgen, sg-2, sg-1)) {
1598 runtime_MSpan_Sweep(s);
1601 // unfortunate condition, and we don't have efficient means to wait
1602 while(runtime_atomicload(&s->sweepgen) != sg)
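// A note on the sweepgen protocol used above and in runtime_MSpan_Sweep and
// runtime_sweepone below (inferred from the code in this file):
//
//	s->sweepgen == h->sweepgen - 2	the span needs sweeping
//	s->sweepgen == h->sweepgen - 1	the span is being swept by somebody
//	s->sweepgen == h->sweepgen	the span is swept and ready to use
//
// h->sweepgen is incremented by 2 at the start of every GC (see gc() below),
// which atomically moves every previously swept span back into the
// "needs sweeping" state.  Claiming a span for sweeping is therefore a
// single CAS:
//
//	if(runtime_cas(&s->sweepgen, sg-2, sg-1)) {
//		// we own the sweep; runtime_MSpan_Sweep stores sg when done
//	}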
1606 // Sweep frees or collects finalizers for blocks not marked in the mark phase.
1607 // It clears the mark bits in preparation for the next GC round.
1608 // Returns true if the span was returned to heap.
1610 runtime_MSpan_Sweep(MSpan
*s
)
1613 int32 cl
, n
, npages
, nfree
;
1614 uintptr size
, off
, *bitp
, shift
, bits
;
1622 uintptr type_data_inc
;
1624 Special
*special
, **specialp
, *y
;
1625 bool res
, sweepgenset
;
1629 // It's critical that we enter this function with preemption disabled,
1630 // GC must not start while we are in the middle of this function.
1631 if(m
->locks
== 0 && m
->mallocing
== 0 && runtime_g() != m
->g0
)
1632 runtime_throw("MSpan_Sweep: m is not locked");
1633 sweepgen
= runtime_mheap
.sweepgen
;
1634 if(s
->state
!= MSpanInUse
|| s
->sweepgen
!= sweepgen
-1) {
1635 runtime_printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
1636 s
->state
, s
->sweepgen
, sweepgen
);
1637 runtime_throw("MSpan_Sweep: bad span state");
1639 arena_start
= runtime_mheap
.arena_start
;
1645 // Chunk full of small blocks.
1646 npages
= runtime_class_to_allocnpages
[cl
];
1647 n
= (npages
<< PageShift
) / size
;
1653 sweepgenset
= false;
1655 // mark any free objects in this span so we don't collect them
1656 for(x
= s
->freelist
; x
!= nil
; x
= x
->next
) {
1657 // This is markonly(x) but faster because we don't need
1658 // atomic access and we're guaranteed to be pointing at
1659 // the head of a valid object.
1660 off
= (uintptr
*)x
- (uintptr
*)runtime_mheap
.arena_start
;
1661 bitp
= (uintptr
*)runtime_mheap
.arena_start
- off
/wordsPerBitmapWord
- 1;
1662 shift
= off
% wordsPerBitmapWord
;
1663 *bitp
|= bitMarked
<<shift
;
1666 // Unlink & free special records for any objects we're about to free.
1667 specialp
= &s
->specials
;
1668 special
= *specialp
;
1669 while(special
!= nil
) {
1670 // A finalizer can be set for an inner byte of an object, find object beginning.
1671 p
= (byte
*)(s
->start
<< PageShift
) + special
->offset
/size
*size
;
1672 off
= (uintptr
*)p
- (uintptr
*)arena_start
;
1673 bitp
= (uintptr
*)arena_start
- off
/wordsPerBitmapWord
- 1;
1674 shift
= off
% wordsPerBitmapWord
;
1675 bits
= *bitp
>>shift
;
1676 if((bits
& (bitAllocated
|bitMarked
)) == bitAllocated
) {
1677 // Find the exact byte for which the special was setup
1678 // (as opposed to object beginning).
1679 p
= (byte
*)(s
->start
<< PageShift
) + special
->offset
;
1680 // about to free object: splice out special record
1682 special
= special
->next
;
1683 *specialp
= special
;
1684 if(!runtime_freespecial(y
, p
, size
, false)) {
1685 // stop freeing of object if it has a finalizer
1686 *bitp
|= bitMarked
<< shift
;
1689 // object is still live: keep special record
1690 specialp
= &special
->next
;
1691 special
= *specialp
;
1695 type_data
= (byte
*)s
->types
.data
;
1696 type_data_inc
= sizeof(uintptr
);
1697 compression
= s
->types
.compression
;
1698 switch(compression
) {
1700 type_data
+= 8*sizeof(uintptr
);
1705 // Sweep through n objects of given size starting at p.
1706 // This thread owns the span now, so it can manipulate
1707 // the block bitmap without atomic operations.
1708 p
= (byte
*)(s
->start
<< PageShift
);
1709 for(; n
> 0; n
--, p
+= size
, type_data
+=type_data_inc
) {
1710 off
= (uintptr
*)p
- (uintptr
*)arena_start
;
1711 bitp
= (uintptr
*)arena_start
- off
/wordsPerBitmapWord
- 1;
1712 shift
= off
% wordsPerBitmapWord
;
1713 bits
= *bitp
>>shift
;
1715 if((bits
& bitAllocated
) == 0)
1718 if((bits
& bitMarked
) != 0) {
1719 *bitp
&= ~(bitMarked
<<shift
);
1723 if(runtime_debug
.allocfreetrace
)
1724 runtime_tracefree(p
, size
);
1726 // Clear mark and scan bits.
1727 *bitp
&= ~((bitScan
|bitMarked
)<<shift
);
1731 runtime_unmarkspan(p
, 1<<PageShift
);
1733 // important to set sweepgen before returning it to heap
1734 runtime_atomicstore(&s
->sweepgen
, sweepgen
);
1736 // See note about SysFault vs SysFree in malloc.goc.
1737 if(runtime_debug
.efence
)
1738 runtime_SysFault(p
, size
);
1740 runtime_MHeap_Free(&runtime_mheap
, s
, 1);
1741 c
->local_nlargefree
++;
1742 c
->local_largefree
+= size
;
1743 runtime_xadd64(&mstats()->next_gc
, -(uint64
)(size
* (gcpercent
+ 100)/100));
1746 // Free small object.
1747 switch(compression
) {
1749 *(uintptr
*)type_data
= 0;
1752 *(byte
*)type_data
= 0;
1755 if(size
> 2*sizeof(uintptr
))
1756 ((uintptr
*)p
)[1] = (uintptr
)0xdeaddeaddeaddeadll
; // mark as "needs to be zeroed"
1757 else if(size
> sizeof(uintptr
))
1758 ((uintptr
*)p
)[1] = 0;
1760 end
->next
= (MLink
*)p
;
1766 // We need to set s->sweepgen = h->sweepgen only when all blocks are swept,
1767 // because of the potential for a concurrent free/SetFinalizer.
1768 // But we need to set it before we make the span available for allocation
1769 // (return it to heap or mcentral), because allocation code assumes that a
1770 // span is already swept if available for allocation.
1772 if(!sweepgenset
&& nfree
== 0) {
1773 // The span must be in our exclusive ownership until we update sweepgen,
1774 // check for potential races.
1775 if(s
->state
!= MSpanInUse
|| s
->sweepgen
!= sweepgen
-1) {
1776 runtime_printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
1777 s
->state
, s
->sweepgen
, sweepgen
);
1778 runtime_throw("MSpan_Sweep: bad span state after sweep");
1780 runtime_atomicstore(&s
->sweepgen
, sweepgen
);
1783 c
->local_nsmallfree
[cl
] += nfree
;
1784 c
->local_cachealloc
-= nfree
* size
;
1785 runtime_xadd64(&mstats()->next_gc
, -(uint64
)(nfree
* size
* (gcpercent
+ 100)/100));
1786 res
= runtime_MCentral_FreeSpan(&runtime_mheap
.central
[cl
], s
, nfree
, head
.next
, end
);
1787 //MCentral_FreeSpan updates sweepgen
1792 // State of background sweep.
1793 // Protected by gclock.
1804 // background sweeping goroutine
1806 bgsweep(void* dummy
__attribute__ ((unused
)))
1808 runtime_g()->issystem
= 1;
1810 while(runtime_sweepone() != (uintptr
)-1) {
1814 runtime_lock(&gclock
);
1815 if(!runtime_mheap
.sweepdone
) {
1816 // This is possible if a GC has happened between sweepone
1817 // returning -1 and acquiring gclock.
1818 runtime_unlock(&gclock
);
1821 sweep
.parked
= true;
1822 runtime_g()->isbackground
= true;
1823 runtime_parkunlock(&gclock
, "GC sweep wait");
1824 runtime_g()->isbackground
= false;
1829 // returns number of pages returned to heap, or -1 if there is nothing to sweep
1831 runtime_sweepone(void)
1838 // increment locks to ensure that the goroutine is not preempted
1839 // in the middle of sweep thus leaving the span in an inconsistent state for next GC
1841 sg
= runtime_mheap
.sweepgen
;
1843 idx
= runtime_xadd(&sweep
.spanidx
, 1) - 1;
1844 if(idx
>= sweep
.nspan
) {
1845 runtime_mheap
.sweepdone
= true;
1849 s
= sweep
.spans
[idx
];
1850 if(s
->state
!= MSpanInUse
) {
1854 if(s
->sweepgen
!= sg
-2 || !runtime_cas(&s
->sweepgen
, sg
-2, sg
-1))
1857 runtime_throw("sweep of incache span");
1859 if(!runtime_MSpan_Sweep(s
))
1867 dumpspan(uint32 idx
)
1869 int32 sizeclass
, n
, npages
, i
, column
;
1876 s
= runtime_mheap
.allspans
[idx
];
1877 if(s
->state
!= MSpanInUse
)
1879 arena_start
= runtime_mheap
.arena_start
;
1880 p
= (byte
*)(s
->start
<< PageShift
);
1881 sizeclass
= s
->sizeclass
;
1883 if(sizeclass
== 0) {
1886 npages
= runtime_class_to_allocnpages
[sizeclass
];
1887 n
= (npages
<< PageShift
) / size
;
1890 runtime_printf("%p .. %p:\n", p
, p
+n
*size
);
1892 for(; n
>0; n
--, p
+=size
) {
1893 uintptr off
, *bitp
, shift
, bits
;
1895 off
= (uintptr
*)p
- (uintptr
*)arena_start
;
1896 bitp
= (uintptr
*)arena_start
- off
/wordsPerBitmapWord
- 1;
1897 shift
= off
% wordsPerBitmapWord
;
1898 bits
= *bitp
>>shift
;
1900 allocated
= ((bits
& bitAllocated
) != 0);
1902 for(i
=0; (uint32
)i
<size
; i
+=sizeof(void*)) {
1904 runtime_printf("\t");
1907 runtime_printf(allocated
? "(" : "[");
1908 runtime_printf("%p: ", p
+i
);
1910 runtime_printf(" ");
1913 runtime_printf("%p", *(void**)(p
+i
));
1915 if(i
+sizeof(void*) >= size
) {
1916 runtime_printf(allocated
? ") " : "] ");
1921 runtime_printf("\n");
1926 runtime_printf("\n");
1929 // A debugging function to dump the contents of memory
1931 runtime_memorydump(void)
1935 for(spanidx
=0; spanidx
<runtime_mheap
.nspan
; spanidx
++) {
1941 runtime_gchelper(void)
1945 runtime_m()->traceback
= 2;
1948 // parallel mark over gc roots
1949 runtime_parfordo(work
.markfor
);
1951 // help other threads scan secondary blocks
1952 scanblock(nil
, true);
1954 bufferList
[runtime_m()->helpgc
].busy
= 0;
1955 nproc
= work
.nproc
; // work.nproc can change right after we increment work.ndone
1956 if(runtime_xadd(&work
.ndone
, +1) == nproc
-1)
1957 runtime_notewakeup(&work
.alldone
);
1958 runtime_m()->traceback
= 0;
1967 for(pp
=runtime_allp
; (p
=*pp
) != nil
; pp
++) {
1971 runtime_purgecachedstats(c
);
1976 flushallmcaches(void)
1981 // Flush MCache's to MCentral.
1982 for(pp
=runtime_allp
; (p
=*pp
) != nil
; pp
++) {
1986 runtime_MCache_ReleaseAll(c
);
1991 runtime_updatememstats(GCStats
*stats
)
1996 uint64 stacks_inuse
, smallfree
;
2001 runtime_memclr((byte
*)stats
, sizeof(*stats
));
2003 for(mp
=runtime_allm
; mp
; mp
=mp
->alllink
) {
2004 //stacks_inuse += mp->stackinuse*FixedStack;
2006 src
= (uint64
*)&mp
->gcstats
;
2007 dst
= (uint64
*)stats
;
2008 for(i
=0; i
<sizeof(*stats
)/sizeof(uint64
); i
++)
2010 runtime_memclr((byte
*)&mp
->gcstats
, sizeof(mp
->gcstats
));
2014 pmstats
->stacks_inuse
= stacks_inuse
;
2015 pmstats
->mcache_inuse
= runtime_mheap
.cachealloc
.inuse
;
2016 pmstats
->mspan_inuse
= runtime_mheap
.spanalloc
.inuse
;
2017 pmstats
->sys
= pmstats
->heap_sys
+ pmstats
->stacks_sys
+ pmstats
->mspan_sys
+
2018 pmstats
->mcache_sys
+ pmstats
->buckhash_sys
+ pmstats
->gc_sys
+ pmstats
->other_sys
;
2020 // Calculate memory allocator stats.
2021 // During program execution we only count number of frees and amount of freed memory.
2022 // Current number of alive object in the heap and amount of alive heap memory
2023 // are calculated by scanning all spans.
2024 // Total number of mallocs is calculated as number of frees plus number of alive objects.
2025 // Similarly, total amount of allocated memory is calculated as amount of freed memory
2026 // plus amount of alive heap memory.
2028 pmstats
->total_alloc
= 0;
2029 pmstats
->nmalloc
= 0;
2031 for(i
= 0; i
< nelem(pmstats
->by_size
); i
++) {
2032 pmstats
->by_size
[i
].nmalloc
= 0;
2033 pmstats
->by_size
[i
].nfree
= 0;
2036 // Flush MCache's to MCentral.
2039 // Aggregate local stats.
2042 // Scan all spans and count number of alive objects.
2043 for(i
= 0; i
< runtime_mheap
.nspan
; i
++) {
2044 s
= runtime_mheap
.allspans
[i
];
2045 if(s
->state
!= MSpanInUse
)
2047 if(s
->sizeclass
== 0) {
2049 pmstats
->alloc
+= s
->elemsize
;
2051 pmstats
->nmalloc
+= s
->ref
;
2052 pmstats
->by_size
[s
->sizeclass
].nmalloc
+= s
->ref
;
2053 pmstats
->alloc
+= s
->ref
*s
->elemsize
;
2057 // Aggregate by size class.
2059 pmstats
->nfree
= runtime_mheap
.nlargefree
;
2060 for(i
= 0; i
< nelem(pmstats
->by_size
); i
++) {
2061 pmstats
->nfree
+= runtime_mheap
.nsmallfree
[i
];
2062 pmstats
->by_size
[i
].nfree
= runtime_mheap
.nsmallfree
[i
];
2063 pmstats
->by_size
[i
].nmalloc
+= runtime_mheap
.nsmallfree
[i
];
2064 smallfree
+= runtime_mheap
.nsmallfree
[i
] * runtime_class_to_size
[i
];
2066 pmstats->nmalloc += pmstats->nfree;
2068 // Calculate derived stats.
2069 pmstats->total_alloc = pmstats->alloc + runtime_mheap.largefree + smallfree;
2070 pmstats->heap_alloc = pmstats->alloc;
2071 pmstats->heap_objects = pmstats->nmalloc - pmstats->nfree;
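// A worked example of the accounting above (illustrative numbers only): if
// the spans currently hold 1000 live objects totalling 64KB, and the heap has
// recorded 4000 frees totalling 256KB of freed memory, then
//
//	nmalloc      = 1000 + 4000      = 5000 objects ever allocated
//	total_alloc  = 64KB + 256KB     = 320KB ever allocated
//	heap_objects = nmalloc - nfree  = 1000 live objects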
2074 // Structure of arguments passed to function gc().
2075 // This allows the arguments to be passed via runtime_mcall.
2078 int64 start_time
; // start time of GC in ns (just before stoptheworld)
2082 static void gc(struct gc_args
*args
);
2083 static void mgc(G
*gp
);
2091 s
= runtime_getenv("GOGC");
2095 if(s
.len
== 3 && runtime_strcmp((const char *)p
, "off") == 0)
2097 return runtime_atoi(p
, s
.len
);
2100 // force = 1 - do GC regardless of current heap usage
2101 // force = 2 - do GC and eager sweep
2103 runtime_gc(int32 force
)
2111 // The atomic operations are not atomic if the uint64s
2112 // are not aligned on uint64 boundaries. This has been
2113 // a problem in the past.
2114 if((((uintptr
)&work
.wempty
) & 7) != 0)
2115 runtime_throw("runtime: gc work buffer is misaligned");
2116 if((((uintptr
)&work
.full
) & 7) != 0)
2117 runtime_throw("runtime: gc work buffer is misaligned");
2119 // Make sure all registers are saved on stack so that
2120 // scanstack sees them.
2121 __builtin_unwind_init();
2123 // The gc is turned off (via enablegc) until
2124 // the bootstrap has completed.
2125 // Also, malloc gets called in the guts
2126 // of a number of libraries that might be
2127 // holding locks. To avoid priority inversion
2128 // problems, don't bother trying to run gc
2129 // while holding a lock. The next mallocgc
2130 // without a lock will do the gc instead.
2133 if(!pmstats
->enablegc
|| runtime_g() == m
->g0
|| m
->locks
> 0 || runtime_panicking
|| m
->preemptoff
.len
> 0)
2136 if(gcpercent
== GcpercentUnknown
) { // first time through
2137 runtime_lock(&runtime_mheap
);
2138 if(gcpercent
== GcpercentUnknown
)
2139 gcpercent
= readgogc();
2140 runtime_unlock(&runtime_mheap
);
2145 runtime_acquireWorldsema();
2146 if(force
==0 && pmstats
->heap_alloc
< pmstats
->next_gc
) {
2147 // typically threads which lost the race to grab
2148 // worldsema exit here when gc is done.
2149 runtime_releaseWorldsema();
2153 // Ok, we're doing it! Stop everybody else
2154 a
.start_time
= runtime_nanotime();
2155 a
.eagersweep
= force
>= 2;
2157 runtime_stopTheWorldWithSema();
2161 // Run gc on the g0 stack. We do this so that the g stack
2162 // we're currently running on will no longer change. Cuts
2163 // the root set down a bit (g0 stacks are not scanned, and
2164 // we don't need to scan gc's internal state). Also an
2165 // enabler for copyable stacks.
2166 for(i
= 0; i
< (runtime_debug
.gctrace
> 1 ? 2 : 1); i
++) {
2168 a
.start_time
= runtime_nanotime();
2169 // switch to g0, call gc(&a), then switch back
2172 g
->atomicstatus
= _Gwaiting
;
2173 g
->waitreason
= runtime_gostringnocopy((const byte
*)"garbage collection");
2181 runtime_releaseWorldsema();
2182 runtime_startTheWorldWithSema();
2185 // now that gc is done, kick off finalizer thread if needed
2186 if(!ConcurrentSweep
) {
2187 // give the queued finalizers, if any, a chance to run
2190 // For gccgo, let other goroutines run.
2200 gp
->atomicstatus
= _Grunning
;
2205 gc(struct gc_args
*args
)
2208 int64 tm0
, tm1
, tm2
, tm3
, tm4
;
2209 uint64 heap0
, heap1
, obj
, ninstr
;
2217 if(runtime_debug
.allocfreetrace
)
2221 tm0
= args
->start_time
;
2222 work
.tstart
= args
->start_time
;
2225 runtime_memclr((byte
*)&gcstats
, sizeof(gcstats
));
2227 m
->locks
++; // disable gc during mallocs in parforalloc
2228 if(work
.markfor
== nil
)
2229 work
.markfor
= runtime_parforalloc(MaxGcproc
);
2233 if(runtime_debug
.gctrace
)
2234 tm1
= runtime_nanotime();
2236 // Sweep whatever is not swept by bgsweep.
2237 while(runtime_sweepone() != (uintptr
)-1)
2238 gcstats
.npausesweep
++;
2242 work
.nproc
= runtime_gcprocs();
2243 runtime_parforsetup(work
.markfor
, work
.nproc
, RootCount
+ runtime_allglen
, false, &markroot_funcval
);
2244 if(work
.nproc
> 1) {
2245 runtime_noteclear(&work
.alldone
);
2246 runtime_helpgc(work
.nproc
);
2250 if(runtime_debug
.gctrace
)
2251 tm2
= runtime_nanotime();
2254 runtime_parfordo(work
.markfor
);
2255 scanblock(nil
, true);
2258 if(runtime_debug
.gctrace
)
2259 tm3
= runtime_nanotime();
2261 bufferList
[m
->helpgc
].busy
= 0;
2263 runtime_notesleep(&work
.alldone
);
2266 // next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
2267 // estimate what was live heap size after previous GC (for tracing only)
2269 heap0 = pmstats->next_gc*100/(gcpercent+100);
2270 // conservatively set next_gc to high value assuming that everything is live
2271 // concurrent/lazy sweep will reduce this number while discovering new garbage
2272 pmstats->next_gc = pmstats->heap_alloc+(pmstats->heap_alloc-runtime_stacks_sys)*gcpercent/100;
2274 tm4
= runtime_nanotime();
2275 pmstats
->last_gc
= runtime_unixnanotime(); // must be Unix time to make sense to user
2276 pmstats
->pause_ns
[pmstats
->numgc
%nelem(pmstats
->pause_ns
)] = tm4
- tm0
;
2277 pmstats
->pause_end
[pmstats
->numgc
%nelem(pmstats
->pause_end
)] = pmstats
->last_gc
;
2278 pmstats
->pause_total_ns
+= tm4
- tm0
;
2280 if(pmstats
->debuggc
)
2281 runtime_printf("pause %D\n", tm4
-tm0
);
2283 if(runtime_debug
.gctrace
) {
2284 heap1
= pmstats
->heap_alloc
;
2285 runtime_updatememstats(&stats
);
2286 if(heap1
!= pmstats
->heap_alloc
) {
2287 runtime_printf("runtime: mstats skew: heap=%D/%D\n", heap1
, pmstats
->heap_alloc
);
2288 runtime_throw("mstats skew");
2290 obj
= pmstats
->nmalloc
- pmstats
->nfree
;
2292 stats
.nprocyield
+= work
.markfor
->nprocyield
;
2293 stats
.nosyield
+= work
.markfor
->nosyield
;
2294 stats
.nsleep
+= work
.markfor
->nsleep
;
2296 runtime_printf("gc%d(%d): %D+%D+%D+%D us, %D -> %D MB, %D (%D-%D) objects,"
2298 " %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
2299 pmstats
->numgc
, work
.nproc
, (tm1
-tm0
)/1000, (tm2
-tm1
)/1000, (tm3
-tm2
)/1000, (tm4
-tm3
)/1000,
2300 heap0
>>20, heap1
>>20, obj
,
2301 pmstats
->nmalloc
, pmstats
->nfree
,
2302 sweep
.nspan
, gcstats
.nbgsweep
, gcstats
.npausesweep
,
2303 stats
.nhandoff
, stats
.nhandoffcnt
,
2304 work
.markfor
->nsteal
, work
.markfor
->nstealcnt
,
2305 stats
.nprocyield
, stats
.nosyield
, stats
.nsleep
);
2306 gcstats
.nbgsweep
= gcstats
.npausesweep
= 0;
2308 runtime_printf("scan: %D bytes, %D objects, %D untyped, %D types from MSpan\n",
2309 gcstats
.nbytes
, gcstats
.obj
.cnt
, gcstats
.obj
.notype
, gcstats
.obj
.typelookup
);
2310 if(gcstats
.ptr
.cnt
!= 0)
2311 runtime_printf("avg ptrbufsize: %D (%D/%D)\n",
2312 gcstats
.ptr
.sum
/gcstats
.ptr
.cnt
, gcstats
.ptr
.sum
, gcstats
.ptr
.cnt
);
2313 if(gcstats
.obj
.cnt
!= 0)
2314 runtime_printf("avg nobj: %D (%D/%D)\n",
2315 gcstats
.obj
.sum
/gcstats
.obj
.cnt
, gcstats
.obj
.sum
, gcstats
.obj
.cnt
);
2316 runtime_printf("rescans: %D, %D bytes\n", gcstats
.rescan
, gcstats
.rescanbytes
);
2318 runtime_printf("instruction counts:\n");
2320 for(i
=0; i
<nelem(gcstats
.instr
); i
++) {
2321 runtime_printf("\t%d:\t%D\n", i
, gcstats
.instr
[i
]);
2322 ninstr
+= gcstats
.instr
[i
];
2324 runtime_printf("\ttotal:\t%D\n", ninstr
);
2326 runtime_printf("putempty: %D, getfull: %D\n", gcstats
.putempty
, gcstats
.getfull
);
2328 runtime_printf("markonly base lookup: bit %D word %D span %D\n", gcstats
.markonly
.foundbit
, gcstats
.markonly
.foundword
, gcstats
.markonly
.foundspan
);
2329 runtime_printf("flushptrbuf base lookup: bit %D word %D span %D\n", gcstats
.flushptrbuf
.foundbit
, gcstats
.flushptrbuf
.foundword
, gcstats
.flushptrbuf
.foundspan
);
2333 // We cache current runtime_mheap.allspans array in sweep.spans,
2334 // because the former can be resized and freed.
2335 // Otherwise we would need to take heap lock every time
2336 // we want to convert span index to span pointer.
2338 // Free the old cached array if necessary.
2339 if(sweep
.spans
&& sweep
.spans
!= runtime_mheap
.allspans
)
2340 runtime_SysFree(sweep
.spans
, sweep
.nspan
*sizeof(sweep
.spans
[0]), &pmstats
->other_sys
);
2341 // Cache the current array.
2342 runtime_mheap
.sweepspans
= runtime_mheap
.allspans
;
2343 runtime_mheap
.sweepgen
+= 2;
2344 runtime_mheap
.sweepdone
= false;
2345 sweep
.spans
= runtime_mheap
.allspans
;
2346 sweep
.nspan
= runtime_mheap
.nspan
;
2349 // Temporarily disable concurrent sweep, because we see failures on builders.
2350 if(ConcurrentSweep
&& !args
->eagersweep
) {
2351 runtime_lock(&gclock
);
2353 sweep
.g
= __go_go(bgsweep
, nil
);
2354 else if(sweep
.parked
) {
2355 sweep
.parked
= false;
2356 runtime_ready(sweep
.g
);
2358 runtime_unlock(&gclock
);
2360 // Sweep all spans eagerly.
2361 while(runtime_sweepone() != (uintptr
)-1)
2362 gcstats
.npausesweep
++;
2363 // Do an additional mProf_GC, because all 'free' events are now real as well.
2371 void runtime_debug_readGCStats(Slice
*)
2372 __asm__("runtime_debug.readGCStats");
2375 runtime_debug_readGCStats(Slice
*pauses
)
2381 // Calling code in runtime/debug should make the slice large enough.
2383 if((size_t)pauses
->cap
< nelem(pmstats
->pause_ns
)+3)
2384 runtime_throw("runtime: short slice passed to readGCStats");
2386 // Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
2387 p
= (uint64
*)pauses
->array
;
2388 runtime_lock(&runtime_mheap
);
2390 if(n
> nelem(pmstats
->pause_ns
))
2391 n
= nelem(pmstats
->pause_ns
);
	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%nelem(pause_ns)]; earlier pauses sit at decreasing
	// indices from there. We deliver the times most recent first (in p[0]).
	for(i = 0; i < n; i++)
		p[i] = pmstats->pause_ns[(pmstats->numgc-1-i)%nelem(pmstats->pause_ns)];
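	// Worked example (illustrative numbers): with nelem(pause_ns) == 256 and
	// numgc == 300, the most recent pause is pause_ns[(300-1)%256] == pause_ns[43],
	// so p[0] = pause_ns[43], p[1] = pause_ns[42], and so on backwards in time.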
	p[n] = pmstats->last_gc;
	p[n+1] = pmstats->numgc;
	p[n+2] = pmstats->pause_total_ns;
	runtime_unlock(&runtime_mheap);
	pauses->__count = n+3;
}
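// Layout of the returned slice: p[0..n-1] hold pause times (most recent first),
// p[n] the absolute time of the last GC, p[n+1] the GC count, and p[n+2] the
// total pause time; hence the final __count of n+3.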
int32
runtime_setgcpercent(int32 in) {
	int32 out;

	runtime_lock(&runtime_mheap);
	if(gcpercent == GcpercentUnknown)
		gcpercent = readgogc();
	out = gcpercent;
	if(in < 0)
		in = -1;
	gcpercent = in;
	runtime_unlock(&runtime_mheap);
	return out;
}
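// This backs runtime/debug.SetGCPercent: the previous setting is returned, and a
// negative value effectively disables automatic collection (per the documented
// SetGCPercent behavior).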
static void
gchelperstart(void)
{
	M *m;

	m = runtime_m();
	if(m->helpgc < 0 || m->helpgc >= MaxGcproc)
		runtime_throw("gchelperstart: bad m->helpgc");
	if(runtime_xchg(&bufferList[m->helpgc].busy, 1))
		runtime_throw("gchelperstart: already busy");
	if(runtime_g() != m->g0)
		runtime_throw("gchelper not running on g0 stack");
}
static void
runfinq(void* dummy __attribute__ ((unused)))
{
	Finalizer *f;
	FinBlock *fb, *next;
	uint32 i;
	Eface ef;
	Iface iface;

	// This function blocks for long periods of time, and because it is written in C
	// we have no liveness information. Zero everything so that uninitialized pointers
	// do not cause memory leaks.
	f = nil;
	fb = nil;
	next = nil;
	i = 0;
	ef.__type_descriptor = nil;
	ef.__object = nil;
	// force flush to memory

	for(;;) {
		runtime_lock(&finlock);
		fb = finq;
		finq = nil;
		if(fb == nil) {
			runtime_fingwait = true;
			runtime_g()->isbackground = true;
			runtime_parkunlock(&finlock, "finalizer wait");
			runtime_g()->isbackground = false;
			continue;
		}
		runtime_unlock(&finlock);
		for(; fb; fb=next) {
			next = fb->next;
			for(i=0; i<(uint32)fb->cnt; i++) {
				const Type *fint;
				void *param;

				f = &fb->fin[i];
				fint = ((const Type**)f->ft->__in.array)[0];
				if((fint->__code & kindMask) == kindPtr) {
					// direct use of pointer
					param = &f->arg;
				} else if(((const InterfaceType*)fint)->__methods.__count == 0) {
					// convert to empty interface
					ef.__type_descriptor = (const Type*)f->ot;
					ef.__object = f->arg;
					param = &ef;
				} else {
					// convert to interface with methods
					iface.__methods = __go_convert_interface_2((const Type*)fint,
										   (const Type*)f->ot, 1);
					iface.__object = f->arg;
					if(iface.__methods == nil)
						runtime_throw("invalid type conversion in runfinq");
					param = &iface;
				}
				reflect_call(f->ft, f->fn, 0, 0, &param, nil);
2506 runtime_lock(&finlock
);
2509 runtime_unlock(&finlock
);
2512 // Zero everything that's dead, to avoid memory leaks.
2513 // See comment at top of function.
2518 ef
.__type_descriptor
= nil
;
2520 runtime_gc(1); // trigger another gc to clean up the finalized objects, if possible
void
runtime_createfing(void)
{
	if(fing != nil)
		return;
	// Here we use gclock instead of finlock,
	// because newproc1 can allocate, which can cause on-demand span sweep,
	// which can queue finalizers, which would deadlock.
	runtime_lock(&gclock);
	if(fing == nil)
		fing = __go_go(runfinq, nil);
	runtime_unlock(&gclock);
}
G*
runtime_wakefing(void)
{
	G *res;

	res = nil;
	runtime_lock(&finlock);
	if(runtime_fingwait && runtime_fingwake) {
		runtime_fingwait = false;
		runtime_fingwake = false;
		res = fing;
	}
	runtime_unlock(&finlock);
	return res;
}
void
runtime_marknogc(void *v)
{
	uintptr *b, off, shift;

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	*b = (*b & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift;
}
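// The GC bitmap sits just below arena_start and grows downward: the bits for heap
// word number off live in the bitmap word at arena_start - off/wordsPerBitmapWord - 1,
// at shift off%wordsPerBitmapWord. Illustrative arithmetic, assuming a 64-bit build
// where wordsPerBitmapWord is 16: for off == 40 the bits land in the word at
// arena_start - 3 (40/16 == 2, minus one more), with shift 40%16 == 8.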
void
runtime_markscan(void *v)
{
	uintptr *b, off, shift;

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	*b |= bitScan<<shift;
}
// mark the block at v as freed.
void
runtime_markfreed(void *v)
{
	uintptr *b, off, shift;

	if(0)
		runtime_printf("markfreed %p\n", v);

	if((byte*)v > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markfreed: bad pointer");

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	*b = (*b & ~(bitMask<<shift)) | (bitAllocated<<shift);
}
// check that the block at v of size n is marked freed.
void
runtime_checkfreed(void *v, uintptr n)
{
	uintptr *b, bits, off, shift;

	if(!runtime_checking)
		return;

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		return;	// not allocated, so okay

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

	bits = *b>>shift;
	if((bits & bitAllocated) != 0) {
		runtime_printf("checkfreed %p+%p: off=%p have=%p\n",
			v, n, off, bits & bitMask);
		runtime_throw("checkfreed: not freed");
	}
}
// mark the span of memory at v as having n blocks of the given size.
// if leftover is true, there is left over space at the end of the span.
void
runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
{
	uintptr *b, *b0, off, shift, i, x;
	byte *p;

	if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markspan: bad pointer");

	if(runtime_checking) {
		// bits should be all zero at the start
		off = (byte*)v + size - runtime_mheap.arena_start;
		b = (uintptr*)(runtime_mheap.arena_start - off/wordsPerBitmapWord);
		for(i = 0; i < size/PtrSize/wordsPerBitmapWord; i++) {
			if(b[i] != 0)
				runtime_throw("markspan: span bits not zero");
		}
	}

	p = v;
	if(leftover)	// mark a boundary just past end of last block too
		n++;

	b0 = nil;
	x = 0;
	for(; n-- > 0; p += size) {
		// Okay to use non-atomic ops here, because we control
		// the entire span, and each bitmap word has bits for only
		// one span, so no other goroutines are changing these
		// bitmap words.
		off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;  // word offset
		b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		if(b0 != b) {
			if(b0 != nil)
				*b0 = x;
			b0 = b;
			x = 0;
		}
		x |= bitAllocated<<shift;
	}
	*b0 = x;
}
// unmark the span of memory at v of length n bytes.
void
runtime_unmarkspan(void *v, uintptr n)
{
	uintptr *p, *b, off;

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markspan: bad pointer");

	p = v;
	off = p - (uintptr*)runtime_mheap.arena_start;  // word offset
	if(off % wordsPerBitmapWord != 0)
		runtime_throw("markspan: unaligned pointer");
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	n /= PtrSize;
	if(n%wordsPerBitmapWord != 0)
		runtime_throw("unmarkspan: unaligned length");
	// Okay to use non-atomic ops here, because we control
	// the entire span, and each bitmap word has bits for only
	// one span, so no other goroutines are changing these
	// bitmap words.
	n /= wordsPerBitmapWord;
	while(n-- > 0)
		*b-- = 0;
}
void
runtime_MHeap_MapBits(MHeap *h)
{
	// Caller has added extra mappings to the arena.
	// Add extra mappings of bitmap words as needed.
	// We allocate extra bitmap pieces in chunks of bitmapChunk.
	uintptr n;
	size_t page_size;

	n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
	n = ROUND(n, bitmapChunk);
	n = ROUND(n, PageSize);
	page_size = getpagesize();
	n = ROUND(n, page_size);
	if(h->bitmap_mapped >= n)
		return;

	runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, h->arena_reserved, &mstats()->gc_sys);
	h->bitmap_mapped = n;
}
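// ROUND(x, m) rounds x up to a multiple of m, so the mapped bitmap size stays a
// multiple of bitmapChunk, of the runtime's PageSize, and of the OS page size.
// Illustrative arithmetic, assuming bitmapChunk == 8192 and 4096-byte pages:
// n == 10000 becomes ROUND(10000, 8192) == 16384, which is already a multiple of
// 4096, so SysMap extends the bitmap mapping to 16384 bytes below arena_start.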
// typedmemmove copies a value of type td to dst from src.
extern void typedmemmove(const Type* td, void *dst, const void *src)
  __asm__ (GOSYM_PREFIX "reflect.typedmemmove");

void
typedmemmove(const Type* td, void *dst, const void *src)
{
	runtime_memmove(dst, src, td->__size);
}
// typedslicecopy copies a slice of elemType values from src to dst,
// returning the number of elements copied.
extern intgo typedslicecopy(const Type* elem, Slice dst, Slice src)
  __asm__ (GOSYM_PREFIX "reflect.typedslicecopy");

intgo
typedslicecopy(const Type* elem, Slice dst, Slice src)
{
	intgo n;
	void *dstp;
	void *srcp;

	n = dst.__count;
	if (n > src.__count)
		n = src.__count;
	dstp = dst.__values;
	srcp = src.__values;
	memmove(dstp, srcp, (uintptr_t)n * elem->__size);
	return n;
}
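// Semantically this matches the built-in copy(): min(len(dst), len(src)) elements
// of size elem->__size are moved and that count is returned. memmove (rather than
// memcpy) keeps the copy correct even when dst and src share a backing array.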