// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Map gccgo field names to gc field names.
// Slice aka __go_open_array.
#define array __values
#define cap __capacity
// Iface aka __go_interface.
// Eface aka __go_empty_interface.
#define type __type_descriptor
typedef struct __go_map Hmap;
// Type aka __go_type_descriptor.
#define string __reflection
#define KindPtr GO_PTR
#define KindNoPointers GO_NO_POINTERS
// PtrType aka __go_ptr_type.
#define elem __element_type
#ifdef USING_SPLIT_STACK

extern void * __splitstack_find (void *, void *, size_t *, void **, void **,
				 void **);

extern void * __splitstack_find_context (void *context[10], size_t *, void **,
					 void **, void **);
	DebugMark = 0,  // run second pass to check mark
	ScanStackByFrames = 0,

	// Four bits per word (see #defines below).
	wordsPerBitmapWord = sizeof(void*)*8/4,
	bitShift = sizeof(void*)*8/4,

	IntermediateBufferCapacity = 64,

	// Bits in type information
	PC_BITS = PRECISE | LOOP,
// Bits in per-word bitmap.
// #defines because enum might not be able to hold the values.
//
// Each word in the bitmap describes wordsPerBitmapWord words
// of heap memory.  There are 4 bitmap bits dedicated to each heap word,
// so on a 64-bit system there is one bitmap word per 16 heap words.
// The bits in the word are packed together by type first, then by
// heap location, so each 64-bit bitmap word consists of, from top to bottom,
// the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits,
// then the 16 bitNoScan/bitBlockBoundary bits, then the 16 bitAllocated bits.
// This layout makes it easier to iterate over the bits of a given type.
//
// The bitmap starts at mheap.arena_start and extends *backward* from
// there.  On a 64-bit system the off'th word in the arena is tracked by
// the off/16+1'th word before mheap.arena_start.  (On a 32-bit system,
// the only difference is that the divisor is 8.)
//
// To pull out the bits corresponding to a given pointer p, we use:
//
//	off = p - (uintptr*)mheap.arena_start;  // word offset
//	b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
//	shift = off % wordsPerBitmapWord
//	bits = *b >> shift;
//	/* then test bits & bitAllocated, bits & bitMarked, etc. */
#define bitAllocated		((uintptr)1<<(bitShift*0))
#define bitNoScan		((uintptr)1<<(bitShift*1))	/* when bitAllocated is set */
#define bitMarked		((uintptr)1<<(bitShift*2))	/* when bitAllocated is set */
#define bitSpecial		((uintptr)1<<(bitShift*3))	/* when bitAllocated is set - has finalizer or being profiled */
#define bitBlockBoundary	((uintptr)1<<(bitShift*1))	/* when bitAllocated is NOT set */

#define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
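
// The lookup described above recurs throughout this file.  A minimal
// illustrative sketch of it as a helper, kept out of the build with #if 0;
// the BitInfo struct and helper name are hypothetical — the live code
// inlines this computation at each use site.
#if 0
typedef struct { uintptr *bitp; uintptr shift; uintptr bits; } BitInfo;

static BitInfo
bitinfo(void *p)
{
	BitInfo r;
	uintptr off;

	// Word offset of p within the arena.
	off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;
	// The bitmap grows backward from arena_start.
	r.bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	r.shift = off % wordsPerBitmapWord;
	r.bits = *r.bitp >> r.shift;
	return r;	// test r.bits & bitAllocated, r.bits & bitMarked, etc.
}
#endif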
// Holding worldsema grants an M the right to try to stop the world.
//
//	runtime_semacquire(&runtime_worldsema);
//	runtime_stoptheworld();
//
//	runtime_semrelease(&runtime_worldsema);
//	runtime_starttheworld();

uint32 runtime_worldsema = 1;
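
// A minimal sketch of that sequence as runtime_gc below actually performs
// it, kept out of the build with #if 0 (the two-argument form of
// runtime_semacquire is the one this runtime uses):
#if 0
	runtime_semacquire(&runtime_worldsema, false);
	runtime_stoptheworld();
	// ... collect while the world is stopped ...
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();
#endif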
// The size of Workbuf is N*PageSize.
typedef struct Workbuf Workbuf;
struct Workbuf
{
#define SIZE (2*PageSize-sizeof(LFNode)-sizeof(uintptr))
	LFNode  node; // must be first
	uintptr nobj;
	Obj     obj[SIZE/sizeof(Obj) - 1];
	uint8   _padding[SIZE%sizeof(Obj) + sizeof(Obj)];
#undef SIZE
};
typedef struct Finalizer Finalizer;
struct Finalizer
{
	FuncVal *fn;
	void *arg;
	const struct __go_func_type *ft;
	const struct __go_ptr_type *ot;
};

typedef struct FinBlock FinBlock;
static FinBlock *finq;   // list of finalizers that are to be executed
static FinBlock *finc;   // cache of free blocks
static FinBlock *allfin; // list of all blocks
static int32 fingwait;

static void runfinq(void*);
static Workbuf* getempty(Workbuf*);
static Workbuf* getfull(Workbuf*);
static void putempty(Workbuf*);
static Workbuf* handoff(Workbuf*);
static void gchelperstart(void);
	uint64 full;  // lock-free list of full blocks
	uint64 empty; // lock-free list of empty blocks
	byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
	volatile uint32 nwait;
	volatile uint32 ndone;
	volatile uint32 debugmarkdone;

	GC_DEFAULT_PTR = GC_NUM_INSTR,

	uint64 instr[GC_NUM_INSTR2];
// markonly marks an object. It returns true if the object
// has been marked by this function, false otherwise.
// This function doesn't append the object to any buffer.
	uintptr *bitp, bits, shift, x, xbits, off, j;

	// Words outside the arena cannot be pointers.
	if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)
	// obj may be a pointer to a live object.
	// Try to find the beginning of the object.

	// Round down to word boundary.
	obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));

	// Find bits for this word.
	off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
	bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	bits = xbits >> shift;

	// Pointing at the beginning of a block?
	if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
		runtime_xadd64(&gcstats.markonly.foundbit, 1);
	// Pointing just past the beginning?
	// Scan backward a little to find a block boundary.
	for(j=shift; j-->0; ) {
		if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
			runtime_xadd64(&gcstats.markonly.foundword, 1);

	// Otherwise consult span table to find beginning.
	// (Manually inlined copy of MHeap_LookupMaybe.)
	k = (uintptr)obj>>PageShift;
	if(sizeof(void*) == 8)
		x -= (uintptr)runtime_mheap.arena_start>>PageShift;
	s = runtime_mheap.spans[x];
	if(s == nil || k < s->start || (byte*)obj >= s->limit || s->state != MSpanInUse)
	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		uintptr size = s->elemsize;
		int32 i = ((byte*)obj - p)/size;

	// Now that we know the object header, reload bits.
	off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
	bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	bits = xbits >> shift;
	runtime_xadd64(&gcstats.markonly.foundspan, 1);
	// Now we have bits, bitp, and shift correct for
	// obj pointing at the base of the object.
	// Only care about allocated and not marked.
	if((bits & (bitAllocated|bitMarked)) != bitAllocated)
	*bitp |= bitMarked<<shift;
	if(x & (bitMarked<<shift))
	if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))
	// The object is now marked
// PtrTarget is a structure used by intermediate buffers.
// The intermediate buffers hold GC data before it
// is moved/flushed to the work buffer (Workbuf).
// The size of an intermediate buffer is very small,
// such as 32 or 64 elements.
typedef struct PtrTarget PtrTarget;
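
// An illustrative sketch of how scanblock appends to such a buffer and
// flushes it when full, kept out of the build with #if 0; batching the
// pointers amortizes the synchronization cost of the shared work queue.
// This mirrors the pattern used repeatedly in the scanner below.
#if 0
	*ptrbufpos++ = (struct PtrTarget){obj, objti};
	if(ptrbufpos == ptrbuf_end)
		flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
#endif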
typedef struct BufferList BufferList;
	PtrTarget ptrtarget[IntermediateBufferCapacity];
	Obj obj[IntermediateBufferCapacity];
	byte pad[CacheLineSize];
static BufferList bufferList[MaxGcproc];

static Type *itabtype;

static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
// flushptrbuf moves data from the PtrTarget buffer to the work buffer.
// The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
// while the work buffer contains blocks which have been marked
// and are prepared to be scanned by the garbage collector.
//
// _wp, _wbuf, _nobj are input/output parameters and are specifying the work buffer.
//
// A simplified drawing explaining how the todo-list moves from a structure to another:
//
//	Obj ------> PtrTarget (pointer targets)
//	(find block start, mark and enqueue)
flushptrbuf(PtrTarget *ptrbuf, PtrTarget **ptrbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj)
	byte *p, *arena_start, *obj;
	uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti, n;
	PtrTarget *ptrbuf_end;

	arena_start = runtime_mheap.arena_start;

	ptrbuf_end = *ptrbufpos;
	n = ptrbuf_end - ptrbuf;

	runtime_xadd64(&gcstats.ptr.sum, n);
	runtime_xadd64(&gcstats.ptr.cnt, 1);
	// If buffer is nearly full, get a new one.
	if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
		wbuf = getempty(wbuf);

	if(n >= nelem(wbuf->obj))
		runtime_throw("ptrbuf has to be smaller than WorkBuf");

	// TODO(atom): This block is a branch of an if-then-else statement.
	// The single-threaded branch may be added in a next CL.

	// Multi-threaded version.

	while(ptrbuf < ptrbuf_end) {
		// obj belongs to interval [mheap.arena_start, mheap.arena_used).
		if(obj < runtime_mheap.arena_start || obj >= runtime_mheap.arena_used)
			runtime_throw("object is outside of mheap");

		// obj may be a pointer to a live object.
		// Try to find the beginning of the object.

		// Round down to word boundary.
		if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
			obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
		// Find bits for this word.
		off = (uintptr*)obj - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = xbits >> shift;

		// Pointing at the beginning of a block?
		if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
			runtime_xadd64(&gcstats.flushptrbuf.foundbit, 1);
		// Pointing just past the beginning?
		// Scan backward a little to find a block boundary.
		for(j=shift; j-->0; ) {
			if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
				obj = (byte*)obj - (shift-j)*PtrSize;
				runtime_xadd64(&gcstats.flushptrbuf.foundword, 1);

		// Otherwise consult span table to find beginning.
		// (Manually inlined copy of MHeap_LookupMaybe.)
		k = (uintptr)obj>>PageShift;
		if(sizeof(void*) == 8)
			x -= (uintptr)arena_start>>PageShift;
		s = runtime_mheap.spans[x];
		if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse)
		p = (byte*)((uintptr)s->start<<PageShift);
		if(s->sizeclass == 0) {
			int32 i = ((byte*)obj - p)/size;

		// Now that we know the object header, reload bits.
		off = (uintptr*)obj - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = xbits >> shift;
		runtime_xadd64(&gcstats.flushptrbuf.foundspan, 1);
		// Now we have bits, bitp, and shift correct for
		// obj pointing at the base of the object.
		// Only care about allocated and not marked.
		if((bits & (bitAllocated|bitMarked)) != bitAllocated)
		*bitp |= bitMarked<<shift;
		if(x & (bitMarked<<shift))
		if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))

		// If object has no pointers, don't need to scan further.
		if((bits & bitNoScan) != 0)
		// Ask span about size class.
		// (Manually inlined copy of MHeap_Lookup.)
		x = (uintptr)obj >> PageShift;
		if(sizeof(void*) == 8)
			x -= (uintptr)arena_start>>PageShift;
		s = runtime_mheap.spans[x];

		*wp = (Obj){obj, s->elemsize, ti};
	// If another proc wants a pointer, give it some.
	if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
		wbuf = handoff(wbuf);
		wp = wbuf->obj + nobj;
flushobjbuf(Obj *objbuf, Obj **objbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj)

	objbuf_end = *objbufpos;

	while(objbuf < objbuf_end) {

		// Align obj.b to a word boundary.
		off = (uintptr)obj.p & (PtrSize-1);
			obj.p += PtrSize - off;
			obj.n -= PtrSize - off;

		if(obj.p == nil || obj.n == 0)
		// If buffer is full, get a new one.
		if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
			wbuf = getempty(wbuf);

	// If another proc wants a pointer, give it some.
	if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
		wbuf = handoff(wbuf);
		wp = wbuf->obj + nobj;
// Program that scans the whole block and treats every block element as a potential pointer
static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};

static uintptr chanProg[2] = {0, GC_CHAN};

// Local variables of a program fragment or loop
typedef struct Frame Frame;
	uintptr count, elemsize, b;
	uintptr *loop_or_ret;
// Sanity check for the derived type info objti.
checkptr(void *obj, uintptr objti)
	uintptr type, tisize, i, x;

		runtime_throw("checkptr is debug only");

	if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)

	type = runtime_gettype(obj);
	t = (Type*)(type & ~(uintptr)(PtrSize-1));
	x = (uintptr)obj >> PageShift;
	if(sizeof(void*) == 8)
		x -= (uintptr)(runtime_mheap.arena_start)>>PageShift;
	s = runtime_mheap.spans[x];
	objstart = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass != 0) {
		i = ((byte*)obj - objstart)/s->elemsize;
		objstart += i*s->elemsize;
	tisize = *(uintptr*)objti;
	// Sanity check for object size: it should fit into the memory block.
	if((byte*)obj + tisize > objstart + s->elemsize) {
		runtime_printf("object of type '%S' at %p/%p does not fit in block %p/%p\n",
			       *t->string, obj, tisize, objstart, s->elemsize);
		runtime_throw("invalid gc type info");

	// If obj points to the beginning of the memory block,
	// check type info as well.
	if(t->string == nil ||
		// Gob allocates unsafe pointers for indirection.
		(runtime_strcmp((const char *)t->string->str, (const char*)"unsafe.Pointer") &&
		// Runtime and gc think differently about closures.
		 runtime_strstr((const char *)t->string->str, (const char*)"struct { F uintptr") != (const char *)t->string->str)) {
		pc1 = (uintptr*)objti;
		pc2 = (uintptr*)t->gc;
		// A simple best-effort check until first GC_END.
		for(j = 1; pc1[j] != GC_END && pc2[j] != GC_END; j++) {
			if(pc1[j] != pc2[j]) {
				runtime_printf("invalid gc type info for '%s' at %p, type info %p, block info %p\n",
					       t->string ? (const int8*)t->string->str : (const int8*)"?", j, pc1[j], pc2[j]);
				runtime_throw("invalid gc type info");
// scanblock scans a block of n bytes starting at pointer b for references
// to other objects, scanning any it finds recursively until there are no
// unscanned objects left. Instead of using an explicit recursion, it keeps
// a work list in the Workbuf* structures and loops in the main function
// body. Keeping an explicit work list is easier on the stack allocator and
// more efficient.
//
// wbuf: current work buffer
// wp: storage for next queued pointer (write pointer)
// nobj: number of queued objects
scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
	byte *b, *arena_start, *arena_used;
	uintptr n, i, end_b, elemsize, size, ti, objti, count /* , type */;
	uintptr *pc, precise_type, nominal_size;
	uintptr *chan_ret, chancap;
	Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
	BufferList *scanbuffers;
	PtrTarget *ptrbuf, *ptrbuf_end, *ptrbufpos;
	Obj *objbuf, *objbuf_end, *objbufpos;

	if(sizeof(Workbuf) % PageSize != 0)
		runtime_throw("scanblock: size of Workbuf is suboptimal");
	// Memory arena parameters.
	arena_start = runtime_mheap.arena_start;
	arena_used = runtime_mheap.arena_used;

	stack_ptr = stack+nelem(stack)-1;

	precise_type = false;

	scanbuffers = &bufferList[runtime_m()->helpgc];
	ptrbuf = &scanbuffers->ptrtarget[0];
	ptrbuf_end = &scanbuffers->ptrtarget[0] + nelem(scanbuffers->ptrtarget);
	objbuf = &scanbuffers->obj[0];
	objbuf_end = &scanbuffers->obj[0] + nelem(scanbuffers->obj);

	// (Silence the compiler)
	// Each iteration scans the block b of length n, queueing pointers in
		runtime_printf("scanblock %p %D\n", b, (int64)n);

		runtime_xadd64(&gcstats.nbytes, n);
		runtime_xadd64(&gcstats.obj.sum, nobj);
		runtime_xadd64(&gcstats.obj.cnt, 1);
		if(ti != 0 && false) {
			pc = (uintptr*)(ti & ~(uintptr)PC_BITS);
			precise_type = (ti & PRECISE);
			stack_top.elemsize = pc[0];
				nominal_size = pc[0];
				stack_top.count = 0;	// 0 means an infinite number of iterations
				stack_top.loop_or_ret = pc+1;

			// Simple sanity check for provided type info ti:
			// The declared size of the object must be not larger than the actual size
			// (it can be smaller due to inferior pointers).
			// It's difficult to make a comprehensive check due to inferior pointers,
			// reflection, gob, etc.
				runtime_printf("invalid gc type info: type info size %p, block size %p\n", pc[0], n);
				runtime_throw("invalid gc type info");
		} else if(UseSpanType && false) {
				runtime_xadd64(&gcstats.obj.notype, 1);

			type = runtime_gettype(b);
				runtime_xadd64(&gcstats.obj.typelookup, 1);

				t = (Type*)(type & ~(uintptr)(PtrSize-1));
				switch(type & (PtrSize-1)) {
				case TypeInfo_SingleObject:
					pc = (uintptr*)t->gc;
					precise_type = true;	// type information about 'b' is precise
					stack_top.elemsize = pc[0];
					pc = (uintptr*)t->gc;
					precise_type = true;	// type information about 'b' is precise
					stack_top.count = 0;	// 0 means an infinite number of iterations
					stack_top.elemsize = pc[0];
					stack_top.loop_or_ret = pc+1;
					chantype = (ChanType*)t;
					runtime_throw("scanblock: invalid type");
	stack_top.b = (uintptr)b;

	end_b = (uintptr)b + n - PtrSize;

			runtime_xadd64(&gcstats.instr[pc[0]], 1);

			obj = *(void**)(stack_top.b + pc[1]);

				checkptr(obj, objti);
			sliceptr = (Slice*)(stack_top.b + pc[1]);
			if(sliceptr->cap != 0) {
				obj = sliceptr->array;
				// Can't use slice element type for scanning,
				// because if it points to an array embedded
				// in the beginning of a struct,
				// we will scan the whole struct as the slice.
				// So just obtain type info from heap.

			obj = *(void**)(stack_top.b + pc[1]);
			obj = *(void**)(stack_top.b + pc[1]);

			eface = (Eface*)(stack_top.b + pc[1]);

			if(eface->type == nil)

			if((const byte*)t >= arena_start && (const byte*)t < arena_used) {
				union { const Type *tc; Type *tr; } u;

				*ptrbufpos++ = (struct PtrTarget){(void*)u.tr, 0};
				if(ptrbufpos == ptrbuf_end)
					flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
			if((byte*)eface->__object >= arena_start && (byte*)eface->__object < arena_used) {
				if(t->__size <= sizeof(void*)) {
					if((t->kind & KindNoPointers))

					obj = eface->__object;
					if((t->kind & ~KindNoPointers) == KindPtr)
						// objti = (uintptr)((PtrType*)t)->elem->gc;

					obj = eface->__object;
					// objti = (uintptr)t->gc;
			iface = (Iface*)(stack_top.b + pc[1]);

			if(iface->tab == nil)

			if((byte*)iface->tab >= arena_start && (byte*)iface->tab < arena_used) {
				// *ptrbufpos++ = (struct PtrTarget){iface->tab, (uintptr)itabtype->gc};
				*ptrbufpos++ = (struct PtrTarget){iface->tab, 0};
				if(ptrbufpos == ptrbuf_end)
					flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
			if((byte*)iface->__object >= arena_start && (byte*)iface->__object < arena_used) {
				// t = iface->tab->type;
				if(t->__size <= sizeof(void*)) {
					if((t->kind & KindNoPointers))

					obj = iface->__object;
					if((t->kind & ~KindNoPointers) == KindPtr)
						// objti = (uintptr)((const PtrType*)t)->elem->gc;

					obj = iface->__object;
					// objti = (uintptr)t->gc;
			while(stack_top.b <= end_b) {
				obj = *(byte**)stack_top.b;
				stack_top.b += PtrSize;
				if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
					*ptrbufpos++ = (struct PtrTarget){obj, 0};
					if(ptrbufpos == ptrbuf_end)
						flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
			if(--stack_top.count != 0) {
				// Next iteration of a loop if possible.
				stack_top.b += stack_top.elemsize;
				if(stack_top.b + stack_top.elemsize <= end_b+PtrSize) {
					pc = stack_top.loop_or_ret;

			// Stack pop if possible.
			if(stack_ptr+1 < stack+nelem(stack)) {
				pc = stack_top.loop_or_ret;
				stack_top = *(++stack_ptr);
			i = (uintptr)b + nominal_size;

			// Quickly scan [b+i,b+n) for possible pointers.
			for(; i<=end_b; i+=PtrSize) {
				if(*(byte**)i != nil) {
					// Found a value that may be a pointer.
					// Do a rescan of the entire block.
					enqueue((Obj){b, n, 0}, &wbuf, &wp, &nobj);

					runtime_xadd64(&gcstats.rescan, 1);
					runtime_xadd64(&gcstats.rescanbytes, n);
			i = stack_top.b + pc[1];

			*stack_ptr-- = stack_top;
			stack_top = (Frame){count, elemsize, i, pc};

			if(--stack_top.count != 0) {
				stack_top.b += stack_top.elemsize;
				pc = stack_top.loop_or_ret;

				stack_top = *(++stack_ptr);

			*stack_ptr-- = stack_top;
			stack_top = (Frame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
			pc = (uintptr*)((byte*)pc + *(int32*)(pc+2));	// target of the CALL instruction
			obj = (void*)(stack_top.b + pc[1]);

			*objbufpos++ = (Obj){obj, size, objti};
			if(objbufpos == objbuf_end)
				flushobjbuf(objbuf, &objbufpos, &wp, &wbuf, &nobj);
			chan = *(Hchan**)(stack_top.b + pc[1]);

			if(markonly(chan)) {
				chantype = (ChanType*)pc[2];
				if(!(chantype->elem->kind & KindNoPointers)) {

				// There are no heap pointers in struct Hchan,
				// so we can ignore the leading sizeof(Hchan) bytes.
				if(!(chantype->elem->kind & KindNoPointers)) {
					// Channel's buffer follows Hchan immediately in memory.
					// Size of buffer (cap(c)) is second int in the chan struct.
					chancap = ((uintgo*)chan)[1];

					// TODO(atom): split into two chunks so that only the
					// in-use part of the circular buffer is scanned.
					// (Channel routines zero the unused part, so the current
					// code does not lead to leaks, it's just a little inefficient.)
					*objbufpos++ = (Obj){(byte*)chan+runtime_Hchansize, chancap*chantype->elem->size,
						(uintptr)chantype->elem->gc | PRECISE | LOOP};
					if(objbufpos == objbuf_end)
						flushobjbuf(objbuf, &objbufpos, &wp, &wbuf, &nobj);

			runtime_throw("scanblock: invalid GC instruction");
		if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
			*ptrbufpos++ = (struct PtrTarget){obj, objti};
			if(ptrbufpos == ptrbuf_end)
				flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);

		// Done scanning [b, b+n).  Prepare for the next iteration of
		// the loop by setting b, n, ti to the parameters for the next block.

		flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
		flushobjbuf(objbuf, &objbufpos, &wp, &wbuf, &nobj);

			// Emptied our buffer: refill.
			wbuf = getfull(wbuf);

			wp = wbuf->obj + wbuf->nobj;

		// Fetch b from the work buffer.
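
// An illustrative sketch of the outer work-list loop, kept out of the
// build with #if 0: after scanning a block the local buffers are flushed,
// the work buffer is refilled from the global full list when it runs dry,
// and the next block is popped off the write pointer.
#if 0
	for(;;) {
		// ... scan [b, b+n) as above, filling ptrbuf/objbuf ...
		flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
		flushobjbuf(objbuf, &objbufpos, &wp, &wbuf, &nobj);
		if(nobj == 0) {
			wbuf = getfull(wbuf);	// blocks until work arrives or marking is done
			if(wbuf == nil)
				break;
			nobj = wbuf->nobj;
			wp = wbuf->obj + wbuf->nobj;
		}
		// Fetch b from the work buffer.
		--wp;
		b = wp->p;
		n = wp->n;
		ti = wp->ti;
		nobj--;
	}
#endif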
// debug_scanblock is the debug copy of scanblock.
// it is simpler, slower, single-threaded, recursive,
// and uses bitSpecial as the mark bit.
debug_scanblock(byte *b, uintptr n)
	uintptr size, *bitp, bits, shift, i, xbits, off;

		runtime_throw("debug_scanblock without DebugMark");

		runtime_printf("debug_scanblock %p %D\n", b, (int64)n);
		runtime_throw("debug_scanblock");

	// Align b to a word boundary.
	off = (uintptr)b & (PtrSize-1);
	for(i=0; i<(uintptr)n; i++) {

		// Words outside the arena cannot be pointers.
		if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)

		// Round down to word boundary.
		obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));

		// Consult span table to find beginning.
		s = runtime_MHeap_LookupMaybe(&runtime_mheap, obj);

		p = (byte*)((uintptr)s->start<<PageShift);
		if(s->sizeclass == 0) {
			int32 i = ((byte*)obj - p)/size;
		// Now that we know the object header, reload bits.
		off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
		bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = xbits >> shift;

		// Now we have bits, bitp, and shift correct for
		// obj pointing at the base of the object.
		// If not allocated or already marked, done.
		if((bits & bitAllocated) == 0 || (bits & bitSpecial) != 0)	// NOTE: bitSpecial not bitMarked
		*bitp |= bitSpecial<<shift;
		if(!(bits & bitMarked))
			runtime_printf("found unmarked block %p in %p\n", obj, vp+i);

		// If object has no pointers, don't need to scan further.
		if((bits & bitNoScan) != 0)

		debug_scanblock(obj, size);
// Append obj to the work buffer.
// _wbuf, _wp, _nobj are input/output parameters and are specifying the work buffer.
enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj)

		runtime_printf("append obj(%p %D %p)\n", obj.p, (int64)obj.n, obj.ti);

	// Align obj.b to a word boundary.
	off = (uintptr)obj.p & (PtrSize-1);
		obj.p += PtrSize - off;
		obj.n -= PtrSize - off;

	if(obj.p == nil || obj.n == 0)

	// Load work buffer state

	// If another proc wants a pointer, give it some.
	if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
		wbuf = handoff(wbuf);
		wp = wbuf->obj + nobj;

	// If buffer is full, get a new one.
	if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
		wbuf = getempty(wbuf);

	// Save work buffer state
markroot(ParFor *desc, uint32 i)

	enqueue(work.roots[i], &wbuf, &wp, &nobj);
	scanblock(wbuf, wp, nobj, false);
// Get an empty work buffer off the work.empty list,
// allocating new buffers as needed.
getempty(Workbuf *b)

		runtime_lfstackpush(&work.full, &b->node);
	b = (Workbuf*)runtime_lfstackpop(&work.empty);

		// Need to allocate.
		runtime_lock(&work);
		if(work.nchunk < sizeof *b) {
			work.nchunk = 1<<20;
			work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
			if(work.chunk == nil)
				runtime_throw("runtime: cannot allocate memory");

		b = (Workbuf*)work.chunk;
		work.chunk += sizeof *b;
		work.nchunk -= sizeof *b;
		runtime_unlock(&work);
putempty(Workbuf *b)

		runtime_xadd64(&gcstats.putempty, 1);

	runtime_lfstackpush(&work.empty, &b->node);
// Get a full work buffer off the work.full list, or return nil.
		runtime_xadd64(&gcstats.getfull, 1);

		runtime_lfstackpush(&work.empty, &b->node);
	b = (Workbuf*)runtime_lfstackpop(&work.full);
	if(b != nil || work.nproc == 1)

	runtime_xadd(&work.nwait, +1);
		if(work.full != 0) {
			runtime_xadd(&work.nwait, -1);
			b = (Workbuf*)runtime_lfstackpop(&work.full);
			runtime_xadd(&work.nwait, +1);
		if(work.nwait == work.nproc)

			m->gcstats.nprocyield++;
			runtime_procyield(20);
			m->gcstats.nosyield++;
			m->gcstats.nsleep++;
			runtime_usleep(100);
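
// An illustrative sketch of that escalating backoff, kept out of the
// build with #if 0: an idle helper first spins on the CPU, then yields
// its OS thread, then sleeps, so waiting burns progressively less CPU
// while work may still reappear on work.full.
#if 0
	for(i=0;; i++) {
		if(work.full != 0)
			break;			// work showed up; retry the pop
		if(i < 10) {
			m->gcstats.nprocyield++;
			runtime_procyield(20);	// brief busy spin
		} else if(i < 20) {
			m->gcstats.nosyield++;
			runtime_osyield();	// yield the OS thread
		} else {
			m->gcstats.nsleep++;
			runtime_usleep(100);	// sleep 100us
		}
	}
#endif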
	// Make new buffer with half of b's pointers.
	runtime_memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
	m->gcstats.nhandoff++;
	m->gcstats.nhandoffcnt += n;

	// Put b on full list - let first half of b get stolen.
	runtime_lfstackpush(&work.full, &b->node);
	if(work.nroot >= work.rootcap) {
		cap = PageSize/sizeof(Obj);
		if(cap < 2*work.rootcap)
			cap = 2*work.rootcap;
		new = (Obj*)runtime_SysAlloc(cap*sizeof(Obj), &mstats.gc_sys);
			runtime_throw("runtime: cannot allocate memory");
		if(work.roots != nil) {
			runtime_memmove(new, work.roots, work.rootcap*sizeof(Obj));
			runtime_SysFree(work.roots, work.rootcap*sizeof(Obj), &mstats.gc_sys);

	work.roots[work.nroot] = obj;
addstackroots(G *gp)
#ifdef USING_SPLIT_STACK

	if(gp == runtime_g()) {
		// Scanning our own stack.
		sp = __splitstack_find(nil, nil, &spsize, &next_segment,
				       &next_sp, &initial_sp);
	} else if((mp = gp->m) != nil && mp->helpgc) {
		// gchelper's stack is in active use and has no interesting pointers.

		// Scanning another goroutine's stack.
		// The goroutine is usually asleep (the world is stopped).

		// The exception is that if the goroutine is about to enter or might
		// have just exited a system call, it may be executing code such
		// as schedlock and may have needed to start a new stack segment.
		// Use the stack segment and stack pointer at the time of
		// the system call instead, since that won't change underfoot.
		if(gp->gcstack != nil) {
			spsize = gp->gcstack_size;
			next_segment = gp->gcnext_segment;
			next_sp = gp->gcnext_sp;
			initial_sp = gp->gcinitial_sp;

			sp = __splitstack_find_context(&gp->stack_context[0],
						       &spsize, &next_segment,
						       &next_sp, &initial_sp);

		addroot((Obj){sp, spsize, 0});
		while((sp = __splitstack_find(next_segment, next_sp,
					      &spsize, &next_segment,
					      &next_sp, &initial_sp)) != nil)
			addroot((Obj){sp, spsize, 0});
	if(gp == runtime_g()) {
		// Scanning our own stack.
		bottom = (byte*)&gp;
	} else if((mp = gp->m) != nil && mp->helpgc) {
		// gchelper's stack is in active use and has no interesting pointers.

		// Scanning another goroutine's stack.
		// The goroutine is usually asleep (the world is stopped).
		bottom = (byte*)gp->gcnext_sp;

	top = (byte*)gp->gcinitial_sp + gp->gcstack_size;
		addroot((Obj){bottom, top - bottom, 0});
		addroot((Obj){top, bottom - top, 0});
addfinroots(void *v)

	if(!runtime_mlookup(v, (byte**)&base, &size, nil) || !runtime_blockspecial(base))
		runtime_throw("mark - finalizer inconsistency");

	// do not mark the finalizer block itself.  just mark the things it points at.
	addroot((Obj){base, size, 0});
static struct root_list* roots;

__go_register_gc_roots (struct root_list* r)
	// FIXME: This needs locking if multiple goroutines can call
	// dlopen simultaneously.

	struct root_list *pl;

	MSpan *s, **allspans;

	for(pl = roots; pl != nil; pl = pl->next) {
		struct root* pr = &pl->roots[0];
			void *decl = pr->decl;
			addroot((Obj){decl, pr->size, 0});
	addroot((Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
	addroot((Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
	addroot((Obj){(byte*)&runtime_allg, sizeof runtime_allg, 0});
	addroot((Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
	addroot((Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
	runtime_proc_scan(addroot);
	runtime_MProf_Mark(addroot);
	runtime_time_scan(addroot);
	runtime_netpoll_scan(addroot);
	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state == MSpanInUse) {
			// The garbage collector ignores type pointers stored in MSpan.types:
			//  - Compiler-generated types are stored outside of heap.
			//  - The reflect package has runtime-generated types cached in its data structures.
			//    The garbage collector relies on finding the references via that cache.
			switch(s->types.compression) {
				markonly((byte*)s->types.data);
	for(gp=runtime_allg; gp!=nil; gp=gp->alllink) {
			runtime_printf("unexpected G.status %d\n", gp->status);
			runtime_throw("mark - bad status");
		runtime_throw("mark - world not stopped");

	runtime_walkfintab(addfinroots, addroot);

	for(fb=allfin; fb; fb=fb->alllink)
		addroot((Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});

	addroot((Obj){(byte*)&work, sizeof work, 0});
handlespecial(byte *p, uintptr size)
	const struct __go_func_type *ft;
	const struct __go_ptr_type *ot;

	if(!runtime_getfinalizer(p, true, &fn, &ft, &ot)) {
		runtime_setblockspecial(p, false);
		runtime_MProf_Free(p, size);

	runtime_lock(&finlock);
	if(finq == nil || finq->cnt == finq->cap) {
			finc = runtime_persistentalloc(PageSize, 0, &mstats.gc_sys);
			finc->cap = (PageSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
			finc->alllink = allfin;

	f = &finq->fin[finq->cnt];

	runtime_unlock(&finlock);
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
sweepspan(ParFor *desc, uint32 idx)
	int32 cl, n, npages;
	uintptr type_data_inc;

	s = runtime_mheap.allspans[idx];
	if(s->state != MSpanInUse)

	arena_start = runtime_mheap.arena_start;
	p = (byte*)(s->start << PageShift);

		// Chunk full of small blocks.
		npages = runtime_class_to_allocnpages[cl];
		n = (npages << PageShift) / size;

	type_data = (byte*)s->types.data;
	type_data_inc = sizeof(uintptr);
	compression = s->types.compression;
	switch(compression) {
		type_data += 8*sizeof(uintptr);
	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.
	for(; n > 0; n--, p += size, type_data+=type_data_inc) {
		uintptr off, *bitp, shift, bits;

		off = (uintptr*)p - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = *bitp>>shift;

		if((bits & bitAllocated) == 0)

		if((bits & bitMarked) != 0) {
				if(!(bits & bitSpecial))
					runtime_printf("found spurious mark on %p\n", p);
				*bitp &= ~(bitSpecial<<shift);
			*bitp &= ~(bitMarked<<shift);
		// Special means it has a finalizer or is being profiled.
		// In DebugMark mode, the bit has been coopted so
		// we have to assume all blocks are special.
		if(DebugMark || (bits & bitSpecial) != 0) {
			if(handlespecial(p, size))

		// Mark freed; restore block boundary bit.
		*bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);

			runtime_unmarkspan(p, 1<<PageShift);
			*(uintptr*)p = (uintptr)0xdeaddeaddeaddeadll;	// needs zeroing
			runtime_MHeap_Free(&runtime_mheap, s, 1);
			c->local_nlargefree++;
			c->local_largefree += size;
			// Free small object.
			switch(compression) {
				*(uintptr*)type_data = 0;
				*(byte*)type_data = 0;
			if(size > sizeof(uintptr))
				((uintptr*)p)[1] = (uintptr)0xdeaddeaddeaddeadll;	// mark as "needs to be zeroed"

			end->next = (MLink*)p;

		c->local_nsmallfree[cl] += nfree;
		c->local_cachealloc -= nfree * size;
		runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
dumpspan(uint32 idx)
	int32 sizeclass, n, npages, i, column;
	bool allocated, special;

	s = runtime_mheap.allspans[idx];
	if(s->state != MSpanInUse)

	arena_start = runtime_mheap.arena_start;
	p = (byte*)(s->start << PageShift);
	sizeclass = s->sizeclass;
	if(sizeclass == 0) {
		npages = runtime_class_to_allocnpages[sizeclass];
		n = (npages << PageShift) / size;

	runtime_printf("%p .. %p:\n", p, p+n*size);

	for(; n>0; n--, p+=size) {
		uintptr off, *bitp, shift, bits;

		off = (uintptr*)p - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = *bitp>>shift;

		allocated = ((bits & bitAllocated) != 0);
		special = ((bits & bitSpecial) != 0);

		for(i=0; (uint32)i<size; i+=sizeof(void*)) {
				runtime_printf("\t");
			runtime_printf(allocated ? "(" : "[");
			runtime_printf(special ? "@" : "");
			runtime_printf("%p: ", p+i);
				runtime_printf(" ");

			runtime_printf("%p", *(void**)(p+i));

			if(i+sizeof(void*) >= size) {
				runtime_printf(allocated ? ") " : "] ");

			runtime_printf("\n");

	runtime_printf("\n");
// A debugging function to dump the contents of memory
runtime_memorydump(void)

	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
runtime_gchelper(void)

	// parallel mark for over gc roots
	runtime_parfordo(work.markfor);

	// help other threads scan secondary blocks
	scanblock(nil, nil, 0, true);

		// wait while the main thread executes mark(debug_scanblock)
		while(runtime_atomicload(&work.debugmarkdone) == 0)

	runtime_parfordo(work.sweepfor);
	bufferList[runtime_m()->helpgc].busy = 0;
	if(runtime_xadd(&work.ndone, +1) == work.nproc-1)
		runtime_notewakeup(&work.alldone);
#define GcpercentUnknown (-2)

// Initialized from $GOGC.  GOGC=off means no gc.
//
// Next gc is after we've allocated an extra amount of
// memory proportional to the amount already in use.
// If gcpercent=100 and we're using 4M, we'll gc again
// when we get to 8M.  This keeps the gc cost in linear
// proportion to the allocation cost.  Adjusting gcpercent
// just changes the linear constant (and also the amount of
// extra memory used).
static int32 gcpercent = GcpercentUnknown;
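
// An illustrative sketch of how gcpercent sets the next trigger, kept out
// of the build with #if 0; this mirrors the assignment made near the end
// of gc() below.
#if 0
	// With gcpercent=100 and 4M live after a collection, the next GC
	// triggers once heap_alloc reaches 4M + 4M*100/100 = 8M.
	mstats.next_gc = mstats.heap_alloc + mstats.heap_alloc*gcpercent/100;
#endif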
	for(pp=runtime_allp; (p=*pp) != nil; pp++) {
		runtime_purgecachedstats(c);
updatememstats(GCStats *stats)
	uint64 stacks_inuse, smallfree;

		runtime_memclr((byte*)stats, sizeof(*stats));
	for(mp=runtime_allm; mp; mp=mp->alllink) {
		//stacks_inuse += mp->stackinuse*FixedStack;
			src = (uint64*)&mp->gcstats;
			dst = (uint64*)stats;
			for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
			runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));

	mstats.stacks_inuse = stacks_inuse;
	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
	mstats.mspan_inuse = runtime_mheap.spanalloc.inuse;
	mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
		mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;

	// Calculate memory allocator stats.
	// During program execution we only count number of frees and amount of freed memory.
	// Current number of alive objects in the heap and amount of alive heap memory
	// are calculated by scanning all spans.
	// Total number of mallocs is calculated as number of frees plus number of alive objects.
	// Similarly, total amount of allocated memory is calculated as amount of freed memory
	// plus amount of alive heap memory.
	mstats.total_alloc = 0;
	for(i = 0; i < nelem(mstats.by_size); i++) {
		mstats.by_size[i].nmalloc = 0;
		mstats.by_size[i].nfree = 0;

	// Flush MCache's to MCentral.
	for(pp=runtime_allp; (p=*pp) != nil; pp++) {
		runtime_MCache_ReleaseAll(c);
1878 // Aggregate local stats.
1881 // Scan all spans and count number of alive objects.
1882 for(i
= 0; i
< runtime_mheap
.nspan
; i
++) {
1883 s
= runtime_mheap
.allspans
[i
];
1884 if(s
->state
!= MSpanInUse
)
1886 if(s
->sizeclass
== 0) {
1888 mstats
.alloc
+= s
->elemsize
;
1890 mstats
.nmalloc
+= s
->ref
;
1891 mstats
.by_size
[s
->sizeclass
].nmalloc
+= s
->ref
;
1892 mstats
.alloc
+= s
->ref
*s
->elemsize
;
	// Aggregate by size class.
	mstats.nfree = runtime_mheap.nlargefree;
	for(i = 0; i < nelem(mstats.by_size); i++) {
		mstats.nfree += runtime_mheap.nsmallfree[i];
		mstats.by_size[i].nfree = runtime_mheap.nsmallfree[i];
		mstats.by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
		smallfree += runtime_mheap.nsmallfree[i] * runtime_class_to_size[i];
	mstats.nmalloc += mstats.nfree;

	// Calculate derived stats.
	mstats.total_alloc = mstats.alloc + runtime_mheap.largefree + smallfree;
	mstats.heap_alloc = mstats.alloc;
	mstats.heap_objects = mstats.nmalloc - mstats.nfree;
// Structure of arguments passed to function gc().
// This allows the arguments to be passed via runtime_mcall.
	int64 start_time;	// start time of GC in ns (just before stoptheworld)

static void gc(struct gc_args *args);
static void mgc(G *gp);

	p = runtime_getenv("GOGC");
	if(p == nil || p[0] == '\0')
	if(runtime_strcmp((const char *)p, "off") == 0)
	return runtime_atoi(p);
runtime_gc(int32 force)

	// The atomic operations are not atomic if the uint64s
	// are not aligned on uint64 boundaries. This has been
	// a problem in the past.
	if((((uintptr)&work.empty) & 7) != 0)
		runtime_throw("runtime: gc work buffer is misaligned");
	if((((uintptr)&work.full) & 7) != 0)
		runtime_throw("runtime: gc work buffer is misaligned");

	// Make sure all registers are saved on stack so that
	// scanstack sees them.
	__builtin_unwind_init();

	// The gc is turned off (via enablegc) until
	// the bootstrap has completed.
	// Also, malloc gets called in the guts
	// of a number of libraries that might be
	// holding locks.  To avoid priority inversion
	// problems, don't bother trying to run gc
	// while holding a lock.  The next mallocgc
	// without a lock will do the gc instead.
	if(!mstats.enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking)

	if(gcpercent == GcpercentUnknown) {	// first time through
		runtime_lock(&runtime_mheap);
		if(gcpercent == GcpercentUnknown)
			gcpercent = readgogc();
		runtime_unlock(&runtime_mheap);

	runtime_semacquire(&runtime_worldsema, false);
	if(!force && mstats.heap_alloc < mstats.next_gc) {
		// typically threads which lost the race to grab
		// worldsema exit here when gc is done.
		runtime_semrelease(&runtime_worldsema);

	// Ok, we're doing it!  Stop everybody else
	a.start_time = runtime_nanotime();
	runtime_stoptheworld();
	// Run gc on the g0 stack.  We do this so that the g stack
	// we're currently running on will no longer change.  Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state).  Also an
	// enabler for copyable stacks.
	for(i = 0; i < (runtime_debug.gctrace > 1 ? 2 : 1); i++) {
		// switch to g0, call gc(&a), then switch back
		g->status = Gwaiting;
		g->waitreason = "garbage collection";
		// record a new start time in case we're going around again
		a.start_time = runtime_nanotime();

	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();

	// now that gc is done, kick off finalizer thread if needed
		runtime_lock(&finlock);
		// kick off or wake up goroutine to run queued finalizers
			fing = __go_go(runfinq, nil);
			runtime_ready(fing);
		runtime_unlock(&finlock);

	// give the queued finalizers, if any, a chance to run

	gp->status = Grunning;
gc(struct gc_args *args)
	int64 t0, t1, t2, t3, t4;
	uint64 heap0, heap1, obj0, obj1, ninstr;

	t0 = args->start_time;

	runtime_memclr((byte*)&gcstats, sizeof(gcstats));

	for(mp=runtime_allm; mp; mp=mp->alllink)
		runtime_settype_flush(mp);

	if(runtime_debug.gctrace) {
		updatememstats(nil);
		heap0 = mstats.heap_alloc;
		obj0 = mstats.nmalloc - mstats.nfree;

	m->locks++;	// disable gc during mallocs in parforalloc
	if(work.markfor == nil)
		work.markfor = runtime_parforalloc(MaxGcproc);
	if(work.sweepfor == nil)
		work.sweepfor = runtime_parforalloc(MaxGcproc);

	if(itabtype == nil) {
		// get C pointer to the Go type "itab"
		// runtime_gc_itab_ptr(&eface);
		// itabtype = ((PtrType*)eface.type)->elem;

	work.debugmarkdone = 0;
	work.nproc = runtime_gcprocs();
	runtime_parforsetup(work.markfor, work.nproc, work.nroot, nil, false, markroot);
	runtime_parforsetup(work.sweepfor, work.nproc, runtime_mheap.nspan, nil, true, sweepspan);
	if(work.nproc > 1) {
		runtime_noteclear(&work.alldone);
		runtime_helpgc(work.nproc);

	t1 = runtime_nanotime();

	runtime_parfordo(work.markfor);
	scanblock(nil, nil, 0, true);

		for(i=0; i<work.nroot; i++)
			debug_scanblock(work.roots[i].p, work.roots[i].n);
		runtime_atomicstore(&work.debugmarkdone, 1);

	t2 = runtime_nanotime();

	runtime_parfordo(work.sweepfor);
	bufferList[m->helpgc].busy = 0;
	t3 = runtime_nanotime();

		runtime_notesleep(&work.alldone);
	mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;

	t4 = runtime_nanotime();
	mstats.last_gc = t4;
	mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
	mstats.pause_total_ns += t4 - t0;

		runtime_printf("pause %D\n", t4-t0);

	if(runtime_debug.gctrace) {
		updatememstats(&stats);
		heap1 = mstats.heap_alloc;
		obj1 = mstats.nmalloc - mstats.nfree;
		stats.nprocyield += work.sweepfor->nprocyield;
		stats.nosyield += work.sweepfor->nosyield;
		stats.nsleep += work.sweepfor->nsleep;

		runtime_printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB %D -> %D (%D-%D) objects,"
				" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
			mstats.numgc, work.nproc, (t2-t1)/1000000, (t3-t2)/1000000, (t1-t0+t4-t3)/1000000,
			heap0>>20, heap1>>20, obj0, obj1,
			mstats.nmalloc, mstats.nfree,
			stats.nhandoff, stats.nhandoffcnt,
			work.sweepfor->nsteal, work.sweepfor->nstealcnt,
			stats.nprocyield, stats.nosyield, stats.nsleep);
		runtime_printf("scan: %D bytes, %D objects, %D untyped, %D types from MSpan\n",
			gcstats.nbytes, gcstats.obj.cnt, gcstats.obj.notype, gcstats.obj.typelookup);
		if(gcstats.ptr.cnt != 0)
			runtime_printf("avg ptrbufsize: %D (%D/%D)\n",
				gcstats.ptr.sum/gcstats.ptr.cnt, gcstats.ptr.sum, gcstats.ptr.cnt);
		if(gcstats.obj.cnt != 0)
			runtime_printf("avg nobj: %D (%D/%D)\n",
				gcstats.obj.sum/gcstats.obj.cnt, gcstats.obj.sum, gcstats.obj.cnt);
		runtime_printf("rescans: %D, %D bytes\n", gcstats.rescan, gcstats.rescanbytes);

		runtime_printf("instruction counts:\n");
		for(i=0; i<nelem(gcstats.instr); i++) {
			runtime_printf("\t%d:\t%D\n", i, gcstats.instr[i]);
			ninstr += gcstats.instr[i];
		runtime_printf("\ttotal:\t%D\n", ninstr);

		runtime_printf("putempty: %D, getfull: %D\n", gcstats.putempty, gcstats.getfull);

		runtime_printf("markonly base lookup: bit %D word %D span %D\n", gcstats.markonly.foundbit, gcstats.markonly.foundword, gcstats.markonly.foundspan);
		runtime_printf("flushptrbuf base lookup: bit %D word %D span %D\n", gcstats.flushptrbuf.foundbit, gcstats.flushptrbuf.foundword, gcstats.flushptrbuf.foundspan);
void runtime_ReadMemStats(MStats *)
  __asm__ (GOSYM_PREFIX "runtime.ReadMemStats");

runtime_ReadMemStats(MStats *stats)

	// Have to acquire worldsema to stop the world,
	// because stoptheworld can only be used by
	// one goroutine at a time, and there might be
	// a pending garbage collection already calling it.
	runtime_semacquire(&runtime_worldsema, false);
	runtime_stoptheworld();
	updatememstats(nil);
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();
void runtime_debug_readGCStats(Slice *)
  __asm__("runtime_debug.readGCStats");

runtime_debug_readGCStats(Slice *pauses)

	// Calling code in runtime/debug should make the slice large enough.
	if((size_t)pauses->cap < nelem(mstats.pause_ns)+3)
		runtime_throw("runtime: short slice passed to readGCStats");

	// Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
	p = (uint64*)pauses->array;
	runtime_lock(&runtime_mheap);
	if(n > nelem(mstats.pause_ns))
		n = nelem(mstats.pause_ns);
	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%nelem(pause_ns)], and then backward
	// from there to go back farther in time. We deliver the times
	// most recent first (in p[0]).
	for(i=0; i<n; i++)
		p[i] = mstats.pause_ns[(mstats.numgc-1-i)%nelem(mstats.pause_ns)];

	p[n] = mstats.last_gc;
	p[n+1] = mstats.numgc;
	p[n+2] = mstats.pause_total_ns;
	runtime_unlock(&runtime_mheap);
	pauses->__count = n+3;
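
// An illustrative sketch of consuming the layout produced above, kept out
// of the build with #if 0 (the local variable names are hypothetical):
#if 0
	uint64 *q = (uint64*)pauses->array;
	uint64 most_recent_pause = q[0];	// pauses are delivered most recent first
	uint64 last_gc_ns = q[n];		// absolute time of last GC, in ns
	uint64 num_gc = q[n+1];			// number of collections so far
	uint64 total_pause_ns = q[n+2];		// cumulative pause time
#endif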
intgo runtime_debug_setGCPercent(intgo)
  __asm__("runtime_debug.setGCPercent");

runtime_debug_setGCPercent(intgo in)

	runtime_lock(&runtime_mheap);
	if(gcpercent == GcpercentUnknown)
		gcpercent = readgogc();
	runtime_unlock(&runtime_mheap);
	if(m->helpgc < 0 || m->helpgc >= MaxGcproc)
		runtime_throw("gchelperstart: bad m->helpgc");
	if(runtime_xchg(&bufferList[m->helpgc].busy, 1))
		runtime_throw("gchelperstart: already busy");
	if(runtime_g() != m->g0)
		runtime_throw("gchelper not running on g0 stack");
runfinq(void* dummy __attribute__ ((unused)))
	FinBlock *fb, *next;

		runtime_lock(&finlock);
			runtime_park(runtime_unlock, &finlock, "finalizer wait");
		runtime_unlock(&finlock);
			runtime_racefingo();
		for(; fb; fb=next) {
			for(i=0; i<(uint32)fb->cnt; i++) {
				fint = ((const Type**)f->ft->__in.array)[0];
				if(fint->kind == KindPtr) {
					// direct use of pointer
				} else if(((const InterfaceType*)fint)->__methods.__count == 0) {
					// convert to empty interface
					ef.type = (const Type*)f->ot;
					ef.__object = f->arg;
					// convert to interface with methods
					iface.__methods = __go_convert_interface_2((const Type*)fint,
					iface.__object = f->arg;
					if(iface.__methods == nil)
						runtime_throw("invalid type conversion in runfinq");
				reflect_call(f->ft, f->fn, 0, 0, &param, nil);

	runtime_gc(1);	// trigger another gc to clean up the finalized objects, if possible
// mark the block at v of size n as allocated.
// If noscan is true, mark it as not needing scanning.
runtime_markallocated(void *v, uintptr n, bool noscan)
	uintptr *b, obits, bits, off, shift;

		runtime_printf("markallocated %p+%p\n", v, n);

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markallocated: bad pointer");

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;	// word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

		bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
			bits |= bitNoScan<<shift;
		if(runtime_gomaxprocs == 1) {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
// mark the block at v of size n as freed.
runtime_markfreed(void *v, uintptr n)
	uintptr *b, obits, bits, off, shift;

		runtime_printf("markfreed %p+%p\n", v, n);

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markfreed: bad pointer");

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;	// word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

		bits = (obits & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
		if(runtime_gomaxprocs == 1) {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
// check that the block at v of size n is marked freed.
runtime_checkfreed(void *v, uintptr n)
	uintptr *b, bits, off, shift;

	if(!runtime_checking)

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		return;	// not allocated, so okay

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;	// word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

	if((bits & bitAllocated) != 0) {
		runtime_printf("checkfreed %p+%p: off=%p have=%p\n",
			v, n, off, bits & bitMask);
		runtime_throw("checkfreed: not freed");
// mark the span of memory at v as having n blocks of the given size.
// if leftover is true, there is left over space at the end of the span.
runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
	uintptr *b, off, shift;

	if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markspan: bad pointer");

	if(leftover)	// mark a boundary just past end of last block too
	for(; n-- > 0; p += size) {
		// Okay to use non-atomic ops here, because we control
		// the entire span, and each bitmap word has bits for only
		// one span, so no other goroutines are changing these
		// bitmap words.
		off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;	// word offset
		b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		*b = (*b & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
// unmark the span of memory at v of length n bytes.
runtime_unmarkspan(void *v, uintptr n)
	uintptr *p, *b, off;

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markspan: bad pointer");

	off = p - (uintptr*)runtime_mheap.arena_start;	// word offset
	if(off % wordsPerBitmapWord != 0)
		runtime_throw("markspan: unaligned pointer");
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	if(n%wordsPerBitmapWord != 0)
		runtime_throw("unmarkspan: unaligned length");
	// Okay to use non-atomic ops here, because we control
	// the entire span, and each bitmap word has bits for only
	// one span, so no other goroutines are changing these
	// bitmap words.
	n /= wordsPerBitmapWord;
runtime_blockspecial(void *v)
	uintptr *b, off, shift;

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

	return (*b & (bitSpecial<<shift)) != 0;
runtime_setblockspecial(void *v, bool s)
	uintptr *b, off, shift, bits, obits;

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

			bits = obits | (bitSpecial<<shift);
			bits = obits & ~(bitSpecial<<shift);
		if(runtime_gomaxprocs == 1) {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
runtime_MHeap_MapBits(MHeap *h)

	// Caller has added extra mappings to the arena.
	// Add extra mappings of bitmap words as needed.
	// We allocate extra bitmap pieces in chunks of bitmapChunk.

	n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
	n = ROUND(n, bitmapChunk);
	if(h->bitmap_mapped >= n)

	page_size = getpagesize();
	n = (n+page_size-1) & ~(page_size-1);

	runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, &mstats.gc_sys);
	h->bitmap_mapped = n;