// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector (GC).
//
// - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc)
// - parallel (up to MaxGcproc threads)
// - partially concurrent (mark is stop-the-world, while sweep is concurrent)
// - non-moving/non-compacting
// - full (non-partial)
//
// The next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by the GOGC environment
// variable (100 by default). If GOGC=100 and we're using 4M, we'll GC again when
// we get to 8M (this mark is tracked in the next_gc variable). This keeps the GC
// cost in linear proportion to the allocation cost. Adjusting GOGC just changes
// the linear constant (and also the amount of extra memory used).
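//
// A minimal sketch of that trigger arithmetic (illustrative only; live_heap here
// stands for the heap in use once sweeping has finished, and gcpercent is the
// parsed $GOGC value, with GOGC=off disabling collection entirely):
//
//	next_gc = live_heap + live_heap*gcpercent/100;   // e.g. 4M live -> trigger at 8M for GOGC=100
//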
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
// and so the next_gc calculation is tricky and happens as follows.
// At the end of the stop-the-world phase next_gc is conservatively set based on the total
// heap size; all spans are marked as "needs sweeping".
// Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
// The background sweeper goroutine simply sweeps spans one-by-one, bringing next_gc
// closer to the target value. However, this is not enough to avoid over-allocating memory.
// Consider that a goroutine wants to allocate a new span for a large object and
// there are no free swept spans, but there are small-object unswept spans.
// If the goroutine naively allocates a new span, it can surpass the yet-unknown
// target next_gc value. In order to prevent such cases, (1) when a goroutine needs
// to allocate a new small-object span, it sweeps small-object spans for the same
// object size until it frees at least one object; (2) when a goroutine needs to
// allocate a large-object span from the heap, it sweeps spans until it frees at least
// that many pages into the heap. Together these two measures ensure that we don't surpass
// the target next_gc value by a large margin. There is an exception: if a goroutine sweeps
// and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
// but there can still be other one-page unswept spans which could be combined into a two-page span.
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in the GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into its mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).
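//
// A small illustrative sketch of the sweep pacing described above (not the exact
// code; the real adjustment is the runtime_xadd64 on mstats.next_gc inside
// runtime_MSpan_Sweep below). Sweeping a span that frees `freed` bytes lowers the
// conservative trigger by the freed memory plus its GOGC share:
//
//	next_gc -= freed * (gcpercent + 100) / 100;
//
// so once all garbage has been swept, next_gc settles near live_heap*(1+GOGC/100),
// matching the estimate a fully stop-the-world collector would have computed.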
// Map gccgo field names to gc field names.
// Slice aka __go_open_array.
#define array __values
#define cap __capacity
// Iface aka __go_interface
typedef struct __go_map Hmap;
// Type aka __go_type_descriptor
#define string __reflection
#define KindPtr GO_PTR
#define KindNoPointers GO_NO_POINTERS
// PtrType aka __go_ptr_type
#define elem __element_type
#ifdef USING_SPLIT_STACK

extern void * __splitstack_find (void *, void *, size_t *, void **, void **,
				 void **);

extern void * __splitstack_find_context (void *context[10], size_t *, void **,
					  void **, void **);
	ScanStackByFrames = 1,

	// Four bits per word (see #defines below).
	wordsPerBitmapWord = sizeof(void*)*8/4,
	bitShift = sizeof(void*)*8/4,

	WorkbufSize = 16*1024,
	RootBlockSize = 4*1024,
	FinBlockSize = 4*1024,

	handoffThreshold = 4,
	IntermediateBufferCapacity = 64,

	// Bits in type information
	PC_BITS = PRECISE | LOOP,

#define GcpercentUnknown (-2)

// Initialized from $GOGC. GOGC=off means no gc.
static int32 gcpercent = GcpercentUnknown;
void sync_runtime_registerPool(void **)
  __asm__ (GOSYM_PREFIX "sync.runtime_registerPool");

sync_runtime_registerPool(void **p)
	runtime_lock(&pools);
	runtime_unlock(&pools);

	for(pool = pools.head; pool != nil; pool = next) {
		pool[0] = nil; // next
		pool[1] = nil; // local
		pool[2] = nil; // localSize
		off = (uintptr)pool[3] / sizeof(void*);
		pool[off+0] = nil; // global slice

	for(pp=runtime_allp; (p=*pp) != nil; pp++) {
		// clear tinyalloc pool
// Bits in per-word bitmap.
// #defines because enum might not be able to hold the values.
//
// Each word in the bitmap describes wordsPerBitmapWord words
// of heap memory. There are 4 bitmap bits dedicated to each heap word,
// so on a 64-bit system there is one bitmap word per 16 heap words.
// The bits in the word are packed together by type first, then by
// heap location, so each 64-bit bitmap word consists of, from top to bottom,
// the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits,
// then the 16 bitScan/bitBlockBoundary bits, then the 16 bitAllocated bits.
// This layout makes it easier to iterate over the bits of a given type.
//
// The bitmap starts at mheap.arena_start and extends *backward* from
// there. On a 64-bit system the off'th word in the arena is tracked by
// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
// the only difference is that the divisor is 8.)
//
// To pull out the bits corresponding to a given pointer p, we use:
//
//	off = p - (uintptr*)mheap.arena_start;  // word offset
//	b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
//	shift = off % wordsPerBitmapWord
//	bits = *b >> shift;
//	/* then test bits & bitAllocated, bits & bitMarked, etc. */
#define bitAllocated		((uintptr)1<<(bitShift*0))	/* block start; eligible for garbage collection */
#define bitScan			((uintptr)1<<(bitShift*1))	/* when bitAllocated is set */
#define bitMarked		((uintptr)1<<(bitShift*2))	/* when bitAllocated is set */
#define bitSpecial		((uintptr)1<<(bitShift*3))	/* when bitAllocated is set - has finalizer or being profiled */
#define bitBlockBoundary	((uintptr)1<<(bitShift*1))	/* when bitAllocated is NOT set - mark for FlagNoGC objects */

#define bitMask (bitAllocated | bitScan | bitMarked | bitSpecial)
// Holding worldsema grants an M the right to try to stop the world.
// The usage pattern is:
//
//	runtime_semacquire(&runtime_worldsema);
//	runtime_stoptheworld();
//	...
//	runtime_semrelease(&runtime_worldsema);
//	runtime_starttheworld();

uint32 runtime_worldsema = 1;
typedef struct Workbuf Workbuf;

#define SIZE (WorkbufSize-sizeof(LFNode)-sizeof(uintptr))
	LFNode	node; // must be first
	Obj	obj[SIZE/sizeof(Obj) - 1];
	uint8	_padding[SIZE%sizeof(Obj) + sizeof(Obj)];
typedef struct Finalizer Finalizer;
	const struct __go_func_type *ft;
	const struct __go_ptr_type *ot;

typedef struct FinBlock FinBlock;

static FinBlock	*finq;   // list of finalizers that are to be executed
static FinBlock	*finc;   // cache of free blocks
static FinBlock	*allfin; // list of all blocks
static int32 fingwait;
static void runfinq(void*);
static void bgsweep(void*);
static Workbuf* getempty(Workbuf*);
static Workbuf* getfull(Workbuf*);
static void putempty(Workbuf*);
static Workbuf* handoff(Workbuf*);
static void gchelperstart(void);
static void flushallmcaches(void);
static void addstackroots(G *gp, Workbuf **wbufp);
	uint64	full;  // lock-free list of full blocks
	uint64	empty; // lock-free list of empty blocks
	byte	pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
	volatile uint32	nwait;
	volatile uint32	ndone;
} work __attribute__((aligned(8)));

	GC_DEFAULT_PTR = GC_NUM_INSTR,

	uint64 instr[GC_NUM_INSTR2];
// markonly marks an object. It returns true if the object
// has been marked by this function, false otherwise.
// This function doesn't append the object to any buffer.
	uintptr *bitp, bits, shift, x, xbits, off, j;

	// Words outside the arena cannot be pointers.
	if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)

	// obj may be a pointer to a live object.
	// Try to find the beginning of the object.

	// Round down to word boundary.
	obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));

	// Find bits for this word.
	off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
	bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	bits = xbits >> shift;

	// Pointing at the beginning of a block?
	if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
			runtime_xadd64(&gcstats.markonly.foundbit, 1);
	// Pointing just past the beginning?
	// Scan backward a little to find a block boundary.
	for(j=shift; j-->0; ) {
		if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
				runtime_xadd64(&gcstats.markonly.foundword, 1);

	// Otherwise consult span table to find beginning.
	// (Manually inlined copy of MHeap_LookupMaybe.)
	k = (uintptr)obj>>PageShift;
	x -= (uintptr)runtime_mheap.arena_start>>PageShift;
	s = runtime_mheap.spans[x];
	if(s == nil || k < s->start || (byte*)obj >= s->limit || s->state != MSpanInUse)
	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {
		uintptr size = s->elemsize;
		int32 i = ((byte*)obj - p)/size;
	// Now that we know the object header, reload bits.
	off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
	bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	bits = xbits >> shift;
		runtime_xadd64(&gcstats.markonly.foundspan, 1);

	// Now we have bits, bitp, and shift correct for
	// obj pointing at the base of the object.
	// Only care about allocated and not marked.
	if((bits & (bitAllocated|bitMarked)) != bitAllocated)
		*bitp |= bitMarked<<shift;
		if(x & (bitMarked<<shift))
		if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))

	// The object is now marked
// PtrTarget is a structure used by intermediate buffers.
// The intermediate buffers hold GC data before it
// is moved/flushed to the work buffer (Workbuf).
// The size of an intermediate buffer is very small,
// such as 32 or 64 elements.
typedef struct PtrTarget PtrTarget;

typedef struct Scanbuf Scanbuf;

typedef struct BufferList BufferList;
	PtrTarget ptrtarget[IntermediateBufferCapacity];
	Obj obj[IntermediateBufferCapacity];
	byte pad[CacheLineSize];
static BufferList bufferList[MaxGcproc];

static Type *itabtype;

static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
// flushptrbuf moves data from the PtrTarget buffer to the work buffer.
// The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
// while the work buffer contains blocks which have been marked
// and are prepared to be scanned by the garbage collector.
//
// _wp, _wbuf, _nobj are input/output parameters and are specifying the work buffer.
//
// A simplified drawing explaining how the todo-list moves from a structure to another:
//
//	Obj ------> PtrTarget (pointer targets)
//	            (find block start, mark and enqueue)
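//
// A minimal sketch of that flow, using hypothetical helper names (the real logic
// follows below and additionally handles buffer refills, statistics, and handoff):
//
//	while(ptrbuf < ptrbuf_end) {
//		obj = ptrbuf->p;                   // candidate pointer recorded by scanblock
//		obj = find_block_start(obj);       // bitmap scan or span lookup (hypothetical helper)
//		if(mark(obj))                      // set bitMarked if not already marked
//			*wp++ = (Obj){obj, size, ti};  // queue the whole block for scanning
//		ptrbuf++;
//	}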
flushptrbuf(Scanbuf *sbuf)
	byte *p, *arena_start, *obj;
	uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti, n;
	PtrTarget *ptrbuf_end;

	arena_start = runtime_mheap.arena_start;

	ptrbuf = sbuf->ptr.begin;
	ptrbuf_end = sbuf->ptr.pos;
	n = ptrbuf_end - sbuf->ptr.begin;
	sbuf->ptr.pos = sbuf->ptr.begin;

		runtime_xadd64(&gcstats.ptr.sum, n);
		runtime_xadd64(&gcstats.ptr.cnt, 1);

	// If buffer is nearly full, get a new one.
	if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
		wbuf = getempty(wbuf);

		if(n >= nelem(wbuf->obj))
			runtime_throw("ptrbuf has to be smaller than WorkBuf");
	while(ptrbuf < ptrbuf_end) {
		// obj belongs to interval [mheap.arena_start, mheap.arena_used).
			if(obj < runtime_mheap.arena_start || obj >= runtime_mheap.arena_used)
				runtime_throw("object is outside of mheap");

		// obj may be a pointer to a live object.
		// Try to find the beginning of the object.

		// Round down to word boundary.
		if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
			obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));

		// Find bits for this word.
		off = (uintptr*)obj - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = xbits >> shift;

		// Pointing at the beginning of a block?
		if((bits & (bitAllocated|bitBlockBoundary)) != 0) {
				runtime_xadd64(&gcstats.flushptrbuf.foundbit, 1);

		// Pointing just past the beginning?
		// Scan backward a little to find a block boundary.
		for(j=shift; j-->0; ) {
			if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
				obj = (byte*)obj - (shift-j)*PtrSize;
					runtime_xadd64(&gcstats.flushptrbuf.foundword, 1);
		// Otherwise consult span table to find beginning.
		// (Manually inlined copy of MHeap_LookupMaybe.)
		k = (uintptr)obj>>PageShift;
		x -= (uintptr)arena_start>>PageShift;
		s = runtime_mheap.spans[x];
		if(s == nil || k < s->start || obj >= s->limit || s->state != MSpanInUse)
		p = (byte*)((uintptr)s->start<<PageShift);
		if(s->sizeclass == 0) {
			int32 i = ((byte*)obj - p)/size;

		// Now that we know the object header, reload bits.
		off = (uintptr*)obj - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = xbits >> shift;
			runtime_xadd64(&gcstats.flushptrbuf.foundspan, 1);

		// Now we have bits, bitp, and shift correct for
		// obj pointing at the base of the object.
		// Only care about allocated and not marked.
		if((bits & (bitAllocated|bitMarked)) != bitAllocated)
			*bitp |= bitMarked<<shift;
			if(x & (bitMarked<<shift))
			if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))
		// If object has no pointers, don't need to scan further.
		if((bits & bitScan) == 0)

		// Ask span about size class.
		// (Manually inlined copy of MHeap_Lookup.)
		x = (uintptr)obj >> PageShift;
		x -= (uintptr)arena_start>>PageShift;
		s = runtime_mheap.spans[x];

		*wp = (Obj){obj, s->elemsize, ti};

	// If another proc wants a pointer, give it some.
	if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
		wbuf = handoff(wbuf);
		wp = wbuf->obj + nobj;
flushobjbuf(Scanbuf *sbuf)
	objbuf = sbuf->obj.begin;
	objbuf_end = sbuf->obj.pos;
	sbuf->obj.pos = sbuf->obj.begin;

	while(objbuf < objbuf_end) {
		// Align obj.b to a word boundary.
		off = (uintptr)obj.p & (PtrSize-1);
			obj.p += PtrSize - off;
			obj.n -= PtrSize - off;

		if(obj.p == nil || obj.n == 0)

		// If buffer is full, get a new one.
		if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
			wbuf = getempty(wbuf);

	// If another proc wants a pointer, give it some.
	if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
		wbuf = handoff(wbuf);
		wp = wbuf->obj + nobj;
// Program that scans the whole block and treats every block element as a potential pointer
static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};

static uintptr chanProg[2] = {0, GC_CHAN};

// Local variables of a program fragment or loop
typedef struct Frame Frame;
	uintptr count, elemsize, b;
	uintptr *loop_or_ret;

// Sanity check for the derived type info objti.
checkptr(void *obj, uintptr objti)
	uintptr type, tisize, i, x;

		runtime_throw("checkptr is debug only");
	if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)
	type = runtime_gettype(obj);
	t = (Type*)(type & ~(uintptr)(PtrSize-1));
	x = (uintptr)obj >> PageShift;
	x -= (uintptr)(runtime_mheap.arena_start)>>PageShift;
	s = runtime_mheap.spans[x];
	objstart = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass != 0) {
		i = ((byte*)obj - objstart)/s->elemsize;
		objstart += i*s->elemsize;
	tisize = *(uintptr*)objti;
	// Sanity check for object size: it should fit into the memory block.
	if((byte*)obj + tisize > objstart + s->elemsize) {
		runtime_printf("object of type '%S' at %p/%p does not fit in block %p/%p\n",
			*t->string, obj, tisize, objstart, s->elemsize);
		runtime_throw("invalid gc type info");
	// If obj points to the beginning of the memory block,
	// check type info as well.
	if(t->string == nil ||
	   // Gob allocates unsafe pointers for indirection.
	   (runtime_strcmp((const char *)t->string->str, (const char*)"unsafe.Pointer") &&
	   // Runtime and gc think differently about closures.
	    runtime_strstr((const char *)t->string->str, (const char*)"struct { F uintptr") != (const char *)t->string->str)) {
		pc1 = (uintptr*)objti;
		pc2 = (uintptr*)t->gc;
		// A simple best-effort check until first GC_END.
		for(j = 1; pc1[j] != GC_END && pc2[j] != GC_END; j++) {
			if(pc1[j] != pc2[j]) {
				runtime_printf("invalid gc type info for '%s' at %p, type info %p, block info %p\n",
					t->string ? (const int8*)t->string->str : (const int8*)"?", j, pc1[j], pc2[j]);
				runtime_throw("invalid gc type info");
// scanblock scans a block of n bytes starting at pointer b for references
// to other objects, scanning any it finds recursively until there are no
// unscanned objects left. Instead of using an explicit recursion, it keeps
// a work list in the Workbuf* structures and loops in the main function
// body. Keeping an explicit work list is easier on the stack allocator and
// more efficient.
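//
// A minimal sketch of that work-list loop (illustrative only; the real loop below
// additionally interprets per-type GC programs, channels, interfaces, etc.):
//
//	for(;;) {
//		// ... scan [b, b+n), pushing discovered pointers through the Scanbuf ...
//		if(sbuf.nobj == 0) {
//			sbuf.wbuf = getfull(sbuf.wbuf);   // refill from the global work list
//			if(sbuf.wbuf == nil)
//				return;                       // no work left anywhere
//		}
//		// pop the next Obj from the work buffer into b, n, ti and repeat
//	}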
scanblock(Workbuf *wbuf, bool keepworking)
	byte *b, *arena_start, *arena_used;
	uintptr n, i, end_b, elemsize, size, ti, objti, count, /* type, */ nobj;
	uintptr *pc, precise_type, nominal_size;
	uintptr *chan_ret, chancap;
	Frame *stack_ptr, stack_top, stack[GC_STACK_CAPACITY+4];
	BufferList *scanbuffers;

	if(sizeof(Workbuf) % WorkbufSize != 0)
		runtime_throw("scanblock: size of Workbuf is suboptimal");

	// Memory arena parameters.
	arena_start = runtime_mheap.arena_start;
	arena_used = runtime_mheap.arena_used;

	stack_ptr = stack+nelem(stack)-1;

	precise_type = false;

	wp = &wbuf->obj[nobj];

	scanbuffers = &bufferList[runtime_m()->helpgc];

	sbuf.ptr.begin = sbuf.ptr.pos = &scanbuffers->ptrtarget[0];
	sbuf.ptr.end = sbuf.ptr.begin + nelem(scanbuffers->ptrtarget);

	sbuf.obj.begin = sbuf.obj.pos = &scanbuffers->obj[0];
	sbuf.obj.end = sbuf.obj.begin + nelem(scanbuffers->obj);

	// (Silence the compiler)
	// Each iteration scans the block b of length n, queueing pointers in
	// the work buffer.
			runtime_printf("scanblock %p %D\n", b, (int64)n);

			runtime_xadd64(&gcstats.nbytes, n);
			runtime_xadd64(&gcstats.obj.sum, sbuf.nobj);
			runtime_xadd64(&gcstats.obj.cnt, 1);

		if(ti != 0 && false) {
			pc = (uintptr*)(ti & ~(uintptr)PC_BITS);
			precise_type = (ti & PRECISE);
			stack_top.elemsize = pc[0];
				nominal_size = pc[0];
				stack_top.count = 0; // 0 means an infinite number of iterations
				stack_top.loop_or_ret = pc+1;

			// Simple sanity check for provided type info ti:
			// The declared size of the object must be not larger than the actual size
			// (it can be smaller due to inferior pointers).
			// It's difficult to make a comprehensive check due to inferior pointers,
			// reflection, gob, etc.
				runtime_printf("invalid gc type info: type info size %p, block size %p\n", pc[0], n);
				runtime_throw("invalid gc type info");
		} else if(UseSpanType && false) {
				runtime_xadd64(&gcstats.obj.notype, 1);

			type = runtime_gettype(b);
					runtime_xadd64(&gcstats.obj.typelookup, 1);

				t = (Type*)(type & ~(uintptr)(PtrSize-1));
				switch(type & (PtrSize-1)) {
				case TypeInfo_SingleObject:
					pc = (uintptr*)t->gc;
					precise_type = true; // type information about 'b' is precise
					stack_top.elemsize = pc[0];

					pc = (uintptr*)t->gc;
					precise_type = true; // type information about 'b' is precise
					stack_top.count = 0; // 0 means an infinite number of iterations
					stack_top.elemsize = pc[0];
					stack_top.loop_or_ret = pc+1;

					chantype = (ChanType*)t;
					runtime_throw("scanblock: invalid type");

		stack_top.b = (uintptr)b;
		end_b = (uintptr)b + n - PtrSize;

			runtime_xadd64(&gcstats.instr[pc[0]], 1);
			obj = *(void**)(stack_top.b + pc[1]);
				checkptr(obj, objti);

			sliceptr = (Slice*)(stack_top.b + pc[1]);
			if(sliceptr->cap != 0) {
				obj = sliceptr->array;
				// Can't use slice element type for scanning,
				// because if it points to an array embedded
				// in the beginning of a struct,
				// we will scan the whole struct as the slice.
				// So just obtain type info from heap.

			obj = *(void**)(stack_top.b + pc[1]);

			obj = *(void**)(stack_top.b + pc[1]);

			eface = (Eface*)(stack_top.b + pc[1]);
			if(eface->__type_descriptor == nil)
			t = eface->__type_descriptor;
			if((const byte*)t >= arena_start && (const byte*)t < arena_used) {
				union { const Type *tc; Type *tr; } u;
				*sbuf.ptr.pos++ = (PtrTarget){u.tr, 0};
				if(sbuf.ptr.pos == sbuf.ptr.end)
			if((byte*)eface->__object >= arena_start && (byte*)eface->__object < arena_used) {
				if(t->__size <= sizeof(void*)) {
					if((t->__code & KindNoPointers))
					obj = eface->__object;
					if((t->__code & ~KindNoPointers) == KindPtr)
						// objti = (uintptr)((PtrType*)t)->elem->gc;
					obj = eface->__object;
					// objti = (uintptr)t->gc;

			iface = (Iface*)(stack_top.b + pc[1]);
			if(iface->tab == nil)
			if((byte*)iface->tab >= arena_start && (byte*)iface->tab < arena_used) {
				*sbuf.ptr.pos++ = (PtrTarget){iface->tab, /* (uintptr)itabtype->gc */ 0};
				if(sbuf.ptr.pos == sbuf.ptr.end)
			if((byte*)iface->__object >= arena_start && (byte*)iface->__object < arena_used) {
				// t = iface->tab->type;
				if(t->__size <= sizeof(void*)) {
					if((t->__code & KindNoPointers))
					obj = iface->__object;
					if((t->__code & ~KindNoPointers) == KindPtr)
						// objti = (uintptr)((const PtrType*)t)->elem->gc;
					obj = iface->__object;
					// objti = (uintptr)t->gc;
		case GC_DEFAULT_PTR:
			while(stack_top.b <= end_b) {
				obj = *(byte**)stack_top.b;
				stack_top.b += PtrSize;
				if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
					*sbuf.ptr.pos++ = (PtrTarget){obj, 0};
					if(sbuf.ptr.pos == sbuf.ptr.end)

			if(--stack_top.count != 0) {
				// Next iteration of a loop if possible.
				stack_top.b += stack_top.elemsize;
				if(stack_top.b + stack_top.elemsize <= end_b+PtrSize) {
					pc = stack_top.loop_or_ret;

			// Stack pop if possible.
			if(stack_ptr+1 < stack+nelem(stack)) {
				pc = stack_top.loop_or_ret;
				stack_top = *(++stack_ptr);

			i = (uintptr)b + nominal_size;

			// Quickly scan [b+i,b+n) for possible pointers.
			for(; i<=end_b; i+=PtrSize) {
				if(*(byte**)i != nil) {
					// Found a value that may be a pointer.
					// Do a rescan of the entire block.
					enqueue((Obj){b, n, 0}, &sbuf.wbuf, &sbuf.wp, &sbuf.nobj);
						runtime_xadd64(&gcstats.rescan, 1);
						runtime_xadd64(&gcstats.rescanbytes, n);
		case GC_ARRAY_START:
			i = stack_top.b + pc[1];
			*stack_ptr-- = stack_top;
			stack_top = (Frame){count, elemsize, i, pc};

			if(--stack_top.count != 0) {
				stack_top.b += stack_top.elemsize;
				pc = stack_top.loop_or_ret;
				stack_top = *(++stack_ptr);

			*stack_ptr-- = stack_top;
			stack_top = (Frame){1, 0, stack_top.b + pc[1], pc+3 /*return address*/};
			pc = (uintptr*)((byte*)pc + *(int32*)(pc+2));  // target of the CALL instruction

			obj = (void*)(stack_top.b + pc[1]);
			*sbuf.obj.pos++ = (Obj){obj, size, objti};
			if(sbuf.obj.pos == sbuf.obj.end)
			chan = *(Hchan**)(stack_top.b + pc[1]);
			if(markonly(chan)) {
				chantype = (ChanType*)pc[2];
				if(!(chantype->elem->__code & KindNoPointers)) {

			// There are no heap pointers in struct Hchan,
			// so we can ignore the leading sizeof(Hchan) bytes.
			if(!(chantype->elem->__code & KindNoPointers)) {
				// Channel's buffer follows Hchan immediately in memory.
				// Size of buffer (cap(c)) is second int in the chan struct.
				chancap = ((uintgo*)chan)[1];
					// TODO(atom): split into two chunks so that only the
					// in-use part of the circular buffer is scanned.
					// (Channel routines zero the unused part, so the current
					// code does not lead to leaks, it's just a little inefficient.)
					*sbuf.obj.pos++ = (Obj){(byte*)chan+runtime_Hchansize, chancap*chantype->elem->size,
						(uintptr)chantype->elem->gc | PRECISE | LOOP};
					if(sbuf.obj.pos == sbuf.obj.end)

			runtime_printf("runtime: invalid GC instruction %p at %p\n", pc[0], pc);
			runtime_throw("scanblock: invalid GC instruction");
		if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
			*sbuf.ptr.pos++ = (PtrTarget){obj, objti};
			if(sbuf.ptr.pos == sbuf.ptr.end)

		// Done scanning [b, b+n). Prepare for the next iteration of
		// the loop by setting b, n, ti to the parameters for the next block.
		if(sbuf.nobj == 0) {
			if(sbuf.nobj == 0) {
					putempty(sbuf.wbuf);

				// Emptied our buffer: refill.
				sbuf.wbuf = getfull(sbuf.wbuf);
				if(sbuf.wbuf == nil)
				sbuf.nobj = sbuf.wbuf->nobj;
				sbuf.wp = sbuf.wbuf->obj + sbuf.wbuf->nobj;

		// Fetch b from the work buffer.
static struct root_list *roots;

__go_register_gc_roots (struct root_list *r)
	// FIXME: This needs locking if multiple goroutines can call
	// dlopen simultaneously.
// Append obj to the work buffer.
// _wbuf, _wp, _nobj are input/output parameters and are specifying the work buffer.
enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj)
		runtime_printf("append obj(%p %D %p)\n", obj.p, (int64)obj.n, obj.ti);

	// Align obj.b to a word boundary.
	off = (uintptr)obj.p & (PtrSize-1);
		obj.p += PtrSize - off;
		obj.n -= PtrSize - off;

	if(obj.p == nil || obj.n == 0)

	// Load work buffer state

	// If another proc wants a pointer, give it some.
	if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
		wbuf = handoff(wbuf);
		wp = wbuf->obj + nobj;

	// If buffer is full, get a new one.
	if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
		wbuf = getempty(wbuf);

	// Save work buffer state
enqueue1(Workbuf **wbufp, Obj obj)
	if(wbuf->nobj >= nelem(wbuf->obj))
		*wbufp = wbuf = getempty(wbuf);
	wbuf->obj[wbuf->nobj++] = obj;
markroot(ParFor *desc, uint32 i)
	MSpan **allspans, *s;

	wbuf = getempty(nil);

	// For gccgo this is both data and bss.
		struct root_list *pl;

		for(pl = roots; pl != nil; pl = pl->next) {
			struct root *pr = &pl->roots[0];
				void *decl = pr->decl;
				enqueue1(&wbuf, (Obj){decl, pr->size, 0});

		// For gccgo we use this for all the other global roots.
		enqueue1(&wbuf, (Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
		enqueue1(&wbuf, (Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
		enqueue1(&wbuf, (Obj){(byte*)&runtime_allg, sizeof runtime_allg, 0});
		enqueue1(&wbuf, (Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
		enqueue1(&wbuf, (Obj){(byte*)&runtime_allp, sizeof runtime_allp, 0});
		enqueue1(&wbuf, (Obj){(byte*)&work, sizeof work, 0});
		runtime_proc_scan(&wbuf, enqueue1);
		runtime_MProf_Mark(&wbuf, enqueue1);
		runtime_time_scan(&wbuf, enqueue1);
		runtime_netpoll_scan(&wbuf, enqueue1);

	case RootFinalizers:
		for(fb=allfin; fb; fb=fb->alllink)
			enqueue1(&wbuf, (Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
		// mark span types and MSpan.specials (to walk spans only once)
		allspans = h->allspans;
		for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
			SpecialFinalizer *spf;

			s = allspans[spanidx];
			if(s->sweepgen != sg) {
				runtime_printf("sweep %d %d\n", s->sweepgen, sg);
				runtime_throw("gc: unswept span");
			if(s->state != MSpanInUse)
			// The garbage collector ignores type pointers stored in MSpan.types:
			//  - Compiler-generated types are stored outside of heap.
			//  - The reflect package has runtime-generated types cached in its data structures.
			//    The garbage collector relies on finding the references via that cache.
			if(s->types.compression == MTypes_Words || s->types.compression == MTypes_Bytes)
				markonly((byte*)s->types.data);
			for(sp = s->specials; sp != nil; sp = sp->next) {
				if(sp->kind != KindSpecialFinalizer)
				// don't mark finalized object, but scan it so we
				// retain everything it points to.
				spf = (SpecialFinalizer*)sp;
				// A finalizer can be set for an inner byte of an object, find object beginning.
				p = (void*)((s->start << PageShift) + spf->offset/s->elemsize*s->elemsize);
				enqueue1(&wbuf, (Obj){p, s->elemsize, 0});
				enqueue1(&wbuf, (Obj){(void*)&spf->fn, PtrSize, 0});
				enqueue1(&wbuf, (Obj){(void*)&spf->ft, PtrSize, 0});
				enqueue1(&wbuf, (Obj){(void*)&spf->ot, PtrSize, 0});
	case RootFlushCaches:

		// the rest is scanning goroutine stacks
		if(i - RootCount >= runtime_allglen)
			runtime_throw("markroot: bad index");
		gp = runtime_allg[i - RootCount];
		// remember when we've first observed the G blocked
		// needed only to output in traceback
		if((gp->status == Gwaiting || gp->status == Gsyscall) && gp->waitsince == 0)
			gp->waitsince = work.tstart;
		addstackroots(gp, &wbuf);

	scanblock(wbuf, false);
// Get an empty work buffer off the work.empty list,
// allocating new buffers as needed.
getempty(Workbuf *b)
		runtime_lfstackpush(&work.full, &b->node);
	b = (Workbuf*)runtime_lfstackpop(&work.empty);
		// Need to allocate.
		runtime_lock(&work);
		if(work.nchunk < sizeof *b) {
			work.nchunk = 1<<20;
			work.chunk = runtime_SysAlloc(work.nchunk, &mstats.gc_sys);
			if(work.chunk == nil)
				runtime_throw("runtime: cannot allocate memory");
		b = (Workbuf*)work.chunk;
		work.chunk += sizeof *b;
		work.nchunk -= sizeof *b;
		runtime_unlock(&work);
putempty(Workbuf *b)
		runtime_xadd64(&gcstats.putempty, 1);
	runtime_lfstackpush(&work.empty, &b->node);
// Get a full work buffer off the work.full list, or return nil.
		runtime_xadd64(&gcstats.getfull, 1);
		runtime_lfstackpush(&work.empty, &b->node);
	b = (Workbuf*)runtime_lfstackpop(&work.full);
	if(b != nil || work.nproc == 1)

	runtime_xadd(&work.nwait, +1);
		if(work.full != 0) {
			runtime_xadd(&work.nwait, -1);
			b = (Workbuf*)runtime_lfstackpop(&work.full);
			runtime_xadd(&work.nwait, +1);
		if(work.nwait == work.nproc)
			m->gcstats.nprocyield++;
			runtime_procyield(20);
			m->gcstats.nosyield++;
			m->gcstats.nsleep++;
			runtime_usleep(100);
	// Make new buffer with half of b's pointers.
	runtime_memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
	m->gcstats.nhandoff++;
	m->gcstats.nhandoffcnt += n;

	// Put b on full list - let first half of b get stolen.
	runtime_lfstackpush(&work.full, &b->node);
addstackroots(G *gp, Workbuf **wbufp)
		runtime_printf("unexpected G.status %d (goroutine %p %D)\n", gp->status, gp, gp->goid);
		runtime_throw("mark - bad status");
		runtime_throw("mark - world not stopped");

#ifdef USING_SPLIT_STACK
	if(gp == runtime_g()) {
		// Scanning our own stack.
		sp = __splitstack_find(nil, nil, &spsize, &next_segment,
				       &next_sp, &initial_sp);
	} else if((mp = gp->m) != nil && mp->helpgc) {
		// gchelper's stack is in active use and has no interesting pointers.
		// Scanning another goroutine's stack.
		// The goroutine is usually asleep (the world is stopped).

		// The exception is that if the goroutine is about to enter or might
		// have just exited a system call, it may be executing code such
		// as schedlock and may have needed to start a new stack segment.
		// Use the stack segment and stack pointer at the time of
		// the system call instead, since that won't change underfoot.
		if(gp->gcstack != nil) {
			spsize = gp->gcstack_size;
			next_segment = gp->gcnext_segment;
			next_sp = gp->gcnext_sp;
			initial_sp = gp->gcinitial_sp;
			sp = __splitstack_find_context(&gp->stack_context[0],
						       &spsize, &next_segment,
						       &next_sp, &initial_sp);

		enqueue1(wbufp, (Obj){sp, spsize, 0});
		while((sp = __splitstack_find(next_segment, next_sp,
					      &spsize, &next_segment,
					      &next_sp, &initial_sp)) != nil)
			enqueue1(wbufp, (Obj){sp, spsize, 0});
	if(gp == runtime_g()) {
		// Scanning our own stack.
		bottom = (byte*)&gp;
	} else if((mp = gp->m) != nil && mp->helpgc) {
		// gchelper's stack is in active use and has no interesting pointers.
		// Scanning another goroutine's stack.
		// The goroutine is usually asleep (the world is stopped).
		bottom = (byte*)gp->gcnext_sp;

	top = (byte*)gp->gcinitial_sp + gp->gcstack_size;
		enqueue1(wbufp, (Obj){bottom, top - bottom, 0});
		enqueue1(wbufp, (Obj){top, bottom - top, 0});
runtime_queuefinalizer(void *p, FuncVal *fn, const FuncType *ft, const PtrType *ot)
	runtime_lock(&gclock);
	if(finq == nil || finq->cnt == finq->cap) {
			finc = runtime_persistentalloc(FinBlockSize, 0, &mstats.gc_sys);
			finc->cap = (FinBlockSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
			finc->alllink = allfin;

	f = &finq->fin[finq->cnt];

	runtime_unlock(&gclock);
runtime_MSpan_EnsureSwept(MSpan *s)
	sg = runtime_mheap.sweepgen;
	if(runtime_atomicload(&s->sweepgen) == sg)

	if(runtime_cas(&s->sweepgen, sg-2, sg-1)) {
		runtime_MSpan_Sweep(s);

	// unfortunate condition, and we don't have efficient means to wait
	while(runtime_atomicload(&s->sweepgen) != sg)
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
runtime_MSpan_Sweep(MSpan *s)
	int32 cl, n, npages, nfree;
	uintptr size, off, *bitp, shift, bits;
	uintptr type_data_inc;
	Special *special, **specialp, *y;
	bool res, sweepgenset;

	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	if(m->locks == 0 && m->mallocing == 0 && runtime_g() != m->g0)
		runtime_throw("MSpan_Sweep: m is not locked");
	sweepgen = runtime_mheap.sweepgen;
	if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
		runtime_printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
			s->state, s->sweepgen, sweepgen);
		runtime_throw("MSpan_Sweep: bad span state");

	arena_start = runtime_mheap.arena_start;
		// Chunk full of small blocks.
		npages = runtime_class_to_allocnpages[cl];
		n = (npages << PageShift) / size;

	sweepgenset = false;

	// mark any free objects in this span so we don't collect them
	for(x = s->freelist; x != nil; x = x->next) {
		// This is markonly(x) but faster because we don't need
		// atomic access and we're guaranteed to be pointing at
		// the head of a valid object.
		off = (uintptr*)x - (uintptr*)runtime_mheap.arena_start;
		bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		*bitp |= bitMarked<<shift;
	// Unlink & free special records for any objects we're about to free.
	specialp = &s->specials;
	special = *specialp;
	while(special != nil) {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		p = (byte*)(s->start << PageShift) + special->offset/size*size;
		off = (uintptr*)p - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = *bitp>>shift;
		if((bits & (bitAllocated|bitMarked)) == bitAllocated) {
			// Find the exact byte for which the special was setup
			// (as opposed to object beginning).
			p = (byte*)(s->start << PageShift) + special->offset;
			// about to free object: splice out special record
			special = special->next;
			*specialp = special;
			if(!runtime_freespecial(y, p, size, false)) {
				// stop freeing of object if it has a finalizer
				*bitp |= bitMarked << shift;
			// object is still live: keep special record
			specialp = &special->next;
			special = *specialp;

	type_data = (byte*)s->types.data;
	type_data_inc = sizeof(uintptr);
	compression = s->types.compression;
	switch(compression) {
		type_data += 8*sizeof(uintptr);
	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.
	p = (byte*)(s->start << PageShift);
	for(; n > 0; n--, p += size, type_data+=type_data_inc) {
		off = (uintptr*)p - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = *bitp>>shift;

		if((bits & bitAllocated) == 0)

		if((bits & bitMarked) != 0) {
			*bitp &= ~(bitMarked<<shift);

		// Clear mark, scan, and special bits.
		*bitp &= ~((bitScan|bitMarked|bitSpecial)<<shift);

			runtime_unmarkspan(p, 1<<PageShift);
			// important to set sweepgen before returning it to heap
			runtime_atomicstore(&s->sweepgen, sweepgen);
			if(runtime_debug.efence)
				runtime_SysFree(p, size, &mstats.gc_sys);
				runtime_MHeap_Free(&runtime_mheap, s, 1);
			c->local_nlargefree++;
			c->local_largefree += size;
			runtime_xadd64(&mstats.next_gc, -(uint64)(size * (gcpercent + 100)/100));

			// Free small object.
			switch(compression) {
				*(uintptr*)type_data = 0;
				*(byte*)type_data = 0;
			if(size > 2*sizeof(uintptr))
				((uintptr*)p)[1] = (uintptr)0xdeaddeaddeaddeadll;	// mark as "needs to be zeroed"
			else if(size > sizeof(uintptr))
				((uintptr*)p)[1] = 0;

			end->next = (MLink*)p;
	// The span must be in our exclusive ownership until we update sweepgen,
	// check for potential races.
	if(s->state != MSpanInUse || s->sweepgen != sweepgen-1) {
		runtime_printf("MSpan_Sweep: state=%d sweepgen=%d mheap.sweepgen=%d\n",
			s->state, s->sweepgen, sweepgen);
		runtime_throw("MSpan_Sweep: bad span state after sweep");
	runtime_atomicstore(&s->sweepgen, sweepgen);

		c->local_nsmallfree[cl] += nfree;
		c->local_cachealloc -= nfree * size;
		runtime_xadd64(&mstats.next_gc, -(uint64)(nfree * size * (gcpercent + 100)/100));
		res = runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
// State of background sweep.
// Protected by gclock.

// background sweeping goroutine
bgsweep(void* dummy __attribute__ ((unused)))
	runtime_g()->issystem = 1;
		while(runtime_sweepone() != (uintptr)-1) {

		runtime_lock(&gclock);
			// kick off or wake up goroutine to run queued finalizers
				fing = __go_go(runfinq, nil);
				runtime_ready(fing);
		sweep.parked = true;
		runtime_parkunlock(&gclock, "GC sweep wait");
// returns number of pages returned to heap, or -1 if there is nothing to sweep
runtime_sweepone(void)
	// increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep thus leaving the span in an inconsistent state for next GC
	sg = runtime_mheap.sweepgen;
		idx = runtime_xadd(&sweep.spanidx, 1) - 1;
		if(idx >= sweep.nspan) {
			runtime_mheap.sweepdone = true;
		s = sweep.spans[idx];
		if(s->state != MSpanInUse) {
		if(s->sweepgen != sg-2 || !runtime_cas(&s->sweepgen, sg-2, sg-1))
		if(!runtime_MSpan_Sweep(s))
dumpspan(uint32 idx)
	int32 sizeclass, n, npages, i, column;
	bool allocated, special;

	s = runtime_mheap.allspans[idx];
	if(s->state != MSpanInUse)
	arena_start = runtime_mheap.arena_start;
	p = (byte*)(s->start << PageShift);
	sizeclass = s->sizeclass;
	if(sizeclass == 0) {
		npages = runtime_class_to_allocnpages[sizeclass];
		n = (npages << PageShift) / size;

	runtime_printf("%p .. %p:\n", p, p+n*size);
	for(; n>0; n--, p+=size) {
		uintptr off, *bitp, shift, bits;

		off = (uintptr*)p - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = *bitp>>shift;

		allocated = ((bits & bitAllocated) != 0);
		special = ((bits & bitSpecial) != 0);

		for(i=0; (uint32)i<size; i+=sizeof(void*)) {
				runtime_printf("\t");
				runtime_printf(allocated ? "(" : "[");
				runtime_printf(special ? "@" : "");
				runtime_printf("%p: ", p+i);
				runtime_printf(" ");

			runtime_printf("%p", *(void**)(p+i));

			if(i+sizeof(void*) >= size) {
				runtime_printf(allocated ? ") " : "] ");

		runtime_printf("\n");
	runtime_printf("\n");
// A debugging function to dump the contents of memory
runtime_memorydump(void)
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
runtime_gchelper(void)
	// parallel mark for over gc roots
	runtime_parfordo(work.markfor);

	// help other threads scan secondary blocks
	scanblock(nil, true);

	bufferList[runtime_m()->helpgc].busy = 0;
	nproc = work.nproc;  // work.nproc can change right after we increment work.ndone
	if(runtime_xadd(&work.ndone, +1) == nproc-1)
		runtime_notewakeup(&work.alldone);
	for(pp=runtime_allp; (p=*pp) != nil; pp++) {
		runtime_purgecachedstats(c);

flushallmcaches(void)
	// Flush MCache's to MCentral.
	for(pp=runtime_allp; (p=*pp) != nil; pp++) {
		runtime_MCache_ReleaseAll(c);
updatememstats(GCStats *stats)
	uint64 stacks_inuse, smallfree;

		runtime_memclr((byte*)stats, sizeof(*stats));

	for(mp=runtime_allm; mp; mp=mp->alllink) {
		//stacks_inuse += mp->stackinuse*FixedStack;
			src = (uint64*)&mp->gcstats;
			dst = (uint64*)stats;
			for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
			runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));

	mstats.stacks_inuse = stacks_inuse;
	mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
	mstats.mspan_inuse = runtime_mheap.spanalloc.inuse;
	mstats.sys = mstats.heap_sys + mstats.stacks_sys + mstats.mspan_sys +
		mstats.mcache_sys + mstats.buckhash_sys + mstats.gc_sys + mstats.other_sys;
	// Calculate memory allocator stats.
	// During program execution we only count the number of frees and the amount of freed memory.
	// The current number of alive objects in the heap and the amount of alive heap memory
	// are calculated by scanning all spans.
	// The total number of mallocs is calculated as the number of frees plus the number of alive objects.
	// Similarly, the total amount of allocated memory is calculated as the amount of freed memory
	// plus the amount of alive heap memory.
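	//
	// In identity form (illustrative only, matching the assignments below):
	//
	//	mstats.nmalloc      = mstats.nfree + live_objects
	//	mstats.total_alloc  = mstats.alloc + largefree + smallfree   // live bytes + freed bytes
	//	mstats.heap_objects = mstats.nmalloc - mstats.nfree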
	mstats.total_alloc = 0;
	for(i = 0; i < nelem(mstats.by_size); i++) {
		mstats.by_size[i].nmalloc = 0;
		mstats.by_size[i].nfree = 0;

	// Flush MCache's to MCentral.

	// Aggregate local stats.

	// Scan all spans and count number of alive objects.
	for(i = 0; i < runtime_mheap.nspan; i++) {
		s = runtime_mheap.allspans[i];
		if(s->state != MSpanInUse)
		if(s->sizeclass == 0) {
			mstats.alloc += s->elemsize;
			mstats.nmalloc += s->ref;
			mstats.by_size[s->sizeclass].nmalloc += s->ref;
			mstats.alloc += s->ref*s->elemsize;

	// Aggregate by size class.
	mstats.nfree = runtime_mheap.nlargefree;
	for(i = 0; i < nelem(mstats.by_size); i++) {
		mstats.nfree += runtime_mheap.nsmallfree[i];
		mstats.by_size[i].nfree = runtime_mheap.nsmallfree[i];
		mstats.by_size[i].nmalloc += runtime_mheap.nsmallfree[i];
		smallfree += runtime_mheap.nsmallfree[i] * runtime_class_to_size[i];
	mstats.nmalloc += mstats.nfree;

	// Calculate derived stats.
	mstats.total_alloc = mstats.alloc + runtime_mheap.largefree + smallfree;
	mstats.heap_alloc = mstats.alloc;
	mstats.heap_objects = mstats.nmalloc - mstats.nfree;
// Structure of arguments passed to function gc().
// This allows the arguments to be passed via runtime_mcall.
	int64 start_time; // start time of GC in ns (just before stoptheworld)

static void gc(struct gc_args *args);
static void mgc(G *gp);

	p = runtime_getenv("GOGC");
	if(p == nil || p[0] == '\0')
	if(runtime_strcmp((const char *)p, "off") == 0)
	return runtime_atoi(p);
runtime_gc(int32 force)
	// The atomic operations are not atomic if the uint64s
	// are not aligned on uint64 boundaries. This has been
	// a problem in the past.
	if((((uintptr)&work.empty) & 7) != 0)
		runtime_throw("runtime: gc work buffer is misaligned");
	if((((uintptr)&work.full) & 7) != 0)
		runtime_throw("runtime: gc work buffer is misaligned");

	// Make sure all registers are saved on stack so that
	// scanstack sees them.
	__builtin_unwind_init();

	// The gc is turned off (via enablegc) until
	// the bootstrap has completed.
	// Also, malloc gets called in the guts
	// of a number of libraries that might be
	// holding locks. To avoid priority inversion
	// problems, don't bother trying to run gc
	// while holding a lock. The next mallocgc
	// without a lock will do the gc instead.
	if(!mstats.enablegc || runtime_g() == m->g0 || m->locks > 0 || runtime_panicking)

	if(gcpercent == GcpercentUnknown) {	// first time through
		runtime_lock(&runtime_mheap);
		if(gcpercent == GcpercentUnknown)
			gcpercent = readgogc();
		runtime_unlock(&runtime_mheap);

	runtime_semacquire(&runtime_worldsema, false);
	if(!force && mstats.heap_alloc < mstats.next_gc) {
		// typically threads which lost the race to grab
		// worldsema exit here when gc is done.
		runtime_semrelease(&runtime_worldsema);

	// Ok, we're doing it! Stop everybody else
	a.start_time = runtime_nanotime();
	runtime_stoptheworld();

	if(runtime_debug.allocfreetrace)
		runtime_MProf_TraceGC();

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). Also an
	// enabler for copyable stacks.
	for(i = 0; i < (runtime_debug.gctrace > 1 ? 2 : 1); i++) {
		// switch to g0, call gc(&a), then switch back
		g->status = Gwaiting;
		g->waitreason = "garbage collection";
		// record a new start time in case we're going around again
		a.start_time = runtime_nanotime();

	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();

	// now that gc is done, kick off finalizer thread if needed
	if(!ConcurrentSweep) {
		runtime_lock(&gclock);
		// kick off or wake up goroutine to run queued finalizers
			fing = __go_go(runfinq, nil);
			runtime_ready(fing);
		runtime_unlock(&gclock);
		// give the queued finalizers, if any, a chance to run
	// For gccgo, let other goroutines run.

	gp->status = Grunning;
gc(struct gc_args *args)
	int64 t0, t1, t2, t3, t4;
	uint64 heap0, heap1, obj, ninstr;

	t0 = args->start_time;
	work.tstart = args->start_time;

	runtime_memclr((byte*)&gcstats, sizeof(gcstats));

	for(mp=runtime_allm; mp; mp=mp->alllink)
		runtime_settype_flush(mp);

	m->locks++;	// disable gc during mallocs in parforalloc
	if(work.markfor == nil)
		work.markfor = runtime_parforalloc(MaxGcproc);

	if(itabtype == nil) {
		// get C pointer to the Go type "itab"
		// runtime_gc_itab_ptr(&eface);
		// itabtype = ((PtrType*)eface.__type_descriptor)->elem;

	t1 = runtime_nanotime();
	// Sweep what has not been swept by bgsweep.
	while(runtime_sweepone() != (uintptr)-1)
		gcstats.npausesweep++;
	work.nproc = runtime_gcprocs();
	runtime_parforsetup(work.markfor, work.nproc, RootCount + runtime_allglen, nil, false, markroot);
	if(work.nproc > 1) {
		runtime_noteclear(&work.alldone);
		runtime_helpgc(work.nproc);

	t2 = runtime_nanotime();

	runtime_parfordo(work.markfor);
	scanblock(nil, true);

	t3 = runtime_nanotime();

	bufferList[m->helpgc].busy = 0;
		runtime_notesleep(&work.alldone);

	// next_gc calculation is tricky with concurrent sweep since we don't know size of live heap
	// estimate what was live heap size after previous GC (for tracing only)
	heap0 = mstats.next_gc*100/(gcpercent+100);
	// conservatively set next_gc to high value assuming that everything is live
	// concurrent/lazy sweep will reduce this number while discovering new garbage
	mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;

	t4 = runtime_nanotime();
	mstats.last_gc = t4;
	mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
	mstats.pause_total_ns += t4 - t0;
		runtime_printf("pause %D\n", t4-t0);

	if(runtime_debug.gctrace) {
		updatememstats(&stats);
		heap1 = mstats.heap_alloc;
		obj = mstats.nmalloc - mstats.nfree;

		stats.nprocyield += work.markfor->nprocyield;
		stats.nosyield += work.markfor->nosyield;
		stats.nsleep += work.markfor->nsleep;

		runtime_printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB, %D (%D-%D) objects,"
				" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
			mstats.numgc, work.nproc, (t3-t2)/1000000, (t2-t1)/1000000, (t1-t0+t4-t3)/1000000,
			heap0>>20, heap1>>20, obj,
			mstats.nmalloc, mstats.nfree,
			sweep.nspan, gcstats.nbgsweep, gcstats.npausesweep,
			stats.nhandoff, stats.nhandoffcnt,
			work.markfor->nsteal, work.markfor->nstealcnt,
			stats.nprocyield, stats.nosyield, stats.nsleep);
		gcstats.nbgsweep = gcstats.npausesweep = 0;
2330 gcstats
.nbytes
, gcstats
.obj
.cnt
, gcstats
.obj
.notype
, gcstats
.obj
.typelookup
);
2331 if(gcstats
.ptr
.cnt
!= 0)
2332 runtime_printf("avg ptrbufsize: %D (%D/%D)\n",
2333 gcstats
.ptr
.sum
/gcstats
.ptr
.cnt
, gcstats
.ptr
.sum
, gcstats
.ptr
.cnt
);
2334 if(gcstats
.obj
.cnt
!= 0)
2335 runtime_printf("avg nobj: %D (%D/%D)\n",
2336 gcstats
.obj
.sum
/gcstats
.obj
.cnt
, gcstats
.obj
.sum
, gcstats
.obj
.cnt
);
2337 runtime_printf("rescans: %D, %D bytes\n", gcstats
.rescan
, gcstats
.rescanbytes
);
2339 runtime_printf("instruction counts:\n");
2341 for(i
=0; i
<nelem(gcstats
.instr
); i
++) {
2342 runtime_printf("\t%d:\t%D\n", i
, gcstats
.instr
[i
]);
2343 ninstr
+= gcstats
.instr
[i
];
2345 runtime_printf("\ttotal:\t%D\n", ninstr
);
2347 runtime_printf("putempty: %D, getfull: %D\n", gcstats
.putempty
, gcstats
.getfull
);
2349 runtime_printf("markonly base lookup: bit %D word %D span %D\n", gcstats
.markonly
.foundbit
, gcstats
.markonly
.foundword
, gcstats
.markonly
.foundspan
);
2350 runtime_printf("flushptrbuf base lookup: bit %D word %D span %D\n", gcstats
.flushptrbuf
.foundbit
, gcstats
.flushptrbuf
.foundword
, gcstats
.flushptrbuf
.foundspan
);
	// We cache current runtime_mheap.allspans array in sweep.spans,
	// because the former can be resized and freed.
	// Otherwise we would need to take heap lock every time
	// we want to convert span index to span pointer.

	// Free the old cached array if necessary.
	if(sweep.spans && sweep.spans != runtime_mheap.allspans)
		runtime_SysFree(sweep.spans, sweep.nspan*sizeof(sweep.spans[0]), &mstats.other_sys);
	// Cache the current array.
	runtime_mheap.sweepspans = runtime_mheap.allspans;
	runtime_mheap.sweepgen += 2;
	runtime_mheap.sweepdone = false;
	sweep.spans = runtime_mheap.allspans;
	sweep.nspan = runtime_mheap.nspan;
	// Temporarily disable concurrent sweep, because we see failures on builders.
	if(ConcurrentSweep) {
		runtime_lock(&gclock);
			sweep.g = __go_go(bgsweep, nil);
		else if(sweep.parked) {
			sweep.parked = false;
			runtime_ready(sweep.g);
		runtime_unlock(&gclock);
		// Sweep all spans eagerly.
		while(runtime_sweepone() != (uintptr)-1)
			gcstats.npausesweep++;
extern uintptr runtime_sizeof_C_MStats
  __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");

void runtime_ReadMemStats(MStats *)
  __asm__ (GOSYM_PREFIX "runtime.ReadMemStats");

runtime_ReadMemStats(MStats *stats)
	// Have to acquire worldsema to stop the world,
	// because stoptheworld can only be used by
	// one goroutine at a time, and there might be
	// a pending garbage collection already calling it.
	runtime_semacquire(&runtime_worldsema, false);
	runtime_stoptheworld();
	updatememstats(nil);
	// Size of the trailing by_size array differs between Go and C,
	// NumSizeClasses was changed, but we can not change Go struct because of backward compatibility.
	runtime_memmove(stats, &mstats, runtime_sizeof_C_MStats);
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();

void runtime_debug_readGCStats(Slice *)
  __asm__("runtime_debug.readGCStats");

void
runtime_debug_readGCStats(Slice *pauses)
{
	uint64 *p;
	uint32 i, n;

	// Calling code in runtime/debug should make the slice large enough.
	if((size_t)pauses->cap < nelem(mstats.pause_ns)+3)
		runtime_throw("runtime: short slice passed to readGCStats");

	// Pass back: pauses, last gc (absolute time), number of gc, total pause ns.
	p = (uint64*)pauses->array;
	runtime_lock(&runtime_mheap);
	n = mstats.numgc;
	if(n > nelem(mstats.pause_ns))
		n = nelem(mstats.pause_ns);

	// The pause buffer is circular. The most recent pause is at
	// pause_ns[(numgc-1)%nelem(pause_ns)]; step backward from there
	// to go farther back in time. We deliver the times
	// most recent first (in p[0]).
	for(i=0; i<n; i++)
		p[i] = mstats.pause_ns[(mstats.numgc-1-i)%nelem(mstats.pause_ns)];

	p[n] = mstats.last_gc;
	p[n+1] = mstats.numgc;
	p[n+2] = mstats.pause_total_ns;
	runtime_unlock(&runtime_mheap);
	pauses->__count = n+3;
}
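
// Worked example for the circular buffer above (hypothetical numbers; the
// real capacity is nelem(mstats.pause_ns)): with a capacity of 256 and
// mstats.numgc == 260, the most recent pause is read from
//	pause_ns[(260-1-0)%256] == pause_ns[3]
// so p[0..3] come from indexes 3, 2, 1, 0, and p[4] wraps around to
// index 255; the oldest four recorded pauses have already been overwritten.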

int32
runtime_setgcpercent(int32 in) {
	int32 out;

	runtime_lock(&runtime_mheap);
	if(gcpercent == GcpercentUnknown)
		gcpercent = readgogc();
	out = gcpercent;
	if(in < 0)
		in = -1;
	gcpercent = in;
	runtime_unlock(&runtime_mheap);
	return out;
}
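
// Illustration only: this is presumably the hook behind
// runtime/debug.SetGCPercent.  runtime_setgcpercent(200) returns the previous
// setting and asks for the next collection once the heap has grown by 200% of
// the live data, while a negative value (stored as -1) disables collection
// until a new percentage is set.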

static void
gchelperstart(void)
{
	M *m;

	m = runtime_m();
	if(m->helpgc < 0 || m->helpgc >= MaxGcproc)
		runtime_throw("gchelperstart: bad m->helpgc");
	if(runtime_xchg(&bufferList[m->helpgc].busy, 1))
		runtime_throw("gchelperstart: already busy");
	if(runtime_g() != m->g0)
		runtime_throw("gchelper not running on g0 stack");
}

static void
runfinq(void* dummy __attribute__ ((unused)))
{
	Finalizer *f;
	FinBlock *fb, *next;
	uint32 i;
	Eface ef;
	Iface iface;
	const Type *fint;
	void *param;

	for(;;) {
		runtime_lock(&gclock);
		fb = finq;
		finq = nil;
		if(fb == nil) {
			runtime_parkunlock(&gclock, "finalizer wait");
			continue;
		}
		runtime_unlock(&gclock);
		runtime_racefingo();
		for(; fb; fb=next) {
			next = fb->next;
			for(i=0; i<(uint32)fb->cnt; i++) {
				f = &fb->fin[i];
				fint = ((const Type**)f->ft->__in.array)[0];
				if(fint->__code == KindPtr) {
					// direct use of pointer
					param = &f->arg;
				} else if(((const InterfaceType*)fint)->__methods.__count == 0) {
					// convert to empty interface
					ef.__type_descriptor = (const Type*)f->ot;
					ef.__object = f->arg;
					param = &ef;
				} else {
					// convert to interface with methods
					iface.__methods = __go_convert_interface_2((const Type*)fint,
						(const Type*)f->ot, 1);
					iface.__object = f->arg;
					if(iface.__methods == nil)
						runtime_throw("invalid type conversion in runfinq");
					param = &iface;
				}
				reflect_call(f->ft, f->fn, 0, 0, &param, nil);
			}
		}
		runtime_gc(1);	// trigger another gc to clean up the finalized objects, if possible
	}
}
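
// Illustration of the three cases above in Go terms (not code from this
// file): a finalizer registered as func(p *T) receives its argument as a
// plain pointer (KindPtr); one registered as func(interface{}) needs the
// argument boxed into an empty interface (type descriptor plus object); and
// one registered as, say, func(x io.Closer) additionally needs a method
// table, which __go_convert_interface_2 builds, so a nil result is treated
// as an invalid conversion.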

void
runtime_marknogc(void *v)
{
	uintptr *b, obits, bits, off, shift;

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

	for(;;) {
		obits = *b;
		if((obits>>shift & bitMask) != bitAllocated)
			runtime_throw("bad initial state for marknogc");
		bits = (obits & ~(bitAllocated<<shift)) | bitBlockBoundary<<shift;
		if(runtime_gomaxprocs == 1) {
			*b = bits;
			break;
		} else {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
				break;
		}
	}
}
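
// Worked example of the bitmap lookup above, which also appears in the
// functions below (hypothetical offset; assuming the usual 64-bit layout in
// which each heap word gets 4 bitmap bits, so wordsPerBitmapWord == 16):
// for a pointer v whose word offset from the arena start is
//	off   = (uintptr*)v - (uintptr*)arena_start = 37
// the code computes
//	b     = (uintptr*)arena_start - 37/16 - 1 = (uintptr*)arena_start - 3
//	shift = 37 % 16 = 5
// i.e. the bitmap grows downward from arena_start, one bitmap word per 16
// heap words, and the flag bits for this object (bitAllocated, bitScan,
// bitBlockBoundary, ...) are read and written at positions derived from shift.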

void
runtime_markscan(void *v)
{
	uintptr *b, obits, bits, off, shift;

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

	for(;;) {
		obits = *b;
		if((obits>>shift & bitMask) != bitAllocated)
			runtime_throw("bad initial state for markscan");
		bits = obits | bitScan<<shift;
		if(runtime_gomaxprocs == 1) {
			*b = bits;
			break;
		} else {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
				break;
		}
	}
}

// mark the block at v of size n as freed.
void
runtime_markfreed(void *v, uintptr n)
{
	uintptr *b, obits, bits, off, shift;

	if(0)
		runtime_printf("markfreed %p+%p\n", v, n);

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markfreed: bad pointer");

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

	for(;;) {
		obits = *b;
		// This could be a free of a gc-eligible object (bitAllocated + others) or
		// a FlagNoGC object (bitBlockBoundary set). In either case, we revert to
		// a simple no-scan allocated object because it is going on a free list.
		bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
		if(runtime_gomaxprocs == 1) {
			*b = bits;
			break;
		} else {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
				break;
		}
	}
}

// check that the block at v of size n is marked freed.
void
runtime_checkfreed(void *v, uintptr n)
{
	uintptr *b, bits, off, shift;

	if(!runtime_checking)
		return;

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		return;	// not allocated, so okay

	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;

	bits = *b>>shift;
	if((bits & bitAllocated) != 0) {
		runtime_printf("checkfreed %p+%p: off=%p have=%p\n",
			v, n, off, bits & bitMask);
		runtime_throw("checkfreed: not freed");
	}
}

// mark the span of memory at v as having n blocks of the given size.
// if leftover is true, there is left over space at the end of the span.
void
runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
{
	uintptr *b, off, shift, i;
	byte *p;

	if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markspan: bad pointer");

	if(runtime_checking) {
		// bits should be all zero at the start
		off = (byte*)v + size - runtime_mheap.arena_start;
		b = (uintptr*)(runtime_mheap.arena_start - off/wordsPerBitmapWord);
		for(i = 0; i < size/PtrSize/wordsPerBitmapWord; i++) {
			if(b[i] != 0)
				runtime_throw("markspan: span bits not zero");
		}
	}

	p = v;
	if(leftover)	// mark a boundary just past end of last block too
		n++;
	for(; n-- > 0; p += size) {
		// Okay to use non-atomic ops here, because we control
		// the entire span, and each bitmap word has bits for only
		// one span, so no other goroutines are changing these
		// bitmap words.
		off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;  // word offset
		b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		*b = (*b & ~(bitMask<<shift)) | (bitAllocated<<shift);
	}
}
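
// Worked example for the loop above (hypothetical span parameters): a
// 4096-byte span of 48-byte blocks has n == 85 with 16 bytes left over, so
// leftover is true and 86 boundaries are marked: one per block plus one just
// past the last block, so the end of the final block can still be found.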

// unmark the span of memory at v of length n bytes.
void
runtime_unmarkspan(void *v, uintptr n)
{
	uintptr *p, *b, off;

	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("unmarkspan: bad pointer");

	p = v;
	off = p - (uintptr*)runtime_mheap.arena_start;  // word offset
	if(off % wordsPerBitmapWord != 0)
		runtime_throw("unmarkspan: unaligned pointer");
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	n /= PtrSize;
	if(n%wordsPerBitmapWord != 0)
		runtime_throw("unmarkspan: unaligned length");
	// Okay to use non-atomic ops here, because we control
	// the entire span, and each bitmap word has bits for only
	// one span, so no other goroutines are changing these
	// bitmap words.
	n /= wordsPerBitmapWord;
	while(n-- > 0)
		*b-- = 0;
}
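
// Worked example (hypothetical sizes, assuming 8-byte words and
// wordsPerBitmapWord == 16): unmarking a 4096-byte span converts n to
// 4096/8 == 512 heap words, checks that both the start offset and 512 are
// multiples of 16, and then zeroes 512/16 == 32 consecutive bitmap words,
// walking b downward because the bitmap grows down from arena_start.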

void
runtime_MHeap_MapBits(MHeap *h)
{
	// Caller has added extra mappings to the arena.
	// Add extra mappings of bitmap words as needed.
	// We allocate extra bitmap pieces in chunks of bitmapChunk.
	enum {
		bitmapChunk = 8192
	};
	uintptr n;
	uintptr page_size;

	n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
	n = ROUND(n, bitmapChunk);
	if(h->bitmap_mapped >= n)
		return;

	page_size = getpagesize();
	n = (n+page_size-1) & ~(page_size-1);

	runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped, &mstats.gc_sys);
	h->bitmap_mapped = n;
}
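
// Worked example (hypothetical sizes, assuming wordsPerBitmapWord == 16,
// bitmapChunk == 8192, and 4096-byte pages): if arena_used - arena_start is
// 1,000,000 bytes, the bitmap needs 1,000,000/16 == 62,500 bytes, which
// ROUND(..., bitmapChunk) raises to 65,536 and the page rounding leaves
// unchanged.  Since the bitmap sits just below arena_start and grows
// downward, the new piece is mapped starting at arena_start - 65,536,
// extending the bitmap_mapped bytes that are already mapped.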