1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector -- step 0.
7 // Stop the world, mark and sweep garbage collector.
8 // NOT INTENDED FOR PRODUCTION USE.
10 // A mark and sweep collector provides a way to exercise
11 // and test the memory allocator and the stack walking machinery
12 // without also needing to get reference counting
// NOTE(review): this chunk is a damaged extraction — statements are split
// across lines and stray original line numbers are embedded in the text.
// Code is left byte-identical; comments only.
// File-scope collector state: the finalizer queue (finq) guarded by
// finqlock/finqcond, and the scan-object stack bounds (bl/ebl).
22 typedef struct BlockList BlockList
;
// Set once the finalizer goroutine has been started — TODO confirm against
// the missing lines of this file.
29 static bool finstarted
;
// finqlock protects finq/fingwait; finqcond wakes the finalizer goroutine.
30 static pthread_mutex_t finqlock
= PTHREAD_MUTEX_INITIALIZER
;
31 static pthread_cond_t finqcond
= PTHREAD_COND_INITIALIZER
;
// Queue of finalizers detected during sweep, consumed by runfinq.
32 static Finalizer
*finq
;
// Nonzero while the finalizer goroutine is blocked waiting for work.
33 static int32 fingwait
;
// [bl, ebl): object stack used by scanblock to avoid deep recursion.
34 static BlockList
*bl
, *ebl
;
// Finalizer-running goroutine entry point (defined below in this file).
36 static void runfinq(void*);
// Pointer size in bytes; presumably an enumerator inside an enum whose
// braces fall on lines missing from this extraction — TODO confirm.
39 PtrSize
= sizeof(void*)
// scanblock(b, n): scan the n-byte block at b for pointers into the heap,
// marking reachable objects (RefSome). Most of the body is missing from
// this extraction; only fragments survive. Code left byte-identical.
43 scanblock(byte
*b
, int64 n
)
// Debug trace of each block scanned (fragment; enclosing Debug check, if
// any, is on lines missing here — TODO confirm).
64 runtime_printf("scanblock %p %lld\n", b
, (long long) n
);
// Misalignment of b relative to pointer size; low bits of the address.
65 off
= (uint32
)(uintptr
)b
& (PtrSize
-1);
// Only consider words that point inside the heap's address range.
77 if(runtime_mheap
.min
<= (byte
*)obj
&& (byte
*)obj
< runtime_mheap
.max
) {
// Resolve the word to an allocated object base, size, and ref-count slot.
78 if(runtime_mlookup(obj
, (byte
**)&obj
, &size
, nil
, &refp
)) {
// Dispatch on the reference state with flag bits masked off.
80 switch(ref
& ~RefFlags
) {
83 runtime_printf("found at %p: ", &vp
[i
]);
// Mark the object reachable, preserving its flag bits.
84 *refp
= RefSome
| (ref
& RefFlags
);
// Objects known to contain no pointers need not be scanned further.
85 if(!(ref
& RefNoPointers
)) {
// The bl/ebl object stack is full — fatal; cannot continue the mark phase.
87 runtime_throw("scanblock: garbage collection stack overflow");
// Fragment of markfin(v): called (via runtime_walkfintab) for each object
// with a finalizer; marks what the object points at so finalizable objects
// keep their referents alive. Most of the body is missing here.
// Every finalizer-table entry must resolve to a heap object carrying the
// RefHasFinalizer flag; anything else means the table and heap disagree.
108 if(!runtime_mlookup(v
, (byte
**)&v
, &size
, nil
, &refp
) || !(*refp
& RefHasFinalizer
))
109 runtime_throw("mark - finalizer inconsistency");
111 // do not mark the finalizer block itself. just mark the things it points at.
// Fragment: root_list links per-compilation-unit GC root tables into a
// singly linked list headed by `roots`; __go_register_gc_roots is called
// by generated code / dlopen'd modules to add their roots.
116 struct root_list
*next
;
// Head of the registered-roots list, walked by mark().
123 static struct root_list
* roots
;
126 __go_register_gc_roots (struct root_list
* r
)
128 // FIXME: This needs locking if multiple goroutines can call
129 // dlopen simultaneously.
// Fragment of mark(): sizes the object stack, then scans registered roots,
// m0, the finalizer queue, profiling data, goroutine stacks, and
// finalizer-referenced objects. Interior lines are missing; code is
// byte-identical to the extraction.
137 uintptr blsize
, nobj
;
138 struct root_list
*pl
;
140 // Figure out how big an object stack we need.
141 // Get a new one if we need more than we have
142 // or we need significantly less than we have.
143 nobj
= mstats
.heap_objects
;
// Reallocate when the current stack is too small, or more than 4x larger
// than needed (shrink to avoid holding excess address space).
144 if(nobj
> (uintptr
)(ebl
- bl
) || nobj
< (uintptr
)(ebl
-bl
)/4) {
146 runtime_SysFree(bl
, (byte
*)ebl
- (byte
*)bl
);
148 // While we're allocated a new object stack,
149 // add 20% headroom and also round up to
150 // the nearest page boundary, since mmap
// Round the byte size up to a 4 KiB page, then recompute the entry count
// from the rounded size so bl..ebl covers whole entries.
153 blsize
= nobj
* sizeof *bl
;
154 blsize
= (blsize
+ 4095) & ~4095;
155 nobj
= blsize
/ sizeof *bl
;
156 bl
= runtime_SysAlloc(blsize
);
// Scan every registered root table (one per linked-in unit).
160 for(pl
= roots
; pl
!= nil
; pl
= pl
->next
) {
161 struct root
* pr
= &pl
->roots
[0];
163 void *decl
= pr
->decl
;
166 scanblock(decl
, pr
->size
);
// Scan the initial M and the finalizer queue head as roots.
171 scanblock((byte
*)&m0
, sizeof m0
);
172 scanblock((byte
*)&finq
, sizeof finq
);
173 runtime_MProf_Mark(scanblock
);
// Scan all goroutine stacks (gccgo-specific stack walker).
176 __go_scanstacks(scanblock
);
178 // mark things pointed at by objects with finalizers
179 runtime_walkfintab(markfin
, scanblock
);
// Fragment of sweepspan()/sweep(): walks every in-use span; unmarked
// objects (RefNone) are freed, unmarked objects with finalizers have their
// finalizer queued instead of being freed, and marked objects (RefSome)
// are reset to RefNone for the next cycle. Interior lines missing; code
// byte-identical.
182 // free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
186 int32 n
, npages
, size
;
188 uint32 ref
, *gcrefp
, *gcrefep
;
// Base address of the span's memory.
192 p
= (byte
*)(s
->start
<< PageShift
);
// sizeclass 0 means one large object occupies the whole span.
193 if(s
->sizeclass
== 0) {
// Mask keeps the RefHasFinalizer bit so finalizable cases dispatch
// separately from plain RefNone/RefSome.
196 switch(ref
& ~(RefFlags
^RefHasFinalizer
)) {
198 // Free large object.
199 mstats
.alloc
-= s
->npages
<<PageShift
;
201 runtime_memclr(p
, s
->npages
<<PageShift
);
202 if(ref
& RefProfiled
)
203 runtime_MProf_Free(p
, s
->npages
<<PageShift
);
205 runtime_MHeap_Free(&runtime_mheap
, s
, 1);
// Dead but finalizable: queue the finalizer rather than freeing now.
207 case RefNone
|RefHasFinalizer
:
208 f
= runtime_getfinalizer(p
, 1);
// Flag said finalizer exists but the table has none — state is corrupt.
210 runtime_throw("finalizer inconsistency");
// Finalizer now queued; clear the flag so the object frees next cycle.
214 ref
&= ~RefHasFinalizer
;
// Live: reset mark to RefNone for the next collection.
217 case RefSome
|RefHasFinalizer
:
218 s
->gcref0
= RefNone
| (ref
&RefFlags
);
224 // Chunk full of small blocks.
225 runtime_MGetSizeClassInfo(s
->sizeclass
, &size
, &npages
, &n
);
// gcref[] holds one ref word per object; iterate objects in lockstep
// with their addresses (p advances by the size class's object size).
227 gcrefep
= s
->gcref
+ n
;
228 for(; gcrefp
< gcrefep
; gcrefp
++, p
+= size
) {
230 if(ref
< RefNone
) // RefFree or RefStack
232 switch(ref
& ~(RefFlags
^RefHasFinalizer
)) {
234 // Free small object.
235 if(ref
& RefProfiled
)
236 runtime_MProf_Free(p
, size
);
// Second word of the freed slot is a "needs zeroing" sentinel read by
// the allocator on reuse.
239 if(size
> (int32
)sizeof(uintptr
))
240 ((uintptr
*)p
)[1] = 1; // mark as "needs to be zeroed"
241 mstats
.alloc
-= size
;
243 mstats
.by_size
[s
->sizeclass
].nfree
++;
244 runtime_MCache_Free(c
, p
, s
->sizeclass
, size
);
// Same finalizer handling as the large-object path above.
246 case RefNone
|RefHasFinalizer
:
247 f
= runtime_getfinalizer(p
, 1);
249 runtime_throw("finalizer inconsistency");
253 ref
&= ~RefHasFinalizer
;
256 case RefSome
|RefHasFinalizer
:
257 *gcrefp
= RefNone
| (ref
&RefFlags
);
// sweep(): visit every span the heap has ever allocated, skipping those
// not currently in use.
268 for(s
= runtime_mheap
.allspans
; s
!= nil
; s
= s
->allnext
)
269 if(s
->state
== MSpanInUse
)
// Serializes collections: only one GC may run at a time.
273 static pthread_mutex_t gcsema
= PTHREAD_MUTEX_INITIALIZER
;
275 // Initialized from $GOGC. GOGC=off means no gc.
277 // Next gc is after we've allocated an extra amount of
278 // memory proportional to the amount already in use.
279 // If gcpercent=100 and we're using 4M, we'll gc again
280 // when we get to 8M. This keeps the gc cost in linear
281 // proportion to the allocation cost. Adjusting gcpercent
282 // just changes the linear constant (and also the amount of
283 // extra memory used).
// -2 is a sentinel meaning "not yet read from the environment"; see the
// first-time-through branch in runtime_gc below.
284 static int32 gcpercent
= -2;
// Fragment of runtime_gc(force): stop the world, mark, sweep, update
// pacing statistics, restart the world, then kick the finalizer
// goroutine. Interior lines missing; code byte-identical.
287 runtime_gc(int32 force
__attribute__ ((unused
)))
293 // The gc is turned off (via enablegc) until
294 // the bootstrap has completed.
295 // Also, malloc gets called in the guts
296 // of a number of libraries that might be
297 // holding locks. To avoid priority inversion
298 // problems, don't bother trying to run gc
299 // while holding a lock. The next mallocgc
300 // without a lock will do the gc instead.
301 if(!mstats
.enablegc
|| m
->locks
> 0 /* || runtime_panicking */)
// Lazily parse $GOGC on the first collection (-2 = sentinel, see above).
304 if(gcpercent
== -2) { // first time through
305 p
= runtime_getenv("GOGC");
306 if(p
== nil
|| p
[0] == '\0')
308 else if(runtime_strcmp(p
, "off") == 0)
311 gcpercent
= runtime_atoi(p
);
// Lock order: finqlock before gcsema. finqlock stays held past the
// world restart so finalizer kickoff below is race-free.
316 pthread_mutex_lock(&finqlock
);
317 pthread_mutex_lock(&gcsema
);
318 m
->locks
++; // disable gc during the mallocs in newproc
319 t0
= runtime_nanotime();
320 runtime_stoptheworld();
// Collect only when forced or the pacing threshold has been reached.
321 if(force
|| mstats
.heap_alloc
>= mstats
.next_gc
) {
// Next trigger: current live heap plus gcpercent% of it.
326 mstats
.next_gc
= mstats
.heap_alloc
+mstats
.heap_alloc
*gcpercent
/100;
329 t1
= runtime_nanotime();
// Record this pause in the circular pause_ns buffer and the running total.
331 mstats
.pause_ns
[mstats
.numgc
%nelem(mstats
.pause_ns
)] = t1
- t0
;
332 mstats
.pause_total_ns
+= t1
- t0
;
334 runtime_printf("pause %llu\n", (unsigned long long)t1
-t0
);
335 pthread_mutex_unlock(&gcsema
);
336 runtime_starttheworld();
338 // finqlock is still held.
341 // kick off or wake up goroutine to run queued finalizers
// First time: spawn the finalizer goroutine; afterwards just signal it.
343 __go_go(runfinq
, nil
);
348 pthread_cond_signal(&finqcond
);
352 pthread_mutex_unlock(&finqlock
);
// Fragment of runfinq(): the finalizer goroutine. Loops forever taking
// the queued finalizer list under finqlock (condition-waiting when the
// queue is empty), then runs each finalizer via reflect_call outside the
// lock. Interior lines missing; code byte-identical.
363 pthread_mutex_lock(&finqlock
);
// Standard condvar wait: finqlock is released while blocked, reacquired
// on wakeup. NOTE(review): the surrounding while-loop guarding against
// spurious wakeups is on lines missing from this extraction — confirm.
368 pthread_cond_wait(&finqcond
, &finqlock
);
369 pthread_mutex_unlock(&finqlock
);
372 pthread_mutex_unlock(&finqlock
);
// Invoke the finalizer function through gccgo's reflection call shim.
378 reflect_call(f
->ft
, (void*)f
->fn
, 0, params
, nil
);
384 runtime_gc(1); // trigger another gc to clean up the finalized objects, if possible