runtime: scan register backing store on ia64
official-gcc.git: libgo/go/runtime/mgcsweep.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	nbgsweep    uint32
	npausesweep uint32
}

// finishsweep_m ensures that all spans are swept.
//
// The world must be stopped. This ensures there are no sweeps in
// progress.
//
//go:nowritebarrier
func finishsweep_m() {
	// Sweeping must be complete before marking commences, so
	// sweep any unswept spans. If this is a concurrent GC, there
	// shouldn't be any spans left to sweep, so this should finish
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	nextMarkBitArenaEpoch()
}
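
// bgsweep is the background sweeper goroutine. It signals on c once it is
// set up and parked, then repeatedly sweeps spans and frees workbufs,
// yielding between units of work, and parks again when there is nothing
// left to sweep. The garbage collector readies it when a new sweep phase
// begins.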
func bgsweep(c chan int) {
	setSystemGoroutine()

	sweep.g = getg()

	lock(&sweep.lock)
	sweep.parked = true
	c <- 1
	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)

	for {
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		for freeSomeWbufs(true) {
			Gosched()
		}
		lock(&sweep.lock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
	}
}

// sweepone sweeps one span and returns the number of pages returned
// to the heap, or ^uintptr(0) if there is nothing to sweep.
//go:nowritebarrier
func sweepone() uintptr {
	_g_ := getg()
	sweepRatio := mheap_.sweepPagesPerByte // For debugging

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of sweep, which would leave the span in an
	// inconsistent state for the next GC.
	_g_.m.locks++
	if atomic.Load(&mheap_.sweepdone) != 0 {
		_g_.m.locks--
		return ^uintptr(0)
	}
	atomic.Xadd(&mheap_.sweepers, +1)

	npages := ^uintptr(0)
	sg := mheap_.sweepgen
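	// A span's sweepgen tracks its sweep state relative to sg: sg-2 means
	// the span needs sweeping, sg-1 means it is currently being swept, and
	// sg means it has already been swept this cycle.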
	for {
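		// sweepSpans[1-sg/2%2] holds the in-use spans that still need
		// sweeping this cycle; swept in-use spans are pushed onto the
		// other buffer.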
		s := mheap_.sweepSpans[1-sg/2%2].pop()
		if s == nil {
			atomic.Store(&mheap_.sweepdone, 1)
			break
		}
		if s.state != mSpanInUse {
			// This can happen if direct sweeping already
			// swept this span, but in that case the sweep
			// generation should always be up-to-date.
			if s.sweepgen != sg {
				print("runtime: bad span s.state=", s.state, " s.sweepgen=", s.sweepgen, " sweepgen=", sg, "\n")
				throw("non in-use span in unswept list")
			}
			continue
		}
		if s.sweepgen != sg-2 || !atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			continue
		}
		npages = s.npages
		if !s.sweep(false) {
			// Span is still in-use, so this returned no
			// pages to the heap and the span needs to
			// move to the swept in-use list.
			npages = 0
		}
		break
	}

	// Decrement the number of active sweepers and if this is the
	// last one print trace information.
	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
		if debug.gcpacertrace > 0 {
			print("pacer: sweep done at heap size ", memstats.heap_live>>20, "MB; allocated ", (memstats.heap_live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept, " pages at ", sweepRatio, " pages/byte\n")
		}
	}
	_g_.m.locks--
	return npages
}
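
// gosweepone sweeps one span by running sweepone on the system stack
// and returns its result.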
//go:nowritebarrier
func gosweepone() uintptr {
	var ret uintptr
	systemstack(func() {
		ret = sweepone()
	})
	return ret
}

//go:nowritebarrier
func gosweepdone() bool {
	return mheap_.sweepdone != 0
}

// ensureSwept returns only when span s has been swept.
//go:nowritebarrier
func (s *mspan) ensureSwept() {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_EnsureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	if atomic.Load(&s.sweepgen) == sg {
		return
	}
	// The caller must be sure that the span is a MSpanInUse span.
	if atomic.Cas(&s.sweepgen, sg-2, sg-1) {
		s.sweep(false)
		return
	}
	// Unfortunate case: another goroutine is sweeping the span and we have
	// no efficient way to wait for it, so spin until it finishes.
	for atomic.Load(&s.sweepgen) != sg {
		osyield()
	}
}

// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to heap.
// If preserve=true, don't return it to heap nor relink in MCentral lists;
// caller takes care of it.
//TODO go:nowritebarrier
func (s *mspan) sweep(preserve bool) bool {
	// It's critical that we enter this function with preemption disabled,
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_Sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("MSpan_Sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepSpan(s.npages * _PageSize)
	}

	atomic.Xadd64(&mheap_.pagesSwept, int64(s.npages))

	spc := s.spanclass
	size := s.elemsize
	res := false

	c := _g_.m.mcache
	freeToHeap := false

	// The allocBits indicate which unmarked objects don't need to be
	// processed since they were free at the end of the last GC cycle
	// and were not allocated since then.
	// If the allocBits index is >= s.freeindex and the bit
	// is not marked then the object remains unallocated
	// since the last GC.
	// This situation is analogous to being on a freelist.

	// Unlink & free special records for any objects we're about to free.
	// Two complications here:
	// 1. An object can have both finalizer and profile special records.
	//    In such case we need to queue finalizer for execution,
	//    mark the object as live and preserve the profile special.
	// 2. A tiny object can have several finalizers setup for different offsets.
	//    If such object is not marked, we need to queue all finalizers at once.
	// Both 1 and 2 are possible at the same time.
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object, find object beginning.
		objIndex := uintptr(special.offset) / size
		p := s.base() + objIndex*size
		mbits := s.markBitsForIndex(objIndex)
		if !mbits.isMarked() {
			// This object is not marked and has at least one special record.
			// Pass 1: see if it has at least one finalizer.
			hasFin := false
			endOffset := p - s.base() + size
			for tmp := special; tmp != nil && uintptr(tmp.offset) < endOffset; tmp = tmp.next {
				if tmp.kind == _KindSpecialFinalizer {
					// Stop freeing of object if it has a finalizer.
					mbits.setMarkedNonAtomic()
					hasFin = true
					break
				}
			}
			// Pass 2: queue all finalizers _or_ handle profile record.
			for special != nil && uintptr(special.offset) < endOffset {
				// Find the exact byte for which the special was setup
				// (as opposed to object beginning).
				p := s.base() + uintptr(special.offset)
				if special.kind == _KindSpecialFinalizer || !hasFin {
					// Splice out special record.
					y := special
					special = special.next
					*specialp = special
					freespecial(y, unsafe.Pointer(p), size)
				} else {
					// This is a profile record, but the object has finalizers (so it is kept alive).
					// Keep the special record.
					specialp = &special.next
					special = *specialp
				}
			}
		} else {
			// Object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}

	if debug.allocfreetrace != 0 || raceenabled || msanenabled {
		// Find all newly freed objects. This doesn't have to be
		// efficient; allocfreetrace has massive overhead.
		mbits := s.markBitsForBase()
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < s.nelems; i++ {
			if !mbits.isMarked() && (abits.index < s.freeindex || abits.isMarked()) {
				x := s.base() + i*s.elemsize
				if debug.allocfreetrace != 0 {
					tracefree(unsafe.Pointer(x), size)
				}
				if raceenabled {
					racefree(unsafe.Pointer(x), size)
				}
				if msanenabled {
					msanfree(unsafe.Pointer(x), size)
				}
			}
			mbits.advance()
			abits.advance()
		}
	}

	// Count the number of objects still allocated in this span (by
	// counting the mark bits) and derive how many were freed.
	nalloc := uint16(s.countAlloc())
	if spc.sizeclass() == 0 && nalloc == 0 {
		s.needzero = 1
		freeToHeap = true
	}
	nfreed := s.allocCount - nalloc

	// This test is not reliable with gccgo, because of
	// conservative stack scanning. The test boils down to
	// checking that no new bits have been set in gcmarkBits since
	// the span was added to the sweep count. New bits are set by
	// greyobject. Seeing a new bit means that a live pointer has
	// appeared that was not found during the mark phase. That can
	// not happen when pointers are followed strictly. However,
	// with conservative checking, it is possible for a pointer
	// that will never be used to appear live and to cause a mark
	// to be added. That is unfortunate in that it causes this
	// check to be inaccurate, and it will keep an object live
	// unnecessarily, but provided the pointer is not really live
	// it is not otherwise a problem. So we disable the test for gccgo.
	if false && nalloc > s.allocCount {
		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
		throw("sweep increased allocation count")
	}

	s.allocCount = nalloc
	wasempty := s.nextFreeIndex() == s.nelems
	s.freeindex = 0 // reset allocation index to start of span.
	if trace.enabled {
		getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
	}

	// gcmarkBits becomes the allocBits.
	// Get a fresh cleared gcmarkBits in preparation for the next GC.
	s.allocBits = s.gcmarkBits
	s.gcmarkBits = newMarkBits(s.nelems)

	// Initialize alloc bits cache.
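	// (allocCache caches a 64-bit window of the allocation bits starting at
	// freeindex, letting nextFreeIndex find free slots without rescanning
	// allocBits each time.)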
	s.refillAllocCache(0)

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	if freeToHeap || nfreed == 0 {
		// The span must be in our exclusive ownership until we update sweepgen,
		// check for potential races.
		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("MSpan_Sweep: bad span state after sweep")
		}
		// Serialization point.
		// At this point the mark bits are cleared and allocation ready
		// to go so release the span.
		atomic.Store(&s.sweepgen, sweepgen)
	}

	if nfreed > 0 && spc.sizeclass() != 0 {
		c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
		res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
		// MCentral_FreeSpan updates sweepgen
	} else if freeToHeap {
		// Free large span to heap

		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(s.base()), size)
		} else {
			mheap_.freeSpan(s, 1)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if !res {
		// The span has been swept and is still in-use, so put
		// it on the swept in-use list.
		mheap_.sweepSpans[sweepgen/2%2].push(s)
	}
	return res
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit makes a worst-case assumption that all spanBytes
// bytes of the ultimately allocated span will be available for object
// allocation.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
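//
// The sweep ratio mheap_.sweepPagesPerByte is maintained by the garbage
// collector as roughly (pages remaining to sweep) / (heap bytes expected
// to be allocated before the next GC trigger), so the debt computed below
// grows in proportion to allocation.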
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	if trace.enabled {
		traceGCSweepStart()
	}

retry:
	sweptBasis := atomic.Load64(&mheap_.pagesSweptBasis)

	// Fix debt if necessary.
	newHeapLive := uintptr(atomic.Load64(&memstats.heap_live)-mheap_.sweepHeapLiveBasis) + spanBytes
	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
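	// pagesTarget is the number of pages that must have been swept (counted
	// from pagesSweptBasis) by the time the live heap reaches newHeapLive
	// bytes, less the pages the caller will sweep itself.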
	for pagesTarget > int64(atomic.Load64(&mheap_.pagesSwept)-sweptBasis) {
		if gosweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
		if atomic.Load64(&mheap_.pagesSweptBasis) != sweptBasis {
			// Sweep pacing changed. Recompute debt.
			goto retry
		}
	}

	if trace.enabled {
		traceGCSweepDone()
	}
}