[official-gcc.git] / libgo / go / runtime / mcentral.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

import "runtime/internal/atomic"
// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass
	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully-allocated. Written atomically, read under STW.
	nmalloc uint64
}
// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	c.nonempty.init()
	c.empty.init()
}
// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
	sg := mheap_.sweepgen
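	// sweepgen coordinates with the sweeper:
	// s.sweepgen == sg-2 means the span needs sweeping,
	// s.sweepgen == sg-1 means the span is currently being swept,
	// s.sweepgen == sg   means the span has been swept and is ready to use.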
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)

			// With gccgo's conservative GC, the returned span may
			// now be full. See the comments in mspan.sweep.
			if uintptr(s.allocCount) == s.nelems {
				s.freeindex = s.nelems
				lock(&c.lock)
				goto retry
			}

			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}
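	// No usable span was found on the nonempty list. Try the empty list:
	// sweeping an unswept empty span may free some of its objects.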
	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)
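	// Neither list yielded a usable span: every remaining span is either
	// being swept or has no free objects.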
	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)
	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
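	// cap is the total number of object slots in the span; n is the
	// number of slots that are still free.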
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
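	// Credit heap_live with the whole span minus what was already
	// allocated, since every remaining object is assumed to be
	// allocated from the mcache.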
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	s.incache = true
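	// Refill the 64-bit allocCache from allocBits, starting at the
	// 64-object-aligned boundary containing s.freeindex.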
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
// Return span from an MCache.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
		// mCentral_CacheSpan conservatively counted
		// unallocated slots in heap_live. Undo this.
		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't.
		atomic.Xadd64(&c.nmalloc, -int64(n))
	}
	unlock(&c.lock)
}
// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan returns true if s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
	if s.incache {
		throw("freeSpan given cached span")
	}
	s.needzero = 1

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}
	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, 0)
	return true
}
// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
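	// Look up the span size in pages and the object size for this size
	// class; n is the number of objects the new span will hold.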
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])
	n := (npages << _PageShift) / size

	s := mheap_.alloc(npages, c.spanclass, false, true)
	if s == nil {
		return nil
	}

	p := s.base()
	s.limit = p + size*n

	heapBitsForAddr(s.base()).initSpan(s)
	return s
}