[official-gcc.git] / libgo / go / runtime / mcentral.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral is two lists of mspans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).

package runtime

import "runtime/internal/atomic"
// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass
	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully-allocated. Written atomically, read under STW.
	nmalloc uint64
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)
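	// Proportional sweep: allocation pays down sweep debt up front, so
	// the heap is fully swept before enough new memory is allocated to
	// trigger the next GC cycle.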

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
	sg := mheap_.sweepgen
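	// A span's sweepgen is interpreted relative to the heap's sweepgen
	// (see the mspan comments in mheap.go):
	//   sg-2: the span needs sweeping
	//   sg-1: the span is currently being swept
	//   sg:   the span has been swept and is ready to use
	//   sg+1: the span was cached before sweep began and still needs sweeping
	//   sg+3: the span was swept and then cached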
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)

			// With gccgo's conservative GC, the returned span may
			// now be full. See the comments in mspan.sweep.
			if uintptr(s.allocCount) == s.nelems {
				s.freeindex = s.nelems
				lock(&c.lock)
				goto retry
			}

			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}
	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)
	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)
	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
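	// heap_live is likewise credited with all of the span's remaining free
	// bytes up front; if the mcache releases the span before filling it,
	// uncacheSpan subtracts the unused portion back out.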
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
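	// freeindex &^ (64 - 1) rounds freeindex down to a multiple of 64, and
	// dividing by 8 converts that bit index into a byte index into the
	// allocation bitmap (s.allocBits), so the cache is refilled from the
	// 64-bit word containing freeindex. For example, freeindex == 70 gives
	// freeByteBase == 64 and whichByte == 8.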
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64
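	// Continuing the example above: with freeindex == 70 the cache covers
	// objects 64..127, and shifting right by 70 % 64 == 6 leaves the bit
	// for object 70 in bit 0 of s.allocCache.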

	return s
}

// Return span from an mcache.
func (c *mcentral) uncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	n := int(s.nelems) - int(s.allocCount)
	if n > 0 {
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't. We must do this before potentially
		// sweeping the span.
		atomic.Xadd64(&c.nmalloc, -int64(n))

		lock(&c.lock)
		c.empty.remove(s)
		c.nonempty.insert(s)
		if !stale {
			// mCentral_CacheSpan conservatively counted
			// unallocated slots in heap_live. Undo this.
			//
			// If this span was cached before sweep, then
			// heap_live was totally recomputed since
			// caching this span, so we don't do this for
			// stale spans.
			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		}
		unlock(&c.lock)
	}

	if stale {
		// Now that s is in the right mcentral list, we can
		// sweep it.
		s.sweep(false)
	}
}

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan reports whether s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
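	// A sweepgen of sg+1 or sg+3 means the span is currently cached in an
	// mcache (cached before sweep began, or swept and then cached), so it
	// must never be handed to freeSpan.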
	if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 {
		throw("freeSpan given cached span")
	}
	s.needzero = 1

	if preserve {
		// preserve is set only when called from (un)cacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an mcache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, false)
	return true
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])
	n := (npages << _PageShift) / size
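	// For example, assuming the usual 8 KB pages, the 48-byte size class
	// uses one-page spans, giving n == 8192/48 == 170 objects per span.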
	s := mheap_.alloc(npages, c.spanclass, false, true)
	if s == nil {
		return nil
	}

	p := s.base()
	s.limit = p + size*n
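	// limit excludes any tail of the span that is too small to hold
	// another object; nothing is ever allocated from those bytes.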
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}