// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The MCentral doesn't actually contain the list of free objects; the MSpan does.
// Each MCentral has two lists of MSpans: those with free objects (c->nonempty)
// and those that are completely allocated (c->empty).
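//
// As an illustrative sketch (not runtime API), a span travels between the
// two lists roughly like this:
//
//	cacheSpan:   nonempty -> empty   (span handed out to an mcache)
//	uncacheSpan: empty -> nonempty   (returned span still has free slots)
//	freeSpan:    empty -> nonempty, or back to mheap_ once fully free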

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass
	nonempty  mSpanList // list of spans with a free object, ie a nonempty free list
	empty     mSpanList // list of spans with no free objects (or cached in an mcache)

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully-allocated. Written atomically, read under STW.
	nmalloc uint64
}
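
// Each mcentral is embedded in the mheap_.central array, one per span
// class, so callers reach it as mheap_.central[spc].mcentral (see mheap.go).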

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	c.nonempty.init()
	c.empty.init()
}

// Allocate a span to use in an MCache.
func (c *mcentral) cacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
	sg := mheap_.sweepgen
retry:
	var s *mspan
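	// Sweep state is encoded in s.sweepgen relative to sg (mheap_.sweepgen
	// advances by 2 each GC cycle); this mirrors the sweepgen comment in
	// mheap.go:
	//	s.sweepgen == sg-2: the span needs sweeping
	//	s.sweepgen == sg-1: the span is being swept
	//	s.sweepgen == sg:   the span is swept and ready to use
	// The CAS below atomically claims an unswept span for sweeping here.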
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)

			// With gccgo's conservative GC, the returned span may
			// now be full. See the comments in mspan.sweep.
			if uintptr(s.allocCount) == s.nelems {
				s.freeindex = s.nelems
				lock(&c.lock)
				goto retry
			}

			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}
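	// No usable span on the nonempty list. Spans on the empty list may
	// still be unswept; sweeping one can free objects and make it usable.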
	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by the background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
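	// For example (illustrative numbers): caching a fresh one-page span of
	// 48-byte objects with allocCount == 0 adds the full spanBytes (8192)
	// to heap_live, since every slot is assumed to be allocated soon.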
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	s.incache = true
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64
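	// Worked example (assuming 64-bit cache words): if s.freeindex == 67,
	// freeByteBase == 64 and whichByte == 8, so the cache holds bits 64-127
	// of the alloc bitmap; shifting right by 67%64 == 3 makes bit 0 of
	// s.allocCache describe object slot 67.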

	return s
}

// Return span from an MCache.
func (c *mcentral) uncacheSpan(s *mspan) {
	lock(&c.lock)

	s.incache = false

	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}
	cap := int32((s.npages << _PageShift) / s.elemsize)
	n := cap - int32(s.allocCount)
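	// n is the number of slots that cacheSpan optimistically counted as
	// allocated but were never used, e.g. n == 160 if only 10 of 170 slots
	// of a span were handed out while it was cached.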
	if n > 0 {
		c.empty.remove(s)
		c.nonempty.insert(s)
		// mCentral_CacheSpan conservatively counted
		// unallocated slots in heap_live. Undo this.
		atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		// cacheSpan updated nmalloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't.
		atomic.Xadd64(&c.nmalloc, -int64(n))
	}
	unlock(&c.lock)
}

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan returns true if s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
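	// Three outcomes are possible: s is left in place (preserve), s moves
	// to the nonempty list (live objects remain), or s goes back to the
	// heap (allocCount == 0).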
	if s.incache {
		throw("freeSpan given cached span")
	}
	s.needzero = 1

	if preserve {
		// preserve is set only when called from MCentral_CacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s, 0)
	return true
}

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])
	n := (npages << _PageShift) / size

	s := mheap_.alloc(npages, c.spanclass, false, true)
	if s == nil {
		return nil
	}

	p := s.base()
	s.limit = p + size*n

	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
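
// As a concrete illustration of the arithmetic in grow (numbers from the
// size class table in sizeclasses.go): for the 48-byte size class,
// class_to_allocnpages gives one 8 KB page per span, so grow returns a
// span with n = 8192/48 = 170 object slots.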