// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
8 "runtime/internal/atomic"

// A gcSweepBuf is a set of *mspans.
//
// gcSweepBuf is safe for concurrent push operations *or* concurrent
// pop operations, but not both simultaneously.
type gcSweepBuf struct {
	// A gcSweepBuf is a two-level data structure consisting of a
	// growable spine that points to fixed-sized blocks. The spine
	// can be accessed without locks, but adding a block or
	// growing it requires taking the spine lock.
	//
	// Because each mspan covers at least 8K of heap and takes at
	// most 8 bytes in the gcSweepBuf, the growth of the spine is
	// quite limited.
	//
	// The spine and all blocks are allocated off-heap, which
	// allows this to be used in the memory manager and avoids the
	// need for write barriers on all of these. We never release
	// this memory because there could be concurrent lock-free
	// access and we're likely to reuse it anyway. (In principle,
	// we could do this during STW.)

	spineLock mutex
	spine     unsafe.Pointer // *[N]*gcSweepBlock, accessed atomically
	spineLen  uintptr        // Spine array length, accessed atomically
	spineCap  uintptr        // Spine array cap, accessed under lock

	// index is the first unused slot in the logical concatenation
	// of all blocks. It is accessed atomically.
	index uint32
}

const (
	gcSweepBlockEntries    = 512 // 4KB on 64-bit
	gcSweepBufInitSpineCap = 256 // Enough for 1GB heap on 64-bit
)
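
// The sizing in the comments above works out as follows: one block is
// 512 entries x 8-byte pointers = 4KB, and a full initial spine of 256
// blocks indexes 256*512 = 131072 spans, which at the 8KB minimum span
// size covers 1GB of heap before the spine must grow.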

type gcSweepBlock struct {
	spans [gcSweepBlockEntries]*mspan
}
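
// Illustrative layout (hypothetical, using a tiny block size of 2
// entries rather than the real 512, purely for exposition): after
// three pushes of spans s0, s1, s2,
//
//	spine  -> [ *block0, *block1, ... ]
//	block0 -> [s0, s1]
//	block1 -> [s2, nil]
//	index == 3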

// push adds span s to buffer b. push is safe to call concurrently
// with other push operations, but NOT to call concurrently with pop.
func (b *gcSweepBuf) push(s *mspan) {
	// Obtain our slot.
	cursor := uintptr(atomic.Xadd(&b.index, +1) - 1)
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
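
	// Every concurrent pusher gets a distinct cursor from the
	// atomic add: top selects the spine block and bottom the slot
	// within that block.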

	// Do we need to add a block?
	spineLen := atomic.Loaduintptr(&b.spineLen)
	var block *gcSweepBlock
retry:
	if top < spineLen {
		spine := atomic.Loadp(unsafe.Pointer(&b.spine))
		blockp := add(spine, sys.PtrSize*top)
		block = (*gcSweepBlock)(atomic.Loadp(blockp))
	} else {
		// Add a new block to the spine, potentially growing
		// the spine.
		lock(&b.spineLock)
		// spineLen cannot change until we release the lock,
		// but may have changed while we were waiting.
		spineLen = atomic.Loaduintptr(&b.spineLen)
		if top < spineLen {
			// Another push added our block while we were
			// waiting; go back to the fast path.
			unlock(&b.spineLock)
			goto retry
		}

		if spineLen == b.spineCap {
			// Grow the spine.
			newCap := b.spineCap * 2
			if newCap == 0 {
				newCap = gcSweepBufInitSpineCap
			}
			newSpine := persistentalloc(newCap*sys.PtrSize, sys.CacheLineSize, &memstats.gc_sys)
			if b.spineCap != 0 {
				// Blocks are allocated off-heap, so
				// no write barriers.
				memmove(newSpine, b.spine, b.spineCap*sys.PtrSize)
			}
			// Spine is allocated off-heap, so no write barrier.
			atomic.StorepNoWB(unsafe.Pointer(&b.spine), newSpine)
			b.spineCap = newCap
			// We can't immediately free the old spine
			// since a concurrent push with a lower index
			// could still be reading from it. We let it
			// leak because even a 1TB heap would waste
			// less than 2MB of memory on old spines. If
			// this is a problem, we could free old spines
			// during STW.
		}

		// Allocate a new block and add it to the spine.
		block = (*gcSweepBlock)(persistentalloc(unsafe.Sizeof(gcSweepBlock{}), sys.CacheLineSize, &memstats.gc_sys))
		blockp := add(b.spine, sys.PtrSize*top)
		// Blocks are allocated off-heap, so no write barrier.
		atomic.StorepNoWB(blockp, unsafe.Pointer(block))
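		// Publish the block pointer before bumping spineLen so a
		// fast-path reader that observes the new length always
		// finds a valid block pointer.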
		atomic.Storeuintptr(&b.spineLen, spineLen+1)
		unlock(&b.spineLock)
	}

	// We have a block. Insert the span.
	block.spans[bottom] = s
}
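
// A minimal usage sketch (not part of the runtime source; s1 and s2 are
// placeholder *mspan values, and it assumes the documented discipline of
// pushing in one phase and popping in a separate phase, never both
// concurrently):
//
//	var buf gcSweepBuf
//	buf.push(s1) // safe concurrently with other pushes
//	buf.push(s2)
//	// ... all pushers finish ...
//	for s := buf.pop(); s != nil; s = buf.pop() {
//		// process s
//	}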

// pop removes and returns a span from buffer b, or nil if b is empty.
// pop is safe to call concurrently with other pop operations, but NOT
// to call concurrently with push.
func (b *gcSweepBuf) pop() *mspan {
	cursor := atomic.Xadd(&b.index, -1)
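	// If the decrement took index below zero, the buffer was
	// empty; restore index and report empty.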
	if int32(cursor) < 0 {
		atomic.Xadd(&b.index, +1)
		return nil
	}

	// There are no concurrent spine or block modifications during
	// pop, so we can omit the atomics.
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	blockp := (**gcSweepBlock)(add(b.spine, sys.PtrSize*uintptr(top)))
	block := *blockp
	s := block.spans[bottom]
	// Clear the pointer for block(i).
	block.spans[bottom] = nil
	return s
}
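
// Note that pop consumes slots in the reverse of push order, since it
// walks index backwards from the last reserved slot.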

// numBlocks returns the number of blocks in buffer b. numBlocks is
// safe to call concurrently with any other operation. Spans that have
// been pushed prior to the call to numBlocks are guaranteed to appear
// in some block in the range [0, numBlocks()), assuming there are no
// intervening pops. Spans that are pushed after the call may also
// appear in these blocks.
func (b *gcSweepBuf) numBlocks() int {
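	// Ceiling division: round the count of used slots up to a
	// whole number of blocks.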
	return int((atomic.Load(&b.index) + gcSweepBlockEntries - 1) / gcSweepBlockEntries)
}

// block returns the spans in the i'th block of buffer b. block is
// safe to call concurrently with push.
func (b *gcSweepBuf) block(i int) []*mspan {
	// Perform bounds check before loading spine address since
	// push ensures the allocated length is at least spineLen.
	if i < 0 || uintptr(i) >= atomic.Loaduintptr(&b.spineLen) {
		throw("block index out of range")
	}

	// Get block i.
	spine := atomic.Loadp(unsafe.Pointer(&b.spine))
	blockp := add(spine, sys.PtrSize*uintptr(i))
	block := (*gcSweepBlock)(atomic.Loadp(blockp))
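	// The atomic loads above pair with push's StorepNoWB stores: a
	// concurrent push may be growing the spine or publishing a new
	// block, so neither pointer may be read non-atomically.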

	// Slice the block if necessary.
	cursor := uintptr(atomic.Load(&b.index))
	top, bottom := cursor/gcSweepBlockEntries, cursor%gcSweepBlockEntries
	var spans []*mspan
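	// Blocks before block top are completely full; block top itself
	// is only filled up to bottom.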
	if uintptr(i) < top {
		spans = block.spans[:]
	} else {
		spans = block.spans[:bottom]
	}

	// push may have reserved a slot but not filled it yet, so
	// trim away unused entries.
	for len(spans) > 0 && spans[len(spans)-1] == nil {
		spans = spans[:len(spans)-1]
	}
	return spans
}