1 // Copyright 2013 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
14 // A Pool is a set of temporary objects that may be individually saved and
17 // Any item stored in the Pool may be removed automatically at any time without
18 // notification. If the Pool holds the only reference when this happens, the
19 // item might be deallocated.
21 // A Pool is safe for use by multiple goroutines simultaneously.
// Pool's purpose is to cache allocated but unused items for later reuse,
// relieving pressure on the garbage collector. That is, it makes it easy to
// build efficient, thread-safe free lists. However, it is not suitable for all
// free lists.
28 // An appropriate use of a Pool is to manage a group of temporary items
29 // silently shared among and potentially reused by concurrent independent
30 // clients of a package. Pool provides a way to amortize allocation overhead
31 // across many clients.
// An example of good use of a Pool is in the fmt package, which maintains a
// dynamically-sized store of temporary output buffers. The store scales under
// load (when many goroutines are actively printing) and shrinks when
// quiescent.
// On the other hand, a free list maintained as part of a short-lived object is
// not a suitable use for a Pool, since the overhead does not amortize well in
// that scenario. It is more efficient to have such objects implement their own
// free list.
43 // A Pool must not be copied after first use.
47 local unsafe
.Pointer
// local fixed-size per-P pool, actual type is [P]poolLocal
48 localSize
uintptr // size of the local array
50 // New optionally specifies a function to generate
51 // a value when Get would otherwise return nil.
52 // It may not be changed concurrently with calls to Get.
53 New
func() interface{}
56 // Local per-P Pool appendix.
57 type poolLocalInternal
struct {
58 private
interface{} // Can be used only by the respective P.
59 shared
[]interface{} // Can be used by any P.
60 Mutex
// Protects shared.
63 type poolLocal
struct {
66 // Prevents false sharing on widespread platforms with
67 // 128 mod (cache line size) = 0 .
68 pad
[128 - unsafe
.Sizeof(poolLocalInternal
{})%128
]byte
72 func fastrand() uint32
74 var poolRaceHash
[128]uint64
76 // poolRaceAddr returns an address to use as the synchronization point
77 // for race detector logic. We don't use the actual pointer stored in x
78 // directly, for fear of conflicting with other synchronization on that address.
79 // Instead, we hash the pointer to get an index into poolRaceHash.
80 // See discussion on golang.org/cl/31589.
81 func poolRaceAddr(x
interface{}) unsafe
.Pointer
{
82 ptr
:= uintptr((*[2]unsafe
.Pointer
)(unsafe
.Pointer(&x
))[1])
83 h
:= uint32((uint64(uint32(ptr
)) * 0x85ebca6b) >> 16)
84 return unsafe
.Pointer(&poolRaceHash
[h%uint
32(len(poolRaceHash
))])
87 // Put adds x to the pool.
88 func (p
*Pool
) Put(x
interface{}) {
93 if fastrand()%4
== 0 {
94 // Randomly drop x on floor.
97 race
.ReleaseMerge(poolRaceAddr(x
))
101 if l
.private
== nil {
108 l
.shared
= append(l
.shared
, x
)
116 // Get selects an arbitrary item from the Pool, removes it from the
117 // Pool, and returns it to the caller.
118 // Get may choose to ignore the pool and treat it as empty.
119 // Callers should not assume any relation between values passed to Put and
120 // the values returned by Get.
122 // If Get would otherwise return nil and p.New is non-nil, Get returns
123 // the result of calling p.New.
124 func (p
*Pool
) Get() interface{} {
134 last
:= len(l
.shared
) - 1
137 l
.shared
= l
.shared
[:last
]
147 race
.Acquire(poolRaceAddr(x
))
150 if x
== nil && p
.New
!= nil {
156 func (p
*Pool
) getSlow() (x
interface{}) {
157 // See the comment in pin regarding ordering of the loads.
158 size
:= atomic
.LoadUintptr(&p
.localSize
) // load-acquire
159 local
:= p
.local
// load-consume
160 // Try to steal one element from other procs.
161 pid
:= runtime_procPin()
163 for i
:= 0; i
< int(size
); i
++ {
164 l
:= indexLocal(local
, (pid
+i
+1)%int
(size
))
166 last
:= len(l
.shared
) - 1
169 l
.shared
= l
.shared
[:last
]
178 // pin pins the current goroutine to P, disables preemption and returns poolLocal pool for the P.
179 // Caller must call runtime_procUnpin() when done with the pool.
180 func (p
*Pool
) pin() *poolLocal
{
181 pid
:= runtime_procPin()
182 // In pinSlow we store to localSize and then to local, here we load in opposite order.
183 // Since we've disabled preemption, GC cannot happen in between.
184 // Thus here we must observe local at least as large localSize.
185 // We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
186 s
:= atomic
.LoadUintptr(&p
.localSize
) // load-acquire
187 l
:= p
.local
// load-consume
188 if uintptr(pid
) < s
{
189 return indexLocal(l
, pid
)
194 func (p
*Pool
) pinSlow() *poolLocal
{
195 // Retry under the mutex.
196 // Can not lock the mutex while pinned.
199 defer allPoolsMu
.Unlock()
200 pid
:= runtime_procPin()
201 // poolCleanup won't be called while we are pinned.
204 if uintptr(pid
) < s
{
205 return indexLocal(l
, pid
)
208 allPools
= append(allPools
, p
)
210 // If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
211 size
:= runtime
.GOMAXPROCS(0)
212 local
:= make([]poolLocal
, size
)
213 atomic
.StorePointer(&p
.local
, unsafe
.Pointer(&local
[0])) // store-release
214 atomic
.StoreUintptr(&p
.localSize
, uintptr(size
)) // store-release
219 // This function is called with the world stopped, at the beginning of a garbage collection.
220 // It must not allocate and probably should not call any runtime functions.
221 // Defensively zero out everything, 2 reasons:
222 // 1. To prevent false retention of whole Pools.
223 // 2. If GC happens while a goroutine works with l.shared in Put/Get,
224 // it will retain whole Pool. So next cycle memory consumption would be doubled.
225 for i
, p
:= range allPools
{
227 for i
:= 0; i
< int(p
.localSize
); i
++ {
228 l
:= indexLocal(p
.local
, i
)
230 for j
:= range l
.shared
{
247 runtime_registerPoolCleanup(poolCleanup
)
250 func indexLocal(l unsafe
.Pointer
, i
int) *poolLocal
{
251 lp
:= unsafe
.Pointer(uintptr(l
) + uintptr(i
)*unsafe
.Sizeof(poolLocal
{}))
252 return (*poolLocal
)(lp
)
255 // Implemented in runtime.
256 func runtime_registerPoolCleanup(cleanup
func())
257 func runtime_procPin() int
258 func runtime_procUnpin()