[official-gcc.git] / libgo / go / sync / pool.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"runtime"
	"sync/atomic"
	"unsafe"
)

// A Pool is a set of temporary objects that may be individually saved and
// retrieved.
//
// Any item stored in the Pool may be removed automatically at any time without
// notification. If the Pool holds the only reference when this happens, the
// item might be deallocated.
//
// A Pool is safe for use by multiple goroutines simultaneously.
//
// Pool's purpose is to cache allocated but unused items for later reuse,
// relieving pressure on the garbage collector. That is, it makes it easy to
// build efficient, thread-safe free lists. However, it is not suitable for all
// free lists.
//
// An appropriate use of a Pool is to manage a group of temporary items
// silently shared among and potentially reused by concurrent independent
// clients of a package. Pool provides a way to amortize allocation overhead
// across many clients.
//
// An example of good use of a Pool is in the fmt package, which maintains a
// dynamically-sized store of temporary output buffers. The store scales under
// load (when many goroutines are actively printing) and shrinks when
// quiescent.
//
// On the other hand, a free list maintained as part of a short-lived object is
// not a suitable use for a Pool, since the overhead does not amortize well in
// that scenario. It is more efficient to have such objects implement their own
// free list.
type Pool struct {
	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
}

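// Usage sketch of the API above: a client package typically keeps one
// long-lived Pool, lets New build fresh items on a miss, and brackets each
// use with Get and Put. The names bufPool, bytes.Buffer and handle below
// are illustrative only, not part of this package.
//
//	var bufPool = sync.Pool{
//		New: func() interface{} { return new(bytes.Buffer) },
//	}
//
//	func handle() {
//		b := bufPool.Get().(*bytes.Buffer)
//		b.Reset() // never rely on state left by a previous user
//		defer bufPool.Put(b)
//		// ... write to b ...
//	}
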
// Local per-P Pool appendix.
type poolLocal struct {
	private interface{}   // Can be used only by the respective P.
	shared  []interface{} // Can be used by any P.
	Mutex                 // Protects shared.
	pad     [128]byte     // Prevents false sharing.
}

// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
	if raceenabled {
		// Under race detector the Pool degenerates into no-op.
		// It's conforming, simple and does not introduce excessive
		// happens-before edges between unrelated goroutines.
		return
	}
	if x == nil {
		return
	}
	l := p.pin()
	if l.private == nil {
		l.private = x
		x = nil
	}
	runtime_procUnpin()
	if x == nil {
		return
	}
	l.Lock()
	l.shared = append(l.shared, x)
	l.Unlock()
}

// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
	if raceenabled {
		if p.New != nil {
			return p.New()
		}
		return nil
	}
	l := p.pin()
	x := l.private
	l.private = nil
	runtime_procUnpin()
	if x != nil {
		return x
	}
	l.Lock()
	last := len(l.shared) - 1
	if last >= 0 {
		x = l.shared[last]
		l.shared = l.shared[:last]
	}
	l.Unlock()
	if x != nil {
		return x
	}
	return p.getSlow()
}

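// When p.New is nil, Get reports an empty pool by returning nil, so such
// callers need their own fallback. Sketch of client code (the names pool,
// T, newT and work are illustrative only):
//
//	if v := pool.Get(); v != nil {
//		work(v.(*T))
//	} else {
//		work(newT()) // pool was empty and pool.New is nil
//	}
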
func (p *Pool) getSlow() (x interface{}) {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal one element from other procs.
	pid := runtime_procPin()
	runtime_procUnpin()
	for i := 0; i < int(size); i++ {
		l := indexLocal(local, (pid+i+1)%int(size))
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
			l.Unlock()
			break
		}
		l.Unlock()
	}

	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}

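// Steal-order illustration for the loop above: with size = 4 and pid = 1,
// the index (pid+i+1)%int(size) visits the poolLocals of Ps 2, 3, 0 and
// finally 1, so every other P is tried before the caller's own (already
// drained) list is rechecked. The concrete numbers are examples only.
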
// pin pins the current goroutine to P, disables preemption and returns the poolLocal pool for the P.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() *poolLocal {
	pid := runtime_procPin()
	// In pinSlow we store to local and then to localSize, here we load in opposite order.
	// Since we've disabled preemption, GC can not happen in between.
	// Thus here we must observe local at least as large as localSize.
	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	return p.pinSlow()
}

func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer((*unsafe.Pointer)(&p.local), unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))                            // store-release
	return &local[pid]
}

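// Publication order used by pinSlow and relied on by pin: the new array
// pointer is stored before the new size,
//
//	p.local = unsafe.Pointer(&local[0]) // store-release
//	p.localSize = uintptr(size)         // store-release
//
// while pin loads localSize (acquire) before local, so any pid that passes
// the pid < s check indexes an array of at least s zero-initialized
// poolLocals.
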
func poolCleanup() {
	// This function is called with the world stopped, at the beginning of a garbage collection.
	// It must not allocate and probably should not call any runtime functions.
	// Defensively zero out everything, 2 reasons:
	// 1. To prevent false retention of whole Pools.
	// 2. If GC happens while a goroutine works with l.shared in Put/Get,
	//    it will retain whole Pool. So next cycle memory consumption would be doubled.
	for i, p := range allPools {
		allPools[i] = nil
		for i := 0; i < int(p.localSize); i++ {
			l := indexLocal(p.local, i)
			l.private = nil
			for j := range l.shared {
				l.shared[j] = nil
			}
			l.shared = nil
		}
	}
	allPools = []*Pool{}
}

var (
	allPoolsMu Mutex
	allPools   []*Pool
)

func init() {
	runtime_registerPoolCleanup(poolCleanup)
}

func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	return &(*[1000000]poolLocal)(l)[i]
}

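// indexLocal indexes a notional [1000000]poolLocal array purely to let Go's
// ordinary indexing perform the address arithmetic for slot i; only the
// first localSize slots ever exist. A rough pointer-arithmetic equivalent
// (a sketch, not used by this file) would be:
//
//	addr := uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{})
//	return (*poolLocal)(unsafe.Pointer(addr))
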
// Implemented in runtime.
func runtime_registerPoolCleanup(cleanup func())
func runtime_procPin() int
func runtime_procUnpin()