// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}
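
// A minimal usage sketch (illustrative, not part of the original file),
// seen from a client package: the zero Map is ready to use without
// initialization, and values come back as interface{}.
//
//	var m sync.Map
//	m.Store("greeting", "hello")
//	if v, ok := m.Load("greeting"); ok {
//		s := v.(string) // callers type-assert the stored value
//		_ = s
//	}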

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}
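
// Reading aid (not part of the original file): the states of p described
// above form a small state machine.
//
//	valid    --delete-------------------> nil      (value removed)
//	nil      --dirtyLocked copy---------> expunged (left out of m.dirty)
//	expunged --unexpungeLocked + store--> valid    (after m.dirty[key] = e)
//	nil      --tryStore/tryLoadOrStore--> valid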

func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}
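
// Load usage sketch (illustrative, client-side): a key that was never
// stored and a key that was deleted both yield (nil, false).
//
//	var m sync.Map
//	m.Store("k", 1)
//	m.Delete("k")
//	if _, ok := m.Load("k"); !ok {
//		// "k" is absent again
//	}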

func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}
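
// Store usage sketch (illustrative, client-side): concurrent writers on
// disjoint keys, the second use case the Map documentation calls out.
//
//	var m sync.Map
//	var wg sync.WaitGroup
//	for i := 0; i < 4; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			m.Store(i, i*i) // each goroutine writes only its own key
//		}(i)
//	}
//	wg.Wait()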

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return false
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}
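
// LoadOrStore usage sketch (illustrative, client-side; newValue and the
// string key are hypothetical): a grow-only cache. Under contention more
// than one caller may construct a value, but all callers observe the same
// stored one.
//
//	v, loaded := m.LoadOrStore("answer", newValue()) // newValue is hypothetical
//	if !loaded {
//		// this call stored its value; others will load it
//	}
//	_ = v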

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}

func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}
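
// Range usage sketch (illustrative, client-side): count the live entries;
// returning false from f would stop the iteration early.
//
//	n := 0
//	m.Range(func(key, value interface{}) bool {
//		n++
//		return true
//	})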

func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}
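
// Worked example of the promotion threshold (a reading aid, not part of
// the original file): with len(m.dirty) == 3, the first two misses return
// with m.misses at 1 and 2; on the third, m.misses reaches len(m.dirty),
// the dirty map is promoted to the read map, and dirty/misses are reset.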

func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
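
// Note (a reading aid, not part of the original file): dirtyLocked and
// tryExpungeLocked cooperate. While dirtyLocked rebuilds the dirty map,
// every entry whose pointer is nil (deleted) is atomically marked expunged
// and omitted from the copy; that is how deleted keys eventually leave the
// map entirely.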