libgo/go/sync/map.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}
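
// A minimal usage sketch (hypothetical caller code, not part of this file):
//
//	var m sync.Map
//	m.Store("greeting", "hello")         // insert or overwrite a key
//	if v, ok := m.Load("greeting"); ok { // concurrent-safe read
//		fmt.Println(v)               // prints "hello"
//	}
//	m.Delete("greeting")                 // remove the key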

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}
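
// newEntry allocates an entry that points at a private copy of i.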
func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}
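
// load atomically reads the entry's value; ok is false if the entry has been
// deleted (p == nil) or expunged.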
func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}
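
// A minimal sketch of the LoadOrStore pattern (hypothetical caller code, not
// part of this file): all goroutines that race on the same key end up sharing
// a single value.
//
//	var m sync.Map
//	v, _ := m.LoadOrStore("hits", new(int64)) // stores the new counter only if absent
//	atomic.AddInt64(v.(*int64), 1)            // every caller increments the same counter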

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}
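
// delete atomically clears the entry's pointer, reporting whether the entry
// previously held a value.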
func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}
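
// A minimal sketch of iteration (hypothetical caller code, not part of this
// file); returning false from the callback stops the iteration early.
//
//	m.Range(func(k, v interface{}) bool {
//		fmt.Printf("%v: %v\n", k, v)
//		return true
//	})

// missLocked records a miss on the read map and, once m.misses reaches
// len(m.dirty), promotes the dirty map to be the new read map.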
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}
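
// dirtyLocked initializes m.dirty, if it is nil, as a shallow copy of the read
// map, marking already-deleted (nil) entries as expunged and omitting them
// from the copy.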
func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}
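
// tryExpungeLocked atomically swaps a nil entry pointer for the expunged
// marker, reporting whether the entry is expunged when it returns.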
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
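
// A minimal sketch of use case (2) from the Map doc comment (hypothetical
// caller code, not part of this file): goroutines storing disjoint key sets
// with no additional locking.
//
//	var m sync.Map
//	var wg sync.WaitGroup
//	for i := 0; i < 4; i++ {
//		wg.Add(1)
//		go func(id int) {
//			defer wg.Done()
//			m.Store(id, id*id) // each goroutine writes only its own key
//		}(i)
//	}
//	wg.Wait()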