// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
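//
// A minimal usage sketch (the keys and values here are hypothetical):
//
//	var m Map
//	m.Store("alice", 1)
//	if v, ok := m.Load("alice"); ok {
//		fmt.Println(v) // prints 1
//	}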
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[any]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}
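// As an illustration of the miss heuristic (not part of the implementation):
// if m.dirty holds three keys, the third lock-protected miss makes misses
// equal to len(m.dirty), so missLocked promotes the dirty map to the read
// map, and the next store of a new key rebuilds it via dirtyLocked.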
// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[any]*entry
	amended bool // true if the dirty map contains some key not in m.
}
// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(any))
// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted, and either m.dirty == nil or
	// m.dirty[key] is e.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}
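// To summarize the three states of e.p (a reading aid, not code):
//
//	nil      - deleted; m.dirty is nil or still holds this entry
//	expunged - deleted and absent from a non-nil m.dirty
//	other    - live; points to the stored interface{} value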
func newEntry(i any) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}
// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
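//
// For example (the key is hypothetical):
//
//	if v, ok := m.Load("config"); ok {
//		_ = v // the stored value
//	}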
func (m *Map) Load(key any) (value any, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}
func (e *entry) load() (value any, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*any)(p), true
}
// Store sets the value for a key.
func (m *Map) Store(key, value any) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}
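// Store therefore resolves in one of three ways: a lock-free CAS on an entry
// already visible in the read map, an update of an entry found only in the
// dirty map, or insertion of a brand-new entry (allocating the dirty map
// first if necessary).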
// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *any) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}
// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}
// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *any) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
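//
// A common pattern, sketched with a hypothetical constructor newValue; note
// that the argument is evaluated even when the key is already present:
//
//	actual, loaded := m.LoadOrStore("k", newValue())
//	if !loaded {
//		// this call's value was stored under "k"
//	}
//	_ = actual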
func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}
// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*any)(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*any)(p), true, true
		}
	}
}
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
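//
// For example, consuming a pending value at most once (hypothetical key):
//
//	if v, loaded := m.LoadAndDelete("job"); loaded {
//		_ = v // v was present and has now been removed
//	}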
func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return nil, false
}
// Delete deletes the value for a key.
func (m *Map) Delete(key any) {
	m.LoadAndDelete(key)
}
func (e *entry) delete() (value any, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return nil, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return *(*any)(p), true
		}
	}
}
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
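//
// For example, counting the entries visible to a single traversal:
//
//	n := 0
//	m.Range(func(_, _ any) bool {
//		n++
//		return true
//	})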
func (m *Map) Range(f func(key, value any) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			return
		}
	}
}
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}
func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[any]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
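
// An end-to-end sketch of the low-contention case described in the Map doc
// comment (disjoint keys written by different goroutines); the loop bound is
// illustrative:
//
//	var m Map
//	var wg WaitGroup
//	for i := 0; i < 4; i++ {
//		i := i
//		wg.Add(1)
//		go func() {
//			defer wg.Done()
//			m.Store(i, i*i)
//		}()
//	}
//	wg.Wait()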