// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
//
// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//
//  loadFactor  %overflow  bytes/entry  hitprobe  missprobe
//  4.00        2.13       20.77        3.00      4.00
//  4.50        4.05       17.30        3.25      4.50
//  5.00        6.85       14.77        3.50      5.00
//  5.50        10.55      12.94        3.75      5.50
//  6.00        15.27      11.67        4.00      6.00
//  6.50        20.90      10.79        4.25      6.50
//  7.00        27.14      10.15        4.50      7.00
//  7.50        34.03      9.73         4.75      7.50
//  8.00        41.10      9.40         5.00      8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
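
// Illustrative sketch (not part of the implementation; the names below are
// only stand-ins for the real code further down): a lookup splits one hash
// value into a bucket index and a one-byte "tophash":
//
//	bucket := hash & (uintptr(1)<<h.B - 1) // low-order B bits pick the bucket
//	top := uint8(hash >> (ptrSize*8 - 8))  // high-order byte distinguishes entries
//	if top < minTopHash {
//		top += minTopHash // small values are reserved for the marks defined below
//	}
//
// A full key comparison is only performed for cells whose stored tophash
// byte matches top.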
import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/gc/walk.c must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)
	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1
	// trigger a garbage collection at every alloc called from this code
	checkgc = false
)
// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count int // # live cells == size of map. Must be first (used by len() builtin)
	flags uint32
	hash0 uint32 // hash seed
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
}
// A bucket for a Go map.
type bmap struct {
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
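
// Worked example of the padding saved by this layout (illustrative only):
// for map[int64]int8, alternating key/value/key/value would need 7 bytes of
// padding after each 1-byte value to keep the following 8-byte key aligned,
// i.e. 8*(8+1+7) = 128 bytes of key/value storage per bucket. Packing the
// 8 keys together followed by the 8 values needs only 8*8 + 8*1 = 72 bytes.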
// A hash iteration structure.
// If you modify hiter, also change cmd/gc/reflect.c to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c).
	value       unsafe.Pointer // Must be in second position (see cmd/gc/range.c).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}
func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}
func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
}
func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
}
func makemap(t *maptype, hint int64) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		gothrow("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		gothrow("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		gothrow("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		gothrow("value size wrong")
	}

	// invariants we depend on. We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		gothrow("key align too big")
	}
	if t.elem.align > bucketCnt {
		gothrow("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		gothrow("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		gothrow("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		gothrow("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		gothrow("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		gothrow("need padding in bucket (value)")
	}

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	var buckets unsafe.Pointer
	if B != 0 {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	h := (*hmap)(newobject(t.hmap))
	h.count = 0
	h.B = B
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
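
// Rough sketch of how these entry points are used (illustrative; the exact
// lowering is done by the compiler, not by code in this file):
//
//	// v := m[k] becomes roughly:
//	vp := mapaccess1(maptype, hmap, unsafe.Pointer(&k))
//	v := *(*V)(vp) // vp is never nil: it points at the entry or at the zero object
//
//	// v, ok := m[k] becomes roughly:
//	vp, ok := mapaccess2(maptype, hmap, unsafe.Pointer(&k))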
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}
func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))

	if h.buckets == nil {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2, uintptr(t.key.size)) {
				continue
			}
			// already have a mapping for key. Update it.
			memmove(k2, key, uintptr(t.key.size))
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			memmove(v2, val, uintptr(t.elem.size))
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// did not find mapping for key. Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		newb := (*bmap)(newobject(t.bucket))
		b.setoverflow(t, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	memmove(insertk, key, uintptr(t.key.size))
	memmove(insertv, val, uintptr(t.elem.size))
	*inserti = top
	h.count++
}
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2, uintptr(t.key.size)) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 10 {
		gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another hash_iter_init().
	for {
		old := h.flags
		if old == old|iterator|oldIterator {
			break
		}
		if cas(&h.flags, old, old|iterator|oldIterator) {
			break
		}
	}

	mapiternext(it)
}
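
// Worked example (illustrative): with h.B == 5 (32 buckets) and bucketCnt == 8,
// r = 0x1D3 gives startBucket = 0x1D3 & 31 = 19 and offset = (0x1D3 >> 5) & 7 = 6,
// so this iterator begins at cell 6 of bucket 19. Another iterator over the same
// map will generally pick a different starting point, so iteration order is not
// something callers can rely on.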
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := goalg(t.key.alg)

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet. We're working on a bucket whose
				// oldbucket has not been evacuated yet. Or at least, it wasn't
				// evacuated when we started the bucket. So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if alg.equal(k2, k2, uintptr(t.key.size)) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs). We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation. We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if alg.equal(k2, k2, uintptr(t.key.size)) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it. That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}
func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		gothrow("evacuation not done in time")
	}
	oldbuckets := h.buckets
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}
func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}
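
// Illustrative note on pacing (not a strict guarantee): each growWork call
// evacuates the oldbucket needed by the current operation plus at most one
// more (h.nevacuate), so a grow from 2^(B-1) to 2^B buckets completes after
// on the order of 2^(B-1) map writes, at which point evacuate() below frees
// the old bucket array.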
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := goalg(t.key.alg)
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					gothrow("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !alg.equal(k2, k2, uintptr(t.key.size)) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if top&1 != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newx := (*bmap)(newobject(t.bucket))
						x.setoverflow(t, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						memmove(xk, k, uintptr(t.key.size)) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						memmove(xv, v, uintptr(t.elem.size))
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newy := (*bmap)(newobject(t.bucket))
						y.setoverflow(t, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						memmove(yk, k, uintptr(t.key.size))
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						memmove(yv, v, uintptr(t.elem.size))
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done. Free old main bucket array.
			h.oldbuckets = nil
		}
	}
}
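
// Worked example of the X/Y split (illustrative): growing from 8 to 16 buckets,
// newbit == 8. A key that lived in old bucket 3 stays in new bucket 3 (the "X"
// half) if its hash has bit 3 clear, and moves to new bucket 3+8 = 11 (the "Y"
// half) if that bit is set; evacuatedX/evacuatedY record which choice was made
// so an in-progress iterator can reproduce the decision.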
func ismapkey(t *_type) bool {
	return goalg(t.alg).hash != nil
}

// Reflect stubs. Called from ../reflect/asm_*.s
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0)
}

func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}