// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
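//
// For illustration only (not part of the implementation): with B == 5,
// i.e. 32 buckets, and a 64-bit hash, the bucket index and the per-entry
// tophash byte are derived roughly like this:
//
//	bucket := hash & (1<<5 - 1)    // low-order 5 bits select one of 32 buckets
//	top := uint8(hash >> (64 - 8)) // high-order 8 bits stored per entry
//	if top < minTopHash {
//		top += minTopHash // small values are reserved for evacuation marks
//	}
//
// See bucketMask and tophash below for the real versions.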
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
//
// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//	loadFactor	%overflow	bytes/entry	hitprobe	missprobe
//	4.00		2.13		20.77		3.00		4.00
//	4.50		4.05		17.30		3.25		4.50
//	5.00		6.85		14.77		3.50		5.00
//	5.50		10.55		12.94		3.75		5.50
//	6.00		15.27		11.67		4.00		6.00
//	6.50		20.90		10.79		4.25		6.50
//	7.00		27.14		10.15		4.50		7.00
//	7.50		34.03		9.73		4.75		7.50
//	8.00		41.10		9.40		5.00		8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
//go:linkname makemap runtime.makemap
//go:linkname makemap64 runtime.makemap64
//go:linkname makemap_small runtime.makemap_small
//go:linkname mapaccess1 runtime.mapaccess1
//go:linkname mapaccess2 runtime.mapaccess2
//go:linkname mapaccess1_fat runtime.mapaccess1_fat
//go:linkname mapaccess2_fat runtime.mapaccess2_fat
//go:linkname mapassign runtime.mapassign
//go:linkname mapdelete runtime.mapdelete
//go:linkname mapiterinit runtime.mapiterinit
//go:linkname mapiternext runtime.mapiternext
const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth is 6.5.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorNum = 13
	loadFactorDen = 2

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128
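
	// For illustration only: a key or value type larger than this cutoff,
	// for example the key of a map[[256]byte]int, is not stored inline;
	// the bucket instead holds a pointer to a separately allocated copy
	// (see the t.indirectkey and t.indirectvalue checks below).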

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*sys.PtrSize) - 1
)
// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count     int // # live cells == size of map. Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and value do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows storing a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
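
// For illustration only: in a map[int64]int8 bucket laid out this way, the
// 8 tophash bytes are followed by 8 int64 keys, then 8 int8 values, then the
// overflow pointer. Interleaving key/value pairs instead would require 7
// padding bytes after every int8 value to keep the next int64 key aligned.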

// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	if sys.GoarchAmd64|sys.GoarchAmd64p32|sys.Goarch386 != 0 {
		b &= sys.PtrSize*8 - 1 // help x86 archs remove shift overflow checks
	}
	return uintptr(1) << b
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and fastrand & 7 == 0 with probability 1/8.
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}
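
// Rough illustration of the approximate counting above (not part of the
// implementation): with h.B == 20 the increment fires with probability
// 1/(1<<5) = 1/32, so by the time noverflow reaches the 1<<15 threshold used
// by tooManyOverflowBuckets there are roughly 32 * 32768 = 2^20 overflow
// buckets, i.e. about one per regular bucket.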

func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.bucket))
	}
	h.incrnoverflow()
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	// The size of hmap should be 48 bytes on 64 bit
	// and 28 bytes on 32 bit platforms.
	if sz := unsafe.Sizeof(hmap{}); sz != 8+5*sys.PtrSize {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || hint > int(maxSliceCap(t.bucket.size)) {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.hash0 = fastrand()

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hashfn := t.key.hashfn
	equalfn := t.key.equalfn
	hash := hashfn(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if equalfn(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	hashfn := t.key.hashfn
	equalfn := t.key.equalfn
	hash := hashfn(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if equalfn(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	hashfn := t.key.hashfn
	equalfn := t.key.equalfn
	hash := hashfn(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if equalfn(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
	}
	return nil, nil
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	v := mapaccess1(t, h, key)
	if v == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return v
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	v := mapaccess1(t, h, key)
	if v == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return v, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hashfn := t.key.hashfn
	equalfn := t.key.equalfn
	hash := hashfn(key, uintptr(h.hash0))

	// Set hashWriting after calling alg.hash, since alg.hash may panic,
	// in which case we have not actually done a write.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if !equalfn(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate {
				typedmemmove(t.key, k, key)
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(val) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectvalue {
		val = *((*unsafe.Pointer)(val))
	}
	return val
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hashfn := t.key.hashfn
	equalfn := t.key.equalfn
	hash := hashfn(key, uintptr(h.hash0))

	// Set hashWriting after calling alg.hash, since alg.hash may panic,
	// in which case we have not actually done a write (delete).
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !equalfn(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.indirectkey {
				*(*unsafe.Pointer)(k) = nil
			} else if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.indirectvalue || t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					*(*unsafe.Pointer)(v) = nil
				} else {
					memclrHasPointers(v, t.elem.size)
				}
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
// Gccgo-specific: *it need not be zeroed by the compiler,
// and it's cheaper to zero it here.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	hashfn := t.key.hashfn
	equalfn := t.key.equalfn

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if b.tophash[offi] == empty || b.tophash[offi] == evacuatedEmpty {
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		if t.indirectkey {
			k = *((*unsafe.Pointer)(k))
		}
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. Or at least, it wasn't
			// evacuated when we started the bucket. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow).
			if t.reflexivekey || equalfn(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := hashfn(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.reflexivekey || equalfn(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.indirectvalue {
				v = *((*unsafe.Pointer)(v))
			}
			it.value = v
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, rv := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.value = rv
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.bucket.size * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.bucket.size
		}
	}
	buckets = newarray(t.bucket, int(nbuckets))
	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
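
// Worked example (not part of the implementation): with B == 5 there are
// bucketShift(5) == 32 buckets, so the threshold is 13*(32/2) = 208 entries,
// which is the 6.5 average load per 8-slot bucket described at the top of
// this file. mapassign calls hashGrow once inserting would exceed this.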

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/val index into b
	k unsafe.Pointer // pointer to current key storage
	v unsafe.Pointer // pointer to current value storage
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	hashfn := t.key.hashfn
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
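		// Illustration (not part of the implementation): when a map with 32
		// old buckets doubles, newbit == 32, and old bucket 3 is split into
		// new bucket 3 (the x destination) and new bucket 3+32 == 35 (the y
		// destination); each entry goes to x or y according to hash&newbit.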
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*uintptr(t.keysize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := hashfn(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.reflexivekey && !t.key.equalfn(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.indirectkey {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.key, dst.k, k) // copy value
				}
				if t.indirectvalue {
					*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
				} else {
					typedmemmove(t.elem, dst.v, v)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.keysize))
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

func ismapkey(t *_type) bool {
	return t.hashfn != nil
}
1186 //go:linkname reflect_makemap reflect.makemap
1187 func reflect_makemap(t
*maptype
, cap int) *hmap
{
1188 // Check invariants and reflects math.
1189 if sz
:= unsafe
.Sizeof(hmap
{}); sz
!= t
.hmap
.size
{
1190 println("runtime: sizeof(hmap) =", sz
, ", t.hmap.size =", t
.hmap
.size
)
1191 throw("bad hmap size")
1193 if !ismapkey(t
.key
) {
1194 throw("runtime.reflect_makemap: unsupported map key type")
1196 if t
.key
.size
> maxKeySize
&& (!t
.indirectkey || t
.keysize
!= uint8(sys
.PtrSize
)) ||
1197 t
.key
.size
<= maxKeySize
&& (t
.indirectkey || t
.keysize
!= uint8(t
.key
.size
)) {
1198 throw("key size wrong")
1200 if t
.elem
.size
> maxValueSize
&& (!t
.indirectvalue || t
.valuesize
!= uint8(sys
.PtrSize
)) ||
1201 t
.elem
.size
<= maxValueSize
&& (t
.indirectvalue || t
.valuesize
!= uint8(t
.elem
.size
)) {
1202 throw("value size wrong")
1204 if t
.key
.align
> bucketCnt
{
1205 throw("key align too big")
1207 if t
.elem
.align
> bucketCnt
{
1208 throw("value align too big")
1210 if t
.key
.size%uintptr
(t
.key
.align
) != 0 {
1211 throw("key size not a multiple of key align")
1213 if t
.elem
.size%uintptr
(t
.elem
.align
) != 0 {
1214 throw("value size not a multiple of value align")
1217 throw("bucketsize too small for proper alignment")
1219 if dataOffset%uintptr
(t
.key
.align
) != 0 {
1220 throw("need padding in bucket (key)")
1222 if dataOffset%uintptr
(t
.elem
.align
) != 0 {
1223 throw("need padding in bucket (value)")
1226 return makemap(t
, cap, nil)

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.elem, p, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}

const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte
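
// Illustration (assumptions noted, not part of the implementation): a lookup
// that misses, e.g. v := m[k] on a map[string]int64 where k is absent, gets a
// pointer into zeroVal back from mapaccess1 and so reads 8 zero bytes. Value
// types larger than maxZero are presumably routed by the compiler through
// mapaccess1_fat/mapaccess2_fat, which return the caller-supplied zero value
// pointer instead.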