// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
//
// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
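
// Illustrative note (an editorial sketch, not part of the original source):
// the compiler lowers ordinary map operations to the entry points defined in
// this file, roughly as follows (specialized "fast" variants may be chosen
// for some key types):
//
//	m := make(map[string]int, 10)   // runtime.makemap
//	v := m["k"]                     // runtime.mapaccess1
//	v, ok := m["k"]                 // runtime.mapaccess2
//	m["k"] = 1                      // runtime.mapassign1
//	delete(m, "k")                  // runtime.mapdelete
//	for k, v := range m { ... }     // runtime.mapiterinit / mapiternext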

import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/gc/walk.c must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)
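
	// Illustrative note (an editorial addition, not from the original source):
	// with bucketCnt == 8, bmap is just the 8-byte tophash array, so dataOffset
	// evaluates to 8 on common 64-bit targets; pairing bmap with an int64 field
	// lets unsafe.Offsetof compute the properly aligned offset automatically if
	// bmap or the target's alignment rules ever change.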

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1

	// trigger a garbage collection at every alloc called from this code
	checkgc = false
)

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count int // # live cells == size of map. Must be first (used by len() builtin)
	flags uint32
	hash0 uint32 // hash seed
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
}

// A bucket for a Go map.
type bmap struct {
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
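
// Illustrative layout sketch (an editorial addition assuming a 64-bit target):
// a bucket of a map[int64]int8 is laid out as
//
//	tophash  [8]uint8   8 bytes
//	keys     [8]int64  64 bytes
//	values   [8]int8    8 bytes
//	overflow *bmap      8 bytes
//
// Alternating key/value/key/value... would instead need 7 padding bytes after
// each int8 value to realign the following int64 key.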

// A hash iteration structure.
// If you modify hiter, also change cmd/gc/reflect.c to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c).
	value       unsafe.Pointer // Must be in second position (see cmd/gc/range.c).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
}

func makemap(t *maptype, hint int64) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		gothrow("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		gothrow("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		gothrow("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		gothrow("value size wrong")
	}

	// invariants we depend on. We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		gothrow("key align too big")
	}
	if t.elem.align > bucketCnt {
		gothrow("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		gothrow("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		gothrow("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		gothrow("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		gothrow("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		gothrow("need padding in bucket (value)")
	}

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	var buckets unsafe.Pointer
	if B != 0 {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	h := (*hmap)(newobject(t.hmap))
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}
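
// Worked example (an editorial note, not from the original source): for
// makemap with hint == 100, the size-selection loop above stops at B == 4,
// because loadFactor*2^4 == 104 is the first capacity the hint does not
// exceed, so the table starts with 16 buckets.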

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
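
// Illustrative note (an editorial addition): mapaccess1 splits the hash in
// two. The low-order bits select the bucket and the top 8 bits become the
// tophash used for the quick per-cell comparison. For example, on a 64-bit
// target with B == 5, the bucket index is hash&31 and top is
// uint8(hash>>56), bumped by minTopHash when it would collide with the
// reserved marker values.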

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}

// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}

func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))

	if h.buckets == nil {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2, uintptr(t.key.size)) {
				continue
			}
			// already have a mapping for key. Update it.
			memmove(k2, key, uintptr(t.key.size))
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			memmove(v2, val, uintptr(t.elem.size))
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// did not find mapping for key. Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		newb := (*bmap)(newobject(t.bucket))
		b.setoverflow(t, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	memmove(insertk, key, uintptr(t.key.size))
	memmove(insertv, val, uintptr(t.elem.size))
	*inserti = top
	h.count++
}
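
// Worked example (an editorial note): with B == 3 (8 buckets), inserting a
// new key while h.count is already 52 (== loadFactor*8) makes the check in
// mapassign1 call hashGrow, doubling the table to 16 buckets before the
// insert is retried via the "again" label.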

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2, uintptr(t.key.size)) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}

func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 10 {
		gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another hash_iter_init().
	for {
		old := h.flags
		if old == old|iterator|oldIterator {
			break
		}
		if cas(&h.flags, old, old|iterator|oldIterator) {
			break
		}
	}

	mapiternext(it)
}
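
// Illustrative note (an editorial addition): the random r above is split into
// a starting bucket and a starting cell, e.g. with B == 3,
// startBucket == r&7 and offset == (r>>3)&7. This per-iterator randomization
// is why ranging over the same map can yield a different key order each time.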

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := goalg(t.key.alg)

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet. We're working on a bucket whose
				// oldbucket has not been evacuated yet. Or at least, it wasn't
				// evacuated when we started the bucket. So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if alg.equal(k2, k2, uintptr(t.key.size)) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs). We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation. We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if alg.equal(k2, k2, uintptr(t.key.size)) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it. That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}
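
// Illustrative consequence (an editorial observation): because NaN != NaN, a
// map[float64]int can accumulate multiple entries keyed by NaN; they can
// never be looked up or deleted individually, yet the k != k branch above
// still yields each of them once per iteration.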

func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		gothrow("evacuation not done in time")
	}
	oldbuckets := h.buckets
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}
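
// Illustrative note (an editorial observation): while a grow is in progress,
// every insert or delete on the map calls growWork, so each write evacuates
// at most two old buckets: the one the write is about to touch and one more
// (h.nevacuate) to guarantee forward progress.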

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := goalg(t.key.alg)
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					gothrow("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !alg.equal(k2, k2, uintptr(t.key.size)) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newx := (*bmap)(newobject(t.bucket))
						x.setoverflow(t, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						memmove(xk, k, uintptr(t.key.size)) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						memmove(xv, v, uintptr(t.elem.size))
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newy := (*bmap)(newobject(t.bucket))
						y.setoverflow(t, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						memmove(yk, k, uintptr(t.key.size))
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						memmove(yv, v, uintptr(t.elem.size))
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done. Free old main bucket array.
			h.oldbuckets = nil
		}
	}
}
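
// Worked example (an editorial note): when the table grows from 4 to 8
// buckets, evacuate runs with newbit == 4, so old bucket 1 is split between
// new bucket 1 (evacuatedX) and new bucket 5 (evacuatedY); hash&newbit,
// i.e. bit 2 of each key's hash, decides which half every entry goes to.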

func ismapkey(t *_type) bool {
	return goalg(t.alg).hash != nil
}

// Reflect stubs. Called from ../reflect/asm_*.s

func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0)
}

func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}