libgo/go/runtime/hashmap_fast.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

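// The routines in this file are specialized versions of the generic map
// operations in hashmap.go for maps keyed by uint32, uint64, and string.
// They load and compare keys inline instead of going through the generic
// key-handling indirection, and the compiler emits calls to them for map
// expressions whose key type matches.

// mapaccess1_fast32 returns a pointer to the value for key, or a pointer to
// the shared zero value if the key is not present. It never returns nil.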
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

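// mapaccess2_fast32 is the comma-ok variant of mapaccess1_fast32: it also
// reports whether the key was present.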
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

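// mapaccess1_fast64 and mapaccess2_fast64 mirror the 32-bit variants above,
// with 8-byte keys and an 8-byte stride through each bucket's key array.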
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

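// mapaccess1_faststr looks up a string key. For a single-bucket map it avoids
// hashing entirely: short keys (< 32 bytes) are compared directly, while for
// longer keys it first compares lengths and the first/last four bytes, and
// falls back to hashing only when more than one slot remains a candidate.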
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

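// mapassign_fast32 returns a pointer to the value slot for key, inserting the
// key if it is not already present. It remembers the first empty slot seen
// while scanning the bucket chain so a new key can reuse it, and triggers a
// grow (then retries) when the load factor or overflow-bucket count gets too
// high.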
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

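// mapassign_fast32ptr is the variant used when the 4-byte key is itself a
// pointer: the key is stored as an unsafe.Pointer so the store is visible to
// the garbage collector.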
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

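// mapassign_fast64 and mapassign_fast64ptr are the 8-byte analogues of the
// two functions above.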
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

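// mapassign_faststr inserts or updates the entry for string key s. Unlike the
// integer fast paths it compares the precomputed tophash byte first, then the
// string length, and only then the string contents.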
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && insertb == nil {
					insertb = b
					inserti = i
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

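// mapdelete_fast32 removes the entry for key, if any. Key and value slots are
// cleared only when they contain pointers, so the garbage collector does not
// keep the deleted entry's referents alive. mapdelete_fast64 and
// mapdelete_faststr below follow the same pattern.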
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

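// growWork_fast32 performs incremental evacuation during a grow: it evacuates
// the old bucket that maps to the bucket about to be used, plus one more so
// the grow eventually completes.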
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

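// evacuate_fast32 copies the entries of one old bucket (and its overflow
// chain) into the new bucket array. When the table is doubling, each entry
// goes to either destination x (same index) or y (index + newbit), chosen by
// rehashing the key.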
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

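// growWork_faststr and evacuate_faststr are the string-key analogues of the
// functions above; keys are copied as string headers, so the key's data
// pointer is written as a pointer value.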
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}