libgo/go/runtime/hashmap_fast.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// mapaccess1_fast32 is the lookup fast path for maps with uint32 keys.
// It returns a pointer to the value for key, or a pointer to a zero value
// if key is not present in the map.
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

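// Layout note for the offset arithmetic above (describing the bucket layout
// defined in hashmap.go, not redefining it here): each bucket stores
// bucketCnt (8) tophash bytes at its start, then 8 packed keys, then 8
// packed values, so for 4-byte keys
//
//	key i:   add(unsafe.Pointer(b), dataOffset+i*4)
//	value i: add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
//
// For example, with 4-byte values, value 3 sits 8*4 + 3*4 = 44 bytes past
// dataOffset.
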
// mapaccess2_fast32 is like mapaccess1_fast32 but also reports whether the
// key was present.
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

// mapaccess1_fast64 is the lookup fast path for maps with uint64 keys.
// It returns a pointer to the value for key, or a pointer to a zero value
// if key is not present in the map.
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether the
// key was present.
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

// mapaccess1_faststr is the lookup fast path for maps with string keys.
// It returns a pointer to the value for ky, or a pointer to a zero value
// if ky is not present in the map.
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

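// Note on the long-key path above: for a single-bucket map and keys of 32
// bytes or more, full memequal calls are avoided where possible. A slot is
// only remembered in keymaybe after its length and its first and last four
// bytes match the query; if a second such candidate turns up, the scan gives
// up and falls through to the hashed lookup at dohash rather than comparing
// multiple long strings byte by byte.
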
// mapaccess2_faststr is like mapaccess1_faststr but also reports whether the
// key was present.
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

// mapassign_fast32 is the assignment fast path for maps with uint32 keys.
// It returns a pointer to the value slot for key, inserting the key if it
// is not already present.
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key/value at insert position
	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

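// Note on the assignment protocol above: the key is hashed before the
// hashWriting flag is set, the target bucket is incrementally evacuated via
// growWork while a grow is in progress, and if inserting would exceed the
// load factor (or there are too many overflow buckets) hashGrow starts a
// grow and control jumps back to again, because growing relocates buckets
// and invalidates the positions found so far.
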
// mapassign_fast64 is the assignment fast path for maps with uint64 keys.
// It returns a pointer to the value slot for key, inserting the key if it
// is not already present.
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key/value at insert position
	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

// mapassign_faststr is the assignment fast path for maps with string keys.
// It returns a pointer to the value slot for ky, inserting the key if it is
// not already present.
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&ky)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*2*sys.PtrSize)
	}

	// store new key/value at insert position
	*((*stringStruct)(insertk)) = *key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

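// Note: mapassign_faststr stores the key by assigning the stringStruct
// header (data pointer and length) directly into the key slot; the string's
// bytes themselves are not copied, so the bucket keeps a reference to the
// original string data.
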
// mapdelete_fast32 is the deletion fast path for maps with uint32 keys.
// It removes key from the map if present.
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))
			if key != *k {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*4 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapdelete_fast64 is the deletion fast path for maps with uint64 keys.
// It removes key from the map if present.
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))
			if key != *k {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*8 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapdelete_faststr is the deletion fast path for maps with string keys.
// It removes ky from the map if present.
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

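// Illustrative sketch only (hypothetical lowering; maptypeOf and hmapOf are
// made-up names for the compiler-supplied arguments): for a map[string]T,
// the builtin delete(m, k) becomes roughly
//
//	mapdelete_faststr(maptypeOf(m), hmapOf(m), k)
//
// which clears the matching key and value slots in place and marks the cell
// empty; deletion never shrinks the bucket array.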