// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
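
// mapaccess1_fast32 is a specialized lookup for maps with 32-bit keys,
// called by compiler-generated code for expressions like v = m[k].
// Like mapaccess1, it returns a pointer to the value for key, or a
// pointer to the element type's zero value if key is not present.
// It never returns nil.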
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	// Check the bucket, then each overflow bucket in the chain.
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
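
// mapaccess2_fast32 is the comma-ok variant of mapaccess1_fast32,
// backing v, ok = m[k] for maps with 32-bit keys. The extra boolean
// result reports whether key was present in the map.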
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
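
// mapaccess1_fast64 is the 64-bit-key analogue of mapaccess1_fast32;
// only the key width (8 bytes instead of 4) changes the offsets.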
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
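
// mapaccess2_fast64 is the comma-ok variant of mapaccess1_fast64.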
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
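
// mapaccess1_faststr is a specialized lookup for maps with string keys.
// For single-bucket maps it avoids hashing altogether: short keys are
// compared directly, while long keys are filtered by length and by
// their first and last 4 bytes, falling back to the hash only when two
// candidate entries survive the cheap checks.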
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
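
// mapaccess2_faststr is the comma-ok variant of mapaccess1_faststr.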
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
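
// mapassign_fast32 implements m[k] = v for maps with 32-bit keys. It
// returns a pointer to the value slot for key, creating the entry (and
// growing the table if needed) when key is not present; the caller
// stores the value through the returned pointer.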
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key/value at insert position
	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}
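
// mapassign_fast64 is the 64-bit-key analogue of mapassign_fast32.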
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key/value at insert position
	typedmemmove(t.key, insertk, unsafe.Pointer(&key))
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}
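
// mapassign_faststr is the string-key analogue of mapassign_fast32.
// Only the string header (pointer and length) is copied into the
// bucket, so the key's byte contents are not duplicated.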
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&ky)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*2*sys.PtrSize)
	}

	// store new key/value at insert position
	*((*stringStruct)(insertk)) = *key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}
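
// mapdelete_fast32 removes the entry for key, if present, from a map
// with 32-bit keys. Key and value slots are cleared with typedmemclr
// so the GC does not observe stale pointers.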
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))
			if key != *k {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*4 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
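
// mapdelete_fast64 is the 64-bit-key analogue of mapdelete_fast32.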
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))
			if key != *k {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*8 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
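
// mapdelete_faststr is the string-key analogue of mapdelete_fast32.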
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}