// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
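
// This file contains versions of the map access, assign, and delete
// routines specialized for the most common key types: uint32, uint64,
// and string. The compiler emits calls to these routines instead of the
// generic ones in hashmap.go when a map's key has one of these types;
// they assume the same bucket layout as the generic code (a tophash
// array, then bucketCnt keys, then bucketCnt values, then an overflow
// pointer).
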
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

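// As an illustrative sketch (the rewrite happens in the compiler, not
// here), a lookup on a map with uint32 keys compiles into a direct call
// of the routine above:
//
//	m := map[uint32]int{1: 10}
//	v := m[1] // roughly: v = *(*int)(mapaccess1_fast32(maptype, h, 1))
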
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

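// The comma-ok form of a lookup is routed to mapaccess2_fast32 instead,
// which also reports whether the key was present (illustrative sketch):
//
//	v, ok := m[1] // roughly: mapaccess2_fast32(maptype, h, 1)
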
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

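// For a single-bucket map (h.B == 0), the routine above avoids hashing
// entirely. Short keys (len < 32) are simply compared against every slot.
// Long keys are first pruned by length, by pointer equality, and by their
// first and last 4 bytes; if exactly one slot survives, a single full
// memequal decides the lookup, and if two or more slots remain plausible,
// the code gives up and takes the hashing path (dohash).
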
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

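// Note that all the access routines return &zeroVal[0], not nil, when the
// key is absent: the compiler copies the value out of that zeroed buffer,
// so a failed lookup produces the zero value without allocating.
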
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

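// An assignment through a uint32 key compiles into a call of the routine
// above, which returns the address of the value slot for the compiler to
// store into (illustrative sketch; V stands for the map's value type):
//
//	m[k] = v // roughly: *(*V)(mapassign_fast32(maptype, h, k)) = v
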
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

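// The _fast32ptr and _fast64ptr variants handle key types that are a
// single pointer of the corresponding width. Storing the key through an
// unsafe.Pointer keeps the store visible to the garbage collector's write
// barrier, which a raw uint32/uint64 store of a pointer value would hide.
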
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && insertb == nil {
					insertb = b
					inserti = i
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

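// Unlike the numeric assign routines, which compare full keys directly,
// mapassign_faststr computes top := tophash(hash) up front and checks it
// against b.tophash[i] first, since the one-byte tophash comparison is
// far cheaper than comparing string contents.
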
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

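// A delete on a map with uint32 keys compiles into a call of the routine
// above (illustrative sketch):
//
//	delete(m, k) // roughly: mapdelete_fast32(maptype, h, k)
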
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

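// Growth is incremental: instead of rehashing the whole table at once,
// each assign or delete on a growing map evacuates at most two old
// buckets (the one about to be used, plus one more for forward progress),
// which bounds the latency added to any single map write.
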
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

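// Evacuation splits each old bucket between two destinations. For example,
// when growing from 4 to 8 buckets, newbit == 4, so an entry from old
// bucket 1 is sent to new bucket 1 (x) when hash&newbit == 0 and to new
// bucket 5 (y) when hash&newbit != 0; the old tophash slot records which
// destination was chosen (evacuatedX or evacuatedY).
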
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}