// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname mapaccess1_fast64
//go:linkname mapaccess2_fast64
//go:linkname mapassign_fast64
//go:linkname mapassign_fast64ptr
//go:linkname mapdelete_fast64

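// mapaccess1_fast64 is the lookup fast path for maps whose key type is 8
// bytes wide (for example map[uint64]T or map[int64]T); the compiler lowers
// an expression such as v := m[k] to a call of this function. It returns a
// pointer to the element, or a pointer to the shared zero value if the key
// is absent; it never returns nil.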
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

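// mapaccess2_fast64 is the lookup fast path behind the two-result form
// v, ok := m[k]. It behaves like mapaccess1_fast64 but additionally reports
// whether the key was present.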
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

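// mapassign_fast64 implements m[k] = v for maps with 8-byte keys that are
// not pointers (pointer keys use mapassign_fast64ptr below). It returns a
// pointer to the element slot; the compiler-generated caller stores the
// value through that pointer. It may grow the map and must not run
// concurrently with other writers.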
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

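// mapassign_fast64ptr is the variant of mapassign_fast64 used when the
// 8-byte key is itself a pointer, so the key is stored with a pointer write
// rather than a raw uint64 store and stays visible to the garbage collector.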
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

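// mapdelete_fast64 implements delete(m, k) for 8-byte-wide keys. It clears
// the key and element slots, marks the cell emptyOne, and collapses a
// trailing run of emptyOne cells into emptyRest so later lookups can stop
// early.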
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.ptrdata != 0 {
				if goarch.PtrSize == 8 {
					*(*unsafe.Pointer)(k) = nil
				} else {
					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
					// Just call memclrHasPointers instead of trying to handle all cases here.
					memclrHasPointers(k, 8)
				}
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

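// growWork_fast64 performs a little evacuation work on each write while the
// map is growing: it evacuates the old bucket that maps to the bucket about
// to be used, plus one more bucket to keep the overall grow moving forward.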
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

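// evacuate_fast64 moves the entries of the old bucket oldbucket (including
// its overflow chain) into the new bucket array, splitting them between the
// x (same index) and y (index+newbit) destinations when the table doubles.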
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.ptrdata != 0 && writeBarrier.enabled {
					if goarch.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}