// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname mapaccess1_fast64
//go:linkname mapaccess2_fast64
//go:linkname mapassign_fast64
//go:linkname mapassign_fast64ptr
//go:linkname mapdelete_fast64
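
// These functions are the fast-path specializations of the generic map
// routines for maps whose keys are 64-bit scalars (for example map[uint64]V
// or map[int64]V): the compiler calls them directly for such map types,
// avoiding the generic key-hashing and key-copying indirections.
// mapassign_fast64ptr covers 64-bit pointer-shaped keys so that storing the
// key goes through a write barrier.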

func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
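
// Note on the lookup fast paths above: when the key is missing, both
// functions return a pointer into zeroVal rather than nil, so the
// compiler-generated caller can unconditionally load the element type's
// zero value through the returned pointer.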

func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*uint64)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}
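
// Note on mapassign_fast64: like the generic mapassign, it does not store
// the element itself. It returns a pointer to the element slot for the key
// (creating the slot if needed), and the compiler-generated caller writes
// the value through that pointer.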

func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			insertb = b
			inserti = i
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			if t.elem.kind&kindNoPointers == 0 {
				memclrHasPointers(v, t.elem.size)
			} else {
				memclrNoHeapPointers(v, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}
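
// Growth is incremental: growWork_fast64 runs from the assign and delete
// paths while a grow is in progress. Each call evacuates the old bucket
// backing the bucket about to be touched, plus at most one additional
// bucket, so the cost of growing is spread across subsequent map writes.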

func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.hashfn(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						// Write with a write barrier.
						*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
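
// Illustrative usage (for exposition only; user code never calls these
// functions directly). For a map keyed by a 64-bit scalar, the compiler
// lowers ordinary map syntax to the routines in this file, roughly:
//
//	m := make(map[uint64]string)
//	m[1] = "a"      // mapassign_fast64 returns the slot; the caller stores "a"
//	s := m[1]       // mapaccess1_fast64
//	s, ok := m[1]   // mapaccess2_fast64
//	delete(m, 1)    // mapdelete_fast64
//	_, _ = s, ok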