// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
11 func mapaccess1_fast32(t
*maptype
, h
*hmap
, key
uint32) unsafe
.Pointer
{
12 if raceenabled
&& h
!= nil {
13 callerpc
:= getcallerpc(unsafe
.Pointer(&t
))
14 racereadpc(unsafe
.Pointer(h
), callerpc
, funcPC(mapaccess1_fast32
))
16 if h
== nil || h
.count
== 0 {
17 return unsafe
.Pointer(t
.elem
.zero
)
21 // One-bucket table. No need to hash.
22 b
= (*bmap
)(h
.buckets
)
24 hash
:= goalg(t
.key
.alg
).hash(noescape(unsafe
.Pointer(&key
)), 4, uintptr(h
.hash0
))
25 m
:= uintptr(1)<<h
.B
- 1
26 b
= (*bmap
)(add(h
.buckets
, (hash
&m
)*uintptr(t
.bucketsize
)))
27 if c
:= h
.oldbuckets
; c
!= nil {
28 oldb
:= (*bmap
)(add(c
, (hash
&(m
>>1))*uintptr(t
.bucketsize
)))
35 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
36 k
:= *((*uint32)(add(unsafe
.Pointer(b
), dataOffset
+i
*4)))
40 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
44 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*4+i
*uintptr(t
.valuesize
))
48 return unsafe
.Pointer(t
.elem
.zero
)
53 func mapaccess2_fast32(t
*maptype
, h
*hmap
, key
uint32) (unsafe
.Pointer
, bool) {
54 if raceenabled
&& h
!= nil {
55 callerpc
:= getcallerpc(unsafe
.Pointer(&t
))
56 racereadpc(unsafe
.Pointer(h
), callerpc
, funcPC(mapaccess2_fast32
))
58 if h
== nil || h
.count
== 0 {
59 return unsafe
.Pointer(t
.elem
.zero
), false
63 // One-bucket table. No need to hash.
64 b
= (*bmap
)(h
.buckets
)
66 hash
:= goalg(t
.key
.alg
).hash(noescape(unsafe
.Pointer(&key
)), 4, uintptr(h
.hash0
))
67 m
:= uintptr(1)<<h
.B
- 1
68 b
= (*bmap
)(add(h
.buckets
, (hash
&m
)*uintptr(t
.bucketsize
)))
69 if c
:= h
.oldbuckets
; c
!= nil {
70 oldb
:= (*bmap
)(add(c
, (hash
&(m
>>1))*uintptr(t
.bucketsize
)))
77 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
78 k
:= *((*uint32)(add(unsafe
.Pointer(b
), dataOffset
+i
*4)))
82 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
86 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*4+i
*uintptr(t
.valuesize
)), true
90 return unsafe
.Pointer(t
.elem
.zero
), false
95 func mapaccess1_fast64(t
*maptype
, h
*hmap
, key
uint64) unsafe
.Pointer
{
96 if raceenabled
&& h
!= nil {
97 callerpc
:= getcallerpc(unsafe
.Pointer(&t
))
98 racereadpc(unsafe
.Pointer(h
), callerpc
, funcPC(mapaccess1_fast64
))
100 if h
== nil || h
.count
== 0 {
101 return unsafe
.Pointer(t
.elem
.zero
)
105 // One-bucket table. No need to hash.
106 b
= (*bmap
)(h
.buckets
)
108 hash
:= goalg(t
.key
.alg
).hash(noescape(unsafe
.Pointer(&key
)), 8, uintptr(h
.hash0
))
109 m
:= uintptr(1)<<h
.B
- 1
110 b
= (*bmap
)(add(h
.buckets
, (hash
&m
)*uintptr(t
.bucketsize
)))
111 if c
:= h
.oldbuckets
; c
!= nil {
112 oldb
:= (*bmap
)(add(c
, (hash
&(m
>>1))*uintptr(t
.bucketsize
)))
113 if !evacuated(oldb
) {
119 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
120 k
:= *((*uint64)(add(unsafe
.Pointer(b
), dataOffset
+i
*8)))
124 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
128 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*8+i
*uintptr(t
.valuesize
))
132 return unsafe
.Pointer(t
.elem
.zero
)
137 func mapaccess2_fast64(t
*maptype
, h
*hmap
, key
uint64) (unsafe
.Pointer
, bool) {
138 if raceenabled
&& h
!= nil {
139 callerpc
:= getcallerpc(unsafe
.Pointer(&t
))
140 racereadpc(unsafe
.Pointer(h
), callerpc
, funcPC(mapaccess2_fast64
))
142 if h
== nil || h
.count
== 0 {
143 return unsafe
.Pointer(t
.elem
.zero
), false
147 // One-bucket table. No need to hash.
148 b
= (*bmap
)(h
.buckets
)
150 hash
:= goalg(t
.key
.alg
).hash(noescape(unsafe
.Pointer(&key
)), 8, uintptr(h
.hash0
))
151 m
:= uintptr(1)<<h
.B
- 1
152 b
= (*bmap
)(add(h
.buckets
, (hash
&m
)*uintptr(t
.bucketsize
)))
153 if c
:= h
.oldbuckets
; c
!= nil {
154 oldb
:= (*bmap
)(add(c
, (hash
&(m
>>1))*uintptr(t
.bucketsize
)))
155 if !evacuated(oldb
) {
161 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
162 k
:= *((*uint64)(add(unsafe
.Pointer(b
), dataOffset
+i
*8)))
166 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
170 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*8+i
*uintptr(t
.valuesize
)), true
174 return unsafe
.Pointer(t
.elem
.zero
), false
179 func mapaccess1_faststr(t
*maptype
, h
*hmap
, ky
string) unsafe
.Pointer
{
180 if raceenabled
&& h
!= nil {
181 callerpc
:= getcallerpc(unsafe
.Pointer(&t
))
182 racereadpc(unsafe
.Pointer(h
), callerpc
, funcPC(mapaccess1_faststr
))
184 if h
== nil || h
.count
== 0 {
185 return unsafe
.Pointer(t
.elem
.zero
)
187 key
:= (*stringStruct
)(unsafe
.Pointer(&ky
))
190 b
:= (*bmap
)(h
.buckets
)
192 // short key, doing lots of comparisons is ok
193 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
194 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
198 k
:= (*stringStruct
)(add(unsafe
.Pointer(b
), dataOffset
+i
*2*ptrSize
))
199 if k
.len != key
.len {
202 if k
.str
== key
.str ||
memeq(k
.str
, key
.str
, uintptr(key
.len)) {
203 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*2*ptrSize
+i
*uintptr(t
.valuesize
))
206 return unsafe
.Pointer(t
.elem
.zero
)
208 // long key, try not to do more comparisons than necessary
209 keymaybe
:= uintptr(bucketCnt
)
210 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
211 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
215 k
:= (*stringStruct
)(add(unsafe
.Pointer(b
), dataOffset
+i
*2*ptrSize
))
216 if k
.len != key
.len {
219 if k
.str
== key
.str
{
220 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*2*ptrSize
+i
*uintptr(t
.valuesize
))
222 // check first 4 bytes
223 // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
224 // four 1-byte comparisons.
225 if *((*[4]byte)(key
.str
)) != *((*[4]byte)(k
.str
)) {
228 // check last 4 bytes
229 if *((*[4]byte)(add(key
.str
, uintptr(key
.len)-4))) != *((*[4]byte)(add(k
.str
, uintptr(key
.len)-4))) {
232 if keymaybe
!= bucketCnt
{
233 // Two keys are potential matches. Use hash to distinguish them.
238 if keymaybe
!= bucketCnt
{
239 k
:= (*stringStruct
)(add(unsafe
.Pointer(b
), dataOffset
+keymaybe
*2*ptrSize
))
240 if memeq(k
.str
, key
.str
, uintptr(key
.len)) {
241 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*2*ptrSize
+keymaybe
*uintptr(t
.valuesize
))
244 return unsafe
.Pointer(t
.elem
.zero
)
247 hash
:= goalg(t
.key
.alg
).hash(noescape(unsafe
.Pointer(&ky
)), 2*ptrSize
, uintptr(h
.hash0
))
248 m
:= uintptr(1)<<h
.B
- 1
249 b
:= (*bmap
)(add(h
.buckets
, (hash
&m
)*uintptr(t
.bucketsize
)))
250 if c
:= h
.oldbuckets
; c
!= nil {
251 oldb
:= (*bmap
)(add(c
, (hash
&(m
>>1))*uintptr(t
.bucketsize
)))
252 if !evacuated(oldb
) {
256 top
:= uint8(hash
>> (ptrSize
*8 - 8))
257 if top
< minTopHash
{
261 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
262 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
266 k
:= (*stringStruct
)(add(unsafe
.Pointer(b
), dataOffset
+i
*2*ptrSize
))
267 if k
.len != key
.len {
270 if k
.str
== key
.str ||
memeq(k
.str
, key
.str
, uintptr(key
.len)) {
271 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*2*ptrSize
+i
*uintptr(t
.valuesize
))
276 return unsafe
.Pointer(t
.elem
.zero
)
281 func mapaccess2_faststr(t
*maptype
, h
*hmap
, ky
string) (unsafe
.Pointer
, bool) {
282 if raceenabled
&& h
!= nil {
283 callerpc
:= getcallerpc(unsafe
.Pointer(&t
))
284 racereadpc(unsafe
.Pointer(h
), callerpc
, funcPC(mapaccess2_faststr
))
286 if h
== nil || h
.count
== 0 {
287 return unsafe
.Pointer(t
.elem
.zero
), false
289 key
:= (*stringStruct
)(unsafe
.Pointer(&ky
))
292 b
:= (*bmap
)(h
.buckets
)
294 // short key, doing lots of comparisons is ok
295 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
296 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
300 k
:= (*stringStruct
)(add(unsafe
.Pointer(b
), dataOffset
+i
*2*ptrSize
))
301 if k
.len != key
.len {
304 if k
.str
== key
.str ||
memeq(k
.str
, key
.str
, uintptr(key
.len)) {
305 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*2*ptrSize
+i
*uintptr(t
.valuesize
)), true
308 return unsafe
.Pointer(t
.elem
.zero
), false
310 // long key, try not to do more comparisons than necessary
311 keymaybe
:= uintptr(bucketCnt
)
312 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
313 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
317 k
:= (*stringStruct
)(add(unsafe
.Pointer(b
), dataOffset
+i
*2*ptrSize
))
318 if k
.len != key
.len {
321 if k
.str
== key
.str
{
322 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*2*ptrSize
+i
*uintptr(t
.valuesize
)), true
324 // check first 4 bytes
325 if *((*[4]byte)(key
.str
)) != *((*[4]byte)(k
.str
)) {
328 // check last 4 bytes
329 if *((*[4]byte)(add(key
.str
, uintptr(key
.len)-4))) != *((*[4]byte)(add(k
.str
, uintptr(key
.len)-4))) {
332 if keymaybe
!= bucketCnt
{
333 // Two keys are potential matches. Use hash to distinguish them.
338 if keymaybe
!= bucketCnt
{
339 k
:= (*stringStruct
)(add(unsafe
.Pointer(b
), dataOffset
+keymaybe
*2*ptrSize
))
340 if memeq(k
.str
, key
.str
, uintptr(key
.len)) {
341 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*2*ptrSize
+keymaybe
*uintptr(t
.valuesize
)), true
344 return unsafe
.Pointer(t
.elem
.zero
), false
347 hash
:= goalg(t
.key
.alg
).hash(noescape(unsafe
.Pointer(&ky
)), 2*ptrSize
, uintptr(h
.hash0
))
348 m
:= uintptr(1)<<h
.B
- 1
349 b
:= (*bmap
)(add(h
.buckets
, (hash
&m
)*uintptr(t
.bucketsize
)))
350 if c
:= h
.oldbuckets
; c
!= nil {
351 oldb
:= (*bmap
)(add(c
, (hash
&(m
>>1))*uintptr(t
.bucketsize
)))
352 if !evacuated(oldb
) {
356 top
:= uint8(hash
>> (ptrSize
*8 - 8))
357 if top
< minTopHash
{
361 for i
:= uintptr(0); i
< bucketCnt
; i
++ {
362 x
:= *((*uint8)(add(unsafe
.Pointer(b
), i
))) // b.topbits[i] without the bounds check
366 k
:= (*stringStruct
)(add(unsafe
.Pointer(b
), dataOffset
+i
*2*ptrSize
))
367 if k
.len != key
.len {
370 if k
.str
== key
.str ||
memeq(k
.str
, key
.str
, uintptr(key
.len)) {
371 return add(unsafe
.Pointer(b
), dataOffset
+bucketCnt
*2*ptrSize
+i
*uintptr(t
.valuesize
)), true
376 return unsafe
.Pointer(t
.elem
.zero
), false