2015-09-24 Vladimir Makarov <vmakarov@redhat.com>
[official-gcc.git] / libgo / go / runtime / hashmap_fast.go
blobafa6ecc99a9fd5f9e581fe7603ac4b781460e9d9
1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
import (
	"unsafe"
)
11 func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
12 if raceenabled && h != nil {
13 callerpc := getcallerpc(unsafe.Pointer(&t))
14 racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
16 if h == nil || h.count == 0 {
17 return unsafe.Pointer(t.elem.zero)
19 var b *bmap
20 if h.B == 0 {
21 // One-bucket table. No need to hash.
22 b = (*bmap)(h.buckets)
23 } else {
24 hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
25 m := uintptr(1)<<h.B - 1
26 b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
27 if c := h.oldbuckets; c != nil {
28 oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
29 if !evacuated(oldb) {
30 b = oldb
34 for {
35 for i := uintptr(0); i < bucketCnt; i++ {
36 k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
37 if k != key {
38 continue
40 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
41 if x == empty {
42 continue
44 return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
46 b = b.overflow(t)
47 if b == nil {
48 return unsafe.Pointer(t.elem.zero)
53 func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
54 if raceenabled && h != nil {
55 callerpc := getcallerpc(unsafe.Pointer(&t))
56 racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
58 if h == nil || h.count == 0 {
59 return unsafe.Pointer(t.elem.zero), false
61 var b *bmap
62 if h.B == 0 {
63 // One-bucket table. No need to hash.
64 b = (*bmap)(h.buckets)
65 } else {
66 hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
67 m := uintptr(1)<<h.B - 1
68 b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
69 if c := h.oldbuckets; c != nil {
70 oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
71 if !evacuated(oldb) {
72 b = oldb
76 for {
77 for i := uintptr(0); i < bucketCnt; i++ {
78 k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
79 if k != key {
80 continue
82 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
83 if x == empty {
84 continue
86 return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
88 b = b.overflow(t)
89 if b == nil {
90 return unsafe.Pointer(t.elem.zero), false
95 func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
96 if raceenabled && h != nil {
97 callerpc := getcallerpc(unsafe.Pointer(&t))
98 racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
100 if h == nil || h.count == 0 {
101 return unsafe.Pointer(t.elem.zero)
103 var b *bmap
104 if h.B == 0 {
105 // One-bucket table. No need to hash.
106 b = (*bmap)(h.buckets)
107 } else {
108 hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
109 m := uintptr(1)<<h.B - 1
110 b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
111 if c := h.oldbuckets; c != nil {
112 oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
113 if !evacuated(oldb) {
114 b = oldb
118 for {
119 for i := uintptr(0); i < bucketCnt; i++ {
120 k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
121 if k != key {
122 continue
124 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
125 if x == empty {
126 continue
128 return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
130 b = b.overflow(t)
131 if b == nil {
132 return unsafe.Pointer(t.elem.zero)
137 func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
138 if raceenabled && h != nil {
139 callerpc := getcallerpc(unsafe.Pointer(&t))
140 racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
142 if h == nil || h.count == 0 {
143 return unsafe.Pointer(t.elem.zero), false
145 var b *bmap
146 if h.B == 0 {
147 // One-bucket table. No need to hash.
148 b = (*bmap)(h.buckets)
149 } else {
150 hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
151 m := uintptr(1)<<h.B - 1
152 b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
153 if c := h.oldbuckets; c != nil {
154 oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
155 if !evacuated(oldb) {
156 b = oldb
160 for {
161 for i := uintptr(0); i < bucketCnt; i++ {
162 k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
163 if k != key {
164 continue
166 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
167 if x == empty {
168 continue
170 return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
172 b = b.overflow(t)
173 if b == nil {
174 return unsafe.Pointer(t.elem.zero), false
179 func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
180 if raceenabled && h != nil {
181 callerpc := getcallerpc(unsafe.Pointer(&t))
182 racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
184 if h == nil || h.count == 0 {
185 return unsafe.Pointer(t.elem.zero)
187 key := (*stringStruct)(unsafe.Pointer(&ky))
188 if h.B == 0 {
189 // One-bucket table.
190 b := (*bmap)(h.buckets)
191 if key.len < 32 {
192 // short key, doing lots of comparisons is ok
193 for i := uintptr(0); i < bucketCnt; i++ {
194 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
195 if x == empty {
196 continue
198 k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
199 if k.len != key.len {
200 continue
202 if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
203 return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
206 return unsafe.Pointer(t.elem.zero)
208 // long key, try not to do more comparisons than necessary
209 keymaybe := uintptr(bucketCnt)
210 for i := uintptr(0); i < bucketCnt; i++ {
211 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
212 if x == empty {
213 continue
215 k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
216 if k.len != key.len {
217 continue
219 if k.str == key.str {
220 return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
222 // check first 4 bytes
223 // TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
224 // four 1-byte comparisons.
225 if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
226 continue
228 // check last 4 bytes
229 if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
230 continue
232 if keymaybe != bucketCnt {
233 // Two keys are potential matches. Use hash to distinguish them.
234 goto dohash
236 keymaybe = i
238 if keymaybe != bucketCnt {
239 k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
240 if memeq(k.str, key.str, uintptr(key.len)) {
241 return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
244 return unsafe.Pointer(t.elem.zero)
246 dohash:
247 hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
248 m := uintptr(1)<<h.B - 1
249 b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
250 if c := h.oldbuckets; c != nil {
251 oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
252 if !evacuated(oldb) {
253 b = oldb
256 top := uint8(hash >> (ptrSize*8 - 8))
257 if top < minTopHash {
258 top += minTopHash
260 for {
261 for i := uintptr(0); i < bucketCnt; i++ {
262 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
263 if x != top {
264 continue
266 k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
267 if k.len != key.len {
268 continue
270 if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
271 return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
274 b = b.overflow(t)
275 if b == nil {
276 return unsafe.Pointer(t.elem.zero)
281 func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
282 if raceenabled && h != nil {
283 callerpc := getcallerpc(unsafe.Pointer(&t))
284 racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
286 if h == nil || h.count == 0 {
287 return unsafe.Pointer(t.elem.zero), false
289 key := (*stringStruct)(unsafe.Pointer(&ky))
290 if h.B == 0 {
291 // One-bucket table.
292 b := (*bmap)(h.buckets)
293 if key.len < 32 {
294 // short key, doing lots of comparisons is ok
295 for i := uintptr(0); i < bucketCnt; i++ {
296 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
297 if x == empty {
298 continue
300 k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
301 if k.len != key.len {
302 continue
304 if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
305 return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
308 return unsafe.Pointer(t.elem.zero), false
310 // long key, try not to do more comparisons than necessary
311 keymaybe := uintptr(bucketCnt)
312 for i := uintptr(0); i < bucketCnt; i++ {
313 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
314 if x == empty {
315 continue
317 k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
318 if k.len != key.len {
319 continue
321 if k.str == key.str {
322 return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
324 // check first 4 bytes
325 if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
326 continue
328 // check last 4 bytes
329 if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
330 continue
332 if keymaybe != bucketCnt {
333 // Two keys are potential matches. Use hash to distinguish them.
334 goto dohash
336 keymaybe = i
338 if keymaybe != bucketCnt {
339 k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
340 if memeq(k.str, key.str, uintptr(key.len)) {
341 return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
344 return unsafe.Pointer(t.elem.zero), false
346 dohash:
347 hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
348 m := uintptr(1)<<h.B - 1
349 b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
350 if c := h.oldbuckets; c != nil {
351 oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
352 if !evacuated(oldb) {
353 b = oldb
356 top := uint8(hash >> (ptrSize*8 - 8))
357 if top < minTopHash {
358 top += minTopHash
360 for {
361 for i := uintptr(0); i < bucketCnt; i++ {
362 x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
363 if x != top {
364 continue
366 k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
367 if k.len != key.len {
368 continue
370 if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
371 return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
374 b = b.overflow(t)
375 if b == nil {
376 return unsafe.Pointer(t.elem.zero), false