libgo/go/runtime/hashmap_fast.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
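
// Illustrative sketch (not part of the original file): user code never calls
// mapaccess1_fast32 directly; the compiler lowers a plain map read with a
// 4-byte key to it, e.g.
//
//	m := map[uint32]string{42: "answer"}
//	s := m[42] // becomes roughly: *(*string)(mapaccess1_fast32(t, h, 42))
//
// The returned pointer is either a value slot inside the bucket (keys sit at
// dataOffset+i*4, values at dataOffset+bucketCnt*4+i*t.valuesize) or
// &zeroVal[0] when the key is absent, which is what makes m[k] yield the
// zero value for missing keys.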
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
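
// Sketch (again an illustration, not runtime code): the comma-ok form of a
// map read is what selects this two-result variant:
//
//	if v, ok := m[42]; ok {
//		println(v)
//	}
//
// The body is identical to mapaccess1_fast32 except for the extra boolean
// reporting whether the key was found.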
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
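
// The two _fast64 functions above mirror the _fast32 pair exactly; only the
// key slot width changes (8 bytes instead of 4), which moves the value area
// to dataOffset+bucketCnt*8. Which key types get routed here (map[uint64]T,
// map[int64]T, and so on) is a front-end decision, not something this file
// controls.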
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// tophash values below minTopHash are reserved for special cell markers
	// (empty, evacuated), so real hashes are bumped above that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
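
// Illustrative sketch (hypothetical helper, not in the runtime): the long-key
// path above avoids calling memequal for every candidate by rejecting on
// cheap checks first. The same filtering idea in ordinary Go:
//
//	func maybeEqual(a, b string) bool {
//		if len(a) != len(b) {
//			return false
//		}
//		if len(a) >= 8 && (a[:4] != b[:4] || a[len(a)-4:] != b[len(b)-4:]) {
//			return false
//		}
//		return true // a survivor still needs a full byte-wise comparison
//	}
//
// Only when two different candidate keys both survive the filters does the
// code give up and jump to dohash, falling back to the normal
// hash-and-tophash lookup.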
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer( /* &t */ nil))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
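
// Closing sketch (illustration only): all six fast paths in this file are
// reached through ordinary map syntax, one per key type and result arity:
//
//	func lookups(a map[uint32]int, b map[uint64]int, c map[string]int) (int, int, int) {
//		x := a[1]     // mapaccess1_fast32
//		y, _ := b[2]  // mapaccess2_fast64
//		z := c["key"] // mapaccess1_faststr
//		return x, y, z
//	}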