compiler, runtime, reflect: generate hash functions only for map keys
libgo/go/runtime/map_faststr.go
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.

//go:linkname mapaccess1_faststr
//go:linkname mapaccess2_faststr
//go:linkname mapassign_faststr
//go:linkname mapdelete_faststr
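// As a rough illustration (not the compiler's exact rewrite), a string-keyed
// map operation is lowered onto these entry points roughly like:
//
//	v := m[s]      // v = *(*V)(mapaccess1_faststr(t, h, s))
//	v, ok := m[s]  // p, ok := mapaccess2_faststr(t, h, s); v = *(*V)(p)
//	m[s] = v       // *(*V)(mapassign_faststr(t, h, s)) = v
//	delete(m, s)   // mapdelete_faststr(t, h, s)
//
// where t and h are the compiler-supplied *maptype and *hmap for m.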
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
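		// keymaybe records the single candidate slot found so far; bucketCnt
		// serves as the "no candidate yet" sentinel, since any real slot
		// index is in [0, bucketCnt).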
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
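			// Both 4-byte probes above are in bounds: this branch only runs
			// when key.len >= 32, and k.len == key.len was checked first.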
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
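		// Example: while growing from 4 to 8 buckets, m is 7 for the new
		// table and, after the shift above, 3 for the old table.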
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign,
	// since t.hasher may panic, in which case we have not actually done a write.
	h.flags ^= hashWriting
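	// Note the XOR (rather than OR): if a second writer races in here, one of
	// the two toggles flips the bit back to zero, and the hashWriting check
	// at the end of the function then reports the concurrent write.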

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}
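	// With bucketCnt == 8 slots per bucket, overLoadFactor triggers a grow
	// once the average load exceeds 6.5 entries per bucket (13/2 in map.go).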

	if insertb == nil {
		// all current buckets are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
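			// emptyRest marks a slot after which the rest of the bucket chain
			// is empty, letting lookups stop early; the sweep below walks
			// backward from the deleted slot, upgrading trailing emptyOne
			// slots (possibly across whole overflow buckets) to emptyRest.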
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*sys.PtrSize)
		}
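		// Example: when doubling from 8 to 16 buckets, the contents of old
		// bucket 3 split between new bucket 3 (x) and new bucket 3+8 == 11
		// (y), chosen per key by hash&newbit below.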

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}