// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
// xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/

// +build 386 arm armbe m68k mips mipsle ppc s390 sh shbe sparc

package runtime

import "unsafe"

// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.

//go:linkname memhash runtime.memhash

const (
	// Constants for multiplication: four random odd 32-bit numbers.
	m1 = 3168982561
	m2 = 3339683297
	m3 = 832293441
	m4 = 2336365089
)
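
// memhash hashes the s bytes at p, mixing in seed. On the 32-bit
// platforms named in the build tag above it is the runtime's generic
// memory hash, exported to the compiler via the go:linkname above.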
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	if GOARCH == "386" && GOOS != "nacl" && useAeshash {
		return aeshash(p, seed, s)
	}
	h := uint32(seed + s*hashkey[0])
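	// Inputs of at most 16 bytes are hashed right here; longer inputs
	// take the default case and come back through this switch via the
	// goto with fewer than 16 bytes left. Sizes 4-16 use overlapping
	// unaligned reads so a fixed number of loads covers every byte.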
tail:
	switch {
	case s == 0:
	case s < 4:
		// 1-3 bytes: mix the first, middle, and last bytes.
		h ^= uint32(*(*byte)(p))
		h ^= uint32(*(*byte)(add(p, s>>1))) << 8
		h ^= uint32(*(*byte)(add(p, s-1))) << 16
		h = rotl_15(h*m1) * m2
	case s == 4:
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
	case s <= 8:
		// 5-8 bytes: two reads that may overlap in the middle.
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-4))
		h = rotl_15(h*m1) * m2
	case s <= 16:
		// 9-16 bytes: four reads, the last two anchored to the end.
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, 4))
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-8))
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-4))
		h = rotl_15(h*m1) * m2
	default:
		// Long inputs: four independent lanes consume 16 bytes per
		// iteration, then collapse into h, and the remaining bytes go
		// back through the tail cases above.
		v1 := h
		v2 := uint32(seed * hashkey[1])
		v3 := uint32(seed * hashkey[2])
		v4 := uint32(seed * hashkey[3])
		for s >= 16 {
			v1 ^= readUnaligned32(p)
			v1 = rotl_15(v1*m1) * m2
			p = add(p, 4)
			v2 ^= readUnaligned32(p)
			v2 = rotl_15(v2*m2) * m3
			p = add(p, 4)
			v3 ^= readUnaligned32(p)
			v3 = rotl_15(v3*m3) * m4
			p = add(p, 4)
			v4 ^= readUnaligned32(p)
			v4 = rotl_15(v4*m4) * m1
			p = add(p, 4)
			s -= 16
		}
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}
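	// Finalizer: xor-shifts and multiplies to avalanche the remaining bits.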
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}
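
// memhash32 is the fixed-size variant for 4-byte keys: one word, one
// mixing round, then the same finalizer as memhash.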
func memhash32(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint32(seed + 4*hashkey[0])
	h ^= readUnaligned32(p)
	h = rotl_15(h*m1) * m2
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}
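
// memhash64 is the fixed-size variant for 8-byte keys: two words, two
// mixing rounds, then the same finalizer.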
func memhash64(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint32(seed + 8*hashkey[0])
	h ^= readUnaligned32(p)
	h = rotl_15(h*m1) * m2
	h ^= readUnaligned32(add(p, 4))
	h = rotl_15(h*m1) * m2
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}

// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
func rotl_15(x uint32) uint32 {
	return (x << 15) | (x >> (32 - 15))
}
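
// For comparison only (a sketch, not part of this build): in ordinary
// Go code the same rotation is written with math/bits, which the
// compiler lowers to a rotate instruction where one exists:
//
//	import "math/bits"
//
//	func rotl15(x uint32) uint32 {
//		return bits.RotateLeft32(x, 15) // equivalent to (x<<15)|(x>>17)
//	}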