[official-gcc.git] / libgo / go / runtime / hash64.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Hashing algorithm inspired by
//   xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/
// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x wasm alpha arm64be ia64 mips64p32 mips64p32le sparc64 riscv64
package runtime
import "unsafe"
// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
//go:linkname memhash runtime.memhash
const (
	// Constants for multiplication: four random odd 64-bit numbers.
	m1 = 16877499708836156737
	m2 = 2820277070424839065
	m3 = 9497967016996688599
	m4 = 15839092249703872147
)
func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	if (GOARCH == "amd64" || GOARCH == "arm64") &&
		GOOS != "nacl" && useAeshash {
		return aeshash(p, seed, s)
	}
	h := uint64(seed + s*hashkey[0])
tail:
	switch {
	case s == 0:
	case s < 4:
		// 1 to 3 bytes: mix the first, middle, and last byte.
		h ^= uint64(*(*byte)(p))
		h ^= uint64(*(*byte)(add(p, s>>1))) << 8
		h ^= uint64(*(*byte)(add(p, s-1))) << 16
		h = rotl_31(h*m1) * m2
	case s <= 8:
		// 4 to 8 bytes: two overlapping 32-bit reads from the start and the end.
		h ^= uint64(readUnaligned32(p))
		h ^= uint64(readUnaligned32(add(p, s-4))) << 32
		h = rotl_31(h*m1) * m2
	case s <= 16:
		// 9 to 16 bytes: two overlapping 64-bit reads.
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	case s <= 32:
		// 17 to 32 bytes: four 64-bit reads, the last two anchored to the tail.
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, 8))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-16))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	default:
		// Longer than 32 bytes: run four accumulators v1..v4 over
		// 32-byte blocks, fold them together, then hash the remaining
		// partial block through the cases above.
		v1 := h
		v2 := uint64(seed * hashkey[1])
		v3 := uint64(seed * hashkey[2])
		v4 := uint64(seed * hashkey[3])
		for s >= 32 {
			v1 ^= readUnaligned64(p)
			v1 = rotl_31(v1*m1) * m2
			p = add(p, 8)
			v2 ^= readUnaligned64(p)
			v2 = rotl_31(v2*m2) * m3
			p = add(p, 8)
			v3 ^= readUnaligned64(p)
			v3 = rotl_31(v3*m3) * m4
			p = add(p, 8)
			v4 ^= readUnaligned64(p)
			v4 = rotl_31(v4*m4) * m1
			p = add(p, 8)
			s -= 32
		}
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}
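The s <= 16 case above shows the core round most plainly: xor in eight unaligned bytes, multiply by m1, rotate left by 31, multiply by m2, and avalanche at the end. The following standalone sketch (not part of hash64.go; the names round, k1, k2, and k3 are made up here, and encoding/binary plus math/bits stand in for the runtime's readUnaligned64 and rotl_31) walks a 16-byte input through the same steps:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

const (
	k1 uint64 = 16877499708836156737 // plays the role of m1 above
	k2 uint64 = 2820277070424839065  // plays the role of m2 above
	k3 uint64 = 9497967016996688599  // plays the role of m3 above
)

// round is one mixing step: xor in 8 little-endian bytes, then
// multiply-rotate-multiply, mirroring rotl_31(h*m1)*m2 in memhash.
func round(h uint64, b []byte) uint64 {
	h ^= binary.LittleEndian.Uint64(b)
	return bits.RotateLeft64(h*k1, 31) * k2
}

func main() {
	data := []byte("0123456789abcdef") // 16 bytes, i.e. the s <= 16 case
	h := uint64(len(data))             // stand-in for seed + s*hashkey[0]
	h = round(h, data[:8])             // first 8 bytes
	h = round(h, data[len(data)-8:])   // last 8 bytes; overlaps the first read when 8 < s < 16
	h ^= h >> 29                       // final avalanche, as in memhash
	h *= k3
	h ^= h >> 32
	fmt.Printf("hash: %#x\n", h)
}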
func memhash32(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint64(seed + 4*hashkey[0])
	v := uint64(readUnaligned32(p))
	h ^= v
	h ^= v << 32
	h = rotl_31(h*m1) * m2
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}
func memhash64(p unsafe.Pointer, seed uintptr) uintptr {
	h := uint64(seed + 8*hashkey[0])
	h ^= uint64(readUnaligned32(p)) | uint64(readUnaligned32(add(p, 4)))<<32
	h = rotl_31(h*m1) * m2
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}
// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
func rotl_31(x uint64) uint64 {
	return (x << 31) | (x >> (64 - 31))
}
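The rotate above is written with the shift amount constant-folded by hand, as the comment explains, so the compiler can recognize it as a rotate. For comparison, here is a small sketch of the same operation written with math/bits.RotateLeft64 (illustration only, not part of this file; whether a given gccgo version emits a single rotate instruction for either form is not claimed here):

package main

import (
	"fmt"
	"math/bits"
)

// rotl31 is the same 31-bit left rotation as rotl_31 above,
// expressed through the standard library.
func rotl31(x uint64) uint64 {
	return bits.RotateLeft64(x, 31)
}

func main() {
	x := uint64(0x0123456789abcdef)
	fmt.Printf("%#x\n", rotl31(x))
	fmt.Printf("%#x\n", (x<<31)|(x>>(64-31))) // identical result
}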