1 //-----------------------------------------------------------------------------
2 // MurmurHash3 was written by Austin Appleby, and is placed in the public
3 // domain. The author hereby disclaims copyright to this source code.
5 // Note - The x86 and x64 versions do _not_ produce the same results, as the
6 // algorithms are optimized for their respective platforms. You can still
7 // compile and run any of them on any platform, but your performance with the
8 // non-native version will be less than optimal.
10 #include "MurmurHash3.h"
15 //-----------------------------------------------------------------------------
16 // Platform-specific functions and macros
// Microsoft Visual Studio

#if defined(_MSC_VER)

# define FORCE_INLINE __forceinline

# include <stdlib.h>  // _rotl / _rotl64 intrinsics

# define ROTL32(x, y) _rotl(x, y)
# define ROTL64(x, y) _rotl64(x, y)

// MSVC accepts bare 64-bit literals; no suffix needed.
# define BIG_CONSTANT(x) (x)

// Other compilers

#else // defined(_MSC_VER)

// We can't do always_inline, because -Werror -Wattribute will trigger
// a "might not be able to inline" warning.
//#define FORCE_INLINE __attribute__((always_inline))
# define FORCE_INLINE inline

// Portable rotate-left. Compilers recognize this pattern and emit a single
// rotate instruction. r must be in (0, 32) — r == 0 would shift by 32 (UB).
inline uint32_t rotl32(uint32_t x, int8_t r) {
  return (x << r) | (x >> (32 - r));
}

// 64-bit rotate-left; r must be in (0, 64).
inline uint64_t rotl64(uint64_t x, int8_t r) {
  return (x << r) | (x >> (64 - r));
}

# define ROTL32(x, y) rotl32(x, y)
# define ROTL64(x, y) rotl64(x, y)

# define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)
53 //-----------------------------------------------------------------------------
54 // Block read - if your platform needs to do endian-swapping or can only
55 // handle aligned reads, do the conversion here
57 FORCE_INLINE
uint32_t getblock(const uint32_t* p
, int i
) { return p
[i
]; }
59 FORCE_INLINE
uint64_t getblock(const uint64_t* p
, int i
) { return p
[i
]; }
61 //-----------------------------------------------------------------------------
62 // Finalization mix - force all bits of a hash block to avalanche
64 FORCE_INLINE
uint32_t fmix(uint32_t h
) {
76 FORCE_INLINE
uint64_t fmix(uint64_t k
) {
78 k
*= BIG_CONSTANT(0xff51afd7ed558ccd);
80 k
*= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
86 } // unnamed namespace
88 //-----------------------------------------------------------------------------
90 void MurmurHash3_x86_32(const void* key
, int len
, uint32_t seed
, void* out
) {
91 const uint8_t* data
= (const uint8_t*)key
;
92 const int nblocks
= len
/ 4;
96 const uint32_t c1
= 0xcc9e2d51;
97 const uint32_t c2
= 0x1b873593;
102 const uint32_t* blocks
= (const uint32_t*)(data
+ nblocks
* 4);
104 for (int i
= -nblocks
; i
; i
++) {
105 uint32_t k1
= getblock(blocks
, i
);
113 h1
= h1
* 5 + 0xe6546b64;
119 const uint8_t* tail
= (const uint8_t*)(data
+ nblocks
* 4);
143 *(uint32_t*)out
= h1
;
146 //-----------------------------------------------------------------------------
148 void MurmurHash3_x86_128(const void* key
, const int len
, uint32_t seed
,
150 const uint8_t* data
= (const uint8_t*)key
;
151 const int nblocks
= len
/ 16;
158 const uint32_t c1
= 0x239b961b;
159 const uint32_t c2
= 0xab0e9789;
160 const uint32_t c3
= 0x38b34ae5;
161 const uint32_t c4
= 0xa1e38b93;
166 const uint32_t* blocks
= (const uint32_t*)(data
+ nblocks
* 16);
168 for (int i
= -nblocks
; i
; i
++) {
169 uint32_t k1
= getblock(blocks
, i
* 4 + 0);
170 uint32_t k2
= getblock(blocks
, i
* 4 + 1);
171 uint32_t k3
= getblock(blocks
, i
* 4 + 2);
172 uint32_t k4
= getblock(blocks
, i
* 4 + 3);
181 h1
= h1
* 5 + 0x561ccd1b;
190 h2
= h2
* 5 + 0x0bcaa747;
199 h3
= h3
* 5 + 0x96cd1c35;
208 h4
= h4
* 5 + 0x32ac3b17;
214 const uint8_t* tail
= (const uint8_t*)(data
+ nblocks
* 16);
223 k4
^= tail
[14] << 16;
234 k3
^= tail
[11] << 24;
236 k3
^= tail
[10] << 16;
300 ((uint32_t*)out
)[0] = h1
;
301 ((uint32_t*)out
)[1] = h2
;
302 ((uint32_t*)out
)[2] = h3
;
303 ((uint32_t*)out
)[3] = h4
;
306 //-----------------------------------------------------------------------------
308 void MurmurHash3_x64_128(const void* key
, const int len
, const uint32_t seed
,
310 const uint8_t* data
= (const uint8_t*)key
;
311 const int nblocks
= len
/ 16;
316 const uint64_t c1
= BIG_CONSTANT(0x87c37b91114253d5);
317 const uint64_t c2
= BIG_CONSTANT(0x4cf5ad432745937f);
322 const uint64_t* blocks
= (const uint64_t*)(data
);
324 for (int i
= 0; i
< nblocks
; i
++) {
325 uint64_t k1
= getblock(blocks
, i
* 2 + 0);
326 uint64_t k2
= getblock(blocks
, i
* 2 + 1);
335 h1
= h1
* 5 + 0x52dce729;
344 h2
= h2
* 5 + 0x38495ab5;
350 const uint8_t* tail
= (const uint8_t*)(data
+ nblocks
* 16);
357 k2
^= uint64_t(tail
[14]) << 48;
359 k2
^= uint64_t(tail
[13]) << 40;
361 k2
^= uint64_t(tail
[12]) << 32;
363 k2
^= uint64_t(tail
[11]) << 24;
365 k2
^= uint64_t(tail
[10]) << 16;
367 k2
^= uint64_t(tail
[9]) << 8;
369 k2
^= uint64_t(tail
[8]) << 0;
376 k1
^= uint64_t(tail
[7]) << 56;
378 k1
^= uint64_t(tail
[6]) << 48;
380 k1
^= uint64_t(tail
[5]) << 40;
382 k1
^= uint64_t(tail
[4]) << 32;
384 k1
^= uint64_t(tail
[3]) << 24;
386 k1
^= uint64_t(tail
[2]) << 16;
388 k1
^= uint64_t(tail
[1]) << 8;
390 k1
^= uint64_t(tail
[0]) << 0;
412 ((uint64_t*)out
)[0] = h1
;
413 ((uint64_t*)out
)[1] = h2
;
416 //-----------------------------------------------------------------------------