1 //-----------------------------------------------------------------------------
2 // MurmurHash3 was written by Austin Appleby, and is placed in the public
3 // domain. The author hereby disclaims copyright to this source code.
5 // Note - The x86 and x64 versions do _not_ produce the same results, as the
6 // algorithms are optimized for their respective platforms. You can still
7 // compile and run any of them on any platform, but your performance with the
8 // non-native version will be less than optimal.
#include "MurmurHash3.h"

#include <string.h>
14 //-----------------------------------------------------------------------------
15 // Platform-specific functions and macros
17 // Microsoft Visual Studio
21 # define FORCE_INLINE __forceinline
23 # define ROTL32(x, y) _rotl(x, y)
24 # define ROTL64(x, y) _rotl64(x, y)
26 # define BIG_CONSTANT(x) (x)
30 #else // defined(_MSC_VER)
// We can't do always_inline, because -Werror -Wattribute will trigger
33 // a "might not be able to inline" warning.
34 // #define FORCE_INLINE __attribute__((always_inline))
35 # define FORCE_INLINE inline
// Rotate x left by r bits. Callers must pass 0 < r < 32: a rotate count of
// 0 (or 32) would shift by the full type width, which is undefined behavior.
// static gives internal linkage, matching the helper's file-local use.
static inline uint32_t rotl32(uint32_t x, int8_t r) {
  return (x << r) | (x >> (32 - r));
}
// Rotate x left by r bits (64-bit). Callers must pass 0 < r < 64; r == 0
// or 64 would be a full-width shift, which is undefined behavior.
// static gives internal linkage, matching the helper's file-local use.
static inline uint64_t rotl64(uint64_t x, int8_t r) {
  return (x << r) | (x >> (64 - r));
}
45 # define ROTL32(x, y) rotl32(x, y)
46 # define ROTL64(x, y) rotl64(x, y)
48 # define BIG_CONSTANT(x) (x##LLU)
50 #endif // !defined(_MSC_VER)
52 //-----------------------------------------------------------------------------
53 // Block read - if your platform needs to do endian-swapping or can only
54 // handle aligned reads, do the conversion here
56 FORCE_INLINE
uint32_t getblock(const uint32_t* p
, int i
) { return p
[i
]; }
58 FORCE_INLINE
uint64_t getblock(const uint64_t* p
, int i
) { return p
[i
]; }
60 //-----------------------------------------------------------------------------
61 // Finalization mix - force all bits of a hash block to avalanche
63 FORCE_INLINE
uint32_t fmix(uint32_t h
) {
75 FORCE_INLINE
uint64_t fmix(uint64_t k
) {
77 k
*= BIG_CONSTANT(0xff51afd7ed558ccd);
79 k
*= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
85 } // unnamed namespace
87 //-----------------------------------------------------------------------------
// MurmurHash3, 32-bit variant optimized for x86.
//
// key:  pointer to the data to hash (any alignment)
// len:  length of key in bytes
// seed: initial hash state
// out:  receives the 32-bit result
//
// Loads are done with memcpy so unaligned input is safe and no
// strict-aliasing rules are violated; on little-endian platforms the result
// is identical to the reference implementation's direct uint32_t loads.
// NOTE(review): like the reference code, output is native-byte-order
// dependent — results differ between little- and big-endian hosts.
void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out) {
  const uint8_t* data = (const uint8_t*)key;
  const int nblocks = len / 4;

  uint32_t h1 = seed;

  const uint32_t c1 = 0xcc9e2d51;
  const uint32_t c2 = 0x1b873593;

  //----------
  // body: mix one 32-bit block at a time

  for (int i = 0; i < nblocks; i++) {
    uint32_t k1;
    memcpy(&k1, data + (size_t)i * 4, sizeof(k1));  // alignment-safe load

    k1 *= c1;
    k1 = (k1 << 15) | (k1 >> 17);  // ROTL32(k1, 15)
    k1 *= c2;

    h1 ^= k1;
    h1 = (h1 << 13) | (h1 >> 19);  // ROTL32(h1, 13)
    h1 = h1 * 5 + 0xe6546b64;
  }

  //----------
  // tail: 0-3 trailing bytes

  const uint8_t* tail = data + (size_t)nblocks * 4;

  uint32_t k1 = 0;

  switch (len & 3) {
    case 3: k1 ^= (uint32_t)tail[2] << 16; /* fallthrough */
    case 2: k1 ^= (uint32_t)tail[1] << 8;  /* fallthrough */
    case 1: k1 ^= (uint32_t)tail[0];
            k1 *= c1;
            k1 = (k1 << 15) | (k1 >> 17);
            k1 *= c2;
            h1 ^= k1;
  }

  //----------
  // finalization: fold in the length, then avalanche (fmix32)

  h1 ^= (uint32_t)len;

  h1 ^= h1 >> 16;
  h1 *= 0x85ebca6b;
  h1 ^= h1 >> 13;
  h1 *= 0xc2b2ae35;
  h1 ^= h1 >> 16;

  memcpy(out, &h1, sizeof(h1));
}
145 //-----------------------------------------------------------------------------
// MurmurHash3, 128-bit variant optimized for x86 (four 32-bit lanes).
//
// key:  pointer to the data to hash (any alignment)
// len:  length of key in bytes
// seed: initial state for all four lanes
// out:  receives 16 bytes of result
//
// Loads use memcpy so unaligned input is safe and no strict-aliasing rules
// are violated; on little-endian platforms this matches the reference
// implementation's direct uint32_t loads. NOTE(review): as in the reference
// code, the result is native-byte-order dependent.
void MurmurHash3_x86_128(const void* key, const int len, uint32_t seed, void* out) {
  const uint8_t* data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  const uint32_t c1 = 0x239b961b;
  const uint32_t c2 = 0xab0e9789;
  const uint32_t c3 = 0x38b34ae5;
  const uint32_t c4 = 0xa1e38b93;

  //----------
  // body: four interleaved 32-bit lanes per 16-byte block

  for (int i = 0; i < nblocks; i++) {
    uint32_t k1, k2, k3, k4;
    memcpy(&k1, data + (size_t)i * 16 + 0, sizeof(k1));
    memcpy(&k2, data + (size_t)i * 16 + 4, sizeof(k2));
    memcpy(&k3, data + (size_t)i * 16 + 8, sizeof(k3));
    memcpy(&k4, data + (size_t)i * 16 + 12, sizeof(k4));

    k1 *= c1; k1 = (k1 << 15) | (k1 >> 17); k1 *= c2; h1 ^= k1;
    h1 = (h1 << 19) | (h1 >> 13); h1 += h2; h1 = h1 * 5 + 0x561ccd1b;

    k2 *= c2; k2 = (k2 << 16) | (k2 >> 16); k2 *= c3; h2 ^= k2;
    h2 = (h2 << 17) | (h2 >> 15); h2 += h3; h2 = h2 * 5 + 0x0bcaa747;

    k3 *= c3; k3 = (k3 << 17) | (k3 >> 15); k3 *= c4; h3 ^= k3;
    h3 = (h3 << 15) | (h3 >> 17); h3 += h4; h3 = h3 * 5 + 0x96cd1c35;

    k4 *= c4; k4 = (k4 << 18) | (k4 >> 14); k4 *= c1; h4 ^= k4;
    h4 = (h4 << 13) | (h4 >> 19); h4 += h1; h4 = h4 * 5 + 0x32ac3b17;
  }

  //----------
  // tail: 0-15 trailing bytes; every case falls through by design

  const uint8_t* tail = data + (size_t)nblocks * 16;

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch (len & 15) {
    case 15: k4 ^= (uint32_t)tail[14] << 16; /* fallthrough */
    case 14: k4 ^= (uint32_t)tail[13] << 8;  /* fallthrough */
    case 13: k4 ^= (uint32_t)tail[12] << 0;
             k4 *= c4; k4 = (k4 << 18) | (k4 >> 14); k4 *= c1; h4 ^= k4;
             /* fallthrough */
    case 12: k3 ^= (uint32_t)tail[11] << 24; /* fallthrough */
    case 11: k3 ^= (uint32_t)tail[10] << 16; /* fallthrough */
    case 10: k3 ^= (uint32_t)tail[9] << 8;   /* fallthrough */
    case 9:  k3 ^= (uint32_t)tail[8] << 0;
             k3 *= c3; k3 = (k3 << 17) | (k3 >> 15); k3 *= c4; h3 ^= k3;
             /* fallthrough */
    case 8:  k2 ^= (uint32_t)tail[7] << 24; /* fallthrough */
    case 7:  k2 ^= (uint32_t)tail[6] << 16; /* fallthrough */
    case 6:  k2 ^= (uint32_t)tail[5] << 8;  /* fallthrough */
    case 5:  k2 ^= (uint32_t)tail[4] << 0;
             k2 *= c2; k2 = (k2 << 16) | (k2 >> 16); k2 *= c3; h2 ^= k2;
             /* fallthrough */
    case 4:  k1 ^= (uint32_t)tail[3] << 24; /* fallthrough */
    case 3:  k1 ^= (uint32_t)tail[2] << 16; /* fallthrough */
    case 2:  k1 ^= (uint32_t)tail[1] << 8;  /* fallthrough */
    case 1:  k1 ^= (uint32_t)tail[0] << 0;
             k1 *= c1; k1 = (k1 << 15) | (k1 >> 17); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization: fold in length, cross-mix lanes, avalanche, cross-mix again

  h1 ^= (uint32_t)len; h2 ^= (uint32_t)len;
  h3 ^= (uint32_t)len; h4 ^= (uint32_t)len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  uint32_t hv[4] = { h1, h2, h3, h4 };
  for (int j = 0; j < 4; j++) {  // fmix32 applied to each lane
    uint32_t h = hv[j];
    h ^= h >> 16;
    h *= 0x85ebca6b;
    h ^= h >> 13;
    h *= 0xc2b2ae35;
    h ^= h >> 16;
    hv[j] = h;
  }
  h1 = hv[0]; h2 = hv[1]; h3 = hv[2]; h4 = hv[3];

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  uint32_t res[4] = { h1, h2, h3, h4 };
  memcpy(out, res, sizeof(res));
}
305 //-----------------------------------------------------------------------------
// MurmurHash3, 128-bit variant optimized for x64 (two 64-bit lanes).
//
// key:  pointer to the data to hash (any alignment)
// len:  length of key in bytes
// seed: initial state for both lanes
// out:  receives 16 bytes of result
//
// Loads use memcpy so unaligned input is safe and no strict-aliasing rules
// are violated; on little-endian platforms this matches the reference
// implementation's direct uint64_t loads. NOTE(review): as in the reference
// code, the result is native-byte-order dependent.
void MurmurHash3_x64_128(const void* key, const int len, const uint32_t seed, void* out) {
  const uint8_t* data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  const uint64_t c1 = 0x87c37b91114253d5ULL;
  const uint64_t c2 = 0x4cf5ad432745937fULL;

  //----------
  // body: two interleaved 64-bit lanes per 16-byte block

  for (int i = 0; i < nblocks; i++) {
    uint64_t k1, k2;
    memcpy(&k1, data + (size_t)i * 16 + 0, sizeof(k1));
    memcpy(&k2, data + (size_t)i * 16 + 8, sizeof(k2));

    k1 *= c1; k1 = (k1 << 31) | (k1 >> 33); k1 *= c2; h1 ^= k1;
    h1 = (h1 << 27) | (h1 >> 37); h1 += h2; h1 = h1 * 5 + 0x52dce729;

    k2 *= c2; k2 = (k2 << 33) | (k2 >> 31); k2 *= c1; h2 ^= k2;
    h2 = (h2 << 31) | (h2 >> 33); h2 += h1; h2 = h2 * 5 + 0x38495ab5;
  }

  //----------
  // tail: 0-15 trailing bytes; every case falls through by design

  const uint8_t* tail = data + (size_t)nblocks * 16;

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch (len & 15) {
    case 15: k2 ^= (uint64_t)tail[14] << 48; /* fallthrough */
    case 14: k2 ^= (uint64_t)tail[13] << 40; /* fallthrough */
    case 13: k2 ^= (uint64_t)tail[12] << 32; /* fallthrough */
    case 12: k2 ^= (uint64_t)tail[11] << 24; /* fallthrough */
    case 11: k2 ^= (uint64_t)tail[10] << 16; /* fallthrough */
    case 10: k2 ^= (uint64_t)tail[9] << 8;   /* fallthrough */
    case 9:  k2 ^= (uint64_t)tail[8] << 0;
             k2 *= c2; k2 = (k2 << 33) | (k2 >> 31); k2 *= c1; h2 ^= k2;
             /* fallthrough */
    case 8:  k1 ^= (uint64_t)tail[7] << 56; /* fallthrough */
    case 7:  k1 ^= (uint64_t)tail[6] << 48; /* fallthrough */
    case 6:  k1 ^= (uint64_t)tail[5] << 40; /* fallthrough */
    case 5:  k1 ^= (uint64_t)tail[4] << 32; /* fallthrough */
    case 4:  k1 ^= (uint64_t)tail[3] << 24; /* fallthrough */
    case 3:  k1 ^= (uint64_t)tail[2] << 16; /* fallthrough */
    case 2:  k1 ^= (uint64_t)tail[1] << 8;  /* fallthrough */
    case 1:  k1 ^= (uint64_t)tail[0] << 0;
             k1 *= c1; k1 = (k1 << 31) | (k1 >> 33); k1 *= c2; h1 ^= k1;
  }

  //----------
  // finalization: fold in length, cross-mix lanes, avalanche, cross-mix again

  h1 ^= (uint64_t)len;
  h2 ^= (uint64_t)len;

  h1 += h2;
  h2 += h1;

  // fmix64 applied to each lane
  h1 ^= h1 >> 33;
  h1 *= 0xff51afd7ed558ccdULL;
  h1 ^= h1 >> 33;
  h1 *= 0xc4ceb9fe1a85ec53ULL;
  h1 ^= h1 >> 33;

  h2 ^= h2 >> 33;
  h2 *= 0xff51afd7ed558ccdULL;
  h2 ^= h2 >> 33;
  h2 *= 0xc4ceb9fe1a85ec53ULL;
  h2 ^= h2 >> 33;

  h1 += h2;
  h2 += h1;

  uint64_t res[2] = { h1, h2 };
  memcpy(out, res, sizeof(res));
}
415 //-----------------------------------------------------------------------------