//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.
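//
// Minimal usage sketch (illustration only, not part of the original file;
// it just calls the functions declared in MurmurHash3.h): hashing the same
// key with the x86 and x64 variants yields unrelated values.
//
//   uint32_t h32;
//   uint64_t h128[2];
//   MurmurHash3_x86_32 ("hello", 5, /*seed=*/42, &h32);
//   MurmurHash3_x64_128("hello", 5, /*seed=*/42, h128);
//   // h32 and h128[0] are not expected to agree in any way.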
#include "MurmurHash3.h"
//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <stdlib.h>

#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else // defined(_MSC_VER)

// We can't use always_inline, because -Werror -Wattributes will trigger
// a "might not be able to inline" warning.
//#define FORCE_INLINE __attribute__((always_inline))
#define FORCE_INLINE inline
inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}
inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y)
#define ROTL64(x,y) rotl64(x,y)

#define BIG_CONSTANT(x) (x##LLU)
#endif // !defined(_MSC_VER)

namespace {

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here
FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
{
  return p[i];
}

FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
{
  return p[i];
}
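
// Illustration only (not part of the original code): on a big-endian
// platform, the conversion mentioned above could be done roughly like this,
// assuming a GCC/Clang-style __builtin_bswap32 is available:
//
//   FORCE_INLINE uint32_t getblock ( const uint32_t * p, int i )
//   {
//     return __builtin_bswap32(p[i]);
//   }
//
// and likewise with __builtin_bswap64 for the 64-bit overload.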
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE uint32_t fmix ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}

//----------

FORCE_INLINE uint64_t fmix ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}

} // unnamed namespace
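
// Illustrative check (not part of the original code): "avalanche" means a
// one-bit change in the input should flip roughly half of the output bits.
// A quick sanity check could look like this, assuming a GCC/Clang
// __builtin_popcount and <cstdio> are available:
//
//   uint32_t a = fmix(0x12345678u);
//   uint32_t b = fmix(0x12345679u);  // input differs by a single bit
//   printf("%d bits differ\n", __builtin_popcount(a ^ b));  // expect ~16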
//-----------------------------------------------------------------------------

void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;

  uint32_t h1 = seed;

  const uint32_t c1 = 0xcc9e2d51;
  const uint32_t c2 = 0x1b873593;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i);

    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;

    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

  uint32_t k1 = 0;

  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;
  case 2: k1 ^= tail[1] << 8;
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len;

  h1 = fmix(h1);

  *(uint32_t*)out = h1;
}
//-----------------------------------------------------------------------------

void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  const uint32_t c1 = 0x239b961b;
  const uint32_t c2 = 0xab0e9789;
  const uint32_t c3 = 0x38b34ae5;
  const uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock(blocks,i*4+0);
    uint32_t k2 = getblock(blocks,i*4+1);
    uint32_t k3 = getblock(blocks,i*4+2);
    uint32_t k4 = getblock(blocks,i*4+3);

    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch(len & 15)
  {
  case 15: k4 ^= tail[14] << 16;
  case 14: k4 ^= tail[13] << 8;
  case 13: k4 ^= tail[12] << 0;
           k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

  case 12: k3 ^= tail[11] << 24;
  case 11: k3 ^= tail[10] << 16;
  case 10: k3 ^= tail[ 9] << 8;
  case  9: k3 ^= tail[ 8] << 0;
           k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

  case  8: k2 ^= tail[ 7] << 24;
  case  7: k2 ^= tail[ 6] << 16;
  case  6: k2 ^= tail[ 5] << 8;
  case  5: k2 ^= tail[ 4] << 0;
           k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

  case  4: k1 ^= tail[ 3] << 24;
  case  3: k1 ^= tail[ 2] << 16;
  case  2: k1 ^= tail[ 1] << 8;
  case  1: k1 ^= tail[ 0] << 0;
           k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);
  h3 = fmix(h3);
  h4 = fmix(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  ((uint32_t*)out)[0] = h1;
  ((uint32_t*)out)[1] = h2;
  ((uint32_t*)out)[2] = h3;
  ((uint32_t*)out)[3] = h4;
}

//-----------------------------------------------------------------------------
void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)(data);

  for(int i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15)
  {
  case 15: k2 ^= uint64_t(tail[14]) << 48;
  case 14: k2 ^= uint64_t(tail[13]) << 40;
  case 13: k2 ^= uint64_t(tail[12]) << 32;
  case 12: k2 ^= uint64_t(tail[11]) << 24;
  case 11: k2 ^= uint64_t(tail[10]) << 16;
  case 10: k2 ^= uint64_t(tail[ 9]) << 8;
  case  9: k2 ^= uint64_t(tail[ 8]) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

  case  8: k1 ^= uint64_t(tail[ 7]) << 56;
  case  7: k1 ^= uint64_t(tail[ 6]) << 48;
  case  6: k1 ^= uint64_t(tail[ 5]) << 40;
  case  5: k1 ^= uint64_t(tail[ 4]) << 32;
  case  4: k1 ^= uint64_t(tail[ 3]) << 24;
  case  3: k1 ^= uint64_t(tail[ 2]) << 16;
  case  2: k1 ^= uint64_t(tail[ 1]) << 8;
  case  1: k1 ^= uint64_t(tail[ 0]) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}

//-----------------------------------------------------------------------------