//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.

// Note - The x86 and x64 versions do _not_ produce the same results, as the
// algorithms are optimized for their respective platforms. You can still
// compile and run any of them on any platform, but your performance with the
// non-native version will be less than optimal.

// This version is based on the C++ version and trimmed to only MurmurHash3_x64_128.
// Edited by Hu Xuesong @ Apr 26 2011, based on r135 | 2011-04-14 07:36:49 +0800 (Thu, 14 Apr 2011)

#include "MurmurHash3.h"

//-----------------------------------------------------------------------------
// Platform-specific functions and macros

// Microsoft Visual Studio

#if defined(_MSC_VER)

#define FORCE_INLINE __forceinline

#include <stdlib.h>

#define ROTL32(x,y) _rotl(x,y)
#define ROTL64(x,y) _rotl64(x,y)

#define BIG_CONSTANT(x) (x)

// Other compilers

#else // defined(_MSC_VER)

#define FORCE_INLINE __attribute__((always_inline))

inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << r) | (x >> (32 - r));
}

inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << r) | (x >> (64 - r));
}

#define ROTL32(x,y) rotl32(x,y) // At least -O is needed for this to work.
#define ROTL64(x,y) rotl64(x,y) // Otherwise linking fails with: MurmurHash3.c:127: undefined reference to `rotl64'.

#define BIG_CONSTANT(x) (x##LLU)

#endif // !defined(_MSC_VER)

//-----------------------------------------------------------------------------
// Block read - if your platform needs to do endian-swapping or can only
// handle aligned reads, do the conversion here

FORCE_INLINE uint64_t getblock ( const uint64_t * p, int i )
{
  return p[i];
}
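
// For illustration only (not part of the original file): one possible
// byte-by-byte variant of getblock for big-endian or alignment-strict
// platforms, as suggested by the comment above. The name getblock_le is an
// assumption; the block is kept disabled so the original behavior is unchanged.
#if 0
FORCE_INLINE uint64_t getblock_le ( const uint8_t * p, int i )
{
  const uint8_t * b = p + i*8;   // read 8 bytes in little-endian order
  return  ((uint64_t)b[0])       | ((uint64_t)b[1]) <<  8 |
          ((uint64_t)b[2]) << 16 | ((uint64_t)b[3]) << 24 |
          ((uint64_t)b[4]) << 32 | ((uint64_t)b[5]) << 40 |
          ((uint64_t)b[6]) << 48 | ((uint64_t)b[7]) << 56;
}
#endif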

//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche

FORCE_INLINE uint64_t fmix ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}
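
// Illustrative only (not part of the original file): a quick avalanche check
// for fmix. Flipping a single input bit should change roughly half of the 64
// output bits. Assumes a GCC/Clang-style __builtin_popcountll; kept disabled.
#if 0
static int fmix_avalanche_demo ( uint64_t x )
{
  uint64_t a = fmix(x);
  uint64_t b = fmix(x ^ 1);             // same input with the lowest bit flipped
  return __builtin_popcountll(a ^ b);   // expected to be somewhere near 32
}
#endif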

//-----------------------------------------------------------------------------

void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  const uint64_t * blocks = (const uint64_t *)(data);

  for(int i = 0; i < nblocks; i++)
  {
    uint64_t k1 = getblock(blocks,i*2+0);
    uint64_t k2 = getblock(blocks,i*2+1);

    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail

  const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15) // cases below intentionally fall through
  {
  case 15: k2 ^= ((uint64_t)tail[14]) << 48;
  case 14: k2 ^= ((uint64_t)tail[13]) << 40;
  case 13: k2 ^= ((uint64_t)tail[12]) << 32;
  case 12: k2 ^= ((uint64_t)tail[11]) << 24;
  case 11: k2 ^= ((uint64_t)tail[10]) << 16;
  case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;
  case  9: k2 ^= ((uint64_t)tail[ 8]) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

  case  8: k1 ^= ((uint64_t)tail[ 7]) << 56;
  case  7: k1 ^= ((uint64_t)tail[ 6]) << 48;
  case  6: k1 ^= ((uint64_t)tail[ 5]) << 40;
  case  5: k1 ^= ((uint64_t)tail[ 4]) << 32;
  case  4: k1 ^= ((uint64_t)tail[ 3]) << 24;
  case  3: k1 ^= ((uint64_t)tail[ 2]) << 16;
  case  2: k1 ^= ((uint64_t)tail[ 1]) << 8;
  case  1: k1 ^= ((uint64_t)tail[ 0]) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
  };

  //----------
  // finalization

  h1 ^= len; h2 ^= len;

  h1 += h2;
  h2 += h1;

  h1 = fmix(h1);
  h2 = fmix(h2);

  h1 += h2;
  h2 += h1;

  ((uint64_t*)out)[0] = h1;
  ((uint64_t*)out)[1] = h2;
}

//-----------------------------------------------------------------------------