/* siphash.c (from ruby.git): SipHash implementation used by the
 * interpreter's hash tables. */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include "siphash.h"
4 #ifndef SIP_HASH_STREAMING
5 #define SIP_HASH_STREAMING 1
6 #endif
8 #if defined(__MINGW32__)
9 #include <sys/param.h>
11 /* MinGW only defines LITTLE_ENDIAN and BIG_ENDIAN macros */
12 #define __LITTLE_ENDIAN LITTLE_ENDIAN
13 #define __BIG_ENDIAN BIG_ENDIAN
14 #elif defined(_WIN32)
15 #define BYTE_ORDER __LITTLE_ENDIAN
16 #elif !defined(BYTE_ORDER)
17 #include <endian.h>
18 #endif
20 #ifndef LITTLE_ENDIAN
21 #define LITTLE_ENDIAN __LITTLE_ENDIAN
22 #endif
23 #ifndef BIG_ENDIAN
24 #define BIG_ENDIAN __BIG_ENDIAN
25 #endif
/* Map the .lo/.hi members of the two-word uint64_t union (used when the
 * compiler lacks a native 64-bit integer type) onto u32[0]/u32[1] in
 * host byte order, so `v.lo`/`v.hi` always mean the low/high 32 bits. */
#if BYTE_ORDER == LITTLE_ENDIAN
#define lo u32[0]
#define hi u32[1]
#elif BYTE_ORDER == BIG_ENDIAN
#define hi u32[0]
#define lo u32[1]
#else
#error "Only strictly little or big endian supported"
#endif
37 /* __POWERPC__ added to accommodate Darwin case. */
38 #ifndef UNALIGNED_WORD_ACCESS
39 # if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
40 defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \
41 defined(__powerpc64__) || defined(__POWERPC__) || defined(__aarch64__) || \
42 defined(__mc68020__)
43 # define UNALIGNED_WORD_ACCESS 1
44 # endif
45 #endif
46 #ifndef UNALIGNED_WORD_ACCESS
47 # define UNALIGNED_WORD_ACCESS 0
48 #endif
/* Read 4 bytes at p as a little-endian 32-bit value, independent of
 * host byte order and alignment.
 * Fix: the original definition ended with a stray trailing backslash,
 * which spliced the following line into the macro body. */
#define U8TO32_LE(p)                                                \
    (((uint32_t)((p)[0])      ) | ((uint32_t)((p)[1]) <<  8) |      \
     ((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))

/* Store v at p as 4 little-endian bytes. */
#define U32TO8_LE(p, v)                 \
do {                                    \
    (p)[0] = (uint8_t)((v)      );      \
    (p)[1] = (uint8_t)((v) >>  8);      \
    (p)[2] = (uint8_t)((v) >> 16);      \
    (p)[3] = (uint8_t)((v) >> 24);      \
} while (0)
62 #ifdef HAVE_UINT64_T
/* 64-bit little-endian load/store built from the 32-bit helpers. */
#define U8TO64_LE(p) \
    ((uint64_t)U8TO32_LE(p) | ((uint64_t)U8TO32_LE((p) + 4)) << 32)

#define U64TO8_LE(p, v) \
do { \
    U32TO8_LE((p),     (uint32_t)((v)      )); \
    U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); \
} while (0)

/* Rotate v left by s bits.  s must be in 1..63: a shift by the full
 * width (s == 0 or s == 64) would be undefined behavior; SipHash only
 * uses 13, 16, 17, 21 and 32.
 * Fix: the expansion is now fully parenthesized so the macro composes
 * safely with surrounding operators. */
#define ROTL64(v, s) \
    (((v) << (s)) | ((v) >> (64 - (s))))

#define ROTL64_TO(v, s) ((v) = ROTL64((v), (s)))

#define ADD64_TO(v, s) ((v) += (s))
#define XOR64_TO(v, s) ((v) ^= (s))
#define XOR64_INT(v, x) ((v) ^= (x))
80 #else
81 #define U8TO64_LE(p) u8to64_le(p)
82 static inline uint64_t
83 u8to64_le(const uint8_t *p)
85 uint64_t ret;
86 ret.lo = U8TO32_LE(p);
87 ret.hi = U8TO32_LE(p + 4);
88 return ret;
91 #define U64TO8_LE(p, v) u64to8_le(p, v)
92 static inline void
93 u64to8_le(uint8_t *p, uint64_t v)
95 U32TO8_LE(p, v.lo);
96 U32TO8_LE(p + 4, v.hi);
99 #define ROTL64_TO(v, s) ((s) > 32 ? rotl64_swap(rotl64_to(&(v), (s) - 32)) : \
100 (s) == 32 ? rotl64_swap(&(v)) : rotl64_to(&(v), (s)))
101 static inline uint64_t *
102 rotl64_to(uint64_t *v, unsigned int s)
104 uint32_t uhi = (v->hi << s) | (v->lo >> (32 - s));
105 uint32_t ulo = (v->lo << s) | (v->hi >> (32 - s));
106 v->hi = uhi;
107 v->lo = ulo;
108 return v;
111 static inline uint64_t *
112 rotl64_swap(uint64_t *v)
114 uint32_t t = v->lo;
115 v->lo = v->hi;
116 v->hi = t;
117 return v;
120 #define ADD64_TO(v, s) add64_to(&(v), (s))
121 static inline uint64_t *
122 add64_to(uint64_t *v, const uint64_t s)
124 v->lo += s.lo;
125 v->hi += s.hi;
126 if (v->lo < s.lo) v->hi++;
127 return v;
130 #define XOR64_TO(v, s) xor64_to(&(v), (s))
131 static inline uint64_t *
132 xor64_to(uint64_t *v, const uint64_t s)
134 v->lo ^= s.lo;
135 v->hi ^= s.hi;
136 return v;
139 #define XOR64_INT(v, x) ((v).lo ^= (x))
140 #endif
/* The SipHash initialization string "somepseudorandomlygeneratedbytes",
 * stored with each 8-byte chunk byte-reversed so that reading u64[0..3]
 * as little-endian words yields the spec's v0..v3 constants.
 * NOTE(review): on a big-endian host the u64 view reads differently, so
 * hash values are not portable across endianness — presumably acceptable
 * since the hash is only used in-process; confirm if portability matters. */
static const union {
    char bin[32];
    uint64_t u64[4];
} sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};
#define sip_init_state sip_init_state_bin.u64
148 #if SIP_HASH_STREAMING
149 struct sip_interface_st {
150 void (*init)(sip_state *s, const uint8_t *key);
151 void (*update)(sip_state *s, const uint8_t *data, size_t len);
152 void (*final)(sip_state *s, uint64_t *digest);
155 static void int_sip_init(sip_state *state, const uint8_t *key);
156 static void int_sip_update(sip_state *state, const uint8_t *data, size_t len);
157 static void int_sip_final(sip_state *state, uint64_t *digest);
159 static const sip_interface sip_methods = {
160 int_sip_init,
161 int_sip_update,
162 int_sip_final
164 #endif /* SIP_HASH_STREAMING */
/* One SipRound: the add-rotate-xor permutation over the four 64-bit
 * state words, as specified in the SipHash paper (Aumasson/Bernstein). */
#define SIP_COMPRESS(v0, v1, v2, v3) \
do { \
    ADD64_TO((v0), (v1)); \
    ADD64_TO((v2), (v3)); \
    ROTL64_TO((v1), 13); \
    ROTL64_TO((v3), 16); \
    XOR64_TO((v1), (v0)); \
    XOR64_TO((v3), (v2)); \
    ROTL64_TO((v0), 32); \
    ADD64_TO((v2), (v1)); \
    ADD64_TO((v0), (v3)); \
    ROTL64_TO((v1), 17); \
    ROTL64_TO((v3), 21); \
    XOR64_TO((v1), (v2)); \
    XOR64_TO((v3), (v0)); \
    ROTL64_TO((v2), 32); \
} while(0)
184 #if SIP_HASH_STREAMING
185 static void
186 int_sip_dump(sip_state *state)
188 int v;
190 for (v = 0; v < 4; v++) {
191 #ifdef HAVE_UINT64_T
192 printf("v%d: %" PRIx64 "\n", v, state->v[v]);
193 #else
194 printf("v%d: %" PRIx32 "%.8" PRIx32 "\n", v, state->v[v].hi, state->v[v].lo);
195 #endif
199 static void
200 int_sip_init(sip_state *state, const uint8_t key[16])
202 uint64_t k0, k1;
204 k0 = U8TO64_LE(key);
205 k1 = U8TO64_LE(key + sizeof(uint64_t));
207 state->v[0] = k0; XOR64_TO(state->v[0], sip_init_state[0]);
208 state->v[1] = k1; XOR64_TO(state->v[1], sip_init_state[1]);
209 state->v[2] = k0; XOR64_TO(state->v[2], sip_init_state[2]);
210 state->v[3] = k1; XOR64_TO(state->v[3], sip_init_state[3]);
213 static inline void
214 int_sip_round(sip_state *state, int n)
216 int i;
218 for (i = 0; i < n; i++) {
219 SIP_COMPRESS(state->v[0], state->v[1], state->v[2], state->v[3]);
223 static inline void
224 int_sip_update_block(sip_state *state, uint64_t m)
226 XOR64_TO(state->v[3], m);
227 int_sip_round(state, state->c);
228 XOR64_TO(state->v[0], m);
231 static inline void
232 int_sip_pre_update(sip_state *state, const uint8_t **pdata, size_t *plen)
234 int to_read;
235 uint64_t m;
237 if (!state->buflen) return;
239 to_read = sizeof(uint64_t) - state->buflen;
240 memcpy(state->buf + state->buflen, *pdata, to_read);
241 m = U8TO64_LE(state->buf);
242 int_sip_update_block(state, m);
243 *pdata += to_read;
244 *plen -= to_read;
245 state->buflen = 0;
248 static inline void
249 int_sip_post_update(sip_state *state, const uint8_t *data, size_t len)
251 uint8_t r = len % sizeof(uint64_t);
252 if (r) {
253 memcpy(state->buf, data + len - r, r);
254 state->buflen = r;
258 static void
259 int_sip_update(sip_state *state, const uint8_t *data, size_t len)
261 uint64_t *end;
262 uint64_t *data64;
264 state->msglen_byte = state->msglen_byte + (len % 256);
265 data64 = (uint64_t *) data;
267 int_sip_pre_update(state, &data, &len);
269 end = data64 + (len / sizeof(uint64_t));
271 #if BYTE_ORDER == LITTLE_ENDIAN
272 while (data64 != end) {
273 int_sip_update_block(state, *data64++);
275 #elif BYTE_ORDER == BIG_ENDIAN
277 uint64_t m;
278 uint8_t *data8 = data;
279 for (; data8 != (uint8_t *) end; data8 += sizeof(uint64_t)) {
280 m = U8TO64_LE(data8);
281 int_sip_update_block(state, m);
284 #endif
286 int_sip_post_update(state, data, len);
289 static inline void
290 int_sip_pad_final_block(sip_state *state)
292 int i;
293 /* pad with 0's and finalize with msg_len mod 256 */
294 for (i = state->buflen; i < sizeof(uint64_t); i++) {
295 state->buf[i] = 0x00;
297 state->buf[sizeof(uint64_t) - 1] = state->msglen_byte;
300 static void
301 int_sip_final(sip_state *state, uint64_t *digest)
303 uint64_t m;
305 int_sip_pad_final_block(state);
307 m = U8TO64_LE(state->buf);
308 int_sip_update_block(state, m);
310 XOR64_INT(state->v[2], 0xff);
312 int_sip_round(state, state->d);
314 *digest = state->v[0];
315 XOR64_TO(*digest, state->v[1]);
316 XOR64_TO(*digest, state->v[2]);
317 XOR64_TO(*digest, state->v[3]);
320 sip_hash *
321 sip_hash_new(const uint8_t key[16], int c, int d)
323 sip_hash *h = NULL;
325 if (!(h = (sip_hash *) malloc(sizeof(sip_hash)))) return NULL;
326 return sip_hash_init(h, key, c, d);
329 sip_hash *
330 sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d)
332 h->state->c = c;
333 h->state->d = d;
334 h->state->buflen = 0;
335 h->state->msglen_byte = 0;
336 h->methods = &sip_methods;
337 h->methods->init(h->state, key);
338 return h;
342 sip_hash_update(sip_hash *h, const uint8_t *msg, size_t len)
344 h->methods->update(h->state, msg, len);
345 return 1;
349 sip_hash_final(sip_hash *h, uint8_t **digest, size_t* len)
351 uint64_t digest64;
352 uint8_t *ret;
354 h->methods->final(h->state, &digest64);
355 if (!(ret = (uint8_t *)malloc(sizeof(uint64_t)))) return 0;
356 U64TO8_LE(ret, digest64);
357 *len = sizeof(uint64_t);
358 *digest = ret;
360 return 1;
364 sip_hash_final_integer(sip_hash *h, uint64_t *digest)
366 h->methods->final(h->state, digest);
367 return 1;
371 sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len)
373 if (!sip_hash_update(h, data, data_len)) return 0;
374 return sip_hash_final(h, digest, digest_len);
378 sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest)
380 if (!sip_hash_update(h, data, data_len)) return 0;
381 return sip_hash_final_integer(h, digest);
384 void
385 sip_hash_free(sip_hash *h)
387 free(h);
390 void
391 sip_hash_dump(sip_hash *h)
393 int_sip_dump(h->state);
395 #endif /* SIP_HASH_STREAMING */
/* One full message injection: v3 ^= m, one compression round, v0 ^= m.
 * Used by the one-shot sip_hash13 below (c = 1). */
#define SIP_ROUND(m, v0, v1, v2, v3) \
do { \
    XOR64_TO((v3), (m)); \
    SIP_COMPRESS(v0, v1, v2, v3); \
    XOR64_TO((v0), (m)); \
} while (0)
404 uint64_t
405 sip_hash13(const uint8_t key[16], const uint8_t *data, size_t len)
407 uint64_t k0, k1;
408 uint64_t v0, v1, v2, v3;
409 uint64_t m, last;
410 const uint8_t *end = data + len - (len % sizeof(uint64_t));
412 k0 = U8TO64_LE(key);
413 k1 = U8TO64_LE(key + sizeof(uint64_t));
415 v0 = k0; XOR64_TO(v0, sip_init_state[0]);
416 v1 = k1; XOR64_TO(v1, sip_init_state[1]);
417 v2 = k0; XOR64_TO(v2, sip_init_state[2]);
418 v3 = k1; XOR64_TO(v3, sip_init_state[3]);
420 #if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
422 uint64_t *data64 = (uint64_t *)data;
423 while (data64 != (uint64_t *) end) {
424 m = *data64++;
425 SIP_ROUND(m, v0, v1, v2, v3);
428 #else
429 for (; data != end; data += sizeof(uint64_t)) {
430 m = U8TO64_LE(data);
431 SIP_ROUND(m, v0, v1, v2, v3);
433 #endif
435 #ifdef HAVE_UINT64_T
436 last = (uint64_t)len << 56;
437 #define OR_BYTE(n) (last |= ((uint64_t) end[n]) << ((n) * 8))
438 #else
439 last.hi = len << 24;
440 last.lo = 0;
441 #define OR_BYTE(n) do { \
442 if (n >= 4) \
443 last.hi |= ((uint32_t) end[n]) << ((n) >= 4 ? (n) * 8 - 32 : 0); \
444 else \
445 last.lo |= ((uint32_t) end[n]) << ((n) >= 4 ? 0 : (n) * 8); \
446 } while (0)
447 #endif
449 switch (len % sizeof(uint64_t)) {
450 case 7:
451 OR_BYTE(6);
452 case 6:
453 OR_BYTE(5);
454 case 5:
455 OR_BYTE(4);
456 case 4:
457 #if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
458 #ifdef HAVE_UINT64_T
459 last |= (uint64_t) ((uint32_t *) end)[0];
460 #else
461 last.lo |= ((uint32_t *) end)[0];
462 #endif
463 break;
464 #else
465 OR_BYTE(3);
466 #endif
467 case 3:
468 OR_BYTE(2);
469 case 2:
470 OR_BYTE(1);
471 case 1:
472 OR_BYTE(0);
473 break;
474 case 0:
475 break;
478 SIP_ROUND(last, v0, v1, v2, v3);
480 XOR64_INT(v2, 0xff);
482 SIP_COMPRESS(v0, v1, v2, v3);
483 SIP_COMPRESS(v0, v1, v2, v3);
484 SIP_COMPRESS(v0, v1, v2, v3);
486 XOR64_TO(v0, v1);
487 XOR64_TO(v0, v2);
488 XOR64_TO(v0, v3);
489 return v0;