[ruby/win32ole] Undefine allocator of WIN32OLE_VARIABLE to get rid of warning
[ruby-80x24.org.git] / siphash.c
blob091376747f7fa3a8ba3dfa4115eca4d29be9b9b2
1 #include <string.h>
2 #include <stdio.h>
3 #include "siphash.h"
4 #ifndef SIP_HASH_STREAMING
5 #define SIP_HASH_STREAMING 1
6 #endif
8 #if defined(__MINGW32__)
9 #include <sys/param.h>
11 /* MinGW only defines LITTLE_ENDIAN and BIG_ENDIAN macros */
12 #define __LITTLE_ENDIAN LITTLE_ENDIAN
13 #define __BIG_ENDIAN BIG_ENDIAN
14 #elif defined(_WIN32)
15 #define BYTE_ORDER __LITTLE_ENDIAN
16 #elif !defined(BYTE_ORDER)
17 #include <endian.h>
18 #endif
20 #ifndef LITTLE_ENDIAN
21 #define LITTLE_ENDIAN __LITTLE_ENDIAN
22 #endif
23 #ifndef BIG_ENDIAN
24 #define BIG_ENDIAN __BIG_ENDIAN
25 #endif
27 #if BYTE_ORDER == LITTLE_ENDIAN
28 #define lo u32[0]
29 #define hi u32[1]
30 #elif BYTE_ORDER == BIG_ENDIAN
31 #define hi u32[0]
32 #define lo u32[1]
33 #else
34 #error "Only strictly little or big endian supported"
35 #endif
37 #ifndef UNALIGNED_WORD_ACCESS
38 # if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
39 defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \
40 defined(__powerpc64__) || defined(__aarch64__) || \
41 defined(__mc68020__)
42 # define UNALIGNED_WORD_ACCESS 1
43 # endif
44 #endif
45 #ifndef UNALIGNED_WORD_ACCESS
46 # define UNALIGNED_WORD_ACCESS 0
47 #endif
/* Load 4 bytes as a little-endian uint32_t, byte by byte (alignment- and
   endian-safe). */
#define U8TO32_LE(p)                                                \
    (((uint32_t)((p)[0])      ) | ((uint32_t)((p)[1]) <<  8) |      \
     ((uint32_t)((p)[2]) << 16) | ((uint32_t)((p)[3]) << 24))

/* Store v into p[0..3] in little-endian byte order. */
#define U32TO8_LE(p, v)             \
do {                                \
    (p)[0] = (uint8_t)((v)      );  \
    (p)[1] = (uint8_t)((v) >>  8);  \
    (p)[2] = (uint8_t)((v) >> 16);  \
    (p)[3] = (uint8_t)((v) >> 24);  \
} while (0)
61 #ifdef HAVE_UINT64_T
62 #define U8TO64_LE(p) \
63 ((uint64_t)U8TO32_LE(p) | ((uint64_t)U8TO32_LE((p) + 4)) << 32 )
65 #define U64TO8_LE(p, v) \
66 do { \
67 U32TO8_LE((p), (uint32_t)((v) )); \
68 U32TO8_LE((p) + 4, (uint32_t)((v) >> 32)); \
69 } while (0)
71 #define ROTL64(v, s) \
72 ((v) << (s)) | ((v) >> (64 - (s)))
74 #define ROTL64_TO(v, s) ((v) = ROTL64((v), (s)))
76 #define ADD64_TO(v, s) ((v) += (s))
77 #define XOR64_TO(v, s) ((v) ^= (s))
78 #define XOR64_INT(v, x) ((v) ^= (x))
79 #else
80 #define U8TO64_LE(p) u8to64_le(p)
81 static inline uint64_t
82 u8to64_le(const uint8_t *p)
84 uint64_t ret;
85 ret.lo = U8TO32_LE(p);
86 ret.hi = U8TO32_LE(p + 4);
87 return ret;
90 #define U64TO8_LE(p, v) u64to8_le(p, v)
91 static inline void
92 u64to8_le(uint8_t *p, uint64_t v)
94 U32TO8_LE(p, v.lo);
95 U32TO8_LE(p + 4, v.hi);
98 #define ROTL64_TO(v, s) ((s) > 32 ? rotl64_swap(rotl64_to(&(v), (s) - 32)) : \
99 (s) == 32 ? rotl64_swap(&(v)) : rotl64_to(&(v), (s)))
100 static inline uint64_t *
101 rotl64_to(uint64_t *v, unsigned int s)
103 uint32_t uhi = (v->hi << s) | (v->lo >> (32 - s));
104 uint32_t ulo = (v->lo << s) | (v->hi >> (32 - s));
105 v->hi = uhi;
106 v->lo = ulo;
107 return v;
110 static inline uint64_t *
111 rotl64_swap(uint64_t *v)
113 uint32_t t = v->lo;
114 v->lo = v->hi;
115 v->hi = t;
116 return v;
119 #define ADD64_TO(v, s) add64_to(&(v), (s))
120 static inline uint64_t *
121 add64_to(uint64_t *v, const uint64_t s)
123 v->lo += s.lo;
124 v->hi += s.hi;
125 if (v->lo < s.lo) v->hi++;
126 return v;
129 #define XOR64_TO(v, s) xor64_to(&(v), (s))
130 static inline uint64_t *
131 xor64_to(uint64_t *v, const uint64_t s)
133 v->lo ^= s.lo;
134 v->hi ^= s.hi;
135 return v;
138 #define XOR64_INT(v, x) ((v).lo ^= (x))
139 #endif
/* SipHash initialization constants "somepseudorandomlygeneratedbytes",
   stored byte-reversed per 8-byte word so that reading them as
   little-endian u64 words yields the canonical v0..v3 init values. */
static const union {
    char bin[32];
    uint64_t u64[4];
} sip_init_state_bin = {"uespemos""modnarod""arenegyl""setybdet"};
#define sip_init_state sip_init_state_bin.u64
#if SIP_HASH_STREAMING
/* Virtual method table for the streaming interface; sip_hash holds a
   pointer to one of these (see siphash.h). */
struct sip_interface_st {
    void (*init)(sip_state *s, const uint8_t *key);
    void (*update)(sip_state *s, const uint8_t *data, size_t len);
    void (*final)(sip_state *s, uint64_t *digest);
};

static void int_sip_init(sip_state *state, const uint8_t *key);
static void int_sip_update(sip_state *state, const uint8_t *data, size_t len);
static void int_sip_final(sip_state *state, uint64_t *digest);

/* The one concrete implementation used by sip_hash_init(). */
static const sip_interface sip_methods = {
    int_sip_init,
    int_sip_update,
    int_sip_final
};
#endif /* SIP_HASH_STREAMING */
/* One SipRound: the ARX (add-rotate-xor) mixing step applied to the four
   state words, written in terms of the 64-bit primitives so it works for
   both native and emulated uint64_t. */
#define SIP_COMPRESS(v0, v1, v2, v3) \
do {                                 \
    ADD64_TO((v0), (v1));            \
    ADD64_TO((v2), (v3));            \
    ROTL64_TO((v1), 13);             \
    ROTL64_TO((v3), 16);             \
    XOR64_TO((v1), (v0));            \
    XOR64_TO((v3), (v2));            \
    ROTL64_TO((v0), 32);             \
    ADD64_TO((v2), (v1));            \
    ADD64_TO((v0), (v3));            \
    ROTL64_TO((v1), 17);             \
    ROTL64_TO((v3), 21);             \
    XOR64_TO((v1), (v2));            \
    XOR64_TO((v3), (v0));            \
    ROTL64_TO((v2), 32);             \
} while(0)
183 #if SIP_HASH_STREAMING
184 static void
185 int_sip_dump(sip_state *state)
187 int v;
189 for (v = 0; v < 4; v++) {
190 #ifdef HAVE_UINT64_T
191 printf("v%d: %" PRIx64 "\n", v, state->v[v]);
192 #else
193 printf("v%d: %" PRIx32 "%.8" PRIx32 "\n", v, state->v[v].hi, state->v[v].lo);
194 #endif
198 static void
199 int_sip_init(sip_state *state, const uint8_t key[16])
201 uint64_t k0, k1;
203 k0 = U8TO64_LE(key);
204 k1 = U8TO64_LE(key + sizeof(uint64_t));
206 state->v[0] = k0; XOR64_TO(state->v[0], sip_init_state[0]);
207 state->v[1] = k1; XOR64_TO(state->v[1], sip_init_state[1]);
208 state->v[2] = k0; XOR64_TO(state->v[2], sip_init_state[2]);
209 state->v[3] = k1; XOR64_TO(state->v[3], sip_init_state[3]);
212 static inline void
213 int_sip_round(sip_state *state, int n)
215 int i;
217 for (i = 0; i < n; i++) {
218 SIP_COMPRESS(state->v[0], state->v[1], state->v[2], state->v[3]);
222 static inline void
223 int_sip_update_block(sip_state *state, uint64_t m)
225 XOR64_TO(state->v[3], m);
226 int_sip_round(state, state->c);
227 XOR64_TO(state->v[0], m);
230 static inline void
231 int_sip_pre_update(sip_state *state, const uint8_t **pdata, size_t *plen)
233 int to_read;
234 uint64_t m;
236 if (!state->buflen) return;
238 to_read = sizeof(uint64_t) - state->buflen;
239 memcpy(state->buf + state->buflen, *pdata, to_read);
240 m = U8TO64_LE(state->buf);
241 int_sip_update_block(state, m);
242 *pdata += to_read;
243 *plen -= to_read;
244 state->buflen = 0;
247 static inline void
248 int_sip_post_update(sip_state *state, const uint8_t *data, size_t len)
250 uint8_t r = len % sizeof(uint64_t);
251 if (r) {
252 memcpy(state->buf, data + len - r, r);
253 state->buflen = r;
257 static void
258 int_sip_update(sip_state *state, const uint8_t *data, size_t len)
260 uint64_t *end;
261 uint64_t *data64;
263 state->msglen_byte = state->msglen_byte + (len % 256);
264 data64 = (uint64_t *) data;
266 int_sip_pre_update(state, &data, &len);
268 end = data64 + (len / sizeof(uint64_t));
270 #if BYTE_ORDER == LITTLE_ENDIAN
271 while (data64 != end) {
272 int_sip_update_block(state, *data64++);
274 #elif BYTE_ORDER == BIG_ENDIAN
276 uint64_t m;
277 uint8_t *data8 = data;
278 for (; data8 != (uint8_t *) end; data8 += sizeof(uint64_t)) {
279 m = U8TO64_LE(data8);
280 int_sip_update_block(state, m);
283 #endif
285 int_sip_post_update(state, data, len);
288 static inline void
289 int_sip_pad_final_block(sip_state *state)
291 int i;
292 /* pad with 0's and finalize with msg_len mod 256 */
293 for (i = state->buflen; i < sizeof(uint64_t); i++) {
294 state->buf[i] = 0x00;
296 state->buf[sizeof(uint64_t) - 1] = state->msglen_byte;
299 static void
300 int_sip_final(sip_state *state, uint64_t *digest)
302 uint64_t m;
304 int_sip_pad_final_block(state);
306 m = U8TO64_LE(state->buf);
307 int_sip_update_block(state, m);
309 XOR64_INT(state->v[2], 0xff);
311 int_sip_round(state, state->d);
313 *digest = state->v[0];
314 XOR64_TO(*digest, state->v[1]);
315 XOR64_TO(*digest, state->v[2]);
316 XOR64_TO(*digest, state->v[3]);
319 sip_hash *
320 sip_hash_new(const uint8_t key[16], int c, int d)
322 sip_hash *h = NULL;
324 if (!(h = (sip_hash *) malloc(sizeof(sip_hash)))) return NULL;
325 return sip_hash_init(h, key, c, d);
328 sip_hash *
329 sip_hash_init(sip_hash *h, const uint8_t key[16], int c, int d)
331 h->state->c = c;
332 h->state->d = d;
333 h->state->buflen = 0;
334 h->state->msglen_byte = 0;
335 h->methods = &sip_methods;
336 h->methods->init(h->state, key);
337 return h;
341 sip_hash_update(sip_hash *h, const uint8_t *msg, size_t len)
343 h->methods->update(h->state, msg, len);
344 return 1;
348 sip_hash_final(sip_hash *h, uint8_t **digest, size_t* len)
350 uint64_t digest64;
351 uint8_t *ret;
353 h->methods->final(h->state, &digest64);
354 if (!(ret = (uint8_t *)malloc(sizeof(uint64_t)))) return 0;
355 U64TO8_LE(ret, digest64);
356 *len = sizeof(uint64_t);
357 *digest = ret;
359 return 1;
363 sip_hash_final_integer(sip_hash *h, uint64_t *digest)
365 h->methods->final(h->state, digest);
366 return 1;
370 sip_hash_digest(sip_hash *h, const uint8_t *data, size_t data_len, uint8_t **digest, size_t *digest_len)
372 if (!sip_hash_update(h, data, data_len)) return 0;
373 return sip_hash_final(h, digest, digest_len);
377 sip_hash_digest_integer(sip_hash *h, const uint8_t *data, size_t data_len, uint64_t *digest)
379 if (!sip_hash_update(h, data, data_len)) return 0;
380 return sip_hash_final_integer(h, digest);
383 void
384 sip_hash_free(sip_hash *h)
386 free(h);
389 void
390 sip_hash_dump(sip_hash *h)
392 int_sip_dump(h->state);
394 #endif /* SIP_HASH_STREAMING */
/* Absorb one message word m into the local state words: XOR into v3,
   one compression round, XOR into v0 (used by the one-shot sip_hash13). */
#define SIP_ROUND(m, v0, v1, v2, v3) \
do {                                 \
    XOR64_TO((v3), (m));             \
    SIP_COMPRESS(v0, v1, v2, v3);    \
    XOR64_TO((v0), (m));             \
} while (0)
403 uint64_t
404 sip_hash13(const uint8_t key[16], const uint8_t *data, size_t len)
406 uint64_t k0, k1;
407 uint64_t v0, v1, v2, v3;
408 uint64_t m, last;
409 const uint8_t *end = data + len - (len % sizeof(uint64_t));
411 k0 = U8TO64_LE(key);
412 k1 = U8TO64_LE(key + sizeof(uint64_t));
414 v0 = k0; XOR64_TO(v0, sip_init_state[0]);
415 v1 = k1; XOR64_TO(v1, sip_init_state[1]);
416 v2 = k0; XOR64_TO(v2, sip_init_state[2]);
417 v3 = k1; XOR64_TO(v3, sip_init_state[3]);
419 #if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
421 uint64_t *data64 = (uint64_t *)data;
422 while (data64 != (uint64_t *) end) {
423 m = *data64++;
424 SIP_ROUND(m, v0, v1, v2, v3);
427 #else
428 for (; data != end; data += sizeof(uint64_t)) {
429 m = U8TO64_LE(data);
430 SIP_ROUND(m, v0, v1, v2, v3);
432 #endif
434 #ifdef HAVE_UINT64_T
435 last = (uint64_t)len << 56;
436 #define OR_BYTE(n) (last |= ((uint64_t) end[n]) << ((n) * 8))
437 #else
438 last.hi = len << 24;
439 last.lo = 0;
440 #define OR_BYTE(n) do { \
441 if (n >= 4) \
442 last.hi |= ((uint32_t) end[n]) << ((n) >= 4 ? (n) * 8 - 32 : 0); \
443 else \
444 last.lo |= ((uint32_t) end[n]) << ((n) >= 4 ? 0 : (n) * 8); \
445 } while (0)
446 #endif
448 switch (len % sizeof(uint64_t)) {
449 case 7:
450 OR_BYTE(6);
451 case 6:
452 OR_BYTE(5);
453 case 5:
454 OR_BYTE(4);
455 case 4:
456 #if BYTE_ORDER == LITTLE_ENDIAN && UNALIGNED_WORD_ACCESS
457 #ifdef HAVE_UINT64_T
458 last |= (uint64_t) ((uint32_t *) end)[0];
459 #else
460 last.lo |= ((uint32_t *) end)[0];
461 #endif
462 break;
463 #else
464 OR_BYTE(3);
465 #endif
466 case 3:
467 OR_BYTE(2);
468 case 2:
469 OR_BYTE(1);
470 case 1:
471 OR_BYTE(0);
472 break;
473 case 0:
474 break;
477 SIP_ROUND(last, v0, v1, v2, v3);
479 XOR64_INT(v2, 0xff);
481 SIP_COMPRESS(v0, v1, v2, v3);
482 SIP_COMPRESS(v0, v1, v2, v3);
483 SIP_COMPRESS(v0, v1, v2, v3);
485 XOR64_TO(v0, v1);
486 XOR64_TO(v0, v2);
487 XOR64_TO(v0, v3);
488 return v0;