/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
Copyright (c) 2003-2010, Troy D. Hanson   http://uthash.sourceforge.net
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INCLUDED_SAL_ANDROID_UTHASH_H
#define INCLUDED_SAL_ANDROID_UTHASH_H

#include <string.h>   /* memcmp,strlen */
#include <stddef.h>   /* ptrdiff_t */
#include <stdlib.h>   /* exit,malloc,free (used by uthash_fatal/malloc/free) */
/* These macros use decltype or the earlier __typeof GNU extension.
   As decltype is only available in newer compilers (VS2010 or gcc 4.3+
   when compiling c++ source) this code uses whatever method is needed
   or, for VS2008 where neither is available, uses casting workarounds. */
#ifdef _MSC_VER         /* MS compiler */
#if _MSC_VER >= 1600 && defined(__cplusplus)  /* VS2010 or newer in C++ mode */
#define DECLTYPE(x) (decltype(x))
#else                   /* VS2008 or older (or VS2010 in C mode) */
#define NO_DECLTYPE
#define DECLTYPE(x)
#endif
#else                   /* GNU, Sun and other compilers */
#define DECLTYPE(x) (__typeof(x))
#endif
#ifdef NO_DECLTYPE
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  char **_da_dst = (char**)(&(dst)); \
  *_da_dst = (char*)(src); \
} while(0)
#else
#define DECLTYPE_ASSIGN(dst,src) \
do { \
  (dst) = DECLTYPE(dst)(src); \
} while(0)
#endif
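
/* Illustrative sketch (not part of the API): given a hypothetical
 *   struct user *u;
 * DECLTYPE_ASSIGN(u, ptr) expands, on compilers with __typeof/decltype, to
 *   do { (u) = (__typeof(u))(ptr); } while(0)
 * and on NO_DECLTYPE compilers to the char** workaround above, which stores
 * the raw pointer through an alias without having to name u's type. */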
/* a number of the hash functions use uint32_t which isn't defined on win32 */
#ifdef _MSC_VER
typedef unsigned int uint32_t;
#else
#include <inttypes.h>   /* uint32_t */
#endif
#define uthash_fatal(msg) exit(-1)        /* fatal error (out of memory,etc) */
#define uthash_malloc(sz) malloc(sz)      /* malloc fcn */
#define uthash_free(ptr,sz) free(ptr)     /* free fcn */

#define uthash_noexpand_fyi(tbl)          /* can be defined to log noexpand */
#define uthash_expand_fyi(tbl)            /* can be defined to log expands */
/* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS 32       /* initial number of buckets */
#define HASH_INITIAL_NUM_BUCKETS_LOG2 5   /* lg2 of initial number of buckets */
#define HASH_BKT_CAPACITY_THRESH 10       /* expand when bucket chain reaches this length */
/* calculate the element whose hash handle address is hhp */
#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
#define HASH_FIND(hh,head,keyptr,keylen,out) \
do { \
  unsigned _hf_bkt,_hf_hashv; \
  out=NULL; \
  if (head) { \
     HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \
     if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \
       HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \
                        keyptr,keylen,out); \
     } \
  } \
} while (0)
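
/* Usage sketch (illustrative; "struct user" and "users" are hypothetical):
 *   struct user { int id; UT_hash_handle hh; } *users = NULL, *u;
 *   int id = 42;
 *   HASH_FIND(hh, users, &id, sizeof(int), u);   / * u is NULL if not found * /
 */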
#ifdef HASH_BLOOM
#define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM)
#define HASH_BLOOM_BYTELEN ((HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 1:0))
#define HASH_BLOOM_MAKE(tbl) \
do { \
  (tbl)->bloom_nbits = HASH_BLOOM; \
  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
  if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
  memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
} while (0)

#define HASH_BLOOM_FREE(tbl) \
do { \
  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
} while (0)
#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8)))
#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8)))

#define HASH_BLOOM_ADD(tbl,hashv) \
  HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))

#define HASH_BLOOM_TEST(tbl,hashv) \
  HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1)))
#else
#define HASH_BLOOM_MAKE(tbl)
#define HASH_BLOOM_FREE(tbl)
#define HASH_BLOOM_ADD(tbl,hashv)
#define HASH_BLOOM_TEST(tbl,hashv) (1)
#endif
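
/* Note: the bloom filter is an optional, compile-time feature. Building the
 * app with e.g. -DHASH_BLOOM=16 allocates a 2^16-bit filter (8 kilobytes)
 * per table, so most lookup misses are rejected before walking a bucket
 * chain. Leaving HASH_BLOOM undefined compiles the filter away entirely;
 * HASH_BLOOM_TEST then always answers 1. */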
#define HASH_MAKE_TABLE(hh,head) \
do { \
  (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
                  sizeof(UT_hash_table)); \
  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
  (head)->hh.tbl->tail = &((head)->hh); \
  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
  memset((head)->hh.tbl->buckets, 0, \
          HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
  HASH_BLOOM_MAKE((head)->hh.tbl); \
  (head)->hh.tbl->signature = HASH_SIGNATURE; \
} while(0)
#define HASH_ADD(hh,head,fieldname,keylen_in,add) \
        HASH_ADD_KEYPTR(hh,head,&add->fieldname,keylen_in,add)
#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
do { \
 unsigned _ha_bkt; \
 (add)->hh.next = NULL; \
 (add)->hh.key = (char*)keyptr; \
 (add)->hh.keylen = keylen_in; \
 if (!(head)) { \
    head = (add); \
    (head)->hh.prev = NULL; \
    HASH_MAKE_TABLE(hh,head); \
 } else { \
    (head)->hh.tbl->tail->next = (add); \
    (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
    (head)->hh.tbl->tail = &((add)->hh); \
 } \
 (head)->hh.tbl->num_items++; \
 (add)->hh.tbl = (head)->hh.tbl; \
 HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \
         (add)->hh.hashv, _ha_bkt); \
 HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \
 HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \
 HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \
 HASH_FSCK(hh,head); \
} while(0)
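
/* Usage sketch (illustrative; the struct and names are hypothetical):
 *   struct item { char name[16]; UT_hash_handle hh; } *items = NULL;
 *   struct item *it = (struct item*)malloc(sizeof *it);
 *   strcpy(it->name, "alpha");
 *   HASH_ADD_KEYPTR(hh, items, it->name, strlen(it->name), it);
 * Note the key is not copied: it->name must remain valid and unchanged for
 * as long as the item stays in the hash. */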
#define HASH_TO_BKT( hashv, num_bkts, bkt ) \
do { \
  bkt = ((hashv) & ((num_bkts) - 1)); \
} while(0)
/* delete "delptr" from the hash table.
 * It performs "the usual" patch-up process on the app-order doubly-linked-list.
 * The use of _hd_hh_del below deserves special explanation.
 * These used to be expressed using (delptr) but that led to a bug
 * if someone used the same symbol for the head and deletee, like
 *  HASH_DELETE(hh,users,users);
 * We want that to work, but by changing the head (users) below
 * we were forfeiting our ability to further refer to the deletee (users)
 * in the patch-up process. Solution: use scratch space to
 * copy the deletee pointer, then the latter references are via that
 * scratch pointer rather than through the repointed (users) symbol.
 */
#define HASH_DELETE(hh,head,delptr) \
do { \
    unsigned _hd_bkt; \
    struct UT_hash_handle *_hd_hh_del; \
    if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
        uthash_free((head)->hh.tbl->buckets, \
                    (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
        HASH_BLOOM_FREE((head)->hh.tbl); \
        uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
        head = NULL; \
    } else { \
        _hd_hh_del = &((delptr)->hh); \
        if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
            (head)->hh.tbl->tail = \
                (UT_hash_handle*)((char*)((delptr)->hh.prev) + \
                (head)->hh.tbl->hho); \
        } \
        if ((delptr)->hh.prev) { \
            ((UT_hash_handle*)((char*)((delptr)->hh.prev) + \
                    (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
        } else { \
            DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
        } \
        if (_hd_hh_del->next) { \
            ((UT_hash_handle*)((char*)_hd_hh_del->next + \
                    (head)->hh.tbl->hho))->prev = \
                    _hd_hh_del->prev; \
        } \
        HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
        HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
        (head)->hh.tbl->num_items--; \
    } \
    HASH_FSCK(hh,head); \
} while (0)
/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
#define HASH_FIND_STR(head,findstr,out) \
    HASH_FIND(hh,head,findstr,strlen(findstr),out)
#define HASH_ADD_STR(head,strfield,add) \
    HASH_ADD(hh,head,strfield,strlen(add->strfield),add)
#define HASH_FIND_INT(head,findint,out) \
    HASH_FIND(hh,head,findint,sizeof(int),out)
#define HASH_ADD_INT(head,intfield,add) \
    HASH_ADD(hh,head,intfield,sizeof(int),add)
#define HASH_FIND_PTR(head,findptr,out) \
    HASH_FIND(hh,head,findptr,sizeof(void *),out)
#define HASH_ADD_PTR(head,ptrfield,add) \
    HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
#define HASH_DEL(head,delptr) \
    HASH_DELETE(hh,head,delptr)
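
/* Usage sketch for the convenience forms (names are hypothetical):
 *   struct user { int id; UT_hash_handle hh; } *users = NULL, *u;
 *   u = (struct user*)malloc(sizeof *u); u->id = 7;
 *   HASH_ADD_INT(users, id, u);         key is the int field "id"
 *   int lookup = 7;
 *   HASH_FIND_INT(users, &lookup, u);   u != NULL on success
 *   if (u) HASH_DEL(users, u);          unlinks only; freeing u is the app's job
 */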
/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
 * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined.
 */
#ifdef HASH_DEBUG
#include <stdio.h>   /* fprintf,stderr for HASH_OOPS */
#define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
#define HASH_FSCK(hh,head) \
do { \
    unsigned _bkt_i; \
    unsigned _count, _bkt_count; \
    char *_prev; \
    struct UT_hash_handle *_thh; \
    if (head) { \
        _count = 0; \
        for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
            _bkt_count = 0; \
            _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
            _prev = NULL; \
            while (_thh) { \
               if (_prev != (char*)(_thh->hh_prev)) { \
                   HASH_OOPS("invalid hh_prev %p, actual %p\n", \
                    _thh->hh_prev, _prev ); \
               } \
               _bkt_count++; \
               _prev = (char*)(_thh); \
               _thh = _thh->hh_next; \
            } \
            _count += _bkt_count; \
            if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
               HASH_OOPS("invalid bucket count %u, actual %u\n", \
                (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
            } \
        } \
        if (_count != (head)->hh.tbl->num_items) { \
            HASH_OOPS("invalid hh item count %u, actual %u\n", \
                (head)->hh.tbl->num_items, _count ); \
        } \
        /* traverse hh in app order; check next/prev integrity, count */ \
        _count = 0; \
        _prev = NULL; \
        _thh = &(head)->hh; \
        while (_thh) { \
           _count++; \
           if (_prev !=(char*)(_thh->prev)) { \
              HASH_OOPS("invalid prev %p, actual %p\n", \
                    _thh->prev, _prev ); \
           } \
           _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
           _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
                                  (head)->hh.tbl->hho) : NULL ); \
        } \
        if (_count != (head)->hh.tbl->num_items) { \
            HASH_OOPS("invalid app item count %u, actual %u\n", \
                (head)->hh.tbl->num_items, _count ); \
        } \
    } \
} while (0)
#else
#define HASH_FSCK(hh,head)
#endif
/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to
 * the file descriptor given as that macro's value, for tuning the hash function.
 * The app can #include <unistd.h> to get the prototype for write(2). */
#ifdef HASH_EMIT_KEYS
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
do { \
    unsigned _klen = fieldlen; \
    write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
    write(HASH_EMIT_KEYS, keyptr, fieldlen); \
} while (0)
#else
#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
#endif
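
/* Illustrative use (descriptor number 3 is an arbitrary choice): build the
 * app with -DHASH_EMIT_KEYS=3 and run it with descriptor 3 redirected to a
 * file, e.g.
 *   ./app 3>keys.bin
 * The resulting length-prefixed key stream can then be replayed against the
 * candidate hash functions (uthash ships a keystats utility for this) to
 * pick the best fit for the key domain. */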
/* default to Jenkins's hash unless overridden, e.g. -DHASH_FUNCTION=HASH_SAX */
#ifdef HASH_FUNCTION
#define HASH_FCN HASH_FUNCTION
#else
#define HASH_FCN HASH_JEN
#endif
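
/* For example, a build preferring the Bernstein hash over the default
 * Jenkins hash would compile with:
 *   cc -DHASH_FUNCTION=HASH_BER app.c
 * Any macro taking the (key,keylen,num_bkts,hashv,bkt) arguments used by
 * the functions below can be substituted this way. */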
/* The Bernstein hash function, used in Perl prior to v5.6 */
#define HASH_BER(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _hb_keylen=keylen; \
  char *_hb_key=(char*)(key); \
  (hashv) = 0; \
  while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \
  bkt = (hashv) & (num_bkts-1); \
} while (0)
/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
 * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
#define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _sx_i; \
  char *_hs_key=(char*)(key); \
  hashv = 0; \
  for(_sx_i=0; _sx_i < keylen; _sx_i++) \
      hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
  bkt = hashv & (num_bkts-1); \
} while (0)
#define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _fn_i; \
  char *_hf_key=(char*)(key); \
  hashv = 2166136261UL; \
  for(_fn_i=0; _fn_i < keylen; _fn_i++) \
      hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \
  bkt = hashv & (num_bkts-1); \
} while(0)
#define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _ho_i; \
  char *_ho_key=(char*)(key); \
  hashv = 0; \
  for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
      hashv += _ho_key[_ho_i]; \
      hashv += (hashv << 10); \
      hashv ^= (hashv >> 6); \
  } \
  hashv += (hashv << 3); \
  hashv ^= (hashv >> 11); \
  hashv += (hashv << 15); \
  bkt = hashv & (num_bkts-1); \
} while(0)
#define HASH_JEN_MIX(a,b,c) \
do { \
  a -= b; a -= c; a ^= ( c >> 13 ); \
  b -= c; b -= a; b ^= ( a << 8 ); \
  c -= a; c -= b; c ^= ( b >> 13 ); \
  a -= b; a -= c; a ^= ( c >> 12 ); \
  b -= c; b -= a; b ^= ( a << 16 ); \
  c -= a; c -= b; c ^= ( b >> 5 ); \
  a -= b; a -= c; a ^= ( c >> 3 ); \
  b -= c; b -= a; b ^= ( a << 10 ); \
  c -= a; c -= b; c ^= ( b >> 15 ); \
} while (0)
#define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \
do { \
  unsigned _hj_i,_hj_j,_hj_k; \
  char *_hj_key=(char*)(key); \
  hashv = 0xfeedbeef; \
  _hj_i = _hj_j = 0x9e3779b9; \
  _hj_k = keylen; \
  while (_hj_k >= 12) { \
    _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
        + ( (unsigned)_hj_key[2] << 16 ) \
        + ( (unsigned)_hj_key[3] << 24 ) ); \
    _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
        + ( (unsigned)_hj_key[6] << 16 ) \
        + ( (unsigned)_hj_key[7] << 24 ) ); \
    hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
        + ( (unsigned)_hj_key[10] << 16 ) \
        + ( (unsigned)_hj_key[11] << 24 ) ); \
    HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
    _hj_key += 12; \
    _hj_k -= 12; \
  } \
  hashv += keylen; \
  switch ( _hj_k ) { /* all the case statements fall through */ \
     case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \
     case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \
     case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \
     case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \
     case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \
     case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \
     case 5: _hj_j += _hj_key[4]; \
     case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \
     case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \
     case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \
     case 1: _hj_i += _hj_key[0]; \
  } \
  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
  bkt = hashv & (num_bkts-1); \
} while(0)
/* The Paul Hsieh hash function */
#undef get16bits
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

#if !defined (get16bits)
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
                       +(uint32_t)(((const uint8_t *)(d))[0]) )
#endif
#define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \
do { \
  char *_sfh_key=(char*)(key); \
  uint32_t _sfh_tmp, _sfh_len = keylen; \
  int _sfh_rem = _sfh_len & 3; \
  _sfh_len >>= 2; \
  hashv = 0xcafebabe; \
  /* Main loop */ \
  for (;_sfh_len > 0; _sfh_len--) { \
    hashv += get16bits (_sfh_key); \
    _sfh_tmp = (get16bits (_sfh_key+2) << 11) ^ hashv; \
    hashv = (hashv << 16) ^ _sfh_tmp; \
    _sfh_key += 2*sizeof (uint16_t); \
    hashv += hashv >> 11; \
  } \
  /* Handle end cases */ \
  switch (_sfh_rem) { \
    case 3: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 16; \
            hashv ^= _sfh_key[sizeof (uint16_t)] << 18; \
            hashv += hashv >> 11; \
            break; \
    case 2: hashv += get16bits (_sfh_key); \
            hashv ^= hashv << 11; \
            hashv += hashv >> 17; \
            break; \
    case 1: hashv += *_sfh_key; \
            hashv ^= hashv << 10; \
            hashv += hashv >> 1; \
  } \
  /* Force "avalanching" of final 127 bits */ \
  hashv ^= hashv << 3; \
  hashv += hashv >> 5; \
  hashv ^= hashv << 4; \
  hashv += hashv >> 17; \
  hashv ^= hashv << 25; \
  hashv += hashv >> 6; \
  bkt = hashv & (num_bkts-1); \
} while(0)
#ifdef HASH_USING_NO_STRICT_ALIASING
/* The MurmurHash exploits some CPU's (e.g. x86) tolerance for unaligned reads.
 * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error.
 * So MurmurHash comes in two versions, the faster unaligned one and the slower
 * aligned one. We only use the faster one on CPU's where we know it's safe.
 *
 * Note the preprocessor built-in defines can be emitted using:
 *
 *   gcc -m64 -dM -E - < /dev/null                  (on gcc)
 *   cc -## a.c (where a.c is a simple test file)   (Sun Studio)
 */
#if (defined(__i386__) || defined(__x86_64__))
#define HASH_MUR HASH_MUR_UNALIGNED
#else
#define HASH_MUR HASH_MUR_ALIGNED
#endif
/* Appleby's MurmurHash fast version for unaligned-tolerant archs like i386 */
#define HASH_MUR_UNALIGNED(key,keylen,num_bkts,hashv,bkt) \
do { \
  const unsigned int _mur_m = 0x5bd1e995; \
  const int _mur_r = 24; \
  char *_mur_key = (char *)(key); \
  uint32_t _mur_tmp, _mur_len = keylen; \
  hashv = 0xcafebabe ^ keylen; \
  for (;_mur_len >= 4; _mur_len-=4) { \
    _mur_tmp = *(uint32_t *)_mur_key; \
    _mur_tmp *= _mur_m; \
    _mur_tmp ^= _mur_tmp >> _mur_r; \
    _mur_tmp *= _mur_m; \
    hashv *= _mur_m; \
    hashv ^= _mur_tmp; \
    _mur_key += 4; \
  } \
  switch(_mur_len) { /* cases fall through */ \
    case 3: hashv ^= _mur_key[2] << 16; \
    case 2: hashv ^= _mur_key[1] << 8; \
    case 1: hashv ^= _mur_key[0]; \
            hashv *= _mur_m; \
  } \
  hashv ^= hashv >> 13; \
  hashv *= _mur_m; \
  hashv ^= hashv >> 15; \
  bkt = hashv & (num_bkts-1); \
} while(0)
/* Appleby's MurmurHash version for alignment-sensitive archs like Sparc */
#define HASH_MUR_ALIGNED(key,keylen,num_bkts,hashv,bkt) \
do { \
  const unsigned int _mur_m = 0x5bd1e995; \
  const int _mur_r = 24; \
  char *_mur_key = (char *)(key); \
  uint32_t _mur_len = keylen; \
  int _mur_align = (int)_mur_key & 3; \
  hashv = 0xcafebabe ^ (keylen); \
  if (_mur_align && (_mur_len >= 4)) { \
    unsigned _mur_t = 0, _mur_d = 0; \
    int _mur_sl, _mur_sr; \
    switch(_mur_align) { /* cases fall through */ \
      case 1: _mur_t |= _mur_key[2] << 16; \
      case 2: _mur_t |= _mur_key[1] << 8; \
      case 3: _mur_t |= _mur_key[0]; \
    } \
    _mur_t <<= (8 * _mur_align); \
    _mur_key += 4-_mur_align; \
    _mur_len -= 4-_mur_align; \
    _mur_sl = 8 * (4-_mur_align); \
    _mur_sr = 8 * _mur_align; \
    for (;_mur_len >= 4; _mur_len-=4) { \
      unsigned _mur_k; \
      _mur_d = *(unsigned *)_mur_key; \
      _mur_t = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
      _mur_k = _mur_t; \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_t = _mur_d; \
      _mur_key += 4; \
    } \
    _mur_d = 0; \
    if(_mur_len >= _mur_align) { \
      unsigned _mur_k; \
      switch(_mur_align) { /* cases fall through */ \
        case 3: _mur_d |= _mur_key[2] << 16; \
        case 2: _mur_d |= _mur_key[1] << 8; \
        case 1: _mur_d |= _mur_key[0]; \
      } \
      _mur_k = (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_key += _mur_align; /* upstream reads "_mur_k += _mur_align"; advancing the key pointer is the evident intent */ \
      _mur_len -= _mur_align; \
      switch(_mur_len) { /* cases fall through */ \
        case 3: hashv ^= _mur_key[2] << 16; \
        case 2: hashv ^= _mur_key[1] << 8; \
        case 1: hashv ^= _mur_key[0]; \
                hashv *= _mur_m; \
      } \
    } else { \
      switch(_mur_len) { /* cases fall through */ \
        case 3: _mur_d ^= _mur_key[2] << 16; \
        case 2: _mur_d ^= _mur_key[1] << 8; \
        case 1: _mur_d ^= _mur_key[0]; \
        case 0: hashv ^= (_mur_t >> _mur_sr) | (_mur_d << _mur_sl); \
                hashv *= _mur_m; \
      } \
    } \
    hashv ^= hashv >> 13; \
    hashv *= _mur_m; \
    hashv ^= hashv >> 15; \
  } else { \
    for (;_mur_len >= 4; _mur_len-=4) { \
      unsigned _mur_k = *(unsigned*)_mur_key; \
      _mur_k *= _mur_m; \
      _mur_k ^= _mur_k >> _mur_r; \
      _mur_k *= _mur_m; \
      hashv *= _mur_m; \
      hashv ^= _mur_k; \
      _mur_key += 4; \
    } \
    switch(_mur_len) { /* cases fall through */ \
      case 3: hashv ^= _mur_key[2] << 16; \
      case 2: hashv ^= _mur_key[1] << 8; \
      case 1: hashv ^= _mur_key[0]; \
              hashv *= _mur_m; \
    } \
    hashv ^= hashv >> 13; \
    hashv *= _mur_m; \
    hashv ^= hashv >> 15; \
  } \
  bkt = hashv & (num_bkts-1); \
} while(0)
#endif  /* HASH_USING_NO_STRICT_ALIASING */
/* key comparison function; return 0 if keys equal */
#define HASH_KEYCMP(a,b,len) memcmp(a,b,len)
/* iterate over items in a known bucket to find desired item */
#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \
do { \
 if (head.hh_head) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \
 else out=NULL; \
 while (out) { \
    if (out->hh.keylen == keylen_in) { \
        if ((HASH_KEYCMP(out->hh.key,keyptr,keylen_in)) == 0) break; \
    } \
    if (out->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,out->hh.hh_next)); \
    else out = NULL; \
 } \
} while(0)
/* add an item to a bucket */
#define HASH_ADD_TO_BKT(head,addhh) \
do { \
 head.count++; \
 (addhh)->hh_next = head.hh_head; \
 (addhh)->hh_prev = NULL; \
 if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \
 (head).hh_head=addhh; \
 if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \
     && (addhh)->tbl->noexpand != 1) { \
       HASH_EXPAND_BUCKETS((addhh)->tbl); \
 } \
} while(0)
/* remove an item from a given bucket */
#define HASH_DEL_IN_BKT(hh,head,hh_del) \
    (head).count--; \
    if ((head).hh_head == hh_del) { \
      (head).hh_head = hh_del->hh_next; \
    } \
    if (hh_del->hh_prev) { \
        hh_del->hh_prev->hh_next = hh_del->hh_next; \
    } \
    if (hh_del->hh_next) { \
        hh_del->hh_next->hh_prev = hh_del->hh_prev; \
    }
/* Bucket expansion has the effect of doubling the number of buckets
 * and redistributing the items into the new buckets. Ideally the
 * items will distribute more or less evenly into the new buckets
 * (the extent to which this is true is a measure of the quality of
 * the hash function as it applies to the key domain).
 *
 * With the items distributed into more buckets, the chain length
 * (item count) in each bucket is reduced. Thus by expanding buckets
 * the hash keeps a bound on the chain length. This bounded chain
 * length is the essence of how a hash provides constant time lookup.
 *
 * The calculation of tbl->ideal_chain_maxlen below deserves some
 * explanation. First, keep in mind that we're calculating the ideal
 * maximum chain length based on the *new* (doubled) bucket count.
 * In fractions this is just n/b (n=number of items,b=new num buckets).
 * Since the ideal chain length is an integer, we want to calculate
 * ceil(n/b). We don't depend on floating point arithmetic in this
 * hash, so to calculate ceil(n/b) with integers we could write
 *
 *      ceil(n/b) = (n/b) + ((n%b)?1:0)
 *
 * and in fact a previous version of this hash did just that.
 * But now we have improved things a bit by recognizing that b is
 * always a power of two. We keep its base 2 log handy (call it lb),
 * so now we can write this with a bit shift and logical AND:
 *
 *      ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
 */
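
/* Worked example (illustrative numbers): suppose n = 1000 items are being
 * rehashed into b = 64 buckets, so lb = 6. Then
 *   ceil(1000/64) = (1000 >> 6) + ((1000 & 63) ? 1 : 0) = 15 + 1 = 16,
 * matching ceil(15.625) = 16 without any floating point. */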
#define HASH_EXPAND_BUCKETS(tbl) \
do { \
    unsigned _he_bkt; \
    unsigned _he_bkt_i; \
    struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
    UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
    _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
             2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
    memset(_he_new_buckets, 0, \
            2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
    tbl->ideal_chain_maxlen = \
       (tbl->num_items >> (tbl->log2_num_buckets+1)) + \
       ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 1 : 0); \
    tbl->nonideal_items = 0; \
    for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
    { \
        _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
        while (_he_thh) { \
           _he_hh_nxt = _he_thh->hh_next; \
           HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \
           _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
           if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
             tbl->nonideal_items++; \
             _he_newbkt->expand_mult = _he_newbkt->count / \
                                        tbl->ideal_chain_maxlen; \
           } \
           _he_thh->hh_prev = NULL; \
           _he_thh->hh_next = _he_newbkt->hh_head; \
           if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \
                _he_thh; \
           _he_newbkt->hh_head = _he_thh; \
           _he_thh = _he_hh_nxt; \
        } \
    } \
    uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
    tbl->num_buckets *= 2; \
    tbl->log2_num_buckets++; \
    tbl->buckets = _he_new_buckets; \
    tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
        (tbl->ineff_expands+1) : 0; \
    if (tbl->ineff_expands > 1) { \
        tbl->noexpand=1; \
        uthash_noexpand_fyi(tbl); \
    } \
    uthash_expand_fyi(tbl); \
} while(0)
/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
/* Note that HASH_SORT assumes the hash handle name to be hh.
 * HASH_SRT was added to allow the hash handle name to be passed in. */
#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
#define HASH_SRT(hh,head,cmpfcn) \
do { \
  unsigned _hs_i; \
  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
  if (head) { \
      _hs_insize = 1; \
      _hs_looping = 1; \
      _hs_list = &((head)->hh); \
      while (_hs_looping) { \
          _hs_p = _hs_list; \
          _hs_list = NULL; \
          _hs_tail = NULL; \
          _hs_nmerges = 0; \
          while (_hs_p) { \
              _hs_nmerges++; \
              _hs_q = _hs_p; \
              _hs_psize = 0; \
              for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
                  _hs_psize++; \
                  _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                          ((void*)((char*)(_hs_q->next) + \
                          (head)->hh.tbl->hho)) : NULL); \
                  if (! (_hs_q) ) break; \
              } \
              _hs_qsize = _hs_insize; \
              while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \
                  if (_hs_psize == 0) { \
                      _hs_e = _hs_q; \
                      _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                              ((void*)((char*)(_hs_q->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_qsize--; \
                  } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \
                      _hs_e = _hs_p; \
                      _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                              ((void*)((char*)(_hs_p->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_psize--; \
                  } else if (( \
                      cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
                             DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
                             ) <= 0) { \
                      _hs_e = _hs_p; \
                      _hs_p = (UT_hash_handle*)((_hs_p->next) ? \
                              ((void*)((char*)(_hs_p->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_psize--; \
                  } else { \
                      _hs_e = _hs_q; \
                      _hs_q = (UT_hash_handle*)((_hs_q->next) ? \
                              ((void*)((char*)(_hs_q->next) + \
                              (head)->hh.tbl->hho)) : NULL); \
                      _hs_qsize--; \
                  } \
                  if ( _hs_tail ) { \
                      _hs_tail->next = ((_hs_e) ? \
                            ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
                  } else { \
                      _hs_list = _hs_e; \
                  } \
                  _hs_e->prev = ((_hs_tail) ? \
                     ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
                  _hs_tail = _hs_e; \
              } \
              _hs_p = _hs_q; \
          } \
          _hs_tail->next = NULL; \
          if ( _hs_nmerges <= 1 ) { \
              _hs_looping=0; \
              (head)->hh.tbl->tail = _hs_tail; \
              DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
          } \
          _hs_insize *= 2; \
      } \
      HASH_FSCK(hh,head); \
  } \
} while (0)
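
/* Usage sketch (illustrative; "struct user" and by_id are hypothetical):
 *   int by_id(struct user *a, struct user *b) { return a->id - b->id; }
 *   ...
 *   HASH_SORT(users, by_id);
 * cmpfcn must return <0, 0 or >0, strcmp-style. Sorting reorders only the
 * app-order prev/next list; bucket chains are untouched. */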
/* This function selects items from one hash into another hash.
 * The end result is that the selected items have dual presence
 * in both hashes. There is no copy of the items made; rather
 * they are added into the new hash through a secondary hash
 * handle that must be present in the structure. */
#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
do { \
  unsigned _src_bkt, _dst_bkt; \
  void *_last_elt=NULL, *_elt; \
  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
  if (src) { \
    for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
      for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
          _src_hh; \
          _src_hh = _src_hh->hh_next) { \
          _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
          if (cond(_elt)) { \
            _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
            _dst_hh->key = _src_hh->key; \
            _dst_hh->keylen = _src_hh->keylen; \
            _dst_hh->hashv = _src_hh->hashv; \
            _dst_hh->prev = _last_elt; \
            _dst_hh->next = NULL; \
            if (_last_elt_hh) { _last_elt_hh->next = _elt; } \
            if (!dst) { \
              DECLTYPE_ASSIGN(dst,_elt); \
              HASH_MAKE_TABLE(hh_dst,dst); \
            } else { \
              _dst_hh->tbl = (dst)->hh_dst.tbl; \
            } \
            HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
            HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
            (dst)->hh_dst.tbl->num_items++; \
            _last_elt = _elt; \
            _last_elt_hh = _dst_hh; \
          } \
      } \
    } \
  } \
  HASH_FSCK(hh_dst,dst); \
} while (0)
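
/* Usage sketch (illustrative; all names are hypothetical): a struct carrying
 * a second handle can appear in two hashes at once:
 *   struct user { int id; UT_hash_handle hh; UT_hash_handle ah; };
 *   #define is_admin(x) (((struct user*)(x))->id < 100)
 *   struct user *admins = NULL;
 *   HASH_SELECT(ah, admins, hh, users, is_admin);
 * cond is applied to each element as a void*; it must evaluate to nonzero
 * for an element to be selected into dst. */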
#define HASH_CLEAR(hh,head) \
do { \
  if (head) { \
    uthash_free((head)->hh.tbl->buckets, \
                (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
    uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
    (head)=NULL; \
  } \
} while(0)
#ifdef NO_DECLTYPE
#define HASH_ITER(hh,head,el,tmp) \
for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL); \
  el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL))
#else
#define HASH_ITER(hh,head,el,tmp) \
for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL); \
  el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL))
#endif
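
/* Usage sketch (illustrative): because tmp always holds the next element,
 * the loop survives deletion of the current one:
 *   struct user *u, *tmp;
 *   HASH_ITER(hh, users, u, tmp) {
 *     HASH_DEL(users, u);
 *     free(u);
 *   }
 */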
/* obtain a count of items in the hash */
#define HASH_COUNT(head) HASH_CNT(hh,head)
#define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0)
typedef struct UT_hash_bucket {
   struct UT_hash_handle *hh_head;
   unsigned count;

   /* expand_mult is normally set to 0. In this situation, the max chain length
    * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
    * the bucket's chain exceeds this length, bucket expansion is triggered).
    * However, setting expand_mult to a non-zero value delays bucket expansion
    * (that would be triggered by additions to this particular bucket)
    * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
    * (The multiplier is simply expand_mult+1). The whole idea of this
    * multiplier is to reduce bucket expansions, since they are expensive, in
    * situations where we know that a particular bucket tends to be overused.
    * It is better to let its chain length grow to a longer yet-still-bounded
    * value, than to do an O(n) bucket expansion too often.
    */
   unsigned expand_mult;

} UT_hash_bucket;
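
/* For example (illustrative numbers): with HASH_BKT_CAPACITY_THRESH at its
 * default of 10, a bucket whose expand_mult is 2 only triggers expansion
 * once its chain reaches (2+1)*10 = 30 items, rather than at 10. */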
/* random signature used only to find hash tables in external analysis */
#define HASH_SIGNATURE 0xa0111fe1
#define HASH_BLOOM_SIGNATURE 0xb12220f2
typedef struct UT_hash_table {
   UT_hash_bucket *buckets;
   unsigned num_buckets, log2_num_buckets;
   unsigned num_items;
   struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
   ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */

   /* in an ideal situation (all buckets used equally), no bucket would have
    * more than ceil(#items/#buckets) items. that's the ideal chain length. */
   unsigned ideal_chain_maxlen;

   /* nonideal_items is the number of items in the hash whose chain position
    * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
    * hash distribution; reaching them in a chain traversal takes >ideal steps */
   unsigned nonideal_items;

   /* ineffective expands occur when a bucket doubling was performed, but
    * afterward, more than half the items in the hash had nonideal chain
    * positions. If this happens on two consecutive expansions we inhibit any
    * further expansion, as it's not helping; this happens when the hash
    * function isn't a good fit for the key domain. When expansion is inhibited
    * the hash will still work, albeit no longer in constant time. */
   unsigned ineff_expands, noexpand;

   uint32_t signature; /* used only to find hash tables in external analysis */
#ifdef HASH_BLOOM
   uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
   uint8_t *bloom_bv;
   char bloom_nbits;
#endif

} UT_hash_table;
typedef struct UT_hash_handle {
   struct UT_hash_table *tbl;
   void *prev;                       /* prev element in app order */
   void *next;                       /* next element in app order */
   struct UT_hash_handle *hh_prev;   /* previous hh in bucket order */
   struct UT_hash_handle *hh_next;   /* next hh in bucket order */
   void *key;                        /* ptr to enclosing struct's key */
   unsigned keylen;                  /* enclosing struct's key len */
   unsigned hashv;                   /* result of hash-fcn(key) */
} UT_hash_handle;
#endif // INCLUDED_SAL_ANDROID_UTHASH_H

/* vim:set shiftwidth=4 softtabstop=4 expandtab: */