/*
 * Generic implementation of hash-based key value mappings.
 */

#define FNV32_BASE ((unsigned int) 0x811c9dc5)
#define FNV32_PRIME ((unsigned int) 0x01000193)

unsigned int strhash(const char *str)
{
	unsigned int c, hash = FNV32_BASE;
	while ((c = (unsigned char) *str++))
		hash = (hash * FNV32_PRIME) ^ c;
	return hash;
}

unsigned int strihash(const char *str)
{
	unsigned int c, hash = FNV32_BASE;
	while ((c = (unsigned char) *str++)) {
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

unsigned int memhash(const void *buf, size_t len)
{
	unsigned int hash = FNV32_BASE;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

unsigned int memihash(const void *buf, size_t len)
{
	unsigned int hash = FNV32_BASE;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

/*
 * Incorporate another chunk of data into a memihash
 * computation.
 */
unsigned int memihash_cont(unsigned int hash_seed, const void *buf, size_t len)
{
	unsigned int hash = hash_seed;
	unsigned char *ucbuf = (unsigned char *) buf;
	while (len--) {
		unsigned int c = *ucbuf++;
		if (c >= 'a' && c <= 'z')
			c -= 'a' - 'A';
		hash = (hash * FNV32_PRIME) ^ c;
	}
	return hash;
}

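/*
 * Illustrative note: memihash() starts from FNV32_BASE and memihash_cont()
 * continues from an arbitrary seed, so hashing a buffer in chunks gives the
 * same result as hashing it in a single call, e.g.
 *
 *	memihash("FooBar", 6) == memihash_cont(memihash("Foo", 3), "Bar", 3)
 */
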
#define HASHMAP_INITIAL_SIZE 64
/* grow / shrink by 2^2 */
#define HASHMAP_RESIZE_BITS 2
/* load factor in percent */
#define HASHMAP_LOAD_FACTOR 80

static void alloc_table(struct hashmap *map, unsigned int size)
{
	map->tablesize = size;
	CALLOC_ARRAY(map->table, size);

	/* calculate resize thresholds for new size */
	map->grow_at = (unsigned int) ((uint64_t) size * HASHMAP_LOAD_FACTOR / 100);
	if (size <= HASHMAP_INITIAL_SIZE)
		map->shrink_at = 0;
	else
		/*
		 * The shrink-threshold must be slightly smaller than
		 * (grow-threshold / resize-factor) to prevent erratic resizing,
		 * thus we divide by (resize-factor + 1).
		 */
		map->shrink_at = map->grow_at / ((1 << HASHMAP_RESIZE_BITS) + 1);
}

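/*
 * Worked example (illustrative): with HASHMAP_INITIAL_SIZE 64 and
 * HASHMAP_LOAD_FACTOR 80, the initial table is grown once it holds more
 * than 64 * 80 / 100 = 51 entries. After growing to 64 << 2 = 256 buckets,
 * grow_at becomes 256 * 80 / 100 = 204 and shrink_at becomes
 * 204 / ((1 << 2) + 1) = 40, safely below 204 / 4 = 51.
 */
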
static inline int entry_equals(const struct hashmap *map,
			       const struct hashmap_entry *e1,
			       const struct hashmap_entry *e2,
			       const void *keydata)
{
	return (e1 == e2) ||
	       (e1->hash == e2->hash &&
		!map->cmpfn(map->cmpfn_data, e1, e2, keydata));
}

static inline unsigned int bucket(const struct hashmap *map,
				  const struct hashmap_entry *key)
{
	return key->hash & (map->tablesize - 1);
}

int hashmap_bucket(const struct hashmap *map, unsigned int hash)
{
	return hash & (map->tablesize - 1);
}

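/*
 * Note (illustrative): tablesize is always a power of two (it starts at
 * HASHMAP_INITIAL_SIZE and is only ever shifted by HASHMAP_RESIZE_BITS),
 * so masking with (tablesize - 1) is equivalent to hash % tablesize.
 */
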
static void rehash(struct hashmap *map, unsigned int newsize)
{
	/* map->table MUST NOT be NULL when this function is called */
	unsigned int i, oldsize = map->tablesize;
	struct hashmap_entry **oldtable = map->table;

	alloc_table(map, newsize);
	for (i = 0; i < oldsize; i++) {
		struct hashmap_entry *e = oldtable[i];
		while (e) {
			struct hashmap_entry *next = e->next;
			unsigned int b = bucket(map, e);
			e->next = map->table[b];
			map->table[b] = e;
			e = next;
		}
	}
	free(oldtable);
}

static inline struct hashmap_entry **find_entry_ptr(const struct hashmap *map,
		const struct hashmap_entry *key, const void *keydata)
{
	/* map->table MUST NOT be NULL when this function is called */
	struct hashmap_entry **e = &map->table[bucket(map, key)];
	while (*e && !entry_equals(map, *e, key, keydata))
		e = &(*e)->next;
	return e;
}

static int always_equal(const void *unused_cmp_data,
			const struct hashmap_entry *unused1,
			const struct hashmap_entry *unused2,
			const void *unused_keydata)
{
	return 0;
}

void hashmap_init(struct hashmap *map, hashmap_cmp_fn equals_function,
		  const void *cmpfn_data, size_t initial_size)
{
	unsigned int size = HASHMAP_INITIAL_SIZE;

	memset(map, 0, sizeof(*map));

	map->cmpfn = equals_function ? equals_function : always_equal;
	map->cmpfn_data = cmpfn_data;

	/* calculate initial table size and allocate the table */
	initial_size = (unsigned int) ((uint64_t) initial_size * 100
			/ HASHMAP_LOAD_FACTOR);
	while (initial_size > size)
		size <<= HASHMAP_RESIZE_BITS;
	alloc_table(map, size);

	/*
	 * Keep track of the number of items in the map and
	 * allow the map to automatically grow as necessary.
	 */
	map->do_count_items = 1;
}

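/*
 * Usage sketch (illustrative; "struct foo", "f" and "foo_cmp" are
 * placeholder names). Callers embed a hashmap_entry in their own struct
 * and supply a comparison callback, e.g.
 *
 *	struct foo {
 *		struct hashmap_entry ent;
 *		char key[FLEX_ARRAY];
 *	};
 *
 *	hashmap_init(&map, foo_cmp, NULL, 0);
 *	hashmap_entry_init(&f->ent, strhash(f->key));
 *	hashmap_add(&map, &f->ent);
 *
 * memintern() at the end of this file follows the same pattern.
 */
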
static void free_individual_entries(struct hashmap *map, ssize_t entry_offset)
{
	struct hashmap_iter iter;
	struct hashmap_entry *e;

	hashmap_iter_init(map, &iter);
	while ((e = hashmap_iter_next(&iter)))
		/*
		 * like container_of, but using caller-calculated
		 * offset (caller being hashmap_clear_and_free)
		 */
		free((char *)e - entry_offset);
}

void hashmap_partial_clear_(struct hashmap *map, ssize_t entry_offset)
{
	if (!map || !map->table)
		return;
	if (entry_offset >= 0) /* called by hashmap_clear_entries */
		free_individual_entries(map, entry_offset);
	memset(map->table, 0, map->tablesize * sizeof(struct hashmap_entry *));
	map->private_size = 0;
}

void hashmap_clear_(struct hashmap *map, ssize_t entry_offset)
{
	if (!map || !map->table)
		return;
	if (entry_offset >= 0) /* called by hashmap_clear_and_free */
		free_individual_entries(map, entry_offset);
	free(map->table);
	memset(map, 0, sizeof(*map));
}

struct hashmap_entry *hashmap_get(const struct hashmap *map,
				  const struct hashmap_entry *key,
				  const void *keydata)
{
	if (!map->table)
		return NULL;
	return *find_entry_ptr(map, key, keydata);
}

struct hashmap_entry *hashmap_get_next(const struct hashmap *map,
				       const struct hashmap_entry *entry)
{
	struct hashmap_entry *e = entry->next;
	for (; e; e = e->next)
		if (entry_equals(map, entry, e, NULL))
			return e;
	return NULL;
}

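/*
 * Illustrative lookup loop: when a map holds multiple entries that compare
 * equal, all of them can be visited with hashmap_get() followed by
 * hashmap_get_next(), e.g.
 *
 *	for (e = hashmap_get(map, &key, NULL); e; e = hashmap_get_next(map, e))
 *		... use e ...
 */
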
void hashmap_add(struct hashmap *map, struct hashmap_entry *entry)
{
	unsigned int b;

	if (!map->table)
		alloc_table(map, HASHMAP_INITIAL_SIZE);

	b = bucket(map, entry);
	/* add entry */
	entry->next = map->table[b];
	map->table[b] = entry;

	/* fix size and rehash if appropriate */
	if (map->do_count_items) {
		map->private_size++;
		if (map->private_size > map->grow_at)
			rehash(map, map->tablesize << HASHMAP_RESIZE_BITS);
	}
}

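/*
 * Note (illustrative): hashmap_add() never looks for an existing entry
 * with the same key, so it can store duplicates; hashmap_put() below
 * removes a matching entry first and thus gives add-or-replace semantics.
 */
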
struct hashmap_entry *hashmap_remove(struct hashmap *map,
				     const struct hashmap_entry *key,
				     const void *keydata)
{
	struct hashmap_entry *old;
	struct hashmap_entry **e;

	if (!map->table)
		return NULL;
	e = find_entry_ptr(map, key, keydata);
	if (!*e)
		return NULL;

	/* remove existing entry */
	old = *e;
	*e = old->next;
	old->next = NULL;

	/* fix size and rehash if appropriate */
	if (map->do_count_items) {
		map->private_size--;
		if (map->private_size < map->shrink_at)
			rehash(map, map->tablesize >> HASHMAP_RESIZE_BITS);
	}

	return old;
}

struct hashmap_entry *hashmap_put(struct hashmap *map,
				  struct hashmap_entry *entry)
{
	struct hashmap_entry *old = hashmap_remove(map, entry, NULL);
	hashmap_add(map, entry);
	return old;
}

void hashmap_iter_init(struct hashmap *map, struct hashmap_iter *iter)
{
	iter->map = map;
	iter->tablepos = 0;
	iter->next = NULL;
}

struct hashmap_entry *hashmap_iter_next(struct hashmap_iter *iter)
{
	struct hashmap_entry *current = iter->next;
	for (;;) {
		if (current) {
			iter->next = current->next;
			return current;
		}

		if (iter->tablepos >= iter->map->tablesize)
			return NULL;

		current = iter->map->table[iter->tablepos++];
	}
}

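/*
 * Illustrative iteration pattern (the same one free_individual_entries()
 * above relies on):
 *
 *	hashmap_iter_init(map, &iter);
 *	while ((e = hashmap_iter_next(&iter)))
 *		... use e ...
 */
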
struct pool_entry {
	struct hashmap_entry ent;
	size_t len;
	unsigned char data[FLEX_ARRAY];
};

static int pool_entry_cmp(const void *unused_cmp_data,
			  const struct hashmap_entry *eptr,
			  const struct hashmap_entry *entry_or_key,
			  const void *keydata)
{
	const struct pool_entry *e1, *e2;

	e1 = container_of(eptr, const struct pool_entry, ent);
	e2 = container_of(entry_or_key, const struct pool_entry, ent);

	return e1->data != keydata &&
	       (e1->len != e2->len || memcmp(e1->data, keydata, e1->len));
}

const void *memintern(const void *data, size_t len)
{
	static struct hashmap map;
	struct pool_entry key, *e;

	/* initialize string pool hashmap */
	if (!map.tablesize)
		hashmap_init(&map, pool_entry_cmp, NULL, 0);

	/* lookup interned string in pool */
	hashmap_entry_init(&key.ent, memhash(data, len));
	key.len = len;
	e = hashmap_get_entry(&map, &key, ent, data);
	if (!e) {
		/* not found: create it */
		FLEX_ALLOC_MEM(e, data, data, len);
		hashmap_entry_init(&e->ent, key.ent.hash);
		e->len = len;
		hashmap_add(&map, &e->ent);
	}
	return e->data;
}
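
/*
 * Illustrative note: memintern() returns a pointer into the static pool,
 * so interning the same bytes twice yields the same pointer:
 *
 *	const void *a = memintern("abc", 3);
 *	const void *b = memintern("abc", 3);
 *	// a == b
 */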