1 /*
2 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
4 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
6 * License: GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
9 * Assumptions:
10 * - NULL cannot be inserted/removed as a pointer value.
11 * - Trying to insert an already-existing hash-pointer pair is OK. However,
12 * it is not OK to insert into the same hash table different hash-pointer
13 * pairs that share the same pointer value but have different hashes.
14 * - Lookups are performed under an RCU read-critical section; removals
15 * must wait for a grace period to elapse before freeing removed objects.
17 * Features:
18 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
19 * Lookups that are concurrent with writes to the same bucket will retry
20 * via a seqlock; iterators acquire all bucket locks and therefore can be
21 * concurrent with lookups and are serialized wrt writers.
22 * - Writes (i.e. insertions/removals) can be concurrent with writes to
23 * different buckets; writes to the same bucket are serialized through a lock.
24 * - Optional auto-resizing: the hash table resizes up if the load surpasses
25 * a certain threshold. Resizing is done concurrently with readers; writes
26 * are serialized with the resize operation.
28 * The key structure is the bucket, which is cacheline-sized. Buckets
29 * contain a few hash values and pointers; the u32 hash values are stored in
30 * full so that resizing is fast. Having this structure instead of directly
31 * chaining items has two advantages:
32 * - Failed lookups fail fast, and touch a minimum number of cache lines.
33 * - Resizing the hash table with concurrent lookups is easy.
35 * There are two types of buckets:
36 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
37 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
38 * starts from a head bucket.
39 * Note that the seqlock and spinlock of a head bucket apply to all buckets
40 * chained to it; these two fields are unused in non-head buckets.
42 * On removals, we move the last valid item in the chain to the position of the
43 * just-removed entry. This makes lookups slightly faster, since the moment an
44 * invalid entry is found, the (failed) lookup is over.
46 * Resizing is done by taking all bucket spinlocks (so that no other writers can
47 * race with us) and then copying all entries into a new hash map. Then, the
48 * ht->map pointer is set, and the old map is freed once no RCU readers can see
49 * it anymore.
51 * Writers check for concurrent resizes by comparing ht->map before and after
52 * acquiring their bucket lock. If they don't match, a resize has occurred
53 * while the bucket spinlock was being acquired.
55 * Related Work:
56 * - Idea of cacheline-sized buckets with full hashes taken from:
57 * David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
58 * The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
59 * - Why not RCU-based hash tables? They would allow us to get rid of the
60 * seqlock, but resizing would take forever since RCU read critical
61 * sections in QEMU take quite a long time.
62 * More info on relativistic hash tables:
63 * + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
64 * Tables via Relativistic Programming", USENIX ATC'11.
65 * + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
66 * https://lwn.net/Articles/612021/
68 #include "qemu/osdep.h"
69 #include "qemu/qht.h"
70 #include "qemu/atomic.h"
71 #include "qemu/rcu.h"
72 #include "qemu/memalign.h"
74 //#define QHT_DEBUG
77 * We want to avoid false sharing of cache lines. Most systems have 64-byte
78 * cache lines so we go with it for simplicity.
80 * Note that systems with smaller cache lines will be fine (the struct is
81 * almost 64 bytes); systems with larger cache lines might suffer from
82 * some false sharing.
84 #define QHT_BUCKET_ALIGN 64
86 /* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
87 #if HOST_LONG_BITS == 32
88 #define QHT_BUCKET_ENTRIES 6
89 #else /* 64-bit */
90 #define QHT_BUCKET_ENTRIES 4
91 #endif
93 enum qht_iter_type {
94 QHT_ITER_VOID, /* do nothing; use retvoid */
95 QHT_ITER_RM, /* remove element if retbool returns true */
98 struct qht_iter {
99 union {
100 qht_iter_func_t retvoid;
101 qht_iter_bool_func_t retbool;
102 } f;
103 enum qht_iter_type type;
107 * Do _not_ use qemu_mutex_[try]lock directly! Use these macros, otherwise
108 * the profiler (QSP) will deadlock.
110 static inline void qht_lock(struct qht *ht)
112 if (ht->mode & QHT_MODE_RAW_MUTEXES) {
113 qemu_mutex_lock__raw(&ht->lock);
114 } else {
115 qemu_mutex_lock(&ht->lock);
119 static inline int qht_trylock(struct qht *ht)
121 if (ht->mode & QHT_MODE_RAW_MUTEXES) {
122 return qemu_mutex_trylock__raw(&ht->lock);
124 return qemu_mutex_trylock(&ht->lock);
127 /* this inline is not really necessary, but it helps keep code consistent */
128 static inline void qht_unlock(struct qht *ht)
130 qemu_mutex_unlock(&ht->lock);
134 * Note: reading partially-updated pointers in @pointers could lead to
135 * segfaults. We thus access them with qatomic_read/set; this guarantees
136 * that the compiler makes all those accesses atomic. We also need the
137 * volatile-like behavior in qatomic_read, since otherwise the compiler
138 * might refetch the pointer.
139 * qatomic_read's are of course not necessary when the bucket lock is held.
141 * If both ht->lock and b->lock are grabbed, ht->lock should always
142 * be grabbed first.
144 struct qht_bucket {
145 QemuSpin lock;
146 QemuSeqLock sequence;
147 uint32_t hashes[QHT_BUCKET_ENTRIES];
148 void *pointers[QHT_BUCKET_ENTRIES];
149 struct qht_bucket *next;
150 } QEMU_ALIGNED(QHT_BUCKET_ALIGN);
152 QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
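/*
 * Size-budget sketch (assuming 4-byte QemuSpin and QemuSeqLock): on a 64-bit
 * host the fields add up to exactly one 64-byte line,
 *   4 (lock) + 4 (sequence) + 4*4 (hashes) + 4*8 (pointers) + 8 (next) = 64;
 * on a 32-bit host, 6 entries give 4 + 4 + 6*4 + 6*4 + 4 = 60 bytes.
 * The component-wise check below is a weaker, padding-free restatement of the
 * struct-level QEMU_BUILD_BUG_ON above, kept out of the build with #if 0.
 */
#if 0
QEMU_BUILD_BUG_ON(sizeof(QemuSpin) + sizeof(QemuSeqLock) +
                  QHT_BUCKET_ENTRIES * (sizeof(uint32_t) + sizeof(void *)) +
                  sizeof(void *) > QHT_BUCKET_ALIGN);
#endif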
155 * Under TSAN, we use striped locks instead of one lock per bucket chain.
156 * This avoids crashing under TSAN, since TSAN aborts the program if more than
157 * 64 locks are held (this is a hardcoded limit in TSAN).
158 * When resizing a QHT we grab all the buckets' locks, which can easily
159 * go over TSAN's limit. By using striped locks, we avoid this problem.
161 * Note: this number must be a power of two for easy index computation.
163 #define QHT_TSAN_BUCKET_LOCKS_BITS 4
164 #define QHT_TSAN_BUCKET_LOCKS (1 << QHT_TSAN_BUCKET_LOCKS_BITS)
166 struct qht_tsan_lock {
167 QemuSpin lock;
168 } QEMU_ALIGNED(QHT_BUCKET_ALIGN);
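/*
 * Worked example of the power-of-two stripe selection: with
 * QHT_TSAN_BUCKET_LOCKS == 16, the stripe lock index is just the low bits of
 * the head-bucket index (bucket_idx & 15), so head buckets 5, 21 and 37 all
 * share stripe lock 5.
 */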
171 * struct qht_map - structure to track an array of buckets
172 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
173 * find the whole struct.
174 * @buckets: array of head buckets. It is constant once the map is created.
175 * @n_buckets: number of head buckets. It is constant once the map is created.
176 * @n_added_buckets: number of added (i.e. "non-head") buckets
177 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
178 * number of added buckets surpasses it.
179 * @tsan_bucket_locks: Array of striped locks to be used only under TSAN.
181 * Buckets are tracked in what we call a "map", i.e. this structure.
183 struct qht_map {
184 struct rcu_head rcu;
185 struct qht_bucket *buckets;
186 size_t n_buckets;
187 size_t n_added_buckets;
188 size_t n_added_buckets_threshold;
189 #ifdef CONFIG_TSAN
190 struct qht_tsan_lock tsan_bucket_locks[QHT_TSAN_BUCKET_LOCKS];
191 #endif
194 /* trigger a resize when n_added_buckets > n_buckets / div */
195 #define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
197 static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
198 bool reset);
199 static void qht_grow_maybe(struct qht *ht);
201 #ifdef QHT_DEBUG
203 #define qht_debug_assert(X) do { assert(X); } while (0)
205 static void qht_bucket_debug__locked(struct qht_bucket *b)
207 bool seen_empty = false;
208 bool corrupt = false;
209 int i;
211 do {
212 for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
213 if (b->pointers[i] == NULL) {
214 seen_empty = true;
215 continue;
217 if (seen_empty) {
218 fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
219 __func__, b, i, b->hashes[i], b->pointers[i]);
220 corrupt = true;
223 b = b->next;
224 } while (b);
225 qht_debug_assert(!corrupt);
228 static void qht_map_debug__all_locked(struct qht_map *map)
230 int i;
232 for (i = 0; i < map->n_buckets; i++) {
233 qht_bucket_debug__locked(&map->buckets[i]);
236 #else
238 #define qht_debug_assert(X) do { (void)(X); } while (0)
240 static inline void qht_bucket_debug__locked(struct qht_bucket *b)
243 static inline void qht_map_debug__all_locked(struct qht_map *map)
245 #endif /* QHT_DEBUG */
247 static inline size_t qht_elems_to_buckets(size_t n_elems)
249 return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
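/*
 * For example, qht_init() with n_elems = 6000 on a 64-bit host (4 entries per
 * bucket) asks for pow2ceil(1500) = 2048 head buckets.
 */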
253 * When using striped locks (i.e. under TSAN), we have to be careful not
254 * to operate on the same lock twice (e.g. when iterating through all buckets).
255 * We achieve this by operating only on each stripe's first matching lock.
257 static inline void qht_do_if_first_in_stripe(struct qht_map *map,
258 struct qht_bucket *b,
259 void (*func)(QemuSpin *spin))
261 #ifdef CONFIG_TSAN
262 unsigned long bucket_idx = b - map->buckets;
263 bool is_first_in_stripe = (bucket_idx >> QHT_TSAN_BUCKET_LOCKS_BITS) == 0;
264 if (is_first_in_stripe) {
265 unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
266 func(&map->tsan_bucket_locks[lock_idx].lock);
268 #else
269 func(&b->lock);
270 #endif
273 static inline void qht_bucket_lock_do(struct qht_map *map,
274 struct qht_bucket *b,
275 void (*func)(QemuSpin *lock))
277 #ifdef CONFIG_TSAN
278 unsigned long bucket_idx = b - map->buckets;
279 unsigned long lock_idx = bucket_idx & (QHT_TSAN_BUCKET_LOCKS - 1);
280 func(&map->tsan_bucket_locks[lock_idx].lock);
281 #else
282 func(&b->lock);
283 #endif
286 static inline void qht_bucket_lock(struct qht_map *map,
287 struct qht_bucket *b)
289 qht_bucket_lock_do(map, b, qemu_spin_lock);
292 static inline void qht_bucket_unlock(struct qht_map *map,
293 struct qht_bucket *b)
295 qht_bucket_lock_do(map, b, qemu_spin_unlock);
298 static inline void qht_head_init(struct qht_map *map, struct qht_bucket *b)
300 memset(b, 0, sizeof(*b));
301 qht_do_if_first_in_stripe(map, b, qemu_spin_init);
302 seqlock_init(&b->sequence);
305 static inline
306 struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
308 return &map->buckets[hash & (map->n_buckets - 1)];
311 /* acquire all bucket locks from a map */
312 static void qht_map_lock_buckets(struct qht_map *map)
314 size_t i;
316 for (i = 0; i < map->n_buckets; i++) {
317 struct qht_bucket *b = &map->buckets[i];
319 qht_do_if_first_in_stripe(map, b, qemu_spin_lock);
323 static void qht_map_unlock_buckets(struct qht_map *map)
325 size_t i;
327 for (i = 0; i < map->n_buckets; i++) {
328 struct qht_bucket *b = &map->buckets[i];
330 qht_do_if_first_in_stripe(map, b, qemu_spin_unlock);
335 * Call with at least a bucket lock held.
336 * @map should be the value read before acquiring the lock (or locks).
338 static inline bool qht_map_is_stale__locked(const struct qht *ht,
339 const struct qht_map *map)
341 return map != ht->map;
345 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
347 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
349 * Note: callers cannot have ht->lock held.
351 static inline
352 void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
354 struct qht_map *map;
356 map = qatomic_rcu_read(&ht->map);
357 qht_map_lock_buckets(map);
358 if (likely(!qht_map_is_stale__locked(ht, map))) {
359 *pmap = map;
360 return;
362 qht_map_unlock_buckets(map);
364 /* we raced with a resize; acquire ht->lock to see the updated ht->map */
365 qht_lock(ht);
366 map = ht->map;
367 qht_map_lock_buckets(map);
368 qht_unlock(ht);
369 *pmap = map;
370 return;
374 * Get a head bucket and lock it, making sure its parent map is not stale.
375 * @pmap is filled with a pointer to the bucket's parent map.
377 * Unlock with qht_bucket_unlock.
379 * Note: callers cannot have ht->lock held.
381 static inline
382 struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
383 struct qht_map **pmap)
385 struct qht_bucket *b;
386 struct qht_map *map;
388 map = qatomic_rcu_read(&ht->map);
389 b = qht_map_to_bucket(map, hash);
391 qht_bucket_lock(map, b);
392 if (likely(!qht_map_is_stale__locked(ht, map))) {
393 *pmap = map;
394 return b;
396 qht_bucket_unlock(map, b);
398 /* we raced with a resize; acquire ht->lock to see the updated ht->map */
399 qht_lock(ht);
400 map = ht->map;
401 b = qht_map_to_bucket(map, hash);
402 qht_bucket_lock(map, b);
403 qht_unlock(ht);
404 *pmap = map;
405 return b;
408 static inline bool qht_map_needs_resize(const struct qht_map *map)
410 return qatomic_read(&map->n_added_buckets) >
411 map->n_added_buckets_threshold;
414 static inline void qht_chain_destroy(struct qht_map *map,
415 struct qht_bucket *head)
417 struct qht_bucket *curr = head->next;
418 struct qht_bucket *prev;
420 qht_do_if_first_in_stripe(map, head, qemu_spin_destroy);
421 while (curr) {
422 prev = curr;
423 curr = curr->next;
424 qemu_vfree(prev);
428 /* pass only an orphan map */
429 static void qht_map_destroy(struct qht_map *map)
431 size_t i;
433 for (i = 0; i < map->n_buckets; i++) {
434 qht_chain_destroy(map, &map->buckets[i]);
436 qemu_vfree(map->buckets);
437 g_free(map);
440 static struct qht_map *qht_map_create(size_t n_buckets)
442 struct qht_map *map;
443 size_t i;
445 map = g_malloc(sizeof(*map));
446 map->n_buckets = n_buckets;
448 map->n_added_buckets = 0;
449 map->n_added_buckets_threshold = n_buckets /
450 QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;
452 /* allow tiny hash tables to add at least one non-head bucket */
453 if (unlikely(map->n_added_buckets_threshold == 0)) {
454 map->n_added_buckets_threshold = 1;
457 map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
458 sizeof(*map->buckets) * n_buckets);
459 for (i = 0; i < n_buckets; i++) {
460 qht_head_init(map, &map->buckets[i]);
462 return map;
465 void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
466 unsigned int mode)
468 struct qht_map *map;
469 size_t n_buckets = qht_elems_to_buckets(n_elems);
471 g_assert(cmp);
472 ht->cmp = cmp;
473 ht->mode = mode;
474 qemu_mutex_init(&ht->lock);
475 map = qht_map_create(n_buckets);
476 qatomic_rcu_set(&ht->map, map);
479 /* call only when there are no readers/writers left */
480 void qht_destroy(struct qht *ht)
482 qht_map_destroy(ht->map);
483 memset(ht, 0, sizeof(*ht));
486 static void qht_bucket_reset__locked(struct qht_bucket *head)
488 struct qht_bucket *b = head;
489 int i;
491 seqlock_write_begin(&head->sequence);
492 do {
493 for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
494 if (b->pointers[i] == NULL) {
495 goto done;
497 qatomic_set(&b->hashes[i], 0);
498 qatomic_set(&b->pointers[i], NULL);
500 b = b->next;
501 } while (b);
502 done:
503 seqlock_write_end(&head->sequence);
506 /* call with all bucket locks held */
507 static void qht_map_reset__all_locked(struct qht_map *map)
509 size_t i;
511 for (i = 0; i < map->n_buckets; i++) {
512 qht_bucket_reset__locked(&map->buckets[i]);
514 qht_map_debug__all_locked(map);
517 void qht_reset(struct qht *ht)
519 struct qht_map *map;
521 qht_map_lock_buckets__no_stale(ht, &map);
522 qht_map_reset__all_locked(map);
523 qht_map_unlock_buckets(map);
526 static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
528 qht_do_resize_reset(ht, new, false);
531 static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
533 qht_do_resize_reset(ht, new, true);
536 bool qht_reset_size(struct qht *ht, size_t n_elems)
538 struct qht_map *new = NULL;
539 struct qht_map *map;
540 size_t n_buckets;
542 n_buckets = qht_elems_to_buckets(n_elems);
544 qht_lock(ht);
545 map = ht->map;
546 if (n_buckets != map->n_buckets) {
547 new = qht_map_create(n_buckets);
549 qht_do_resize_and_reset(ht, new);
550 qht_unlock(ht);
552 return !!new;
555 static inline
556 void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
557 const void *userp, uint32_t hash)
559 const struct qht_bucket *b = head;
560 int i;
562 do {
563 for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
564 if (qatomic_read(&b->hashes[i]) == hash) {
565 /* The pointer is dereferenced before seqlock_read_retry,
566 * so (unlike qht_insert__locked) we need to use
567 * qatomic_rcu_read here.
569 void *p = qatomic_rcu_read(&b->pointers[i]);
571 if (likely(p) && likely(func(p, userp))) {
572 return p;
576 b = qatomic_rcu_read(&b->next);
577 } while (b);
579 return NULL;
582 static __attribute__((noinline))
583 void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
584 const void *userp, uint32_t hash)
586 unsigned int version;
587 void *ret;
589 do {
590 version = seqlock_read_begin(&b->sequence);
591 ret = qht_do_lookup(b, func, userp, hash);
592 } while (seqlock_read_retry(&b->sequence, version));
593 return ret;
596 void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
597 qht_lookup_func_t func)
599 const struct qht_bucket *b;
600 const struct qht_map *map;
601 unsigned int version;
602 void *ret;
604 map = qatomic_rcu_read(&ht->map);
605 b = qht_map_to_bucket(map, hash);
607 version = seqlock_read_begin(&b->sequence);
608 ret = qht_do_lookup(b, func, userp, hash);
609 if (likely(!seqlock_read_retry(&b->sequence, version))) {
610 return ret;
613 * Removing the do/while from the fastpath gives a 4% perf. increase when
614 * running a 100%-lookup microbenchmark.
616 return qht_lookup__slowpath(b, func, userp, hash);
619 void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
621 return qht_lookup_custom(ht, userp, hash, ht->cmp);
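/*
 * Illustrative sketch, kept out of the build with #if 0: with
 * qht_lookup_custom(), the key passed as @userp need not be an object of the
 * stored type, e.g. an entry can be looked up by id without building a dummy
 * object. thing_lookup_by_id(), hash_of() and the "struct thing" from the
 * earlier sketch are hypothetical.
 */
#if 0
static bool thing_lookup_by_id(const void *obj, const void *userp)
{
    const struct thing *t = obj;
    const uint32_t *id = userp;

    return t->id == *id;
}

/*
 * rcu_read_lock();
 * t = qht_lookup_custom(ht, &id, hash_of(id), thing_lookup_by_id);
 * rcu_read_unlock();
 */
#endif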
625 * call with head->lock held
626 * @ht is const since it is only used for ht->cmp()
628 static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
629 struct qht_bucket *head, void *p, uint32_t hash,
630 bool *needs_resize)
632 struct qht_bucket *b = head;
633 struct qht_bucket *prev = NULL;
634 struct qht_bucket *new = NULL;
635 int i;
637 do {
638 for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
639 if (b->pointers[i]) {
640 if (unlikely(b->hashes[i] == hash &&
641 ht->cmp(b->pointers[i], p))) {
642 return b->pointers[i];
644 } else {
645 goto found;
648 prev = b;
649 b = b->next;
650 } while (b);
652 b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
653 memset(b, 0, sizeof(*b));
654 new = b;
655 i = 0;
656 qatomic_inc(&map->n_added_buckets);
657 if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
658 *needs_resize = true;
661 found:
662 /* found an empty slot: acquire the seqlock and write */
663 seqlock_write_begin(&head->sequence);
664 if (new) {
665 qatomic_rcu_set(&prev->next, b);
667 /* smp_wmb() implicit in seqlock_write_begin. */
668 qatomic_set(&b->hashes[i], hash);
669 qatomic_set(&b->pointers[i], p);
670 seqlock_write_end(&head->sequence);
671 return NULL;
674 static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
676 struct qht_map *map;
679 * If the lock is taken it probably means there's an ongoing resize,
680 * so bail out.
682 if (qht_trylock(ht)) {
683 return;
685 map = ht->map;
686 /* another thread might have just performed the resize we were after */
687 if (qht_map_needs_resize(map)) {
688 struct qht_map *new = qht_map_create(map->n_buckets * 2);
690 qht_do_resize(ht, new);
692 qht_unlock(ht);
695 bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
697 struct qht_bucket *b;
698 struct qht_map *map;
699 bool needs_resize = false;
700 void *prev;
702 /* NULL pointers are not supported */
703 qht_debug_assert(p);
705 b = qht_bucket_lock__no_stale(ht, hash, &map);
706 prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
707 qht_bucket_debug__locked(b);
708 qht_bucket_unlock(map, b);
710 if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
711 qht_grow_maybe(ht);
713 if (likely(prev == NULL)) {
714 return true;
716 if (existing) {
717 *existing = prev;
719 return false;
722 static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
724 if (pos == QHT_BUCKET_ENTRIES - 1) {
725 if (b->next == NULL) {
726 return true;
728 return b->next->pointers[0] == NULL;
730 return b->pointers[pos + 1] == NULL;
733 static void
734 qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
736 qht_debug_assert(!(to == from && i == j));
737 qht_debug_assert(to->pointers[i]);
738 qht_debug_assert(from->pointers[j]);
740 qatomic_set(&to->hashes[i], from->hashes[j]);
741 qatomic_set(&to->pointers[i], from->pointers[j]);
743 qatomic_set(&from->hashes[j], 0);
744 qatomic_set(&from->pointers[j], NULL);
748 * Find the last valid entry in @orig, and swap it with @orig[pos], which has
749 * just been invalidated.
751 static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
753 struct qht_bucket *b = orig;
754 struct qht_bucket *prev = NULL;
755 int i;
757 if (qht_entry_is_last(orig, pos)) {
758 qatomic_set(&orig->hashes[pos], 0);
759 qatomic_set(&orig->pointers[pos], NULL);
760 return;
762 do {
763 for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
764 if (b->pointers[i]) {
765 continue;
767 if (i > 0) {
768 return qht_entry_move(orig, pos, b, i - 1);
770 qht_debug_assert(prev);
771 return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
773 prev = b;
774 b = b->next;
775 } while (b);
776 /* no free entries other than orig[pos], so swap it with the last one */
777 qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
780 /* call with b->lock held */
781 static inline
782 bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
784 struct qht_bucket *b = head;
785 int i;
787 do {
788 for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
789 void *q = b->pointers[i];
791 if (unlikely(q == NULL)) {
792 return false;
794 if (q == p) {
795 qht_debug_assert(b->hashes[i] == hash);
796 seqlock_write_begin(&head->sequence);
797 qht_bucket_remove_entry(b, i);
798 seqlock_write_end(&head->sequence);
799 return true;
802 b = b->next;
803 } while (b);
804 return false;
807 bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
809 struct qht_bucket *b;
810 struct qht_map *map;
811 bool ret;
813 /* NULL pointers are not supported */
814 qht_debug_assert(p);
816 b = qht_bucket_lock__no_stale(ht, hash, &map);
817 ret = qht_remove__locked(b, p, hash);
818 qht_bucket_debug__locked(b);
819 qht_bucket_unlock(map, b);
820 return ret;
823 static inline void qht_bucket_iter(struct qht_bucket *head,
824 const struct qht_iter *iter, void *userp)
826 struct qht_bucket *b = head;
827 int i;
829 do {
830 for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
831 if (b->pointers[i] == NULL) {
832 return;
834 switch (iter->type) {
835 case QHT_ITER_VOID:
836 iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
837 break;
838 case QHT_ITER_RM:
839 if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
840 /* replace i with the last valid element in the bucket */
841 seqlock_write_begin(&head->sequence);
842 qht_bucket_remove_entry(b, i);
843 seqlock_write_end(&head->sequence);
844 qht_bucket_debug__locked(b);
845 /* reevaluate i, since it just got replaced */
846 i--;
847 continue;
849 break;
850 default:
851 g_assert_not_reached();
854 b = b->next;
855 } while (b);
858 /* call with all of the map's locks held */
859 static inline void qht_map_iter__all_locked(struct qht_map *map,
860 const struct qht_iter *iter,
861 void *userp)
863 size_t i;
865 for (i = 0; i < map->n_buckets; i++) {
866 qht_bucket_iter(&map->buckets[i], iter, userp);
870 static inline void
871 do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
873 struct qht_map *map;
875 map = qatomic_rcu_read(&ht->map);
876 qht_map_lock_buckets(map);
877 qht_map_iter__all_locked(map, iter, userp);
878 qht_map_unlock_buckets(map);
881 void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
883 const struct qht_iter iter = {
884 .f.retvoid = func,
885 .type = QHT_ITER_VOID,
888 do_qht_iter(ht, &iter, userp);
891 void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
893 const struct qht_iter iter = {
894 .f.retbool = func,
895 .type = QHT_ITER_RM,
898 do_qht_iter(ht, &iter, userp);
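/*
 * Illustrative callbacks, kept out of the build with #if 0: qht_iter() takes
 * a void-returning visitor, while qht_iter_remove() takes a predicate whose
 * true return removes the visited entry. count_entry() and remove_all() are
 * hypothetical.
 */
#if 0
static void count_entry(void *p, uint32_t hash, void *userp)
{
    size_t *count = userp;

    (*count)++;
}

static bool remove_all(void *p, uint32_t hash, void *userp)
{
    /* the caller must only free @p after an RCU grace period */
    return true;
}

/* qht_iter(ht, count_entry, &count); qht_iter_remove(ht, remove_all, NULL); */
#endif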
901 struct qht_map_copy_data {
902 struct qht *ht;
903 struct qht_map *new;
906 static void qht_map_copy(void *p, uint32_t hash, void *userp)
908 struct qht_map_copy_data *data = userp;
909 struct qht *ht = data->ht;
910 struct qht_map *new = data->new;
911 struct qht_bucket *b = qht_map_to_bucket(new, hash);
913 /* no need to acquire b->lock because no thread has seen this map yet */
914 qht_insert__locked(ht, new, b, p, hash, NULL);
918 * Atomically perform a resize and/or reset.
919 * Call with ht->lock held.
921 static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
923 struct qht_map *old;
924 const struct qht_iter iter = {
925 .f.retvoid = qht_map_copy,
926 .type = QHT_ITER_VOID,
928 struct qht_map_copy_data data;
930 old = ht->map;
931 qht_map_lock_buckets(old);
933 if (reset) {
934 qht_map_reset__all_locked(old);
937 if (new == NULL) {
938 qht_map_unlock_buckets(old);
939 return;
942 g_assert(new->n_buckets != old->n_buckets);
943 data.ht = ht;
944 data.new = new;
945 qht_map_iter__all_locked(old, &iter, &data);
946 qht_map_debug__all_locked(new);
948 qatomic_rcu_set(&ht->map, new);
949 qht_map_unlock_buckets(old);
950 call_rcu(old, qht_map_destroy, rcu);
953 bool qht_resize(struct qht *ht, size_t n_elems)
955 size_t n_buckets = qht_elems_to_buckets(n_elems);
956 bool ret = false;
958 qht_lock(ht);
959 if (n_buckets != ht->map->n_buckets) {
960 struct qht_map *new;
962 new = qht_map_create(n_buckets);
963 qht_do_resize(ht, new);
964 ret = true;
966 qht_unlock(ht);
968 return ret;
971 /* pass @stats to qht_statistics_destroy() when done */
972 void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
974 const struct qht_map *map;
975 int i;
977 map = qatomic_rcu_read(&ht->map);
979 stats->used_head_buckets = 0;
980 stats->entries = 0;
981 qdist_init(&stats->chain);
982 qdist_init(&stats->occupancy);
983 /* bail out if the qht has not yet been initialized */
984 if (unlikely(map == NULL)) {
985 stats->head_buckets = 0;
986 return;
988 stats->head_buckets = map->n_buckets;
990 for (i = 0; i < map->n_buckets; i++) {
991 const struct qht_bucket *head = &map->buckets[i];
992 const struct qht_bucket *b;
993 unsigned int version;
994 size_t buckets;
995 size_t entries;
996 int j;
998 do {
999 version = seqlock_read_begin(&head->sequence);
1000 buckets = 0;
1001 entries = 0;
1002 b = head;
1003 do {
1004 for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
1005 if (qatomic_read(&b->pointers[j]) == NULL) {
1006 break;
1008 entries++;
1010 buckets++;
1011 b = qatomic_rcu_read(&b->next);
1012 } while (b);
1013 } while (seqlock_read_retry(&head->sequence, version));
1015 if (entries) {
1016 qdist_inc(&stats->chain, buckets);
1017 qdist_inc(&stats->occupancy,
1018 (double)entries / QHT_BUCKET_ENTRIES / buckets);
1019 stats->used_head_buckets++;
1020 stats->entries += entries;
1021 } else {
1022 qdist_inc(&stats->occupancy, 0);
1027 void qht_statistics_destroy(struct qht_stats *stats)
1029 qdist_destroy(&stats->occupancy);
1030 qdist_destroy(&stats->chain);
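/*
 * Illustrative sketch, kept out of the build with #if 0: a caller-side
 * summary built from qht_statistics_init(). qdist_avg() comes from
 * "qemu/qdist.h"; print_summary() and the output format are hypothetical.
 */
#if 0
static void print_summary(const struct qht *ht)
{
    struct qht_stats stats;

    qht_statistics_init(ht, &stats);
    printf("%zu entries, %zu/%zu head buckets used, avg chain %0.2f\n",
           stats.entries, stats.used_head_buckets, stats.head_buckets,
           qdist_avg(&stats.chain));
    qht_statistics_destroy(&stats);
}
#endif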