/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value, but not the hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized wrt writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket apply to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock. If they don't match, a resize has occurred
 * while the bucket spinlock was being acquired.
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
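
/*
 * Minimal usage sketch (illustration only, not part of this file; 'entry',
 * my_cmp() and my_hash() are hypothetical):
 *
 *     static bool my_cmp(const void *a, const void *b)
 *     {
 *         return a == b;                // or compare the entries' keys
 *     }
 *
 *     struct qht ht;
 *     qht_init(&ht, my_cmp, 1024, QHT_MODE_AUTO_RESIZE);
 *
 *     uint32_t hash = my_hash(entry);   // any well-distributed 32-bit hash
 *     qht_insert(&ht, entry, hash, NULL);
 *
 *     rcu_read_lock();                  // lookups run in an RCU read section
 *     void *p = qht_lookup(&ht, entry, hash);
 *     rcu_read_unlock();
 *
 *     qht_remove(&ht, entry, hash);     // free 'entry' only after a grace period
 *     qht_destroy(&ht);
 */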
#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "qemu/memalign.h"

//#define QHT_DEBUG

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64-bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif

enum qht_iter_type {
    QHT_ITER_VOID,          /* do nothing; use retvoid */
    QHT_ITER_RM,            /* remove element if retbool returns true */
};

struct qht_iter {
    union {
        qht_iter_func_t retvoid;
        qht_iter_bool_func_t retbool;
    } f;
    enum qht_iter_type type;
};

/*
 * Do _not_ use qemu_mutex_[try]lock directly! Use these macros, otherwise
 * the profiler (QSP) will deadlock.
 */
static inline void qht_lock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        qemu_mutex_lock__raw(&ht->lock);
    } else {
        qemu_mutex_lock(&ht->lock);
    }
}

static inline int qht_trylock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        return qemu_mutex_trylock__raw(&(ht)->lock);
    }
    return qemu_mutex_trylock(&(ht)->lock);
}

/* this inline is not really necessary, but it helps keep code consistent */
static inline void qht_unlock(struct qht *ht)
{
    qemu_mutex_unlock(&ht->lock);
}

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with qatomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in qatomic_read, since otherwise the compiler
 * might refetch the pointer.
 * qatomic_read's are of course not necessary when the bucket lock is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
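
/*
 * Layout check (informative): assuming 4-byte QemuSpin and QemuSeqLock, a
 * 64-bit host gets 4 + 4 + 4 * 4 + 8 * 4 + 8 = 64 bytes per bucket, and a
 * 32-bit host gets 4 + 4 + 4 * 6 + 4 * 6 + 4 = 60 bytes; both fit within
 * QHT_BUCKET_ALIGN, as the build-time assertion above enforces.
 */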

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
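/*
 * For example, with QHT_MODE_AUTO_RESIZE a map with 1024 head buckets is
 * grown (to 2048 head buckets) once more than 1024 / 8 = 128 non-head
 * buckets have been added to its chains.
 */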

static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
                                bool reset);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
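/*
 * e.g. on a 64-bit host (QHT_BUCKET_ENTRIES == 4), n_elems == 1000 yields
 * pow2ceil(250) == 256 head buckets.
 */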

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}

/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(const struct qht *ht,
                                            const struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = qatomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    qht_map_lock_buckets(map);
    qht_unlock(ht);
    *pmap = map;
    return;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = qatomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qht_unlock(ht);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(const struct qht_map *map)
{
    return qatomic_read(&map->n_added_buckets) >
           map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(const struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    qemu_spin_destroy(&head->lock);
    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    g_assert(cmp);
    ht->cmp = cmp;
    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    qatomic_rcu_set(&ht->map, map);
}

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            qatomic_set(&b->hashes[i], 0);
            qatomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, false);
}

static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, true);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    qht_lock(ht);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }
    qht_do_resize_and_reset(ht, new);
    qht_unlock(ht);

    return !!new;
}

static inline
void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    const struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (qatomic_read(&b->hashes[i]) == hash) {
                /* The pointer is dereferenced before seqlock_read_retry,
                 * so (unlike qht_insert__locked) we need to use
                 * qatomic_rcu_read here.
                 */
                void *p = qatomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = qatomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}

void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    const struct qht_bucket *b;
    const struct qht_map *map;
    unsigned int version;
    void *ret;

    map = qatomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}

void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
{
    return qht_lookup_custom(ht, userp, hash, ht->cmp);
}

/*
 * call with head->lock held
 * @ht is const since it is only used for ht->cmp()
 */
static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
                                struct qht_bucket *head, void *p, uint32_t hash,
                                bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    qatomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty key: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        qatomic_rcu_set(&prev->next, b);
    }
    /* smp_wmb() implicit in seqlock_write_begin. */
    qatomic_set(&b->hashes[i], hash);
    qatomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return NULL;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qht_trylock(ht)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(ht, new);
    }
    qht_unlock(ht);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    void *prev;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    if (likely(prev == NULL)) {
        return true;
    }
    if (existing) {
        *existing = prev;
    }
    return false;
}

static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    qatomic_set(&to->hashes[i], from->hashes[j]);
    qatomic_set(&to->pointers[i], from->pointers[j]);

    qatomic_set(&from->hashes[j], 0);
    qatomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @orig, and swap it with @orig[pos], which has
 * just been invalidated.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        qatomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}
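/*
 * Illustration (QHT_BUCKET_ENTRIES == 4, single-bucket chain): removing the
 * entry at position 1 from a full bucket [A, B, C, D] moves the last valid
 * entry into the hole, leaving [A, D, C, NULL]. A later lookup can therefore
 * stop as soon as it hits a NULL pointer.
 */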

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}

static inline void qht_bucket_iter(struct qht_bucket *head,
                                   const struct qht_iter *iter, void *userp)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    /* replace i with the last valid element in the bucket */
                    seqlock_write_begin(&head->sequence);
                    qht_bucket_remove_entry(b, i);
                    seqlock_write_end(&head->sequence);
                    qht_bucket_debug__locked(b);
                    /* reevaluate i, since it just got replaced */
                    i--;
                    continue;
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht_map *map,
                                            const struct qht_iter *iter,
                                            void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(&map->buckets[i], iter, userp);
    }
}

static inline void
do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
{
    struct qht_map *map;

    map = qatomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    qht_map_iter__all_locked(map, iter, userp);
    qht_map_unlock_buckets(map);
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retvoid = func,
        .type = QHT_ITER_VOID,
    };

    do_qht_iter(ht, &iter, userp);
}

void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retbool = func,
        .type = QHT_ITER_RM,
    };

    do_qht_iter(ht, &iter, userp);
}

struct qht_map_copy_data {
    struct qht *ht;
    struct qht_map *new;
};

static void qht_map_copy(void *p, uint32_t hash, void *userp)
{
    struct qht_map_copy_data *data = userp;
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Atomically perform a resize and/or reset.
 * Call with ht->lock held.
 */
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
{
    struct qht_map *old;
    const struct qht_iter iter = {
        .f.retvoid = qht_map_copy,
        .type = QHT_ITER_VOID,
    };
    struct qht_map_copy_data data;

    old = ht->map;
    qht_map_lock_buckets(old);

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        qht_map_unlock_buckets(old);
        return;
    }

    g_assert(new->n_buckets != old->n_buckets);
    data.ht = ht;
    data.new = new;
    qht_map_iter__all_locked(old, &iter, &data);
    qht_map_debug__all_locked(new);

    qatomic_rcu_set(&ht->map, new);
    qht_map_unlock_buckets(old);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qht_lock(ht);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;

        new = qht_map_create(n_buckets);
        qht_do_resize(ht, new);
        ret = true;
    }
    qht_unlock(ht);

    return ret;
}

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
{
    const struct qht_map *map;
    int i;

    map = qatomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);
    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        const struct qht_bucket *head = &map->buckets[i];
        const struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (qatomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = qatomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}