/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value but different hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized wrt writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket apply to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock. If they don't match, a resize has occurred
 * while the bucket spinlock was being acquired.
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
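
/*
 * Example usage (an illustrative sketch added for this document, not part of
 * the original file). It assumes qht_lookup_func_t is the comparison callback
 * type declared in "qemu/qht.h", taking the candidate object and the opaque
 * userp and returning true on a match, as its use in qht_do_lookup() below
 * suggests; the pointer_is_equal() helper, the example() wrapper and the
 * literal hash value are hypothetical. In real code, lookups would also run
 * inside an RCU read-side critical section, per the assumptions above.
 *
 *     static bool pointer_is_equal(const void *obj, const void *userp)
 *     {
 *         return obj == userp;    // match when the stored pointer is the key
 *     }
 *
 *     static void example(void)
 *     {
 *         static int item;               // any non-NULL pointer can be stored
 *         uint32_t hash = 0xdeadbeef;    // stand-in for a real hash of &item
 *         struct qht ht;
 *
 *         qht_init(&ht, 1024, QHT_MODE_AUTO_RESIZE); // sized for ~1024 entries
 *         qht_insert(&ht, &item, hash);              // insert hash-pointer pair
 *         assert(qht_lookup(&ht, pointer_is_equal, &item, hash) == &item);
 *         qht_remove(&ht, &item, hash);
 *         qht_destroy(&ht);              // only once no readers/writers remain
 *     }
 */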

#include "qemu/osdep.h"
#include "qemu/qht.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

//#define QHT_DEBUG

/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64-bytes); systems with larger cache lines might suffer from
 * some false sharing.
 */
#define QHT_BUCKET_ALIGN 64

/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else /* 64-bit */
#define QHT_BUCKET_ENTRIES 4
#endif

/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with atomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in atomic_read, since otherwise the compiler
 * might refetch the pointer.
 * atomic_read's are of course not necessary when the bucket lock is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);

/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
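
/*
 * Worked example (illustrative, not part of the original file): with the
 * divisor above set to 8, a map with 1024 head buckets gets
 * n_added_buckets_threshold = 1024 / 8 = 128, so qht_map_needs_resize()
 * starts returning true once the 129th non-head bucket has been chained in.
 */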

static void qht_do_resize(struct qht *ht, struct qht_map *new);
static void qht_grow_maybe(struct qht *ht);

#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);

    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */

static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}

static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}

static inline
struct qht_bucket *qht_map_to_bucket(struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}

/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}

static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}

/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(struct qht *ht, struct qht_map *map)
{
    return map != ht->map;
}

/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    qht_map_lock_buckets(map);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return;
}

/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qemu_mutex_unlock(&ht->lock);
    *pmap = map;
    return b;
}

static inline bool qht_map_needs_resize(struct qht_map *map)
{
    return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}

static inline void qht_chain_destroy(struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}

/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}

static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}

void qht_init(struct qht *ht, size_t n_elems, unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    atomic_rcu_set(&ht->map, map);
}

/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}

static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            b->hashes[i] = 0;
            atomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}

/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}

void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}

bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new;
    struct qht_map *map;
    size_t n_buckets;
    bool resize = false;

    n_buckets = qht_elems_to_buckets(n_elems);

    qemu_mutex_lock(&ht->lock);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
        resize = true;
    }

    qht_map_lock_buckets(map);
    qht_map_reset__all_locked(map);
    if (resize) {
        qht_do_resize(ht, new);
    }
    qht_map_unlock_buckets(map);
    qemu_mutex_unlock(&ht->lock);

    return resize;
}

static inline
void *qht_do_lookup(struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->hashes[i] == hash) {
                void *p = atomic_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = atomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}

static __attribute__((noinline))
void *qht_lookup__slowpath(struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}

void *qht_lookup(struct qht *ht, qht_lookup_func_t func, const void *userp,
                 uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    unsigned int version;
    void *ret;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}

/* call with head->lock held */
static bool qht_insert__locked(struct qht *ht, struct qht_map *map,
                               struct qht_bucket *head, void *p, uint32_t hash,
                               bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->pointers[i] == p)) {
                    return false;
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    atomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty key: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        atomic_rcu_set(&prev->next, b);
    }
    b->hashes[i] = hash;
    atomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return true;
}

static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qemu_mutex_trylock(&ht->lock)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_map_lock_buckets(map);
        qht_do_resize(ht, new);
        qht_map_unlock_buckets(map);
    }
    qemu_mutex_unlock(&ht->lock);
}

bool qht_insert(struct qht *ht, void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    return ret;
}

static inline bool qht_entry_is_last(struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}

static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    to->hashes[i] = from->hashes[j];
    atomic_set(&to->pointers[i], from->pointers[j]);

    from->hashes[j] = 0;
    atomic_set(&from->pointers[j], NULL);
}

/*
 * Find the last valid entry in @head, and swap it with @orig[pos], which has
 * just been invalidated.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        atomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}

/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_map *map, struct qht_bucket *head,
                        const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}

bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(map, b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}

static inline void qht_bucket_iter(struct qht *ht, struct qht_bucket *b,
                                   qht_iter_func_t func, void *userp)
{
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            func(ht, b->pointers[i], b->hashes[i], userp);
        }
        b = b->next;
    } while (b);
}

/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht *ht, struct qht_map *map,
                                            qht_iter_func_t func, void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(ht, &map->buckets[i], func, userp);
    }
}

void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    /* Note: ht here is merely for carrying ht->mode; ht->map won't be read */
    qht_map_iter__all_locked(ht, map, func, userp);
    qht_map_unlock_buckets(map);
}

static void qht_map_copy(struct qht *ht, void *p, uint32_t hash, void *userp)
{
    struct qht_map *new = userp;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}

/*
 * Call with ht->lock and all bucket locks held.
 *
 * Creating the @new map here would add unnecessary delay while all the locks
 * are held--holding up the bucket locks is particularly bad, since no writes
 * can occur while these are held. Thus, we let callers create the new map,
 * hopefully without the bucket locks held.
 */
static void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    struct qht_map *old;

    old = ht->map;
    g_assert_cmpuint(new->n_buckets, !=, old->n_buckets);

    qht_map_iter__all_locked(ht, old, qht_map_copy, new);
    qht_map_debug__all_locked(new);

    atomic_rcu_set(&ht->map, new);
    call_rcu(old, qht_map_destroy, rcu);
}

bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qemu_mutex_lock(&ht->lock);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;
        struct qht_map *old = ht->map;

        new = qht_map_create(n_buckets);
        qht_map_lock_buckets(old);
        qht_do_resize(ht, new);
        qht_map_unlock_buckets(old);
        ret = true;
    }
    qemu_mutex_unlock(&ht->lock);

    return ret;
}

/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(struct qht *ht, struct qht_stats *stats)
{
    struct qht_map *map;
    int i;

    map = atomic_rcu_read(&ht->map);

    stats->head_buckets = map->n_buckets;
    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *head = &map->buckets[i];
        struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (atomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = atomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}

void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}