/*
 * qht.c - QEMU Hash Table, designed to scale for read-mostly workloads.
 *
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * Assumptions:
 * - NULL cannot be inserted/removed as a pointer value.
 * - Trying to insert an already-existing hash-pointer pair is OK. However,
 *   it is not OK to insert into the same hash table different hash-pointer
 *   pairs that have the same pointer value, but not the hashes.
 * - Lookups are performed under an RCU read-critical section; removals
 *   must wait for a grace period to elapse before freeing removed objects.
 *
 * Features:
 * - Reads (i.e. lookups and iterators) can be concurrent with other reads.
 *   Lookups that are concurrent with writes to the same bucket will retry
 *   via a seqlock; iterators acquire all bucket locks and therefore can be
 *   concurrent with lookups and are serialized wrt writers.
 * - Writes (i.e. insertions/removals) can be concurrent with writes to
 *   different buckets; writes to the same bucket are serialized through a lock.
 * - Optional auto-resizing: the hash table resizes up if the load surpasses
 *   a certain threshold. Resizing is done concurrently with readers; writes
 *   are serialized with the resize operation.
 *
 * The key structure is the bucket, which is cacheline-sized. Buckets
 * contain a few hash values and pointers; the u32 hash values are stored in
 * full so that resizing is fast. Having this structure instead of directly
 * chaining items has two advantages:
 * - Failed lookups fail fast, and touch a minimum number of cache lines.
 * - Resizing the hash table with concurrent lookups is easy.
 *
 * There are two types of buckets:
 * 1. "head" buckets are the ones allocated in the array of buckets in qht_map.
 * 2. all "non-head" buckets (i.e. all others) are members of a chain that
 *    starts from a head bucket.
 * Note that the seqlock and spinlock of a head bucket applies to all buckets
 * chained to it; these two fields are unused in non-head buckets.
 *
 * On removals, we move the last valid item in the chain to the position of the
 * just-removed entry. This makes lookups slightly faster, since the moment an
 * invalid entry is found, the (failed) lookup is over.
 *
 * Resizing is done by taking all bucket spinlocks (so that no other writers can
 * race with us) and then copying all entries into a new hash map. Then, the
 * ht->map pointer is set, and the old map is freed once no RCU readers can see
 * it anymore.
 *
 * Writers check for concurrent resizes by comparing ht->map before and after
 * acquiring their bucket lock. If they don't match, a resize has occurred
 * while the bucket spinlock was being acquired.
 *
 * Related Work:
 * - Idea of cacheline-sized buckets with full hashes taken from:
 *   David, Guerraoui & Trigonakis, "Asynchronized Concurrency:
 *   The Secret to Scaling Concurrent Search Data Structures", ASPLOS'15.
 * - Why not RCU-based hash tables? They would allow us to get rid of the
 *   seqlock, but resizing would take forever since RCU read critical
 *   sections in QEMU take quite a long time.
 *   More info on relativistic hash tables:
 *   + Triplett, McKenney & Walpole, "Resizable, Scalable, Concurrent Hash
 *     Tables via Relativistic Programming", USENIX ATC'11.
 *   + Corbet, "Relativistic hash tables, part 1: Algorithms", @ lwn.net, 2014.
 *     https://lwn.net/Articles/612021/
 */
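/*
 * Rough usage sketch (illustration only, not part of the implementation
 * below): the public entry points are declared in "qemu/qht.h"; struct
 * my_entry, my_cmp() and my_hash() are hypothetical caller-side names.
 *
 *     static bool my_cmp(const void *a, const void *b)
 *     {
 *         const struct my_entry *ea = a;
 *         const struct my_entry *eb = b;
 *
 *         return ea->key == eb->key;
 *     }
 *
 *     struct qht ht;
 *     struct my_entry *e = g_new0(struct my_entry, 1);
 *     void *existing;
 *     uint32_t hash;
 *
 *     qht_init(&ht, my_cmp, 1 << 10, QHT_MODE_AUTO_RESIZE);
 *     e->key = 42;
 *     hash = my_hash(e->key);                (any 32-bit hash of the key)
 *     qht_insert(&ht, e, hash, &existing);   (false if an equal entry exists)
 *     e = qht_lookup(&ht, e, hash);          (RCU read-side lookup)
 *     qht_remove(&ht, e, hash);              (free e only after an RCU
 *                                             grace period)
 */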
#include "qemu/osdep.h"
#include "qemu/atomic.h"
/*
 * We want to avoid false sharing of cache lines. Most systems have 64-byte
 * cache lines so we go with it for simplicity.
 *
 * Note that systems with smaller cache lines will be fine (the struct is
 * almost 64-bytes); systems with larger cache lines might suffer from
 * false sharing.
 */
#define QHT_BUCKET_ALIGN 64
/* define these to keep sizeof(qht_bucket) within QHT_BUCKET_ALIGN */
#if HOST_LONG_BITS == 32
#define QHT_BUCKET_ENTRIES 6
#else
#define QHT_BUCKET_ENTRIES 4
#endif
enum qht_iter_type {
    QHT_ITER_VOID,    /* do nothing; use retvoid */
    QHT_ITER_RM,      /* remove element if retbool returns true */
};

struct qht_iter {
    union {
        qht_iter_func_t retvoid;
        qht_iter_bool_func_t retbool;
    } f;
    enum qht_iter_type type;
};
/*
 * Do _not_ use qemu_mutex_[try]lock directly! Use these macros, otherwise
 * the profiler (QSP) will deadlock.
 */
static inline void qht_lock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        qemu_mutex_lock__raw(&ht->lock);
    } else {
        qemu_mutex_lock(&ht->lock);
    }
}
static inline int qht_trylock(struct qht *ht)
{
    if (ht->mode & QHT_MODE_RAW_MUTEXES) {
        return qemu_mutex_trylock__raw(&(ht)->lock);
    }
    return qemu_mutex_trylock(&(ht)->lock);
}
/* this inline is not really necessary, but it helps keep code consistent */
static inline void qht_unlock(struct qht *ht)
{
    qemu_mutex_unlock(&ht->lock);
}
/*
 * Note: reading partially-updated pointers in @pointers could lead to
 * segfaults. We thus access them with atomic_read/set; this guarantees
 * that the compiler makes all those accesses atomic. We also need the
 * volatile-like behavior in atomic_read, since otherwise the compiler
 * might refetch the pointer.
 * atomic_read's are of course not necessary when the bucket lock is held.
 *
 * If both ht->lock and b->lock are grabbed, ht->lock should always
 * be grabbed first.
 */
struct qht_bucket {
    QemuSpin lock;
    QemuSeqLock sequence;
    uint32_t hashes[QHT_BUCKET_ENTRIES];
    void *pointers[QHT_BUCKET_ENTRIES];
    struct qht_bucket *next;
} QEMU_ALIGNED(QHT_BUCKET_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct qht_bucket) > QHT_BUCKET_ALIGN);
/**
 * struct qht_map - structure to track an array of buckets
 * @rcu: used by RCU. Keep it as the top field in the struct to help valgrind
 *       find the whole struct.
 * @buckets: array of head buckets. It is constant once the map is created.
 * @n_buckets: number of head buckets. It is constant once the map is created.
 * @n_added_buckets: number of added (i.e. "non-head") buckets
 * @n_added_buckets_threshold: threshold to trigger an upward resize once the
 *                             number of added buckets surpasses it.
 *
 * Buckets are tracked in what we call a "map", i.e. this structure.
 */
struct qht_map {
    struct rcu_head rcu;
    struct qht_bucket *buckets;
    size_t n_buckets;
    size_t n_added_buckets;
    size_t n_added_buckets_threshold;
};

/* trigger a resize when n_added_buckets > n_buckets / div */
#define QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV 8
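/*
 * Illustration (numbers not from the source): with 1024 head buckets the
 * default threshold is 1024 / 8 = 128, i.e. an automatic grow is considered
 * once more than 128 non-head buckets have been chained.
 */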
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new,
                                bool reset);
static void qht_grow_maybe(struct qht *ht);
#ifdef QHT_DEBUG

#define qht_debug_assert(X) do { assert(X); } while (0)

static void qht_bucket_debug__locked(struct qht_bucket *b)
{
    bool seen_empty = false;
    bool corrupt = false;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                seen_empty = true;
                continue;
            }
            if (seen_empty) {
                fprintf(stderr, "%s: b: %p, pos: %i, hash: 0x%x, p: %p\n",
                        __func__, b, i, b->hashes[i], b->pointers[i]);
                corrupt = true;
            }
        }
        b = b->next;
    } while (b);
    qht_debug_assert(!corrupt);
}

static void qht_map_debug__all_locked(struct qht_map *map)
{
    int i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_debug__locked(&map->buckets[i]);
    }
}
#else

#define qht_debug_assert(X) do { (void)(X); } while (0)

static inline void qht_bucket_debug__locked(struct qht_bucket *b)
{ }

static inline void qht_map_debug__all_locked(struct qht_map *map)
{ }
#endif /* QHT_DEBUG */
static inline size_t qht_elems_to_buckets(size_t n_elems)
{
    return pow2ceil(n_elems / QHT_BUCKET_ENTRIES);
}
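/*
 * Example (illustrative numbers): on a 64-bit host QHT_BUCKET_ENTRIES is 4,
 * so sizing for 100 elements yields pow2ceil(100 / 4) = pow2ceil(25) = 32
 * head buckets.
 */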
static inline void qht_head_init(struct qht_bucket *b)
{
    memset(b, 0, sizeof(*b));
    qemu_spin_init(&b->lock);
    seqlock_init(&b->sequence);
}
static inline
struct qht_bucket *qht_map_to_bucket(const struct qht_map *map, uint32_t hash)
{
    return &map->buckets[hash & (map->n_buckets - 1)];
}
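/*
 * n_buckets is always a power of two (see qht_elems_to_buckets), so the
 * bucket index is a simple mask: e.g. with 32 head buckets, hash 0x12345678
 * selects bucket 0x12345678 & 31 = 24 (illustrative values).
 */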
/* acquire all bucket locks from a map */
static void qht_map_lock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_lock(&b->lock);
    }
}
static void qht_map_unlock_buckets(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        struct qht_bucket *b = &map->buckets[i];

        qemu_spin_unlock(&b->lock);
    }
}
/*
 * Call with at least a bucket lock held.
 * @map should be the value read before acquiring the lock (or locks).
 */
static inline bool qht_map_is_stale__locked(const struct qht *ht,
                                            const struct qht_map *map)
{
    return map != ht->map;
}
/*
 * Grab all bucket locks, and set @pmap after making sure the map isn't stale.
 *
 * Pairs with qht_map_unlock_buckets(), hence the pass-by-reference.
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return;
    }
    qht_map_unlock_buckets(map);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    qht_map_lock_buckets(map);
    qht_unlock(ht);
    *pmap = map;
}
/*
 * Get a head bucket and lock it, making sure its parent map is not stale.
 * @pmap is filled with a pointer to the bucket's parent map.
 *
 * Unlock with qemu_spin_unlock(&b->lock).
 *
 * Note: callers cannot have ht->lock held.
 */
static inline
struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
                                             struct qht_map **pmap)
{
    struct qht_bucket *b;
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    qemu_spin_lock(&b->lock);
    if (likely(!qht_map_is_stale__locked(ht, map))) {
        *pmap = map;
        return b;
    }
    qemu_spin_unlock(&b->lock);

    /* we raced with a resize; acquire ht->lock to see the updated ht->map */
    qht_lock(ht);
    map = ht->map;
    b = qht_map_to_bucket(map, hash);
    qemu_spin_lock(&b->lock);
    qht_unlock(ht);
    *pmap = map;
    return b;
}
static inline bool qht_map_needs_resize(const struct qht_map *map)
{
    return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
}
static inline void qht_chain_destroy(const struct qht_bucket *head)
{
    struct qht_bucket *curr = head->next;
    struct qht_bucket *prev;

    qemu_spin_destroy(&head->lock);
    while (curr) {
        prev = curr;
        curr = curr->next;
        qemu_vfree(prev);
    }
}
/* pass only an orphan map */
static void qht_map_destroy(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_chain_destroy(&map->buckets[i]);
    }
    qemu_vfree(map->buckets);
    g_free(map);
}
static struct qht_map *qht_map_create(size_t n_buckets)
{
    struct qht_map *map;
    size_t i;

    map = g_malloc(sizeof(*map));
    map->n_buckets = n_buckets;

    map->n_added_buckets = 0;
    map->n_added_buckets_threshold = n_buckets /
        QHT_NR_ADDED_BUCKETS_THRESHOLD_DIV;

    /* let tiny hash tables add at least one non-head bucket */
    if (unlikely(map->n_added_buckets_threshold == 0)) {
        map->n_added_buckets_threshold = 1;
    }

    map->buckets = qemu_memalign(QHT_BUCKET_ALIGN,
                                 sizeof(*map->buckets) * n_buckets);
    for (i = 0; i < n_buckets; i++) {
        qht_head_init(&map->buckets[i]);
    }
    return map;
}
void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
              unsigned int mode)
{
    struct qht_map *map;
    size_t n_buckets = qht_elems_to_buckets(n_elems);

    g_assert(cmp);
    ht->cmp = cmp;
    ht->mode = mode;
    qemu_mutex_init(&ht->lock);
    map = qht_map_create(n_buckets);
    atomic_rcu_set(&ht->map, map);
}
/* call only when there are no readers/writers left */
void qht_destroy(struct qht *ht)
{
    qht_map_destroy(ht->map);
    memset(ht, 0, sizeof(*ht));
}
static void qht_bucket_reset__locked(struct qht_bucket *head)
{
    struct qht_bucket *b = head;
    int i;

    seqlock_write_begin(&head->sequence);
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                goto done;
            }
            atomic_set(&b->hashes[i], 0);
            atomic_set(&b->pointers[i], NULL);
        }
        b = b->next;
    } while (b);
 done:
    seqlock_write_end(&head->sequence);
}
/* call with all bucket locks held */
static void qht_map_reset__all_locked(struct qht_map *map)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_reset__locked(&map->buckets[i]);
    }
    qht_map_debug__all_locked(map);
}
void qht_reset(struct qht *ht)
{
    struct qht_map *map;

    qht_map_lock_buckets__no_stale(ht, &map);
    qht_map_reset__all_locked(map);
    qht_map_unlock_buckets(map);
}
static inline void qht_do_resize(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, false);
}

static inline void qht_do_resize_and_reset(struct qht *ht, struct qht_map *new)
{
    qht_do_resize_reset(ht, new, true);
}
bool qht_reset_size(struct qht *ht, size_t n_elems)
{
    struct qht_map *new = NULL;
    struct qht_map *map;
    size_t n_buckets;

    n_buckets = qht_elems_to_buckets(n_elems);

    qht_lock(ht);
    map = ht->map;
    if (n_buckets != map->n_buckets) {
        new = qht_map_create(n_buckets);
    }
    qht_do_resize_and_reset(ht, new);
    qht_unlock(ht);

    return !!new;
}
static inline
void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
                    const void *userp, uint32_t hash)
{
    const struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (atomic_read(&b->hashes[i]) == hash) {
                /* The pointer is dereferenced before seqlock_read_retry,
                 * so (unlike qht_insert__locked) we need to use
                 * atomic_rcu_read here.
                 */
                void *p = atomic_rcu_read(&b->pointers[i]);

                if (likely(p) && likely(func(p, userp))) {
                    return p;
                }
            }
        }
        b = atomic_rcu_read(&b->next);
    } while (b);

    return NULL;
}
static __attribute__((noinline))
void *qht_lookup__slowpath(const struct qht_bucket *b, qht_lookup_func_t func,
                           const void *userp, uint32_t hash)
{
    unsigned int version;
    void *ret;

    do {
        version = seqlock_read_begin(&b->sequence);
        ret = qht_do_lookup(b, func, userp, hash);
    } while (seqlock_read_retry(&b->sequence, version));
    return ret;
}
void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
                        qht_lookup_func_t func)
{
    const struct qht_bucket *b;
    const struct qht_map *map;
    unsigned int version;
    void *ret;

    map = atomic_rcu_read(&ht->map);
    b = qht_map_to_bucket(map, hash);

    version = seqlock_read_begin(&b->sequence);
    ret = qht_do_lookup(b, func, userp, hash);
    if (likely(!seqlock_read_retry(&b->sequence, version))) {
        return ret;
    }
    /*
     * Removing the do/while from the fastpath gives a 4% perf. increase when
     * running a 100%-lookup microbenchmark.
     */
    return qht_lookup__slowpath(b, func, userp, hash);
}
void *qht_lookup(const struct qht *ht, const void *userp, uint32_t hash)
{
    return qht_lookup_custom(ht, userp, hash, ht->cmp);
}
/*
 * call with head->lock held
 * @ht is const since it is only used for ht->cmp()
 */
static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
                                struct qht_bucket *head, void *p, uint32_t hash,
                                bool *needs_resize)
{
    struct qht_bucket *b = head;
    struct qht_bucket *prev = NULL;
    struct qht_bucket *new = NULL;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                if (unlikely(b->hashes[i] == hash &&
                             ht->cmp(b->pointers[i], p))) {
                    return b->pointers[i];
                }
            } else {
                goto found;
            }
        }
        prev = b;
        b = b->next;
    } while (b);

    b = qemu_memalign(QHT_BUCKET_ALIGN, sizeof(*b));
    memset(b, 0, sizeof(*b));
    new = b;
    i = 0;
    atomic_inc(&map->n_added_buckets);
    if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
        *needs_resize = true;
    }

 found:
    /* found an empty key: acquire the seqlock and write */
    seqlock_write_begin(&head->sequence);
    if (new) {
        atomic_rcu_set(&prev->next, b);
    }
    /* smp_wmb() implicit in seqlock_write_begin. */
    atomic_set(&b->hashes[i], hash);
    atomic_set(&b->pointers[i], p);
    seqlock_write_end(&head->sequence);
    return NULL;
}
static __attribute__((noinline)) void qht_grow_maybe(struct qht *ht)
{
    struct qht_map *map;

    /*
     * If the lock is taken it probably means there's an ongoing resize,
     * so bail out.
     */
    if (qht_trylock(ht)) {
        return;
    }
    map = ht->map;
    /* another thread might have just performed the resize we were after */
    if (qht_map_needs_resize(map)) {
        struct qht_map *new = qht_map_create(map->n_buckets * 2);

        qht_do_resize(ht, new);
    }
    qht_unlock(ht);
}
bool qht_insert(struct qht *ht, void *p, uint32_t hash, void **existing)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool needs_resize = false;
    void *prev;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    prev = qht_insert__locked(ht, map, b, p, hash, &needs_resize);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);

    if (unlikely(needs_resize) && ht->mode & QHT_MODE_AUTO_RESIZE) {
        qht_grow_maybe(ht);
    }
    if (likely(prev == NULL)) {
        return true;
    }
    if (existing) {
        *existing = prev;
    }
    return false;
}
static inline bool qht_entry_is_last(const struct qht_bucket *b, int pos)
{
    if (pos == QHT_BUCKET_ENTRIES - 1) {
        if (b->next == NULL) {
            return true;
        }
        return b->next->pointers[0] == NULL;
    }
    return b->pointers[pos + 1] == NULL;
}
static void
qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
{
    qht_debug_assert(!(to == from && i == j));
    qht_debug_assert(to->pointers[i]);
    qht_debug_assert(from->pointers[j]);

    atomic_set(&to->hashes[i], from->hashes[j]);
    atomic_set(&to->pointers[i], from->pointers[j]);

    atomic_set(&from->hashes[j], 0);
    atomic_set(&from->pointers[j], NULL);
}
/*
 * Find the last valid entry in @orig, and swap it with @orig[pos], which has
 * just been invalidated.
 */
static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
{
    struct qht_bucket *b = orig;
    struct qht_bucket *prev = NULL;
    int i;

    if (qht_entry_is_last(orig, pos)) {
        orig->hashes[pos] = 0;
        atomic_set(&orig->pointers[pos], NULL);
        return;
    }
    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i]) {
                continue;
            }
            if (i > 0) {
                return qht_entry_move(orig, pos, b, i - 1);
            }
            qht_debug_assert(prev);
            return qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
        }
        prev = b;
        b = b->next;
    } while (b);
    /* no free entries other than orig[pos], so swap it with the last one */
    qht_entry_move(orig, pos, prev, QHT_BUCKET_ENTRIES - 1);
}
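/*
 * Worked example (made-up contents): with QHT_BUCKET_ENTRIES == 4, removing
 * the entry at pos 0 from a head bucket holding [A, B, C, NULL] moves the
 * last valid entry into the hole, leaving [C, B, NULL, NULL]; a subsequent
 * failed lookup stops at the first NULL slot.
 */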
/* call with b->lock held */
static inline
bool qht_remove__locked(struct qht_bucket *head, const void *p, uint32_t hash)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            void *q = b->pointers[i];

            if (unlikely(q == NULL)) {
                return false;
            }
            if (q == p) {
                qht_debug_assert(b->hashes[i] == hash);
                seqlock_write_begin(&head->sequence);
                qht_bucket_remove_entry(b, i);
                seqlock_write_end(&head->sequence);
                return true;
            }
        }
        b = b->next;
    } while (b);
    return false;
}
bool qht_remove(struct qht *ht, const void *p, uint32_t hash)
{
    struct qht_bucket *b;
    struct qht_map *map;
    bool ret;

    /* NULL pointers are not supported */
    qht_debug_assert(p);

    b = qht_bucket_lock__no_stale(ht, hash, &map);
    ret = qht_remove__locked(b, p, hash);
    qht_bucket_debug__locked(b);
    qemu_spin_unlock(&b->lock);
    return ret;
}
static inline void qht_bucket_iter(struct qht_bucket *head,
                                   const struct qht_iter *iter, void *userp)
{
    struct qht_bucket *b = head;
    int i;

    do {
        for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
            if (b->pointers[i] == NULL) {
                return;
            }
            switch (iter->type) {
            case QHT_ITER_VOID:
                iter->f.retvoid(b->pointers[i], b->hashes[i], userp);
                break;
            case QHT_ITER_RM:
                if (iter->f.retbool(b->pointers[i], b->hashes[i], userp)) {
                    /* replace i with the last valid element in the bucket */
                    seqlock_write_begin(&head->sequence);
                    qht_bucket_remove_entry(b, i);
                    seqlock_write_end(&head->sequence);
                    qht_bucket_debug__locked(b);
                    /* reevaluate i, since it just got replaced */
                    i--;
                    continue;
                }
                break;
            default:
                g_assert_not_reached();
            }
        }
        b = b->next;
    } while (b);
}
/* call with all of the map's locks held */
static inline void qht_map_iter__all_locked(struct qht_map *map,
                                            const struct qht_iter *iter,
                                            void *userp)
{
    size_t i;

    for (i = 0; i < map->n_buckets; i++) {
        qht_bucket_iter(&map->buckets[i], iter, userp);
    }
}
static inline void
do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
{
    struct qht_map *map;

    map = atomic_rcu_read(&ht->map);
    qht_map_lock_buckets(map);
    qht_map_iter__all_locked(map, iter, userp);
    qht_map_unlock_buckets(map);
}
void qht_iter(struct qht *ht, qht_iter_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retvoid = func,
        .type = QHT_ITER_VOID,
    };

    do_qht_iter(ht, &iter, userp);
}
void qht_iter_remove(struct qht *ht, qht_iter_bool_func_t func, void *userp)
{
    const struct qht_iter iter = {
        .f.retbool = func,
        .type = QHT_ITER_RM,
    };

    do_qht_iter(ht, &iter, userp);
}
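/*
 * Sketch of a removal callback for qht_iter_remove() (struct my_entry and its
 * generation field are made up for illustration): returning true makes the
 * iterator drop the entry from the table; freeing it is still up to the
 * caller, after an RCU grace period.
 *
 *     static bool my_expired(void *p, uint32_t hash, void *userp)
 *     {
 *         struct my_entry *e = p;
 *         uint32_t current_gen = *(uint32_t *)userp;
 *
 *         return e->generation < current_gen;
 *     }
 *
 *     qht_iter_remove(&ht, my_expired, &current_gen);
 */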
struct qht_map_copy_data {
    struct qht *ht;
    struct qht_map *new;
};

static void qht_map_copy(void *p, uint32_t hash, void *userp)
{
    struct qht_map_copy_data *data = userp;
    struct qht *ht = data->ht;
    struct qht_map *new = data->new;
    struct qht_bucket *b = qht_map_to_bucket(new, hash);

    /* no need to acquire b->lock because no thread has seen this map yet */
    qht_insert__locked(ht, new, b, p, hash, NULL);
}
/*
 * Atomically perform a resize and/or reset.
 * Call with ht->lock held.
 */
static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
{
    struct qht_map *old;
    const struct qht_iter iter = {
        .f.retvoid = qht_map_copy,
        .type = QHT_ITER_VOID,
    };
    struct qht_map_copy_data data;

    old = ht->map;
    qht_map_lock_buckets(old);

    if (reset) {
        qht_map_reset__all_locked(old);
    }

    if (new == NULL) {
        qht_map_unlock_buckets(old);
        return;
    }

    g_assert(new->n_buckets != old->n_buckets);
    data.ht = ht;
    data.new = new;
    qht_map_iter__all_locked(old, &iter, &data);
    qht_map_debug__all_locked(new);

    atomic_rcu_set(&ht->map, new);
    qht_map_unlock_buckets(old);
    call_rcu(old, qht_map_destroy, rcu);
}
bool qht_resize(struct qht *ht, size_t n_elems)
{
    size_t n_buckets = qht_elems_to_buckets(n_elems);
    bool ret = false;

    qht_lock(ht);
    if (n_buckets != ht->map->n_buckets) {
        struct qht_map *new;

        new = qht_map_create(n_buckets);
        qht_do_resize(ht, new);
        ret = true;
    }
    qht_unlock(ht);

    return ret;
}
/* pass @stats to qht_statistics_destroy() when done */
void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
{
    const struct qht_map *map;
    int i;

    map = atomic_rcu_read(&ht->map);

    stats->used_head_buckets = 0;
    stats->entries = 0;
    qdist_init(&stats->chain);
    qdist_init(&stats->occupancy);
    /* bail out if the qht has not yet been initialized */
    if (unlikely(map == NULL)) {
        stats->head_buckets = 0;
        return;
    }
    stats->head_buckets = map->n_buckets;

    for (i = 0; i < map->n_buckets; i++) {
        const struct qht_bucket *head = &map->buckets[i];
        const struct qht_bucket *b;
        unsigned int version;
        size_t buckets;
        size_t entries;
        int j;

        do {
            version = seqlock_read_begin(&head->sequence);
            buckets = 0;
            entries = 0;
            b = head;
            do {
                for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
                    if (atomic_read(&b->pointers[j]) == NULL) {
                        break;
                    }
                    entries++;
                }
                buckets++;
                b = atomic_rcu_read(&b->next);
            } while (b);
        } while (seqlock_read_retry(&head->sequence, version));

        if (entries) {
            qdist_inc(&stats->chain, buckets);
            qdist_inc(&stats->occupancy,
                      (double)entries / QHT_BUCKET_ENTRIES / buckets);
            stats->used_head_buckets++;
            stats->entries += entries;
        } else {
            qdist_inc(&stats->occupancy, 0);
        }
    }
}
void qht_statistics_destroy(struct qht_stats *stats)
{
    qdist_destroy(&stats->occupancy);
    qdist_destroy(&stats->chain);
}