/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS           4
#define RHT_HASH_BITS           27
#define RHT_BASE_SHIFT          RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
struct rhash_head {
        struct rhash_head __rcu         *next;
};
/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
        unsigned int            size;
        unsigned int            rehash;
        u32                     hash_rnd;
        unsigned int            locks_mask;
        spinlock_t              *locks;
        struct list_head        walkers;
        struct rcu_head         rcu;

        struct bucket_table __rcu *future_tbl;

        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
        struct rhashtable *ht;
        const void *key;
};
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
                               const void *obj);
/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
        size_t                  nelem_hint;
        size_t                  key_len;
        size_t                  key_offset;
        size_t                  head_offset;
        unsigned int            insecure_max_entries;
        unsigned int            max_size;
        unsigned int            min_size;
        u32                     nulls_base;
        bool                    insecure_elasticity;
        bool                    automatic_shrinking;
        size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
        rht_obj_cmpfn_t         obj_cmpfn;
};
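
/*
 * Example: a minimal, hypothetical parameter block for a table keyed by a
 * u32 id embedded in each object. struct test_obj and test_params are
 * illustrative only, not part of this header:
 *
 *      struct test_obj {
 *              u32                     id;
 *              struct rhash_head       node;
 *      };
 *
 *      static const struct rhashtable_params test_params = {
 *              .key_len             = sizeof(u32),
 *              .key_offset          = offsetof(struct test_obj, id),
 *              .head_offset         = offsetof(struct test_obj, node),
 *              .automatic_shrinking = true,
 *      };
 */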
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
        struct bucket_table __rcu       *tbl;
        atomic_t                        nelems;
        unsigned int                    key_len;
        unsigned int                    elasticity;
        struct rhashtable_params        p;
        struct work_struct              run_work;
        struct mutex                    mutex;
        spinlock_t                      lock;
};
/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
        struct list_head list;
        struct bucket_table *tbl;
};
/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
        struct rhashtable *ht;
        struct rhash_head *p;
        struct rhashtable_walker *walker;
        unsigned int slot;
        unsigned int skip;
};
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
        return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
        ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
        return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
        return ((unsigned long) ptr) >> 1;
}
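
/*
 * Worked example (illustrative): with nulls_base = 1 << RHT_BASE_SHIFT and
 * hash 0, rht_marker() returns NULLS_MARKER(1 << 27), i.e. the odd pointer
 * value ((1UL << 27) << 1) | 1. rht_is_a_nulls() is true for that pointer,
 * and rht_get_nulls_value() recovers 1 << 27 by shifting the marker bit away.
 */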
static inline void *rht_obj(const struct rhashtable *ht,
                            const struct rhash_head *he)
{
        return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
                                            unsigned int hash)
{
        return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}
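
/*
 * Worked example (illustrative): for a table of size 1024, the low
 * RHT_HASH_RESERVED_SPACE (5) bits, reserved for the nulls marker encoding,
 * are shifted away first, so bucket = (hash >> 5) & 1023.
 */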
static inline unsigned int rht_key_hashfn(
        struct rhashtable *ht, const struct bucket_table *tbl,
        const void *key, const struct rhashtable_params params)
{
        unsigned int hash;

        /* params must be equal to ht->p if it isn't constant. */
        if (!__builtin_constant_p(params.key_len))
                hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
        else if (params.key_len) {
                unsigned int key_len = params.key_len;

                if (params.hashfn)
                        hash = params.hashfn(key, key_len, tbl->hash_rnd);
                else if (key_len & (sizeof(u32) - 1))
                        hash = jhash(key, key_len, tbl->hash_rnd);
                else
                        hash = jhash2(key, key_len / sizeof(u32),
                                      tbl->hash_rnd);
        } else {
                unsigned int key_len = ht->p.key_len;

                if (params.hashfn)
                        hash = params.hashfn(key, key_len, tbl->hash_rnd);
                else
                        hash = jhash(key, key_len, tbl->hash_rnd);
        }

        return rht_bucket_index(tbl, hash);
}
static inline unsigned int rht_head_hashfn(
        struct rhashtable *ht, const struct bucket_table *tbl,
        const struct rhash_head *he, const struct rhashtable_params params)
{
        const char *ptr = rht_obj(ht, he);

        return likely(params.obj_hashfn) ?
               rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
                                                       ht->p.key_len,
                                                       tbl->hash_rnd)) :
               rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
                                     const struct bucket_table *tbl)
{
        /* Expand table when exceeding 75% load */
        return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
               (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
                                       const struct bucket_table *tbl)
{
        /* Shrink table beneath 30% load */
        return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
               tbl->size > ht->p.min_size;
}
/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
                                      const struct bucket_table *tbl)
{
        return atomic_read(&ht->nelems) > tbl->size &&
               (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
                                      const struct bucket_table *tbl)
{
        return ht->p.insecure_max_entries &&
               atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking: locking the bucket in both tables during
 * a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
                                          unsigned int hash)
{
        return &tbl->locks[hash & tbl->locks_mask];
}
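
/*
 * Example (illustrative): with tbl->locks_mask == 127, hashes 5 and 133
 * select the same lock, &tbl->locks[5], so mutations on both chains
 * serialize on one spinlock.
 */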
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
                                             u32 hash)
{
        return 1;
}
#endif /* CONFIG_PROVE_LOCKING */
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                           struct rhash_head *obj,
                           struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
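
/*
 * Example walk (sketch; my_ht and struct my_obj are hypothetical). A
 * concurrent resize makes rhashtable_walk_start() return -EAGAIN, and
 * rhashtable_walk_next() return ERR_PTR(-EAGAIN), in which case entries
 * may be seen more than once:
 *
 *      struct rhashtable_iter iter;
 *      struct my_obj *obj;
 *      int err;
 *
 *      err = rhashtable_walk_init(&my_ht, &iter);
 *      if (err)
 *              return err;
 *      err = rhashtable_walk_start(&iter);
 *      if (err && err != -EAGAIN)
 *              goto exit;
 *      while ((obj = rhashtable_walk_next(&iter))) {
 *              if (IS_ERR(obj)) {
 *                      if (PTR_ERR(obj) == -EAGAIN)
 *                              continue;
 *                      break;
 *              }
 *              (process obj; sleeping is not allowed here)
 *      }
 *      rhashtable_walk_stop(&iter);
 * exit:
 *      rhashtable_walk_exit(&iter);
 */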
void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg);
void rhashtable_destroy(struct rhashtable *ht);
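
/*
 * Example lifecycle (sketch; test_params as in the earlier example, and
 * free_fn is a hypothetical callback that frees one object):
 *
 *      struct rhashtable ht;
 *      int err;
 *
 *      err = rhashtable_init(&ht, &test_params);
 *      if (err)
 *              return err;
 *      (... insert/lookup/remove ...)
 *      rhashtable_free_and_destroy(&ht, free_fn, NULL);
 */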
#define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
        rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
        rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
        rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))
#define rht_entry(tpos, pos, member) \
        ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
        for (pos = rht_dereference_bucket(head, tbl, hash); \
             !rht_is_a_nulls(pos); \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
        rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
        for (pos = rht_dereference_bucket(head, tbl, hash); \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
        rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
                                    tbl, hash, member)
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)          \
        for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash),  \
             next = !rht_is_a_nulls(pos) ?                                   \
                       rht_dereference_bucket(pos->next, tbl, hash) : NULL;  \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);         \
             pos = next,                                                     \
             next = !rht_is_a_nulls(pos) ?                                   \
                       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)         \
        for (({barrier(); }),                                   \
             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
             !rht_is_a_nulls(pos);                              \
             pos = rcu_dereference_raw(pos->next))
/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
        rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
        for (({barrier(); }),                                               \
             pos = rht_dereference_bucket_rcu(head, tbl, hash);             \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
             pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)            \
        rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
                                        tbl, hash, member)
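
/*
 * Example (sketch; struct my_obj with a "node" member is hypothetical):
 *
 *      struct my_obj *obj;
 *      struct rhash_head *pos;
 *
 *      rcu_read_lock();
 *      rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
 *              (read-side use of obj; must not block)
 *      }
 *      rcu_read_unlock();
 */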
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
                                     const void *obj)
{
        struct rhashtable *ht = arg->ht;
        const char *ptr = obj;

        return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
        struct rhashtable *ht, const void *key,
        const struct rhashtable_params params)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        const struct bucket_table *tbl;
        struct rhash_head *he;
        unsigned int hash;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
        hash = rht_key_hashfn(ht, tbl, key, params);
        rht_for_each_rcu(he, tbl, hash) {
                if (params.obj_cmpfn ?
                    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
                    rhashtable_compare(&arg, rht_obj(ht, he)))
                        continue;
                rcu_read_unlock();
                return rht_obj(ht, he);
        }

        /* Ensure we see any new tables. */
        smp_rmb();

        tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (unlikely(tbl))
                goto restart;
        rcu_read_unlock();

        return NULL;
}
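
/*
 * Example (sketch; my_ht, test_params and struct test_obj as in the
 * earlier example):
 *
 *      u32 id = 42;
 *      struct test_obj *obj;
 *
 *      rcu_read_lock();
 *      obj = rhashtable_lookup_fast(&my_ht, &id, test_params);
 *      if (obj)
 *              (use obj; it is only stable while the RCU read lock, or a
 *               reference taken under it, is held)
 *      rcu_read_unlock();
 */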
/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
        struct rhashtable *ht, const void *key, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        struct bucket_table *tbl, *new_tbl;
        struct rhash_head *head;
        spinlock_t *lock;
        unsigned int elasticity;
        unsigned int hash;
        int err;

restart:
        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);

        /* All insertions must grab the oldest table containing
         * the hashed bucket that is yet to be rehashed.
         */
        for (;;) {
                hash = rht_head_hashfn(ht, tbl, obj, params);
                lock = rht_bucket_lock(tbl, hash);
                spin_lock_bh(lock);

                if (tbl->rehash <= hash)
                        break;

                spin_unlock_bh(lock);
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        }

        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (unlikely(new_tbl)) {
                err = rhashtable_insert_slow(ht, key, obj, new_tbl);
                if (err == -EAGAIN)
                        goto slow_path;
                goto out;
        }

        err = -E2BIG;
        if (unlikely(rht_grow_above_max(ht, tbl)))
                goto out;

        if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
                spin_unlock_bh(lock);
                err = rhashtable_insert_rehash(ht);
                rcu_read_unlock();
                if (err)
                        return err;

                goto restart;
        }

        err = -EEXIST;
        elasticity = ht->elasticity;
        rht_for_each(head, tbl, hash) {
                /* Reject duplicates when a key was given. */
                if (key &&
                    unlikely(!(params.obj_cmpfn ?
                               params.obj_cmpfn(&arg, rht_obj(ht, head)) :
                               rhashtable_compare(&arg, rht_obj(ht, head)))))
                        goto out;
                /* Chain longer than the elasticity bound: force a rehash. */
                if (!--elasticity)
                        goto slow_path;
        }

        err = 0;

        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
                schedule_work(&ht->run_work);

out:
        spin_unlock_bh(lock);
        rcu_read_unlock();

        return err;
}
/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
static inline int rhashtable_insert_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        return __rhashtable_insert_fast(ht, NULL, obj, params);
}
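
/*
 * Example (sketch; my_ht, test_params and struct test_obj as in the
 * earlier example):
 *
 *      struct test_obj *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);
 *      int err;
 *
 *      if (!obj)
 *              return -ENOMEM;
 *      obj->id = 42;
 *      err = rhashtable_insert_fast(&my_ht, &obj->node, test_params);
 *      if (err)
 *              kfree(obj);
 */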
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
static inline int rhashtable_lookup_insert_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        const char *key = rht_obj(ht, obj);

        BUG_ON(ht->p.obj_hashfn);

        return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
                                        params);
}
/**
 * rhashtable_lookup_insert_key - search and insert object into hash table
 *                                with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
        struct rhashtable *ht, const void *key, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        BUG_ON(!ht->p.obj_hashfn || !key);

        return __rhashtable_insert_fast(ht, key, obj, params);
}
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
        struct rhashtable *ht, struct bucket_table *tbl,
        struct rhash_head *obj, const struct rhashtable_params params)
{
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
        spinlock_t *lock;
        unsigned int hash;
        int err = -ENOENT;

        hash = rht_head_hashfn(ht, tbl, obj, params);
        lock = rht_bucket_lock(tbl, hash);

        spin_lock_bh(lock);

        pprev = &tbl->buckets[hash];
        rht_for_each(he, tbl, hash) {
                if (he != obj) {
                        pprev = &he->next;
                        continue;
                }

                rcu_assign_pointer(*pprev, obj->next);
                err = 0;
                break;
        }

        spin_unlock_bh(lock);

        return err;
}
/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via the deferred worker if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        struct bucket_table *tbl;
        int err;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);

        /* Because we have already taken (and released) the bucket
         * lock in old_tbl, if we find that future_tbl is not yet
         * visible then that guarantees the entry to still be in
         * the old tbl if it exists.
         */
        while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
               (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
                ;

        if (err)
                goto out;

        atomic_dec(&ht->nelems);
        if (unlikely(ht->p.automatic_shrinking &&
                     rht_shrink_below_30(ht, tbl)))
                schedule_work(&ht->run_work);

out:
        rcu_read_unlock();

        return err;
}
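
/*
 * Example (sketch; names as in the earlier examples, and the object is
 * assumed to embed a struct rcu_head named "rcu" so readers can finish
 * before it is freed):
 *
 *      if (rhashtable_remove_fast(&my_ht, &obj->node, test_params) == 0)
 *              kfree_rcu(obj, rcu);
 */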
#endif /* _LINUX_RHASHTABLE_H */