#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */
#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			unsigned int count;
		};
	};
};
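
/*
 * Why the union matters: with USE_CMPXCHG_LOCKREF the spinlock and the
 * count share one aligned 64-bit word, so an implementation can update
 * both with a single cmpxchg instead of taking the lock.  A simplified
 * sketch of such a lockless increment, loosely modelled on the loop in
 * lib/lockref.c (details such as relaxed cmpxchg variants, likely()
 * hints and cpu_relax() backoff are omitted, and the raw_lock field
 * access assumes a non-debug spinlock layout):
 *
 *	void lockref_get(struct lockref *lockref)
 *	{
 *		struct lockref old, new;
 *
 *		old.lock_count = ACCESS_ONCE(lockref->lock_count);
 *		while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *			u64 prev = old.lock_count;
 *
 *			new = old;
 *			new.count++;
 *			old.lock_count = cmpxchg64(&lockref->lock_count,
 *						   prev, new.lock_count);
 *			if (old.lock_count == prev)
 *				return;
 *		}
 *		spin_lock(&lockref->lock);
 *		lockref->count++;
 *		spin_unlock(&lockref->lock);
 *	}
 *
 * If the lock is observed to be held, or the cmpxchg keeps losing
 * races, the implementation falls back to taking the spinlock for real.
 */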
extern void lockref_get(struct lockref *);
extern int lockref_get_not_zero(struct lockref *);
extern int lockref_get_or_lock(struct lockref *);
extern int lockref_put_or_lock(struct lockref *);

extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);
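
/*
 * Usage sketch (illustrative only: 'obj', its 'ref' member and
 * free_obj() are hypothetical, not part of this API).  A lookup takes
 * a reference unless the object is already being torn down, and a
 * release stays lockless unless it might drop the last reference:
 *
 *	if (!lockref_get_not_dead(&obj->ref))
 *		return NULL;		lost the race with teardown
 *
 *	...
 *
 *	if (lockref_put_or_lock(&obj->ref))
 *		return;			lockless decrement, count was > 1
 *
 *	count was 1 and obj->ref.lock is now held, so tear it down:
 *
 *	lockref_mark_dead(&obj->ref);
 *	spin_unlock(&obj->ref.lock);
 *	free_obj(obj);
 */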
/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
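
/*
 * A dead lockref carries a negative value in its count field (that is
 * what lockref_mark_dead() stores while holding the lock), hence the
 * signed cast above.  Sketch of a lookup that must not resurrect a
 * dying object ('obj' is hypothetical):
 *
 *	spin_lock(&obj->ref.lock);
 *	if (__lockref_is_dead(&obj->ref)) {
 *		spin_unlock(&obj->ref.lock);
 *		return NULL;
 *	}
 *	obj->ref.count++;
 *	spin_unlock(&obj->ref.lock);
 */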
#endif /* __LINUX_LOCKREF_H */