include/linux/lockref.h

#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
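
/*
 * When USE_CMPXCHG_LOCKREF is true, the spinlock and the count share a
 * single 64-bit word, so the common get/put operations can first be
 * attempted without taking the lock at all.  A simplified sketch of the
 * lockless increment for a struct lockref *l (illustrative only; the
 * real retry loop lives in lib/lockref.c and also bounds the number of
 * retries):
 *
 *	struct lockref old, new;
 *
 *	old.lock_count = READ_ONCE(l->lock_count);
 *	while (!spin_is_locked(&old.lock)) {	// schematic lock check
 *		new = old;
 *		new.count++;
 *		if (cmpxchg64(&l->lock_count, old.lock_count,
 *			      new.lock_count) == old.lock_count)
 *			return;			// got the reference locklessly
 *		old.lock_count = READ_ONCE(l->lock_count);
 *	}
 *	// the lock appears held: fall back to the spinlocked slow path
 */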

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
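
/*
 * This header declares no init helper, so an embedder sets up both
 * halves directly.  Hedged example (the initial count of 1 is chosen
 * for illustration):
 *
 *	struct lockref ref;
 *
 *	spin_lock_init(&ref.lock);
 *	ref.count = 1;
 */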

extern void lockref_get(struct lockref *);		/* unconditionally take a ref */
extern int lockref_put_return(struct lockref *);	/* new count, or -1 if the lockless update failed */
extern int lockref_get_not_zero(struct lockref *);	/* 1 if ref taken, 0 if count was zero */
extern int lockref_get_or_lock(struct lockref *);	/* 0 means no ref taken, lock now held */
extern int lockref_put_or_lock(struct lockref *);	/* 0 means last ref, lock now held */
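
/*
 * Hedged usage sketch for dropping a reference ("obj" and
 * teardown_locked() are hypothetical): when the count would hit zero,
 * the fast path gives up and returns 0 with the spinlock held, so the
 * caller can tear the object down under the lock:
 *
 *	if (!lockref_put_or_lock(&obj->ref)) {
 *		// last reference: obj->ref.lock is now held
 *		teardown_locked(obj);		// hypothetical helper
 *		spin_unlock(&obj->ref.lock);
 *	}
 */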

extern void lockref_mark_dead(struct lockref *);	/* caller must hold the lock */
extern int lockref_get_not_dead(struct lockref *);	/* 1 if ref taken, 0 if dead */
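
/*
 * Hedged sketch of the "dead" protocol (names are illustrative): mark
 * the lockref dead under its lock once the object starts going away,
 * and have racing lookups use lockref_get_not_dead() so they back off
 * instead of resurrecting it:
 *
 *	spin_lock(&obj->ref.lock);
 *	lockref_mark_dead(&obj->ref);
 *	spin_unlock(&obj->ref.lock);
 *
 *	// in a concurrent lookup path:
 *	if (!lockref_get_not_dead(&obj->ref))
 *		return NULL;	// object is being torn down
 */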

/* Must be called under the spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}

#endif /* __LINUX_LOCKREF_H */