1 #ifndef __LINUX_LOCKREF_H
2 #define __LINUX_LOCKREF_H
/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */
17 #include <linux/spinlock.h>
25 * lockref_get - Increments reference count unconditionally
26 * @lockcnt: pointer to lockref structure
28 * This operation is only valid if you already hold a reference
29 * to the object, so you know the count cannot be zero.
31 static inline void lockref_get(struct lockref
*lockref
)
33 spin_lock(&lockref
->lock
);
35 spin_unlock(&lockref
->lock
);
39 * lockref_get_not_zero - Increments count unless the count is 0
40 * @lockcnt: pointer to lockref structure
41 * Return: 1 if count updated successfully or 0 if count is 0
43 static inline int lockref_get_not_zero(struct lockref
*lockref
)
47 spin_lock(&lockref
->lock
);
52 spin_unlock(&lockref
->lock
);
57 * lockref_get_or_lock - Increments count unless the count is 0
58 * @lockcnt: pointer to lockref structure
59 * Return: 1 if count updated successfully or 0 if count was zero
60 * and we got the lock instead.
62 static inline int lockref_get_or_lock(struct lockref
*lockref
)
64 spin_lock(&lockref
->lock
);
68 spin_unlock(&lockref
->lock
);
73 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
74 * @lockcnt: pointer to lockref structure
75 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
77 static inline int lockref_put_or_lock(struct lockref
*lockref
)
79 spin_lock(&lockref
->lock
);
80 if (lockref
->count
<= 1)
83 spin_unlock(&lockref
->lock
);
87 #endif /* __LINUX_LOCKREF_H */