lib/lockref.c

#include <linux/export.h>
#include <linux/lockref.h>
#include <linux/mutex.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
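
/*
 * Illustrative note (not part of the original file): with
 * USE_CMPXCHG_LOCKREF set, a call site such as lockref_get() below
 * expands to roughly the following (BUILD_BUG_ON elided):
 *
 *	struct lockref old;
 *	old.lock_count = ACCESS_ONCE(lockref->lock_count);
 *	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
 *		struct lockref new = old, prev = old;
 *		new.count++;					// CODE
 *		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
 *						   old.lock_count,
 *						   new.lock_count);
 *		if (likely(old.lock_count == prev.lock_count))
 *			return;					// SUCCESS
 *		arch_mutex_cpu_relax();
 *	}
 *
 * The lockless update is attempted only while the embedded spinlock is
 * observed unlocked; a racing update retries with the value reloaded by
 * the failed cmpxchg, and once the lock is seen held the loop exits to
 * the spinlocked slow path that follows the macro.
 */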

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
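
/*
 * Example (illustrative only; "struct my_obj" is hypothetical): callers
 * use lockref_get() when they already own a reference, e.g. when handing
 * a second reference to another thread:
 *
 *	struct my_obj {
 *		struct lockref ref;
 *	};
 *
 *	static struct my_obj *my_obj_dup(struct my_obj *obj)
 *	{
 *		lockref_get(&obj->ref);	// count is known to be non-zero
 *		return obj;
 *	}
 */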

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
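
/*
 * Example (illustrative only, reusing the hypothetical my_obj): lookup
 * paths that can race with a final put must use the conditional get:
 *
 *	static struct my_obj *my_obj_lookup(struct my_obj *obj)
 *	{
 *		if (!obj || !lockref_get_not_zero(&obj->ref))
 *			return NULL;	// count hit zero; object going away
 *		return obj;
 *	}
 */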

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
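
/*
 * Illustrative caller contract (hypothetical my_obj again): when
 * lockref_get_or_lock() returns 0 the count was zero and lockref->lock
 * is now held, so the caller must either revive the object or release
 * the lock:
 *
 *	if (!lockref_get_or_lock(&obj->ref)) {
 *		// ->lock is held, count is still zero
 *		obj->ref.count = 1;		// e.g. resurrect the object
 *		spin_unlock(&obj->ref.lock);
 *	}
 */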

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
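
/*
 * Illustrative caller pattern (my_obj_free() is hypothetical), similar
 * in spirit to dput(): a 0 return means the count was <= 1, nothing was
 * decremented, and lockref->lock is now held:
 *
 *	bool dead;
 *
 *	if (lockref_put_or_lock(&obj->ref))
 *		return;			// fast path: just dropped one ref
 *	// slow path: ->lock held, count <= 1 and not yet decremented
 *	dead = (--obj->ref.count == 0);
 *	spin_unlock(&obj->ref.lock);
 *	if (dead)
 *		my_obj_free(obj);
 */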

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
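
/*
 * Illustrative teardown sequence: marking the lockref dead under its own
 * lock makes every subsequent lockref_get_not_dead() fail:
 *
 *	spin_lock(&obj->ref.lock);
 *	lockref_mark_dead(&obj->ref);
 *	spin_unlock(&obj->ref.lock);
 */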

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
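
/*
 * Illustrative counterpart to lockref_mark_dead(): lockless lookups
 * (e.g. under rcu_read_lock()) take a reference only if the object has
 * not already been killed; "slot" is a hypothetical RCU-protected
 * pointer:
 *
 *	rcu_read_lock();
 *	obj = rcu_dereference(slot);
 *	if (obj && !lockref_get_not_dead(&obj->ref))
 *		obj = NULL;		// dead: treat as not found
 *	rcu_read_unlock();
 */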