[firewire-audio.git] / include/asm-arm/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif
/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value. If it is zero, we may have
 * won the lock, so we try exclusively storing it. A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
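/*
 * Added illustrative sketch: the ldrex/strex sequence used by
 * _raw_spin_lock()/_raw_spin_unlock() below behaves roughly like the
 * following pseudo-C, where load_exclusive()/store_exclusive() are
 * hypothetical stand-ins for the exclusive access instructions
 * (store_exclusive() returns 0 on success, like strex):
 *
 *      for (;;) {
 *              tmp = load_exclusive(&lock->lock);      -- ldrex
 *              if (tmp == 0 &&
 *                  store_exclusive(&lock->lock, 1) == 0)
 *                      break;                          -- lock acquired
 *      }
 *      smp_mb();       -- order the critical section after the acquire
 *
 *      ...critical section...
 *
 *      smp_mb();       -- order the critical section before the release
 *      lock->lock = 0; -- plain store is enough; we own the lock
 */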
typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} spinlock_t;

#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while (0)
#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_unlock_wait(x)     do { barrier(); } while (spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
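/*
 * Added usage note: callers normally reach these routines through the
 * spin_lock()/spin_unlock() wrappers in <linux/spinlock.h>.  A minimal
 * sketch of how this type is used (my_lock is a hypothetical example):
 *
 *      static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *      spin_lock_init(&my_lock);       -- or re-initialise at run time
 *      _raw_spin_lock(&my_lock);       -- the locking step behind spin_lock()
 *      ...critical section...
 *      _raw_spin_unlock(&my_lock);     -- the unlock step behind spin_unlock()
 */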
static inline void _raw_spin_lock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
"       teqeq   %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        smp_mb();
}
static inline int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned long tmp;

        __asm__ __volatile__(
"       ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&lock->lock), "r" (1)
        : "cc");

        if (tmp == 0) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}
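/*
 * Added note: _raw_spin_trylock() makes a single attempt and never spins.
 * It returns 1 when the lock was free and the exclusive store succeeded,
 * and 0 otherwise (lock already held, or the exclusive store failed).
 * Minimal sketch, reusing the hypothetical my_lock from above:
 *
 *      if (_raw_spin_trylock(&my_lock)) {
 *              ...critical section...
 *              _raw_spin_unlock(&my_lock);
 *      }
 */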
static inline void _raw_spin_unlock(spinlock_t *lock)
{
        smp_mb();

        __asm__ __volatile__(
"       str     %1, [%0]"
        :
        : "r" (&lock->lock), "r" (0)
        : "cc");
}
/*
 * RWLOCKS
 */
typedef struct {
        volatile unsigned int lock;
#ifdef CONFIG_PREEMPT
        unsigned int break_lock;
#endif
} rwlock_t;

#define RW_LOCK_UNLOCKED        (rwlock_t) { 0 }
#define rwlock_init(x)          do { *(x) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(x)     (*((volatile unsigned int *)(x)) != 0)
/*
 * Write locks are easy - we just set bit 31. When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
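/*
 * Added illustrative sketch: the rwlock word encodes both sides in one
 * counter - 0 is unlocked, bit 31 (0x80000000) marks a writer, and a
 * positive value N means N readers currently hold the lock.
 * _raw_write_lock() below is roughly this pseudo-C (load_exclusive()/
 * store_exclusive() are the same hypothetical helpers as above):
 *
 *      for (;;) {
 *              tmp = load_exclusive(&rw->lock);
 *              if (tmp == 0 &&
 *                  store_exclusive(&rw->lock, 0x80000000) == 0)
 *                      break;          -- we are now the sole owner
 *      }
 *      smp_mb();
 */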
static inline void _raw_write_lock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        smp_mb();
}
static inline int _raw_write_trylock(rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
"       strexeq %0, %2, [%1]"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        if (tmp == 0) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}
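/*
 * Added note: like the spinlock version, _raw_write_trylock() makes a
 * single attempt - it returns 1 only if the lock was free and the
 * exclusive store succeeded, otherwise 0.  The "1:" label above is never
 * branched to; there is no retry loop here.
 */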
static inline void _raw_write_unlock(rwlock_t *rw)
{
        smp_mb();

        __asm__ __volatile__(
        "str    %1, [%0]"
        :
        : "r" (&rw->lock), "r" (0)
        : "cc");
}
/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy. We may have multiple read locks
 * currently active. However, we know we won't have any write
 * locks.
 */
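/*
 * Added illustrative sketch: _raw_read_lock() below is roughly this
 * pseudo-C (same hypothetical load_exclusive()/store_exclusive() helpers;
 * the sign-bit test stands in for the pl/mi condition codes):
 *
 *      for (;;) {
 *              tmp = load_exclusive(&rw->lock);        -- ldrex
 *              tmp++;                                  -- adds
 *              if ((int)tmp < 0)
 *                      continue;       -- bit 31 set: a writer holds it
 *              if (store_exclusive(&rw->lock, tmp) == 0)
 *                      break;          -- reader count bumped
 *      }
 *      smp_mb();
 */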
static inline void _raw_read_lock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       adds    %0, %0, #1\n"
"       strexpl %1, %0, [%2]\n"
"       rsbpls  %0, %1, #0\n"
"       bmi     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");

        smp_mb();
}
static inline void _raw_read_unlock(rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        smp_mb();

        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, #1\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");
}
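/*
 * Added note: the unlock path must also loop, because strex can fail when
 * another CPU touched the lock word between our ldrex and strex (for
 * example another reader dropping its count).  Retrying until the
 * exclusive store succeeds ensures the decrement is never lost.
 */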
#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */