/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/war.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */
#define __raw_spin_is_locked(x)	((x)->lock != 0)
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_unlock_wait(x) \
	do { cpu_relax(); } while ((x)->lock)
/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
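/*
 * Illustrative use only (a rough sketch, not part of this file's API):
 * callers normally go through the generic wrappers in
 * <linux/spinlock.h>, which route down to the __raw_* primitives
 * defined below.  The two variants look like:
 *
 *	DEFINE_SPINLOCK(mylock);
 *	unsigned long flags;
 *
 *	spin_lock(&mylock);			leaves IRQs alone
 *	spin_unlock(&mylock);
 *
 *	spin_lock_irqsave(&mylock, flags);	disables local IRQs
 *	spin_unlock_irqrestore(&mylock, flags);
 */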
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}

	smp_mb();
}
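/*
 * A rough C rendering of the acquire loop above, with a hypothetical
 * store_conditional() standing in for the sc instruction (the real
 * atomicity comes from the ll/sc link register, not from anything
 * expressible in C):
 *
 *	do {
 *		tmp = lock->lock;		ll: load-linked
 *	} while (tmp != 0 ||			already held, spin
 *		 !store_conditional(&lock->lock, 1));
 */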
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
	"	.set	noreorder	# __raw_spin_unlock	\n"
	"	sw	$0, %0					\n"
	"	.set	reorder					\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}
static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	 nop						\n"
		"	andi	%2, %0, 1				\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 andi	%2, %0, 1				\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	smp_mb();

	return res == 0;
}
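/*
 * res holds the lock bit that was already set in the lock word, so
 * the function returns nonzero exactly when the lock was free and we
 * took it.  Sketch of a typical caller (via the generic wrappers):
 *
 *	if (spin_trylock(&mylock)) {
 *		... critical section ...
 *		spin_unlock(&mylock);
 *	} else {
 *		... contended, fall back to something else ...
 *	}
 */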
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
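/*
 * The lock word encodes the whole state: zero means unlocked, a
 * positive value counts the active readers, and the sign bit
 * (0x80000000, installed by "lui %1, 0x8000" in the writer paths
 * below) marks a writer.  For example:
 *
 *	0x00000000	unlocked
 *	0x00000003	held by three readers
 *	0x80000000	held by one writer
 */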
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_read_can_lock(rw)	((rw)->lock >= 0)
/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define __raw_write_can_lock(rw)	(!(rw)->lock)
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_mb();
}
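/*
 * Rough C equivalent of the reader loop above (store_conditional()
 * is again a stand-in for sc): spin while a writer holds the lock,
 * i.e. while the word is negative, then bump the reader count:
 *
 *	do {
 *		tmp = rw->lock;
 *	} while (tmp < 0 ||
 *		 !store_conditional(&rw->lock, tmp + 1));
 */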
/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer. */
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	smp_mb();

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# __raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_unlock	\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}

	smp_mb();
}
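/*
 * The writer loop above in the same rough C form: wait until the
 * word is completely zero (no readers, no writer), then claim the
 * sign bit in one atomic step:
 *
 *	do {
 *		tmp = rw->lock;
 *	} while (tmp != 0 ||
 *		 !store_conditional(&rw->lock, 0x80000000));
 */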
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"				# __raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}
/* Fail only when a writer (sign bit) holds the lock; concurrent
   readers must not make read_trylock() fail, cf. __raw_read_can_lock()
   above, hence bltz rather than bnez on the loaded lock word. */
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	.set	reorder					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_ORDERING_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_read_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bltz	%1, 2f					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		__WEAK_ORDERING_MB
		"	li	%2, 1					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int tmp;
	int ret;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_ORDERING_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# __raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		__WEAK_ORDERING_MB
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}
#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_SPINLOCK_H */