#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_type_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. version of UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * Public types and naming conventions:
 * ------------------------------------
 * spinlock_t:				type: sleep-lock
 * raw_spinlock_t:			type: spin-lock	(debug)
 *
 * spin_lock([raw_]spinlock_t):		API: acquire lock, both types
 *
 * Internal types and naming conventions:
 * -------------------------------------
 * __raw_spinlock_t:			type: lowlevel spin-lock
 *
 * _spin_lock(struct rt_mutex):		API: acquire sleep-lock
 * __spin_lock(raw_spinlock_t):		API: acquire spin-lock	(highlevel)
 * _raw_spin_lock(raw_spinlock_t):	API: acquire spin-lock	(debug)
 * __raw_spin_lock(__raw_spinlock_t):	API: acquire spin-lock	(lowlevel)
 *
 * spin_lock(raw_spinlock_t) translates into the following chain of
 * calls/inlines/macros, if spin-lock debugging is enabled:
 *
 *       spin_lock()			[include/linux/spinlock.h]
 * ->    __spin_lock()			[kernel/spinlock.c]
 *  ->   _raw_spin_lock()		[lib/spinlock_debug.c]
 *   ->  __raw_spin_lock()		[include/asm/spinlock.h]
 *
 * spin_lock(spinlock_t) translates into the following chain of
 * calls/inlines/macros:
 *
 *       spin_lock()			[include/linux/spinlock.h]
 *  ->   _spin_lock()			[include/linux/spinlock.h]
 *   ->  rt_spin_lock()			[kernel/rtmutex.c]
 *    -> rt_spin_lock_fastlock()	[kernel/rtmutex.c]
 *     -> rt_spin_lock_slowlock()	[kernel/rtmutex.c]
 */
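/*
 * Editor's illustration (not part of the original header; the "example_*"
 * names are hypothetical): both lock flavours are driven through the same
 * spin_lock()/spin_unlock() names, but per the conventions above a
 * spinlock_t is a sleep-lock on PREEMPT_RT while a raw_spinlock_t always
 * busy-waits:
 *
 *	static DEFINE_SPINLOCK(example_sleep_lock);
 *	static raw_spinlock_t example_raw_lock =
 *		RAW_SPIN_LOCK_UNLOCKED(example_raw_lock);
 *
 *	spin_lock(&example_sleep_lock);	 -> _spin_lock()/rt_spin_lock()
 *	spin_unlock(&example_sleep_lock);
 *
 *	spin_lock(&example_raw_lock);	 -> __spin_lock()/__raw_spin_lock()
 *	spin_unlock(&example_raw_lock);
 */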
#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/irqflags.h>
#include <linux/pickop.h>

#include <asm/system.h>
/*
 * Pull the raw_spinlock_t and raw_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);

/*
 * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif
/*
 * Pull the RT types:
 */
#include <linux/rt_lock.h>

#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock)	((lock)->break_lock)
#else

#ifdef __raw_spin_is_contended
#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
#else
#define spin_is_contended(lock)	(((void)(lock), 0))
#endif /*__raw_spin_is_contended*/
#endif
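/*
 * Editor's sketch (hypothetical "example_*" names): spin_is_contended()
 * lets a lock holder voluntarily drop and re-take a contended lock so
 * waiters can make progress during a long walk:
 *
 *	spin_lock(&example_lock);
 *	while (example_more_work()) {
 *		example_do_one_step();
 *		if (spin_is_contended(&example_lock)) {
 *			spin_unlock(&example_lock);
 *			cpu_relax();
 *			spin_lock(&example_lock);
 *		}
 *	}
 *	spin_unlock(&example_lock);
 */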
/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
extern __lockfunc void _raw_spin_lock(raw_spinlock_t *lock);
# define _raw_spin_lock_flags(lock, flags)	_raw_spin_lock(lock)
extern __lockfunc int _raw_spin_trylock(raw_spinlock_t *lock);
extern __lockfunc void _raw_spin_unlock(raw_spinlock_t *lock);
extern __lockfunc void _raw_read_lock(raw_rwlock_t *lock);
extern __lockfunc int _raw_read_trylock(raw_rwlock_t *lock);
extern __lockfunc void _raw_read_unlock(raw_rwlock_t *lock);
extern __lockfunc void _raw_write_lock(raw_rwlock_t *lock);
extern __lockfunc int _raw_write_trylock(raw_rwlock_t *lock);
extern __lockfunc void _raw_write_unlock(raw_rwlock_t *lock);
#else
# define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
# define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
#endif
extern int __bad_spinlock_type(void);
extern int __bad_rwlock_type(void);

extern void
__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);

extern void __lockfunc rt_spin_lock(spinlock_t *lock);
extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
extern int __lockfunc
rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
extern int _atomic_dec_and_spin_lock(spinlock_t *lock, atomic_t *atomic);

/*
 * lockdep-less calls, for derived types like rwlock:
 * (for trylock they can use rt_mutex_trylock() directly.)
 */
extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
#ifdef CONFIG_PREEMPT_RT
# define _spin_lock(l)			rt_spin_lock(l)
# define _spin_lock_nested(l, s)	rt_spin_lock_nested(l, s)
# define _spin_lock_bh(l)		rt_spin_lock(l)
# define _spin_lock_irq(l)		rt_spin_lock(l)
# define _spin_unlock(l)		rt_spin_unlock(l)
# define _spin_unlock_no_resched(l)	rt_spin_unlock(l)
# define _spin_unlock_bh(l)		rt_spin_unlock(l)
# define _spin_unlock_irq(l)		rt_spin_unlock(l)
# define _spin_unlock_irqrestore(l, f)	rt_spin_unlock(l)
static inline unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	rt_spin_lock(lock);
	return 0;
}
static inline unsigned long __lockfunc
_spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
	rt_spin_lock_nested(lock, subclass);
	return 0;
}
#else
static inline unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	return 0;
}
static inline unsigned long __lockfunc
_spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
{
	return 0;
}
# define _spin_lock(l)			do { } while (0)
# define _spin_lock_nested(l, s)	do { } while (0)
# define _spin_lock_bh(l)		do { } while (0)
# define _spin_lock_irq(l)		do { } while (0)
# define _spin_unlock(l)		do { } while (0)
# define _spin_unlock_no_resched(l)	do { } while (0)
# define _spin_unlock_bh(l)		do { } while (0)
# define _spin_unlock_irq(l)		do { } while (0)
# define _spin_unlock_irqrestore(l, f)	do { } while (0)
#endif
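/*
 * Editor's note (illustrative, hypothetical "example_*" names): on
 * PREEMPT_RT the sleep-lock irqsave variants do not actually disable
 * hardware interrupts - _spin_lock_irqsave() above just takes the rtmutex
 * and hands back a dummy 0 in "flags", and _spin_unlock_irqrestore()
 * ignores the flags entirely:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&example_sleep_lock, flags);	 may sleep on RT
 *	example_update_state();
 *	spin_unlock_irqrestore(&example_sleep_lock, flags);  flags unused
 */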
#define _spin_lock_init(sl, n, f, l)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__rt_spin_lock_init(sl, n, &__key);		\
} while (0)
# ifdef CONFIG_PREEMPT_RT
# define _spin_can_lock(l)	(!rt_mutex_is_locked(&(l)->lock))
# define _spin_is_locked(l)	rt_mutex_is_locked(&(l)->lock)
# define _spin_unlock_wait(l)	rt_spin_unlock_wait(l)

# define _spin_trylock(l)	rt_spin_trylock(l)
# define _spin_trylock_bh(l)	rt_spin_trylock(l)
# define _spin_trylock_irq(l)	rt_spin_trylock(l)
# define _spin_trylock_irqsave(l,f) rt_spin_trylock_irqsave(l, f)
# else

extern int this_should_never_be_called_on_non_rt(spinlock_t *lock);
# define TSNBCONRT(l) this_should_never_be_called_on_non_rt(l)
# define _spin_can_lock(l)	TSNBCONRT(l)
# define _spin_is_locked(l)	TSNBCONRT(l)
# define _spin_unlock_wait(l)	TSNBCONRT(l)

# define _spin_trylock(l)	TSNBCONRT(l)
# define _spin_trylock_bh(l)	TSNBCONRT(l)
# define _spin_trylock_irq(l)	TSNBCONRT(l)
# define _spin_trylock_irqsave(l,f) TSNBCONRT(l)
#endif
extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock,
					       unsigned long *flags);
extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
extern void
__rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
#define _rwlock_init(rwl, n, f, l)			\
do {							\
	static struct lock_class_key __key;		\
							\
	__rt_rwlock_init(rwl, n, &__key);		\
} while (0)
#ifdef CONFIG_PREEMPT_RT
# define rt_read_can_lock(rwl)	(!rt_mutex_is_locked(&(rwl)->lock))
# define rt_write_can_lock(rwl)	(!rt_mutex_is_locked(&(rwl)->lock))
#else
 extern int rt_rwlock_can_lock_never_call_on_non_rt(rwlock_t *rwlock);
# define rt_read_can_lock(rwl)	rt_rwlock_can_lock_never_call_on_non_rt(rwl)
# define rt_write_can_lock(rwl)	rt_rwlock_can_lock_never_call_on_non_rt(rwl)
#endif
# define _read_can_lock(rwl)	rt_read_can_lock(rwl)
# define _write_can_lock(rwl)	rt_write_can_lock(rwl)

# define _read_trylock(rwl)	rt_read_trylock(rwl)
# define _write_trylock(rwl)	rt_write_trylock(rwl)
# define _write_trylock_irqsave(rwl, flags) \
	rt_write_trylock_irqsave(rwl, flags)

# define _read_lock(rwl)	rt_read_lock(rwl)
# define _write_lock(rwl)	rt_write_lock(rwl)
# define _read_unlock(rwl)	rt_read_unlock(rwl)
# define _write_unlock(rwl)	rt_write_unlock(rwl)

# define _read_lock_bh(rwl)	rt_read_lock(rwl)
# define _write_lock_bh(rwl)	rt_write_lock(rwl)
# define _read_unlock_bh(rwl)	rt_read_unlock(rwl)
# define _write_unlock_bh(rwl)	rt_write_unlock(rwl)

# define _read_lock_irq(rwl)	rt_read_lock(rwl)
# define _write_lock_irq(rwl)	rt_write_lock(rwl)
# define _read_unlock_irq(rwl)	rt_read_unlock(rwl)
# define _write_unlock_irq(rwl)	rt_write_unlock(rwl)

# define _read_lock_irqsave(rwl)	rt_read_lock_irqsave(rwl)
# define _write_lock_irqsave(rwl)	rt_write_lock_irqsave(rwl)

# define _read_unlock_irqrestore(rwl, f)	rt_read_unlock(rwl)
# define _write_unlock_irqrestore(rwl, f)	rt_write_unlock(rwl)
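/*
 * Editor's sketch (hypothetical names): as the mappings above show, on
 * PREEMPT_RT every rwlock_t variant - plain, _bh, _irq, _irqsave -
 * collapses onto rt_read_lock()/rt_write_lock(), i.e. a sleeping
 * rtmutex-based lock, and the irqsave flags are dummies:
 *
 *	read_lock(&example_rwlock);		 -> rt_read_lock()
 *	example_lookup();
 *	read_unlock(&example_rwlock);		 -> rt_read_unlock()
 *
 *	write_lock_irqsave(&example_rwlock, flags);  IRQs stay enabled on RT
 *	example_modify();
 *	write_unlock_irqrestore(&example_rwlock, flags);
 */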
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				 struct lock_class_key *key);
# define _raw_spin_lock_init(lock, name, file, line)	\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_spin_lock_init((lock), #lock, &__key);	\
} while (0)

#else
#define __raw_spin_lock_init(lock) \
	do { *(lock) = RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
# define _raw_spin_lock_init(lock, name, file, line) __raw_spin_lock_init(lock)
#endif
/*
 * PICK_SPIN_OP()/PICK_RW_OP() are simple redirectors for PICK_FUNCTION
 */
#define PICK_SPIN_OP(...)	\
	PICK_FUNCTION(raw_spinlock_t *, spinlock_t *, ##__VA_ARGS__)
#define PICK_SPIN_OP_RET(...)	\
	PICK_FUNCTION_RET(raw_spinlock_t *, spinlock_t *, ##__VA_ARGS__)
#define PICK_RW_OP(...) PICK_FUNCTION(raw_rwlock_t *, rwlock_t *, ##__VA_ARGS__)
#define PICK_RW_OP_RET(...)	\
	PICK_FUNCTION_RET(raw_rwlock_t *, rwlock_t *, ##__VA_ARGS__)
#define spin_lock_init(lock) \
	PICK_SPIN_OP(_raw_spin_lock_init, _spin_lock_init, lock, #lock, \
		     __FILE__, __LINE__)
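/*
 * Editor's note (illustrative, hypothetical "example_*" names):
 * PICK_FUNCTION() (from linux/pickop.h) dispatches on the declared type
 * of the lock argument, so one name serves both lock flavours.  The same
 * spin_lock_init() invocation reaches _raw_spin_lock_init() for the first
 * lock below and _spin_lock_init()/__rt_spin_lock_init() for the second:
 *
 *	raw_spinlock_t example_raw_lock;
 *	spinlock_t example_sleep_lock;
 *
 *	spin_lock_init(&example_raw_lock);	 raw path
 *	spin_lock_init(&example_sleep_lock);	 rtmutex path
 */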
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_rwlock_init(raw_rwlock_t *lock, const char *name,
			      struct lock_class_key *key);
# define _raw_rwlock_init(lock, name, file, line)	\
do {							\
	static struct lock_class_key __key;		\
							\
	__raw_rwlock_init((lock), #lock, &__key);	\
} while (0)
#else
#define __raw_rwlock_init(lock) \
	do { *(lock) = RAW_RW_LOCK_UNLOCKED(lock); } while (0)
# define _raw_rwlock_init(lock, name, file, line) __raw_rwlock_init(lock)
#endif

#define rwlock_init(lock) \
	PICK_RW_OP(_raw_rwlock_init, _rwlock_init, lock, #lock, \
		   __FILE__, __LINE__)
#define __spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)

#define spin_is_locked(lock)	\
	PICK_SPIN_OP_RET(__spin_is_locked, _spin_is_locked, lock)

#define __spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)

#define spin_unlock_wait(lock) \
	PICK_SPIN_OP(__spin_unlock_wait, _spin_unlock_wait, lock)
/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set.  The various
 * methods are defined as nops where they are not required.
 */
#define spin_trylock(lock) \
	__cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock, _spin_trylock, lock))

#define read_trylock(lock) \
	__cond_lock(lock, PICK_RW_OP_RET(__read_trylock, _read_trylock, lock))

#define write_trylock(lock) \
	__cond_lock(lock, PICK_RW_OP_RET(__write_trylock, _write_trylock, lock))

#define write_trylock_irqsave(lock, flags) \
	__cond_lock(lock, PICK_RW_OP_RET(__write_trylock_irqsave, \
		_write_trylock_irqsave, lock, &flags))
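/*
 * Editor's sketch (hypothetical names): the trylock variants return
 * nonzero on success and never block, which makes them usable in contexts
 * that must not wait for the lock:
 *
 *	if (spin_trylock(&example_lock)) {
 *		example_fast_path();
 *		spin_unlock(&example_lock);
 *	} else {
 *		example_defer_work();
 *	}
 */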
#define __spin_can_lock(lock)	__raw_spin_can_lock(&(lock)->raw_lock)
#define __read_can_lock(lock)	__raw_read_can_lock(&(lock)->raw_lock)
#define __write_can_lock(lock)	__raw_write_can_lock(&(lock)->raw_lock)

#define read_can_lock(lock) \
	__cond_lock(lock, PICK_RW_OP_RET(__read_can_lock, _read_can_lock, lock))

#define write_can_lock(lock) \
	__cond_lock(lock, PICK_RW_OP_RET(__write_can_lock, _write_can_lock,\
		lock))
#define spin_lock(lock) PICK_SPIN_OP(__spin_lock, _spin_lock, lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) \
	PICK_SPIN_OP(__spin_lock_nested, _spin_lock_nested, lock, subclass)
#else
# define spin_lock_nested(lock, subclass) spin_lock(lock)
#endif
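/*
 * Editor's sketch (hypothetical names): spin_lock_nested() tells lockdep
 * that taking two locks of the same class is intentional.  A common
 * pattern is to order the two locks by address first:
 *
 *	static void example_double_lock(spinlock_t *a, spinlock_t *b)
 *	{
 *		if (a > b) {
 *			spinlock_t *tmp = a;
 *			a = b;
 *			b = tmp;
 *		}
 *		spin_lock(a);
 *		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
 *	}
 */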
#define write_lock(lock) PICK_RW_OP(__write_lock, _write_lock, lock)

#define read_lock(lock) PICK_RW_OP(__read_lock, _read_lock, lock)
# define spin_lock_irqsave(lock, flags)					\
do {									\
	BUILD_CHECK_IRQ_FLAGS(flags);					\
	flags = PICK_SPIN_OP_RET(__spin_lock_irqsave, _spin_lock_irqsave, \
		lock);							\
} while (0)
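/*
 * Editor's sketch (hypothetical names): the classic pattern for data
 * shared with an interrupt handler.  Note that "flags" must be an
 * unsigned long lvalue - the macro assigns to it rather than taking its
 * address, and BUILD_CHECK_IRQ_FLAGS() type-checks it at compile time:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&example_raw_lock, flags);
 *	example_touch_shared_state();
 *	spin_unlock_irqrestore(&example_raw_lock, flags);
 */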
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_irqsave_nested(lock, flags, subclass)		\
do {									\
	BUILD_CHECK_IRQ_FLAGS(flags);					\
	flags = PICK_SPIN_OP_RET(__spin_lock_irqsave_nested,		\
		_spin_lock_irqsave_nested, lock, subclass);		\
} while (0)
#else
# define spin_lock_irqsave_nested(lock, flags, subclass)		\
	spin_lock_irqsave(lock, flags)
#endif

# define read_lock_irqsave(lock, flags)					\
do {									\
	BUILD_CHECK_IRQ_FLAGS(flags);					\
	flags = PICK_RW_OP_RET(__read_lock_irqsave, _read_lock_irqsave, lock);\
} while (0)

# define write_lock_irqsave(lock, flags)				\
do {									\
	BUILD_CHECK_IRQ_FLAGS(flags);					\
	flags = PICK_RW_OP_RET(__write_lock_irqsave, _write_lock_irqsave, lock);\
} while (0)
#define spin_lock_irq(lock) PICK_SPIN_OP(__spin_lock_irq, _spin_lock_irq, lock)

#define spin_lock_bh(lock) PICK_SPIN_OP(__spin_lock_bh, _spin_lock_bh, lock)

#define read_lock_irq(lock) PICK_RW_OP(__read_lock_irq, _read_lock_irq, lock)

#define read_lock_bh(lock) PICK_RW_OP(__read_lock_bh, _read_lock_bh, lock)

#define write_lock_irq(lock) PICK_RW_OP(__write_lock_irq, _write_lock_irq, lock)

#define write_lock_bh(lock) PICK_RW_OP(__write_lock_bh, _write_lock_bh, lock)

#define spin_unlock(lock) PICK_SPIN_OP(__spin_unlock, _spin_unlock, lock)

#define read_unlock(lock) PICK_RW_OP(__read_unlock, _read_unlock, lock)

#define write_unlock(lock) PICK_RW_OP(__write_unlock, _write_unlock, lock)

#define spin_unlock_no_resched(lock) \
	PICK_SPIN_OP(__spin_unlock_no_resched, _spin_unlock_no_resched, lock)

#define spin_unlock_irqrestore(lock, flags)				\
do {									\
	BUILD_CHECK_IRQ_FLAGS(flags);					\
	PICK_SPIN_OP(__spin_unlock_irqrestore, _spin_unlock_irqrestore,	\
		     lock, flags);					\
} while (0)

#define spin_unlock_irq(lock)	\
	PICK_SPIN_OP(__spin_unlock_irq, _spin_unlock_irq, lock)
#define spin_unlock_bh(lock)	\
	PICK_SPIN_OP(__spin_unlock_bh, _spin_unlock_bh, lock)

#define read_unlock_irqrestore(lock, flags)				\
do {									\
	BUILD_CHECK_IRQ_FLAGS(flags);					\
	PICK_RW_OP(__read_unlock_irqrestore, _read_unlock_irqrestore,	\
		   lock, flags);					\
} while (0)

#define read_unlock_irq(lock) \
	PICK_RW_OP(__read_unlock_irq, _read_unlock_irq, lock)
#define read_unlock_bh(lock) PICK_RW_OP(__read_unlock_bh, _read_unlock_bh, lock)

#define write_unlock_irqrestore(lock, flags)				\
do {									\
	BUILD_CHECK_IRQ_FLAGS(flags);					\
	PICK_RW_OP(__write_unlock_irqrestore, _write_unlock_irqrestore, \
		   lock, flags);					\
} while (0)
#define write_unlock_irq(lock) \
	PICK_RW_OP(__write_unlock_irq, _write_unlock_irq, lock)

#define write_unlock_bh(lock) \
	PICK_RW_OP(__write_unlock_bh, _write_unlock_bh, lock)

#define spin_trylock_bh(lock) \
	__cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_bh, _spin_trylock_bh,\
		lock))

#define spin_trylock_irq(lock) \
	__cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_irq, \
		_spin_trylock_irq, lock))

#define spin_trylock_irqsave(lock, flags) \
	__cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_irqsave, \
		_spin_trylock_irqsave, lock, &flags))
/*
 * bit-based spin_lock()
 *
 * Don't use this unless you really need to: spin_lock() and spin_unlock()
 * are significantly faster.
 */
static inline void bit_spin_lock(int bitnum, unsigned long *addr)
{
	/*
	 * Assuming the lock is uncontended, this never enters
	 * the body of the outer loop. If it is contended, then
	 * within the inner loop a non-atomic test is used to
	 * busywait with less bus contention for a good time to
	 * attempt to acquire the lock bit.
	 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT)
	while (unlikely(test_and_set_bit_lock(bitnum, addr)))
		while (test_bit(bitnum, addr))
			cpu_relax();
#endif
	__acquire(bitlock);
}
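/*
 * Editor's sketch (hypothetical "example_*" names): a bit spinlock packs
 * the lock into one bit of an existing word, trading speed for space -
 * useful when embedding a full spinlock per object would cost too much:
 *
 *	#define EXAMPLE_LOCK_BIT 0
 *	struct example_obj {
 *		unsigned long state;	 bit 0 doubles as a lock
 *	};
 *
 *	bit_spin_lock(EXAMPLE_LOCK_BIT, &obj->state);
 *	example_update(obj);
 *	bit_spin_unlock(EXAMPLE_LOCK_BIT, &obj->state);
 */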
/*
 * Return true if it was acquired
 */
static inline int bit_spin_trylock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT)
	if (unlikely(test_and_set_bit_lock(bitnum, addr)))
		return 0;
#endif
	__acquire(bitlock);
	return 1;
}
/*
 * bit-based spin_unlock():
 */
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT)
# ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
# endif
	clear_bit_unlock(bitnum, addr);
#endif
	__release(bitlock);
}
/*
 * bit-based spin_unlock() - non-atomic version:
 */
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT)
# ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(!test_bit(bitnum, addr));
# endif
	__clear_bit_unlock(bitnum, addr);
#endif
	__release(bitlock);
}
/*
 * Return true if the lock is held.
 */
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT)
	return test_bit(bitnum, addr);
#else
	return 1;
#endif
}
/**
 * __raw_spin_can_lock - would __raw_spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define __raw_spin_can_lock(lock)	(!__raw_spin_is_locked(lock))
/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <asm/atomic.h>

/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
/* "lock on reference count zero" */
#ifndef ATOMIC_DEC_AND_LOCK
# include <asm/atomic.h>
extern int __atomic_dec_and_spin_lock(raw_spinlock_t *lock, atomic_t *atomic);
#endif

#define atomic_dec_and_lock(atomic, lock)				\
	__cond_lock(lock, PICK_SPIN_OP_RET(__atomic_dec_and_spin_lock,	\
		_atomic_dec_and_spin_lock, lock, atomic))
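/*
 * Editor's sketch (hypothetical names): atomic_dec_and_lock() closes the
 * classic refcount race - the lock is taken only when the final reference
 * drops, so teardown and a concurrent lookup cannot overlap:
 *
 *	if (atomic_dec_and_lock(&obj->refcount, &example_list_lock)) {
 *		list_del(&obj->node);
 *		spin_unlock(&example_list_lock);
 *		kfree(obj);
 *	}
 */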
/**
 * spin_can_lock - would spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define spin_can_lock(lock) \
	__cond_lock(lock, PICK_SPIN_OP_RET(__spin_can_lock, _spin_can_lock,\
		lock))
/* FIXME: porting hack! */
#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)

#endif /* __LINUX_SPINLOCK_H */