[nao-ulib.git] kernel/2.6.29.6-aldebaran-rt/kernel/spinlock.c (added with the 2.6.29.6 aldebaran kernel)

/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc __spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(__spin_trylock);
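
/*
 * Illustrative usage sketch, not part of the original file: callers
 * normally reach __spin_trylock() through the spin_trylock() wrapper
 * and must handle failure; my_lock and do_work() are hypothetical:
 *
 *	if (spin_trylock(&my_lock)) {
 *		do_work();		// lock held, preemption disabled
 *		spin_unlock(&my_lock);
 *	}
 *	// else: contended, nothing was acquired and nothing to undo
 */
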
int __lockfunc __spin_trylock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();

	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	__preempt_enable_no_resched();
	local_irq_enable();
	preempt_check_resched();

	return 0;
}
EXPORT_SYMBOL(__spin_trylock_irq);

int __lockfunc __spin_trylock_irqsave(raw_spinlock_t *lock,
				      unsigned long *flags)
{
	local_irq_save(*flags);
	preempt_disable();

	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	__preempt_enable_no_resched();
	local_irq_restore(*flags);
	preempt_check_resched();

	return 0;
}
EXPORT_SYMBOL(__spin_trylock_irqsave);
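
/*
 * Illustrative usage sketch, hypothetical caller: the trylock takes
 * the flags word by reference here, which the usual wrapper macro
 * hides; on failure, IRQ state has already been restored:
 *
 *	unsigned long flags;
 *
 *	if (spin_trylock_irqsave(&my_lock, flags)) {
 *		do_work();		// lock held, local IRQs off
 *		spin_unlock_irqrestore(&my_lock, flags);
 *	}
 */
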
int __lockfunc __read_trylock(raw_rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(__read_trylock);

int __lockfunc __write_trylock(raw_rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(__write_trylock);

int __lockfunc __write_trylock_irqsave(raw_rwlock_t *lock, unsigned long *flags)
{
	int ret;

	local_irq_save(*flags);
	ret = __write_trylock(lock);
	if (ret)
		return ret;

	local_irq_restore(*flags);
	return 0;
}
EXPORT_SYMBOL(__write_trylock_irqsave);

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc __read_lock(raw_rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(__read_lock);
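
/*
 * Sketch, not from this file: with CONFIG_LOCK_STAT enabled,
 * LOCK_CONTENDED(lock, try, lockfn) expands to roughly the following,
 * so contention is recorded before spinning; without lock statistics
 * it degenerates to a plain lockfn(lock) call:
 *
 *	if (!try(lock)) {
 *		lock_contended(&(lock)->dep_map, _RET_IP_);
 *		lockfn(lock);
 *	}
 *	lock_acquired(&(lock)->dep_map, _RET_IP_);
 */
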
unsigned long __lockfunc __spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(__spin_lock_irqsave);
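
/*
 * Illustrative pairing, hypothetical caller: the flags value returned
 * here is stashed by the spin_lock_irqsave() wrapper macro and must be
 * handed back to the matching unlock:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	// critical section, local IRQs off
 *	spin_unlock_irqrestore(&my_lock, flags);
 */
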
void __lockfunc __spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock_irq);

void __lockfunc __spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock_bh);

unsigned long __lockfunc __read_lock_irqsave(raw_rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
	return flags;
}
EXPORT_SYMBOL(__read_lock_irqsave);

void __lockfunc __read_lock_irq(raw_rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(__read_lock_irq);

void __lockfunc __read_lock_bh(raw_rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(__read_lock_bh);

unsigned long __lockfunc __write_lock_irqsave(raw_rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
	return flags;
}
EXPORT_SYMBOL(__write_lock_irqsave);

void __lockfunc __write_lock_irq(raw_rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(__write_lock_irq);

void __lockfunc __write_lock_bh(raw_rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(__write_lock_bh);

void __lockfunc __spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock);

void __lockfunc __write_lock(raw_rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(__write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!__raw_##op##_can_lock(&(lock)->raw_lock) &&	\
				(lock)->break_lock)			\
			__raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(__##op##_lock);						\
									\
unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!__raw_##op##_can_lock(&(lock)->raw_lock) &&	\
				(lock)->break_lock)			\
			__raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(__##op##_lock_irqsave);					\
									\
void __lockfunc __##op##_lock_irq(locktype##_t *lock)			\
{									\
	__##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(__##op##_lock_irq);					\
									\
void __lockfunc __##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/* */								\
	/* Careful: we must exclude softirqs too, hence the */		\
	/* irq-disabling. We use the generic preemption-aware */	\
	/* function: */							\
	/**/								\
	flags = __##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(__##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, raw_rwlock);
BUILD_LOCK_OPS(write, raw_rwlock);

#endif /* CONFIG_PREEMPT */
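
/*
 * Sketch, not part of the original file: for op=spin and
 * locktype=raw_spinlock, the first function generated above behaves
 * roughly like this open-coded loop; preemption stays enabled while
 * spin-waiting, and break_lock asks the current holder to release
 * early:
 *
 *	void __lockfunc __spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!lock->break_lock)
 *				lock->break_lock = 1;
 *			while (!__raw_spin_can_lock(&lock->raw_lock) &&
 *					lock->break_lock)
 *				__raw_spin_relax(&lock->raw_lock);
 *		}
 *		lock->break_lock = 0;
 *	}
 */
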
#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc __spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock_nested);

void __lockfunc __spin_lock_nest_lock(raw_spinlock_t *lock,
				      struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock_nest_lock);

unsigned long __lockfunc __spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(__spin_lock_irqsave_nested);

#endif
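
/*
 * Illustrative use of the nested variants, hypothetical caller: when
 * two locks of the same lockdep class must be held at once, the inner
 * acquisition is annotated with a subclass so lockdep does not report
 * a false self-deadlock:
 *
 *	spin_lock(&a->lock);
 *	spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&b->lock);
 *	spin_unlock(&a->lock);
 */
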
void __lockfunc __spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(__spin_unlock);

void __lockfunc __spin_unlock_no_resched(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	__preempt_enable_no_resched();
}
/* not exported */

void __lockfunc __write_unlock(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(__write_unlock);

void __lockfunc __read_unlock(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(__read_unlock);

void __lockfunc __spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_restore(flags);
	preempt_check_resched();
}
EXPORT_SYMBOL(__spin_unlock_irqrestore);

void __lockfunc __spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_enable();
	preempt_check_resched();
}
EXPORT_SYMBOL(__spin_unlock_irq);

void __lockfunc __spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	__preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(__spin_unlock_bh);

void __lockfunc __read_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_restore(flags);
	preempt_check_resched();
}
EXPORT_SYMBOL(__read_unlock_irqrestore);

void __lockfunc __read_unlock_irq(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_enable();
	preempt_check_resched();
}
EXPORT_SYMBOL(__read_unlock_irq);

void __lockfunc __read_unlock_bh(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	__preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(__read_unlock_bh);

void __lockfunc __write_unlock_irqrestore(raw_rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_restore(flags);
	preempt_check_resched();
}
EXPORT_SYMBOL(__write_unlock_irqrestore);

void __lockfunc __write_unlock_irq(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_enable();
	preempt_check_resched();
}
EXPORT_SYMBOL(__write_unlock_irq);

void __lockfunc __write_unlock_bh(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	__preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(__write_unlock_bh);

int __lockfunc __spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	__preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(__spin_trylock_bh);

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
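
/*
 * Illustrative caller, sketch only: architectures use
 * in_lock_functions() in their profile_pc() so profiling hits inside
 * these lock routines are attributed to the real caller:
 *
 *	if (in_lock_functions(pc))
 *		pc = saved_caller_ip;	// hypothetical saved return address
 */
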
void notrace __debug_atomic_dec_and_test(atomic_t *v)
{
	static int warn_once = 1;

	if (!atomic_read(v) && warn_once) {
		warn_once = 0;
		printk("BUG: atomic counter underflow!\n");
		WARN_ON(1);
	}
}