/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc __spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(__spin_trylock);

int __lockfunc __spin_trylock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();

	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	__preempt_enable_no_resched();
	local_irq_enable();
	preempt_check_resched();

	return 0;
}
EXPORT_SYMBOL(__spin_trylock_irq);

int __lockfunc __spin_trylock_irqsave(raw_spinlock_t *lock,
				      unsigned long *flags)
{
	local_irq_save(*flags);
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	__preempt_enable_no_resched();
	local_irq_restore(*flags);
	preempt_check_resched();

	return 0;
}
EXPORT_SYMBOL(__spin_trylock_irqsave);

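/*
 * Usage sketch (illustrative, not from this file): the _irqsave trylock
 * takes the flags word by pointer, because the caller's flags must be
 * saved before the trylock outcome is known; the spin_trylock_irqsave()
 * wrapper macro passes &flags down here. A caller (the "mydev"
 * structure is hypothetical) would look like:
 *
 *	unsigned long flags;
 *
 *	if (spin_trylock_irqsave(&mydev->lock, flags)) {
 *		mydev->counter++;
 *		spin_unlock_irqrestore(&mydev->lock, flags);
 *	}
 */
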
int __lockfunc __read_trylock(raw_rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(__read_trylock);

int __lockfunc __write_trylock(raw_rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(__write_trylock);

int __lockfunc __write_trylock_irqsave(raw_rwlock_t *lock,
				       unsigned long *flags)
{
	int ret;

	local_irq_save(*flags);
	ret = __write_trylock(lock);
	if (ret)
		return ret;

	local_irq_restore(*flags);
	return 0;
}
EXPORT_SYMBOL(__write_trylock_irqsave);

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc __read_lock(raw_rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(__read_lock);

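/*
 * For reference, a rough sketch of what LOCK_CONTENDED (from
 * <linux/lockdep.h>) does when lock statistics are enabled: try the
 * fast path first, so a contended acquire can be reported before
 * spinning. Simplified, not verbatim:
 *
 *	if (!_raw_read_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		_raw_read_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * Without CONFIG_LOCK_STAT it collapses to the plain lock operation.
 */
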
unsigned long __lockfunc __spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(__spin_lock_irqsave);

void __lockfunc __spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock_irq);

void __lockfunc __spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock_bh);

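/*
 * Usage sketch (illustrative): the _bh variants combine the lock with
 * softirq exclusion, for data shared between process context and a
 * softirq or timer on the same CPU (the "stats" structure here is
 * hypothetical):
 *
 *	spin_lock_bh(&stats->lock);	(also blocks local softirqs)
 *	stats->packets++;
 *	spin_unlock_bh(&stats->lock);
 */
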
unsigned long __lockfunc __read_lock_irqsave(raw_rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
	return flags;
}
EXPORT_SYMBOL(__read_lock_irqsave);

void __lockfunc __read_lock_irq(raw_rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(__read_lock_irq);

void __lockfunc __read_lock_bh(raw_rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
EXPORT_SYMBOL(__read_lock_bh);

unsigned long __lockfunc __write_lock_irqsave(raw_rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
	return flags;
}
EXPORT_SYMBOL(__write_lock_irqsave);

void __lockfunc __write_lock_irq(raw_rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(__write_lock_irq);

void __lockfunc __write_lock_bh(raw_rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(__write_lock_bh);

void __lockfunc __spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock);

void __lockfunc __write_lock(raw_rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}
EXPORT_SYMBOL(__write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc __##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!__raw_##op##_can_lock(&(lock)->raw_lock) &&	\
						(lock)->break_lock)	\
			__raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(__##op##_lock);						\
									\
unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!__raw_##op##_can_lock(&(lock)->raw_lock) &&	\
						(lock)->break_lock)	\
			__raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(__##op##_lock_irqsave);					\
									\
void __lockfunc __##op##_lock_irq(locktype##_t *lock)			\
{									\
	__##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(__##op##_lock_irq);					\
									\
void __lockfunc __##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/* Careful: we must exclude softirqs too, hence the */		\
	/* irq-disabling. We use the generic preemption-aware */	\
	/* function: */							\
	flags = __##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(__##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, raw_rwlock);
BUILD_LOCK_OPS(write, raw_rwlock);

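/*
 * For reference, BUILD_LOCK_OPS(spin, raw_spinlock) above generates
 * the following (simplified; the irqsave/irq/bh variants are built
 * the same way):
 *
 *	void __lockfunc __spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!__raw_spin_can_lock(&(lock)->raw_lock) &&
 *					(lock)->break_lock)
 *				__raw_spin_relax(&lock->raw_lock);
 *		}
 *		(lock)->break_lock = 0;
 *	}
 */
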
#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc __spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock_nested);

void __lockfunc __spin_lock_nest_lock(raw_spinlock_t *lock,
				      struct lockdep_map *nest_lock)
{
	preempt_disable();
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}
EXPORT_SYMBOL(__spin_lock_nest_lock);

unsigned long __lockfunc __spin_lock_irqsave_nested(raw_spinlock_t *lock,
						    int subclass)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(__spin_lock_irqsave_nested);

#endif /* CONFIG_DEBUG_LOCK_ALLOC */

void __lockfunc __spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(__spin_unlock);

void __lockfunc __spin_unlock_no_resched(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	__preempt_enable_no_resched();
}

void __lockfunc __write_unlock(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(__write_unlock);

void __lockfunc __read_unlock(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(__read_unlock);

void __lockfunc __spin_unlock_irqrestore(raw_spinlock_t *lock,
					 unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_restore(flags);
	preempt_check_resched();
}
EXPORT_SYMBOL(__spin_unlock_irqrestore);

void __lockfunc __spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_enable();
	preempt_check_resched();
}
EXPORT_SYMBOL(__spin_unlock_irq);

void __lockfunc __spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	__preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(__spin_unlock_bh);

void __lockfunc __read_unlock_irqrestore(raw_rwlock_t *lock,
					 unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_restore(flags);
	preempt_check_resched();
}
EXPORT_SYMBOL(__read_unlock_irqrestore);

void __lockfunc __read_unlock_irq(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_enable();
	preempt_check_resched();
}
EXPORT_SYMBOL(__read_unlock_irq);

void __lockfunc __read_unlock_bh(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	__preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(__read_unlock_bh);

void __lockfunc __write_unlock_irqrestore(raw_rwlock_t *lock,
					  unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_restore(flags);
	preempt_check_resched();
}
EXPORT_SYMBOL(__write_unlock_irqrestore);

void __lockfunc __write_unlock_irq(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	__preempt_enable_no_resched();
	local_irq_enable();
	preempt_check_resched();
}
EXPORT_SYMBOL(__write_unlock_irq);

void __lockfunc __write_unlock_bh(raw_rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	__preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(__write_unlock_bh);

int __lockfunc __spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	__preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(__spin_trylock_bh);

notrace int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start &&
	       addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);

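/*
 * Usage sketch (illustrative): architectures call this from their
 * profile_pc() so that profiling hits taken while spinning are
 * attributed to the caller of the lock function rather than to the
 * lock function itself. Roughly, assuming an architecture that keeps
 * the caller in a link register:
 *
 *	unsigned long profile_pc(struct pt_regs *regs)
 *	{
 *		unsigned long pc = instruction_pointer(regs);
 *
 *		if (in_lock_functions(pc))
 *			return regs->link;	(arch-specific caller slot)
 *		return pc;
 *	}
 */
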
void notrace __debug_atomic_dec_and_test(atomic_t *v)
{
	static int warn_once = 1;

	if (!atomic_read(v) && warn_once) {
		warn_once = 0;
		printk("BUG: atomic counter underflow!\n");
	}
}