/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 */
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>
20 * Generic declaration of the raw read_trylock() function,
21 * architectures are supposed to optimize this:
23 int __lockfunc
generic__raw_read_trylock(raw_rwlock_t
*lock
)
25 __raw_read_lock(lock
);
28 EXPORT_SYMBOL(generic__raw_read_trylock
);
30 int __lockfunc
_spin_trylock(spinlock_t
*lock
)
33 if (_raw_spin_trylock(lock
)) {
34 spin_acquire(&lock
->dep_map
, 0, 1, _RET_IP_
);
41 EXPORT_SYMBOL(_spin_trylock
);
43 int __lockfunc
_read_trylock(rwlock_t
*lock
)
46 if (_raw_read_trylock(lock
)) {
47 rwlock_acquire_read(&lock
->dep_map
, 0, 1, _RET_IP_
);
54 EXPORT_SYMBOL(_read_trylock
);
56 int __lockfunc
_write_trylock(rwlock_t
*lock
)
59 if (_raw_write_trylock(lock
)) {
60 rwlock_acquire(&lock
->dep_map
, 0, 1, _RET_IP_
);
67 EXPORT_SYMBOL(_write_trylock
);
70 * If lockdep is enabled then we use the non-preemption spin-ops
71 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
72 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
74 #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
75 defined(CONFIG_DEBUG_LOCK_ALLOC)
77 void __lockfunc
_read_lock(rwlock_t
*lock
)
80 rwlock_acquire_read(&lock
->dep_map
, 0, 0, _RET_IP_
);
83 EXPORT_SYMBOL(_read_lock
);
85 unsigned long __lockfunc
_spin_lock_irqsave(spinlock_t
*lock
)
89 local_irq_save(flags
);
91 spin_acquire(&lock
->dep_map
, 0, 0, _RET_IP_
);
93 * On lockdep we dont want the hand-coded irq-enable of
94 * _raw_spin_lock_flags() code, because lockdep assumes
95 * that interrupts are not re-enabled during lock-acquire:
97 #ifdef CONFIG_PROVE_LOCKING
100 _raw_spin_lock_flags(lock
, &flags
);
104 EXPORT_SYMBOL(_spin_lock_irqsave
);
106 void __lockfunc
_spin_lock_irq(spinlock_t
*lock
)
110 spin_acquire(&lock
->dep_map
, 0, 0, _RET_IP_
);
111 _raw_spin_lock(lock
);
113 EXPORT_SYMBOL(_spin_lock_irq
);
115 void __lockfunc
_spin_lock_bh(spinlock_t
*lock
)
119 spin_acquire(&lock
->dep_map
, 0, 0, _RET_IP_
);
120 _raw_spin_lock(lock
);
122 EXPORT_SYMBOL(_spin_lock_bh
);
124 unsigned long __lockfunc
_read_lock_irqsave(rwlock_t
*lock
)
128 local_irq_save(flags
);
130 rwlock_acquire_read(&lock
->dep_map
, 0, 0, _RET_IP_
);
131 _raw_read_lock(lock
);
134 EXPORT_SYMBOL(_read_lock_irqsave
);
136 void __lockfunc
_read_lock_irq(rwlock_t
*lock
)
140 rwlock_acquire_read(&lock
->dep_map
, 0, 0, _RET_IP_
);
141 _raw_read_lock(lock
);
143 EXPORT_SYMBOL(_read_lock_irq
);
145 void __lockfunc
_read_lock_bh(rwlock_t
*lock
)
149 rwlock_acquire_read(&lock
->dep_map
, 0, 0, _RET_IP_
);
150 _raw_read_lock(lock
);
152 EXPORT_SYMBOL(_read_lock_bh
);
154 unsigned long __lockfunc
_write_lock_irqsave(rwlock_t
*lock
)
158 local_irq_save(flags
);
160 rwlock_acquire(&lock
->dep_map
, 0, 0, _RET_IP_
);
161 _raw_write_lock(lock
);
164 EXPORT_SYMBOL(_write_lock_irqsave
);
166 void __lockfunc
_write_lock_irq(rwlock_t
*lock
)
170 rwlock_acquire(&lock
->dep_map
, 0, 0, _RET_IP_
);
171 _raw_write_lock(lock
);
173 EXPORT_SYMBOL(_write_lock_irq
);
175 void __lockfunc
_write_lock_bh(rwlock_t
*lock
)
179 rwlock_acquire(&lock
->dep_map
, 0, 0, _RET_IP_
);
180 _raw_write_lock(lock
);
182 EXPORT_SYMBOL(_write_lock_bh
);
184 void __lockfunc
_spin_lock(spinlock_t
*lock
)
187 spin_acquire(&lock
->dep_map
, 0, 0, _RET_IP_
);
188 _raw_spin_lock(lock
);
191 EXPORT_SYMBOL(_spin_lock
);
193 void __lockfunc
_write_lock(rwlock_t
*lock
)
196 rwlock_acquire(&lock
->dep_map
, 0, 0, _RET_IP_
);
197 _raw_write_lock(lock
);
200 EXPORT_SYMBOL(_write_lock
);
202 #else /* CONFIG_PREEMPT: */
205 * This could be a long-held lock. We both prepare to spin for a long
206 * time (making _this_ CPU preemptable if possible), and we also signal
207 * towards that other CPU that it should break the lock ASAP.
209 * (We do this in a function because inlining it would be excessive.)
212 #define BUILD_LOCK_OPS(op, locktype) \
213 void __lockfunc _##op##_lock(locktype##_t *lock) \
217 if (likely(_raw_##op##_trylock(lock))) \
221 if (!(lock)->break_lock) \
222 (lock)->break_lock = 1; \
223 while (!op##_can_lock(lock) && (lock)->break_lock) \
226 (lock)->break_lock = 0; \
229 EXPORT_SYMBOL(_##op##_lock); \
231 unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
233 unsigned long flags; \
237 local_irq_save(flags); \
238 if (likely(_raw_##op##_trylock(lock))) \
240 local_irq_restore(flags); \
243 if (!(lock)->break_lock) \
244 (lock)->break_lock = 1; \
245 while (!op##_can_lock(lock) && (lock)->break_lock) \
248 (lock)->break_lock = 0; \
252 EXPORT_SYMBOL(_##op##_lock_irqsave); \
254 void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
256 _##op##_lock_irqsave(lock); \
259 EXPORT_SYMBOL(_##op##_lock_irq); \
261 void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
263 unsigned long flags; \
266 /* Careful: we must exclude softirqs too, hence the */ \
267 /* irq-disabling. We use the generic preemption-aware */ \
270 flags = _##op##_lock_irqsave(lock); \
271 local_bh_disable(); \
272 local_irq_restore(flags); \
275 EXPORT_SYMBOL(_##op##_lock_bh)
278 * Build preemption-friendly versions of the following
279 * lock-spinning functions:
281 * _[spin|read|write]_lock()
282 * _[spin|read|write]_lock_irq()
283 * _[spin|read|write]_lock_irqsave()
284 * _[spin|read|write]_lock_bh()
286 BUILD_LOCK_OPS(spin
, spinlock
);
287 BUILD_LOCK_OPS(read
, rwlock
);
288 BUILD_LOCK_OPS(write
, rwlock
);
290 #endif /* CONFIG_PREEMPT */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Like _spin_lock(), but tells lockdep to file the acquire under
 * @subclass — used when nesting two locks of the same lock class.
 */
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_nested);

#endif /* CONFIG_DEBUG_LOCK_ALLOC */
305 void __lockfunc
_spin_unlock(spinlock_t
*lock
)
307 spin_release(&lock
->dep_map
, 1, _RET_IP_
);
308 _raw_spin_unlock(lock
);
311 EXPORT_SYMBOL(_spin_unlock
);
313 void __lockfunc
_write_unlock(rwlock_t
*lock
)
315 rwlock_release(&lock
->dep_map
, 1, _RET_IP_
);
316 _raw_write_unlock(lock
);
319 EXPORT_SYMBOL(_write_unlock
);
321 void __lockfunc
_read_unlock(rwlock_t
*lock
)
323 rwlock_release(&lock
->dep_map
, 1, _RET_IP_
);
324 _raw_read_unlock(lock
);
327 EXPORT_SYMBOL(_read_unlock
);
329 void __lockfunc
_spin_unlock_irqrestore(spinlock_t
*lock
, unsigned long flags
)
331 spin_release(&lock
->dep_map
, 1, _RET_IP_
);
332 _raw_spin_unlock(lock
);
333 local_irq_restore(flags
);
336 EXPORT_SYMBOL(_spin_unlock_irqrestore
);
338 void __lockfunc
_spin_unlock_irq(spinlock_t
*lock
)
340 spin_release(&lock
->dep_map
, 1, _RET_IP_
);
341 _raw_spin_unlock(lock
);
345 EXPORT_SYMBOL(_spin_unlock_irq
);
347 void __lockfunc
_spin_unlock_bh(spinlock_t
*lock
)
349 spin_release(&lock
->dep_map
, 1, _RET_IP_
);
350 _raw_spin_unlock(lock
);
351 preempt_enable_no_resched();
352 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
354 EXPORT_SYMBOL(_spin_unlock_bh
);
356 void __lockfunc
_read_unlock_irqrestore(rwlock_t
*lock
, unsigned long flags
)
358 rwlock_release(&lock
->dep_map
, 1, _RET_IP_
);
359 _raw_read_unlock(lock
);
360 local_irq_restore(flags
);
363 EXPORT_SYMBOL(_read_unlock_irqrestore
);
365 void __lockfunc
_read_unlock_irq(rwlock_t
*lock
)
367 rwlock_release(&lock
->dep_map
, 1, _RET_IP_
);
368 _raw_read_unlock(lock
);
372 EXPORT_SYMBOL(_read_unlock_irq
);
374 void __lockfunc
_read_unlock_bh(rwlock_t
*lock
)
376 rwlock_release(&lock
->dep_map
, 1, _RET_IP_
);
377 _raw_read_unlock(lock
);
378 preempt_enable_no_resched();
379 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
381 EXPORT_SYMBOL(_read_unlock_bh
);
383 void __lockfunc
_write_unlock_irqrestore(rwlock_t
*lock
, unsigned long flags
)
385 rwlock_release(&lock
->dep_map
, 1, _RET_IP_
);
386 _raw_write_unlock(lock
);
387 local_irq_restore(flags
);
390 EXPORT_SYMBOL(_write_unlock_irqrestore
);
392 void __lockfunc
_write_unlock_irq(rwlock_t
*lock
)
394 rwlock_release(&lock
->dep_map
, 1, _RET_IP_
);
395 _raw_write_unlock(lock
);
399 EXPORT_SYMBOL(_write_unlock_irq
);
401 void __lockfunc
_write_unlock_bh(rwlock_t
*lock
)
403 rwlock_release(&lock
->dep_map
, 1, _RET_IP_
);
404 _raw_write_unlock(lock
);
405 preempt_enable_no_resched();
406 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
408 EXPORT_SYMBOL(_write_unlock_bh
);
410 int __lockfunc
_spin_trylock_bh(spinlock_t
*lock
)
414 if (_raw_spin_trylock(lock
)) {
415 spin_acquire(&lock
->dep_map
, 0, 1, _RET_IP_
);
419 preempt_enable_no_resched();
420 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
423 EXPORT_SYMBOL(_spin_trylock_bh
);
/*
 * Return non-zero if @addr falls inside the __lockfunc text section,
 * i.e. the instruction belongs to one of the lock functions above.
 */
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);