/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/module.h>

int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock)) {
		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock)) {
		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);

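/*
 * Example (illustrative sketch, not part of this file): callers reach
 * the trylock variants via the spin_trylock()/read_trylock()/
 * write_trylock() wrappers in <linux/spinlock.h>. A nonzero return
 * means the lock was taken; "struct foo" and foo_poke() are
 * hypothetical names used only for illustration:
 *
 *	static int foo_poke(struct foo *f)
 *	{
 *		if (!spin_trylock(&f->lock))
 *			return -EBUSY;
 *		f->count++;
 *		spin_unlock(&f->lock);
 *		return 0;
 *	}
 */
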
/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
	defined(CONFIG_DEBUG_LOCK_ALLOC)

void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * _raw_spin_lock_flags() code, because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_PROVE_LOCKING
	_raw_spin_lock(lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);

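/*
 * Usage sketch: the flags value returned by _spin_lock_irqsave() must
 * be handed back to the matching unlock. Through the generic wrappers
 * this looks as follows ("dev" is a hypothetical driver structure;
 * note that spin_lock_irqsave() is a macro that assigns to flags,
 * which is why flags is not passed by address):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	dev->pending++;
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */
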
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);

#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */

#define BUILD_LOCK_OPS(op, locktype)					\
void __lockfunc _##op##_lock(locktype##_t *lock)			\
{									\
	for (;;) {							\
		preempt_disable();					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
}									\
									\
EXPORT_SYMBOL(_##op##_lock);						\
									\
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
{									\
	unsigned long flags;						\
									\
	for (;;) {							\
		preempt_disable();					\
		local_irq_save(flags);					\
		if (likely(_raw_##op##_trylock(lock)))			\
			break;						\
		local_irq_restore(flags);				\
		preempt_enable();					\
									\
		if (!(lock)->break_lock)				\
			(lock)->break_lock = 1;				\
		while (!op##_can_lock(lock) && (lock)->break_lock)	\
			_raw_##op##_relax(&lock->raw_lock);		\
	}								\
	(lock)->break_lock = 0;						\
	return flags;							\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irqsave);					\
									\
void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
{									\
	_##op##_lock_irqsave(lock);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_irq);					\
									\
void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
{									\
	unsigned long flags;						\
									\
	/* Careful: we must exclude softirqs too, hence the */		\
	/* irq-disabling. We use the generic preemption-aware */	\
	/* function: */							\
	flags = _##op##_lock_irqsave(lock);				\
	local_bh_disable();						\
	local_irq_restore(flags);					\
}									\
									\
EXPORT_SYMBOL(_##op##_lock_bh)

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);

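/*
 * For reference, BUILD_LOCK_OPS(spin, spinlock) expands roughly to the
 * following for the plain-lock case (the irqsave/irq/bh variants are
 * analogous):
 *
 *	void __lockfunc _spin_lock(spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			if (!(lock)->break_lock)
 *				(lock)->break_lock = 1;
 *			while (!spin_can_lock(lock) && (lock)->break_lock)
 *				_raw_spin_relax(&lock->raw_lock);
 *		}
 *		(lock)->break_lock = 0;
 *	}
 */
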
#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_nested);

#endif /* CONFIG_DEBUG_LOCK_ALLOC */

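/*
 * Usage sketch: the subclass tells lockdep that taking two locks of the
 * same lock class in a fixed order is intentional, e.g. for parent and
 * child objects of the same type ("parent"/"child" are hypothetical
 * names here):
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	child->parent = parent;
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */
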
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_spin_unlock_bh);

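/*
 * Usage sketch: the _bh variants pair a lock with softirq exclusion,
 * protecting data shared with softirq (e.g. timer or network) context
 * without disabling hard interrupts ("stats" is a hypothetical
 * structure):
 *
 *	spin_lock_bh(&stats->lock);
 *	stats->packets++;
 *	spin_unlock_bh(&stats->lock);
 */
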
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	rwlock_release(&lock->dep_map, 1, _RET_IP_);
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}

	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);

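/*
 * Usage sketch: on success the lock is held with softirqs disabled, so
 * the owner must release it with spin_unlock_bh(); on failure softirqs
 * are re-enabled before returning ("q" is a hypothetical queue):
 *
 *	if (spin_trylock_bh(&q->lock)) {
 *		q->len++;
 *		spin_unlock_bh(&q->lock);
 *	}
 */
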
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);

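/*
 * Usage sketch: profiling code can use in_lock_functions() to attribute
 * samples that land inside a lock function to the caller instead, along
 * the lines of the architectures' profile_pc() implementations
 * ("pc"/"caller_pc" are hypothetical names here):
 *
 *	if (in_lock_functions(pc))
 *		pc = caller_pc;
 */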