/*
 * Provenance: Linux 2.6.18.4, kernel/spinlock.c
 * (blob fb524b009eefeca3ffd2a2a929421a7eeb997660)
 */
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 */
12 #include <linux/linkage.h>
13 #include <linux/preempt.h>
14 #include <linux/spinlock.h>
15 #include <linux/interrupt.h>
16 #include <linux/debug_locks.h>
17 #include <linux/module.h>
20 * Generic declaration of the raw read_trylock() function,
21 * architectures are supposed to optimize this:
23 int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
25 __raw_read_lock(lock);
26 return 1;
28 EXPORT_SYMBOL(generic__raw_read_trylock);
30 int __lockfunc _spin_trylock(spinlock_t *lock)
32 preempt_disable();
33 if (_raw_spin_trylock(lock)) {
34 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
35 return 1;
38 preempt_enable();
39 return 0;
41 EXPORT_SYMBOL(_spin_trylock);
43 int __lockfunc _read_trylock(rwlock_t *lock)
45 preempt_disable();
46 if (_raw_read_trylock(lock)) {
47 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
48 return 1;
51 preempt_enable();
52 return 0;
54 EXPORT_SYMBOL(_read_trylock);
56 int __lockfunc _write_trylock(rwlock_t *lock)
58 preempt_disable();
59 if (_raw_write_trylock(lock)) {
60 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
61 return 1;
64 preempt_enable();
65 return 0;
67 EXPORT_SYMBOL(_write_trylock);
70 * If lockdep is enabled then we use the non-preemption spin-ops
71 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
72 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
74 #if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
75 defined(CONFIG_DEBUG_LOCK_ALLOC)
77 void __lockfunc _read_lock(rwlock_t *lock)
79 preempt_disable();
80 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
81 _raw_read_lock(lock);
83 EXPORT_SYMBOL(_read_lock);
85 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
87 unsigned long flags;
89 local_irq_save(flags);
90 preempt_disable();
91 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
93 * On lockdep we dont want the hand-coded irq-enable of
94 * _raw_spin_lock_flags() code, because lockdep assumes
95 * that interrupts are not re-enabled during lock-acquire:
97 #ifdef CONFIG_PROVE_LOCKING
98 _raw_spin_lock(lock);
99 #else
100 _raw_spin_lock_flags(lock, &flags);
101 #endif
102 return flags;
104 EXPORT_SYMBOL(_spin_lock_irqsave);
106 void __lockfunc _spin_lock_irq(spinlock_t *lock)
108 local_irq_disable();
109 preempt_disable();
110 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
111 _raw_spin_lock(lock);
113 EXPORT_SYMBOL(_spin_lock_irq);
115 void __lockfunc _spin_lock_bh(spinlock_t *lock)
117 local_bh_disable();
118 preempt_disable();
119 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
120 _raw_spin_lock(lock);
122 EXPORT_SYMBOL(_spin_lock_bh);
124 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
126 unsigned long flags;
128 local_irq_save(flags);
129 preempt_disable();
130 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
131 _raw_read_lock(lock);
132 return flags;
134 EXPORT_SYMBOL(_read_lock_irqsave);
136 void __lockfunc _read_lock_irq(rwlock_t *lock)
138 local_irq_disable();
139 preempt_disable();
140 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
141 _raw_read_lock(lock);
143 EXPORT_SYMBOL(_read_lock_irq);
145 void __lockfunc _read_lock_bh(rwlock_t *lock)
147 local_bh_disable();
148 preempt_disable();
149 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
150 _raw_read_lock(lock);
152 EXPORT_SYMBOL(_read_lock_bh);
154 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
156 unsigned long flags;
158 local_irq_save(flags);
159 preempt_disable();
160 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
161 _raw_write_lock(lock);
162 return flags;
164 EXPORT_SYMBOL(_write_lock_irqsave);
166 void __lockfunc _write_lock_irq(rwlock_t *lock)
168 local_irq_disable();
169 preempt_disable();
170 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
171 _raw_write_lock(lock);
173 EXPORT_SYMBOL(_write_lock_irq);
175 void __lockfunc _write_lock_bh(rwlock_t *lock)
177 local_bh_disable();
178 preempt_disable();
179 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
180 _raw_write_lock(lock);
182 EXPORT_SYMBOL(_write_lock_bh);
184 void __lockfunc _spin_lock(spinlock_t *lock)
186 preempt_disable();
187 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
188 _raw_spin_lock(lock);
191 EXPORT_SYMBOL(_spin_lock);
193 void __lockfunc _write_lock(rwlock_t *lock)
195 preempt_disable();
196 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
197 _raw_write_lock(lock);
200 EXPORT_SYMBOL(_write_lock);
202 #else /* CONFIG_PREEMPT: */
205 * This could be a long-held lock. We both prepare to spin for a long
206 * time (making _this_ CPU preemptable if possible), and we also signal
207 * towards that other CPU that it should break the lock ASAP.
209 * (We do this in a function because inlining it would be excessive.)
212 #define BUILD_LOCK_OPS(op, locktype) \
213 void __lockfunc _##op##_lock(locktype##_t *lock) \
215 for (;;) { \
216 preempt_disable(); \
217 if (likely(_raw_##op##_trylock(lock))) \
218 break; \
219 preempt_enable(); \
221 if (!(lock)->break_lock) \
222 (lock)->break_lock = 1; \
223 while (!op##_can_lock(lock) && (lock)->break_lock) \
224 cpu_relax(); \
226 (lock)->break_lock = 0; \
229 EXPORT_SYMBOL(_##op##_lock); \
231 unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
233 unsigned long flags; \
235 for (;;) { \
236 preempt_disable(); \
237 local_irq_save(flags); \
238 if (likely(_raw_##op##_trylock(lock))) \
239 break; \
240 local_irq_restore(flags); \
241 preempt_enable(); \
243 if (!(lock)->break_lock) \
244 (lock)->break_lock = 1; \
245 while (!op##_can_lock(lock) && (lock)->break_lock) \
246 cpu_relax(); \
248 (lock)->break_lock = 0; \
249 return flags; \
252 EXPORT_SYMBOL(_##op##_lock_irqsave); \
254 void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
256 _##op##_lock_irqsave(lock); \
259 EXPORT_SYMBOL(_##op##_lock_irq); \
261 void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
263 unsigned long flags; \
265 /* */ \
266 /* Careful: we must exclude softirqs too, hence the */ \
267 /* irq-disabling. We use the generic preemption-aware */ \
268 /* function: */ \
269 /**/ \
270 flags = _##op##_lock_irqsave(lock); \
271 local_bh_disable(); \
272 local_irq_restore(flags); \
275 EXPORT_SYMBOL(_##op##_lock_bh)
278 * Build preemption-friendly versions of the following
279 * lock-spinning functions:
281 * _[spin|read|write]_lock()
282 * _[spin|read|write]_lock_irq()
283 * _[spin|read|write]_lock_irqsave()
284 * _[spin|read|write]_lock_bh()
286 BUILD_LOCK_OPS(spin, spinlock);
287 BUILD_LOCK_OPS(read, rwlock);
288 BUILD_LOCK_OPS(write, rwlock);
290 #endif /* CONFIG_PREEMPT */
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Like _spin_lock(), but passes @subclass to lockdep so that nested
 * locks of the same lock class can be annotated.
 */
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	_raw_spin_lock(lock);
}

EXPORT_SYMBOL(_spin_lock_nested);

#endif
305 void __lockfunc _spin_unlock(spinlock_t *lock)
307 spin_release(&lock->dep_map, 1, _RET_IP_);
308 _raw_spin_unlock(lock);
309 preempt_enable();
311 EXPORT_SYMBOL(_spin_unlock);
313 void __lockfunc _write_unlock(rwlock_t *lock)
315 rwlock_release(&lock->dep_map, 1, _RET_IP_);
316 _raw_write_unlock(lock);
317 preempt_enable();
319 EXPORT_SYMBOL(_write_unlock);
321 void __lockfunc _read_unlock(rwlock_t *lock)
323 rwlock_release(&lock->dep_map, 1, _RET_IP_);
324 _raw_read_unlock(lock);
325 preempt_enable();
327 EXPORT_SYMBOL(_read_unlock);
329 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
331 spin_release(&lock->dep_map, 1, _RET_IP_);
332 _raw_spin_unlock(lock);
333 local_irq_restore(flags);
334 preempt_enable();
336 EXPORT_SYMBOL(_spin_unlock_irqrestore);
338 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
340 spin_release(&lock->dep_map, 1, _RET_IP_);
341 _raw_spin_unlock(lock);
342 local_irq_enable();
343 preempt_enable();
345 EXPORT_SYMBOL(_spin_unlock_irq);
347 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
349 spin_release(&lock->dep_map, 1, _RET_IP_);
350 _raw_spin_unlock(lock);
351 preempt_enable_no_resched();
352 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
354 EXPORT_SYMBOL(_spin_unlock_bh);
356 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
358 rwlock_release(&lock->dep_map, 1, _RET_IP_);
359 _raw_read_unlock(lock);
360 local_irq_restore(flags);
361 preempt_enable();
363 EXPORT_SYMBOL(_read_unlock_irqrestore);
365 void __lockfunc _read_unlock_irq(rwlock_t *lock)
367 rwlock_release(&lock->dep_map, 1, _RET_IP_);
368 _raw_read_unlock(lock);
369 local_irq_enable();
370 preempt_enable();
372 EXPORT_SYMBOL(_read_unlock_irq);
374 void __lockfunc _read_unlock_bh(rwlock_t *lock)
376 rwlock_release(&lock->dep_map, 1, _RET_IP_);
377 _raw_read_unlock(lock);
378 preempt_enable_no_resched();
379 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
381 EXPORT_SYMBOL(_read_unlock_bh);
383 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
385 rwlock_release(&lock->dep_map, 1, _RET_IP_);
386 _raw_write_unlock(lock);
387 local_irq_restore(flags);
388 preempt_enable();
390 EXPORT_SYMBOL(_write_unlock_irqrestore);
392 void __lockfunc _write_unlock_irq(rwlock_t *lock)
394 rwlock_release(&lock->dep_map, 1, _RET_IP_);
395 _raw_write_unlock(lock);
396 local_irq_enable();
397 preempt_enable();
399 EXPORT_SYMBOL(_write_unlock_irq);
401 void __lockfunc _write_unlock_bh(rwlock_t *lock)
403 rwlock_release(&lock->dep_map, 1, _RET_IP_);
404 _raw_write_unlock(lock);
405 preempt_enable_no_resched();
406 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
408 EXPORT_SYMBOL(_write_unlock_bh);
410 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
412 local_bh_disable();
413 preempt_disable();
414 if (_raw_spin_trylock(lock)) {
415 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
416 return 1;
419 preempt_enable_no_resched();
420 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
421 return 0;
423 EXPORT_SYMBOL(_spin_trylock_bh);
/*
 * Return non-zero if @addr lies inside the __lockfunc text section,
 * i.e. inside one of the lock-spinning functions above.
 */
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);