/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 */
#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
/*
 * Generic declaration of the raw read_trylock() function,
 * architectures are supposed to optimize this:
 */
int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock)
{
	__raw_read_lock(lock);
	return 1;
}
EXPORT_SYMBOL(generic__raw_read_trylock);
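/*
 * Trylock variants below: disable preemption, attempt the raw trylock
 * once, and back out (re-enabling preemption) if the lock is contended.
 * The caller gets 1 on success and 0 on failure; preemption stays
 * disabled only in the success case.
 */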
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock);

int __lockfunc _read_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_read_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_read_trylock);

int __lockfunc _write_trylock(rwlock_t *lock)
{
	preempt_disable();
	if (_raw_write_trylock(lock))
		return 1;

	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(_write_trylock);
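/*
 * On kernels without both CONFIG_PREEMPT and CONFIG_SMP the lock
 * operations below are plain wrappers around the raw primitives; the
 * preemption-friendly, lock-breaking versions further down only pay
 * off when a preemptible SMP kernel can actually spin on a contended
 * lock.
 */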
#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)

void __lockfunc _read_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock);
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_spin_lock_flags(lock, &flags);
	return flags;
}
EXPORT_SYMBOL(_spin_lock_irqsave);
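/*
 * Note: _raw_spin_lock_flags() is handed the saved flags so that an
 * architecture may, if it chooses, re-enable interrupts while it is
 * still spinning on a contended lock and disable them again just
 * before taking it; architectures without that optimization simply
 * ignore the flags argument.
 */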
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_irq);

void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock_bh);

unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_read_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_read_lock_irqsave);

void __lockfunc _read_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_irq);

void __lockfunc _read_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_read_lock(lock);
}
EXPORT_SYMBOL(_read_lock_bh);

unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	_raw_write_lock(lock);
	return flags;
}
EXPORT_SYMBOL(_write_lock_irqsave);

void __lockfunc _write_lock_irq(rwlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_irq);

void __lockfunc _write_lock_bh(rwlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock_bh);

void __lockfunc _spin_lock(spinlock_t *lock)
{
	preempt_disable();
	_raw_spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);

void __lockfunc _write_lock(rwlock_t *lock)
{
	preempt_disable();
	_raw_write_lock(lock);
}
EXPORT_SYMBOL(_write_lock);
#else /* CONFIG_PREEMPT: */

/*
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 *
 * (We do this in a function because inlining it would be excessive.)
 */
#define BUILD_LOCK_OPS(op, locktype) \
void __lockfunc _##op##_lock(locktype##_t *lock) \
{ \
	for (;;) { \
		preempt_disable(); \
		if (likely(_raw_##op##_trylock(lock))) \
			break; \
		preempt_enable(); \
 \
		if (!(lock)->break_lock) \
			(lock)->break_lock = 1; \
		while (!op##_can_lock(lock) && (lock)->break_lock) \
			cpu_relax(); \
	} \
	(lock)->break_lock = 0; \
} \
 \
EXPORT_SYMBOL(_##op##_lock); \
 \
unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
{ \
	unsigned long flags; \
 \
	for (;;) { \
		preempt_disable(); \
		local_irq_save(flags); \
		if (likely(_raw_##op##_trylock(lock))) \
			break; \
		local_irq_restore(flags); \
		preempt_enable(); \
 \
		if (!(lock)->break_lock) \
			(lock)->break_lock = 1; \
		while (!op##_can_lock(lock) && (lock)->break_lock) \
			cpu_relax(); \
	} \
	(lock)->break_lock = 0; \
	return flags; \
} \
 \
EXPORT_SYMBOL(_##op##_lock_irqsave); \
 \
void __lockfunc _##op##_lock_irq(locktype##_t *lock) \
{ \
	_##op##_lock_irqsave(lock); \
} \
 \
EXPORT_SYMBOL(_##op##_lock_irq); \
 \
void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
{ \
	unsigned long flags; \
 \
	/* */ \
	/* Careful: we must exclude softirqs too, hence the */ \
	/* irq-disabling. We use the generic preemption-aware */ \
	/* function: */ \
	/**/ \
	flags = _##op##_lock_irqsave(lock); \
	local_bh_disable(); \
	local_irq_restore(flags); \
} \
 \
EXPORT_SYMBOL(_##op##_lock_bh)
/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         _[spin|read|write]_lock()
 *         _[spin|read|write]_lock_irq()
 *         _[spin|read|write]_lock_irqsave()
 *         _[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
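/*
 * The three expansions above generate _spin_lock(), _read_lock() and
 * _write_lock() together with their _irq, _irqsave and _bh variants.
 * A contended waiter sets ->break_lock and spins outside the lock;
 * the holder can poll that flag (e.g. via cond_resched_lock()) and
 * drop the lock early to keep latency down.
 */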
#endif /* CONFIG_PREEMPT */
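/*
 * The unlock paths are shared between the PREEMPT and non-PREEMPT
 * builds: drop the raw lock first, then re-enable, in reverse order,
 * whatever the matching lock function disabled (interrupts, softirqs,
 * preemption).
 */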
void __lockfunc _spin_unlock(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock);

void __lockfunc _write_unlock(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock);

void __lockfunc _read_unlock(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock);

void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irqrestore);

void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_spin_unlock_irq);

void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
	_raw_spin_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_spin_unlock_bh);
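/*
 * The _bh unlock paths use preempt_enable_no_resched() because
 * local_bh_enable() performs its own preemption check once softirqs
 * are re-enabled, so rescheduling from the intermediate state is
 * avoided.
 */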
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_read_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irqrestore);

void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_read_unlock_irq);

void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
	_raw_read_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_read_unlock_bh);

void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
	_raw_write_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irqrestore);

void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	local_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(_write_unlock_irq);

void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
	_raw_write_unlock(lock);
	preempt_enable_no_resched();
	local_bh_enable();
}
EXPORT_SYMBOL(_write_unlock_bh);

int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (_raw_spin_trylock(lock))
		return 1;

	preempt_enable_no_resched();
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL(_spin_trylock_bh);
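/*
 * Illustrative caller pattern only (not part of this file); "my_lock"
 * is a hypothetical lock made up for the example:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	unsigned long flags;
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, interrupts off on this CPU ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */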
int in_lock_functions(unsigned long addr)
{
	/* Linker adds these: start and end of __lockfunc functions */
	extern char __lock_text_start[], __lock_text_end[];

	return addr >= (unsigned long)__lock_text_start
	&& addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);