lib/spinlock_debug.c

/*
 * Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 *
 * This file contains the spinlock/rwlock implementations for
 * DEBUG_SPINLOCK.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

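/*
 * Report a spinlock API violation.  The message is printed only once per
 * boot (print_once is cleared atomically with xchg()), together with the
 * current task, the lock's magic and owner fields, and a stack trace.
 */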
static void spin_bug(spinlock_t *lock, const char *msg)
{
        static long print_once = 1;
        struct task_struct *owner = NULL;

        if (xchg(&print_once, 0)) {
                if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
                        owner = lock->owner;
                printk("BUG: spinlock %s on CPU#%d, %s/%d\n",
                        msg, smp_processor_id(), current->comm, current->pid);
                printk(" lock: %p, .magic: %08x, .owner: %s/%d, .owner_cpu: %d\n",
                        lock, lock->magic,
                        owner ? owner->comm : "<none>",
                        owner ? owner->pid : -1,
                        lock->owner_cpu);
                dump_stack();
#ifdef CONFIG_SMP
                /*
                 * We cannot continue on SMP:
                 */
//              panic("bad locking");
#endif
        }
}

#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)

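/*
 * Consistency checks wrapped around the raw lock operations: verify the
 * magic value and detect recursive locking before taking the lock, record
 * the owning task and CPU after taking it, and verify owner/CPU/locked
 * state again before releasing it.
 */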
static inline void debug_spin_lock_before(spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(lock->owner == current, lock, "recursion");
        SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_spin_lock_after(spinlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_spin_unlock(spinlock_t *lock)
{
        SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
        SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked");
        SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
        SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

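/*
 * Lock-acquisition slow path: poll the raw lock with cpu_relax() for
 * roughly one second (loops_per_jiffy * HZ iterations) per round and
 * print a one-time "spinlock lockup" warning with a backtrace if the
 * lock still cannot be taken.
 */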
static void __spin_lock_debug(spinlock_t *lock)
{
        int print_once = 1;
        u64 i;

        for (;;) {
                for (i = 0; i < loops_per_jiffy * HZ; i++) {
                        cpu_relax();
                        if (__raw_spin_trylock(&lock->raw_lock))
                                return;
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk("BUG: spinlock lockup on CPU#%d, %s/%d, %p\n",
                                smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}

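/*
 * Out-of-line debug versions of the core spinlock primitives; they add
 * the checks above around the architecture's raw implementation.
 */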
void _raw_spin_lock(spinlock_t *lock)
{
        debug_spin_lock_before(lock);
        if (unlikely(!__raw_spin_trylock(&lock->raw_lock)))
                __spin_lock_debug(lock);
        debug_spin_lock_after(lock);
}

int _raw_spin_trylock(spinlock_t *lock)
{
        int ret = __raw_spin_trylock(&lock->raw_lock);

        if (ret)
                debug_spin_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_spin_unlock(spinlock_t *lock)
{
        debug_spin_unlock(lock);
        __raw_spin_unlock(&lock->raw_lock);
}

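/*
 * rwlock counterpart of spin_bug().  Unlike the spinlock case, the SMP
 * build really does panic() here.
 */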
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
        static long print_once = 1;

        if (xchg(&print_once, 0)) {
                printk("BUG: rwlock %s on CPU#%d, %s/%d, %p\n", msg,
                        smp_processor_id(), current->comm, current->pid, lock);
                dump_stack();
#ifdef CONFIG_SMP
                /*
                 * We cannot continue on SMP:
                 */
                panic("bad locking");
#endif
        }
}

#define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)

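/*
 * Read-lock slow path: the same lockup-detection loop as
 * __spin_lock_debug(), built around __raw_read_trylock().
 */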
static void __read_lock_debug(rwlock_t *lock)
{
        int print_once = 1;
        u64 i;

        for (;;) {
                for (i = 0; i < loops_per_jiffy * HZ; i++) {
                        cpu_relax();
                        if (__raw_read_trylock(&lock->raw_lock))
                                return;
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk("BUG: read-lock lockup on CPU#%d, %s/%d, %p\n",
                                smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}

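/*
 * Read-lock entry points.  Readers do not own the lock exclusively, so
 * only the magic value can be checked; no owner tracking is done here.
 */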
void _raw_read_lock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        if (unlikely(!__raw_read_trylock(&lock->raw_lock)))
                __read_lock_debug(lock);
}

int _raw_read_trylock(rwlock_t *lock)
{
        int ret = __raw_read_trylock(&lock->raw_lock);

#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_read_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        __raw_read_unlock(&lock->raw_lock);
}

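/*
 * Write-lock checks mirror the spinlock ones: a writer owns the lock
 * exclusively, so recursion and owner/CPU consistency can be verified.
 */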
static inline void debug_write_lock_before(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
        RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
                                                lock, "cpu recursion");
}

static inline void debug_write_lock_after(rwlock_t *lock)
{
        lock->owner_cpu = raw_smp_processor_id();
        lock->owner = current;
}

static inline void debug_write_unlock(rwlock_t *lock)
{
        RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
        RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
        RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
                                                lock, "wrong CPU");
        lock->owner = SPINLOCK_OWNER_INIT;
        lock->owner_cpu = -1;
}

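/*
 * Write-lock slow path: the same polling loop and one-time lockup report
 * as for spinlocks, built around __raw_write_trylock().
 */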
static void __write_lock_debug(rwlock_t *lock)
{
        int print_once = 1;
        u64 i;

        for (;;) {
                for (i = 0; i < loops_per_jiffy * HZ; i++) {
                        cpu_relax();
                        if (__raw_write_trylock(&lock->raw_lock))
                                return;
                }
                /* lockup suspected: */
                if (print_once) {
                        print_once = 0;
                        printk("BUG: write-lock lockup on CPU#%d, %s/%d, %p\n",
                                smp_processor_id(), current->comm,
                                current->pid, lock);
                        dump_stack();
                }
        }
}

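/*
 * Write-lock entry points, wrapping the raw primitives with the
 * before/after/unlock checks above.
 */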
void _raw_write_lock(rwlock_t *lock)
{
        debug_write_lock_before(lock);
        if (unlikely(!__raw_write_trylock(&lock->raw_lock)))
                __write_lock_debug(lock);
        debug_write_lock_after(lock);
}

int _raw_write_trylock(rwlock_t *lock)
{
        int ret = __raw_write_trylock(&lock->raw_lock);

        if (ret)
                debug_write_lock_after(lock);
#ifndef CONFIG_SMP
        /*
         * Must not happen on UP:
         */
        RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
#endif
        return ret;
}

void _raw_write_unlock(rwlock_t *lock)
{
        debug_write_unlock(lock);
        __raw_write_unlock(&lock->raw_lock);
}
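
/*
 * Illustrative sketch (not part of the original file, kept disabled):
 * with CONFIG_DEBUG_SPINLOCK enabled, the hypothetical misuse below would
 * trigger the "already unlocked" and "recursion" reports implemented
 * above.  The lock and function names are made up for illustration.
 */
#if 0
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;

static void spinlock_debug_example(void)
{
        spin_lock(&example_lock);
        spin_unlock(&example_lock);
        spin_unlock(&example_lock);     /* spin_bug(): "already unlocked" */

        spin_lock(&example_lock);
        spin_lock(&example_lock);       /* spin_bug(): "recursion", then lockup report */
}
#endif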