1 #ifndef __ASM_SPINLOCK_H
2 #define __ASM_SPINLOCK_H
/*
 * DEBUG_SPINLOCKS selects one of three uniprocessor spinlock
 * implementations:
 *   0 == no debugging (all lock operations compile away)
 *   1 == maintain lock state only
 *   2 == full debug (printk complaints on misuse)
 */
#define DEBUG_SPINLOCKS 0 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */

#if (DEBUG_SPINLOCKS < 1)

/*
 * Your basic spinlocks, allowing only a single CPU anywhere.
 *
 * On a uniprocessor kernel these are no-ops; the irq variants only
 * need to mask interrupts.
 *
 * Gcc-2.7.x has a nasty bug with empty initializers, hence the dummy
 * member in the fallback typedef.
 */
#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
typedef struct { } spinlock_t;
#define SPIN_LOCK_UNLOCKED { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
#define SPIN_LOCK_UNLOCKED { 0 }
#endif

#define spin_lock_init(lock)	do { } while(0)
#define spin_lock(lock)		do { } while(0)
#define spin_trylock(lock)	(1)
#define spin_unlock_wait(lock)	do { } while(0)
#define spin_unlock(lock)	do { } while(0)
#define spin_lock_irq(lock)	cli()
#define spin_unlock_irq(lock)	sti()

#define spin_lock_irqsave(lock, flags) \
	do { save_flags(flags); cli(); } while (0)
#define spin_unlock_irqrestore(lock, flags) \
	do { restore_flags(flags); } while (0)

#elif (DEBUG_SPINLOCKS < 2)

/* Level 1: remember the lock state in memory, but never spin or check. */
typedef struct {
	volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED { 0 }

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))

#define spin_lock(x)		do { (x)->lock = 1; } while (0)
#define spin_unlock_wait(x)	do { } while (0)
#define spin_unlock(x)		do { (x)->lock = 0; } while (0)
#define spin_lock_irq(x)	do { cli(); spin_lock(x); } while (0)
#define spin_unlock_irq(x)	do { spin_unlock(x); sti(); } while (0)

#define spin_lock_irqsave(x, flags) \
	do { save_flags(flags); spin_lock_irq(x); } while (0)
#define spin_unlock_irqrestore(x, flags) \
	do { spin_unlock(x); restore_flags(flags); } while (0)

#else /* (DEBUG_SPINLOCKS >= 2) */

/*
 * Level 2: full debugging.  Each lock remembers the file that
 * initialized it and carries a "babble" budget limiting how many
 * misuse complaints it may printk.
 */
typedef struct {
	volatile unsigned int lock;
	volatile unsigned int babble;	/* remaining printk budget */
	const char *module;		/* file that owns/initialized the lock */
} spinlock_t;
#define SPIN_LOCK_UNLOCKED { 0, 25, __BASE_FILE__ }

#include <linux/kernel.h>

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))

/* Each op masks interrupts around the check+update so the state and the
 * diagnostic stay consistent; misuse is reported, never acted upon. */
#define spin_lock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
#define spin_unlock_wait(x)	do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
#define spin_unlock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
#define spin_lock_irq(x)	do {cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irq(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
/* Fixed: message previously misreported itself as spin_lock() */
#define spin_unlock_irq(x)	do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irq(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; sti();} while (0)

#define spin_lock_irqsave(x,flags)	do {save_flags(flags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irqsave(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
#define spin_unlock_irqrestore(x,flags)	do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irqrestore(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(flags);} while (0)

#endif	/* DEBUG_SPINLOCKS */
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On a uniprocessor kernel the lock carries no state; the irq
 * variants only need to mask interrupts.  (The gcc-2.7.x empty
 * initializer workaround mirrors the spinlock_t one above.)
 */
#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
typedef struct { } rwlock_t;
#define RW_LOCK_UNLOCKED { }
#else
typedef struct { int gcc_is_buggy; } rwlock_t;
#define RW_LOCK_UNLOCKED { 0 }
#endif

#define read_lock(lock)		do { } while(0)
#define read_unlock(lock)	do { } while(0)
#define write_lock(lock)	do { } while(0)
#define write_unlock(lock)	do { } while(0)
#define read_lock_irq(lock)	cli()
#define read_unlock_irq(lock)	sti()
#define write_lock_irq(lock)	cli()
#define write_unlock_irq(lock)	sti()

/* Restored truncated macro bodies: acquire saves flags and masks irqs,
 * release just restores the saved flags (mirrors spin_*_irqsave). */
#define read_lock_irqsave(lock, flags) \
	do { save_flags(flags); cli(); } while (0)
#define read_unlock_irqrestore(lock, flags) \
	do { restore_flags(flags); } while (0)
#define write_lock_irqsave(lock, flags) \
	do { save_flags(flags); cli(); } while (0)
#define write_unlock_irqrestore(lock, flags) \
	do { restore_flags(flags); } while (0)
/*
 * SMP spinlock implementation (x86 inline asm).
 * NOTE(review): this chunk is a damaged extraction of a larger file —
 * the stray leading numbers are leftover original line numbers, several
 * asm continuation lines are missing, and the spinlock_t struct braces
 * are elided from this view.  Kept byte-identical; comments only.
 */
116 * Your basic spinlocks, allowing only a single CPU anywhere
/* spinlock_t body: only the lock word is visible; the typedef's opening
   and closing lines are elided here. */
120 volatile unsigned int lock
;
123 #define SPIN_LOCK_UNLOCKED { 0 }
125 #define spin_lock_init(x) do { (x)->lock = 0; } while(0)
127 * Simple spin lock operations. There are two variants, one clears IRQ's
128 * on the local processor, one does not.
130 * We make no fairness assumptions. They have a cost.
/* Busy-wait until the lock word reads 0; barrier() keeps the compiler
   from caching the load across iterations. */
133 #define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
/* __dummy_lock_t: casting the lock pointer to a large dummy object and
   using it as an "=m" output tells gcc the asm modifies memory at the
   lock, preventing invalid reordering/caching around the lock ops. */
135 typedef struct { unsigned long a
[100]; } __dummy_lock_t
;
136 #define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
/* NOTE(review): the asm bodies below are truncated — the spin/branch
   lines between "lock ; btsl" and the .text.lock section, and the whole
   spin_unlock_string body, are missing from this view.  Lock acquire is
   an atomic test-and-set of bit 0 (lock ; btsl $0,%0); the out-of-line
   spin loop lives in the .text.lock section. */
138 #define spin_lock_string \
140 "lock ; btsl $0,%0\n\t" \
142 ".section .text.lock,\"ax\"\n" \
149 #define spin_unlock_string \
/* NOTE(review): this variant routes through helpers __getlock/__putlock
   defined elsewhere — presumably profiling/debug stubs; confirm against
   the rest of the tree. */
152 #define spin_lock(lock) \
153 __asm__ __volatile__( \
155 "\n\tcall __getlock" \
156 :"=m" (__dummy_lock(lock)))
158 #define spin_unlock(lock) \
159 __asm__ __volatile__( \
160 "call __putlock\n\t" \
162 :"=m" (__dummy_lock(lock)))
164 #define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
/* irq-safe variants: __cli/__sti/__save_flags are the raw per-CPU forms. */
166 #define spin_lock_irq(lock) \
167 do { __cli(); spin_lock(lock); } while (0)
169 #define spin_unlock_irq(lock) \
170 do { spin_unlock(lock); __sti(); } while (0)
172 #define spin_lock_irqsave(lock, flags) \
173 do { __save_flags(flags); __cli(); spin_lock(lock); } while (0)
175 #define spin_unlock_irqrestore(lock, flags) \
176 do { spin_unlock(lock); __restore_flags(flags); } while (0)
/*
 * SMP read-write lock implementation (x86 inline asm).
 * NOTE(review): damaged extraction chunk — leading numbers are leftover
 * original line numbers; the rwlock_t struct braces and parts of the
 * asm bodies are missing from this view.  Kept byte-identical.
 */
179 * Read-write spinlocks, allowing multiple readers
180 * but only one writer.
182 * NOTE! it is quite common to have readers in interrupts
183 * but no interrupt writers. For those circumstances we
184 * can "mix" irq-safe locks - any writer needs to get a
185 * irq-safe write-lock, but readers can get non-irqsafe
/* rwlock_t fields: 'lock' is the 32-bit reader counter with bit 31 as
   the write bit (see comment below); the role of 'previous' is not
   visible in this chunk — verify against the full file. */
189 volatile unsigned int lock
;
190 unsigned long previous
;
193 #define RW_LOCK_UNLOCKED { 0, 0 }
196 * On x86, we implement read-write locks as a 32-bit counter
197 * with the high bit (sign) being the "write" bit.
199 * The inline assembly is non-obvious. Think about it.
/* read_lock: atomically increment the counter; the out-of-line path in
   .text.lock decrements (backs out) and re-waits — consistent with a
   writer holding bit 31.  NOTE(review): some continuation lines of the
   asm are truncated here. */
201 #define read_lock(rw) \
202 asm volatile("\n1:\t" \
203 "lock ; incl %0\n\t" \
206 ".section .text.lock,\"ax\"\n" \
207 "2:\tlock ; decl %0\n" \
208 "3:\tcmpl $0,%0\n\t" \
212 :"=m" (__dummy_lock(&(rw)->lock)))
/* read_unlock: asm string lines are elided in this view; only the
   memory-output constraint survives. */
214 #define read_unlock(rw) \
218 :"=m" (__dummy_lock(&(rw)->lock)))
/* write_lock: atomically set bit 31, then wait for the reader count
   (low 31 bits) to drain; on contention, clear bit 31 and retry from
   the .text.lock section. */
220 #define write_lock(rw) \
221 asm volatile("\n1:\t" \
222 "lock ; btsl $31,%0\n\t" \
224 "2:\ttestl $0x7fffffff,%0\n\t" \
227 ".section .text.lock,\"ax\"\n" \
228 "3:\tlock ; btrl $31,%0\n" \
229 "4:\tcmp $0,%0\n\t" \
233 :"=m" (__dummy_lock(&(rw)->lock)))
/* write_unlock: atomically clear the write bit. */
235 #define write_unlock(rw) \
238 "lock ; btrl $31,%0": \
239 "=m" (__dummy_lock(&(rw)->lock)))
/* irq-safe wrappers over the raw lock ops. */
241 #define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)
242 #define read_unlock_irq(lock) do { read_unlock(lock); __sti(); } while (0)
243 #define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
244 #define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
246 #define read_lock_irqsave(lock, flags) \
247 do { __save_flags(flags); __cli(); read_lock(lock); } while (0)
248 #define read_unlock_irqrestore(lock, flags) \
249 do { read_unlock(lock); __restore_flags(flags); } while (0)
250 #define write_lock_irqsave(lock, flags) \
251 do { __save_flags(flags); __cli(); write_lock(lock); } while (0)
252 #define write_unlock_irqrestore(lock, flags) \
253 do { write_unlock(lock); __restore_flags(flags); } while (0)
256 #endif /* __ASM_SPINLOCK_H */