pre-2.3.4..
[davej-history.git] / include / asm-i386 / spinlock.h
blobb447a402f956d64c1e2c3ed69e0428e6475e835e
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * These are the generic versions of the spinlocks
 * and read-write locks.. We should actually do a
 * <linux/spinlock.h> with all of this. Oh, well.
 *
 * Each *_irqsave/_irq/_bh variant composes the plain lock operation
 * with the matching interrupt / bottom-half disable.  Note the order:
 * interrupts are disabled BEFORE taking the lock and re-enabled only
 * AFTER releasing it, so the critical section cannot be re-entered
 * from an interrupt on this CPU.
 */
#define spin_lock_irqsave(lock, flags)		do { local_irq_save(flags); spin_lock(lock); } while (0)
#define spin_lock_irq(lock)			do { local_irq_disable(); spin_lock(lock); } while (0)
#define spin_lock_bh(lock)			do { local_bh_disable(); spin_lock(lock); } while (0)

#define read_lock_irqsave(lock, flags)		do { local_irq_save(flags); read_lock(lock); } while (0)
#define read_lock_irq(lock)			do { local_irq_disable(); read_lock(lock); } while (0)
#define read_lock_bh(lock)			do { local_bh_disable(); read_lock(lock); } while (0)

#define write_lock_irqsave(lock, flags)		do { local_irq_save(flags); write_lock(lock); } while (0)
#define write_lock_irq(lock)			do { local_irq_disable(); write_lock(lock); } while (0)
#define write_lock_bh(lock)			do { local_bh_disable(); write_lock(lock); } while (0)

#define spin_unlock_irqrestore(lock, flags)	do { spin_unlock(lock); local_irq_restore(flags); } while (0)
#define spin_unlock_irq(lock)			do { spin_unlock(lock); local_irq_enable(); } while (0)
#define spin_unlock_bh(lock)			do { spin_unlock(lock); local_bh_enable(); } while (0)

#define read_unlock_irqrestore(lock, flags)	do { read_unlock(lock); local_irq_restore(flags); } while (0)
#define read_unlock_irq(lock)			do { read_unlock(lock); local_irq_enable(); } while (0)
#define read_unlock_bh(lock)			do { read_unlock(lock); local_bh_enable(); } while (0)

#define write_unlock_irqrestore(lock, flags)	do { write_unlock(lock); local_irq_restore(flags); } while (0)
#define write_unlock_irq(lock)			do { write_unlock(lock); local_irq_enable(); } while (0)
#define write_unlock_bh(lock)			do { write_unlock(lock); local_bh_enable(); } while (0)

#ifndef __SMP__

#define DEBUG_SPINLOCKS	0	/* 0 == no debugging, 1 == maintain lock state, 2 == full debug */

#if (DEBUG_SPINLOCKS < 1)

/*
 * Your basic spinlocks, allowing only a single CPU anywhere.
 *
 * On a uniprocessor kernel there is nobody to spin against, so the
 * lock operations compile away entirely; only the type remains.
 *
 * Gcc-2.7.x has a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
typedef struct { } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
#else
typedef struct { int gcc_is_buggy; } spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#endif

#define spin_lock_init(lock)	do { } while(0)
#define spin_lock(lock)		do { } while(0)
#define spin_trylock(lock)	(1)	/* always succeeds: no one to race with */
#define spin_unlock_wait(lock)	do { } while(0)
#define spin_unlock(lock)	do { } while(0)

#elif (DEBUG_SPINLOCKS < 2)

/* Debug level 1: maintain the lock state, but perform no checking. */
typedef struct {
	volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))

#define spin_lock(x)		do { (x)->lock = 1; } while (0)
#define spin_unlock_wait(x)	do { } while (0)
#define spin_unlock(x)		do { (x)->lock = 0; } while (0)

#else /* (DEBUG_SPINLOCKS >= 2) */

/*
 * Debug level 2: full checking.  Each lock remembers the file that
 * initialized it and carries a "babble" budget limiting how many
 * diagnostics it may print, so a buggy caller cannot flood the log.
 */
typedef struct {
	volatile unsigned int lock;
	volatile unsigned int babble;
	const char *module;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }

#include <linux/kernel.h>

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)
#define spin_trylock(lock)	(!test_and_set_bit(0,(lock)))

/* The check and state update run with interrupts off (cli) so the
 * reported state matches the state actually modified. */
#define spin_lock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
#define spin_unlock_wait(x)	do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
#define spin_unlock(x)		do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)

#endif	/* DEBUG_SPINLOCKS */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * Gcc-2.7.x has a nasty bug with empty initializers.
 */
#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
typedef struct { } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
typedef struct { int gcc_is_buggy; } rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif

/* Like the UP spinlocks above, these compile away entirely. */
#define read_lock(lock)		do { } while(0)
#define read_unlock(lock)	do { } while(0)
#define write_lock(lock)	do { } while(0)
#define write_unlock(lock)	do { } while(0)

#else	/* __SMP__ */

/*
 * Your basic spinlocks, allowing only a single CPU anywhere
 */
typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define spin_lock_init(x)	do { (x)->lock = 0; } while(0)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
#define spin_unlock_wait(x)	do { barrier(); } while(((volatile spinlock_t *)(x))->lock)

/*
 * The oversized dummy type makes the whole lock object the "=m"
 * output of the asm statements below, so gcc cannot cache the lock
 * word in a register across the lock/unlock sequence.
 */
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))

/*
 * Fast path: atomically test-and-set bit 0.  If it was already set,
 * spin with plain (non-locked) reads in the out-of-line .text.lock
 * section until the lock looks free, then retry the atomic op.
 */
#define spin_lock_string \
	"\n1:\t" \
	"lock ; btsl $0,%0\n\t" \
	"jc 2f\n" \
	".section .text.lock,\"ax\"\n" \
	"2:\t" \
	"testb $1,%0\n\t" \
	"jne 2b\n\t" \
	"jmp 1b\n" \
	".previous"

#define spin_unlock_string \
	"lock ; btrl $0,%0"

#define spin_lock(lock) \
__asm__ __volatile__( \
	spin_lock_string \
	:"=m" (__dummy_lock(lock)))

#define spin_unlock(lock) \
__asm__ __volatile__( \
	spin_unlock_string \
	:"=m" (__dummy_lock(lock)))

#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile unsigned int lock;
	unsigned long previous;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "write" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * read_lock: increment the counter; if that made it negative a writer
 * holds the lock, so back the increment out and spin until the counter
 * is non-negative, then retry.
 */
#define read_lock(rw)	\
	asm volatile("\n1:\t" \
		     "lock ; incl %0\n\t" \
		     "js 2f\n" \
		     ".section .text.lock,\"ax\"\n" \
		     "2:\tlock ; decl %0\n" \
		     "3:\tcmpl $0,%0\n\t" \
		     "js 3b\n\t" \
		     "jmp 1b\n" \
		     ".previous" \
		     :"=m" (__dummy_lock(&(rw)->lock)))

#define read_unlock(rw) \
	asm volatile("lock ; decl %0" \
		     :"=m" (__dummy_lock(&(rw)->lock)))

/*
 * write_lock: atomically set the write bit (31); if a writer already
 * held it, spin at 4.  Otherwise wait for the reader count to drain to
 * zero; if readers are present, drop the write bit first (3) so they
 * can make progress, then retry from the top.
 */
#define write_lock(rw) \
	asm volatile("\n1:\t" \
		     "lock ; btsl $31,%0\n\t" \
		     "jc 4f\n" \
		     "2:\ttestl $0x7fffffff,%0\n\t" \
		     "jne 3f\n" \
		     ".section .text.lock,\"ax\"\n" \
		     "3:\tlock ; btrl $31,%0\n" \
		     "4:\tcmp $0,%0\n\t" \
		     "jne 4b\n\t" \
		     "jmp 1b\n" \
		     ".previous" \
		     :"=m" (__dummy_lock(&(rw)->lock)))

#define write_unlock(rw) \
	asm volatile("lock ; btrl $31,%0":"=m" (__dummy_lock(&(rw)->lock)))

#endif /* __SMP__ */
#endif /* __ASM_SPINLOCK_H */