Ok. I didn't make 2.4.0 in 2000. Tough. I tried, but we had some
[davej-history.git] / include / asm-sparc / spinlock.h
blob84b0f03f550607ec360981b9422e3dd2372da433
1 /* spinlock.h: 32-bit Sparc spinlock support.
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
6 #ifndef __SPARC_SPINLOCK_H
7 #define __SPARC_SPINLOCK_H
9 #include <linux/threads.h> /* For NR_CPUS */
11 #ifndef __ASSEMBLY__
13 #include <asm/psr.h>
/*
 * Define this to use the verbose/debugging versions in
 * arch/sparc/lib/debuglocks.c
 *
 * Be sure to make dep whenever changing this option.
 */
21 #define SPIN_LOCK_DEBUG
23 #ifdef SPIN_LOCK_DEBUG
/* Debugging spinlock: the lock byte plus the program counter of the
 * code that took the lock, recorded for diagnostics.
 */
struct _spinlock_debug {
	unsigned char lock;		/* 0 = free, non-zero = held */
	unsigned long owner_pc;		/* PC of current owner */
};
typedef struct _spinlock_debug spinlock_t;
30 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0 }
31 #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
32 #define spin_is_locked(lp) (*((volatile unsigned char *)(&((lp)->lock))) != 0)
33 #define spin_unlock_wait(lp) do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock))
35 extern void _do_spin_lock(spinlock_t *lock, char *str);
36 extern int _spin_trylock(spinlock_t *lock);
37 extern void _do_spin_unlock(spinlock_t *lock);
39 #define spin_trylock(lp) _spin_trylock(lp)
40 #define spin_lock(lock) _do_spin_lock(lock, "spin_lock")
41 #define spin_unlock(lock) _do_spin_unlock(lock)
43 struct _rwlock_debug {
44 volatile unsigned int lock;
45 unsigned long owner_pc;
46 unsigned long reader_pc[NR_CPUS];
48 typedef struct _rwlock_debug rwlock_t;
50 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} }
52 #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
54 extern void _do_read_lock(rwlock_t *rw, char *str);
55 extern void _do_read_unlock(rwlock_t *rw, char *str);
56 extern void _do_write_lock(rwlock_t *rw, char *str);
57 extern void _do_write_unlock(rwlock_t *rw);
/* Each wrapper runs the debug helper with local interrupts disabled
 * (__save_and_cli / __restore_flags) and passes the operation name
 * for the debug code to report.
 */
#define read_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_lock(lock, "read_lock"); \
	__restore_flags(flags); \
} while(0)

#define read_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	__restore_flags(flags); \
} while(0)

#define write_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_lock(lock, "write_lock"); \
	__restore_flags(flags); \
} while(0)

#define write_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_unlock(lock); \
	__restore_flags(flags); \
} while(0)
87 #else /* !SPIN_LOCK_DEBUG */
/* Production spinlock: a single byte, 0 = unlocked, non-zero = held. */
typedef unsigned char spinlock_t;

#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)

/* Busy-wait (without acquiring) until the lock byte reads zero. */
#define spin_unlock_wait(lock) \
do { \
	barrier(); \
} while(*((volatile unsigned char *)lock))
100 extern __inline__ void spin_lock(spinlock_t *lock)
102 __asm__ __volatile__("
103 1: ldstub [%0], %%g2
104 orcc %%g2, 0x0, %%g0
105 bne,a 2f
106 ldub [%0], %%g2
107 .subsection 2
108 2: orcc %%g2, 0x0, %%g0
109 bne,a 2b
110 ldub [%0], %%g2
111 b,a 1b
112 .previous
113 " : /* no outputs */
114 : "r" (lock)
115 : "g2", "memory", "cc");
118 extern __inline__ int spin_trylock(spinlock_t *lock)
120 unsigned int result;
121 __asm__ __volatile__("ldstub [%1], %0"
122 : "=r" (result)
123 : "r" (lock)
124 : "memory");
125 return (result == 0);
128 extern __inline__ void spin_unlock(spinlock_t *lock)
130 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 */
typedef struct { volatile unsigned int lock; } rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
/* Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  rwlock_t
 *	------------------------------------
 *	 31                       8 7      0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
166 extern __inline__ void _read_lock(rwlock_t *rw)
168 register rwlock_t *lp asm("g1");
169 lp = rw;
170 __asm__ __volatile__("
171 mov %%o7, %%g4
172 call ___rw_read_enter
173 ldstub [%%g1 + 3], %%g2
174 " : /* no outputs */
175 : "r" (lp)
176 : "g2", "g4", "memory", "cc");
/* Public read_lock: the low-level helper with local irqs disabled. */
#define read_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_read_lock(lock); \
	__restore_flags(flags); \
} while(0)
186 extern __inline__ void _read_unlock(rwlock_t *rw)
188 register rwlock_t *lp asm("g1");
189 lp = rw;
190 __asm__ __volatile__("
191 mov %%o7, %%g4
192 call ___rw_read_exit
193 ldstub [%%g1 + 3], %%g2
194 " : /* no outputs */
195 : "r" (lp)
196 : "g2", "g4", "memory", "cc");
/* Public read_unlock: the low-level helper with local irqs disabled. */
#define read_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_read_unlock(lock); \
	__restore_flags(flags); \
} while(0)
206 extern __inline__ void write_lock(rwlock_t *rw)
208 register rwlock_t *lp asm("g1");
209 lp = rw;
210 __asm__ __volatile__("
211 mov %%o7, %%g4
212 call ___rw_write_enter
213 ldstub [%%g1 + 3], %%g2
214 " : /* no outputs */
215 : "r" (lp)
216 : "g2", "g4", "memory", "cc");
219 #define write_unlock(rw) do { (rw)->lock = 0; } while(0)
221 #endif /* SPIN_LOCK_DEBUG */
223 #endif /* !(__ASSEMBLY__) */
225 #endif /* __SPARC_SPINLOCK_H */