Import 2.3.18pre1
[davej-history.git] / include / asm-sparc64 / spinlock.h
blob02a8d2d9b7c0a0061f1103e43a26bf16a118a066
1 /* spinlock.h: 64-bit Sparc spinlock support.
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
4 */
6 #ifndef __SPARC64_SPINLOCK_H
7 #define __SPARC64_SPINLOCK_H
9 #ifndef __ASSEMBLY__
/* To get debugging spinlocks which detect and catch
 * deadlock situations, set DEBUG_SPINLOCKS in the sparc64
 * specific makefile and rebuild your kernel.
 */
/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
29 #ifndef SPIN_LOCK_DEBUG
/* A spinlock is a single byte: 0 == free, non-zero == held. */
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)

/* Spin (read-only, ordered by #LoadLoad) until the current holder
 * releases the lock.  Does NOT acquire it.
 * FIX: parenthesize the macro argument in the while() condition --
 * the original expanded bare 'lock', which mis-parses for arguments
 * like 'p + 1'.
 */
#define spin_unlock_wait(lock) \
do {	membar("#LoadLoad"); \
} while(*((volatile unsigned char *)(lock)))
41 extern __inline__ void spin_lock(spinlock_t *lock)
43 __asm__ __volatile__("
44 1: ldstub [%0], %%g7
45 brnz,pn %%g7, 2f
46 membar #StoreLoad | #StoreStore
47 .subsection 2
48 2: ldub [%0], %%g7
49 brnz,pt %%g7, 2b
50 membar #LoadLoad
51 b,a,pt %%xcc, 1b
52 .previous
53 " : /* no outputs */
54 : "r" (lock)
55 : "g7", "memory");
58 extern __inline__ int spin_trylock(spinlock_t *lock)
60 unsigned int result;
61 __asm__ __volatile__("ldstub [%1], %0\n\t"
62 "membar #StoreLoad | #StoreStore"
63 : "=r" (result)
64 : "r" (lock)
65 : "memory");
66 return (result == 0);
69 extern __inline__ void spin_unlock(spinlock_t *lock)
71 __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
72 "stb %%g0, [%0]\n\t"
73 "membar #StoreStore | #StoreLoad"
74 : /* No outputs */
75 : "r" (lock)
76 : "memory");
79 extern __inline__ void spin_lock_irq(spinlock_t *lock)
81 __asm__ __volatile__("
82 wrpr %%g0, 15, %%pil
83 1: ldstub [%0], %%g7
84 brnz,pn %%g7, 2f
85 membar #StoreLoad | #StoreStore
86 .subsection 2
87 2: ldub [%0], %%g7
88 brnz,pt %%g7, 2b
89 membar #LoadLoad
90 b,a,pt %%xcc, 1b
91 .previous
92 " : /* no outputs */
93 : "r" (lock)
94 : "g7", "memory");
97 extern __inline__ void spin_unlock_irq(spinlock_t *lock)
99 __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
100 "stb %%g0, [%0]\n\t"
101 "membar #StoreStore | #StoreLoad\n\t"
102 "wrpr %%g0, 0x0, %%pil"
103 : /* no outputs */
104 : "r" (lock)
105 : "memory");
/* Bottom-half-safe lock: bump the local BH-disable count before taking
 * the lock so softirqs cannot run under it on this CPU.
 */
#define spin_lock_bh(__lock) \
do {	local_bh_count++; \
	spin_lock(__lock); \
} while(0)
/* Release the lock, then drop the local BH-disable count (the reverse
 * order of spin_lock_bh).
 */
#define spin_unlock_bh(__lock) \
do {	spin_unlock(__lock); \
	local_bh_count--; \
} while(0)
/* Save the current %pil into 'flags', raise %pil to 15 (IRQs off),
 * then acquire the lock with the usual ldstub / out-of-line spinner.
 * NOTE(review): the lock pointer is pinned to %g1 via the register-asm
 * binding -- presumably a fixed-register convention shared with the
 * out-of-line lock code; confirm before changing.
 */
#define spin_lock_irqsave(__lock, flags)			\
do {	register spinlock_t *__lp asm("g1");			\
	__lp = (__lock);					\
	__asm__ __volatile__(					\
	"\n	rdpr	%%pil, %0\n"				\
	"	wrpr	%%g0, 15, %%pil\n"			\
	"1:	ldstub	[%1], %%g7\n"				\
	"	brnz,pn	%%g7, 2f\n"				\
	"	 membar	#StoreLoad | #StoreStore\n"		\
	"	.subsection	2\n"				\
	"2:	ldub	[%1], %%g7\n"				\
	"	brnz,pt	%%g7, 2b\n"				\
	"	 membar	#LoadLoad\n"				\
	"	b,a,pt	%%xcc, 1b\n"				\
	"	.previous\n"					\
	: "=&r" (flags)						\
	: "r" (__lp)						\
	: "g7", "memory");					\
} while(0)
138 extern __inline__ void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
140 __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
141 "stb %%g0, [%0]\n\t"
142 "membar #StoreStore | #StoreLoad\n\t"
143 "wrpr %1, 0x0, %%pil"
144 : /* no outputs */
145 : "r" (lock), "r" (flags)
146 : "memory");
149 #else /* !(SPIN_LOCK_DEBUG) */
/* Debugging spinlock: in addition to the lock byte it records where and
 * on which CPU the lock was taken, so the _do_spin_* helpers can report
 * deadlocks.  owner_cpu == 0xff means "no owner".
 */
typedef struct {
	unsigned char lock;
	unsigned int owner_pc, owner_cpu;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }

#define spin_lock_init(__lock)	\
do {	(__lock)->lock = 0; \
	(__lock)->owner_pc = 0; \
	(__lock)->owner_cpu = 0xff; \
} while(0)

#define spin_is_locked(__lock)	(*((volatile unsigned char *)(&((__lock)->lock))) != 0)

/* Spin read-only until the holder releases; does not acquire. */
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while(*((volatile unsigned char *)(&((__lock)->lock))))
167 extern void _do_spin_lock (spinlock_t *lock, char *str);
168 extern void _do_spin_unlock (spinlock_t *lock);
169 extern int _spin_trylock (spinlock_t *lock);
171 #define spin_trylock(lp) _spin_trylock(lp)
172 #define spin_lock(lock) _do_spin_lock(lock, "spin_lock")
173 #define spin_lock_irq(lock) do { __cli(); _do_spin_lock(lock, "spin_lock_irq"); } while(0)
174 #define spin_lock_bh(lock) do { local_bh_count++; _do_spin_lock(lock, "spin_lock_bh"); } while(0)
175 #define spin_lock_irqsave(lock, flags) do { __save_and_cli(flags); _do_spin_lock(lock, "spin_lock_irqsave"); } while(0)
176 #define spin_unlock(lock) _do_spin_unlock(lock)
177 #define spin_unlock_irq(lock) do { _do_spin_unlock(lock); __sti(); } while(0)
178 #define spin_unlock_bh(lock) do { _do_spin_unlock(lock); local_bh_count--; } while(0)
179 #define spin_unlock_irqrestore(lock, flags) do { _do_spin_unlock(lock); __restore_flags(flags); } while(0)
181 #endif /* SPIN_LOCK_DEBUG */
183 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
185 #ifndef SPIN_LOCK_DEBUG
/* Reader/writer lock: a single word, manipulated only by the
 * out-of-line __read_lock/__write_lock helper routines below.
 */
typedef unsigned int rwlock_t;
#define RW_LOCK_UNLOCKED	0
/* Jump to the out-of-line __read_lock routine.  The lock pointer is
 * passed in %g1 (fixed by the register-asm binding); %g3 holds the
 * return address, and %g3/%g5/%g7 plus the condition codes are
 * clobbered by the helper.
 */
#define read_lock(__rw_lck)	\
do {	register rwlock_t *__X asm("g1"); \
	__asm__ __volatile__("sethi %%hi(__read_lock), %%g3\n\t" \
			     "jmpl %%g3 + %%lo(__read_lock), %%g3\n\t" \
			     " nop\n1:" \
			     : : "r" (__X = (__rw_lck)) \
			     : "g3", "g5", "g7", "cc", "memory"); \
} while(0)
/* Jump to the out-of-line __read_unlock routine; lock pointer in %g1,
 * return address in %g3.  Same clobber set as read_lock().
 */
#define read_unlock(__rw_lck)	\
do {	register rwlock_t *__X asm("g1"); \
	__asm__ __volatile__("sethi %%hi(__read_unlock), %%g3\n\t" \
			     "jmpl %%g3 + %%lo(__read_unlock), %%g3\n\t" \
			     " nop\n1:" \
			     : : "r" (__X = (__rw_lck)) \
			     : "g3", "g5", "g7", "cc", "memory"); \
} while(0)
/* Jump to the out-of-line __write_lock routine; lock pointer in %g1.
 * Note the writer helper additionally clobbers %g2.
 */
#define write_lock(__rw_lck)	\
do {	register rwlock_t *__X asm("g1"); \
	__asm__ __volatile__("sethi %%hi(__write_lock), %%g3\n\t" \
			     "jmpl %%g3 + %%lo(__write_lock), %%g3\n\t" \
			     " nop\n1:" \
			     : : "r" (__X = (__rw_lck)) \
			     : "g2", "g3", "g5", "g7", "cc", "memory"); \
} while(0)
/* Jump to the out-of-line __write_unlock routine; lock pointer in %g1.
 * Clobbers %g2 like write_lock().
 */
#define write_unlock(__rw_lck)	\
do {	register rwlock_t *__X asm("g1"); \
	__asm__ __volatile__("sethi %%hi(__write_unlock), %%g3\n\t" \
			     "jmpl %%g3 + %%lo(__write_unlock), %%g3\n\t" \
			     " nop\n1:" \
			     : : "r" (__X = (__rw_lck)) \
			     : "g2", "g3", "g5", "g7", "cc", "memory"); \
} while(0)
/* IRQ- and BH-disabling wrappers: disable first on lock, re-enable
 * last on unlock.
 */
#define read_lock_irq(lock)	do { __cli(); read_lock(lock); } while (0)
#define read_unlock_irq(lock)	do { read_unlock(lock); __sti(); } while (0)
#define read_lock_bh(lock)	do { local_bh_count++; read_lock(lock); } while (0)
#define read_unlock_bh(lock)	do { read_unlock(lock); local_bh_count--; } while (0)
#define write_lock_irq(lock)	do { __cli(); write_lock(lock); } while (0)
#define write_unlock_irq(lock)	do { write_unlock(lock); __sti(); } while (0)
#define write_lock_bh(lock)	do { local_bh_count++; write_lock(lock); } while (0)
#define write_unlock_bh(lock)	do { write_unlock(lock); local_bh_count--; } while (0)
/* Flags-saving wrappers: save+disable interrupts around lock, restore
 * the saved state on unlock.
 */
#define read_lock_irqsave(lock, flags) \
	do { __save_and_cli(flags); read_lock(lock); } while (0)
#define read_unlock_irqrestore(lock, flags) \
	do { read_unlock(lock); __restore_flags(flags); } while (0)
#define write_lock_irqsave(lock, flags) \
	do { __save_and_cli(flags); write_lock(lock); } while (0)
#define write_unlock_irqrestore(lock, flags) \
	do { write_unlock(lock); __restore_flags(flags); } while (0)
244 #else /* !(SPIN_LOCK_DEBUG) */
/* Debugging rwlock: records the writer's PC/CPU and the call sites of
 * up to four concurrent readers for deadlock diagnostics.
 */
typedef struct {
	unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[4];
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { 0, 0, 0, 0 } }

/* Out-of-line checking implementations; 'str' names the caller. */
extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
/* Debug read_lock: run the checker with interrupts disabled, restoring
 * the previous interrupt state afterwards.
 */
#define read_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_lock(lock, "read_lock"); \
	__restore_flags(flags); \
} while(0)
#define read_lock_irq(lock)	do { __cli(); _do_read_lock(lock, "read_lock_irq"); } while(0)
#define read_lock_bh(lock)	do { local_bh_count++; _do_read_lock(lock, "read_lock_bh"); } while(0)
#define read_lock_irqsave(lock, flags)	do { __save_and_cli(flags); _do_read_lock(lock, "read_lock_irqsave"); } while(0)
/* Debug read_unlock: run the checker with interrupts disabled,
 * restoring the previous interrupt state afterwards.
 */
#define read_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	__restore_flags(flags); \
} while(0)
/* BUGFIX: the original read_unlock_irq was missing the ';' after
 * __sti(), which broke compilation of every user of the macro.
 */
#define read_unlock_irq(lock)	do { _do_read_unlock(lock, "read_unlock_irq"); __sti(); } while(0)
#define read_unlock_bh(lock)	do { _do_read_unlock(lock, "read_unlock_bh"); local_bh_count--; } while(0)
#define read_unlock_irqrestore(lock, flags)	do { _do_read_unlock(lock, "read_unlock_irqrestore"); __restore_flags(flags); } while(0)
/* Debug write_lock: run the checker with interrupts disabled, restoring
 * the previous interrupt state afterwards.
 */
#define write_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_lock(lock, "write_lock"); \
	__restore_flags(flags); \
} while(0)
#define write_lock_irq(lock)	do { __cli(); _do_write_lock(lock, "write_lock_irq"); } while(0)
#define write_lock_bh(lock)	do { local_bh_count++; _do_write_lock(lock, "write_lock_bh"); } while(0)
#define write_lock_irqsave(lock, flags)	do { __save_and_cli(flags); _do_write_lock(lock, "write_lock_irqsave"); } while(0)
/* Debug write_unlock: run the checker with interrupts disabled,
 * restoring the previous interrupt state afterwards.
 */
#define write_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_unlock(lock); \
	__restore_flags(flags); \
} while(0)
#define write_unlock_irq(lock)	do { _do_write_unlock(lock); __sti(); } while(0)
#define write_unlock_bh(lock)	do { _do_write_unlock(lock); local_bh_count--; } while(0)
#define write_unlock_irqrestore(lock, flags)	do { _do_write_unlock(lock); __restore_flags(flags); } while(0)
298 #endif /* SPIN_LOCK_DEBUG */
300 #endif /* !(__ASSEMBLY__) */
302 #endif /* !(__SPARC64_SPINLOCK_H) */