pre-2.3.4..
[davej-history.git] / include / asm-sparc64 / spinlock.h
blobd0c25a965966ad43b9331b6eccfc399dd4194754
/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC64_SPINLOCK_H
7 #define __SPARC64_SPINLOCK_H
9 #ifndef __ASSEMBLY__
11 #ifndef __SMP__
/* Uniprocessor build: spin locks compile away entirely.  The _irq
 * variants still disable/enable interrupts, and the _bh variants
 * still block bottom halves by bumping local_bh_count.
 */
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	do { } while(0)
#define spin_lock(lock)		do { } while(0)

/* trylock must yield a "did we get it?" value so callers can test it;
 * on UP the lock is always acquired.  (A bare do{}while(0) here breaks
 * any caller that uses the result in an expression.)
 */
#define spin_trylock(lock)	(1)

#define spin_unlock_wait(lock)	do { } while(0)
#define spin_unlock(lock)	do { } while(0)
#define spin_lock_irq(lock)	cli()
#define spin_unlock_irq(lock)	sti()

#define spin_lock_bh(lock)			\
do {	local_bh_count++;			\
	barrier();				\
} while(0)

#define spin_unlock_bh(lock)			\
do {	barrier();				\
	local_bh_count--;			\
} while(0)

#define spin_lock_irqsave(lock, flags)		save_and_cli(flags)
#define spin_unlock_irqrestore(lock, flags)	restore_flags(flags)
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
/* UP read-write locks: all lock/unlock operations are no-ops; only
 * the irq masking and bottom-half blocking side effects remain.
 */
typedef unsigned long rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }

#define read_lock(lock)		do { } while(0)
#define read_unlock(lock)	do { } while(0)
#define write_lock(lock)	do { } while(0)
#define write_unlock(lock)	do { } while(0)
#define read_lock_irq(lock)	cli()
#define read_unlock_irq(lock)	sti()

#define read_lock_bh(lock)			\
do {	local_bh_count++;			\
	barrier();				\
} while(0)

#define read_unlock_bh(lock)			\
do {	barrier();				\
	local_bh_count--;			\
} while(0)

#define write_lock_irq(lock)	cli()
#define write_unlock_irq(lock)	sti()

#define write_lock_bh(lock)			\
do {	local_bh_count++;			\
	barrier();				\
} while(0)

#define write_unlock_bh(lock)			\
do {	barrier();				\
	local_bh_count--;			\
} while(0)

#define read_lock_irqsave(lock, flags)		save_and_cli(flags)
#define read_unlock_irqrestore(lock, flags)	restore_flags(flags)
#define write_lock_irqsave(lock, flags)		save_and_cli(flags)
#define write_unlock_irqrestore(lock, flags)	restore_flags(flags)
81 #else /* !(__SMP__) */
/* To get debugging spinlocks which detect and catch
 * deadlock situations, set DEBUG_SPINLOCKS in the sparc64
 * specific makefile and rebuild your kernel.
 */
/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
101 #ifndef SPIN_LOCK_DEBUG
/* SMP, non-debug flavor: a spinlock is one byte; zero means free,
 * non-zero means held (the holder sets it with ldstub).
 */
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)

/* Spin (loads only, no stores) until the current holder releases. */
#define spin_unlock_wait(lock)	\
do {	membar("#LoadLoad");	\
} while(*((volatile unsigned char *)lock))
113 extern __inline__ void spin_lock(spinlock_t *lock)
115 __asm__ __volatile__("
116 1: ldstub [%0], %%g7
117 brnz,pn %%g7, 2f
118 membar #StoreLoad | #StoreStore
119 .subsection 2
120 2: ldub [%0], %%g7
121 brnz,pt %%g7, 2b
122 membar #LoadLoad
123 b,a,pt %%xcc, 1b
124 .previous
125 " : /* no outputs */
126 : "r" (lock)
127 : "g7", "memory");
130 extern __inline__ int spin_trylock(spinlock_t *lock)
132 unsigned int result;
133 __asm__ __volatile__("ldstub [%1], %0\n\t"
134 "membar #StoreLoad | #StoreStore"
135 : "=r" (result)
136 : "r" (lock)
137 : "memory");
138 return (result == 0);
141 extern __inline__ void spin_unlock(spinlock_t *lock)
143 __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
144 "stb %%g0, [%0]\n\t"
145 : /* No outputs */
146 : "r" (lock)
147 : "memory");
150 extern __inline__ void spin_lock_irq(spinlock_t *lock)
152 __asm__ __volatile__("
153 wrpr %%g0, 15, %%pil
154 1: ldstub [%0], %%g7
155 brnz,pn %%g7, 2f
156 membar #StoreLoad | #StoreStore
157 .subsection 2
158 2: ldub [%0], %%g7
159 brnz,pt %%g7, 2b
160 membar #LoadLoad
161 b,a,pt %%xcc, 1b
162 .previous
163 " : /* no outputs */
164 : "r" (lock)
165 : "g7", "memory");
168 extern __inline__ void spin_unlock_irq(spinlock_t *lock)
170 __asm__ __volatile__("
171 membar #StoreStore | #LoadStore
172 stb %%g0, [%0]
173 wrpr %%g0, 0x0, %%pil
174 " : /* no outputs */
175 : "r" (lock)
176 : "memory");
/* BH-safe variants: bumping local_bh_count keeps bottom halves from
 * running while the lock is held; drop it only after the release.
 */
#define spin_lock_bh(__lock)		\
do {	local_bh_count++;		\
	spin_lock(__lock);		\
} while(0)

#define spin_unlock_bh(__lock)		\
do {	spin_unlock(__lock);		\
	local_bh_count--;		\
} while(0)
/* Save the current PIL into flags, raise PIL to 15, then acquire the
 * lock with the usual ldstub/spin sequence.
 * NOTE(review): the asm("g1") register binding for the lock pointer
 * looks deliberate (a fixed register visible to out-of-line code?) —
 * confirm before changing.
 */
#define spin_lock_irqsave(__lock, flags)			\
do {	register spinlock_t *__lp asm("g1");			\
	__lp = (__lock);					\
	__asm__ __volatile__(					\
	"\n	rdpr		%%pil, %0\n"			\
	"	wrpr		%%g0, 15, %%pil\n"		\
	"1:	ldstub		[%1], %%g7\n"			\
	"	brnz,pn		%%g7, 2f\n"			\
	"	 membar		#StoreLoad | #StoreStore\n"	\
	"	.subsection	2\n"				\
	"2:	ldub		[%1], %%g7\n"			\
	"	brnz,pt		%%g7, 2b\n"			\
	"	 membar		#LoadLoad\n"			\
	"	b,a,pt		%%xcc, 1b\n"			\
	"	.previous\n"					\
	: "=&r" (flags)						\
	: "r" (__lp)						\
	: "g7", "memory");					\
} while(0)
209 extern __inline__ void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
211 __asm__ __volatile__("
212 membar #StoreStore | #LoadStore
213 stb %%g0, [%0]
214 wrpr %1, 0x0, %%pil
215 " : /* no outputs */
216 : "r" (lock), "r" (flags)
217 : "memory");
220 #else /* !(SPIN_LOCK_DEBUG) */
/* Debug flavor: the lock records the holder's PC and CPU (0xff means
 * "no owner") so deadlocks can be diagnosed; the real work is done
 * out of line by _do_spin_lock()/_do_spin_unlock(), which receive
 * the caller's name for reporting.
 */
typedef struct {
	unsigned char lock;
	unsigned int owner_pc, owner_cpu;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }

#define spin_lock_init(__lock)		\
do {	(__lock)->lock = 0;		\
	(__lock)->owner_pc = 0;		\
	(__lock)->owner_cpu = 0xff;	\
} while(0)

#define spin_is_locked(__lock)	(*((volatile unsigned char *)(&((__lock)->lock))) != 0)

#define spin_unlock_wait(__lock)	\
do {	membar("#LoadLoad");		\
} while(*((volatile unsigned char *)(&((__lock)->lock))))

extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _spin_trylock (spinlock_t *lock);

#define spin_trylock(lp)	_spin_trylock(lp)
#define spin_lock(lock)		_do_spin_lock(lock, "spin_lock")
#define spin_lock_irq(lock)	do { __cli(); _do_spin_lock(lock, "spin_lock_irq"); } while(0)
#define spin_lock_bh(lock)	do { local_bh_count++; _do_spin_lock(lock, "spin_lock_bh"); } while(0)
#define spin_lock_irqsave(lock, flags)	do { __save_and_cli(flags); _do_spin_lock(lock, "spin_lock_irqsave"); } while(0)
#define spin_unlock(lock)	_do_spin_unlock(lock)
#define spin_unlock_irq(lock)	do { _do_spin_unlock(lock); __sti(); } while(0)
#define spin_unlock_bh(lock)	do { _do_spin_unlock(lock); local_bh_count--; } while(0)
#define spin_unlock_irqrestore(lock, flags)	do { _do_spin_unlock(lock); __restore_flags(flags); } while(0)
252 #endif /* SPIN_LOCK_DEBUG */
254 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
256 #ifndef SPIN_LOCK_DEBUG
/* Non-debug rwlock: the sign bit (bit 63) is the writer bit, the low
 * bits count active readers (see read_lock/write_lock below).
 */
typedef unsigned long rwlock_t;
#define RW_LOCK_UNLOCKED	0
261 extern __inline__ void read_lock(rwlock_t *rw)
263 __asm__ __volatile__("
264 1: ldx [%0], %%g5
265 brlz,pn %%g5, 2f
266 4: add %%g5, 1, %%g7
267 casx [%0], %%g5, %%g7
268 cmp %%g5, %%g7
269 bne,pn %%xcc, 1b
270 membar #StoreLoad | #StoreStore
271 .subsection 2
272 2: ldx [%0], %%g5
273 brlz,pt %%g5, 2b
274 membar #LoadLoad
275 b,a,pt %%xcc, 4b
276 .previous
277 " : /* no outputs */
278 : "r" (rw)
279 : "g5", "g7", "cc", "memory");
282 extern __inline__ void read_unlock(rwlock_t *rw)
284 __asm__ __volatile__("
285 1: ldx [%0], %%g5
286 sub %%g5, 1, %%g7
287 casx [%0], %%g5, %%g7
288 cmp %%g5, %%g7
289 bne,pn %%xcc, 1b
290 membar #StoreLoad | #StoreStore
291 " : /* no outputs */
292 : "r" (rw)
293 : "g5", "g7", "cc", "memory");
296 extern __inline__ void write_lock(rwlock_t *rw)
298 __asm__ __volatile__("
299 sethi %%uhi(0x8000000000000000), %%g3
300 sllx %%g3, 32, %%g3
301 1: ldx [%0], %%g5
302 brlz,pn %%g5, 5f
303 4: or %%g5, %%g3, %%g7
304 casx [%0], %%g5, %%g7
305 cmp %%g5, %%g7
306 bne,pn %%xcc, 1b
307 andncc %%g7, %%g3, %%g0
308 bne,pn %%xcc, 7f
309 membar #StoreLoad | #StoreStore
310 .subsection 2
311 7: ldx [%0], %%g5
312 andn %%g5, %%g3, %%g7
313 casx [%0], %%g5, %%g7
314 cmp %%g5, %%g7
315 bne,pn %%xcc, 7b
316 membar #StoreLoad | #StoreStore
317 5: ldx [%0], %%g5
318 brnz,pt %%g5, 5b
319 membar #LoadLoad
320 b,a,pt %%xcc, 4b
321 .previous
322 " : /* no outputs */
323 : "r" (rw)
324 : "g3", "g5", "g7", "memory", "cc");
327 extern __inline__ void write_unlock(rwlock_t *rw)
329 __asm__ __volatile__("
330 sethi %%uhi(0x8000000000000000), %%g3
331 sllx %%g3, 32, %%g3
332 1: ldx [%0], %%g5
333 andn %%g5, %%g3, %%g7
334 casx [%0], %%g5, %%g7
335 cmp %%g5, %%g7
336 bne,pn %%xcc, 1b
337 membar #StoreLoad | #StoreStore
338 " : /* no outputs */
339 : "r" (rw)
340 : "g3", "g5", "g7", "memory", "cc");
/* irq/bh/irqsave wrappers: mask interrupts (or block bottom halves)
 * before taking the lock; restore only after releasing it.
 */
#define read_lock_irq(lock)	do { __cli(); read_lock(lock); } while (0)
#define read_unlock_irq(lock)	do { read_unlock(lock); __sti(); } while (0)
#define read_lock_bh(lock)	do { local_bh_count++; read_lock(lock); } while (0)
#define read_unlock_bh(lock)	do { read_unlock(lock); local_bh_count--; } while (0)
#define write_lock_irq(lock)	do { __cli(); write_lock(lock); } while (0)
#define write_unlock_irq(lock)	do { write_unlock(lock); __sti(); } while (0)
#define write_lock_bh(lock)	do { local_bh_count++; write_lock(lock); } while (0)
#define write_unlock_bh(lock)	do { write_unlock(lock); local_bh_count--; } while (0)

#define read_lock_irqsave(lock, flags)		\
	do { __save_and_cli(flags); read_lock(lock); } while (0)
#define read_unlock_irqrestore(lock, flags)	\
	do { read_unlock(lock); __restore_flags(flags); } while (0)
#define write_lock_irqsave(lock, flags)		\
	do { __save_and_cli(flags); write_lock(lock); } while (0)
#define write_unlock_irqrestore(lock, flags)	\
	do { write_unlock(lock); __restore_flags(flags); } while (0)
361 #else /* !(SPIN_LOCK_DEBUG) */
/* Debug rwlock: tracks the writer's PC/CPU (0xff = no writer) and the
 * PCs of up to four concurrent readers; the work is done out of line.
 */
typedef struct {
	unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[4];
} rwlock_t;
#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { 0, 0, 0, 0 } }

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
/* Debug rwlock entry points: the bare lock/unlock forms run the
 * out-of-line helper with interrupts disabled around it; the irq/bh
 * variants mirror the non-debug wrappers.  Each helper gets the
 * caller's name for deadlock reports.
 *
 * Fix: read_unlock_irq was missing the ';' after __sti(), which made
 * the macro a syntax error at every expansion site.
 */
#define read_lock(lock)				\
do {	unsigned long flags;			\
	__save_and_cli(flags);			\
	_do_read_lock(lock, "read_lock");	\
	__restore_flags(flags);			\
} while(0)
#define read_lock_irq(lock)	do { __cli(); _do_read_lock(lock, "read_lock_irq"); } while(0)
#define read_lock_bh(lock)	do { local_bh_count++; _do_read_lock(lock, "read_lock_bh"); } while(0)
#define read_lock_irqsave(lock, flags)	do { __save_and_cli(flags); _do_read_lock(lock, "read_lock_irqsave"); } while(0)

#define read_unlock(lock)			\
do {	unsigned long flags;			\
	__save_and_cli(flags);			\
	_do_read_unlock(lock, "read_unlock");	\
	__restore_flags(flags);			\
} while(0)
#define read_unlock_irq(lock)	do { _do_read_unlock(lock, "read_unlock_irq"); __sti(); } while(0)
#define read_unlock_bh(lock)	do { _do_read_unlock(lock, "read_unlock_bh"); local_bh_count--; } while(0)
#define read_unlock_irqrestore(lock, flags)	do { _do_read_unlock(lock, "read_unlock_irqrestore"); __restore_flags(flags); } while(0)

#define write_lock(lock)			\
do {	unsigned long flags;			\
	__save_and_cli(flags);			\
	_do_write_lock(lock, "write_lock");	\
	__restore_flags(flags);			\
} while(0)
#define write_lock_irq(lock)	do { __cli(); _do_write_lock(lock, "write_lock_irq"); } while(0)
#define write_lock_bh(lock)	do { local_bh_count++; _do_write_lock(lock, "write_lock_bh"); } while(0)
#define write_lock_irqsave(lock, flags)	do { __save_and_cli(flags); _do_write_lock(lock, "write_lock_irqsave"); } while(0)

#define write_unlock(lock)			\
do {	unsigned long flags;			\
	__save_and_cli(flags);			\
	_do_write_unlock(lock);			\
	__restore_flags(flags);			\
} while(0)
#define write_unlock_irq(lock)	do { _do_write_unlock(lock); __sti(); } while(0)
#define write_unlock_bh(lock)	do { _do_write_unlock(lock); local_bh_count--; } while(0)
#define write_unlock_irqrestore(lock, flags)	do { _do_write_unlock(lock); __restore_flags(flags); } while(0)
415 #endif /* SPIN_LOCK_DEBUG */
417 #endif /* __SMP__ */
419 #endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_SPINLOCK_H) */