/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC64_SPINLOCK_H
7 #define __SPARC64_SPINLOCK_H
/* UP (non-SMP) spinlock: no contention is possible on a uniprocessor,
 * so the lock is a placeholder byte and all lock operations compile
 * away to nothing. */
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	do { } while(0)
#define spin_lock(lock)		do { } while(0)
/* Bug fix: trylock must be an expression usable as `if (spin_trylock(l))`;
 * the previous do{}while(0) form was a statement and broke every such
 * caller.  On UP the lock is always available, so it is constant 1. */
#define spin_trylock(lock)	(1)
#define spin_unlock_wait(lock)	do { } while(0)
#define spin_unlock(lock)	do { } while(0)
/* The irq variants only need to mask interrupts on UP. */
#define spin_lock_irq(lock)	cli()
#define spin_unlock_irq(lock)	sti()
/* UP bottom-half-safe lock: bump the BH disable count on lock, drop it
 * on unlock; barrier() keeps the compiler from moving protected code
 * across the count update.
 * NOTE(review): the macro bodies were truncated in this copy of the
 * file; the closing statements are restored in the conventional form --
 * verify against the pristine original. */
#define spin_lock_bh(lock) \
do {	local_bh_count++; \
	barrier(); \
} while(0)

#define spin_unlock_bh(lock) \
do {	barrier(); \
	local_bh_count--; \
} while(0)

/* On UP, "saving the lock state" is just saving/restoring IRQ flags. */
#define spin_lock_irqsave(lock, flags)		save_and_cli(flags)
#define spin_unlock_irqrestore(lock, flags)	restore_flags(flags)
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
/* UP (non-SMP) rwlock: no real locking is needed, so the type is a
 * bare word and every lock/unlock operation is a no-op. */
45 typedef unsigned long rwlock_t
;
46 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
/* Reader and writer entry points all compile away on UP. */
48 #define read_lock(lock) do { } while(0)
49 #define read_unlock(lock) do { } while(0)
50 #define write_lock(lock) do { } while(0)
51 #define write_unlock(lock) do { } while(0)
/* UP rwlock irq/bh/irqsave variants: on a uniprocessor only interrupt
 * masking and bottom-half accounting matter; no lock word is touched.
 * NOTE(review): the _bh macro bodies were truncated in this copy; the
 * closing statements are restored conventionally -- verify against the
 * pristine original. */
#define read_lock_irq(lock)	cli()
#define read_unlock_irq(lock)	sti()

#define read_lock_bh(lock) \
do {	local_bh_count++; \
	barrier(); \
} while(0)

#define read_unlock_bh(lock) \
do {	barrier(); \
	local_bh_count--; \
} while(0)

#define write_lock_irq(lock)	cli()
#define write_unlock_irq(lock)	sti()

#define write_lock_bh(lock) \
do {	local_bh_count++; \
	barrier(); \
} while(0)

#define write_unlock_bh(lock) \
do {	barrier(); \
	local_bh_count--; \
} while(0)

#define read_lock_irqsave(lock, flags)		save_and_cli(flags)
#define read_unlock_irqrestore(lock, flags)	restore_flags(flags)
#define write_lock_irqsave(lock, flags)		save_and_cli(flags)
#define write_unlock_irqrestore(lock, flags)	restore_flags(flags)
81 #else /* !(__SMP__) */
/* To get debugging spinlocks which detect and catch
 * deadlock situations, set DEBUG_SPINLOCKS in the sparc64
 * specific makefile and rebuild your kernel.
 */
/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
101 #ifndef SPIN_LOCK_DEBUG
/* SMP spinlock: a single byte, 0 == unlocked, nonzero == held. */
103 typedef unsigned char spinlock_t
;
104 #define SPIN_LOCK_UNLOCKED 0
/* Initialize/inspect the lock byte directly; the volatile read in
 * is_locked forces a fresh load each time. */
106 #define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0)
107 #define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
/* Busy-wait until the current holder drops the lock; #LoadLoad orders
 * the polling loads. */
109 #define spin_unlock_wait(lock) \
110 do { membar("#LoadLoad"); \
111 } while(*((volatile unsigned char *)lock))
/* SMP spin_lock: expected to acquire *lock by spinning.
 * NOTE(review): this copy is truncated -- the function's opening brace,
 * most of the asm template, the operand lists, clobbers and closing
 * brace are missing; only a "membar #StoreLoad | #StoreStore" line of
 * the asm survives.  Restore the body from the pristine original before
 * building. */
113 extern __inline__
void spin_lock(spinlock_t
*lock
)
115 __asm__
__volatile__("
118 membar #StoreLoad | #StoreStore
/* SMP spin_trylock: one ldstub attempt on the lock byte; returns
 * nonzero when the byte read back 0, i.e. when the lock was acquired.
 * NOTE(review): truncated copy -- the declaration of `result`, the
 * braces and the asm operand/clobber lists are missing; recover from
 * the pristine original. */
130 extern __inline__
int spin_trylock(spinlock_t
*lock
)
133 __asm__
__volatile__("ldstub [%1], %0\n\t"
134 "membar #StoreLoad | #StoreStore"
138 return (result
== 0);
/* SMP spin_unlock: issues membar #StoreStore|#LoadStore, presumably
 * followed by a zero byte store to release the lock -- TODO confirm;
 * the rest of the asm, the operand lists and the braces were lost in
 * this copy. */
141 extern __inline__
void spin_unlock(spinlock_t
*lock
)
143 __asm__
__volatile__("membar #StoreStore | #LoadStore\n\t"
/* SMP spin_lock_irq: presumably raises PIL to mask interrupts and then
 * spins for the lock -- TODO confirm; the asm body is mostly missing in
 * this copy, only a "membar #StoreLoad | #StoreStore" line survives. */
150 extern __inline__
void spin_lock_irq(spinlock_t
*lock
)
152 __asm__
__volatile__("
156 membar #StoreLoad | #StoreStore
/* SMP spin_unlock_irq: orders prior accesses (#StoreStore|#LoadStore),
 * releases the lock, and re-enables interrupts by writing PIL 0
 * (wrpr %g0, 0x0, %pil).
 * NOTE(review): the lock-release store, operand lists and braces were
 * lost in this copy; recover from the pristine original. */
168 extern __inline__
void spin_unlock_irq(spinlock_t
*lock
)
170 __asm__
__volatile__("
171 membar #StoreStore | #LoadStore
173 wrpr %%g0, 0x0, %%pil
/* SMP bottom-half-safe spinlock: raise the BH disable count before
 * taking the lock, and release in the opposite order.
 * NOTE(review): the truncated originals showed only the opening lines
 * of each body; the remaining statements are restored conventionally --
 * verify against the pristine original. */
#define spin_lock_bh(__lock) \
do {	local_bh_count++; \
	spin_lock(__lock); \
} while(0)

#define spin_unlock_bh(__lock) \
do {	spin_unlock(__lock); \
	local_bh_count--; \
} while(0)
/* SMP spin_lock_irqsave: saves the current PIL into flags (rdpr %pil),
 * raises PIL to 15 to mask interrupts, then acquires the lock with an
 * ldstub loop; the out-of-line "2:" spinner polls with ldub until the
 * byte clears, then retries the ldstub.  The lock pointer is pinned in
 * %g1 via the asm register variable.
 * NOTE(review): truncated copy -- the assignment of `lock` to __lp, the
 * section markers around the spinner, the operand/clobber lists and the
 * closing `} while(0)` are missing; recover from the pristine original. */
189 #define spin_lock_irqsave(lock, flags) \
190 do { register spinlock_t *__lp asm("g1"); \
192 __asm__ __volatile__( \
193 "\n rdpr %%pil, %0\n" \
194 " wrpr %%g0, 15, %%pil\n" \
195 "1: ldstub [%1], %%g7\n" \
196 " brnz,pn %%g7, 2f\n" \
197 " membar #StoreLoad | #StoreStore\n" \
199 "2: ldub [%1], %%g7\n" \
200 " brnz,pt %%g7, 2b\n" \
201 " membar #LoadLoad\n" \
202 " b,a,pt %%xcc, 1b\n" \
/* SMP spin_unlock_irqrestore: orders prior accesses, releases the lock
 * and presumably restores the saved PIL from `flags` -- TODO confirm;
 * the release store, the wrpr and the clobber list were lost in this
 * copy.  Inputs are the lock pointer and the flags word. */
209 extern __inline__
void spin_unlock_irqrestore(spinlock_t
*lock
, unsigned long flags
)
211 __asm__
__volatile__("
212 membar #StoreStore | #LoadStore
216 : "r" (lock
), "r" (flags
)
220 #else /* !(SPIN_LOCK_DEBUG) */
/* Debugging spinlock: the lock byte plus bookkeeping about the holder.
 * NOTE(review): the struct head and tail were lost in this copy; the
 * field list is reconstructed from the SPIN_LOCK_UNLOCKED initializer
 * and the spin_lock_init accessors below -- verify against the pristine
 * original. */
typedef struct {
	unsigned char lock;			/* 0 == free */
	unsigned int owner_pc, owner_cpu;	/* diagnostic state; 0xff == no owner */
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }

#define spin_lock_init(__lock) \
do {	(__lock)->lock = 0; \
	(__lock)->owner_pc = 0; \
	(__lock)->owner_cpu = 0xff; \
} while(0)

#define spin_is_locked(__lock) (*((volatile unsigned char *)(&((__lock)->lock))) != 0)

/* Busy-wait for release; #LoadLoad orders the polling loads. */
#define spin_unlock_wait(__lock) \
do { \
	membar("#LoadLoad"); \
} while(*((volatile unsigned char *)(&((__lock)->lock))))
238 extern void _do_spin_lock (spinlock_t
*lock
, char *str
);
239 extern void _do_spin_unlock (spinlock_t
*lock
);
240 extern int _spin_trylock (spinlock_t
*lock
);
/* Debugging versions route through out-of-line helpers that can record
 * state and report deadlocks; the string argument names the caller.
 * irq/bh/irqsave variants add the usual interrupt masking, BH count
 * accounting, or PIL save/restore around the helper call. */
242 #define spin_trylock(lp) _spin_trylock(lp)
243 #define spin_lock(lock) _do_spin_lock(lock, "spin_lock")
244 #define spin_lock_irq(lock) do { __cli(); _do_spin_lock(lock, "spin_lock_irq"); } while(0)
245 #define spin_lock_bh(lock) do { local_bh_count++; _do_spin_lock(lock, "spin_lock_bh"); } while(0)
246 #define spin_lock_irqsave(lock, flags) do { __save_and_cli(flags); _do_spin_lock(lock, "spin_lock_irqsave"); } while(0)
247 #define spin_unlock(lock) _do_spin_unlock(lock)
248 #define spin_unlock_irq(lock) do { _do_spin_unlock(lock); __sti(); } while(0)
249 #define spin_unlock_bh(lock) do { _do_spin_unlock(lock); local_bh_count--; } while(0)
250 #define spin_unlock_irqrestore(lock, flags) do { _do_spin_unlock(lock); __restore_flags(flags); } while(0)
252 #endif /* SPIN_LOCK_DEBUG */
254 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
256 #ifndef SPIN_LOCK_DEBUG
/* SMP rwlock: a single 64-bit word; judging by the asm users below the
 * top bit (0x8000000000000000) marks a writer and the remaining bits
 * count readers -- see write_lock's andncc test. */
258 typedef unsigned long rwlock_t
;
259 #define RW_LOCK_UNLOCKED 0
/* SMP read_lock: casx compare-and-swap retry loop on the 64-bit lock
 * word, presumably incrementing the reader count -- TODO confirm; most
 * of the asm template and operand lists were lost in this copy.
 * Clobbers g5/g7/cc/memory per the visible clobber list. */
261 extern __inline__
void read_lock(rwlock_t
*rw
)
263 __asm__
__volatile__("
267 casx [%0], %%g5, %%g7
270 membar #StoreLoad | #StoreStore
279 : "g5", "g7", "cc", "memory");
/* SMP read_unlock: casx retry loop, presumably decrementing the reader
 * count -- TODO confirm; most of the asm template and operand lists
 * were lost in this copy.  Clobbers g5/g7/cc/memory per the visible
 * clobber list. */
282 extern __inline__
void read_unlock(rwlock_t
*rw
)
284 __asm__
__volatile__("
287 casx [%0], %%g5, %%g7
290 membar #StoreLoad | #StoreStore
293 : "g5", "g7", "cc", "memory");
/* SMP write_lock: %g3 holds the writer bit (MSB, built with
 * sethi %uhi(0x8000000000000000)).  The casx at "4:" ors the writer bit
 * into the lock word; andncc then tests whether any reader bits remain.
 * The second andn+casx sequence appears to back the writer bit out when
 * readers are present -- TODO confirm; interior lines of the asm, the
 * spinner section and the operand lists were lost in this copy.
 * Clobbers g3/g5/g7/memory/cc per the visible clobber list. */
296 extern __inline__
void write_lock(rwlock_t
*rw
)
298 __asm__
__volatile__("
299 sethi %%uhi(0x8000000000000000), %%g3
303 4: or %%g5, %%g3, %%g7
304 casx [%0], %%g5, %%g7
307 andncc %%g7, %%g3, %%g0
309 membar #StoreLoad | #StoreStore
312 andn %%g5, %%g3, %%g7
313 casx [%0], %%g5, %%g7
316 membar #StoreLoad | #StoreStore
324 : "g3", "g5", "g7", "memory", "cc");
/* SMP write_unlock: rebuilds the writer bit in %g3 and clears it from
 * the lock word with an andn + casx retry -- interior lines, operand
 * lists and the loop branches were lost in this copy; recover from the
 * pristine original.  Clobbers g3/g5/g7/memory/cc per the visible
 * clobber list. */
327 extern __inline__
void write_unlock(rwlock_t
*rw
)
329 __asm__
__volatile__("
330 sethi %%uhi(0x8000000000000000), %%g3
333 andn %%g5, %%g3, %%g7
334 casx [%0], %%g5, %%g7
337 membar #StoreLoad | #StoreStore
340 : "g3", "g5", "g7", "memory", "cc");
/* Composite rwlock operations: wrap the base primitives with interrupt
 * masking (__cli/__sti), bottom-half accounting (local_bh_count), or
 * PIL save/restore (__save_and_cli/__restore_flags). */
343 #define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)
344 #define read_unlock_irq(lock) do { read_unlock(lock); __sti(); } while (0)
345 #define read_lock_bh(lock) do { local_bh_count++; read_lock(lock); } while (0)
346 #define read_unlock_bh(lock) do { read_unlock(lock); local_bh_count--; } while (0)
347 #define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
348 #define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
349 #define write_lock_bh(lock) do { local_bh_count++; write_lock(lock); } while (0)
350 #define write_unlock_bh(lock) do { write_unlock(lock); local_bh_count--; } while (0)
352 #define read_lock_irqsave(lock, flags) \
353 do { __save_and_cli(flags); read_lock(lock); } while (0)
354 #define read_unlock_irqrestore(lock, flags) \
355 do { read_unlock(lock); __restore_flags(flags); } while (0)
356 #define write_lock_irqsave(lock, flags) \
357 do { __save_and_cli(flags); write_lock(lock); } while (0)
358 #define write_unlock_irqrestore(lock, flags) \
359 do { write_unlock(lock); __restore_flags(flags); } while (0)
361 #else /* !(SPIN_LOCK_DEBUG) */
/* Debugging rwlock: the lock word plus writer/reader diagnostic state.
 * NOTE(review): the struct head and tail were lost in this copy; the
 * field list is reconstructed from the RW_LOCK_UNLOCKED initializer
 * (lock, writer_pc, writer_cpu, reader_pc[4]) -- verify against the
 * pristine original. */
typedef struct {
	unsigned long lock;
	unsigned int writer_pc, writer_cpu;	/* 0xff == no writer */
	unsigned int reader_pc[4];		/* recent reader call sites */
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { 0, 0, 0, 0 } }
370 extern void _do_read_lock(rwlock_t
*rw
, char *str
);
371 extern void _do_read_unlock(rwlock_t
*rw
, char *str
);
372 extern void _do_write_lock(rwlock_t
*rw
, char *str
);
373 extern void _do_write_unlock(rwlock_t
*rw
);
/* Debugging read_lock: interrupts are disabled around the out-of-line
 * routine so the diagnostic state stays consistent.
 * NOTE(review): the closing `} while(0)` of read_lock was lost in this
 * copy and is restored here. */
#define read_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_lock(lock, "read_lock"); \
	__restore_flags(flags); \
} while(0)

#define read_lock_irq(lock) do { __cli(); _do_read_lock(lock, "read_lock_irq"); } while(0)
#define read_lock_bh(lock) do { local_bh_count++; _do_read_lock(lock, "read_lock_bh"); } while(0)
#define read_lock_irqsave(lock, flags) do { __save_and_cli(flags); _do_read_lock(lock, "read_lock_irqsave"); } while(0)
/* Debugging read_unlock variants.
 * Fixes: (1) the closing `} while(0)` of read_unlock was lost in this
 * copy and is restored; (2) read_unlock_irq was missing the semicolon
 * after __sti(), a syntax error whenever the macro was expanded. */
#define read_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	__restore_flags(flags); \
} while(0)

#define read_unlock_irq(lock) do { _do_read_unlock(lock, "read_unlock_irq"); __sti(); } while(0)
#define read_unlock_bh(lock) do { _do_read_unlock(lock, "read_unlock_bh"); local_bh_count--; } while(0)
#define read_unlock_irqrestore(lock, flags) do { _do_read_unlock(lock, "read_unlock_irqrestore"); __restore_flags(flags); } while(0)
/* Debugging write_lock: interrupts are disabled around the out-of-line
 * routine so the diagnostic state stays consistent.
 * NOTE(review): the closing `} while(0)` of write_lock was lost in this
 * copy and is restored here. */
#define write_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_lock(lock, "write_lock"); \
	__restore_flags(flags); \
} while(0)

#define write_lock_irq(lock) do { __cli(); _do_write_lock(lock, "write_lock_irq"); } while(0)
#define write_lock_bh(lock) do { local_bh_count++; _do_write_lock(lock, "write_lock_bh"); } while(0)
#define write_lock_irqsave(lock, flags) do { __save_and_cli(flags); _do_write_lock(lock, "write_lock_irqsave"); } while(0)
/* Debugging write_unlock variants.
 * NOTE(review): the closing `} while(0)` of write_unlock was lost in
 * this copy and is restored here. */
#define write_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_unlock(lock); \
	__restore_flags(flags); \
} while(0)

#define write_unlock_irq(lock) do { _do_write_unlock(lock); __sti(); } while(0)
#define write_unlock_bh(lock) do { _do_write_unlock(lock); local_bh_count--; } while(0)
#define write_unlock_irqrestore(lock, flags) do { _do_write_unlock(lock); __restore_flags(flags); } while(0)
415 #endif /* SPIN_LOCK_DEBUG */
419 #endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_SPINLOCK_H) */