1 /* spinlock.h: 32-bit Sparc spinlock support.
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 #ifndef __SPARC_SPINLOCK_H
7 #define __SPARC_SPINLOCK_H
9 #include <linux/threads.h> /* For NR_CPUS */
16 * Define this to use the verbose/debugging versions in
17 * arch/sparc/lib/debuglocks.c
19 * Be sure to make dep whenever changing this option.
21 #define SPIN_LOCK_DEBUG
23 #ifdef SPIN_LOCK_DEBUG
24 struct _spinlock_debug
{
26 unsigned long owner_pc
;
28 typedef struct _spinlock_debug spinlock_t
;
30 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0 }
31 #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
32 #define spin_is_locked(lp) (*((volatile unsigned char *)(&((lp)->lock))) != 0)
33 #define spin_unlock_wait(lp) do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock))
35 extern void _do_spin_lock(spinlock_t
*lock
, char *str
);
36 extern int _spin_trylock(spinlock_t
*lock
);
37 extern void _do_spin_unlock(spinlock_t
*lock
);
39 #define spin_trylock(lp) _spin_trylock(lp)
40 #define spin_lock(lock) _do_spin_lock(lock, "spin_lock")
41 #define spin_unlock(lock) _do_spin_unlock(lock)
43 struct _rwlock_debug
{
44 volatile unsigned int lock
;
45 unsigned long owner_pc
;
46 unsigned long reader_pc
[NR_CPUS
];
48 typedef struct _rwlock_debug rwlock_t
;
50 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} }
52 #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
54 extern void _do_read_lock(rwlock_t
*rw
, char *str
);
55 extern void _do_read_unlock(rwlock_t
*rw
, char *str
);
56 extern void _do_write_lock(rwlock_t
*rw
, char *str
);
57 extern void _do_write_unlock(rwlock_t
*rw
);
/* The debug variants run with local interrupts disabled around the
 * helper call so the diagnostic bookkeeping itself cannot be interrupted.
 */
#define read_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_lock(lock, "read_lock"); \
	__restore_flags(flags); \
} while(0)

#define read_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	__restore_flags(flags); \
} while(0)

#define write_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_lock(lock, "write_lock"); \
	__restore_flags(flags); \
} while(0)

#define write_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_unlock(lock); \
	__restore_flags(flags); \
} while(0)
87 #else /* !SPIN_LOCK_DEBUG */
89 typedef unsigned char spinlock_t
;
90 #define SPIN_LOCK_UNLOCKED 0
92 #define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0)
93 #define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
95 #define spin_unlock_wait(lock) \
98 } while(*((volatile unsigned char *)lock))
100 extern __inline__
void spin_lock(spinlock_t
*lock
)
102 __asm__
__volatile__("
108 2: orcc %%g2, 0x0, %%g0
115 : "g2", "memory", "cc");
118 extern __inline__
int spin_trylock(spinlock_t
*lock
)
121 __asm__
__volatile__("ldstub [%1], %0"
125 return (result
== 0);
128 extern __inline__
void spin_unlock(spinlock_t
*lock
)
130 __asm__
__volatile__("stb %%g0, [%0]" : : "r" (lock
) : "memory");
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 */
typedef struct { volatile unsigned int lock; } rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(lp)	do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
/* Sort of like atomic_t's on Sparc, but even more clever.
 *
 * ------------------------------------
 * | 24-bit counter            | wlock |  rwlock_t
 * ------------------------------------
 *  31                        8 7     0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
166 extern __inline__
void _read_lock(rwlock_t
*rw
)
168 register rwlock_t
*lp
asm("g1");
170 __asm__
__volatile__("
172 call ___rw_read_enter
173 ldstub [%%g1 + 3], %%g2
176 : "g2", "g4", "memory", "cc");
/* Non-debug read_lock still masks local interrupts around the helper. */
#define read_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_read_lock(lock); \
	__restore_flags(flags); \
} while(0)
186 extern __inline__
void _read_unlock(rwlock_t
*rw
)
188 register rwlock_t
*lp
asm("g1");
190 __asm__
__volatile__("
193 ldstub [%%g1 + 3], %%g2
196 : "g2", "g4", "memory", "cc");
/* Non-debug read_unlock, irq-protected like read_lock. */
#define read_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_read_unlock(lock); \
	__restore_flags(flags); \
} while(0)
206 extern __inline__
void write_lock(rwlock_t
*rw
)
208 register rwlock_t
*lp
asm("g1");
210 __asm__
__volatile__("
212 call ___rw_write_enter
213 ldstub [%%g1 + 3], %%g2
216 : "g2", "g4", "memory", "cc");
219 #define write_unlock(rw) do { (rw)->lock = 0; } while(0)
221 #endif /* SPIN_LOCK_DEBUG */
223 #endif /* !(__ASSEMBLY__) */
225 #endif /* __SPARC_SPINLOCK_H */