1 /* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC_SPINLOCK_H
7 #define __SPARC_SPINLOCK_H
9 #include <linux/threads.h> /* For NR_CPUS */
15 #ifdef CONFIG_DEBUG_SPINLOCK
/* Debug variant of the spinlock: the lock byte plus the owner's PC.
 * The lock member is required: spin_is_locked() below reads (lp)->lock,
 * and SPIN_LOCK_UNLOCKED initializes two fields.
 */
struct _spinlock_debug {
	unsigned char lock;		/* non-zero while held */
	unsigned long owner_pc;		/* return address of current holder, for diagnostics */
};
typedef struct _spinlock_debug spinlock_t;
/* Statically-initialized unlocked state: lock byte clear, no owner recorded. */
22 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0 }
23 #define spin_lock_init(lp) do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
/* A non-zero lock byte means somebody holds the lock. */
24 #define spin_is_locked(lp) (*((volatile unsigned char *)(&((lp)->lock))) != 0)
/* Busy-wait (compiler barrier each pass) until the lock byte reads zero. */
25 #define spin_unlock_wait(lp) do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock))
27 extern void _do_spin_lock(spinlock_t
*lock
, char *str
);
28 extern int _spin_trylock(spinlock_t
*lock
);
29 extern void _do_spin_unlock(spinlock_t
*lock
);
31 #define _raw_spin_trylock(lp) _spin_trylock(lp)
32 #define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock")
33 #define _raw_spin_unlock(lock) _do_spin_unlock(lock)
35 struct _rwlock_debug
{
36 volatile unsigned int lock
;
37 unsigned long owner_pc
;
38 unsigned long reader_pc
[NR_CPUS
];
40 typedef struct _rwlock_debug rwlock_t
;
42 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, {0} }
44 #define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
45 #define rwlock_is_locked(lp) ((lp)->lock != 0)
47 extern void _do_read_lock(rwlock_t
*rw
, char *str
);
48 extern void _do_read_unlock(rwlock_t
*rw
, char *str
);
49 extern void _do_write_lock(rwlock_t
*rw
, char *str
);
50 extern void _do_write_unlock(rwlock_t
*rw
);
/* The debug lock ops run with local interrupts off so the bookkeeping in
 * _do_*_lock/unlock cannot be torn by an interrupt taking the same lock.
 * Each macro's "} while(0)" terminator was lost in this copy; restored.
 */
#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_lock(lock, "read_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_lock(lock, "write_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_do_write_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
80 #else /* !CONFIG_DEBUG_SPINLOCK */
/* Non-debug spinlock: a single byte, set with ldstub, cleared with stb. */
typedef unsigned char spinlock_t;

#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)

/* Busy-wait until the lock byte reads zero; barrier() keeps the compiler
 * from hoisting the load out of the loop.  The "do { barrier(); }" body
 * was lost in this copy; restored to match the debug variant above. */
#define spin_unlock_wait(lock) \
do { \
	barrier(); \
} while(*((volatile unsigned char *)lock))
93 extern __inline__
void _raw_spin_lock(spinlock_t
*lock
)
97 "ldstub [%0], %%g2\n\t"
98 "orcc %%g2, 0x0, %%g0\n\t"
100 " ldub [%0], %%g2\n\t"
103 "orcc %%g2, 0x0, %%g0\n\t"
105 " ldub [%0], %%g2\n\t"
110 : "g2", "memory", "cc");
113 extern __inline__
int _raw_spin_trylock(spinlock_t
*lock
)
116 __asm__
__volatile__("ldstub [%1], %0"
120 return (result
== 0);
123 extern __inline__
void _raw_spin_unlock(spinlock_t
*lock
)
125 __asm__
__volatile__("stb %%g0, [%0]" : : "r" (lock
) : "memory");
128 /* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 */
/* Non-debug rwlock: 24-bit reader counter plus a writer byte in one word
 * (layout described in the comment below). */
typedef struct { volatile unsigned int lock; } rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(lp) do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(lp) ((lp)->lock != 0)
148 /* Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  rwlock_t
 *	------------------------------------
 *	 31                       8 7     0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
162 extern __inline__
void _read_lock(rwlock_t
*rw
)
164 register rwlock_t
*lp
asm("g1");
166 __asm__
__volatile__(
168 "call ___rw_read_enter\n\t"
169 " ldstub [%%g1 + 3], %%g2\n"
172 : "g2", "g4", "memory", "cc");
/* Irq-safe wrapper around _read_lock (see the NOTE above about readers in
 * interrupts).  The _read_lock call and the "} while(0)" terminator were
 * lost in this copy; restored to match the sibling macros. */
#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)
182 extern __inline__
void _read_unlock(rwlock_t
*rw
)
184 register rwlock_t
*lp
asm("g1");
186 __asm__
__volatile__(
188 "call ___rw_read_exit\n\t"
189 " ldstub [%%g1 + 3], %%g2\n"
192 : "g2", "g4", "memory", "cc");
/* Irq-safe wrapper around _read_unlock; the "} while(0)" terminator was
 * lost in this copy and is restored to match the sibling macros. */
#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
202 extern __inline__
void _raw_write_lock(rwlock_t
*rw
)
204 register rwlock_t
*lp
asm("g1");
206 __asm__
__volatile__(
208 "call ___rw_write_enter\n\t"
209 " ldstub [%%g1 + 3], %%g2\n"
212 : "g2", "g4", "memory", "cc");
/* Write unlock clears the whole word (writer byte and any in-progress
 * counter update are both owned by us at this point). */
215 #define _raw_write_unlock(rw) do { (rw)->lock = 0; } while(0)
217 #endif /* CONFIG_DEBUG_SPINLOCK */
219 #endif /* !(__ASSEMBLY__) */
221 #endif /* __SPARC_SPINLOCK_H */