1 /* spinlock.h: 64-bit Sparc spinlock support.
3 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6 #ifndef __SPARC64_SPINLOCK_H
7 #define __SPARC64_SPINLOCK_H
11 /* To get debugging spinlocks which detect and catch
12 * deadlock situations, set DEBUG_SPINLOCKS in the sparc64
13 * specific makefile and rebuild your kernel.
16 /* All of these locking primitives are expected to work properly
17 * even in an RMO memory model, which currently is what the kernel
20 * There is another issue. Because we play games to save cycles
21 * in the non-contention case, we need to be extra careful about
22 * branch targets into the "spinning" code. They live in their
23 * own section, but the newer V9 branches have a shorter range
24 * than the traditional 32-bit sparc branch variants. The rule
25 * is that the branches that go into and out of the spinner sections
26 * must be pre-V9 branches.
#ifndef SPIN_LOCK_DEBUG

/* Non-debug build: a spinlock is a single byte, 0 = unlocked,
 * non-zero = held.  No owner tracking.
 */
typedef unsigned char spinlock_t;
/* Static initializer: unlocked. */
#define SPIN_LOCK_UNLOCKED 0

/* Runtime init: clear the lock byte. */
#define spin_lock_init(lock) (*((unsigned char *)(lock)) = 0)
/* Non-zero lock byte means the lock is currently held. */
#define spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

/* Busy-wait (without acquiring) until the lock is seen clear; the
 * membar orders the successive loads of the lock byte.
 */
#define spin_unlock_wait(lock) \
do { membar("#LoadLoad"); \
} while(*((volatile unsigned char *)lock))
/* spin_lock: busy-wait until the lock byte is acquired.
 * Only part of the asm body is visible in this fragment: the
 * membar #StoreLoad | #StoreStore shown is the acquire barrier that
 * orders the (presumed ldstub, per the spin_lock_irqsave sequence
 * below) test-and-set ahead of all accesses inside the critical
 * section.  NOTE(review): spin loop, constraints and closing brace
 * not visible here -- confirm against the full file.
 */
extern __inline__ void spin_lock(spinlock_t *lock)
__asm__ __volatile__("
membar #StoreLoad | #StoreStore
/* spin_trylock: a single atomic ldstub attempt, no spinning.
 * %0 receives the previous lock byte (0 => the lock was free and is
 * now ours); the membar is the acquire barrier for the success case.
 * NOTE(review): output constraints and the return expression are not
 * visible in this fragment -- confirm the caller-visible convention.
 */
extern __inline__ int spin_trylock(spinlock_t *lock)
__asm__ __volatile__("ldstub [%1], %0\n\t"
	     "membar #StoreLoad | #StoreStore"
/* spin_unlock: release the lock.
 * membar #StoreStore | #LoadStore orders every access of the critical
 * section before the clearing store (the store itself is not visible
 * in this fragment); the second membar orders the release against
 * later memory operations.
 */
extern __inline__ void spin_unlock(spinlock_t *lock)
__asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
	     "membar #StoreStore | #StoreLoad"
/* spin_lock_irq: disable interrupts and acquire the lock.
 * NOTE(review): the %pil raise is presumably in the part of the asm
 * not visible here (cf. spin_unlock_irq below, which writes %pil back
 * to 0); the visible membar is the acquire barrier.
 */
extern __inline__ void spin_lock_irq(spinlock_t *lock)
__asm__ __volatile__("
membar #StoreLoad | #StoreStore
/* spin_unlock_irq: release the lock, then re-enable interrupts by
 * writing 0 into %pil (wrpr %g0, 0x0, %pil).  The leading membars are
 * the release barriers ordering the critical section's accesses before
 * the unlock (clearing store not visible in this fragment).
 */
extern __inline__ void spin_unlock_irq(spinlock_t *lock)
__asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
	     "membar #StoreStore | #StoreLoad\n\t"
	     "wrpr %%g0, 0x0, %%pil"
/* Bottom-half-safe variants: spin_lock_bh bumps local_bh_count to hold
 * off bottom-half execution before taking the lock; spin_unlock_bh
 * releases the lock first, then (presumably, in the macro tail not
 * visible in this fragment) drops the count.
 */
#define spin_lock_bh(__lock) \
do { local_bh_count++; \
#define spin_unlock_bh(__lock) \
do { spin_unlock(__lock); \
/* spin_lock_irqsave: save the caller's interrupt level and acquire.
 * Visible sequence: rdpr saves %pil into %0 ('flags'), wrpr raises
 * %pil to 15 blocking maskable interrupts, then the ldstub
 * test-and-set at 1:.  On contention the 2: loop re-reads the byte
 * with a plain ldub (cheaper than hammering the atomic) until it is
 * clear, then branches back to retry the ldstub.  The lock address is
 * pinned in register %g1.  NOTE(review): constraint list and macro
 * tail are not visible in this fragment.
 */
#define spin_lock_irqsave(__lock, flags) \
do { register spinlock_t *__lp asm("g1"); \
__asm__ __volatile__( \
"\n rdpr %%pil, %0\n" \
" wrpr %%g0, 15, %%pil\n" \
"1: ldstub [%1], %%g7\n" \
" brnz,pn %%g7, 2f\n" \
" membar #StoreLoad | #StoreStore\n" \
"2: ldub [%1], %%g7\n" \
" brnz,pt %%g7, 2b\n" \
" membar #LoadLoad\n" \
" b,a,pt %%xcc, 1b\n" \
/* spin_unlock_irqrestore: release the lock, then restore the interrupt
 * level previously captured by spin_lock_irqsave (wrpr of 'flags' into
 * %pil).  The membars are the release barriers; the clearing store is
 * not visible in this fragment.
 */
extern __inline__ void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
__asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
	     "membar #StoreStore | #StoreLoad\n\t"
	     "wrpr %1, 0x0, %%pil"
	     : "r" (lock), "r" (flags)
#else /* !(SPIN_LOCK_DEBUG) */

/* Debug spinlock: the owning PC and CPU are recorded next to the lock
 * byte (the struct head is not visible in this fragment).
 */
unsigned int owner_pc, owner_cpu;
/* Statically unlocked: lock clear, no owner PC, owner_cpu 0xff = none. */
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
/* Runtime init: clear the lock byte and the owner bookkeeping
 * (macro tail not visible in this fragment).
 */
#define spin_lock_init(__lock) \
do { (__lock)->lock = 0; \
(__lock)->owner_pc = 0; \
(__lock)->owner_cpu = 0xff; \
#define spin_is_locked(__lock) (*((volatile unsigned char *)(&((__lock)->lock))) != 0)
/* Spin (without acquiring) until the lock byte reads zero. */
#define spin_unlock_wait(__lock) \
membar("#LoadLoad"); \
} while(*((volatile unsigned char *)(&((__lock)->lock))))
/* Out-of-line checked implementations of the debug spinlock ops;
 * 'str' is the caller's name ("spin_lock", "spin_lock_irq", ...) used
 * to label diagnostics.
 */
extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _spin_trylock (spinlock_t *lock);
/* Debug wrappers: every operation funnels through the checked
 * out-of-line helpers, each tagged with its call-site name.
 */
#define spin_trylock(lp) _spin_trylock(lp)
#define spin_lock(lock) _do_spin_lock(lock, "spin_lock")
/* irq variants: interrupts off before acquiring, back on after release. */
#define spin_lock_irq(lock) do { __cli(); _do_spin_lock(lock, "spin_lock_irq"); } while(0)
/* bh variants: hold off bottom halves via local_bh_count while held. */
#define spin_lock_bh(lock) do { local_bh_count++; _do_spin_lock(lock, "spin_lock_bh"); } while(0)
/* irqsave/irqrestore: capture interrupt state in 'flags', restore later. */
#define spin_lock_irqsave(lock, flags) do { __save_and_cli(flags); _do_spin_lock(lock, "spin_lock_irqsave"); } while(0)
#define spin_unlock(lock) _do_spin_unlock(lock)
#define spin_unlock_irq(lock) do { _do_spin_unlock(lock); __sti(); } while(0)
#define spin_unlock_bh(lock) do { _do_spin_unlock(lock); local_bh_count--; } while(0)
#define spin_unlock_irqrestore(lock, flags) do { _do_spin_unlock(lock); __restore_flags(flags); } while(0)
#endif /* SPIN_LOCK_DEBUG */

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

#ifndef SPIN_LOCK_DEBUG

/* Non-debug rwlock: a single counter word, 0 = unlocked.
 * NOTE(review): the reader/writer encoding lives in the out-of-line
 * __read_lock/__write_lock helpers, which are not visible here.
 */
typedef unsigned int rwlock_t;
#define RW_LOCK_UNLOCKED 0
/* read_lock: call the out-of-line __read_lock helper with the lock
 * address pinned in %g1; the sethi/jmpl pair builds and jumps to the
 * target, and the clobber list covers the registers the helper may
 * trash.  Macro tail not visible in this fragment.
 */
#define read_lock(__rw_lck) \
do { register rwlock_t *__X asm("g1"); \
__asm__ __volatile__("sethi %%hi(__read_lock), %%g3\n\t" \
"jmpl %%g3 + %%lo(__read_lock), %%g3\n\t" \
: : "r" (__X = (__rw_lck)) \
: "g3", "g5", "g7", "cc", "memory"); \
/* read_unlock: same out-of-line calling pattern as read_lock, jumping
 * to __read_unlock with the lock address in %g1.  Macro tail not
 * visible in this fragment.
 */
#define read_unlock(__rw_lck) \
do { register rwlock_t *__X asm("g1"); \
__asm__ __volatile__("sethi %%hi(__read_unlock), %%g3\n\t" \
"jmpl %%g3 + %%lo(__read_unlock), %%g3\n\t" \
: : "r" (__X = (__rw_lck)) \
: "g3", "g5", "g7", "cc", "memory"); \
/* write_lock: out-of-line call to __write_lock, lock address in %g1.
 * Note the write paths additionally clobber %g2 (compare the read
 * variants above).  Macro tail not visible in this fragment.
 */
#define write_lock(__rw_lck) \
do { register rwlock_t *__X asm("g1"); \
__asm__ __volatile__("sethi %%hi(__write_lock), %%g3\n\t" \
"jmpl %%g3 + %%lo(__write_lock), %%g3\n\t" \
: : "r" (__X = (__rw_lck)) \
: "g2", "g3", "g5", "g7", "cc", "memory"); \
/* write_unlock: out-of-line call to __write_unlock, lock address in
 * %g1, same clobbers as write_lock.  Macro tail not visible in this
 * fragment.
 */
#define write_unlock(__rw_lck) \
do { register rwlock_t *__X asm("g1"); \
__asm__ __volatile__("sethi %%hi(__write_unlock), %%g3\n\t" \
"jmpl %%g3 + %%lo(__write_unlock), %%g3\n\t" \
: : "r" (__X = (__rw_lck)) \
: "g2", "g3", "g5", "g7", "cc", "memory"); \
/* Compound rwlock variants (non-debug): irq wrappers disable interrupts
 * before locking and re-enable after unlocking; bh wrappers hold off
 * bottom halves via local_bh_count for the duration.
 */
#define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)
#define read_unlock_irq(lock) do { read_unlock(lock); __sti(); } while (0)
#define read_lock_bh(lock) do { local_bh_count++; read_lock(lock); } while (0)
#define read_unlock_bh(lock) do { read_unlock(lock); local_bh_count--; } while (0)
#define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
#define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
#define write_lock_bh(lock) do { local_bh_count++; write_lock(lock); } while (0)
#define write_unlock_bh(lock) do { write_unlock(lock); local_bh_count--; } while (0)
/* irqsave/irqrestore rwlock variants: capture the interrupt state in
 * 'flags' before locking, restore it after unlocking.
 */
#define read_lock_irqsave(lock, flags) \
do { __save_and_cli(flags); read_lock(lock); } while (0)
#define read_unlock_irqrestore(lock, flags) \
do { read_unlock(lock); __restore_flags(flags); } while (0)
#define write_lock_irqsave(lock, flags) \
do { __save_and_cli(flags); write_lock(lock); } while (0)
#define write_unlock_irqrestore(lock, flags) \
do { write_unlock(lock); __restore_flags(flags); } while (0)
#else /* !(SPIN_LOCK_DEBUG) */

/* Debug rwlock: remembers the writer's PC/CPU plus the PCs of up to
 * four concurrent readers (the struct head is not visible in this
 * fragment).
 */
unsigned int writer_pc, writer_cpu;
unsigned int reader_pc[4];
/* Statically unlocked: no writer (cpu 0xff), no recorded readers. */
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { 0, 0, 0, 0 } }

/* Out-of-line checked implementations; 'str' is the caller's name,
 * used to label diagnostics.
 */
extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
/* Debug read_lock: interrupts are disabled across the checked acquire
 * so the debug bookkeeping cannot be torn by an interrupt (macro tail
 * not visible in this fragment).
 */
#define read_lock(lock) \
do { unsigned long flags; \
__save_and_cli(flags); \
_do_read_lock(lock, "read_lock"); \
__restore_flags(flags); \
#define read_lock_irq(lock) do { __cli(); _do_read_lock(lock, "read_lock_irq"); } while(0)
/* bh variant: hold off bottom halves via local_bh_count while held. */
#define read_lock_bh(lock) do { local_bh_count++; _do_read_lock(lock, "read_lock_bh"); } while(0)
/* Save interrupt state into 'flags', disable, then acquire. */
#define read_lock_irqsave(lock, flags) do { __save_and_cli(flags); _do_read_lock(lock, "read_lock_irqsave"); } while(0)
/* Debug read_unlock: like read_lock above, the checked release runs
 * under save/restore of the interrupt state (macro tail not visible
 * in this fragment).
 */
#define read_unlock(lock) \
do { unsigned long flags; \
__save_and_cli(flags); \
_do_read_unlock(lock, "read_unlock"); \
__restore_flags(flags); \
#define read_unlock_irq(lock) do { _do_read_unlock(lock, "read_unlock_irq"); __sti(); } while(0) /* fix: statement terminator was missing after __sti() */
/* Release, then drop the bottom-half hold taken by read_lock_bh(). */
#define read_unlock_bh(lock) do { _do_read_unlock(lock, "read_unlock_bh"); local_bh_count--; } while(0)
/* Release, then restore the interrupt state saved by read_lock_irqsave(). */
#define read_unlock_irqrestore(lock, flags) do { _do_read_unlock(lock, "read_unlock_irqrestore"); __restore_flags(flags); } while(0)
/* Debug write_lock: checked acquire under save/restore of the
 * interrupt state (macro tail not visible in this fragment).
 */
#define write_lock(lock) \
do { unsigned long flags; \
__save_and_cli(flags); \
_do_write_lock(lock, "write_lock"); \
__restore_flags(flags); \
#define write_lock_irq(lock) do { __cli(); _do_write_lock(lock, "write_lock_irq"); } while(0)
/* bh variant: hold off bottom halves while the write lock is held. */
#define write_lock_bh(lock) do { local_bh_count++; _do_write_lock(lock, "write_lock_bh"); } while(0)
/* Save interrupt state into 'flags', disable, then acquire. */
#define write_lock_irqsave(lock, flags) do { __save_and_cli(flags); _do_write_lock(lock, "write_lock_irqsave"); } while(0)
/* Debug write_unlock: checked release under save/restore of the
 * interrupt state (macro tail not visible in this fragment).
 */
#define write_unlock(lock) \
do { unsigned long flags; \
__save_and_cli(flags); \
_do_write_unlock(lock); \
__restore_flags(flags); \
#define write_unlock_irq(lock) do { _do_write_unlock(lock); __sti(); } while(0)
/* Release, then drop the bottom-half hold taken by write_lock_bh(). */
#define write_unlock_bh(lock) do { _do_write_unlock(lock); local_bh_count--; } while(0)
/* Release, then restore the state saved by write_lock_irqsave(). */
#define write_unlock_irqrestore(lock, flags) do { _do_write_unlock(lock); __restore_flags(flags); } while(0)

#endif /* SPIN_LOCK_DEBUG */

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */