/* Imported from Linux 2.3.16 (davej-history tree): include/asm-sparc64/spinlock.h */
/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */
6 #ifndef __SPARC64_SPINLOCK_H
7 #define __SPARC64_SPINLOCK_H
9 #ifndef __ASSEMBLY__
11 #ifndef __SMP__
13 #if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
14 typedef struct { } spinlock_t;
15 # define SPIN_LOCK_UNLOCKED (spinlock_t) { }
16 #else
17 typedef unsigned char spinlock_t;
18 # define SPIN_LOCK_UNLOCKED 0
19 #endif
/* Uniprocessor spin operations: locking itself is a no-op, but the
 * _irq variants still disable/enable interrupts and the _bh variants
 * still adjust local_bh_count, exactly as on SMP.  barrier() keeps
 * the compiler from moving code across the count update.
 */
#define spin_lock_init(lock)	do { } while(0)
#define spin_lock(lock)		(void)(lock) /* Avoid warnings about unused variable */
#define spin_trylock(lock)	(1)
#define spin_unlock_wait(lock)	do { } while(0)
#define spin_unlock(lock)	do { } while(0)
#define spin_lock_irq(lock)	cli()
#define spin_unlock_irq(lock)	sti()

#define spin_lock_bh(lock)			\
do {	local_bh_count++;			\
	barrier();				\
} while(0)

#define spin_unlock_bh(lock)			\
do {	barrier();				\
	local_bh_count--;			\
} while(0)

#define spin_lock_irqsave(lock, flags)		save_and_cli(flags)
#define spin_unlock_irqrestore(lock, flags)	restore_flags(flags)
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
typedef struct { } rwlock_t;
# define RW_LOCK_UNLOCKED (rwlock_t) { }
#else
typedef unsigned int rwlock_t;
# define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
#endif
/* Uniprocessor rwlocks: no contention is possible, so these reduce to
 * the same no-ops / interrupt and local_bh_count manipulations as the
 * UP spinlock macros.
 */
#define read_lock(lock)		(void)(lock) /* Avoid warnings about unused variable */
#define read_unlock(lock)	do { } while(0)
#define write_lock(lock)	(void)(lock) /* Likewise */
#define write_unlock(lock)	do { } while(0)
#define read_lock_irq(lock)	cli()
#define read_unlock_irq(lock)	sti()

#define read_lock_bh(lock)			\
do {	local_bh_count++;			\
	barrier();				\
} while(0)

#define read_unlock_bh(lock)			\
do {	barrier();				\
	local_bh_count--;			\
} while(0)

#define write_lock_irq(lock)	cli()
#define write_unlock_irq(lock)	sti()

#define write_lock_bh(lock)			\
do {	local_bh_count++;			\
	barrier();				\
} while(0)

#define write_unlock_bh(lock)			\
do {	barrier();				\
	local_bh_count--;			\
} while(0)

#define read_lock_irqsave(lock, flags)		save_and_cli(flags)
#define read_unlock_irqrestore(lock, flags)	restore_flags(flags)
#define write_lock_irqsave(lock, flags)		save_and_cli(flags)
#define write_unlock_irqrestore(lock, flags)	restore_flags(flags)
91 #else /* !(__SMP__) */
/* To get debugging spinlocks which detect and catch
 * deadlock situations, set DEBUG_SPINLOCKS in the sparc64
 * specific makefile and rebuild your kernel.
 */
/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
111 #ifndef SPIN_LOCK_DEBUG
/* The lock is a single byte: 0 when free, non-zero (0xff, set by
 * ldstub) when held.
 */
typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)

/* Poll the lock byte until it reads zero; membar #LoadLoad orders the
 * successive polls.
 */
#define spin_unlock_wait(lock)	\
do {	membar("#LoadLoad");	\
} while(*((volatile unsigned char *)lock))
123 extern __inline__ void spin_lock(spinlock_t *lock)
125 __asm__ __volatile__("
126 1: ldstub [%0], %%g7
127 brnz,pn %%g7, 2f
128 membar #StoreLoad | #StoreStore
129 .subsection 2
130 2: ldub [%0], %%g7
131 brnz,pt %%g7, 2b
132 membar #LoadLoad
133 b,a,pt %%xcc, 1b
134 .previous
135 " : /* no outputs */
136 : "r" (lock)
137 : "g7", "memory");
140 extern __inline__ int spin_trylock(spinlock_t *lock)
142 unsigned int result;
143 __asm__ __volatile__("ldstub [%1], %0\n\t"
144 "membar #StoreLoad | #StoreStore"
145 : "=r" (result)
146 : "r" (lock)
147 : "memory");
148 return (result == 0);
151 extern __inline__ void spin_unlock(spinlock_t *lock)
153 __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
154 "stb %%g0, [%0]\n\t"
155 "membar #StoreStore | #StoreLoad"
156 : /* No outputs */
157 : "r" (lock)
158 : "memory");
161 extern __inline__ void spin_lock_irq(spinlock_t *lock)
163 __asm__ __volatile__("
164 wrpr %%g0, 15, %%pil
165 1: ldstub [%0], %%g7
166 brnz,pn %%g7, 2f
167 membar #StoreLoad | #StoreStore
168 .subsection 2
169 2: ldub [%0], %%g7
170 brnz,pt %%g7, 2b
171 membar #LoadLoad
172 b,a,pt %%xcc, 1b
173 .previous
174 " : /* no outputs */
175 : "r" (lock)
176 : "g7", "memory");
179 extern __inline__ void spin_unlock_irq(spinlock_t *lock)
181 __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
182 "stb %%g0, [%0]\n\t"
183 "membar #StoreStore | #StoreLoad\n\t"
184 "wrpr %%g0, 0x0, %%pil"
185 : /* no outputs */
186 : "r" (lock)
187 : "memory");
/* bh variants: bump local_bh_count before taking the lock and drop it
 * after release; the lock acquisition/release itself acts as the
 * compiler barrier.
 */
#define spin_lock_bh(__lock)	\
do {	local_bh_count++;	\
	spin_lock(__lock);	\
} while(0)

#define spin_unlock_bh(__lock)	\
do {	spin_unlock(__lock);	\
	local_bh_count--;	\
} while(0)
/* Save the current interrupt level (%pil) into flags, raise PIL to 15,
 * then acquire with the usual ldstub / out-of-line spin.
 * NOTE(review): the lock pointer is pinned to register %g1 --
 * presumably some out-of-line or debug code depends on that; confirm
 * before changing.
 */
#define spin_lock_irqsave(__lock, flags)			\
do {	register spinlock_t *__lp asm("g1");			\
	__lp = (__lock);					\
	__asm__ __volatile__(					\
	"\n	rdpr		%%pil, %0\n"			\
	"	wrpr		%%g0, 15, %%pil\n"		\
	"1:	ldstub		[%1], %%g7\n"			\
	"	brnz,pn		%%g7, 2f\n"			\
	"	 membar		#StoreLoad | #StoreStore\n"	\
	"	.subsection	2\n"				\
	"2:	ldub		[%1], %%g7\n"			\
	"	brnz,pt		%%g7, 2b\n"			\
	"	 membar		#LoadLoad\n"			\
	"	b,a,pt		%%xcc, 1b\n"			\
	"	.previous\n"					\
	: "=&r" (flags)						\
	: "r" (__lp)						\
	: "g7", "memory");					\
} while(0)
220 extern __inline__ void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
222 __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
223 "stb %%g0, [%0]\n\t"
224 "membar #StoreStore | #StoreLoad\n\t"
225 "wrpr %1, 0x0, %%pil"
226 : /* no outputs */
227 : "r" (lock), "r" (flags)
228 : "memory");
231 #else /* !(SPIN_LOCK_DEBUG) */
/* Debugging spinlocks: record the owner's PC and cpu so deadlocks can
 * be reported; the actual lock/unlock work is done out of line by the
 * _do_spin_* helpers (which also receive the caller's name string).
 */
typedef struct {
	unsigned char lock;
	unsigned int owner_pc, owner_cpu;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 0, 0xff }
#define spin_lock_init(__lock)	\
do {	(__lock)->lock = 0; \
	(__lock)->owner_pc = 0; \
	(__lock)->owner_cpu = 0xff; \
} while(0)
#define spin_is_locked(__lock)	(*((volatile unsigned char *)(&((__lock)->lock))) != 0)
#define spin_unlock_wait(__lock)	\
do { \
	membar("#LoadLoad"); \
} while(*((volatile unsigned char *)(&((__lock)->lock))))

extern void _do_spin_lock (spinlock_t *lock, char *str);
extern void _do_spin_unlock (spinlock_t *lock);
extern int _spin_trylock (spinlock_t *lock);

#define spin_trylock(lp)	_spin_trylock(lp)
#define spin_lock(lock)		_do_spin_lock(lock, "spin_lock")
#define spin_lock_irq(lock)	do { __cli(); _do_spin_lock(lock, "spin_lock_irq"); } while(0)
#define spin_lock_bh(lock)	do { local_bh_count++; _do_spin_lock(lock, "spin_lock_bh"); } while(0)
#define spin_lock_irqsave(lock, flags)	do { __save_and_cli(flags); _do_spin_lock(lock, "spin_lock_irqsave"); } while(0)
#define spin_unlock(lock)	_do_spin_unlock(lock)
#define spin_unlock_irq(lock)	do { _do_spin_unlock(lock); __sti(); } while(0)
#define spin_unlock_bh(lock)	do { _do_spin_unlock(lock); local_bh_count--; } while(0)
#define spin_unlock_irqrestore(lock, flags)	do { _do_spin_unlock(lock); __restore_flags(flags); } while(0)
263 #endif /* SPIN_LOCK_DEBUG */
/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
267 #ifndef SPIN_LOCK_DEBUG
/* The rwlock is a single word manipulated by the out-of-line
 * __read_lock/__write_lock assembler helpers; 0 means unlocked.
 */
typedef unsigned int rwlock_t;
#define RW_LOCK_UNLOCKED	0
/* The real lock manipulation lives in out-of-line assembler helpers.
 * Calling convention: the lock pointer is pinned to %g1, and jmpl
 * deposits the call site's PC in %g3 for the helper to return through.
 * The clobber lists name the globals the helpers scribble on (the
 * write-side helpers additionally use %g2).
 */
#define read_lock(__rw_lck)	\
do {	register rwlock_t *__X asm("g1"); \
	__asm__ __volatile__("sethi %%hi(__read_lock), %%g3\n\t" \
			     "jmpl %%g3 + %%lo(__read_lock), %%g3\n\t" \
			     " nop\n1:" \
			     : : "r" (__X = (__rw_lck)) \
			     : "g3", "g5", "g7", "cc", "memory"); \
} while(0)

#define read_unlock(__rw_lck)	\
do {	register rwlock_t *__X asm("g1"); \
	__asm__ __volatile__("sethi %%hi(__read_unlock), %%g3\n\t" \
			     "jmpl %%g3 + %%lo(__read_unlock), %%g3\n\t" \
			     " nop\n1:" \
			     : : "r" (__X = (__rw_lck)) \
			     : "g3", "g5", "g7", "cc", "memory"); \
} while(0)

#define write_lock(__rw_lck)	\
do {	register rwlock_t *__X asm("g1"); \
	__asm__ __volatile__("sethi %%hi(__write_lock), %%g3\n\t" \
			     "jmpl %%g3 + %%lo(__write_lock), %%g3\n\t" \
			     " nop\n1:" \
			     : : "r" (__X = (__rw_lck)) \
			     : "g2", "g3", "g5", "g7", "cc", "memory"); \
} while(0)

#define write_unlock(__rw_lck)	\
do {	register rwlock_t *__X asm("g1"); \
	__asm__ __volatile__("sethi %%hi(__write_unlock), %%g3\n\t" \
			     "jmpl %%g3 + %%lo(__write_unlock), %%g3\n\t" \
			     " nop\n1:" \
			     : : "r" (__X = (__rw_lck)) \
			     : "g2", "g3", "g5", "g7", "cc", "memory"); \
} while(0)
/* irq / bh / irqsave wrappers around the basic rwlock operations. */
#define read_lock_irq(lock)	do { __cli(); read_lock(lock); } while (0)
#define read_unlock_irq(lock)	do { read_unlock(lock); __sti(); } while (0)
#define read_lock_bh(lock)	do { local_bh_count++; read_lock(lock); } while (0)
#define read_unlock_bh(lock)	do { read_unlock(lock); local_bh_count--; } while (0)
#define write_lock_irq(lock)	do { __cli(); write_lock(lock); } while (0)
#define write_unlock_irq(lock)	do { write_unlock(lock); __sti(); } while (0)
#define write_lock_bh(lock)	do { local_bh_count++; write_lock(lock); } while (0)
#define write_unlock_bh(lock)	do { write_unlock(lock); local_bh_count--; } while (0)

#define read_lock_irqsave(lock, flags)	\
do { __save_and_cli(flags); read_lock(lock); } while (0)
#define read_unlock_irqrestore(lock, flags) \
do { read_unlock(lock); __restore_flags(flags); } while (0)
#define write_lock_irqsave(lock, flags)	\
do { __save_and_cli(flags); write_lock(lock); } while (0)
#define write_unlock_irqrestore(lock, flags) \
do { write_unlock(lock); __restore_flags(flags); } while (0)
326 #else /* !(SPIN_LOCK_DEBUG) */
/* Debugging rwlock: record the writer's PC/cpu and up to four reader
 * call sites so lock trouble can be reported by the out-of-line
 * _do_read/_do_write helpers.
 */
typedef struct {
	unsigned long lock;
	unsigned int writer_pc, writer_cpu;
	unsigned int reader_pc[4];
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { 0, 0, 0, 0 } }

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
/* Debug read-side operations: plain read_lock/read_unlock disable
 * interrupts around the helper so the bookkeeping is not torn by an
 * interrupt, then restore the previous state.
 */
#define read_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_lock(lock, "read_lock"); \
	__restore_flags(flags); \
} while(0)
#define read_lock_irq(lock)	do { __cli(); _do_read_lock(lock, "read_lock_irq"); } while(0)
#define read_lock_bh(lock)	do { local_bh_count++; _do_read_lock(lock, "read_lock_bh"); } while(0)
#define read_lock_irqsave(lock, flags)	do { __save_and_cli(flags); _do_read_lock(lock, "read_lock_irqsave"); } while(0)

#define read_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_read_unlock(lock, "read_unlock"); \
	__restore_flags(flags); \
} while(0)
/* BUGFIX: the original lacked the ';' after __sti(), which broke
 * compilation of any read_unlock_irq() user under SPIN_LOCK_DEBUG.
 */
#define read_unlock_irq(lock)	do { _do_read_unlock(lock, "read_unlock_irq"); __sti(); } while(0)
#define read_unlock_bh(lock)	do { _do_read_unlock(lock, "read_unlock_bh"); local_bh_count--; } while(0)
#define read_unlock_irqrestore(lock, flags)	do { _do_read_unlock(lock, "read_unlock_irqrestore"); __restore_flags(flags); } while(0)
/* Debug write-side operations, mirroring the read-side set. */
#define write_lock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_lock(lock, "write_lock"); \
	__restore_flags(flags); \
} while(0)
#define write_lock_irq(lock)	do { __cli(); _do_write_lock(lock, "write_lock_irq"); } while(0)
#define write_lock_bh(lock)	do { local_bh_count++; _do_write_lock(lock, "write_lock_bh"); } while(0)
#define write_lock_irqsave(lock, flags)	do { __save_and_cli(flags); _do_write_lock(lock, "write_lock_irqsave"); } while(0)

#define write_unlock(lock) \
do {	unsigned long flags; \
	__save_and_cli(flags); \
	_do_write_unlock(lock); \
	__restore_flags(flags); \
} while(0)
#define write_unlock_irq(lock)	do { _do_write_unlock(lock); __sti(); } while(0)
#define write_unlock_bh(lock)	do { _do_write_unlock(lock); local_bh_count--; } while(0)
#define write_unlock_irqrestore(lock, flags)	do { _do_write_unlock(lock); __restore_flags(flags); } while(0)
380 #endif /* SPIN_LOCK_DEBUG */
382 #endif /* __SMP__ */
384 #endif /* !(__ASSEMBLY__) */
386 #endif /* !(__SPARC64_SPINLOCK_H) */