/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#ifdef CONFIG_DEBUG_SPINLOCK
struct _spinlock_debug {
	unsigned char lock;
	unsigned long owner_pc;
};
typedef struct _spinlock_debug spinlock_t;

#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0 }
#define spin_lock_init(lp)	do { *(lp)= SPIN_LOCK_UNLOCKED; } while(0)
#define spin_is_locked(lp)	(*((volatile unsigned char *)(&((lp)->lock))) != 0)
#define spin_unlock_wait(lp)	do { barrier(); } while(*(volatile unsigned char *)(&(lp)->lock))

extern void _do_spin_lock(spinlock_t *lock, char *str);
extern int _spin_trylock(spinlock_t *lock);
extern void _do_spin_unlock(spinlock_t *lock);

#define _raw_spin_trylock(lp)	_spin_trylock(lp)
#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
struct _rwlock_debug {
	volatile unsigned int lock;
	unsigned long owner_pc;
	unsigned long reader_pc[NR_CPUS];
};
typedef struct _rwlock_debug rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, {0} }

#define rwlock_init(lp)		do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(lp)	((lp)->lock != 0)

extern void _do_read_lock(rwlock_t *rw, char *str);
extern void _do_read_unlock(rwlock_t *rw, char *str);
extern void _do_write_lock(rwlock_t *rw, char *str);
extern void _do_write_unlock(rwlock_t *rw);
#define _raw_read_lock(lock)	\
do {	unsigned long flags;	\
	local_irq_save(flags);	\
	_do_read_lock(lock, "read_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_read_unlock(lock) \
do {	unsigned long flags;	\
	local_irq_save(flags);	\
	_do_read_unlock(lock, "read_unlock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_lock(lock) \
do {	unsigned long flags;	\
	local_irq_save(flags);	\
	_do_write_lock(lock, "write_lock"); \
	local_irq_restore(flags); \
} while(0)

#define _raw_write_unlock(lock) \
do {	unsigned long flags;	\
	local_irq_save(flags);	\
	_do_write_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
#else /* !CONFIG_DEBUG_SPINLOCK */

typedef unsigned char spinlock_t;
#define SPIN_LOCK_UNLOCKED	0

#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)

#define spin_unlock_wait(lock) \
do { \
	barrier(); \
} while(*((volatile unsigned char *)lock))
extern __inline__ void _raw_spin_lock(spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"		/* atomically set the lock byte, old value in %g2 */
	"orcc	%%g2, 0x0, %%g0\n\t"	/* was it already held? */
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"	/* spin on plain loads until the byte clears */
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"			/* then retry the ldstub */
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}
extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	/* ldstub returns the previous lock byte; zero means we now own it. */
	return (result == 0);
}
extern __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}
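
/* Illustrative sketch (not part of the original header): the ldstub
 * loop in _raw_spin_lock() above behaves roughly like the C below,
 * assuming a hypothetical atomic_xchg_byte() that atomically stores
 * 0xff to a byte and returns its previous value, which is what ldstub
 * does.  The inner read-only loop corresponds to the ".subsection 2"
 * code: the byte is only re-ldstub'd after a plain load has seen it
 * drop back to zero, so a spinning CPU does not hammer the bus with
 * atomic operations.
 *
 *	static void spin_lock_sketch(volatile unsigned char *lock)
 *	{
 *		while (atomic_xchg_byte(lock, 0xff) != 0)
 *			while (*lock != 0)
 *				;
 *	}
 */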
/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 */
typedef struct { volatile unsigned int lock; } rwlock_t;

#define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }

#define rwlock_init(lp)		do { *(lp)= RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(lp)	((lp)->lock != 0)
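
/* Usage sketch (illustrative, not part of the original header): the
 * "mix" rule described above refers to the generic kernel wrappers
 * built on these _raw_ primitives.  With readers in interrupt context
 * but no interrupt writers, readers may take the plain variant while
 * every writer must use an irq-safe one; example_rwlock and flags
 * below are hypothetical.
 *
 *	read_lock(&example_rwlock);
 *	...
 *	read_unlock(&example_rwlock);
 *
 *	write_lock_irqsave(&example_rwlock, flags);
 *	...
 *	write_unlock_irqrestore(&example_rwlock, flags);
 */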
/* Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  rwlock_t
 *	------------------------------------
 *	 31                       8  7    0
 *
 * wlock signifies the one writer is in or somebody is updating
 * counter. For a writer, if he successfully acquires the wlock,
 * but counter is non-zero, he has to release the lock and wait,
 * till both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
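
/* Illustrative sketch (not part of the original header): given the
 * layout above, the two fields decompose as below; the helper names
 * are hypothetical, and the real manipulation is done by the ___rw_*
 * assembler routines called from the inline functions that follow.
 * Note that the ldstub operands use "[%%g1 + 3]": on big-endian Sparc
 * that is byte 3 of the word, i.e. the low-order wlock byte.
 *
 *	static unsigned int rw_reader_count(rwlock_t *rw)
 *	{
 *		return rw->lock >> 8;
 *	}
 *
 *	static unsigned int rw_wlock_byte(rwlock_t *rw)
 *	{
 *		return rw->lock & 0xff;
 *	}
 */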
extern __inline__ void _read_lock(rwlock_t *rw)
{
	register rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"	/* grab the wlock byte in the delay slot */
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
#define _raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)
extern __inline__ void _read_unlock(rwlock_t *rw)
{
	register rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
#define _raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)
extern __inline__ void _raw_write_lock(rwlock_t *rw)
{
	register rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}
#define _raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)

#endif /* CONFIG_DEBUG_SPINLOCK */

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */