#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H

/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  M32R version:
 *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>	/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
#include <linux/compiler.h>
#include <asm/atomic.h>
#include <asm/page.h>

extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));
#define RW_LOCK_BIAS		 0x01000000
#define RW_LOCK_BIAS_STR	"0x01000000"
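
/*
 * How the bias is used (a sketch of the arithmetic; see the rwlock
 * code below): the rwlock counter starts at RW_LOCK_BIAS.  Each
 * reader subtracts 1, so N readers leave BIAS - N, still positive.
 * A writer subtracts the whole bias, so an uncontended write lock
 * leaves exactly 0:
 *
 *	0x01000000			unlocked
 *	0x01000000 - N			N readers, no writer
 *	0x00000000			write-locked
 *
 * Negative values occur only transiently, while a failed locking
 * attempt is being undone.
 */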
/* It seems that people are forgetting to
 * initialize their spinlocks properly, tsk tsk.
 * Remember to turn this off in 2.4. -ben
 */
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG	1
#else
#define SPINLOCK_DEBUG	0
#endif

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#if SPINLOCK_DEBUG
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
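
/*
 * Usage sketch (illustrative names, not part of this file): a lock
 * with static storage can use the initializer directly, while a lock
 * embedded in another object is set up at run time.
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *
 *	struct my_dev { spinlock_t lock; };
 *	...
 *	spin_lock_init(&dev->lock);
 */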

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */

#define spin_is_locked(x)	(*(volatile int *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
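
/*
 * Note on the encoding: an unlocked spinlock holds the value 1, a held
 * one holds 0, and waiters drive the value further negative while they
 * contend (see _raw_spin_lock below), so "locked" is tested as <= 0
 * rather than == 0.
 */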

/**
 * _raw_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * _raw_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->lock :  =1 : unlock
	 *            : <=0 : lock
	 * {
	 *   oldval = lock->lock; <--+ need atomic operation
	 *   lock->lock = 0;      <--+
	 * }
	 */
	__asm__ __volatile__ (
		"# spin_trylock			\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}
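
/*
 * What the sequence above does (a reading of the code, with the
 * architectural details hedged): "lock %0, @%3" loads the old value
 * and asserts the M32R bus lock; "unlock %1, @%3" stores 0 and
 * releases it, so the load/store pair is one atomic exchange.
 * "mvfc/mvtc ... psw" save and restore the processor status word, and
 * "clrpsw #0x40 -> nop" clears the interrupt-enable bit in between, so
 * the exchange cannot be interrupted locally.  DCACHE_CLEAR() expands
 * to a data-cache workaround for early M32700 (TS1) silicon, which is
 * why "r6" joins the clobber list under CONFIG_CHIP_M32700_TS1.
 * Storing 0 even on failure is safe: any value <= 0 already means
 * "locked", and waiters poll for a positive value.
 */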

static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

#if SPINLOCK_DEBUG
	__label__ here;
here:
	if (lock->magic != SPINLOCK_MAGIC) {
		printk("eip: %p\n", &&here);
		BUG();
	}
#endif
	/*
	 * lock->lock :  =1 : unlock
	 *            : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->lock -= 1;  <-- need atomic operation
	 *   if (lock->lock == 0) break;
	 *   for ( ; lock->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# spin_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	2b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
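
/*
 * Shape of the loop above: the fast path atomically decrements the
 * counter and falls straight through when the result is 0 (the lock
 * was free).  On contention the "bltz" branches to a wait loop that
 * LOCK_SECTION_START places in a separate, out-of-line text section,
 * so the uncontended path stays straight-line; the wait loop only
 * *reads* the counter ("ld") until it turns positive, then jumps back
 * to retry the atomic decrement.  ".fillinsn" is an m32r assembler
 * directive that pads out the current instruction word so the branch
 * targets that follow are aligned.
 */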

static inline void _raw_spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	mb();
	lock->lock = 1;
}
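
/*
 * No atomic sequence is needed to unlock: a single aligned word store
 * of 1 is assumed atomic here (the code above relies on this), and the
 * mb() beforehand keeps every access made inside the critical section
 * from being reordered past the releasing store.
 */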

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#if SPINLOCK_DEBUG
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define rwlock_is_locked(x)	((x)->lock != RW_LOCK_BIAS)
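
/*
 * rwlock_is_locked() follows directly from the bias scheme described
 * above at RW_LOCK_BIAS: the counter equals RW_LOCK_BIAS exactly when
 * there are no readers and no writer, so any other value means the
 * lock is held in some mode.
 */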

/*
 * Like the x86 version this code was derived from, we implement
 * read-write locks as a 32-bit counter with the high bit (sign) being
 * the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */

static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

#if SPINLOCK_DEBUG
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
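
/*
 * The slow path here is "undo and wait": a reader that drives the
 * counter negative has collided with a writer, so it atomically adds
 * its 1 back (the block at label 2), then spins reading the counter
 * (label 3) until it is positive again before retrying the decrement
 * from label 1.
 */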

static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

#if SPINLOCK_DEBUG
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	/*
	 * rw->lock :  =RW_LOCK_BIAS_STR : unlock
	 *          : !=RW_LOCK_BIAS_STR : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
	 * }
	 */
	__asm__ __volatile__ (
		"# write_lock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"sub	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		"bnez	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%3;		\n\t"
		"beq	%0, %1, 1b;		\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
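
/*
 * Same undo-and-wait structure as _raw_read_lock, with the whole bias:
 * "seth"/"or3" build the 32-bit RW_LOCK_BIAS constant in a register
 * (a single m32r immediate is too narrow for it), the writer subtracts
 * it and succeeds only if the result is exactly 0, and a failed
 * attempt adds the bias back and waits until the counter reads as the
 * full bias (no readers, no writer) before retrying.
 */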

static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock			\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;		\n\t"
		"add	%0, %1;			\n\t"
		"unlock	%0, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
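
/*
 * Both unlock paths are just the inverse atomic operation - a reader
 * adds its 1 back, a writer adds the whole bias back - so neither
 * needs a wait loop or an out-of-line section.  Unlike the plain
 * spinlock unlock, a simple store will not do here: other readers may
 * be adjusting the counter concurrently, so the update must be a
 * locked read-modify-write.
 */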

#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)

static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
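
/*
 * _raw_write_trylock() reuses the generic atomic helpers instead of
 * hand-written assembly: atomic_sub_and_test() subtracts the bias and
 * returns true only when the result is zero, i.e. when no reader or
 * writer held the lock; on failure the bias is immediately added
 * back, exactly the undo step of _raw_write_lock() without the wait
 * loop.  Note that the rwlock_t is punned to atomic_t here, which
 * works because the counter is the first member of the struct.
 */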

#endif	/* _ASM_M32R_SPINLOCK_H */