MN10300: Add Panasonic AM34 subarch and implement SMP
[linux-2.6/x86.git] arch/mn10300/include/asm/spinlock.h

/* MN10300 spinlock support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

#define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) != 0)
#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
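
/*
 * Unlock is a single read-modify-write "bclr" clearing bit 0 of ->slock;
 * the "memory" clobber keeps the compiler from moving memory accesses out
 * of the critical section.
 */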
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
		"	bclr	1,(0,%0)	\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int ret;

	asm volatile(
		"	mov	1,%0		\n"
		"	bset	%0,(%1)		\n"
		"	bne	1f		\n"
		"	clr	%0		\n"
		"1:	xor	1,%0		\n"
		: "=d"(ret)
		: "a"(&lock->slock)
		: "memory", "cc");

	return ret;
}
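
/*
 * The contended case just retries the "bset": "bne 1b" loops for as long
 * as the lock bit was already set when we tried to set it.
 */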
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	asm volatile(
		"1:	bset	1,(0,%0)	\n"
		"	bne	1b		\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					unsigned long flags)
{
	int temp;

	asm volatile(
		"1:	bset	1,(0,%2)	\n"
		"	beq	3f		\n"
		"	mov	%1,epsw		\n"
		"2:	mov	(0,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	2b		\n"
		"	mov	%3,%0		\n"
		"	mov	%0,epsw		\n"
		"	nop			\n"
		"	nop			\n"
		"	bra	1b		\n"
		"3:				\n"
		: "=&d" (temp)
		: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
		: "memory", "cc");
}

#ifdef __KERNEL__
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x) ((int)(x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)

/*
 * On mn10300, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
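
/*
 * The counter starts at RW_LOCK_BIAS (conventionally 0x01000000, see
 * <asm/rwlock.h>).  Each reader subtracts 1 and a writer subtracts the
 * whole bias, so:
 *
 *	RW_LOCK_BIAS		unlocked
 *	RW_LOCK_BIAS - n	held by n readers
 *	0			held by one writer
 *	negative (sign bit)	contended - a writer and readers overlap
 *
 * The #if 0 branches below would use the out-of-line helpers built for
 * the atomic-operations unit; only the generic atomic_t fallback is
 * currently compiled in.
 */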
static inline void arch_read_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_lock(rw, "__read_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (atomic_dec_return(count) < 0)
			atomic_inc(count);
	}
#endif
}
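
/*
 * A writer claims the lock by subtracting the whole bias; any non-zero
 * result means readers or another writer are present, so the subtraction
 * is undone and the attempt repeated.
 */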
static inline void arch_write_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_lock(rw, "__write_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
			atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}
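
/*
 * Unlock simply reverses the lock-time adjustment: readers add 1 back,
 * writers add RW_LOCK_BIAS back.
 */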
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_inc(count);
	}
#endif
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}
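
/*
 * The trylock variants make the same counter adjustments but undo them
 * and return failure instead of spinning when the lock is contended.
 */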
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */