/*
 * linux/include/asm-arm/semaphore.h
 */
#ifndef __ASM_ARM_SEMAPHORE_H
#define __ASM_ARM_SEMAPHORE_H

#include <linux/linkage.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <asm/atomic.h>
#include <asm/proc/locks.h>

struct semaphore {
        atomic_t count;
        int sleepers;
        wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
        long __magic;
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
        , (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INIT(name,count) \
        { ATOMIC_INIT(count), 0, \
          __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
          __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
        __SEMAPHORE_INIT(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
        struct semaphore name = __SEMAPHORE_INIT(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)

extern inline void sema_init(struct semaphore *sem, int val)
{
        atomic_set(&sem->count, val);
        sem->sleepers = 0;
        init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
        sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX(struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
        sema_init(sem, 0);
}

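/*
 * Usage sketch (illustrative only, not part of the original header).
 * A semaphore declared as a mutex serialises a critical section; the
 * names "foo_sem" and "foo_frob" below are hypothetical:
 *
 *	static DECLARE_MUTEX(foo_sem);
 *
 *	static void foo_frob(void)
 *	{
 *		down(&foo_sem);
 *		... touch data shared with other sleepers ...
 *		up(&foo_sem);
 *	}
 *
 * For a semaphore embedded in a dynamically allocated object, call
 * sema_init(&obj->sem, 1) or init_MUTEX(&obj->sem) before first use.
 */
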
/*
 * special register calling convention
 */
asmlinkage void __down_failed(void);
asmlinkage int  __down_interruptible_failed(void);
asmlinkage int  __down_trylock_failed(void);
asmlinkage void __up_wakeup(void);

extern void __down(struct semaphore * sem);
extern int  __down_interruptible(struct semaphore * sem);
extern int  __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);

/*
 * This is ugly, but we want the default case to fall through.
 * "__down" is the actual routine that waits...
 */
extern inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        __down_op(sem, __down_failed);
}

/*
 * This is ugly, but we want the default case to fall through.
 * "__down_interruptible" is the actual routine that waits...
 */
extern inline int down_interruptible(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        return __down_op_ret(sem, __down_interruptible_failed);
}

extern inline int down_trylock(struct semaphore *sem)
{
#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        return __down_op_ret(sem, __down_trylock_failed);
}

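/*
 * Return-value sketch (illustrative, not part of the original header).
 * down_interruptible() returns 0 once the semaphore is held and a
 * negative error if a signal arrived first; down_trylock() returns 0
 * on success and non-zero if the semaphore could not be taken without
 * sleeping.  "foo_sem" is a hypothetical name:
 *
 *	if (down_interruptible(&foo_sem))
 *		return -ERESTARTSYS;
 *	...
 *	up(&foo_sem);
 *
 *	if (down_trylock(&foo_sem))
 *		return -EBUSY;
 */
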
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
extern inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif

        __up_op(sem, __up_wakeup);
}

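/*
 * Worked example of the count protocol described above (an illustrative
 * sketch, not part of the original header).  For a mutex the count
 * starts at 1; roughly:
 *
 *	count == 1	free
 *	A: down()	count -> 0, taken uncontended, no jump
 *	B: down()	count -> -1, B sleeps via the __down_failed path
 *	A: up()		count -> 0; it was negative, so __up_wakeup
 *			runs and B is woken
 *	B: up()		count -> 1, no waiters, no jump
 */
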
/* rw mutexes (should that be mutices? =) -- throw rw
 * spinlocks and semaphores together, and this is what we
 * end up with...
 *
 * The lock is initialized to BIAS. This way, a writer
 * subtracts BIAS and gets 0 for the case of an uncontended
 * lock. Readers decrement by 1 and see a positive value
 * when uncontended, negative if there are writers waiting
 * (in which case it goes to sleep).
 *
 * In terms of fairness, this should result in the lock
 * flopping back and forth between readers and writers
 * under heavy use.
 *
 *	-ben
 */

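/*
 * Worked bias arithmetic (illustrative, not part of the original
 * header), assuming RW_LOCK_BIAS is a large constant such as
 * 0x01000000:
 *
 *	count == BIAS		lock free
 *	reader: count -= 1	BIAS - 1 > 0, read lock granted
 *	reader: count -= 1	BIAS - 2 > 0, readers run in parallel
 *	writer: count -= BIAS	result < 0, writer sleeps until the
 *				readers drop the lock
 *	writer alone:		BIAS - BIAS == 0, uncontended write lock
 */
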
struct rw_semaphore {
        atomic_t count;
        volatile unsigned char write_bias_granted;
        volatile unsigned char read_bias_granted;
        volatile unsigned char pad1;
        volatile unsigned char pad2;
        wait_queue_head_t wait;
        wait_queue_head_t write_bias_wait;
#if WAITQUEUE_DEBUG
        long __magic;
        atomic_t readers;
        atomic_t writers;
#endif
};

#if WAITQUEUE_DEBUG
#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name,count) \
        { ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
          __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
          __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }

#define __DECLARE_RWSEM_GENERIC(name,count) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)

#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)

extern inline void init_rwsem(struct rw_semaphore *sem)
{
        atomic_set(&sem->count, RW_LOCK_BIAS);
        sem->read_bias_granted = 0;
        sem->write_bias_granted = 0;
        init_waitqueue_head(&sem->wait);
        init_waitqueue_head(&sem->write_bias_wait);
#if WAITQUEUE_DEBUG
        sem->__magic = (long)&sem->__magic;
        atomic_set(&sem->readers, 0);
        atomic_set(&sem->writers, 0);
#endif
}

extern struct rw_semaphore *__down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *__down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *__rwsem_wake(struct rw_semaphore *sem);

extern inline void down_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif
        __down_op_read(sem, __down_read_failed);
#if WAITQUEUE_DEBUG
        if (sem->write_bias_granted)
                BUG();
        if (atomic_read(&sem->writers))
                BUG();
        atomic_inc(&sem->readers);
#endif
}

extern inline void down_write(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
        CHECK_MAGIC(sem->__magic);
#endif
        __down_op_write(sem, __down_write_failed);
#if WAITQUEUE_DEBUG
        if (atomic_read(&sem->writers))
                BUG();
        if (atomic_read(&sem->readers))
                BUG();
        if (sem->read_bias_granted)
                BUG();
        if (sem->write_bias_granted)
                BUG();
        atomic_inc(&sem->writers);
#endif
}

extern inline void up_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
        if (sem->write_bias_granted)
                BUG();
        if (atomic_read(&sem->writers))
                BUG();
        atomic_dec(&sem->readers);
#endif
        __up_op_read(sem, __rwsem_wake);
}

extern inline void up_write(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
        if (sem->read_bias_granted)
                BUG();
        if (sem->write_bias_granted)
                BUG();
        if (atomic_read(&sem->readers))
                BUG();
        if (atomic_read(&sem->writers) != 1)
                BUG();
        atomic_dec(&sem->writers);
#endif
        __up_op_write(sem, __rwsem_wake);
}

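/*
 * Usage sketch (illustrative, not part of the original header).  Many
 * readers may hold the lock concurrently; a writer gets it exclusively.
 * "foo_rwsem" and the section bodies are hypothetical:
 *
 *	static DECLARE_RWSEM(foo_rwsem);
 *
 *	down_read(&foo_rwsem);
 *	... readers may run here in parallel ...
 *	up_read(&foo_rwsem);
 *
 *	down_write(&foo_rwsem);
 *	... exclusive access ...
 *	up_write(&foo_rwsem);
 */
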
#endif