[davej-history.git] include/asm-mips64/semaphore.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996  Linus Torvalds
 * Copyright (C) 1998, 1999, 2000  Ralf Baechle
 * Copyright (C) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_SEMAPHORE_H
#define _ASM_SEMAPHORE_H

#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct semaphore {
#ifdef __MIPSEB__
	atomic_t count;
	atomic_t waking;
#else
	atomic_t waking;
	atomic_t count;
#endif
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#ifdef __MIPSEB__
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), ATOMIC_INIT(0), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	__SEM_DEBUG_INIT(name) }
#else
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(0), ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	__SEM_DEBUG_INIT(name) }
#endif

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)

static inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	atomic_set(&sem->waking, 0);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	return ret;
}

/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 *
 * We must manipulate count and waking simultaneously and atomically.
 * Here, we do this by using ll/sc on the pair of 32-bit words.
 *
 * Pseudocode:
 *
 *   Decrement(sem->count)
 *   If(sem->count >= 0) {
 *      Return(SUCCESS)           // resource is free
 *   } else {
 *      If(sem->waking <= 0) {    // if no wakeup pending
 *         Increment(sem->count)  // undo decrement
 *         Return(FAILURE)
 *      } else {
 *         Decrement(sem->waking) // otherwise "steal" wakeup
 *         Return(SUCCESS)
 *      }
 *   }
 */
static inline int down_trylock(struct semaphore * sem)
{
	long ret, tmp, tmp2, sub;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	__asm__ __volatile__("
	.set	mips3

	0:	lld	%1, %4
		dli	%3, 0x0000000100000000
		dsubu	%1, %3
		li	%0, 0
		bgez	%1, 2f
		sll	%2, %1, 0
		blez	%2, 1f
		daddiu	%1, %1, -1
		b	2f
	1:
		daddu	%1, %1, %3
		li	%0, 1
	2:
		scd	%1, %4
		beqz	%1, 0b

	.set	mips0"
	: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
	: "m"(*sem)
	: "memory");

	return ret;
}

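/*
 * For illustration only, not part of the API: the #if 0 block below is
 * a plain-C sketch of the ll/sc sequence above, following the pseudocode
 * in the comment before down_trylock().  The two counters are read and
 * updated separately here, so this version is NOT atomic -- that is
 * exactly what the single 64-bit lld/scd pair above guarantees.
 */
#if 0
static inline int down_trylock_sketch(struct semaphore * sem)
{
	/* Decrement(sem->count) */
	if (atomic_dec_return(&sem->count) >= 0)
		return 0;			/* resource is free */
	if (atomic_read(&sem->waking) <= 0) {	/* no wakeup pending */
		atomic_inc(&sem->count);	/* undo decrement */
		return 1;			/* FAILURE */
	}
	atomic_dec(&sem->waking);		/* "steal" the wakeup */
	return 0;				/* SUCCESS */
}
#endif
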
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 */
static inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

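/*
 * Usage sketch (hypothetical code, all names made up): a mutex-style
 * semaphore protecting a critical section.  Kept under #if 0 so it is
 * never compiled.
 */
#if 0
static DECLARE_MUTEX(foo_sem);		/* hypothetical lock, count == 1 */

static int foo(void)
{
	int err;

	err = down_interruptible(&foo_sem);
	if (err)
		return err;		/* interrupted by a signal */
	/* ... critical section ... */
	up(&foo_sem);			/* calls __up() only if someone slept */
	return 0;
}
#endif
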
/*
 * rw mutexes (should that be mutices? =) -- throw rw spinlocks and
 * semaphores together, and this is what we end up with...
 *
 * The lock is initialized to BIAS.  This way, a writer subtracts BIAS and
 * gets 0 for the case of an uncontended lock.  Readers decrement by 1 and
 * see a positive value when uncontended, negative if there are writers
 * waiting (in which case it goes to sleep).
 *
 * The value 0x01000000 supports up to 128 processors and lots of processes.
 * BIAS must be chosen such that subtracting BIAS once per CPU will result
 * in the int remaining negative.  In terms of fairness, this should result
 * in the lock flopping back and forth between readers and writers under
 * heavy use.
 *
 * Once we start supporting machines with more than 128 CPUs, we should go
 * for using a 64bit atomic type instead of 32bit as counter.  We shall
 * probably go for bias 0x80000000 then, so that single sethi can set it.
 */
#define RW_LOCK_BIAS		0x01000000

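/*
 * Worked example of the bias arithmetic (illustration):
 *
 *   count == RW_LOCK_BIAS	lock is idle
 *   count == RW_LOCK_BIAS - n	held by n readers, no writer involved
 *   count == 0			held by one writer, uncontended
 *   count <  0			a writer holds the lock or is waiting;
 *				new readers and writers go to sleep
 *
 * A reader moves count by -1 and a writer by -RW_LOCK_BIAS, so any
 * writer involvement drives the count to zero or below.
 */
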
struct rw_semaphore {
	atomic_t		count;
	/* bit 0 means read bias granted;
	   bit 1 means write bias granted.  */
	unsigned long		granted;
	wait_queue_head_t	wait;
	wait_queue_head_t	write_bias_wait;
#if WAITQUEUE_DEBUG
	long			__magic;
	atomic_t		readers;
	atomic_t		writers;
#endif
};

#if WAITQUEUE_DEBUG
#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name,count) \
	{ ATOMIC_INIT(count), 0, \
	__WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
	__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }

#define __DECLARE_RWSEM_GENERIC(name,count) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)

#define DECLARE_RWSEM(name) \
	__DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS)
#define DECLARE_RWSEM_READ_LOCKED(name) \
	__DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1)
#define DECLARE_RWSEM_WRITE_LOCKED(name) \
	__DECLARE_RWSEM_GENERIC(name, 0)

static inline void init_rwsem(struct rw_semaphore *sem)
{
	atomic_set(&sem->count, RW_LOCK_BIAS);
	sem->granted = 0;
	init_waitqueue_head(&sem->wait);
	init_waitqueue_head(&sem->write_bias_wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
	atomic_set(&sem->readers, 0);
	atomic_set(&sem->writers, 0);
#endif
}

/* The expensive part is outlined.  */
extern void __down_read(struct rw_semaphore *sem, int count);
extern void __down_write(struct rw_semaphore *sem, int count);
extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers);

static inline void down_read(struct rw_semaphore *sem)
{
	int count;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	count = atomic_dec_return(&sem->count);
	if (count < 0) {
		__down_read(sem, count);
	}
	mb();

#if WAITQUEUE_DEBUG
	if (sem->granted & 2)
		BUG();
	if (atomic_read(&sem->writers))
		BUG();
	atomic_inc(&sem->readers);
#endif
}

static inline void down_write(struct rw_semaphore *sem)
{
	int count;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	count = atomic_sub_return(RW_LOCK_BIAS, &sem->count);
	if (count) {
		__down_write(sem, count);
	}
	mb();

#if WAITQUEUE_DEBUG
	if (atomic_read(&sem->writers))
		BUG();
	if (atomic_read(&sem->readers))
		BUG();
	if (sem->granted & 3)
		BUG();
	atomic_inc(&sem->writers);
#endif
}

/* When a reader does a release, the only significant case is when there
   was a writer waiting, and we've bumped the count to 0: we must wake
   the writer up.  */
static inline void up_read(struct rw_semaphore *sem)
{
	int count;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
	if (sem->granted & 2)
		BUG();
	if (atomic_read(&sem->writers))
		BUG();
	atomic_dec(&sem->readers);
#endif

	mb();
	count = atomic_inc_return(&sem->count);
	if (count == 0) {
		__rwsem_wake(sem, 0);
	}
}

/*
 * Releasing the writer is easy -- just release it and wake up any sleepers.
 */
static inline void up_write(struct rw_semaphore *sem)
{
	int count;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
	if (sem->granted & 3)
		BUG();
	if (atomic_read(&sem->readers))
		BUG();
	if (atomic_read(&sem->writers) != 1)
		BUG();
	atomic_dec(&sem->writers);
#endif

	mb();
	count = atomic_add_return(RW_LOCK_BIAS, &sem->count);
	if (count - RW_LOCK_BIAS < 0 && count >= 0) {
		/* Only do the wake if we're no longer negative.  */
		__rwsem_wake(sem, count);
	}
}

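/*
 * Usage sketch (hypothetical code, all names made up): readers share the
 * lock, a writer gets it exclusively.  Kept under #if 0 so it is never
 * compiled.
 */
#if 0
static DECLARE_RWSEM(bar_sem);		/* hypothetical, count == RW_LOCK_BIAS */

static void bar_get(void)
{
	down_read(&bar_sem);		/* many readers may hold this at once */
	/* ... read shared state ... */
	up_read(&bar_sem);		/* wakes a waiting writer if count hit 0 */
}

static void bar_set(void)
{
	down_write(&bar_sem);		/* exclusive; sleeps while others hold it */
	/* ... modify shared state ... */
	up_write(&bar_sem);		/* wakes sleepers once count is non-negative */
}
#endif
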
#endif /* _ASM_SEMAPHORE_H */