1 #ifndef _ALPHA_SEMAPHORE_H
2 #define _ALPHA_SEMAPHORE_H
5 * SMP- and interrupt-safe semaphores.
7 * (C) Copyright 1996 Linus Torvalds
8 * (C) Copyright 1996, 2000 Richard Henderson
11 #include <asm/current.h>
12 #include <asm/system.h>
13 #include <asm/atomic.h>
14 #include <asm/compiler.h> /* __builtin_expect */
16 #define DEBUG_SEMAPHORE 0
17 #define DEBUG_RW_SEMAPHORE 0
20 /* Careful, inline assembly knows about the position of these two. */
/* Units available; goes negative when tasks must block in down(). */
21 atomic_t count
__attribute__((aligned(8)));
/* Wakeups handed out by up() but not yet consumed by a sleeper. */
22 atomic_t waking
; /* biased by -1 */
/* Tasks sleeping in the contended down()/down_interruptible() path. */
24 wait_queue_head_t wait
;
/* Debug build: append a __magic initialiser to the static initialiser.
   NOTE(review): the #if WAITQUEUE_DEBUG / #else / #endif lines that
   select between these two definitions appear to be missing from this
   chunk — confirm against the full file. */
31 # define __SEM_DEBUG_INIT(name) , (long)&(name).__magic
/* Non-debug build: expands to nothing. */
33 # define __SEM_DEBUG_INIT(name)
/* Static initialiser: count as given, waking at its -1 bias,
   empty wait queue, plus the optional debug magic. */
36 #define __SEMAPHORE_INITIALIZER(name,count) \
37 { ATOMIC_INIT(count), ATOMIC_INIT(-1), \
38 __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
39 __SEM_DEBUG_INIT(name) }
/* A mutex is just a semaphore with an initial count of 1. */
41 #define __MUTEX_INITIALIZER(name) \
42 __SEMAPHORE_INITIALIZER(name,1)
/* Define and statically initialise a semaphore object. */
44 #define __DECLARE_SEMAPHORE_GENERIC(name,count) \
45 struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
/* Convenience declarators: unlocked (count 1) and locked (count 0). */
47 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
48 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
/*
 * Initialise a semaphore at runtime: count = val, waking at its
 * documented -1 bias, empty wait queue.
 * NOTE(review): the function braces and the #if WAITQUEUE_DEBUG guard
 * around the __magic assignment appear to be missing from this chunk.
 */
50 static inline void sema_init(struct semaphore
*sem
, int val
)
54 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
55 * except that gcc produces better initializing by parts yet.
/* Available units start at the caller-supplied value. */
58 atomic_set(&sem
->count
, val
);
/* 'waking' starts at its documented bias of -1. */
59 atomic_set(&sem
->waking
, -1);
/* No sleepers yet. */
60 init_waitqueue_head(&sem
->wait
);
/* Debug magic: points at itself so corruption is detectable. */
62 sem
->__magic
= (long)&sem
->__magic
;
/* Initialise *sem as an unlocked mutex (count 1).
   NOTE(review): the body is missing from this chunk — presumably
   sema_init(sem, 1); confirm against the full file. */
66 static inline void init_MUTEX (struct semaphore
*sem
)
/* Initialise *sem as a locked mutex (count 0).
   NOTE(review): the body is missing from this chunk — presumably
   sema_init(sem, 0); confirm against the full file. */
71 static inline void init_MUTEX_LOCKED (struct semaphore
*sem
)
/* Public entry points plus the out-of-line contention helpers.  The
   __down_failed*/__up_wakeup variants are only reached on the slow
   (contended) path. */
76 extern void down(struct semaphore
*);
77 extern void __down_failed(struct semaphore
*);
78 extern int down_interruptible(struct semaphore
*);
79 extern int __down_failed_interruptible(struct semaphore
*);
80 extern int down_trylock(struct semaphore
*);
81 extern void up(struct semaphore
*);
82 extern void __up_wakeup(struct semaphore
*);
85 * Hidden out of line code is fun, but extremely messy. Rely on newer
86 * compilers to do a respectable job with this. The contention cases
87 * are handled out of line in arch/alpha/kernel/semaphore.c.
/* Fast-path down(): decrement count; a negative result means the
   semaphore was already held, so fall into the out-of-line sleep path.
   NOTE(review): the function braces and the __down_failed(sem) call
   appear to be missing from this chunk. */
90 static inline void __down(struct semaphore
*sem
)
92 long count
= atomic_dec_return(&sem
->count
);
/* __builtin_expect: contention is the unlikely case. */
93 if (__builtin_expect(count
< 0, 0))
/* Interruptible fast-path down(): as __down, but the slow path may
   return non-zero when the sleep is interrupted by a signal.
   NOTE(review): the function braces and the trailing 'return 0;'
   appear to be missing from this chunk. */
97 static inline int __down_interruptible(struct semaphore
*sem
)
99 long count
= atomic_dec_return(&sem
->count
);
/* Contention is the unlikely case. */
100 if (__builtin_expect(count
< 0, 0))
101 return __down_failed_interruptible(sem
);
106 * down_trylock returns 0 on success, 1 if we failed to get the lock.
108 * We must manipulate count and waking simultaneously and atomically.
109 * Do this by using ll/sc on the pair of 32-bit words.
/* Non-blocking acquire.  The "equivalent C" comment below documents
   the load-locked/store-conditional assembly that updates the packed
   {count, waking} 64-bit pair in one atomic sequence.
   NOTE(review): most of the __asm__ body (the instruction string and
   input operands) is missing from this chunk; only the output-operand
   list survives.  Confirm against the full file before editing. */
112 static inline int __down_trylock(struct semaphore
* sem
)
114 long ret
, tmp
, tmp2
, sub
;
116 /* "Equivalent" C. Note that we have to do this all without
117 (taken) branches in order to be a valid ll/sc sequence.
121 sub = 0x0000000100000000;
122 ret = ((int)tmp <= 0); // count <= 0 ?
123 // Note that if count=0, the decrement overflows into
124 // waking, so cancel the 1 loaded above. Also cancel
125 // it if the lock was already free.
126 if ((int)tmp >= 0) sub = 0; // count >= 0 ?
127 ret &= ((long)tmp < 0); // waking < 0 ?
135 __asm__
__volatile__(
153 : "=&r"(ret
), "=&r"(tmp
), "=&r"(tmp2
), "=&r"(sub
)
/* Fast-path up(): atomically increment count and, while sleepers
   remain, carry a wakeup into 'waking' — both 32-bit words are
   updated in one ll/sc sequence, per the "equivalent C" below.
   NOTE(review): much of the __asm__ instruction string and the
   trailing __up_wakeup(sem) slow-path call are missing from this
   chunk — confirm against the full file before editing. */
160 static inline void __up(struct semaphore
*sem
)
162 long ret
, tmp
, tmp2
, tmp3
;
164 /* We must manipulate count and waking simultaneously and atomically.
165 Otherwise we have races between up and __down_failed_interruptible
166 waking up on a signal.
168 "Equivalent" C. Note that we have to do this all without
169 (taken) branches in order to be a valid ll/sc sequence.
173 ret = (int)tmp + 1; // count += 1;
174 tmp2 = tmp & 0xffffffff00000000; // extract waking
175 if (ret <= 0) // still sleepers?
176 tmp2 += 0x0000000100000000; // waking += 1;
177 tmp = ret & 0x00000000ffffffff; // insert count
178 tmp |= tmp2; // insert waking;
183 __asm__
__volatile__(
187 " zapnot %1,0xf0,%2\n"
190 " zapnot %0,0x0f,%1\n"
198 : "=&r"(ret
), "=&r"(tmp
), "=&r"(tmp2
), "=&r"(tmp3
)
199 : "m"(*sem
), "r"(0x0000000100000000)
202 if (__builtin_expect(ret
<= 0, 0))
/* When neither wait-queue nor semaphore debugging is enabled, the
   public entry points collapse to the inline fast paths above.
   NOTE(review): the bodies of down() and up() and the closing #endif
   are missing from this chunk. */
206 #if !WAITQUEUE_DEBUG && !DEBUG_SEMAPHORE
207 extern inline void down(struct semaphore
*sem
)
211 extern inline int down_interruptible(struct semaphore
*sem
)
213 return __down_interruptible(sem
);
215 extern inline int down_trylock(struct semaphore
*sem
)
217 return __down_trylock(sem
);
219 extern inline void up(struct semaphore
*sem
)
225 /* rw mutexes (should that be mutices? =) -- throw rw
226 * spinlocks and semaphores together, and this is what we
229 * The lock is initialized to BIAS. This way, a writer
230 * subtracts BIAS and gets 0 for the case of an uncontended
231 * lock. Readers decrement by 1 and see a positive value
232 * when uncontended, negative if there are writers waiting
233 * (in which case it goes to sleep).
235 * The value 0x01000000 supports up to 128 processors and
236 * lots of processes. BIAS must be chosen such that subtracting
237 * BIAS once per CPU will result in the int remaining
239 * In terms of fairness, this should result in the lock
240 * flopping back and forth between readers and writers
245 * Once we start supporting machines with more than 128 CPUs,
246 * we should go for using a 64bit atomic type instead of 32bit
247 * as counter. We shall probably go for bias 0x80000000 then,
248 * so that single sethi can set it.
/* Write-lock bias: a writer subtracts the whole bias, each reader
   subtracts 1; supports up to 128 CPUs per the comment above. */
253 #define RW_LOCK_BIAS 0x01000000
/* NOTE(review): the leading count/granted members and the closing
   brace of this struct appear to be missing from this chunk. */
255 struct rw_semaphore
{
257 /* bit 0 means read bias granted;
258 bit 1 means write bias granted. */
/* Sleepers waiting for the lock. */
260 wait_queue_head_t wait
;
/* Presumably writers waiting for the write bias — confirm against
   the slow-path code in arch/alpha/kernel/semaphore.c. */
261 wait_queue_head_t write_bias_wait
;
/* Debug build: zero-initialise the extra debug counters.
   NOTE(review): the #if/#else/#endif lines selecting between these
   two definitions appear to be missing from this chunk. */
270 #define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0)
272 #define __RWSEM_DEBUG_INIT /* */
/* Static initialiser: count, no bias granted (0), both wait queues,
   plus the optional debug fields. */
275 #define __RWSEM_INITIALIZER(name,count) \
276 { ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
277 __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
278 __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
/* Define and statically initialise a rw_semaphore object. */
280 #define __DECLARE_RWSEM_GENERIC(name,count) \
281 struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
/* Unlocked, read-locked, and write-locked static declarators. */
283 #define DECLARE_RWSEM(name) \
284 __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS)
285 #define DECLARE_RWSEM_READ_LOCKED(name) \
286 __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1)
287 #define DECLARE_RWSEM_WRITE_LOCKED(name) \
288 __DECLARE_RWSEM_GENERIC(name, 0)
/* Runtime initialiser: full bias (unlocked), empty wait queues, and
   the debug magic/counters.
   NOTE(review): the function braces and the debug #if guards around
   the __magic/readers/writers assignments appear to be missing from
   this chunk. */
290 static inline void init_rwsem(struct rw_semaphore
*sem
)
292 atomic_set (&sem
->count
, RW_LOCK_BIAS
);
294 init_waitqueue_head(&sem
->wait
);
295 init_waitqueue_head(&sem
->write_bias_wait
);
/* Debug magic: points at itself so corruption is detectable. */
297 sem
->__magic
= (long)&sem
->__magic
;
298 atomic_set(&sem
->readers
, 0);
299 atomic_set(&sem
->writers
, 0);
/* Public rwsem entry points and their out-of-line slow paths; the
   int argument of the helpers carries the count value observed on
   the fast path. */
303 extern void down_read(struct rw_semaphore
*);
304 extern void down_write(struct rw_semaphore
*);
305 extern void up_read(struct rw_semaphore
*);
306 extern void up_write(struct rw_semaphore
*);
307 extern void __down_read_failed(struct rw_semaphore
*, int);
308 extern void __down_write_failed(struct rw_semaphore
*, int);
309 extern void __rwsem_wake(struct rw_semaphore
*, int);
/* Reader fast path: subtract 1; a negative result means a writer
   holds or is waiting for the lock, so sleep out of line.
   NOTE(review): the function braces are missing from this chunk. */
311 static inline void __down_read(struct rw_semaphore
*sem
)
313 long count
= atomic_dec_return(&sem
->count
);
/* Contention is the unlikely case. */
314 if (__builtin_expect(count
< 0, 0))
315 __down_read_failed(sem
, count
);
/* Writer fast path: subtract the full bias; only an exact result of
   0 means the lock was uncontended — anything else sleeps out of
   line.  NOTE(review): the function braces are missing from this
   chunk. */
318 static inline void __down_write(struct rw_semaphore
*sem
)
320 long count
= atomic_sub_return(RW_LOCK_BIAS
, &sem
->count
);
/* Contention is the unlikely case. */
321 if (__builtin_expect(count
!= 0, 0))
322 __down_write_failed(sem
, count
);
325 /* When a reader does a release, the only significant case is when there
326 was a writer waiting, and we've bumped the count to 0, then we must
327 wake the writer up. */
/* NOTE(review): the declaration of 'count' and the function braces
   appear to be missing from this chunk. */
329 static inline void __up_read(struct rw_semaphore
*sem
)
333 count
= atomic_inc_return(&sem
->count
);
/* Reaching exactly 0 means the last reader left with a writer
   waiting — hand the lock over. */
334 if (__builtin_expect(count
== 0, 0))
335 __rwsem_wake(sem
, 0);
338 /* Releasing the writer is easy -- just release it and wake up
/* NOTE(review): the declarations of 'count' and 'wake' and the
   function braces appear to be missing from this chunk. */
341 static inline void __up_write(struct rw_semaphore
*sem
)
345 count
= atomic_add_return(RW_LOCK_BIAS
, &sem
->count
);
347 /* Only do the wake if we were, but are no longer, negative. */
348 wake
= ((int)(count
- RW_LOCK_BIAS
) < 0) && count
>= 0;
/* Waking is the unlikely case. */
349 if (__builtin_expect(wake
, 0))
350 __rwsem_wake(sem
, count
);
/* Without debugging enabled, the public rwsem entry points collapse
   to the inline fast paths above.
   NOTE(review): the wrapper bodies and the closing #endif are missing
   from this chunk. */
353 #if !WAITQUEUE_DEBUG && !DEBUG_RW_SEMAPHORE
354 extern inline void down_read(struct rw_semaphore
*sem
)
358 extern inline void down_write(struct rw_semaphore
*sem
)
362 extern inline void up_read(struct rw_semaphore
*sem
)
366 extern inline void up_write(struct rw_semaphore
*sem
)