include/asm-powerpc/semaphore.h
#ifndef _ASM_POWERPC_SEMAPHORE_H
#define _ASM_POWERPC_SEMAPHORE_H

/*
 * Remove spinlock-based RW semaphores; RW semaphore definitions are
 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
 * Rework semaphores to use atomic_dec_if_positive.
 * -- Paul Mackerras (paulus@samba.org)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
	.count = ATOMIC_INIT(n), \
	.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)
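
/*
 * Example (illustrative only; foo_sem is an invented name, not part of
 * this header): with the macros above, a declaration such as
 *
 *	DECLARE_MUTEX(foo_sem);
 *
 * expands to
 *
 *	struct semaphore foo_sem = {
 *		.count = ATOMIC_INIT(1),
 *		.wait = __WAIT_QUEUE_HEAD_INITIALIZER((foo_sem).wait)
 *	};
 *
 * i.e. a semaphore that starts out available to exactly one holder,
 * while DECLARE_MUTEX_LOCKED() produces the same thing with count 0.
 */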
static inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

static inline void down(struct semaphore * sem)
{
	might_sleep();

	/*
	 * Try to get the semaphore, take the slow path if we fail.
	 */
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		__down(sem);
}

static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();

	if (unlikely(atomic_dec_return(&sem->count) < 0))
		ret = __down_interruptible(sem);
	return ret;
}

static inline int down_trylock(struct semaphore * sem)
{
	return atomic_dec_if_positive(&sem->count) < 0;
}

static inline void up(struct semaphore * sem)
{
	if (unlikely(atomic_inc_return(&sem->count) <= 0))
		__up(sem);
}
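
/*
 * Worked example of the counting above (illustrative only): for a
 * semaphore initialised with DECLARE_MUTEX (count == 1), the first
 * down() takes the count 1 -> 0 and returns immediately; a second
 * task's down() takes it 0 -> -1, so that task calls __down() and
 * sleeps on `wait'.  When the holder calls up(), the count goes
 * -1 -> 0, which is <= 0, so __up() is called to wake a sleeper.
 * down_trylock() uses atomic_dec_if_positive(), which only performs
 * the decrement while the count is still positive, so a failed
 * trylock never pushes the count negative; it returns non-zero on
 * failure and 0 when the semaphore was acquired.
 */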
#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_SEMAPHORE_H */
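
A minimal usage sketch of this semaphore API as it would typically appear in
driver code. The names foo_lock and foo_do_work() are hypothetical and not
part of this header; returning -ERESTARTSYS on interruption is one common
convention, not something this header mandates.

	#include <asm/semaphore.h>	/* this header, via the asm/ include path */
	#include <linux/errno.h>	/* for -ERESTARTSYS */

	static DECLARE_MUTEX(foo_lock);		/* count starts at 1: unlocked */

	static int foo_do_work(void)
	{
		/* Sleep until the semaphore is ours, or a signal arrives. */
		if (down_interruptible(&foo_lock))
			return -ERESTARTSYS;

		/* ... critical section: only one task runs this at a time ... */

		up(&foo_lock);			/* release; wakes one sleeper, if any */
		return 0;
	}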