#ifndef _BFIN_SEMAPHORE_H
#define _BFIN_SEMAPHORE_H

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <asm/atomic.h>

/*
 * Interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * BFIN version by akbar hussain Lineo Inc April 2001
 */

struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
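
/*
 * Illustrative usage sketch (not part of the original header; the name
 * "mydev_sem" is hypothetical): a driver can serialize access to shared
 * state with a statically declared mutex-style semaphore:
 *
 *	static DECLARE_MUTEX(mydev_sem);
 *
 *	down(&mydev_sem);
 *	... critical section ...
 *	up(&mydev_sem);
 */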

static inline void sema_init(struct semaphore *sem, int val)
{
	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}
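
/*
 * Illustrative sketch (hypothetical names): a semaphore embedded in a
 * dynamically allocated object cannot use DECLARE_MUTEX(), so it is set
 * up at runtime with sema_init() or init_MUTEX():
 *
 *	struct mydev {
 *		struct semaphore lock;
 *	};
 *
 *	struct mydev *dev = kmalloc(sizeof(*dev), GFP_KERNEL);
 *	init_MUTEX(&dev->lock);		(same as sema_init(&dev->lock, 1))
 */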

asmlinkage void __down(struct semaphore *sem);
asmlinkage int __down_interruptible(struct semaphore *sem);
asmlinkage int __down_trylock(struct semaphore *sem);
asmlinkage void __up(struct semaphore *sem);

extern spinlock_t semaphore_wake_lock;

/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits.
 */
static inline void down(struct semaphore *sem)
{
	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

static inline int down_interruptible(struct semaphore *sem)
{
	int ret = 0;

	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	return ret;
}
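
/*
 * Illustrative sketch (hypothetical name "mydev_sem"): down_interruptible()
 * returns nonzero if the sleep was interrupted by a signal, so callers
 * typically back out and let the syscall be restarted:
 *
 *	if (down_interruptible(&mydev_sem))
 *		return -ERESTARTSYS;
 *	... critical section ...
 *	up(&mydev_sem);
 */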

static inline int down_trylock(struct semaphore *sem)
{
	int ret = 0;

	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_trylock(sem);
	return ret;
}
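
/*
 * Illustrative sketch (hypothetical name "mydev_sem"): down_trylock()
 * never sleeps; it returns nonzero when the semaphore could not be taken
 * immediately, which makes it usable where blocking is not allowed:
 *
 *	if (down_trylock(&mydev_sem))
 *		return -EBUSY;		(could not acquire, do not sleep)
 *	... critical section ...
 *	up(&mydev_sem);
 */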

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
static inline void up(struct semaphore *sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}
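
/*
 * Worked example of the counting (assuming a DECLARE_MUTEX, count == 1):
 * the first down() drops count to 0 and proceeds; a second caller drops
 * it to -1 and sleeps in __down().  The matching up() raises count back
 * to 0, which is still <= 0, so __up() runs to wake the sleeper.  With no
 * contention, count simply goes 1 -> 0 -> 1 and neither slow path is taken.
 */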

#endif				/* __ASSEMBLY__ */
#endif				/* _BFIN_SEMAPHORE_H */