#ifndef ASMARM_SEMAPHORE_HELPER_H
#define ASMARM_SEMAPHORE_HELPER_H

/*
 * These two _must_ execute atomically wrt each other.
 */
static inline void wake_one_more(struct semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (atomic_read(&sem->count) <= 0)
		sem->waking++;	/* leave a wakeup credit for a sleeper to consume */
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}

static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;	/* consume one wakeup credit */
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
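
/*
 * Illustration (not part of this header): a minimal sketch of how a
 * sleeping down()/up() pair typically uses these two helpers, assuming
 * the usual wait-queue plumbing.  The names example_down() and
 * example_up() are hypothetical; the real arch code differs in detail.
 */
#if 0
static void example_down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue(&sem->wait, &wait);
	for (;;) {
		tsk->state = TASK_UNINTERRUPTIBLE;
		/* consume a wakeup credit left by wake_one_more() */
		if (waking_non_zero(sem))
			break;
		schedule();
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
}

static void example_up(struct semaphore *sem)
{
	wake_one_more(sem);	/* leave a credit for exactly one sleeper */
	wake_up(&sem->wait);	/* ...and kick the wait queue */
}
#endif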

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count decrement done by down_interruptible() while
 * we are still protected by the spinlock, so that this atomic_inc() is
 * atomic with respect to the atomic_read() in wake_one_more(); otherwise
 * we can race. -arca
 */
static inline int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;			/* consume a wakeup credit: we own the semaphore */
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);	/* undo down_interruptible()'s decrement */
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
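
/*
 * Illustration (not part of this header): a hypothetical
 * example_down_interruptible() showing how a caller would act on the
 * three return values above.  On -EINTR the helper has already restored
 * sem->count, so the caller only backs out of the wait queue; the real
 * arch code differs in detail.
 */
#if 0
static int example_down_interruptible(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int ret = 0;

	add_wait_queue(&sem->wait, &wait);
	for (;;) {
		int waking;

		tsk->state = TASK_INTERRUPTIBLE;
		waking = waking_non_zero_interruptible(sem, tsk);
		if (waking > 0)			/* got the lock */
			break;
		if (waking < 0) {		/* signal: count already undone */
			ret = -EINTR;
			break;
		}
		schedule();			/* 0: go to sleep */
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	return ret;
}
#endif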

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count decrement done by down_trylock() while we
 * are still protected by the spinlock, so that this atomic_inc() is
 * atomic with respect to the atomic_read() in wake_one_more(); otherwise
 * we can race. -arca
 */
static inline int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking <= 0)
		atomic_inc(&sem->count);	/* undo down_trylock()'s decrement: we failed */
	else {
		sem->waking--;			/* consume a wakeup credit: we got the lock */
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
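
/*
 * Illustration (not part of this header): in the assumed caller,
 * down_trylock() first decrements sem->count and only drops into this
 * slow path when the count went negative.  The helper then either
 * consumes a wakeup credit (return 0, lock acquired) or puts the count
 * back and reports failure (return 1).  The name
 * example_down_trylock_slowpath() is hypothetical.
 */
#if 0
static int example_down_trylock_slowpath(struct semaphore *sem)
{
	return waking_non_zero_trylock(sem);	/* 0 = got the lock, 1 = failed */
}
#endif

#endif /* ASMARM_SEMAPHORE_HELPER_H */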