#ifndef __ASM_SH64_SEMAPHORE_HELPER_H
#define __ASM_SH64_SEMAPHORE_HELPER_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/semaphore-helper.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 */
#include <asm/errno.h>
/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */
/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore * sem)
{
	atomic_inc((atomic_t *)&sem->sleepers);
}
static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
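/*
 * Illustrative sketch (not part of the original file): one way an
 * up()/down() pair could sit on top of wake_one_more() and
 * waking_non_zero().  The wait-queue bookkeeping a real implementation
 * needs to avoid lost wakeups is elided, and everything here except
 * the two helpers is hypothetical.
 */
#if 0	/* example only, not compiled */
static void __down_example(struct semaphore *sem)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (waking_non_zero(sem))	/* consumed a pending wakeup */
			break;
		schedule();			/* none pending: sleep */
	}
	__set_current_state(TASK_RUNNING);
}

static void __up_example(struct semaphore *sem)
{
	wake_one_more(sem);	/* publish one wakeup; a real up() would
				 * also wake_up() the semaphore's queue */
}
#endif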
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count decrement made by down_interruptible()
 * while we are protected by the spinlock, so that this atomic_inc() is
 * atomic with respect to the atomic_read() in wake_one_more();
 * otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers > 0) {
		sem->sleepers--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		/* a signal is pending: undo the count decrement and bail */
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
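/*
 * Illustrative sketch (not part of the original file): a simplified
 * down_interruptible() slow path showing how the three return values
 * documented above would be consumed.  Wait-queue handling is elided;
 * only waking_non_zero_interruptible() comes from this header, the
 * rest is hypothetical.
 */
#if 0	/* example only, not compiled */
static int __down_interruptible_example(struct semaphore *sem)
{
	int ret;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = waking_non_zero_interruptible(sem, current);
		if (ret)		/* 1: got it, -EINTR: signalled */
			break;
		schedule();		/* 0: go to sleep */
	}
	__set_current_state(TASK_RUNNING);
	return ret == 1 ? 0 : ret;	/* 0 on success, -EINTR on signal */
}
#endif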
/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count decrement made by down_trylock() while
 * we are protected by the spinlock, so that this atomic_inc() is
 * atomic with respect to the atomic_read() in wake_one_more();
 * otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->sleepers <= 0)
		atomic_inc(&sem->count);	/* no wakeup pending: undo the decrement */
	else {
		sem->sleepers--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
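/*
 * Illustrative sketch (not part of the original file): how down_trylock()
 * might pair with the helper above, assuming a struct semaphore whose
 * count is an atomic_t (which the atomic_inc() calls here imply).  The
 * function name and the atomic_dec_return() step are hypothetical.
 */
#if 0	/* example only, not compiled */
static int down_trylock_example(struct semaphore *sem)
{
	/* Speculatively take the count; non-negative means uncontended. */
	if (atomic_dec_return(&sem->count) >= 0)
		return 0;
	/* Contended: either consume a pending wakeup (returns 0) or let
	 * the helper undo our decrement under the lock (returns 1). */
	return waking_non_zero_trylock(sem);
}
#endif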
#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */