/* arch/parisc/kernel/semaphore.c */

/*
 * Just taken from the alpha implementation.
 * This may not work well.
 */
/*
 * Generic semaphore code.  Buyer beware.  Do your own
 * specific changes in <asm/semaphore-helper.h>
 */

#include <linux/sched.h>
#include <asm/semaphore-helper.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * waking_non_zero() (from asm/semaphore.h) must execute
 * atomically.
 *
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up but
 * only the one that gets to the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
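/*
 * For reference, a minimal sketch of the kind of waking_non_zero()
 * that <asm/semaphore-helper.h> is expected to provide: atomically
 * consume one "waking" token under a spinlock.  Illustrative only
 * and kept out of the build; the lock name is an assumption.
 */
#if 0
static spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED;

static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;		/* we won the race for this wakeup */
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
#endif
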
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}

/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative for signalled out of the function.
 *
 * If called from __down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */
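/*
 * Caller-side sketch (hypothetical driver code, not part of this
 * file): down() sleeps unkillably, while down_interruptible() must
 * have its return value checked so that a signal can abort the wait.
 */
#if 0
static DECLARE_MUTEX(dev_sem);		/* semaphore initialized to 1 */

static int dev_do_io(void)
{
	if (down_interruptible(&dev_sem))
		return -EINTR;		/* interrupted by a signal, gave up */
	/* ... guarded section ... */
	up(&dev_sem);
	return 0;
}
#endif
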
#define DOWN_HEAD(task_state)						\
	current->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {
#define DOWN_TAIL(task_state)			\
		current->state = (task_state);	\
	}					\
	current->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);
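
/*
 * With the two macros above, __down() below expands (roughly) to:
 *
 *	current->state = TASK_UNINTERRUPTIBLE;
 *	add_wait_queue(&sem->wait, &wait);
 *	for (;;) {
 *		if (waking_non_zero(sem))
 *			break;
 *		schedule();
 *		current->state = TASK_UNINTERRUPTIBLE;
 *	}
 *	current->state = TASK_RUNNING;
 *	remove_wait_queue(&sem->wait, &wait);
 */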
void __down(struct semaphore * sem)
{
	DECLARE_WAITQUEUE(wait, current);

	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}

int __down_interruptible(struct semaphore * sem)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, current);
	if (ret) {
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}

int __down_trylock(struct semaphore * sem)
{
	return waking_non_zero_trylock(sem);
}

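/*
 * Caller-side note: down_trylock() returns 0 when the semaphore was
 * acquired and nonzero when it could not be taken without sleeping,
 * so the usual idiom is:
 *
 *	if (down_trylock(&sem))
 *		return -EBUSY;	(could not get it, and we must not sleep)
 */
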
/* Wait for the lock to become unbiased.  Readers
 * are non-exclusive. =)
 */
void down_read_failed(struct rw_semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	__up_read(sem);	/* this takes care of granting the lock */

	add_wait_queue(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	current->state = TASK_RUNNING;
}

void down_read_failed_biased(struct rw_semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&sem->wait, &wait);	/* put ourselves at the head of the list */

	for (;;) {
		if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
			break;
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		if (!sem->read_bias_granted)
			schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	current->state = TASK_RUNNING;
}

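/*
 * Note on the loop above: the plain read of read_bias_granted acts as
 * a cheap pre-test, so the bus-locked xchg() is only attempted when
 * the grant looks available; the xchg() itself is what atomically
 * claims the grant against any other waiter.
 */
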
/* Wait for the lock to become unbiased.  Since we're
 * a writer, we'll make ourselves exclusive.
 */
void down_write_failed(struct rw_semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	__up_write(sem);	/* this takes care of granting the lock */

	add_wait_queue_exclusive(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(current, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
		if (atomic_read(&sem->count) >= 0)
			break;	/* we must attempt to acquire or bias the lock */
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	current->state = TASK_RUNNING;
}

void down_write_failed_biased(struct rw_semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&sem->write_bias_wait, &wait);	/* put ourselves at the end of the list */

	for (;;) {
		if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
			break;
		set_task_state(current, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
		if (!sem->write_bias_granted)
			schedule();
	}

	remove_wait_queue(&sem->write_bias_wait, &wait);
	current->state = TASK_RUNNING;

	/* if the lock is currently unbiased, awaken the sleepers
	 * FIXME: this wakes up the readers early in a bit of a
	 * stampede -> bad!
	 */
	if (atomic_read(&sem->count) >= 0)
		wake_up(&sem->wait);
}

/* Called when someone has done an up that transitioned from
 * negative to non-negative, meaning that the lock has been
 * granted to whoever owned the bias.
 */
void rwsem_wake_readers(struct rw_semaphore *sem)
{
	if (xchg(&sem->read_bias_granted, 1))
		BUG();
	wake_up(&sem->wait);
}

void rwsem_wake_writer(struct rw_semaphore *sem)
{
	if (xchg(&sem->write_bias_granted, 1))
		BUG();
	wake_up(&sem->write_bias_wait);
}
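
/*
 * For context, a sketch of how an up() slow path might dispatch to the
 * two wake functions above (the __rwsem_wake name and its "readers"
 * argument are assumptions, not part of this file):
 */
#if 0
void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers)
{
	if (readers)
		rwsem_wake_readers(sem);	/* grant the bias to the readers */
	else
		rwsem_wake_writer(sem);		/* grant the bias to one writer */
}
#endif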