/*
 * Generic semaphore code. Buyer beware. Do your own
 * specific changes in <asm/semaphore-helper.h>
 */
#include <linux/sched.h>
#include <asm/semaphore-helper.h>
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to sleep, while the "waking" variable is
 * incremented when the "up()" code goes to wake up waiting
 * processes.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * waking_non_zero() (from asm/semaphore.h) must execute
 * atomically.
 *
 * When __up() is called, the count was negative before
 * incrementing it, and we need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to
 * wake up and exit.  ALL waiting processes actually wake up but
 * only the one that gets to the "waking" field first will gate
 * through and acquire the semaphore.  The others will go back
 * to sleep.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
void __up(struct semaphore *sem)
{
	wake_one_more(sem);
	wake_up(&sem->wait);
}
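/*
 * Illustrative sketch (not part of this file): the inline "up()" in
 * <asm/semaphore.h> is conceptually the following, reaching __up()
 * above only on contention.  The real versions are arch-specific and
 * often hand-written assembly; atomic_inc_return() here is approximate.
 *
 *	static inline void up(struct semaphore *sem)
 *	{
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up(sem);	// count was negative: waiters exist
 *	}
 */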
/*
 * Perform the "down" function.  Return zero for semaphore acquired,
 * return negative for signalled out of the function.
 *
 * If called from __down, the return is ignored and the wait loop is
 * not interruptible.  This means that a task waiting on a semaphore
 * using "down()" cannot be killed until someone does an "up()" on
 * the semaphore.
 *
 * If called from __down_interruptible, the return value gets checked
 * upon return.  If the return value is negative then the task continues
 * with the negative value in the return register (it can be tested by
 * the caller).
 *
 * Either form may be used in conjunction with "up()".
 */
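/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally use the non-underscore inline wrappers and check the return
 * value only for the interruptible form.  DECLARE_MUTEX and the error
 * code below are conventional choices, not something this file mandates.
 *
 *	static DECLARE_MUTEX(example_lock);	// semaphore, count == 1
 *
 *	int example(void)
 *	{
 *		if (down_interruptible(&example_lock))
 *			return -EINTR;	// a signal broke the wait
 *		// ... critical section ...
 *		up(&example_lock);
 *		return 0;
 *	}
 */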
#define DOWN_HEAD(task_state)						\
	current->state = (task_state);					\
	add_wait_queue(&sem->wait, &wait);				\
									\
	/*								\
	 * Ok, we're set up.  sem->count is known to be less than zero	\
	 * so we must wait.						\
	 *								\
	 * We can let go the lock for purposes of waiting.		\
	 * We re-acquire it after awaking so as to protect		\
	 * all semaphore operations.					\
	 *								\
	 * If "up()" is called before we call waking_non_zero() then	\
	 * we will catch it right away.  If it is called later then	\
	 * we will have to go through a wakeup cycle to catch it.	\
	 *								\
	 * Multiple waiters contend for the semaphore lock to see	\
	 * who gets to gate through and who has to wait some more.	\
	 */								\
	for (;;) {
#define DOWN_TAIL(task_state)			\
		current->state = (task_state);	\
	}					\
	current->state = TASK_RUNNING;		\
	remove_wait_queue(&sem->wait, &wait);
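/*
 * For reference (added sketch): DOWN_HEAD and DOWN_TAIL bracket an open
 * for (;;) loop, so __down() below expands to roughly:
 *
 *	current->state = TASK_UNINTERRUPTIBLE;
 *	add_wait_queue(&sem->wait, &wait);
 *	for (;;) {
 *		if (waking_non_zero(sem))
 *			break;
 *		schedule();
 *		current->state = TASK_UNINTERRUPTIBLE;
 *	}
 *	current->state = TASK_RUNNING;
 *	remove_wait_queue(&sem->wait, &wait);
 */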
void __down(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	DOWN_HEAD(TASK_UNINTERRUPTIBLE)
	if (waking_non_zero(sem))
		break;
	schedule();
	DOWN_TAIL(TASK_UNINTERRUPTIBLE)
}
int __down_interruptible(struct semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	DOWN_HEAD(TASK_INTERRUPTIBLE)

	ret = waking_non_zero_interruptible(sem, current);
	if (ret) {
		if (ret == 1)
			/* ret != 0 only if we get interrupted -arca */
			ret = 0;
		break;
	}
	schedule();
	DOWN_TAIL(TASK_INTERRUPTIBLE)
	return ret;
}
int __down_trylock(struct semaphore *sem)
{
	return waking_non_zero_trylock(sem);
}
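/*
 * Usage sketch (illustrative): the inline down_trylock() wrapper never
 * sleeps, so it is usable where blocking is forbidden; nonzero means
 * the semaphore could not be taken.  -EAGAIN is just one conventional
 * way for a caller to report that.
 *
 *	if (down_trylock(&example_lock))
 *		return -EAGAIN;		// contended, would have slept
 *	// ... critical section ...
 *	up(&example_lock);
 */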
/* Wait for the lock to become unbiased.  Readers
 * are non-exclusive. =)
 */
void down_read_failed(struct rw_semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	__up_read(sem);	/* this takes care of granting the lock */

	add_wait_queue(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	current->state = TASK_RUNNING;
}
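/*
 * Call-path sketch (approximate, not from this file): the inline
 * down_read()/down_write() fast paths in <asm/semaphore.h> adjust
 * sem->count atomically and reach the *_failed functions here only
 * when that update says the lock is contended.  In outline, for a
 * reader:
 *
 *	static inline void down_read(struct rw_semaphore *sem)
 *	{
 *		if (atomic_dec_return(&sem->count) < 0)
 *			down_read_failed(sem);	// slow path, may sleep
 *	}
 *
 * The real fast paths are hand-written assembly and also route through
 * the biased variants; this shows only the shape of the control flow.
 */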
void down_read_failed_biased(struct rw_semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&sem->wait, &wait);	/* put ourselves at the head of the list */

	for (;;) {
		if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
			break;
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		if (!sem->read_bias_granted)
			schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	current->state = TASK_RUNNING;
}
/* Wait for the lock to become unbiased. Since we're
 * a writer, we'll make ourselves exclusive.
 */
void down_write_failed(struct rw_semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	__up_write(sem);	/* this takes care of granting the lock */

	add_wait_queue_exclusive(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;	/* we must attempt to acquire or bias the lock */
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	current->state = TASK_RUNNING;
}
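/*
 * Design note (added): add_wait_queue_exclusive() queues the writer as
 * an exclusive waiter, so a wake_up() on sem->wait wakes every
 * non-exclusive reader ahead of it but at most one exclusive writer,
 * avoiding a thundering herd of writers.
 */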
void down_write_failed_biased(struct rw_semaphore *sem)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&sem->write_bias_wait, &wait);	/* put ourselves at the end of the list */

	for (;;) {
		if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
			break;
		set_task_state(current, TASK_UNINTERRUPTIBLE);
		if (!sem->write_bias_granted)
			schedule();
	}

	remove_wait_queue(&sem->write_bias_wait, &wait);
	current->state = TASK_RUNNING;

	/* if the lock is currently unbiased, awaken the sleepers
	 * FIXME: this wakes up the readers early in a bit of a
	 * stampede -> bad!
	 */
	if (atomic_read(&sem->count) >= 0)
		wake_up(&sem->wait);
}
/* Called when someone has done an up that transitioned from
 * negative to non-negative, meaning that the lock has been
 * granted to whomever owned the bias.
 */
void rwsem_wake_readers(struct rw_semaphore *sem)
{
	if (xchg(&sem->read_bias_granted, 1))
		BUG();
	wake_up(&sem->wait);
}
void rwsem_wake_writer(struct rw_semaphore *sem)
{
	if (xchg(&sem->write_bias_granted, 1))
		BUG();
	wake_up(&sem->write_bias_wait);
}
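/*
 * Call-path sketch (approximate, not from this file): the arch
 * up_read()/up_write() slow paths call one of these once the count
 * transition described above happens, choosing the writer queue or the
 * reader side depending on who owns the bias.  The xchg()-and-BUG()
 * pattern asserts that a grant flag is never set twice without being
 * consumed by a waiter in between.
 */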