/*
 *  ARM semaphore implementation, taken from
 *
 *  i386 semaphore implementation.
 *
 *  (C) Copyright 1999 Linus Torvalds
 *
 *  Modified for ARM by Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/sched.h>

#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
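/*
 * Illustrative sketch, not part of the original file: a plain C11
 * userspace model of the two-way counter described above (all names
 * here are hypothetical).  A free semaphore has count == 1; every
 * contender atomically decrements it, and the slow path run under the
 * spinlock folds the other sleepers' decrements back into the count so
 * that at most one decrement stays "charged" while everyone waits.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

struct sem_model {
	atomic_int count;		/* 1 = free, <= 0 = held/contended */
	int sleepers;			/* guarded by a lock in the real code */
};

/* models atomic_add_negative(): add i to *v, report whether the result is negative */
static bool model_add_negative(int i, atomic_int *v)
{
	return atomic_fetch_add(v, i) + i < 0;
}

/* One pass of the slow-path fixup, as done under semaphore_lock below. */
static bool model_down_slow(struct sem_model *sem)
{
	int sleepers = ++sem->sleepers;

	if (!model_add_negative(sleepers - 1, &sem->count)) {
		sem->sleepers = 0;	/* got it: nothing left to fold in */
		return true;
	}
	sem->sleepers = 1;		/* still contended: only our -1 remains */
	return false;			/* caller would schedule() and retry */
}
#endif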
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
void __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}
int __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
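/*
 * Illustrative sketch, not part of the original file: the "single
 * cmpxchg" trylock the comment above alludes to, modelled with C11
 * atomics (hypothetical names; the kernel keeps the two-counter scheme
 * instead so it also works on CPUs without cmpxchg).  Returns 0 when
 * acquired, nonzero on failure, matching down_trylock()'s convention.
 */
#if 0
#include <stdatomic.h>

static int cmpxchg_trylock(atomic_int *count)
{
	int old = atomic_load(count);

	/* only decrement if the semaphore is currently free */
	while (old > 0) {
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return 0;	/* acquired */
	}
	return 1;	/* contended - a blocking caller takes the slow path */
}
#endif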
struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue(&sem->wait, &wait);	/* put ourselves at the head of the list */

	for (;;) {
		if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
			break;
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!sem->read_bias_granted)
			schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	return sem;
}
struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue_exclusive(&sem->write_bias_wait, &wait);	/* put ourselves at the end of the list */

	for (;;) {
		if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
			break;
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!sem->write_bias_granted)
			schedule();
	}

	remove_wait_queue(&sem->write_bias_wait, &wait);
	tsk->state = TASK_RUNNING;

	/* if the lock is currently unbiased, awaken the sleepers
	 * FIXME: this wakes up the readers early in a bit of a
	 * stampede -> bad!
	 */
	if (atomic_read(&sem->count) >= 0)
		wake_up(&sem->wait);

	return sem;
}
/* Wait for the lock to become unbiased. Readers
 * are non-exclusive. =)
 */
struct rw_semaphore *down_read_failed(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	/* this takes care of granting the lock */
	__up_op_read(sem, __rwsem_wake);

	add_wait_queue(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	return sem;
}
/* Wait for the lock to become unbiased. Since we're
 * a writer, we'll make ourselves exclusive.
 */
struct rw_semaphore *down_write_failed(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	/* this takes care of granting the lock */
	__up_op_write(sem, __rwsem_wake);

	add_wait_queue_exclusive(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;	/* we must attempt to acquire or bias the lock */
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	return sem;
}
/* Called when someone has done an up that transitioned from
 * negative to non-negative, meaning that the lock has been
 * granted to whomever owned the bias.
 */
struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem)
{
	if (xchg(&sem->read_bias_granted, 1))
		BUG();
	wake_up(&sem->wait);
	return sem;
}

struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem)
{
	if (xchg(&sem->write_bias_granted, 1))
		BUG();
	wake_up(&sem->write_bias_wait);
	return sem;
}
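/*
 * Illustrative sketch, not part of the original file: the xchg() bias
 * handoff above in miniature, using C11 atomics (hypothetical names).
 * The granter publishes with an atomic exchange and the sleeper
 * consumes with another, so one grant can never be taken twice.
 */
#if 0
#include <assert.h>
#include <stdatomic.h>

static atomic_int bias_granted;

/* granter side: the grant must not already be pending */
static void model_grant(void)
{
	int old = atomic_exchange(&bias_granted, 1);

	assert(old == 0);	/* the kernel calls BUG() here */
	(void)old;
}

/* sleeper side: returns nonzero exactly once per grant */
static int model_try_consume(void)
{
	return atomic_exchange(&bias_granted, 0);
}
#endif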
/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * ip contains the semaphore pointer on entry. Save the C-clobbered
 * registers (r0 to r3 and lr), but not ip, as we use it as a return
 * value in some cases.
 */
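/*
 * Illustrative sketch, not part of the original file: roughly what a
 * caller-side inline stub honouring the contract above might look like
 * (assumed instructions, not atomic, uniprocessor only - the real
 * stubs live in <asm/semaphore.h>).  The fast path decrements the
 * count in place and only branches out to __down_failed on contention,
 * with the semaphore pointer in ip.
 */
#if 0
static inline void down_sketch(struct semaphore *sem)
{
	__asm__ __volatile__(
	"	mov	ip, %0		@ semaphore pointer for the slow path\n"
	"	ldr	lr, [ip]	@ load count (sketch only - not atomic)\n"
	"	subs	lr, lr, #1	@ count--\n"
	"	str	lr, [ip]\n"
	"	blmi	__down_failed	@ went negative: there is contention\n"
	:
	: "r" (sem)
	: "ip", "lr", "cc", "memory");
}
#endif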
301 asm(" .section .text.lock, \"ax\"
305 stmfd sp!, {r0 - r3, lr}
308 ldmfd sp!, {r0 - r3, pc}^
311 .globl __down_interruptible_failed
312 __down_interruptible_failed:
313 stmfd sp!, {r0 - r3, lr}
315 bl __down_interruptible
317 ldmfd sp!, {r0 - r3, pc}^
320 .globl __down_trylock_failed
321 __down_trylock_failed:
322 stmfd sp!, {r0 - r3, lr}
326 ldmfd sp!, {r0 - r3, pc}^
331 stmfd sp!, {r0 - r3, lr}
334 ldmfd sp!, {r0 - r3, pc}^
	.globl	__down_read_failed
__down_read_failed:
	stmfd	sp!, {r0 - r3, lr}
	mov	r0, ip
	bcc	2f
1:	bl	down_read_failed_biased
	ldmfd	sp!, {r0 - r3, pc}^
2:	bl	down_read_failed
	mov	r1, pc			@ save PSR
	orr	r2, r1, #0x08000000
	teqp	r2, #0			@ disable IRQs

	ldr	r3, [r0]
	subs	r3, r3, #1
	str	r3, [r0]
	ldmplfd	sp!, {r0 - r3, pc}^
	orrcs	r1, r1, #0x20000000	@ Set carry
	teqp	r1, #0			@ restore IRQs
	bcs	1b
	b	2b
	.globl	__down_write_failed
__down_write_failed:
	stmfd	sp!, {r0 - r3, lr}
	mov	r0, ip
	bcc	2f
1:	bl	down_write_failed_biased
	ldmfd	sp!, {r0 - r3, pc}^
2:	bl	down_write_failed
	mov	r1, pc			@ save PSR
	orr	r2, r1, #0x08000000
	teqp	r2, #0			@ disable IRQs

	ldr	r3, [r0]
	subs	r3, r3, #"RW_LOCK_BIAS_STR"
	str	r3, [r0]
	ldmeqfd	sp!, {r0 - r3, pc}^
	orrcs	r1, r1, #0x20000000	@ Set carry
	teqp	r1, #0			@ restore IRQs
	bcs	1b
	b	2b
	.globl	__rwsem_wake
__rwsem_wake:
	stmfd	sp!, {r0 - r3, lr}
	mov	r0, ip
	beq	1f
	bl	rwsem_wake_readers
	ldmfd	sp!, {r0 - r3, pc}^
1:	bl	rwsem_wake_writer
	ldmfd	sp!, {r0 - r3, pc}^
	");

#else

/* 32 bit version */
396 asm(" .section .text.lock, \"ax\"
400 stmfd sp!, {r0 - r3, lr}
403 ldmfd sp!, {r0 - r3, pc}
406 .globl __down_interruptible_failed
407 __down_interruptible_failed:
408 stmfd sp!, {r0 - r3, lr}
410 bl __down_interruptible
412 ldmfd sp!, {r0 - r3, pc}
415 .globl __down_trylock_failed
416 __down_trylock_failed:
417 stmfd sp!, {r0 - r3, lr}
421 ldmfd sp!, {r0 - r3, pc}
426 stmfd sp!, {r0 - r3, lr}
429 ldmfd sp!, {r0 - r3, pc}
	.globl	__down_read_failed
__down_read_failed:
	stmfd	sp!, {r0 - r3, lr}
	mov	r0, ip
	bcc	2f
1:	bl	down_read_failed_biased
	ldmfd	sp!, {r0 - r3, pc}
2:	bl	down_read_failed
	mrs	r1, cpsr		@ save IRQ state
	orr	r2, r1, #128
	msr	cpsr_c, r2		@ disable IRQs
	ldr	r3, [r0]
	subs	r3, r3, #1
	str	r3, [r0]
	msr	cpsr_c, r1		@ restore IRQs, keep flags
	ldmplfd	sp!, {r0 - r3, pc}
	bcs	1b
	b	2b
	.globl	__down_write_failed
__down_write_failed:
	stmfd	sp!, {r0 - r3, lr}
	mov	r0, ip
	bcc	2f
1:	bl	down_write_failed_biased
	ldmfd	sp!, {r0 - r3, pc}
2:	bl	down_write_failed
	mrs	r1, cpsr		@ save IRQ state
	orr	r2, r1, #128
	msr	cpsr_c, r2		@ disable IRQs
	ldr	r3, [r0]
	subs	r3, r3, #"RW_LOCK_BIAS_STR"
	str	r3, [r0]
	msr	cpsr_c, r1		@ restore IRQs, keep flags
	ldmeqfd	sp!, {r0 - r3, pc}
	bcs	1b
	b	2b
	.globl	__rwsem_wake
__rwsem_wake:
	stmfd	sp!, {r0 - r3, lr}
	mov	r0, ip
	beq	1f
	bl	rwsem_wake_readers
	ldmfd	sp!, {r0 - r3, pc}
1:	bl	rwsem_wake_writer
	ldmfd	sp!, {r0 - r3, pc}
	");

#endif