/*
 * i386 and x86-64 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>
#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is protected
 * by the spinlock in the semaphore's waitqueue head.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
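
/*
 * For orientation, a C-level sketch of what those inline fast paths
 * amount to (illustrative only - the real versions in <asm/semaphore.h>
 * are hand-written assembly, and atomic_dec_return()/atomic_inc_return()
 * stand in here for the LOCK-prefixed instructions they use):
 *
 *	down(sem):
 *		if (atomic_dec_return(&sem->count) < 0)
 *			__down(sem);		// contended: go to sleep
 *
 *	up(sem):
 *		if (atomic_inc_return(&sem->count) <= 0)
 *			__up(sem);		// someone is asleep: wake them
 */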
/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
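
/*
 * A worked trace of the sleeper bookkeeping below (illustrative,
 * single-CPU, count initialized to 1):
 *
 *	A: down()	count 1 -> 0, fast path, A owns the semaphore
 *	B: down()	count 0 -> -1, enters __down()
 *		sleepers = 1; atomic_add_negative(1 - 1, count) leaves -1,
 *		still negative, so B sleeps with sleepers == 1
 *	A: up()		count -1 -> 0, count was negative, so B is woken
 *	B: retries the loop: atomic_add_negative(1 - 1, count) leaves 0,
 *		not negative, so B zeroes sleepers and owns the semaphore
 */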
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_UNINTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * the wait_queue_head.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_UNINTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);
	tsk->state = TASK_RUNNING;
}
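
/*
 * Typical use of the uninterruptible path (illustrative; "my_sem" and
 * the critical section are made up for the example):
 *
 *	static DECLARE_MUTEX(my_sem);	// semaphore initialized to 1
 *
 *	down(&my_sem);			// may sleep in __down() above
 *	... critical section ...
 *	up(&my_sem);			// may call __up() to wake a waiter
 */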
int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	unsigned long flags;

	tsk->state = TASK_INTERRUPTIBLE;
	spin_lock_irqsave(&sem->wait.lock, flags);
	add_wait_queue_exclusive_locked(&sem->wait, &wait);

	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock in
		 * wait_queue_head. The "-1" is because we're
		 * still hoping to get the semaphore.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irqrestore(&sem->wait.lock, flags);

		schedule();

		spin_lock_irqsave(&sem->wait.lock, flags);
		tsk->state = TASK_INTERRUPTIBLE;
	}
	remove_wait_queue_locked(&sem->wait, &wait);
	wake_up_locked(&sem->wait);
	spin_unlock_irqrestore(&sem->wait.lock, flags);

	tsk->state = TASK_RUNNING;
	return retval;
}
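
/*
 * Typical caller pattern for the interruptible path (illustrative,
 * reusing the made-up "my_sem" from above):
 *
 *	if (down_interruptible(&my_sem))
 *		return -ERESTARTSYS;	// signal arrived, no lock held
 *	... critical section ...
 *	up(&my_sem);
 */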
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore *sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&sem->wait.lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock in the
	 * wait_queue_head.
	 */
	if (!atomic_add_negative(sleepers, &sem->count)) {
		wake_up_locked(&sem->wait);
	}

	spin_unlock_irqrestore(&sem->wait.lock, flags);
	return 1;	/* failure - the caller never got the semaphore */
}
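
/*
 * The single-cmpxchg alternative alluded to above would look roughly
 * like this (illustrative sketch only, assuming an atomic_cmpxchg()
 * helper; cmpxchg needs a 486 or later, which is exactly why it is
 * not used here):
 *
 *	int down_trylock(struct semaphore *sem)
 *	{
 *		int old = atomic_read(&sem->count);
 *		if (old <= 0)
 *			return 1;	// contended, fail without sleeping
 *		return atomic_cmpxchg(&sem->count, old, old - 1) != old;
 *	}
 */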