/*
 * i386 semaphore implementation.
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Portions Copyright 1999 Red Hat, Inc.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@redhat.com>
 */
#include <linux/config.h>
#include <linux/sched.h>

#include <asm/semaphore.h>

/*
 * Semaphores are implemented using a two-way counter:
 * The "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleeping"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleeping" and the contention routine ordering is
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */

/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleeper" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
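
/*
 * Illustrative sketch (not part of the original file, kept inside #if 0 so
 * it is never built): how a caller typically uses this counting semaphore
 * as a mutex. DECLARE_MUTEX() initialises the count to 1, down() only
 * enters the __down() slow path above on contention, and up() only calls
 * __up() when there are sleepers to wake. The example names are
 * hypothetical.
 */
#if 0
static DECLARE_MUTEX(example_sem);		/* count starts at 1 */

static void example_critical_section(void)
{
	down(&example_sem);		/* may sleep in __down() if contended */
	/* ... exclusive work on the protected data ... */
	up(&example_sem);		/* hands the semaphore to a sleeper if any */
}
#endif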

void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}

static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

void __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}

int __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers ++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
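
/*
 * Illustrative sketch (not part of the original file, kept inside #if 0 so
 * it is never built): the usual calling pattern for down_interruptible().
 * A non-zero (-EINTR) return from the slow path above means a signal
 * arrived before the semaphore was acquired, so the caller must not touch
 * the protected data and typically backs out. The example function is
 * hypothetical.
 */
#if 0
static int example_interruptible_section(struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -ERESTARTSYS;	/* interrupted: the lock is NOT held */
	/* ... lock held here ... */
	up(sem);
	return 0;
}
#endif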

/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;

	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);

	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
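
/*
 * Illustrative sketch (not part of the original file, kept inside #if 0 so
 * it is never built): down_trylock(), the inline caller that lands in
 * __down_failed_trylock below, returns 0 when the semaphore was acquired
 * and non-zero when it was contended, and never sleeps - which is why it
 * is usable where blocking is not allowed. The example function is
 * hypothetical.
 */
#if 0
static int example_try_section(struct semaphore *sem)
{
	if (down_trylock(sem))
		return -EBUSY;		/* contended: lock NOT held, we never slept */
	/* ... lock held here ... */
	up(sem);
	return 0;
}
#endif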

/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * %ecx contains the semaphore pointer on entry. Save the C-clobbered
 * registers (%eax, %edx and %ecx) except %eax when used as a return
 * value.
 */
asm(
".align 4\n"
".globl __down_failed\n"
"__down_failed:\n\t"
	"pushl %eax\n\t"
	"pushl %edx\n\t"
	"pushl %ecx\n\t"
	"call __down\n\t"
	"popl %ecx\n\t"
	"popl %edx\n\t"
	"popl %eax\n\t"
	"ret"
);

asm(
".align 4\n"
".globl __down_failed_interruptible\n"
"__down_failed_interruptible:\n\t"
	"pushl %edx\n\t"
	"pushl %ecx\n\t"
	"call __down_interruptible\n\t"
	"popl %ecx\n\t"
	"popl %edx\n\t"
	"ret"
);

asm(
".align 4\n"
".globl __down_failed_trylock\n"
"__down_failed_trylock:\n\t"
	"pushl %edx\n\t"
	"pushl %ecx\n\t"
	"call __down_trylock\n\t"
	"popl %ecx\n\t"
	"popl %edx\n\t"
	"ret"
);

asm(
".align 4\n"
".globl __up_wakeup\n"
"__up_wakeup:\n\t"
	"pushl %eax\n\t"
	"pushl %edx\n\t"
	"pushl %ecx\n\t"
	"call __up\n\t"
	"popl %ecx\n\t"
	"popl %edx\n\t"
	"popl %eax\n\t"
	"ret"
);

asm(
"
.align 4
.globl __down_read_failed
__down_read_failed:
	pushl	%edx
	pushl	%ecx
	jnc	2f

3:	call	down_read_failed_biased

1:	popl	%ecx
	popl	%edx
	ret

2:	call	down_read_failed
	" LOCK "subl	$1,(%eax)
	jns	1b
	jnc	2b
	jmp	3b
"
);

asm(
"
.align 4
.globl __down_write_failed
__down_write_failed:
	pushl	%edx
	pushl	%ecx
	jnc	2f

3:	call	down_write_failed_biased

1:	popl	%ecx
	popl	%edx
	ret

2:	call	down_write_failed
	" LOCK "subl	$" RW_LOCK_BIAS_STR ",(%eax)
	jz	1b
	jnc	2b
	jmp	3b
"
);

struct rw_semaphore *FASTCALL(rwsem_wake_readers(struct rw_semaphore *sem));
struct rw_semaphore *FASTCALL(rwsem_wake_writer(struct rw_semaphore *sem));

struct rw_semaphore *FASTCALL(down_read_failed_biased(struct rw_semaphore *sem));
struct rw_semaphore *FASTCALL(down_write_failed_biased(struct rw_semaphore *sem));
struct rw_semaphore *FASTCALL(down_read_failed(struct rw_semaphore *sem));
struct rw_semaphore *FASTCALL(down_write_failed(struct rw_semaphore *sem));

struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue(&sem->wait, &wait);	/* put ourselves at the head of the list */

	for (;;) {
		if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0))
			break;
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!sem->read_bias_granted)
			schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	return sem;
}

struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	add_wait_queue_exclusive(&sem->write_bias_wait, &wait);	/* put ourselves at the end of the list */

	for (;;) {
		if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0))
			break;
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!sem->write_bias_granted)
			schedule();
	}

	remove_wait_queue(&sem->write_bias_wait, &wait);
	tsk->state = TASK_RUNNING;

	/* if the lock is currently unbiased, awaken the sleepers
	 * FIXME: this wakes up the readers early in a bit of a
	 * stampede -> bad!
	 */
	if (atomic_read(&sem->count) >= 0)
		wake_up(&sem->wait);

	return sem;
}

/* Wait for the lock to become unbiased. Readers
 * are non-exclusive. =)
 */
struct rw_semaphore *down_read_failed(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__up_read(sem);	/* this takes care of granting the lock */

	add_wait_queue(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	return sem;
}

/* Wait for the lock to become unbiased. Since we're
 * a writer, we'll make ourselves exclusive.
 */
struct rw_semaphore *down_write_failed(struct rw_semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__up_write(sem);	/* this takes care of granting the lock */

	add_wait_queue_exclusive(&sem->wait, &wait);

	while (atomic_read(&sem->count) < 0) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&sem->count) >= 0)
			break;	/* we must attempt to acquire or bias the lock */
		schedule();
	}

	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;

	return sem;
}
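
/*
 * Illustrative sketch (not part of the original file, kept inside #if 0 so
 * it is never built): reader/writer usage of the rw_semaphore whose slow
 * paths are implemented above. down_read() callers may hold the lock
 * concurrently; a down_write() caller is exclusive. The example names are
 * hypothetical and the semaphore is assumed to be initialised elsewhere
 * (the initializer macro varies between trees of this era).
 */
#if 0
extern struct rw_semaphore example_rwsem;

static void example_reader(void)
{
	down_read(&example_rwsem);	/* shared: other readers may enter too */
	/* ... read-only access to the protected data ... */
	up_read(&example_rwsem);
}

static void example_writer(void)
{
	down_write(&example_rwsem);	/* exclusive: waits out readers and writers */
	/* ... modify the protected data ... */
	up_write(&example_rwsem);
}
#endif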

asm(
"
.align 4
.globl __rwsem_wake
__rwsem_wake:
	pushl	%edx
	pushl	%ecx

	jz	1f
	call	rwsem_wake_readers
	jmp	2f

1:	call	rwsem_wake_writer

2:	popl	%ecx
	popl	%edx
	ret
"
);

/* Called when someone has done an up that transitioned from
 * negative to non-negative, meaning that the lock has been
 * granted to whomever owned the bias.
 */
struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem)
{
	if (xchg(&sem->read_bias_granted, 1))
		BUG();
	wake_up(&sem->wait);
	return sem;
}

struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem)
{
	if (xchg(&sem->write_bias_granted, 1))
		BUG();
	wake_up(&sem->write_bias_wait);
	return sem;
}

#if defined(CONFIG_SMP)
asm(
"
.align	4
.globl	__write_lock_failed
__write_lock_failed:
	" LOCK "addl	$" RW_LOCK_BIAS_STR ",(%eax)
1:	cmpl	$" RW_LOCK_BIAS_STR ",(%eax)
	jne	1b

	" LOCK "subl	$" RW_LOCK_BIAS_STR ",(%eax)
	jnz	__write_lock_failed
	ret


.align	4
.globl	__read_lock_failed
__read_lock_failed:
	lock ;	incl	(%eax)
1:	cmpl	$1,(%eax)
	js	1b

	lock ;	decl	(%eax)
	js	__read_lock_failed
	ret
"
);
#endif
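
/*
 * Illustrative sketch (not part of the original file, kept inside #if 0 so
 * it is never built): a C-level rendering of the __read_lock_failed and
 * __write_lock_failed spin loops above, for a rwlock word initialised to
 * RW_LOCK_BIAS. A reader has already subtracted 1 and a writer has already
 * subtracted RW_LOCK_BIAS before arriving here; each helper undoes its
 * decrement, spins until the lock looks free enough, and retries. Names
 * and types are schematic, not the kernel's.
 */
#if 0
static void sketch_read_lock_failed(atomic_t *lock)
{
again:
	atomic_inc(lock);			/* undo the failed read decrement */
	while (atomic_read(lock) < 1)		/* spin while a writer holds the lock */
		barrier();
	if (atomic_add_negative(-1, lock))	/* retry the read decrement */
		goto again;			/* lost the race to a writer */
}

static void sketch_write_lock_failed(atomic_t *lock)
{
again:
	atomic_add(RW_LOCK_BIAS, lock);		/* undo the failed write subtraction */
	while (atomic_read(lock) != RW_LOCK_BIAS)	/* spin until nobody holds it */
		barrier();
	if (!atomic_sub_and_test(RW_LOCK_BIAS, lock))	/* retry the write subtraction */
		goto again;			/* a reader or writer slipped in */
}
#endif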