1 #ifndef _I386_SEMAPHORE_H
2 #define _I386_SEMAPHORE_H
4 #include <linux/linkage.h>
/*
 * Lock-consistency debugging hook.
 *
 * NOTE(review): the extracted source shows both the extern declaration
 * and the no-op macro unconditionally; the original almost certainly
 * selected between them with a debug #ifdef that was lost.  As written,
 * the macro shadows the function for all later call sites, so behavior
 * is the no-op — confirm against the pristine header before changing.
 */
extern void __check_locks(unsigned int);
#define __check_locks(x) do { } while (0)
/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                     the original code and to make semaphore waits
 *                     interruptible so that processes waiting on
 *                     semaphores can be killed.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 */
28 #include <asm/system.h>
29 #include <asm/atomic.h>
30 #include <asm/spinlock.h>
35 struct wait_queue
* wait
;
/* Static initializers: an unlocked mutex (count 1) and a locked one
 * (count 0); waking count zero, empty wait queue in both cases. */
#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
41 asmlinkage
void __down_failed(void /* special register calling convention */);
42 asmlinkage
int __down_failed_interruptible(void /* params in registers */);
43 asmlinkage
void __up_wakeup(void /* special register calling convention */);
45 extern void __down(struct semaphore
* sem
);
46 extern void __up(struct semaphore
* sem
);
48 extern spinlock_t semaphore_wake_lock
;
/* Set the initial count of an already-constructed semaphore. */
#define sema_init(sem, val) atomic_set(&((sem)->count), (val))
/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * but on the x86 we need an external synchronizer.
 * Currently this is just the global interrupt lock,
 * bah. Go for a smaller spinlock some day.
 *
 * (On the other hand this shouldn't be in any critical
 * path, so..)
 */
63 static inline void wake_one_more(struct semaphore
* sem
)
67 spin_lock_irqsave(&semaphore_wake_lock
, flags
);
69 spin_unlock_irqrestore(&semaphore_wake_lock
, flags
);
72 static inline int waking_non_zero(struct semaphore
*sem
)
77 spin_lock_irqsave(&semaphore_wake_lock
, flags
);
78 if (sem
->waking
> 0) {
82 spin_unlock_irqrestore(&semaphore_wake_lock
, flags
);
/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/i386/lib/semaphore.S
 */
/*
 * down(): atomically decrement the count; if it went negative
 * (somebody holds the semaphore), jump to the out-of-line section,
 * which pushes the retry address and transfers to __down_failed.
 * The uncontended path takes no jumps at all.
 *
 * NOTE(review): most of the asm body was lost in extraction and has
 * been reconstructed around the surviving fragments (the comment
 * string, the .text.lock section switch and the jmp) — verify the
 * exact operands/constraints against arch/i386/lib/semaphore.S and
 * the pristine header before relying on this.
 */
extern inline void down(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic down operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"decl 0(%0)\n\t"
		"js 2f\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp __down_failed\n"
		".previous"
		:/* no outputs */
		:"c" (sem)
		:"memory");
}
/*
 * down_interruptible(): like down(), but the sleep may be interrupted
 * by a signal.  Returns 0 on success (semaphore acquired); the
 * contended path gets its return value from __down_failed_interruptible.
 *
 * NOTE(review): most of the asm body was lost in extraction and has
 * been reconstructed around the surviving fragments — verify the exact
 * operands/constraints against arch/i386/lib/semaphore.S and the
 * pristine header before relying on this.
 */
extern inline int down_interruptible(struct semaphore * sem)
{
	int result;

	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"decl 0(%1)\n\t"
		"js 2f\n\t"
		"xorl %0,%0\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp __down_failed_interruptible\n"
		".previous"
		:"=a" (result)
		:"c" (sem)
		:"memory");
	return result;
}
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
142 extern inline void up(struct semaphore
* sem
)
144 __asm__
__volatile__(
145 "# atomic up operation\n\t"
152 ".section .text.lock,\"ax\"\n"