/* include/asm-i386/semaphore.h, from Linux 2.1.116pre2 (davej-history.git) */
#ifndef _I386_SEMAPHORE_H
#define _I386_SEMAPHORE_H

#include <linux/linkage.h>

#ifdef __SMP__
extern void __check_locks(unsigned int);
#else
#define __check_locks(x) do { } while (0)
#endif
/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                     the original code and to make semaphore waits
 *                     interruptible so that processes waiting on
 *                     semaphores can be killed.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 */
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/spinlock.h>
struct semaphore {
	atomic_t count;
	int waking;
	struct wait_queue * wait;
};

#define MUTEX ((struct semaphore) { ATOMIC_INIT(1), 0, NULL })
#define MUTEX_LOCKED ((struct semaphore) { ATOMIC_INIT(0), 0, NULL })
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

extern void __down(struct semaphore * sem);
extern void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

#define sema_init(sem, val) atomic_set(&((sem)->count), (val))
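
/*
 * Initialization sketch (illustrative, not part of the original
 * header; my_mutex, my_gate and my_setup are hypothetical names):
 * a semaphore can be created already free via MUTEX, already held
 * via MUTEX_LOCKED, or given an arbitrary count with sema_init().
 */
#if 0
static struct semaphore my_mutex = MUTEX;		/* count 1: free */
static struct semaphore my_gate = MUTEX_LOCKED;		/* count 0: held */

static void my_setup(void)
{
	sema_init(&my_gate, 4);	/* allow up to four concurrent holders */
}
#endif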
/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * but on the x86 we need an external synchronizer.
 * Currently this is just the global interrupt lock,
 * bah. Go for a smaller spinlock some day.
 *
 * (On the other hand this shouldn't be in any critical
 *  path, so..)
 */
static inline void wake_one_more(struct semaphore * sem)
{
	unsigned long flags;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	sem->waking++;
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
}
static inline int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
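
/*
 * How the pair cooperates (a sketch; the waiting side lives outside
 * this header): __up() posts a token with wake_one_more() before
 * waking sleepers, and each sleeper in __down() retests
 * waking_non_zero() after every wakeup, consuming exactly one token
 * before it may return.  Because both helpers take
 * semaphore_wake_lock, a token posted while a sleeper is between its
 * test and its sleep is seen on the next pass rather than lost.
 */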
/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/i386/lib/semaphore.S
 */
extern inline void down(struct semaphore * sem)
{
	__check_locks(0);
	__asm__ __volatile__(
		"# atomic down operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"decl 0(%0)\n\t"
		"js 2f\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp __down_failed\n"
		".previous"
		:/* no outputs */
		:"c" (sem)
		:"memory");
}
extern inline int down_interruptible(struct semaphore * sem)
{
	int result;

	__check_locks(0);
	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"decl 0(%1)\n\t"
		"js 2f\n\t"
		"xorl %0,%0\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp __down_failed_interruptible\n"
		".previous"
		:"=a" (result)
		:"c" (sem)
		:"memory");
	return result;
}
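
/*
 * Annotation (not in the original): this variant differs from down()
 * in two ways visible above.  The uncontended path zeroes %eax
 * ("xorl %0,%0") so the function returns 0, and the slow path jumps
 * to __down_failed_interruptible, which returns its status in %eax
 * for the "=a" (result) output: 0 once the semaphore is acquired,
 * nonzero if the sleep was cut short by a signal.
 */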
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
extern inline void up(struct semaphore * sem)
{
	__asm__ __volatile__(
		"# atomic up operation\n\t"
#ifdef __SMP__
		"lock ; "
#endif
		"incl 0(%0)\n\t"
		"jle 2f\n"
		"1:\n"
		".section .text.lock,\"ax\"\n"
		"2:\tpushl $1b\n\t"
		"jmp __up_wakeup\n"
		".previous"
		:/* no outputs */
		:"c" (sem)
		:"memory");
}
#endif