1 #ifndef _I386_SEMAPHORE_H
2 #define _I386_SEMAPHORE_H
4 #include <linux/linkage.h>
/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 *
 * Modified 1996-12-23 by Dave Grothe <dave@gcom.com> to fix bugs in
 *                     the original code and to make semaphore waits
 *                     interruptible so that processes waiting on
 *                     semaphores can be killed.
 * Modified 1999-02-14 by Andrea Arcangeli, split the sched.c helper
 *                     functions in asm/semaphore-helper.h while fixing a
 *                     potential and subtle race discovered by Ulrich Schmid
 *                     in down_interruptible(). Since I started to play here I
 *                     also implemented the `trylock' semaphore operation.
 *          1999-07-02 Artur Skawina <skawina@geocities.com>
 *                     Optimized "0(ecx)" -> "(ecx)" (the assembler does not
 *                     do this). Changed calling sequences from push/jmp to
 *                     traditional call/ret.
 *
 * If you would like to see an analysis of this implementation, please
 * ftp to gcom.com and download the file
 * /pub/linux/src/semaphore/semaphore-2.0.24.tar.gz.
 */
31 #include <asm/system.h>
32 #include <asm/atomic.h>
33 #include <linux/spinlock.h>
34 #include <linux/wait.h>
	/* Tasks sleeping on this semaphore.  NOTE(review): the enclosing
	 * 'struct semaphore { ... }' and its other fields (count, etc.)
	 * are not visible in this chunk. */
	wait_queue_head_t wait;
/*
 * __SEM_DEBUG_INIT supplies the trailing ", __magic" initializer used by
 * __SEMAPHORE_INITIALIZER when wait-queue debugging is enabled; the
 * non-debug variant expands to nothing.
 * NOTE(review): the #if WAITQUEUE_DEBUG / #else / #endif lines selecting
 * between these two definitions are not visible in this chunk — as shown,
 * the second #define would redefine the first.
 */
# define __SEM_DEBUG_INIT(name) \
		, (int)&(name).__magic
/* Non-debug variant: no extra initializer. */
# define __SEM_DEBUG_INIT(name)
/*
 * Static initializer for a struct semaphore bound to the variable 'name':
 * count preset to 'count', a zeroed middle field, and the embedded wait
 * queue head tied to (name).wait.  __SEM_DEBUG_INIT appends the __magic
 * initializer (or nothing) depending on the debug configuration.
 */
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	__SEM_DEBUG_INIT(name) }
/* Initializer for a mutex-style semaphore: count starts at 1 (unlocked). */
#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)
/* Define a struct semaphore variable 'name' preset to 'count'. */
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
/* Convenience declarators: a mutex starts unlocked (count 1),
 * a locked mutex starts with count 0. */
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
65 extern inline void sema_init (struct semaphore
*sem
, int val
)
68 * *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
70 * i'd rather use the more flexible initialization above, but sadly
71 * GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
73 atomic_set(&sem
->count
, val
);
75 init_waitqueue_head(&sem
->wait
);
77 sem
->__magic
= (int)&sem
->__magic
;
/*
 * init_MUTEX - initialize @sem as an unlocked mutex (count 1).
 * As shown in this chunk the function had no body, which cannot compile;
 * restored to the one-liner implied by __MUTEX_INITIALIZER/DECLARE_MUTEX.
 */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
/*
 * init_MUTEX_LOCKED - initialize @sem as a locked mutex (count 0),
 * mirroring DECLARE_MUTEX_LOCKED.  As shown in this chunk the function
 * had no body, which cannot compile; restored.
 */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
/*
 * Slow-path entry points, implemented in assembly (see
 * arch/i386/lib/semaphore.S per the comment below): they take their
 * arguments in registers rather than via the normal C calling convention,
 * hence the (void) prototypes.
 */
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void /* params in registers */);
asmlinkage int  __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

/* C-level helpers that do the actual sleeping/waking for the stubs above. */
asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

/* NOTE(review): presumably serializes the sleep/wake bookkeeping of the
 * helpers above — confirm against the arch/i386 semaphore implementation. */
extern spinlock_t semaphore_wake_lock;
/*
 * This is ugly, but we want the default case to fall through.
 * "down_failed" is a special asm handler that calls the C
 * routine that actually waits. See arch/i386/lib/semaphore.S
 */
/*
 * down - acquire the semaphore, sleeping (uninterruptibly) if it is held.
 *
 * Decrements sem->count in place; when contended, control appears to
 * branch to the out-of-line slow path at label 2 in .text.lock, which
 * calls __down_failed (the asm trampoline into the C-level __down).
 * The uncontended case takes no jump.
 *
 * NOTE(review): this chunk is truncated — the function braces, the
 * #if WAITQUEUE_DEBUG guard around CHECK_MAGIC, the conditional jump
 * into the slow path, the ".previous" directive and the asm
 * operand/clobber lists are not visible here.
 */
extern inline void down(struct semaphore * sem)
	CHECK_MAGIC(sem->__magic);	/* debug-only corruption check */

	__asm__ __volatile__(
		"# atomic down operation\n\t"
		"decl (%0)\n\t"	/* --sem->count */
		".section .text.lock,\"ax\"\n"	/* out-of-line slow path */
		"2:\tcall __down_failed\n\t"
/*
 * down_interruptible - acquire the semaphore, allowing the sleep to be
 * interrupted by a signal.  Returns int: 0 on success, nonzero when
 * interrupted (presumably -EINTR supplied by the slow path — TODO
 * confirm against __down_interruptible).
 *
 * NOTE(review): truncated in this chunk — function braces, the debug
 * guard, the result operand (%0), the conditional branch, ".previous"
 * and the asm operand/clobber lists are not visible.
 */
extern inline int down_interruptible(struct semaphore * sem)
	CHECK_MAGIC(sem->__magic);	/* debug-only corruption check */

	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
		"decl (%1)\n\t"	/* --sem->count */
		".section .text.lock,\"ax\"\n"	/* out-of-line slow path */
		"2:\tcall __down_failed_interruptible\n\t"
/*
 * down_trylock - try to acquire the semaphore without sleeping.
 * Returns int (by the usual kernel convention, 0 if acquired and nonzero
 * if the semaphore was busy — TODO confirm against __down_trylock).
 * On contention the slow path calls __down_failed_trylock instead of
 * sleeping.
 *
 * NOTE(review): truncated in this chunk — function braces, the debug
 * guard, the result operand (%0), the conditional branch, ".previous"
 * and the asm operand/clobber lists are not visible.  The asm comment
 * string below matches down_interruptible's; it is kept byte-identical.
 */
extern inline int down_trylock(struct semaphore * sem)
	CHECK_MAGIC(sem->__magic);	/* debug-only corruption check */

	__asm__ __volatile__(
		"# atomic interruptible down operation\n\t"
		"decl (%1)\n\t"	/* --sem->count */
		".section .text.lock,\"ax\"\n"	/* out-of-line slow path */
		"2:\tcall __down_failed_trylock\n\t"
/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 * The default case (no contention) will result in NO
 * jumps for both down() and up().
 */
/*
 * up - release the semaphore.
 *
 * Increments sem->count in place; per the note above, the slow path
 * (label 2 in .text.lock, calling __up_wakeup) is entered only when
 * someone was waiting, so the uncontended case takes no jump.
 *
 * NOTE(review): truncated in this chunk — function braces, the
 * #if WAITQUEUE_DEBUG guard around CHECK_MAGIC, the conditional jump,
 * ".previous" and the asm operand/clobber lists are not visible.
 */
extern inline void up(struct semaphore * sem)
	CHECK_MAGIC(sem->__magic);	/* debug-only corruption check */

	__asm__ __volatile__(
		"# atomic up operation\n\t"
		"incl (%0)\n\t"	/* ++sem->count */
		".section .text.lock,\"ax\"\n"	/* out-of-line slow path */
		"2:\tcall __up_wakeup\n\t"