#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H

/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1996 Richard Henderson
 */

#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
struct semaphore {
	/* Careful, inline assembly knows about the position of these two. */
	atomic_t count;
	atomic_t waking;		/* biased by -1 */
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};
#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name)		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif
#define __SEMAPHORE_INITIALIZER(name,count)		\
	{ ATOMIC_INIT(count), ATOMIC_INIT(-1),		\
	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
	  __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)
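/*
 * A minimal usage sketch (added for illustration, not part of the
 * original header; example_sem and example_count are made-up names):
 *
 *	static DECLARE_MUTEX(example_sem);
 *	static int example_count;
 *
 *	void example_increment(void)
 *	{
 *		down(&example_sem);	// may sleep until the mutex is free
 *		example_count++;
 *		up(&example_sem);	// release; may wake a waiter
 *	}
 */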
extern inline void sema_init (struct semaphore *sem, int val)
{
	/*
	 * Logically,
	 *   *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
	 * except that gcc produces better code initializing by parts.
	 */

	atomic_set(&sem->count, val);
	atomic_set(&sem->waking, -1);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern int __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);

/* All have custom assembly linkages. */
extern void __down_failed(struct semaphore * sem);
extern void __down_failed_interruptible(struct semaphore * sem);
extern void __down_failed_trylock(struct semaphore * sem);
extern void __up_wakeup(struct semaphore * sem);
/*
 * Whee.  Hidden out of line code is fun.  The contention cases are
 * handled out of line in kernel/sched.c; arch/alpha/lib/semaphore.S
 * takes care of making sure we can call it without clobbering regs.
 */
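/*
 * For reference, the fast path in down() below is logically just the
 * following C (a sketch added for illustration, not from the original):
 *
 *	void down(struct semaphore *sem)
 *	{
 *		if (atomic_dec_return(&sem->count) < 0)
 *			__down(sem);	// slow path, via the __down_failed stub
 *	}
 *
 * The hand-written asm exists so the decrement and the slow-path call
 * can share the hard registers ($24/$28) that the stubs expect.
 */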
extern inline void down(struct semaphore * sem)
{
	/* Given that we have to use particular hard registers to
	   communicate with __down_failed anyway, reuse them in
	   the atomic operation as well.

	   __down_failed takes the semaphore address in $24, and
	   its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  */

	/* This little bit of silliness is to get the GP loaded for
	   a function that ordinarily wouldn't.  Otherwise we could
	   have it done by the macro directly, which can be optimized
	   by the linker.  */
	register void *pv __asm__("$27");
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	pv = __down_failed;
	__asm__ __volatile__ (
		"/* semaphore down operation */\n"
		"1:	ldl_l	$24,%1\n"
		"	subl	$24,1,$24\n"
		"	mov	$24,$28\n"
		"	stl_c	$28,%1\n"
		"	beq	$28,2f\n"
		"	blt	$24,3f\n"
		"4:	mb\n"
		".section .text2,\"ax\"\n"
		"2:	br	1b\n"
		"3:	lda	$24,%1\n"
		"	jsr	$28,($27),__down_failed\n"
		"	ldgp	$29,0($28)\n"
		"	br	4b\n"
		".previous"
		: "=r"(pv)
		: "m"(sem->count), "r"(pv)
		: "$24", "$28", "memory");
}
extern inline int down_interruptible(struct semaphore * sem)
{
	/* __down_failed_interruptible takes the semaphore address in $24,
	   and its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  The return
	   value is in $24.  */

	register int ret __asm__("$24");
	register void *pv __asm__("$27");

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	pv = __down_failed_interruptible;
	__asm__ __volatile__ (
		"/* semaphore down interruptible operation */\n"
		"1:	ldl_l	$24,%2\n"
		"	subl	$24,1,$24\n"
		"	mov	$24,$28\n"
		"	stl_c	$28,%2\n"
		"	beq	$28,2f\n"
		"	blt	$24,3f\n"
		"	mov	$31,%0\n"
		"4:	mb\n"
		".section .text2,\"ax\"\n"
		"2:	br	1b\n"
		"3:	lda	$24,%2\n"
		"	jsr	$28,($27),__down_failed_interruptible\n"
		"	ldgp	$29,0($28)\n"
		"	br	4b\n"
		".previous"
		: "=r"(ret), "=r"(pv)
		: "m"(sem->count), "r"(pv)
		: "$28", "memory");

	return ret;
}
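/*
 * A hedged usage sketch (illustrative, not part of the original header):
 * syscall-path callers must propagate interruption by a signal.
 *
 *	int example_wait(void)
 *	{
 *		if (down_interruptible(&example_sem))
 *			return -EINTR;	// woken by a signal, not by up()
 *		// ... critical section ...
 *		up(&example_sem);
 *		return 0;
 *	}
 */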
/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 *
 * We must manipulate count and waking simultaneously and atomically.
 * Do this by using ll/sc on the pair of 32-bit words.
 */
extern inline int down_trylock(struct semaphore * sem)
{
	long ret, tmp, tmp2, sub;

	/* "Equivalent" C.  Note that we have to do this all without
	   (taken) branches in order to be a valid ll/sc sequence.

	   do {
		tmp = ldq_l;
		sub = 0x0000000100000000;
		ret = ((int)tmp <= 0);		// count <= 0 ?
		if ((int)tmp >= 0) sub = 0;	// count >= 0 ?
		// note that if count=0 subq overflows to the high
		// longword (i.e. waking)
		ret &= ((long)tmp < 0);		// waking < 0 ?
		sub += 1;
		if (ret)
			break;
		tmp -= sub;
		tmp = stq_c = tmp;
	   } while (tmp == 0);
	*/
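	/* Worked examples of the sequence above (added as illustration,
	   not part of the original).  Alpha is little-endian, so count is
	   the low longword of the quadword and waking the high one:

	   count=1, waking=-1 (free):	tmp = 0xffffffff00000001
	     count > 0, so sub = 0+1 = 1; tmp -= 1 leaves count = 0.
	     Acquired.

	   count=0, waking=-1 (held):	tmp = 0xffffffff00000000
	     count <= 0 and waking < 0, so ret = 1 and nothing is stored.
	     Failed.

	   count=0, waking=0 (wakeup pending):	tmp = 0x0000000000000000
	     count <= 0 but waking >= 0, so ret = 0; sub = 0+1 = 1 and the
	     subq borrows into the high longword: count = -1, waking = -1.
	     We stole the pending wakeup.  Acquired.  */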
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	__asm__ __volatile__(
		"1:	ldq_l	%1,%4\n"
		"	lda	%3,1\n"
		"	addl	%1,0,%2\n"
		"	sll	%3,32,%3\n"
		"	cmple	%2,0,%0\n"
		"	cmovge	%2,0,%3\n"
		"	cmplt	%1,0,%2\n"
		"	addq	%3,1,%3\n"
		"	and	%0,%2,%0\n"
		"	bne	%0,2f\n"
		"	subq	%1,%3,%1\n"
		"	stq_c	%1,%4\n"
		"	beq	%1,3f\n"
		"2:\n"
		".section .text2,\"ax\"\n"
		"3:	br	1b\n"
		".previous"
		: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
		: "m"(*sem)
		: "memory");

	return ret;
}
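/*
 * Usage sketch for the trylock path (illustrative names only, added
 * here for clarity):
 *
 *	if (down_trylock(&example_sem))
 *		return -EBUSY;	// 1: held and no wakeup was pending
 *	// 0: we now hold the semaphore
 */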
extern inline void up(struct semaphore * sem)
{
	/* Given that we have to use particular hard registers to
	   communicate with __up_wakeup anyway, reuse them in
	   the atomic operation as well.

	   __up_wakeup takes the semaphore address in $24, and
	   its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  */

	register void *pv __asm__("$27");

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	pv = __up_wakeup;
	__asm__ __volatile__ (
		"/* semaphore up operation */\n"
		"	mb\n"
		"1:	ldl_l	$24,%1\n"
		"	addl	$24,1,$24\n"
		"	mov	$24,$28\n"
		"	stl_c	$28,%1\n"
		"	beq	$28,2f\n"
		"	mb\n"
		"	ble	$24,3f\n"
		"4:\n"
		".section .text2,\"ax\"\n"
		"2:	br	1b\n"
		"3:	lda	$24,%1\n"
		"	jsr	$28,($27),__up_wakeup\n"
		"	ldgp	$29,0($28)\n"
		"	br	4b\n"
		".previous"
		: "=r"(pv)
		: "m"(sem->count), "r"(pv)
		: "$24", "$28", "memory");
}

#endif