#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H

/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1996 Richard Henderson
 */

#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>

struct semaphore {
	/* Careful, inline assembly knows about the position of these two. */
	atomic_t count;
	atomic_t waking;		/* biased by -1 */
	wait_queue_head_t wait;
};

#define MUTEX ((struct semaphore) \
	{ ATOMIC_INIT(1), ATOMIC_INIT(-1), NULL })
#define MUTEX_LOCKED ((struct semaphore) \
	{ ATOMIC_INIT(0), ATOMIC_INIT(-1), NULL })

#define sema_init(sem, val)	atomic_set(&((sem)->count), val)

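/*
 * Usage sketch (illustrative only, not part of this header): a
 * hypothetical driver serializing access to shared state with a
 * mutex-style semaphore; down() and up() are defined below.
 *
 *	static struct semaphore dev_sem = MUTEX;
 *
 *	static void example_write(void)
 *	{
 *		down(&dev_sem);		// sleeps until available
 *		// ... touch the shared state ...
 *		up(&dev_sem);
 *	}
 */
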
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern int __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);

/* All have custom assembly linkages. */
extern void __down_failed(struct semaphore * sem);
extern void __down_failed_interruptible(struct semaphore * sem);
extern void __down_failed_trylock(struct semaphore * sem);
extern void __up_wakeup(struct semaphore * sem);

/*
 * Whee. Hidden out of line code is fun. The contention cases are
 * handled out of line in kernel/sched.c; arch/alpha/lib/semaphore.S
 * takes care of making sure we can call it without clobbering regs.
 */

extern inline void down(struct semaphore * sem)
{
	/* Given that we have to use particular hard registers to
	   communicate with __down_failed anyway, reuse them in
	   the atomic operation as well.

	   __down_failed takes the semaphore address in $24, and
	   its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  */

	/* This little bit of silliness is to get the GP loaded for
	   a function that ordinarily wouldn't.  Otherwise we could
	   have it done by the macro directly, which can be optimized
	   by the linker.  */
	register void *pv __asm__("$27") = __down_failed;

	__asm__ __volatile__ (
		"/* semaphore down operation */\n"
		"1:	ldl_l	$24,%1\n"
		"	subl	$24,1,$24\n"
		"	mov	$24,$28\n"
		"	stl_c	$28,%1\n"
		"	beq	$28,2f\n"
		"	blt	$24,3f\n"
		"4:	mb\n"
		".section .text2,\"ax\"\n"
		"2:	br	1b\n"
		"3:	lda	$24,%1\n"
		"	jsr	$28,($27),__down_failed\n"
		"	ldgp	$29,0($28)\n"
		"	br	4b\n"
		".previous"
		: "=r"(pv)
		: "m"(sem->count), "r"(pv)
		: "$24", "$28", "memory");
}

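/*
 * Roughly "equivalent" C for the fast path above, in the spirit of the
 * "Equivalent" C comment in down_trylock below.  A sketch only:
 * ldl_l/stl_c stand in for the load-locked/store-conditional pair and
 * are not real functions:
 *
 *	do {
 *		newcount = ldl_l(&sem->count) - 1;
 *	} while (!stl_c(&sem->count, newcount));
 *	if (newcount < 0)
 *		__down_failed(sem);	// contended: block via kernel/sched.c
 *	mb();				// label 4: reached on both paths
 */
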
extern inline int down_interruptible(struct semaphore * sem)
{
	/* __down_failed_interruptible takes the semaphore address in $24,
	   and its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  The return
	   value is in $24.  */

	register int ret __asm__("$24");
	register void *pv __asm__("$27") = __down_failed_interruptible;

	__asm__ __volatile__ (
		"/* semaphore down interruptible operation */\n"
		"1:	ldl_l	$24,%2\n"
		"	subl	$24,1,$24\n"
		"	mov	$24,$28\n"
		"	stl_c	$28,%2\n"
		"	beq	$28,2f\n"
		"	blt	$24,3f\n"
		"	mov	$31,%0\n"
		"4:	mb\n"
		".section .text2,\"ax\"\n"
		"2:	br	1b\n"
		"3:	lda	$24,%2\n"
		"	jsr	$28,($27),__down_failed_interruptible\n"
		"	ldgp	$29,0($28)\n"
		"	br	4b\n"
		".previous"
		: "=r"(ret), "=r"(pv)
		: "m"(sem->count), "r"(pv)
		: "$28", "memory");

	return ret;
}

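/*
 * Usage sketch (illustrative, hypothetical caller): the interruptible
 * variant returns nonzero if a signal arrived while sleeping, so the
 * usual idiom is to back out:
 *
 *	if (down_interruptible(&dev_sem))
 *		return -EINTR;
 *	// ... critical section ...
 *	up(&dev_sem);
 */
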
/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 *
 * We must manipulate count and waking simultaneously and atomically.
 * Do this by using ll/sc on the pair of 32-bit words.
 */

extern inline int down_trylock(struct semaphore * sem)
{
	long ret, tmp, tmp2, sub;

	/* "Equivalent" C.  Note that we have to do this all without
	   (taken) branches in order to be a valid ll/sc sequence.

	   do {
		tmp = ldq_l;
		sub = 0x0000000100000000;
		ret = ((int)tmp <= 0);		// count <= 0 ?
		if ((int)tmp >= 0) sub = 0;	// count >= 0 ?
		// note that if count=0 subq overflows to the high
		// longword (i.e. waking)
		ret &= ((long)tmp < 0);		// waking < 0 ?
		sub += 1;
		if (ret)
			break;
		tmp -= sub;
		tmp = stq_c = tmp;
	   } while (tmp == 0);
	*/

	__asm__ __volatile__(
		"1:	ldq_l	%1,%4\n"
		"	lda	%3,1\n"
		"	addl	%1,0,%2\n"
		"	sll	%3,32,%3\n"
		"	cmple	%2,0,%0\n"
		"	cmovge	%2,0,%3\n"
		"	cmplt	%1,0,%2\n"
		"	addq	%3,1,%3\n"
		"	and	%0,%2,%0\n"
		"	bne	%0,2f\n"
		"	subq	%1,%3,%1\n"
		"	stq_c	%1,%4\n"
		"	beq	%1,3f\n"
		"2:\n"
		".section .text2,\"ax\"\n"
		"3:	br	1b\n"
		".previous"
		: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
		: "m"(*sem)
		: "memory");

	return ret;
}

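/*
 * Usage sketch (illustrative; the -EAGAIN policy is made up): trylock
 * never sleeps, so it is usable where blocking is not:
 *
 *	if (down_trylock(&dev_sem))
 *		return -EAGAIN;		// contended, try again later
 *	// ... critical section ...
 *	up(&dev_sem);
 */
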
extern inline void up(struct semaphore * sem)
{
	/* Given that we have to use particular hard registers to
	   communicate with __up_wakeup anyway, reuse them in
	   the atomic operation as well.

	   __up_wakeup takes the semaphore address in $24, and
	   its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  */

	register void *pv __asm__("$27") = __up_wakeup;

	__asm__ __volatile__ (
		"/* semaphore up operation */\n"
		"	mb\n"
		"1:	ldl_l	$24,%1\n"
		"	addl	$24,1,$24\n"
		"	mov	$24,$28\n"
		"	stl_c	$28,%1\n"
		"	beq	$28,2f\n"
		"	mb\n"
		"	ble	$24,3f\n"
		"4:\n"
		".section .text2,\"ax\"\n"
		"2:	br	1b\n"
		"3:	lda	$24,%1\n"
		"	jsr	$28,($27),__up_wakeup\n"
		"	ldgp	$29,0($28)\n"
		"	br	4b\n"
		".previous"
		: "=r"(pv)
		: "m"(sem->count), "r"(pv)
		: "$24", "$28", "memory");
}

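/*
 * Usage sketch (illustrative, hypothetical names): MUTEX_LOCKED creates
 * a semaphore that starts out owned, giving a completion-style handoff
 * between two contexts:
 *
 *	static struct semaphore done = MUTEX_LOCKED;
 *
 *	// waiter:
 *	down(&done);		// blocks until the worker finishes
 *
 *	// worker, once its work is complete:
 *	up(&done);
 */
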
#endif