include/asm-alpha/semaphore.h  (Linux 2.2.0-final, davej-history.git)

#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H

/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 */

#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>

/*
 * Semaphores are recursive: we allow the holder process to recursively do
 * down() operations on a semaphore that the process already owns.  In order
 * to do that, we need to keep a semaphore-local copy of the owner and the
 * "depth of ownership".
 *
 * NOTE! Nasty memory ordering rules:
 *  - "owner" and "owner_depth" may only be modified once you hold the lock.
 *  - "owner_depth" must be written _after_ modifying owner, and must be
 *    read _before_ reading owner.  There must be appropriate write and read
 *    barriers to enforce this.
 */

struct semaphore {
	atomic_t count;
	atomic_t waking;
	struct task_struct *owner;
	long owner_depth;
	struct wait_queue * wait;
};

#define MUTEX ((struct semaphore) \
	{ ATOMIC_INIT(1), ATOMIC_INIT(0), NULL, 0, NULL })
#define MUTEX_LOCKED ((struct semaphore) \
	{ ATOMIC_INIT(0), ATOMIC_INIT(0), NULL, 1, NULL })

#define semaphore_owner(sem) ((sem)->owner)
#define sema_init(sem, val) atomic_set(&((sem)->count), val)

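/*
 * Illustrative sketch only, not part of the original header: the NOTE
 * above written out as plain C.  The helper name is hypothetical; the
 * real stores happen inside waking_non_zero() and the down() fast path
 * further down this file, hence the #if 0 guard.
 */
#if 0
static inline void __example_record_owner(struct semaphore *sem,
					   struct task_struct *tsk)
{
	sem->owner = tsk;	/* modify owner first ...             */
	wmb();			/* ... write barrier ...              */
	sem->owner_depth++;	/* ... then publish the new depth.    */
}
#endif
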
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

/* All three have custom assembly linkages.  */
extern void __down_failed(struct semaphore * sem);
extern void __down_failed_interruptible(struct semaphore * sem);
extern void __up_wakeup(struct semaphore * sem);

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 *
 * Tricky bits --
 *
 * (1) One task does two downs, no other contention
 *	initial state:
 *		count = 1, waking = 0, depth = undef;
 *	down(&sem)
 *		count = 0, waking = 0, depth = 1;
 *	down(&sem)
 *		atomic dec and test sends us to waking_non_zero via __down
 *		count = -1, waking = 0;
 *		conditional atomic dec on waking discovers no free slots
 *		count = -1, waking = 0;
 *		test for owner succeeds and we return ok.
 *		count = -1, waking = 0, depth = 2;
 *	up(&sem)
 *		dec depth
 *		count = -1, waking = 0, depth = 1;
 *		atomic inc and test sends us to slow path
 *		count = 0, waking = 0, depth = 1;
 *		notice !(depth < 0) and don't call __up.
 *	up(&sem)
 *		dec depth
 *		count = 0, waking = 0, depth = 0;
 *		atomic inc and test succeeds.
 *		count = 1, waking = 0, depth = 0;
 */

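/*
 * Illustrative sketch only, not part of the original header: scenario (1)
 * above as caller-side C.  The static semaphore and the function are
 * hypothetical, and down()/up() are only defined further down this file,
 * hence the #if 0 guard.
 */
#if 0
static struct semaphore example_sem = MUTEX;

static void example_recursive_holder(void)
{
	down(&example_sem);	/* count 1 -> 0, depth becomes 1          */
	down(&example_sem);	/* owner test succeeds: depth 2, no sleep */
	up(&example_sem);	/* depth back to 1, count -1 -> 0         */
	up(&example_sem);	/* depth 0, count 0 -> 1                  */
}
#endif
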
static inline void wake_one_more(struct semaphore * sem)
{
	atomic_inc(&sem->waking);
}

static inline int waking_non_zero(struct semaphore *sem,
				  struct task_struct *tsk)
{
	long owner_depth;
	int ret, tmp;

	owner_depth = sem->owner_depth;

	/* Atomic decrement, iff the value is > 0.  */
	__asm__ __volatile__(
	"1:	ldl_l %1,%2\n"
	"	ble %1,2f\n"
	"	subl %1,1,%0\n"
	"	stl_c %0,%2\n"
	"	beq %0,3f\n"
	"2:	mb\n"
	".section .text2,\"ax\"\n"
	"3:	br 1b\n"
	".previous"
	: "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
	: "0"(0));

	/* A recursive down() also succeeds: we already own the semaphore.  */
	ret |= ((owner_depth != 0) & (sem->owner == tsk));
	if (ret) {
		sem->owner = tsk;
		wmb();
		/* Don't use the old value, which is stale in the
		   !owner case.  */
		sem->owner_depth++;
	}

	return ret;
}

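/*
 * Illustrative sketch only, not part of the original header: what the
 * ldl_l/stl_c loop above computes, restated as plain (and therefore
 * NON-atomic) C.  The function name is hypothetical; the real code needs
 * load-locked/store-conditional so the test and decrement are one atomic
 * step.
 */
#if 0
static inline int __example_dec_if_positive(atomic_t *waking)
{
	int old = atomic_read(waking);

	if (old <= 0)
		return 0;		/* no wakeup slot to consume      */
	atomic_set(waking, old - 1);	/* racy here; atomic via stl_c    */
	return 1;			/* consumed one pending wakeup    */
}
#endif
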
/*
 * Whee.  Hidden out of line code is fun.  The contention cases are
 * handled out of line in kernel/sched.c; arch/alpha/lib/semaphore.S
 * takes care of making sure we can call it without clobbering regs.
 */

extern inline void down(struct semaphore * sem)
{
	/* Given that we have to use particular hard registers to
	   communicate with __down_failed anyway, reuse them in
	   the atomic operation as well.

	   __down_failed takes the semaphore address in $24, and
	   its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  */

	__asm__ __volatile__ (
		"/* semaphore down operation */\n"
		"1:	ldl_l $27,%3\n"
		"	subl $27,1,$27\n"
		"	mov $27,$28\n"
		"	stl_c $28,%0\n"
		"	beq $28,2f\n"
		"	blt $27,3f\n"
		/* Got the semaphore no contention.  Set owner and depth.  */
		"	stq $8,%1\n"
		"	lda $28,1\n"
		"	wmb\n"
		"	stq $28,%2\n"
		"4:	mb\n"
		".section .text2,\"ax\"\n"
		"2:	br 1b\n"
		"3:	lda $24,%3\n"
		"	jsr $28,__down_failed\n"
		"	ldgp $29,0($28)\n"
		"	br 4b\n"
		".previous"
		: "=m"(sem->count), "=m"(sem->owner), "=m"(sem->owner_depth)
		: "m"(sem->count)
		: "$24", "$27", "$28", "memory");
}

extern inline int down_interruptible(struct semaphore * sem)
{
	/* __down_failed_interruptible takes the semaphore address in $24,
	   and its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  The return
	   value is in $24.  */

	register int ret __asm__("$24");

	__asm__ __volatile__ (
		"/* semaphore down interruptible operation */\n"
		"1:	ldl_l $27,%4\n"
		"	subl $27,1,$27\n"
		"	mov $27,$28\n"
		"	stl_c $28,%1\n"
		"	beq $28,2f\n"
		"	blt $27,3f\n"
		/* Got the semaphore no contention.  Set owner and depth.  */
		"	stq $8,%2\n"
		"	lda $28,1\n"
		"	wmb\n"
		"	stq $28,%3\n"
		"	mov $31,$24\n"
		"4:	mb\n"
		".section .text2,\"ax\"\n"
		"2:	br 1b\n"
		"3:	lda $24,%4\n"
		"	jsr $28,__down_failed_interruptible\n"
		"	ldgp $29,0($28)\n"
		"	br 4b\n"
		".previous"
		: "=r"(ret), "=m"(sem->count), "=m"(sem->owner),
		  "=m"(sem->owner_depth)
		: "m"(sem->count)
		: "$27", "$28", "memory");

	return ret;
}

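/*
 * Illustrative sketch only, not part of the original header: typical
 * caller-side handling of the nonzero "interrupted by a signal" return
 * from down_interruptible().  The function is hypothetical, and -EINTR
 * needs <linux/errno.h>, which this header does not include, hence the
 * #if 0 guard.
 */
#if 0
static int example_take_interruptibly(struct semaphore *sem)
{
	if (down_interruptible(sem))
		return -EINTR;	/* a signal arrived while sleeping */

	/* ... protected work would go here ... */

	up(sem);
	return 0;
}
#endif
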
extern inline void up(struct semaphore * sem)
{
	/* Given that we have to use particular hard registers to
	   communicate with __up_wakeup anyway, reuse them in
	   the atomic operation as well.

	   __up_wakeup takes the semaphore address in $24, and
	   its return address in $28.  The pv is loaded as usual.
	   The gp is clobbered (in the module case) as usual.  */

	__asm__ __volatile__ (
		"/* semaphore up operation */\n"
		"	mb\n"
		"1:	ldl_l $27,%1\n"
		"	addl $27,1,$27\n"
		"	mov $27,$28\n"
		"	stl_c $28,%0\n"
		"	beq $28,2f\n"
		"	mb\n"
		"	ble $27,3f\n"
		"4:\n"
		".section .text2,\"ax\"\n"
		"2:	br 1b\n"
		"3:	lda $24,%1\n"
		"	bgt %2,4b\n"
		"	jsr $28,__up_wakeup\n"
		"	ldgp $29,0($28)\n"
		"	br 4b\n"
		".previous"
		: "=m"(sem->count)
		: "m"(sem->count), "r"(--sem->owner_depth)
		: "$24", "$27", "$28", "memory");
}

#endif