#ifndef _ALPHA_SEMAPHORE_H
#define _ALPHA_SEMAPHORE_H

/*
 * SMP- and interrupt-safe semaphores..
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1996, 2000 Richard Henderson
 */

#include <asm/current.h>
#include <asm/system.h>
#include <asm/atomic.h>
#include <linux/compiler.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
struct semaphore {
	/* Careful, inline assembly knows about the position of these two. */
	atomic_t count __attribute__((aligned(8)));
	atomic_t waking;		/* biased by -1 */

	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};
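/*
 * Layout sketch (illustrative): "count" is 8-byte aligned and "waking"
 * follows it directly, so on little-endian Alpha the pair forms a
 * single aligned quadword:
 *
 *	bits  0..31: count	(<= 0 means contended)
 *	bits 32..63: waking	(biased by -1)
 *
 * This is what lets __down_trylock and __up below update both fields
 * with a single ldq_l/stq_c pair.
 */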
#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name)		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif
#define __SEMAPHORE_INITIALIZER(name,count)		\
	{ ATOMIC_INIT(count), ATOMIC_INIT(-1),		\
	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
	  __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name)			\
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count)		\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name,0)
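/*
 * Example (illustrative; the names are hypothetical): compile-time
 * initialized semaphores.
 *
 *	static DECLARE_MUTEX(foo_sem);		// count = 1: unlocked
 *	static DECLARE_MUTEX_LOCKED(bar_sem);	// count = 0: locked
 */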
static inline void sema_init(struct semaphore *sem, int val)
{
	/*
	 * Logically,
	 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
	 * except that gcc produces better code initializing the fields
	 * one by one.
	 */

	atomic_set(&sem->count, val);
	atomic_set(&sem->waking, -1);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}
static inline void init_MUTEX(struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED(struct semaphore *sem)
{
	sema_init(sem, 0);
}
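/*
 * Example (illustrative, "struct foo" is hypothetical): initializing a
 * semaphore embedded in a dynamically allocated object.
 *
 *	struct foo {
 *		struct semaphore sem;
 *	};
 *
 *	void foo_setup(struct foo *f)
 *	{
 *		init_MUTEX(&f->sem);	// equivalent to sema_init(&f->sem, 1)
 *	}
 */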
extern void down(struct semaphore *);
extern void __down_failed(struct semaphore *);
extern int down_interruptible(struct semaphore *);
extern int __down_failed_interruptible(struct semaphore *);
extern int down_trylock(struct semaphore *);
extern void up(struct semaphore *);
extern void __up_wakeup(struct semaphore *);
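/*
 * Typical caller patterns (illustrative):
 *
 *	down(&sem);			// may sleep, uninterruptible
 *	... critical section ...
 *	up(&sem);
 *
 *	if (down_interruptible(&sem))	// non-zero: a signal woke us
 *		return -ERESTARTSYS;	// a common choice; -EINTR also seen
 *	... critical section ...
 *	up(&sem);
 */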
/*
 * Hidden out of line code is fun, but extremely messy.  Rely on newer
 * compilers to do a respectable job with this.  The contention cases
 * are handled out of line in arch/alpha/kernel/semaphore.c.
 */
static inline void __down(struct semaphore *sem)
{
	long count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		__down_failed(sem);
}
static inline int __down_interruptible(struct semaphore *sem)
{
	long count = atomic_dec_return(&sem->count);
	if (unlikely(count < 0))
		return __down_failed_interruptible(sem);
	return 0;
}
/*
 * down_trylock returns 0 on success, 1 if we failed to get the lock.
 *
 * We must manipulate count and waking simultaneously and atomically.
 * Do this by using ll/sc on the pair of 32-bit words.
 */
static inline int __down_trylock(struct semaphore * sem)
{
	long ret, tmp, tmp2, sub;

	/* "Equivalent" C.  Note that we have to do this all without
	   (taken) branches in order to be a valid ll/sc sequence.

	   do {
		tmp = ldq_l;
		sub = 0x0000000100000000;
		ret = ((int)tmp <= 0);		// count <= 0 ?
		// Note that if count=0, the decrement overflows into
		// waking, so cancel the 1 loaded above.  Also cancel
		// it if the lock was already free.
		if ((int)tmp >= 0) sub = 0;	// count >= 0 ?
		ret &= ((long)tmp < 0);		// waking < 0 ?
		sub += 1;
		if (ret) break;
		tmp -= sub;
		tmp = stq_c = tmp;
	   } while (tmp == 0);
	*/

	__asm__ __volatile__(
		"1:	ldq_l	%1,%4\n"
		"	lda	%3,1\n"
		"	addl	%1,0,%2\n"
		"	sll	%3,32,%3\n"
		"	cmple	%2,0,%0\n"
		"	cmovge	%2,0,%3\n"
		"	cmplt	%1,0,%2\n"
		"	addq	%3,1,%3\n"
		"	and	%0,%2,%0\n"
		"	bne	%0,2f\n"
		"	subq	%1,%3,%1\n"
		"	stq_c	%1,%4\n"
		"	beq	%1,3f\n"
		"2:	mb\n"
		".subsection 2\n"
		"3:	br	1b\n"
		".previous"
		: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
		: "m"(*sem)
		: "memory");

	return ret;
}
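/*
 * Example (illustrative): down_trylock() never sleeps, so it can be
 * used where blocking is not allowed; per the comment above, 0 means
 * the lock was acquired.  -EAGAIN here is just a common choice.
 *
 *	if (down_trylock(&sem))
 *		return -EAGAIN;		// contended, did not acquire
 *	... critical section ...
 *	up(&sem);
 */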
static inline void __up(struct semaphore *sem)
{
	long ret, tmp, tmp2, tmp3;

	/* We must manipulate count and waking simultaneously and atomically.
	   Otherwise we have races between up and __down_failed_interruptible
	   waking up on a signal.

	   "Equivalent" C.  Note that we have to do this all without
	   (taken) branches in order to be a valid ll/sc sequence.

	   do {
		tmp = ldq_l;
		ret = (int)tmp + 1;			// count += 1;
		tmp2 = tmp & 0xffffffff00000000;	// extract waking
		if (ret <= 0)				// still sleepers?
			tmp2 += 0x0000000100000000;	// waking += 1;
		tmp = ret & 0x00000000ffffffff;		// insert count
		tmp |= tmp2;				// insert waking;
		tmp = stq_c = tmp;
	   } while (tmp == 0);
	*/

	__asm__ __volatile__(
		"	mb\n"
		"1:	ldq_l	%1,%4\n"
		"	addl	%1,1,%0\n"
		"	zapnot	%1,0xf0,%2\n"
		"	addq	%2,%5,%3\n"
		"	cmovle	%0,%3,%2\n"
		"	zapnot	%0,0x0f,%1\n"
		"	bis	%1,%2,%1\n"
		"	stq_c	%1,%4\n"
		"	beq	%1,3f\n"
		"2:\n"
		".subsection 2\n"
		"3:	br	1b\n"
		".previous"
		: "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(tmp3)
		: "m"(*sem), "r"(0x0000000100000000)
		: "memory");

	if (unlikely(ret <= 0))
		__up_wakeup(sem);
}
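/*
 * Worked example (illustrative) of the bookkeeping, starting from an
 * unlocked mutex (count = 1, waking = -1):
 *
 *	down()	count  1 -> 0	acquired, no contention
 *	down()	count  0 -> -1	goes negative: caller enters __down_failed
 *	up()	count -1 -> 0	new count <= 0, so waking -1 -> 0 and
 *				__up_wakeup releases the sleeper
 */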
#if !WAITQUEUE_DEBUG && !defined(CONFIG_DEBUG_SEMAPHORE)
extern inline void down(struct semaphore *sem)
{
	__down(sem);
}
extern inline int down_interruptible(struct semaphore *sem)
{
	return __down_interruptible(sem);
}
extern inline int down_trylock(struct semaphore *sem)
{
	return __down_trylock(sem);
}
extern inline void up(struct semaphore *sem)
{
	__up(sem);
}
#endif

#endif /* _ALPHA_SEMAPHORE_H */