#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H

/*
 *  include/asm-s390/rwsem.h
 *
 *  S390 version
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks.
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe readers) waiting (in which case it goes to
 * sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the S flag.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
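
/*
 * Worked example (added for illustration; the values follow directly from the
 * MSW/LSW layout described above, using the 31-bit bias constants defined
 * further down):
 *
 *	0x00000000	unlocked
 *	0x00000001	one active reader
 *	0x00000002	two active readers
 *	0xffff0001	one active writer (WAITING_BIAS + ACTIVE_BIAS)
 *	0xfffe0001	one active writer plus one queued waiter
 */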

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_write(struct rw_semaphore *);

/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
};

#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/*
 * initialisation
 */
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
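
/*
 * Usage sketch (illustrative, not part of the original header; the names
 * example_sem and dyn_sem are hypothetical). Callers normally go through the
 * linux/rwsem.h wrappers such as down_read()/up_read() and
 * down_write()/up_write() rather than the __ helpers below:
 *
 *	static DECLARE_RWSEM(example_sem);	// static initialisation
 *
 *	struct rw_semaphore dyn_sem;
 *	init_rwsem(&dyn_sem);			// run-time initialisation
 */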

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;

	/* add the read bias to the count with a compare-and-swap retry loop */
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   ahi   %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   aghi  %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
	if (old < 0)
		rwsem_down_read_failed(sem);
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	signed long old, new;

	/* give up if the count is negative (writer active or waiting),
	   otherwise add the read bias */
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: ltr   %1,%0\n"
		"   jm    1f\n"
		"   ahi   %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b\n"
		"1:"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: ltgr  %1,%0\n"
		"   jm    1f\n"
		"   aghi  %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b\n"
		"1:"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (RWSEM_ACTIVE_READ_BIAS) : "cc", "memory" );
	return old >= 0 ? 1 : 0;
}

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	/* add the write bias; any old value other than 0 means contention */
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   a     %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   ag    %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (old != 0)
		rwsem_down_write_failed(sem);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	signed long old;

	/* install the write bias only if the count is currently 0 (unlocked) */
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%2)\n"
		"0: ltr   %0,%0\n"
		"   jnz   1f\n"
		"   cs    %0,%4,0(%2)\n"
		"   jl    0b\n"
#else /* __s390x__ */
		"   lg    %0,0(%2)\n"
		"0: ltgr  %0,%0\n"
		"   jnz   1f\n"
		"   csg   %0,%4,0(%2)\n"
		"   jl    0b\n"
#endif /* __s390x__ */
		"1:"
		: "=&d" (old), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "d" (RWSEM_ACTIVE_WRITE_BIAS) : "cc", "memory" );
	return (old == RWSEM_UNLOCKED_VALUE) ? 1 : 0;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;

	/* subtract the read bias; wake waiters once no active locks remain */
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   ahi   %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   aghi  %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count),
		  "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	/* subtract the write bias; wake waiters if any are queued and no
	   active lock remains */
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   a     %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   ag    %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;

	tmp = -RWSEM_WAITING_BIAS;
	/* adding -RWSEM_WAITING_BIAS turns the writer's bias into a plain
	   read bias */
	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   a     %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   ag    %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "m" (tmp)
		: "cc", "memory" );
	if (new > 1)
		rwsem_downgrade_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   ar    %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   agr   %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory" );
}

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;

	__asm__ __volatile__(
#ifndef __s390x__
		"   l     %0,0(%3)\n"
		"0: lr    %1,%0\n"
		"   ar    %1,%5\n"
		"   cs    %0,%1,0(%3)\n"
		"   jl    0b"
#else /* __s390x__ */
		"   lg    %0,0(%3)\n"
		"0: lgr   %1,%0\n"
		"   agr   %1,%5\n"
		"   csg   %0,%1,0(%3)\n"
		"   jl    0b"
#endif /* __s390x__ */
		: "=&d" (old), "=&d" (new), "=m" (sem->count)
		: "a" (&sem->count), "m" (sem->count), "d" (delta)
		: "cc", "memory" );
	return new;
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */