/*
 *  include/asm-s390/rwsem.h
 *
 *  Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

/*
 * The MSW of the count is the negated number of active writers and waiting
 * lockers, and the LSW is the total number of active locks
 *
 * The lock count is initialized to 0 (no active and no waiting lockers).
 *
 * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
 * uncontended lock. This can be determined because XADD returns the old value.
 * Readers increment by 1 and see a positive value when uncontended, negative
 * if there are writers (and maybe) readers waiting (in which case it goes to
 * sleep).
 *
 * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
 * be extended to 65534 by manually checking the whole MSW rather than relying
 * on the sign of the count.
 *
 * The value of ACTIVE_BIAS supports up to 65535 active processes.
 *
 * This should be totally fair - if anything is waiting, a process that wants a
 * lock will go to the back of the queue. When the currently active lock is
 * released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
 * front, then they'll all be woken up, but no other readers will be.
 */
40 #ifndef _LINUX_RWSEM_H
41 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
46 #include <linux/list.h>
47 #include <linux/spinlock.h>
51 extern struct rw_semaphore
*rwsem_down_read_failed(struct rw_semaphore
*);
52 extern struct rw_semaphore
*rwsem_down_write_failed(struct rw_semaphore
*);
53 extern struct rw_semaphore
*rwsem_wake(struct rw_semaphore
*);
54 extern struct rw_semaphore
*rwsem_downgrade_wake(struct rw_semaphore
*);
55 extern struct rw_semaphore
*rwsem_downgrade_write(struct rw_semaphore
*);
58 * the semaphore definition
63 struct list_head wait_list
;
64 #ifdef CONFIG_DEBUG_LOCK_ALLOC
65 struct lockdep_map dep_map
;
/*
 * Bias values: the LSW/low half counts active lockers, the MSW/high half
 * holds the negated count of writers and waiters (see header comment).
 * 31-bit and 64-bit variants; the #else branch is grounded by the visible
 * "#endif. __s390x__" marker.
 */
#ifndef __s390x__
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#else /* __s390x__ */
#define RWSEM_UNLOCKED_VALUE	0x0000000000000000L
#define RWSEM_ACTIVE_BIAS	0x0000000000000001L
#define RWSEM_ACTIVE_MASK	0x00000000ffffffffL
#define RWSEM_WAITING_BIAS	(-0x0000000100000000L)
#endif /* __s390x__ */
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/* optional lockdep member initializer, empty when lockdep is off */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

/* static initializer: unlocked count, unlocked spinlock, empty wait list */
#define __RWSEM_INITIALIZER(name) \
 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
   __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
100 static inline void init_rwsem(struct rw_semaphore
*sem
)
102 sem
->count
= RWSEM_UNLOCKED_VALUE
;
103 spin_lock_init(&sem
->wait_lock
);
104 INIT_LIST_HEAD(&sem
->wait_list
);
extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

/*
 * lockdep-aware initializer: registers a distinct lock class key per
 * init_rwsem() call site; as a macro it shadows the inline function of
 * the same name defined above.
 */
#define init_rwsem(sem)				\
do {						\
	static struct lock_class_key __key;	\
						\
	__init_rwsem((sem), #sem, &__key);	\
} while (0)
121 static inline void __down_read(struct rw_semaphore
*sem
)
123 signed long old
, new;
125 __asm__
__volatile__(
132 #else /* __s390x__ */
138 #endif /* __s390x__ */
139 : "=&d" (old
), "=&d" (new), "=m" (sem
->count
)
140 : "a" (&sem
->count
), "m" (sem
->count
),
141 "i" (RWSEM_ACTIVE_READ_BIAS
) : "cc", "memory" );
143 rwsem_down_read_failed(sem
);
147 * trylock for reading -- returns 1 if successful, 0 if contention
149 static inline int __down_read_trylock(struct rw_semaphore
*sem
)
151 signed long old
, new;
153 __asm__
__volatile__(
162 #else /* __s390x__ */
170 #endif /* __s390x__ */
171 : "=&d" (old
), "=&d" (new), "=m" (sem
->count
)
172 : "a" (&sem
->count
), "m" (sem
->count
),
173 "i" (RWSEM_ACTIVE_READ_BIAS
) : "cc", "memory" );
174 return old
>= 0 ? 1 : 0;
180 static inline void __down_write_nested(struct rw_semaphore
*sem
, int subclass
)
182 signed long old
, new, tmp
;
184 tmp
= RWSEM_ACTIVE_WRITE_BIAS
;
185 __asm__
__volatile__(
192 #else /* __s390x__ */
198 #endif /* __s390x__ */
199 : "=&d" (old
), "=&d" (new), "=m" (sem
->count
)
200 : "a" (&sem
->count
), "m" (sem
->count
), "m" (tmp
)
203 rwsem_down_write_failed(sem
);
/*
 * lock for writing (no lockdep subclass)
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}
212 * trylock for writing -- returns 1 if successful, 0 if contention
214 static inline int __down_write_trylock(struct rw_semaphore
*sem
)
218 __asm__
__volatile__(
225 #else /* __s390x__ */
231 #endif /* __s390x__ */
233 : "=&d" (old
), "=m" (sem
->count
)
234 : "a" (&sem
->count
), "m" (sem
->count
),
235 "d" (RWSEM_ACTIVE_WRITE_BIAS
) : "cc", "memory" );
236 return (old
== RWSEM_UNLOCKED_VALUE
) ? 1 : 0;
240 * unlock after reading
242 static inline void __up_read(struct rw_semaphore
*sem
)
244 signed long old
, new;
246 __asm__
__volatile__(
253 #else /* __s390x__ */
259 #endif /* __s390x__ */
260 : "=&d" (old
), "=&d" (new), "=m" (sem
->count
)
261 : "a" (&sem
->count
), "m" (sem
->count
),
262 "i" (-RWSEM_ACTIVE_READ_BIAS
)
265 if ((new & RWSEM_ACTIVE_MASK
) == 0)
270 * unlock after writing
272 static inline void __up_write(struct rw_semaphore
*sem
)
274 signed long old
, new, tmp
;
276 tmp
= -RWSEM_ACTIVE_WRITE_BIAS
;
277 __asm__
__volatile__(
284 #else /* __s390x__ */
290 #endif /* __s390x__ */
291 : "=&d" (old
), "=&d" (new), "=m" (sem
->count
)
292 : "a" (&sem
->count
), "m" (sem
->count
), "m" (tmp
)
295 if ((new & RWSEM_ACTIVE_MASK
) == 0)
300 * downgrade write lock to read lock
302 static inline void __downgrade_write(struct rw_semaphore
*sem
)
304 signed long old
, new, tmp
;
306 tmp
= -RWSEM_WAITING_BIAS
;
307 __asm__
__volatile__(
314 #else /* __s390x__ */
320 #endif /* __s390x__ */
321 : "=&d" (old
), "=&d" (new), "=m" (sem
->count
)
322 : "a" (&sem
->count
), "m" (sem
->count
), "m" (tmp
)
325 rwsem_downgrade_wake(sem
);
329 * implement atomic add functionality
331 static inline void rwsem_atomic_add(long delta
, struct rw_semaphore
*sem
)
333 signed long old
, new;
335 __asm__
__volatile__(
342 #else /* __s390x__ */
348 #endif /* __s390x__ */
349 : "=&d" (old
), "=&d" (new), "=m" (sem
->count
)
350 : "a" (&sem
->count
), "m" (sem
->count
), "d" (delta
)
355 * implement exchange and add functionality
357 static inline long rwsem_atomic_update(long delta
, struct rw_semaphore
*sem
)
359 signed long old
, new;
361 __asm__
__volatile__(
368 #else /* __s390x__ */
374 #endif /* __s390x__ */
375 : "=&d" (old
), "=&d" (new), "=m" (sem
->count
)
376 : "a" (&sem
->count
), "m" (sem
->count
), "d" (delta
)
381 static inline int rwsem_is_locked(struct rw_semaphore
*sem
)
383 return (sem
->count
!= 0);
386 #endif /* __KERNEL__ */
387 #endif /* _S390_RWSEM_H */