 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
10 #error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
15 #include <linux/compiler.h>
16 #include <linux/list.h>
17 #include <linux/spinlock.h>
/*
 * Out-of-line slow paths, entered when a fast path below detects
 * contention on the semaphore.
 */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 * the semaphore definition
/*
 * Bias values for the 64-bit ->count word.  The low 32 bits count active
 * holders (RWSEM_ACTIVE_MASK); each waiter/writer also subtracts
 * RWSEM_WAITING_BIAS from the high half, so a negative count signals
 * contention.  A writer applies both biases at once
 * (RWSEM_ACTIVE_WRITE_BIAS = waiting + active).
 */
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
38 struct list_head wait_list
;
/*
 * Static initializer: unlocked count, unlocked spinlock, empty wait list.
 * Field order must match struct rw_semaphore.
 */
#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	LIST_HEAD_INIT((name).wait_list) }
/* Define and statically initialize a rw_semaphore in one step. */
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
48 static inline void init_rwsem(struct rw_semaphore
*sem
)
50 sem
->count
= RWSEM_UNLOCKED_VALUE
;
51 spin_lock_init(&sem
->wait_lock
);
52 INIT_LIST_HEAD(&sem
->wait_list
);
55 static inline void __down_read(struct rw_semaphore
*sem
)
59 oldcount
= sem
->count
;
60 sem
->count
+= RWSEM_ACTIVE_READ_BIAS
;
72 :"=&r" (oldcount
), "=m" (sem
->count
), "=&r" (temp
)
73 :"Ir" (RWSEM_ACTIVE_READ_BIAS
), "m" (sem
->count
) : "memory");
75 if (unlikely(oldcount
< 0))
76 rwsem_down_read_failed(sem
);
80 * trylock for reading -- returns 1 if successful, 0 if contention
82 static inline int __down_read_trylock(struct rw_semaphore
*sem
)
88 new = res
+ RWSEM_ACTIVE_READ_BIAS
;
92 res
= cmpxchg(&sem
->count
, old
, new);
94 return res
>= 0 ? 1 : 0;
97 static inline void __down_write(struct rw_semaphore
*sem
)
101 oldcount
= sem
->count
;
102 sem
->count
+= RWSEM_ACTIVE_WRITE_BIAS
;
105 __asm__
__volatile__(
114 :"=&r" (oldcount
), "=m" (sem
->count
), "=&r" (temp
)
115 :"Ir" (RWSEM_ACTIVE_WRITE_BIAS
), "m" (sem
->count
) : "memory");
117 if (unlikely(oldcount
))
118 rwsem_down_write_failed(sem
);
122 * trylock for writing -- returns 1 if successful, 0 if contention
124 static inline int __down_write_trylock(struct rw_semaphore
*sem
)
126 long ret
= cmpxchg(&sem
->count
, RWSEM_UNLOCKED_VALUE
,
127 RWSEM_ACTIVE_WRITE_BIAS
);
128 if (ret
== RWSEM_UNLOCKED_VALUE
)
133 static inline void __up_read(struct rw_semaphore
*sem
)
137 oldcount
= sem
->count
;
138 sem
->count
-= RWSEM_ACTIVE_READ_BIAS
;
141 __asm__
__volatile__(
150 :"=&r" (oldcount
), "=m" (sem
->count
), "=&r" (temp
)
151 :"Ir" (RWSEM_ACTIVE_READ_BIAS
), "m" (sem
->count
) : "memory");
153 if (unlikely(oldcount
< 0))
154 if ((int)oldcount
- RWSEM_ACTIVE_READ_BIAS
== 0)
158 static inline void __up_write(struct rw_semaphore
*sem
)
162 sem
->count
-= RWSEM_ACTIVE_WRITE_BIAS
;
166 __asm__
__volatile__(
176 :"=&r" (count
), "=m" (sem
->count
), "=&r" (temp
)
177 :"Ir" (RWSEM_ACTIVE_WRITE_BIAS
), "m" (sem
->count
) : "memory");
185 * downgrade write lock to read lock
187 static inline void __downgrade_write(struct rw_semaphore
*sem
)
191 oldcount
= sem
->count
;
192 sem
->count
-= RWSEM_WAITING_BIAS
;
195 __asm__
__volatile__(
204 :"=&r" (oldcount
), "=m" (sem
->count
), "=&r" (temp
)
205 :"Ir" (-RWSEM_WAITING_BIAS
), "m" (sem
->count
) : "memory");
207 if (unlikely(oldcount
< 0))
208 rwsem_downgrade_wake(sem
);
211 static inline void rwsem_atomic_add(long val
, struct rw_semaphore
*sem
)
217 __asm__
__volatile__(
225 :"=&r" (temp
), "=m" (sem
->count
)
226 :"Ir" (val
), "m" (sem
->count
));
230 static inline long rwsem_atomic_update(long val
, struct rw_semaphore
*sem
)
237 __asm__
__volatile__(
246 :"=&r" (ret
), "=m" (sem
->count
), "=&r" (temp
)
247 :"Ir" (val
), "m" (sem
->count
));
253 static inline int rwsem_is_locked(struct rw_semaphore
*sem
)
255 return (sem
->count
!= 0);
258 #endif /* __KERNEL__ */
259 #endif /* _ALPHA_RWSEM_H */