#ifndef _ASM_PARISC_SEMAPHORE_H
#define _ASM_PARISC_SEMAPHORE_H

#include <linux/linkage.h>

/*
 * SMP- and interrupt-safe semaphores.
 *
 * (C) Copyright 1996 Linus Torvalds
 * SuperH version by Niibe Yutaka
 */

/* if you're going to use out-of-line slowpaths, use .section .lock.text,
 * not .text.lock or the -ffunction-sections monster will eat you alive
 */
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/atomic.h>

struct semaphore {
	atomic_t count;
	int waking;
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	__SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
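
/*
 * Illustrative usage sketch (not part of the original header; the
 * identifiers below are examples only):
 *
 *	static DECLARE_MUTEX(my_sem);	   declares a semaphore with count 1
 *
 *	down(&my_sem);			   sleeps if the count is already 0
 *	...critical section...
 *	up(&my_sem);			   wakes one waiter, if any
 *
 * down_interruptible() behaves like down() but returns non-zero if the
 * sleep was interrupted by a signal; callers must check the return value
 * before entering the critical section.
 */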

extern inline void sema_init (struct semaphore *sem, int val)
{
/*
 *	*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
 *
 * I'd rather use the more flexible initialization above, but sadly
 * GCC 2.7.2.3 emits a bogus warning.  EGCS doesn't.  Oh well.
 */
	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int  __down_failed_interruptible(void /* params in registers */);
asmlinkage int  __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);

asmlinkage void __down(struct semaphore * sem);
asmlinkage int  __down_interruptible(struct semaphore * sem);
asmlinkage int  __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);

extern spinlock_t semaphore_wake_lock;

extern __inline__ void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

extern __inline__ int down_interruptible(struct semaphore * sem)
{
	int ret = 0;
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	return ret;
}

extern __inline__ int down_trylock(struct semaphore * sem)
{
	int ret = 0;
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_trylock(sem);
	return ret;
}

/*
 * Note! This is subtle. We jump to wake people up only if
 * the semaphore was negative (== somebody was waiting on it).
 */
extern __inline__ void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}
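
/*
 * Worked example of the count (illustrative, not from the original
 * source): for a semaphore declared with DECLARE_MUTEX, count starts
 * at 1.  The first down() drops it to 0 and proceeds; a second down()
 * drops it to -1 and sleeps in __down().  The matching up() raises the
 * count from -1 to 0, which is <= 0, so __up() is called to wake the
 * sleeper.  An uncontended up() raises 0 back to 1 and skips the wakeup.
 */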

/* rw mutexes (should that be mutices? =) -- throw rw
 * spinlocks and semaphores together, and this is what we
 * end up with...
 *
 * The lock is initialized to BIAS.  This way, a writer
 * subtracts BIAS and gets 0 for the case of an uncontended
 * lock.  Readers decrement by 1 and see a positive value
 * when uncontended, negative if there are writers waiting
 * (in which case it goes to sleep).
 *
 * The value 0x01000000 supports up to 128 processors and
 * lots of processes.  BIAS must be chosen such that subtracting
 * BIAS once per CPU will result in the long remaining
 * negative.
 *
 * In terms of fairness, this should result in the lock
 * flopping back and forth between readers and writers
 * alternately.
 */
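
/*
 * Illustrative arithmetic (not from the original comment), using
 * RW_LOCK_BIAS == 0x01000000:
 *
 *	count == BIAS		lock is free
 *	0 < count < BIAS	held by (BIAS - count) readers
 *	count == 0		held by one writer, uncontended
 *	count < 0		contended: a writer holds or is waiting,
 *				and newcomers go to sleep
 */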

struct rw_semaphore {
	atomic_t		count;
	volatile unsigned char	write_bias_granted;
	volatile unsigned char	read_bias_granted;
	volatile unsigned char	pad1;
	volatile unsigned char	pad2;
	wait_queue_head_t	wait;
	wait_queue_head_t	write_bias_wait;
#if WAITQUEUE_DEBUG
	long			__magic;
	atomic_t		readers;
	atomic_t		writers;
#endif
};

#if WAITQUEUE_DEBUG
#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define RW_LOCK_BIAS		0x01000000

#define __RWSEM_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
	__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }

#define __DECLARE_RWSEM_GENERIC(name,count) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)

#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
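
/*
 * Illustrative usage sketch (not part of the original header; the
 * identifiers below are examples only):
 *
 *	static DECLARE_RWSEM(my_rwsem);		count starts at RW_LOCK_BIAS
 *
 *	down_read(&my_rwsem);			many readers may hold this
 *	...read-only access...
 *	up_read(&my_rwsem);
 *
 *	down_write(&my_rwsem);			exclusive; excludes readers
 *	...modify the shared data...
 *	up_write(&my_rwsem);
 */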

extern inline void init_rwsem(struct rw_semaphore *sem)
{
	atomic_set(&sem->count, RW_LOCK_BIAS);
	sem->read_bias_granted = 0;
	sem->write_bias_granted = 0;
	init_waitqueue_head(&sem->wait);
	init_waitqueue_head(&sem->write_bias_wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
	atomic_set(&sem->readers, 0);
	atomic_set(&sem->writers, 0);
#endif
}

#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME
extern struct rw_semaphore *__build_read_lock(struct rw_semaphore *sem, const char *what);
extern struct rw_semaphore *__build_write_lock(struct rw_semaphore *sem, const char *what);
#endif

/* we use FASTCALL convention for the helpers */
extern struct rw_semaphore *FASTCALL(__down_read_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(__down_write_failed(struct rw_semaphore *sem));
extern struct rw_semaphore *FASTCALL(__rwsem_wake(struct rw_semaphore *sem));

extern inline void down_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	if (sem->__magic != (long)&sem->__magic)
		BUG();
#endif
#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME
	__build_read_lock(sem, "__down_read_failed");
#endif
#if WAITQUEUE_DEBUG
	if (sem->write_bias_granted)
		BUG();
	if (atomic_read(&sem->writers))
		BUG();
	atomic_inc(&sem->readers);
#endif
}

extern inline void down_write(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	if (sem->__magic != (long)&sem->__magic)
		BUG();
#endif
#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME
	__build_write_lock(sem, "__down_write_failed");
#endif
#if WAITQUEUE_DEBUG
	if (atomic_read(&sem->writers))
		BUG();
	if (atomic_read(&sem->readers))
		BUG();
	if (sem->read_bias_granted)
		BUG();
	if (sem->write_bias_granted)
		BUG();
	atomic_inc(&sem->writers);
#endif
}

/* When a reader does a release, the only significant
 * case is when there was a writer waiting, and we've
 * bumped the count to 0: we must wake the writer up.
 */
extern inline void __up_read(struct rw_semaphore *sem)
{
}

/* releasing the writer is easy -- just release it and
 * wake up any sleepers.
 */
extern inline void __up_write(struct rw_semaphore *sem)
{
}

extern inline void up_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	if (sem->write_bias_granted)
		BUG();
	if (atomic_read(&sem->writers))
		BUG();
	atomic_dec(&sem->readers);
#endif
	__up_read(sem);
}

extern inline void up_write(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	if (sem->read_bias_granted)
		BUG();
	if (sem->write_bias_granted)
		BUG();
	if (atomic_read(&sem->readers))
		BUG();
	if (atomic_read(&sem->writers) != 1)
		BUG();
	atomic_dec(&sem->writers);
#endif
	__up_write(sem);
}

#endif /* _ASM_PARISC_SEMAPHORE_H */