include/asm-sparc64/semaphore.h
#ifndef _SPARC64_SEMAPHORE_H
#define _SPARC64_SEMAPHORE_H

/* These are actually reasonable on the V9. */
#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/system.h>
#include <linux/wait.h>

struct semaphore {
	atomic_t count;
	atomic_t waking;
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};
#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), ATOMIC_INIT(0), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	__SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
extern inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	atomic_set(&sem->waking, 0);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern int __down_trylock(struct semaphore * sem);
extern void __up(struct semaphore * sem);
extern __inline__ void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	__asm__ __volatile__("
	1:	lduw	[%0], %%g5
		sub	%%g5, 1, %%g7
		cas	[%0], %%g5, %%g7
		cmp	%%g5, %%g7
		bne,pn	%%icc, 1b
		 cmp	%%g7, 1
		bl,pn	%%icc, 3f
		 membar	#StoreStore
	2:
	.subsection 2
	3:	mov	%0, %%g5
		save	%%sp, -160, %%sp
		mov	%%g1, %%l1
		mov	%%g2, %%l2
		mov	%%g3, %%l3
		call	%1
		 mov	%%g5, %%o0
		mov	%%l1, %%g1
		mov	%%l2, %%g2
		ba,pt	%%xcc, 2b
		 restore %%l3, %%g0, %%g3
	.previous\n"
	: : "r" (sem), "i" (__down)
	: "g5", "g7", "memory", "cc");
}
extern __inline__ int down_interruptible(struct semaphore *sem)
{
	int ret = 0;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	__asm__ __volatile__("
	1:	lduw	[%2], %%g5
		sub	%%g5, 1, %%g7
		cas	[%2], %%g5, %%g7
		cmp	%%g5, %%g7
		bne,pn	%%icc, 1b
		 cmp	%%g7, 1
		bl,pn	%%icc, 3f
		 membar	#StoreStore
	2:
	.subsection 2
	3:	mov	%2, %%g5
		save	%%sp, -160, %%sp
		mov	%%g1, %%l1
		mov	%%g2, %%l2
		mov	%%g3, %%l3
		call	%3
		 mov	%%g5, %%o0
		mov	%%l1, %%g1
		mov	%%l2, %%g2
		mov	%%l3, %%g3
		ba,pt	%%xcc, 2b
		 restore %%o0, %%g0, %0
	.previous\n"
	: "=r" (ret)
	: "0" (ret), "r" (sem), "i" (__down_interruptible)
	: "g5", "g7", "memory", "cc");
	return ret;
}
extern inline int down_trylock(struct semaphore *sem)
{
	int ret = 0;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	__asm__ __volatile__("
	1:	lduw	[%2], %%g5
		sub	%%g5, 1, %%g7
		cas	[%2], %%g5, %%g7
		cmp	%%g5, %%g7
		bne,pn	%%icc, 1b
		 cmp	%%g7, 1
		bl,pn	%%icc, 3f
		 membar	#StoreStore
	2:
	.subsection 2
	3:	mov	%2, %%g5
		save	%%sp, -160, %%sp
		mov	%%g1, %%l1
		mov	%%g2, %%l2
		mov	%%g3, %%l3
		call	%3
		 mov	%%g5, %%o0
		mov	%%l1, %%g1
		mov	%%l2, %%g2
		mov	%%l3, %%g3
		ba,pt	%%xcc, 2b
		 restore %%o0, %%g0, %0
	.previous\n"
	: "=r" (ret)
	: "0" (ret), "r" (sem), "i" (__down_trylock)
	: "g5", "g7", "memory", "cc");
	return ret;
}
extern __inline__ void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	__asm__ __volatile__("
	membar	#StoreLoad | #LoadLoad
	1:	lduw	[%0], %%g5
		add	%%g5, 1, %%g7
		cas	[%0], %%g5, %%g7
		cmp	%%g5, %%g7
		bne,pn	%%icc, 1b
		 addcc	%%g7, 1, %%g0
		ble,pn	%%icc, 3f
		 nop
	2:
	.subsection 2
	3:	mov	%0, %%g5
		save	%%sp, -160, %%sp
		mov	%%g1, %%l1
		mov	%%g2, %%l2
		mov	%%g3, %%l3
		call	%1
		 mov	%%g5, %%o0
		mov	%%l1, %%g1
		mov	%%l2, %%g2
		ba,pt	%%xcc, 2b
		 restore %%l3, %%g0, %%g3
	.previous\n"
	: : "r" (sem), "i" (__up)
	: "g5", "g7", "memory", "cc");
}
/* rw mutexes (should that be mutices? =) -- throw rw
 * spinlocks and semaphores together, and this is what we
 * end up with...
 *
 * The lock is initialized to BIAS.  This way, a writer
 * subtracts BIAS and gets 0 for the case of an uncontended
 * lock.  Readers decrement by 1 and see a positive value
 * when uncontended, negative if there are writers waiting
 * (in which case it goes to sleep).
 *
 * The value 0x01000000 supports up to 128 processors and
 * lots of processes.  BIAS must be chosen such that subtracting
 * BIAS once per CPU will result in the int remaining
 * negative.
 * In terms of fairness, this should result in the lock
 * flopping back and forth between readers and writers
 * under heavy use.
 *
 *		-ben
 *
 * Once we start supporting machines with more than 128 CPUs,
 * we should go for using a 64bit atomic type instead of 32bit
 * as counter.  We shall probably go for bias 0x80000000 then,
 * so that a single sethi can set it.
 *
 *		-jj
 */
#define RW_LOCK_BIAS		0x01000000
#define RW_LOCK_BIAS_STR	"0x01000000"
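
/* Compiled-out sketch of the bias arithmetic described above, with
 * hypothetical names: a reader takes one unit from count, a writer
 * takes the whole bias, so any queued writer drives a reader's result
 * negative and any contention leaves a writer with a nonzero result.
 */
#if 0
static int rwsem_model_read_trylock(int *count)
{
	*count -= 1;			/* subcc %g5, 1, %g7 */
	return *count >= 0;		/* negative -> sleep */
}

static int rwsem_model_write_trylock(int *count)
{
	*count -= RW_LOCK_BIAS;		/* subcc %g5, BIAS, %g7 */
	return *count == 0;		/* nonzero -> sleep     */
}
#endif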
struct rw_semaphore {
	int count;
	/* So that this does not have to be a 64bit type,
	 * we'll use le bitops on it which use casa instead of casx:
	 * bit 0 means read bias granted;
	 * bit 1 means write bias granted.
	 */
	unsigned granted;
	wait_queue_head_t wait;
	wait_queue_head_t write_bias_wait;
#if WAITQUEUE_DEBUG
	long __magic;
	atomic_t readers;
	atomic_t writers;
#endif
};
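
/* Compiled-out illustration of the granted bit layout described in the
 * struct comment above; the enumerator names are hypothetical.
 */
#if 0
enum rwsem_granted_bits {
	RWSEM_READ_BIAS_GRANTED		= 1 << 0,
	RWSEM_WRITE_BIAS_GRANTED	= 1 << 1,
};
#endif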
#if WAITQUEUE_DEBUG
#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name,count) \
{ (count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \
	__SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }

#define __DECLARE_RWSEM_GENERIC(name,count) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)

#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
extern inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RW_LOCK_BIAS;
	sem->granted = 0;
	init_waitqueue_head(&sem->wait);
	init_waitqueue_head(&sem->write_bias_wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
	atomic_set(&sem->readers, 0);
	atomic_set(&sem->writers, 0);
#endif
}

extern void __down_read_failed(/* Special calling convention */ void);
extern void __down_write_failed(/* Special calling convention */ void);
extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers);
extern inline void down_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	__asm__ __volatile__("
	1:	lduw	[%0], %%g5
		subcc	%%g5, 1, %%g7
		cas	[%0], %%g5, %%g7
		bneg,pn	%%icc, 3f
		 cmp	%%g5, %%g7
		bne,pn	%%icc, 1b
		 membar	#StoreStore
	2:
	.subsection 2
	3:	bne,pn	%%icc, 1b
		 mov	%0, %%g7
		save	%%sp, -160, %%sp
		mov	%%g1, %%l1
		mov	%%g2, %%l2
		call	%1
		 mov	%%g3, %%l3
		mov	%%l1, %%g1
		mov	%%l2, %%g2
		ba,pt	%%xcc, 2b
		 restore %%l3, %%g0, %%g3
	.previous\n"
	: : "r" (sem), "i" (__down_read_failed)
	: "g5", "g7", "memory", "cc");
#if WAITQUEUE_DEBUG
	if (test_le_bit(1, &sem->granted))
		BUG();
	if (atomic_read(&sem->writers))
		BUG();
	atomic_inc(&sem->readers);
#endif
}
extern inline void down_write(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif
	__asm__ __volatile__("
	1:	lduw	[%0], %%g5
		sethi	%%hi(" RW_LOCK_BIAS_STR "), %%g7
		subcc	%%g5, %%g7, %%g7
		cas	[%0], %%g5, %%g7
		bne,pn	%%icc, 3f
		 cmp	%%g5, %%g7
		bne,pn	%%icc, 1b
		 membar	#StoreStore
	2:
	.subsection 2
	3:	bne,pn	%%icc, 1b
		 mov	%0, %%g7
		save	%%sp, -160, %%sp
		mov	%%g1, %%l1
		mov	%%g2, %%l2
		call	%1
		 mov	%%g3, %%l3
		mov	%%l1, %%g1
		mov	%%l2, %%g2
		ba,pt	%%xcc, 2b
		 restore %%l3, %%g0, %%g3
	.previous\n"
	: : "r" (sem), "i" (__down_write_failed)
	: "g5", "g7", "memory", "cc");
#if WAITQUEUE_DEBUG
	if (atomic_read(&sem->writers))
		BUG();
	if (atomic_read(&sem->readers))
		BUG();
	if (test_le_bit(0, &sem->granted))
		BUG();
	if (test_le_bit(1, &sem->granted))
		BUG();
	atomic_inc(&sem->writers);
#endif
}
/* When a reader does a release, the only significant
 * case is when there was a writer waiting, and we've
 * bumped the count to 0: we must wake the writer up.
 */
extern inline void __up_read(struct rw_semaphore *sem)
{
	__asm__ __volatile__("
	membar	#StoreLoad | #LoadLoad
	1:	lduw	[%0], %%g5
		addcc	%%g5, 1, %%g7
		cas	[%0], %%g5, %%g7
		be,pn	%%icc, 3f
		 cmp	%%g5, %%g7
		bne,pn	%%icc, 1b
		 nop
	2:
	.subsection 2
	3:	bne,pn	%%icc, 1b
		 mov	%0, %%g7
		save	%%sp, -160, %%sp
		mov	%%g1, %%l1
		mov	%%g2, %%l2
		clr	%%o1
		mov	%%g7, %%o0
		call	%1
		 mov	%%g3, %%l3
		mov	%%l1, %%g1
		mov	%%l2, %%g2
		ba,pt	%%xcc, 2b
		 restore %%l3, %%g0, %%g3
	.previous\n"
	: : "r" (sem), "i" (__rwsem_wake)
	: "g5", "g7", "memory", "cc");
}
/* Releasing the writer is easy -- just release it and
 * wake up any sleepers.
 */
extern inline void __up_write(struct rw_semaphore *sem)
{
	__asm__ __volatile__("
	membar	#StoreLoad | #LoadLoad
	1:	lduw	[%0], %%g5
		sethi	%%hi(" RW_LOCK_BIAS_STR "), %%g7
		add	%%g5, %%g7, %%g7
		cas	[%0], %%g5, %%g7
		cmp	%%g5, %%g7
		bne,pn	%%icc, 1b
		 sethi	%%hi(" RW_LOCK_BIAS_STR "), %%g7
		addcc	%%g5, %%g7, %%g5
		bcs,pn	%%icc, 3f
		 nop
	2:
	.subsection 2
	3:	mov	%0, %%g7
		save	%%sp, -160, %%sp
		mov	%%g1, %%l1
		mov	%%g2, %%l2
		srl	%%g5, 0, %%o1
		mov	%%g7, %%o0
		call	%1
		 mov	%%g3, %%l3
		mov	%%l1, %%g1
		mov	%%l2, %%g2
		ba,pt	%%xcc, 2b
		 restore %%l3, %%g0, %%g3
	.previous\n"
	: : "r" (sem), "i" (__rwsem_wake)
	: "g5", "g7", "memory", "cc");
}
extern inline void up_read(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	if (test_le_bit(1, &sem->granted))
		BUG();
	if (atomic_read(&sem->writers))
		BUG();
	atomic_dec(&sem->readers);
#endif
	__up_read(sem);
}

extern inline void up_write(struct rw_semaphore *sem)
{
#if WAITQUEUE_DEBUG
	if (test_le_bit(0, &sem->granted))
		BUG();
	if (test_le_bit(1, &sem->granted))
		BUG();
	if (atomic_read(&sem->readers))
		BUG();
	if (atomic_read(&sem->writers) != 1)
		BUG();
	atomic_dec(&sem->writers);
#endif
	__up_write(sem);
}

#endif /* __KERNEL__ */

#endif /* !(_SPARC64_SEMAPHORE_H) */