/*
 * Imported from Linux 2.3.18pre1 (davej-history.git):
 *   include/asm-alpha/semaphore-helper.h
 *   blob 0577d2c93fca5e6aefee2106a0ceba5fcaea0a58
 */
#ifndef _ALPHA_SEMAPHORE_HELPER_H
#define _ALPHA_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphores helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Richard Henderson
 */
/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
18 static inline void
19 wake_one_more(struct semaphore * sem)
21 atomic_inc(&sem->waking);
24 static inline int
25 waking_non_zero(struct semaphore *sem)
27 long ret, tmp;
29 /* An atomic conditional decrement. */
30 __asm__ __volatile__(
31 "1: ldl_l %1,%2\n"
32 " blt %1,2f\n"
33 " subl %1,1,%0\n"
34 " stl_c %0,%2\n"
35 " beq %0,3f\n"
36 "2:\n"
37 ".section .text2,\"ax\"\n"
38 "3: br 1b\n"
39 ".previous"
40 : "=r"(ret), "=r"(tmp), "=m"(__atomic_fool_gcc(&sem->waking))
41 : "0"(0));
43 return ret > 0;
/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count down_interruptible decrement
 * simultaneously and atomicly with the sem->waking adjustment,
 * otherwise we can race with wake_one_more.
 *
 * This is accomplished by doing a 64-bit ll/sc on the 2 32-bit words.
 */
60 static inline int
61 waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
63 long ret, tmp, tmp2, tmp3;
65 /* "Equivalent" C. Note that we have to do this all without
66 (taken) branches in order to be a valid ll/sc sequence.
68 do {
69 tmp = ldq_l;
70 ret = 0;
71 if (tmp >= 0) {
72 tmp += 0xffffffff00000000;
73 ret = 1;
75 else if (pending) {
76 // Since -1 + 1 carries into the high word, we have
77 // to be more careful adding 1 here.
78 tmp = (tmp & 0xffffffff00000000)
79 | ((tmp + 1) & 0x00000000ffffffff;
80 ret = -EINTR;
82 else {
83 break; // ideally. we don't actually break
84 // since this is a predicate we don't
85 // have, and is more trouble to build
86 // than to elide the noop stq_c.
88 tmp = stq_c = tmp;
89 } while (tmp == 0);
92 __asm__ __volatile__(
93 "1: ldq_l %1,%4\n"
94 " lda %0,0\n"
95 " cmovne %5,%6,%0\n"
96 " addq %1,1,%2\n"
97 " and %1,%7,%3\n"
98 " andnot %2,%7,%2\n"
99 " cmovge %1,1,%0\n"
100 " or %3,%2,%2\n"
101 " addq %1,%7,%3\n"
102 " cmovne %5,%2,%1\n"
103 " cmovge %2,%3,%1\n"
104 " stq_c %1,%4\n"
105 " beq %1,3f\n"
106 "2:\n"
107 ".section .text2,\"ax\"\n"
108 "3: br 1b\n"
109 ".previous"
110 : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(tmp3), "=m"(*sem)
111 : "r"(signal_pending(tsk)), "r"(-EINTR),
112 "r"(0xffffffff00000000));
114 return ret;
/*
 * waking_non_zero_trylock is unused.  we do everything in
 * down_trylock and let non-ll/sc hosts bounce around.
 */
122 static inline int
123 waking_non_zero_trylock(struct semaphore *sem)
125 return 0;
#endif