/*
 * include/asm-alpha/rwsem.h  (linux-2.6 tree, blob
 * 8e058a67c9a48fdd966877daa81021c54df866ba; extracted alongside patch
 * "x86_64: fix cpu_to_node setup for sparse apic_ids")
 */
#ifndef _ALPHA_RWSEM_H
#define _ALPHA_RWSEM_H

/*
 * Written by Ivan Kokshaysky <ink@jurassic.park.msu.ru>, 2001.
 * Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
 */

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

/* Contended slow paths; defined outside this header (generic rwsem code). */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
/*
 * the semaphore definition
 */
struct rw_semaphore {
	/*
	 * 64-bit packed state: the low 32 bits (RWSEM_ACTIVE_MASK) count
	 * active lockers; each writer additionally adds RWSEM_WAITING_BIAS,
	 * driving the whole count negative, so "count < 0" means a writer
	 * holds the lock and/or tasks are queued on wait_list.
	 */
	long			count;
#define RWSEM_UNLOCKED_VALUE		0x0000000000000000L
#define RWSEM_ACTIVE_BIAS		0x0000000000000001L
#define RWSEM_ACTIVE_MASK		0x00000000ffffffffL
#define RWSEM_WAITING_BIAS		(-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;	/* guards wait_list (by rwsem convention) */
	struct list_head	wait_list;	/* queue of rwsem_waiter entries */
#if RWSEM_DEBUG
	int			debug;		/* debug-only state, zeroed in init_rwsem() */
#endif
};
/*
 * Static initializer helpers.  __RWSEM_DEBUG_INIT expands to the extra
 * ", 0" initializer for the debug field only when RWSEM_DEBUG is set,
 * so __RWSEM_INITIALIZER stays usable in both configurations.
 */
#if RWSEM_DEBUG
#define __RWSEM_DEBUG_INIT      , 0
#else
#define __RWSEM_DEBUG_INIT	/* */
#endif

#define __RWSEM_INITIALIZER(name) \
	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
	LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT }

/* Define and statically initialize an unlocked rw_semaphore. */
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
/*
 * Run-time initialization: unlocked count, fresh spinlock, empty wait queue.
 * Mirrors __RWSEM_INITIALIZER for semaphores that cannot be statically
 * initialized.
 */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	sem->count = RWSEM_UNLOCKED_VALUE;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
	sem->debug = 0;
#endif
}
/*
 * lock for reading
 *
 * Atomically add RWSEM_ACTIVE_READ_BIAS to sem->count.  If the value we
 * saw before the add was negative (writer active or waiters queued), fall
 * back to the contended slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	/* UP: plain read-modify-write, no atomicity needed */
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * ldq_l/stq_c load-locked/store-conditional loop; stq_c leaves 0
	 * in %2 on failure, so "beq %2,2f" branches to the out-of-line
	 * retry stub in .subsection 2.  The mb after the successful store
	 * orders the acquisition before the critical section's accesses.
	 */
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_down_read_failed(sem);
}
92 * trylock for reading -- returns 1 if successful, 0 if contention
94 static inline int __down_read_trylock(struct rw_semaphore *sem)
96 long old, new, res;
98 res = sem->count;
99 do {
100 new = res + RWSEM_ACTIVE_READ_BIAS;
101 if (new <= 0)
102 break;
103 old = res;
104 res = cmpxchg(&sem->count, old, new);
105 } while (res != old);
106 return res >= 0 ? 1 : 0;
/*
 * lock for writing
 *
 * Atomically add RWSEM_ACTIVE_WRITE_BIAS (waiting bias + one active).
 * Only if the previous count was exactly 0 (unlocked, no waiters) did we
 * get the lock uncontended; any nonzero old value routes to the slow path.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	/* UP: plain read-modify-write, no atomicity needed */
	oldcount = sem->count;
	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
#else
	long temp;
	/*
	 * Same ll/sc retry pattern as __down_read; mb after the successful
	 * store-conditional provides acquire ordering.
	 */
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount))
		rwsem_down_write_failed(sem);
}
134 * trylock for writing -- returns 1 if successful, 0 if contention
136 static inline int __down_write_trylock(struct rw_semaphore *sem)
138 long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
139 RWSEM_ACTIVE_WRITE_BIAS);
140 if (ret == RWSEM_UNLOCKED_VALUE)
141 return 1;
142 return 0;
/*
 * unlock after reading
 *
 * Atomically subtract RWSEM_ACTIVE_READ_BIAS.  If waiters are queued
 * (pre-decrement count negative) and we were the last active reader
 * (low 32 bits go to zero), wake a waiter.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	/* UP: plain read-modify-write, no atomicity needed */
	oldcount = sem->count;
	sem->count -= RWSEM_ACTIVE_READ_BIAS;
#else
	long temp;
	/*
	 * mb *before* the ll/sc loop: release ordering — the critical
	 * section's accesses complete before the count is dropped.
	 */
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldq_l	%0,%1\n"
	"	subq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		/* (int) truncation isolates the active-locker field */
		if ((int)oldcount - RWSEM_ACTIVE_READ_BIAS == 0)
			rwsem_wake(sem);
}
/*
 * unlock after writing
 *
 * Atomically subtract RWSEM_ACTIVE_WRITE_BIAS and keep the *new* count.
 * A nonzero new count whose low 32 bits are zero means no lockers remain
 * but waiters are still queued -> wake one.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	long count;
#ifndef	CONFIG_SMP
	/* UP: plain read-modify-write, no atomicity needed */
	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
	count = sem->count;
#else
	long temp;
	/*
	 * Release barrier first, then ll/sc; the trailing "subq %0,%3,%0"
	 * recomputes the post-subtraction value into count after the
	 * store-conditional succeeded.
	 */
	__asm__ __volatile__(
	"	mb\n"
	"1:	ldq_l	%0,%1\n"
	"	subq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	subq	%0,%3,%0\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (count), "=m" (sem->count), "=&r" (temp)
	:"Ir" (RWSEM_ACTIVE_WRITE_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(count))
		/* low 32 bits zero: no active lockers, only waiters left */
		if ((int)count == 0)
			rwsem_wake(sem);
}
/*
 * downgrade write lock to read lock
 *
 * Remove the waiting bias (add -RWSEM_WAITING_BIAS), turning "one writer"
 * into "one reader".  If the pre-update count was negative, waiters are
 * queued and any queued readers can now be woken.
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long oldcount;
#ifndef	CONFIG_SMP
	/* UP: plain read-modify-write, no atomicity needed */
	oldcount = sem->count;
	sem->count -= RWSEM_WAITING_BIAS;
#else
	long temp;
	/* ll/sc add of -RWSEM_WAITING_BIAS; mb after success for ordering */
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
	:"Ir" (-RWSEM_WAITING_BIAS), "m" (sem->count) : "memory");
#endif
	if (unlikely(oldcount < 0))
		rwsem_downgrade_wake(sem);
}
/*
 * Atomically add 'val' to sem->count, discarding the result.
 *
 * NOTE(review): unlike the lock/unlock paths above there is no mb and no
 * "memory" clobber here — presumably these helpers are only called by the
 * slow-path code with sem->wait_lock held, so no extra ordering is needed;
 * confirm against the generic rwsem callers.
 */
static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
{
#ifndef	CONFIG_SMP
	sem->count += val;
#else
	long temp;
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%2,%0\n"
	"	stq_c	%0,%1\n"
	"	beq	%0,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (temp), "=m" (sem->count)
	:"Ir" (val), "m" (sem->count));
#endif
}
/*
 * Atomically add 'val' to sem->count and return the updated value.
 *
 * NOTE(review): like rwsem_atomic_add, no barrier/"memory" clobber —
 * presumably only used under sem->wait_lock; confirm with the callers.
 */
static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
{
#ifndef	CONFIG_SMP
	sem->count += val;
	return sem->count;
#else
	long ret, temp;
	/*
	 * %2 is the value stored (consumed by stq_c); %0 independently
	 * accumulates old+val so the post-update count survives as 'ret'.
	 */
	__asm__ __volatile__(
	"1:	ldq_l	%0,%1\n"
	"	addq	%0,%3,%2\n"
	"	addq	%0,%3,%0\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,2f\n"
	".subsection 2\n"
	"2:	br	1b\n"
	".previous"
	:"=&r" (ret), "=m" (sem->count), "=&r" (temp)
	:"Ir" (val), "m" (sem->count));

	return ret;
#endif
}

#endif /* __KERNEL__ */
#endif /* _ALPHA_RWSEM_H */