/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll "
#define __SC		"sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld "
#define __SC		"scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif
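
/*
 * Illustrative sketch (editor's example, not from the original header):
 * a bit index nr is split into a longword offset (nr >> SZLONG_LOG) and
 * a bit position within that longword (nr & SZLONG_MASK).  With 32-bit
 * longs:
 *
 *	unsigned long bitmap[2] = { 0, 0 };
 *	unsigned long nr = 37;
 *
 *	bitmap[nr >> SZLONG_LOG] |= 1UL << (nr & SZLONG_MASK);
 *	// sets bit 5 of bitmap[1], since 37 == 1 * 32 + 5
 */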

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
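
/*
 * Usage sketch (editor's example with hypothetical names): when
 * clear_bit() releases a lock-like flag, order earlier stores
 * explicitly, since clear_bit() itself provides no barrier.
 *
 *	static void release_flag(volatile unsigned long *word)
 *	{
 *		smp_mb__before_clear_bit();	// make prior writes visible
 *		clear_bit(0, word);		// then drop the flag bit
 *	}
 */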

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
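
/*
 * Usage sketch (editor's example with hypothetical names): marking an
 * event in a shared bitmap from any context, with no lock needed around
 * the update itself.
 *
 *	static unsigned long pending_events[2];
 *
 *	static void mark_event(unsigned long ev)
 *	{
 *		set_bit(ev, pending_events);	// atomic read-modify-write
 *	}
 */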

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
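
/*
 * Usage sketch (editor's example with hypothetical names): acknowledging
 * an event by clearing its bit; pair with the smp_mb__*_clear_bit()
 * barriers above when the bit guards other data.
 *
 *	static void ack_event(unsigned long ev, unsigned long *map)
 *	{
 *		clear_bit(ev, map);
 *	}
 */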

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}
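
/*
 * Usage sketch (editor's example with hypothetical names): atomically
 * flipping a state bit shared between contexts.
 *
 *	static unsigned long ctrl_state;
 *
 *	static void toggle_polarity(void)
 *	{
 *		change_bit(3, &ctrl_state);	// 0 -> 1 or 1 -> 0, atomically
 *	}
 */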

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit \n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_set_bit \n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
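
/*
 * Usage sketch (editor's example with hypothetical names):
 * test_and_set_bit() is the classic building block for a one-bit
 * trylock, since it returns the old value and implies a barrier.
 *
 *	static unsigned long lock_word;
 *
 *	static int my_trylock(void)
 *	{
 *		return !test_and_set_bit(0, &lock_word);  // 1 if we got it
 *	}
 */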

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit \n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit \n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
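
/*
 * Usage sketch (editor's example with hypothetical names): consuming a
 * pending flag exactly once; only the caller that observes the old
 * value as 1 handles the event.
 *
 *	static int claim_event(unsigned long ev, unsigned long *map)
 *	{
 *		return test_and_clear_bit(ev, map);	// 1 if we claimed it
 *	}
 */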

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_change_bit \n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# test_and_change_bit \n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
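
/*
 * Usage sketch (editor's example with hypothetical names): toggling a
 * bit while learning its previous state in one atomic step.
 *
 *	static unsigned long led_state;
 *
 *	static int toggle_led(int led)
 *	{
 *		return test_and_change_bit(led, &led_state);  // old value
 *	}
 */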

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push				\n"
		"	.set	mips32				\n"
		"	clz	%0, %1				\n"
		"	.set	pop				\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push					\n"
	"	.set	mips64					\n"
	"	dclz	%0, %1					\n"
	"	.set	pop					\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
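
/*
 * Illustrative values (editor's sketch): __ilog2(1) == 0,
 * __ilog2(0x80) == 7, and __ilog2(0) == -1, since clz/dclz of zero
 * yields the full word width.
 */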

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
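
/*
 * Editor's note (illustrative): word & -word isolates the lowest set
 * bit in two's complement, so e.g. __ffs(0x18) == __ilog2(0x08) == 3.
 */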

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long ffs(unsigned long word)
{
	if (!word)
		return 0;

	return __ffs(word) + 1;
}
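
/*
 * Illustrative contrast (editor's sketch): ffs() is 1-based and safe on
 * zero, while __ffs() is 0-based and undefined on zero:
 *
 *	ffs(0x10) == 5, __ffs(0x10) == 4, ffs(0) == 0
 */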

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffs(~word);
}
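
/*
 * Illustrative value (editor's sketch): ffz(0x0f) == 4, the position of
 * the lowest clear bit; the result is undefined for ~0UL.
 */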

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long fls(unsigned long word)
{
#ifdef CONFIG_CPU_MIPS32
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
#endif

#ifdef CONFIG_CPU_MIPS64
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
#endif
}
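
/*
 * Illustrative values (editor's sketch): fls(0x20) == 6 and fls(0) == 0;
 * a common use is computing a power-of-two exponent, e.g. fls(size - 1)
 * rounds size up to the next power-of-two order.
 */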

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */