MOXA linux-2.6.x / linux-2.6.19-uc1 from UC-7110-LX-BOOTLOADER-1.9_VERSION-4.2.tgz
[linux-2.6.19-moxart.git] / include/asm-mips/bitops.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif
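
/*
 * Illustrative note (not part of the original header): SZLONG_LOG and
 * SZLONG_MASK split a bit number into a word index and a bit offset.
 * On a 32-bit kernel, for example, nr = 37 addresses word addr[37 >> 5],
 * i.e. addr[1], and bit 37 & 31, i.e. bit 5, within that word; __LL/__SC
 * select the matching 32-bit (ll/sc) or 64-bit (lld/scd) load-linked /
 * store-conditional instructions.
 */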

#ifdef __KERNEL__

#include <linux/irqflags.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */
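
/*
 * Illustrative note (not part of the original header): on CPUs without
 * ll/sc the fallback branches below expand, in kernel mode, to roughly
 *
 *	unsigned long flags;
 *	local_irq_save(flags);
 *	*a |= mask;			(or &= ~mask, ^= mask, ...)
 *	local_irq_restore(flags);
 *
 * In user mode the __bi_* macros expand to nothing, so the fallback is
 * only a plain, non-atomic read-modify-write.
 */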

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
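
/*
 * Usage sketch (illustrative, not part of the original header), assuming a
 * bitmap declared with DECLARE_BITMAP() from <linux/types.h>:
 *
 *	DECLARE_BITMAP(pending, 64);
 *	set_bit(5, pending);		atomically sets bit 5 of the bitmap
 */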

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
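
/*
 * Usage sketch (illustrative, not part of the original header): when a bit
 * acts as a lock, order the critical section's stores before releasing it;
 * LOCK_BIT and word are hypothetical names:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &word);	release the bit-lock
 */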

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC "%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}
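
/*
 * Illustrative note (not part of the original header): change_bit() only
 * toggles and, unlike clear_bit(), has no smp_mb__before/__after helpers,
 * so callers needing ordering use smp_mb() explicitly; POLARITY_BIT and
 * word are hypothetical names:
 *
 *	change_bit(POLARITY_BIT, &word);
 *	smp_mb();
 */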

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
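
/*
 * Usage sketch (illustrative, not part of the original header): a simple
 * try-lock built on test_and_set_bit(); BUSY_BIT and word are hypothetical:
 *
 *	if (!test_and_set_bit(BUSY_BIT, &word)) {
 *		the bit was previously clear, so this caller now owns the
 *		resource; the implied barrier orders the critical section
 *		after the bit is set
 *	}
 */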

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC "%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
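
/*
 * Usage sketch (illustrative, not part of the original header): consuming a
 * pending-work flag exactly once; WORK_BIT and word are hypothetical names:
 *
 *	if (test_and_clear_bit(WORK_BIT, &word))
 *		the flag was set, and this CPU is the one that cleared it
 */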

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC "\t%2, %1				\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

#include <asm-generic/bitops/non-atomic.h>
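
/*
 * Illustrative note (not part of the original header): the generic header
 * above supplies the non-atomic, double-underscore variants (__set_bit(),
 * __clear_bit(), __change_bit(), __test_and_set_bit(), ...) for callers
 * that already hold a lock or otherwise serialise access themselves.
 */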

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push					\n"
	"	.set	mips64					\n"
	"	dclz	%0, %1					\n"
	"	.set	pop					\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
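
/*
 * Worked example (illustrative, not part of the original header): for
 * x = 0x90 on a 32-bit kernel, clz counts 24 leading zero bits, so
 * __ilog2(0x90) == 31 - 24 == 7, the index of the highest set bit.
 */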

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
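
/*
 * Worked example (illustrative, not part of the original header):
 * word & -word isolates the lowest set bit in two's complement, e.g.
 * 0x58 & -0x58 == 0x08, and __ilog2(0x08) == 3, so __ffs(0x58) == 3.
 */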

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
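
/*
 * Worked example (illustrative, not part of the original header): for
 * word = 0x58 the three helpers give __ffs(0x58) == 3 (zero-based index
 * of the lowest set bit), ffs(0x58) == 4 (one-based, libc convention) and
 * fls(0x58) == 7 (one-based index of the highest set bit, 0x40).
 */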

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */