/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
12 #include <linux/config.h>
13 #include <linux/compiler.h>
14 #include <linux/types.h>
16 #include <asm/byteorder.h> /* sigh ... */
17 #include <asm/cpu-features.h>
19 #if (_MIPS_SZLONG == 32)
21 #define SZLONG_MASK 31UL
24 #define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
25 #elif (_MIPS_SZLONG == 64)
27 #define SZLONG_MASK 63UL
30 #define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
35 #include <asm/interrupt.h>
36 #include <asm/sgidefs.h>
40 * clear_bit() doesn't provide any barrier for the compiler.
42 #define smp_mb__before_clear_bit() smp_mb()
43 #define smp_mb__after_clear_bit() smp_mb()
46 * Only disable interrupt for kernel mode stuff to keep usermode stuff
47 * that dares to use kernel include files alive.
50 #define __bi_flags unsigned long flags
51 #define __bi_local_irq_save(x) local_irq_save(x)
52 #define __bi_local_irq_restore(x) local_irq_restore(x)
55 #define __bi_local_irq_save(x)
56 #define __bi_local_irq_restore(x)
57 #endif /* __KERNEL__ */
60 * set_bit - Atomically set a bit in memory
62 * @addr: the address to start counting from
64 * This function is atomic and may not be reordered. See __set_bit()
65 * if you do not require the atomic guarantees.
66 * Note that @nr may be almost arbitrarily large; this function is not
67 * restricted to acting on a single-word quantity.
69 static inline void set_bit(unsigned long nr
, volatile unsigned long *addr
)
71 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
74 if (cpu_has_llsc
&& R10000_LLSC_WAR
) {
77 "1: " __LL
"%0, %1 # set_bit \n"
82 : "=&r" (temp
), "=m" (*m
)
83 : "ir" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
));
84 } else if (cpu_has_llsc
) {
87 "1: " __LL
"%0, %1 # set_bit \n"
92 : "=&r" (temp
), "=m" (*m
)
93 : "ir" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
));
95 volatile unsigned long *a
= addr
;
99 a
+= nr
>> SZLONG_LOG
;
100 mask
= 1UL << (nr
& SZLONG_MASK
);
101 __bi_local_irq_save(flags
);
103 __bi_local_irq_restore(flags
);
108 * clear_bit - Clears a bit in memory
110 * @addr: Address to start counting from
112 * clear_bit() is atomic and may not be reordered. However, it does
113 * not contain a memory barrier, so if it is used for locking purposes,
114 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
115 * in order to ensure changes are visible on other processors.
117 static inline void clear_bit(unsigned long nr
, volatile unsigned long *addr
)
119 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
122 if (cpu_has_llsc
&& R10000_LLSC_WAR
) {
123 __asm__
__volatile__(
125 "1: " __LL
"%0, %1 # clear_bit \n"
130 : "=&r" (temp
), "=m" (*m
)
131 : "ir" (~(1UL << (nr
& SZLONG_MASK
))), "m" (*m
));
132 } else if (cpu_has_llsc
) {
133 __asm__
__volatile__(
135 "1: " __LL
"%0, %1 # clear_bit \n"
140 : "=&r" (temp
), "=m" (*m
)
141 : "ir" (~(1UL << (nr
& SZLONG_MASK
))), "m" (*m
));
143 volatile unsigned long *a
= addr
;
147 a
+= nr
>> SZLONG_LOG
;
148 mask
= 1UL << (nr
& SZLONG_MASK
);
149 __bi_local_irq_save(flags
);
151 __bi_local_irq_restore(flags
);
156 * change_bit - Toggle a bit in memory
158 * @addr: Address to start counting from
160 * change_bit() is atomic and may not be reordered.
161 * Note that @nr may be almost arbitrarily large; this function is not
162 * restricted to acting on a single-word quantity.
164 static inline void change_bit(unsigned long nr
, volatile unsigned long *addr
)
166 if (cpu_has_llsc
&& R10000_LLSC_WAR
) {
167 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
170 __asm__
__volatile__(
172 "1: " __LL
"%0, %1 # change_bit \n"
177 : "=&r" (temp
), "=m" (*m
)
178 : "ir" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
));
179 } else if (cpu_has_llsc
) {
180 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
183 __asm__
__volatile__(
185 "1: " __LL
"%0, %1 # change_bit \n"
190 : "=&r" (temp
), "=m" (*m
)
191 : "ir" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
));
193 volatile unsigned long *a
= addr
;
197 a
+= nr
>> SZLONG_LOG
;
198 mask
= 1UL << (nr
& SZLONG_MASK
);
199 __bi_local_irq_save(flags
);
201 __bi_local_irq_restore(flags
);
206 * test_and_set_bit - Set a bit and return its old value
208 * @addr: Address to count from
210 * This operation is atomic and cannot be reordered.
211 * It also implies a memory barrier.
213 static inline int test_and_set_bit(unsigned long nr
,
214 volatile unsigned long *addr
)
216 if (cpu_has_llsc
&& R10000_LLSC_WAR
) {
217 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
218 unsigned long temp
, res
;
220 __asm__
__volatile__(
222 "1: " __LL
"%0, %1 # test_and_set_bit \n"
231 : "=&r" (temp
), "=m" (*m
), "=&r" (res
)
232 : "r" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
)
236 } else if (cpu_has_llsc
) {
237 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
238 unsigned long temp
, res
;
240 __asm__
__volatile__(
244 "1: " __LL
"%0, %1 # test_and_set_bit \n"
253 : "=&r" (temp
), "=m" (*m
), "=&r" (res
)
254 : "r" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
)
259 volatile unsigned long *a
= addr
;
264 a
+= nr
>> SZLONG_LOG
;
265 mask
= 1UL << (nr
& SZLONG_MASK
);
266 __bi_local_irq_save(flags
);
267 retval
= (mask
& *a
) != 0;
269 __bi_local_irq_restore(flags
);
276 * test_and_clear_bit - Clear a bit and return its old value
278 * @addr: Address to count from
280 * This operation is atomic and cannot be reordered.
281 * It also implies a memory barrier.
283 static inline int test_and_clear_bit(unsigned long nr
,
284 volatile unsigned long *addr
)
286 if (cpu_has_llsc
&& R10000_LLSC_WAR
) {
287 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
288 unsigned long temp
, res
;
290 __asm__
__volatile__(
292 "1: " __LL
"%0, %1 # test_and_clear_bit \n"
302 : "=&r" (temp
), "=m" (*m
), "=&r" (res
)
303 : "r" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
)
307 } else if (cpu_has_llsc
) {
308 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
309 unsigned long temp
, res
;
311 __asm__
__volatile__(
315 "1: " __LL
"%0, %1 # test_and_clear_bit \n"
325 : "=&r" (temp
), "=m" (*m
), "=&r" (res
)
326 : "r" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
)
331 volatile unsigned long *a
= addr
;
336 a
+= nr
>> SZLONG_LOG
;
337 mask
= 1UL << (nr
& SZLONG_MASK
);
338 __bi_local_irq_save(flags
);
339 retval
= (mask
& *a
) != 0;
341 __bi_local_irq_restore(flags
);
348 * test_and_change_bit - Change a bit and return its old value
350 * @addr: Address to count from
352 * This operation is atomic and cannot be reordered.
353 * It also implies a memory barrier.
355 static inline int test_and_change_bit(unsigned long nr
,
356 volatile unsigned long *addr
)
358 if (cpu_has_llsc
&& R10000_LLSC_WAR
) {
359 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
360 unsigned long temp
, res
;
362 __asm__
__volatile__(
364 "1: " __LL
"%0, %1 # test_and_change_bit \n"
373 : "=&r" (temp
), "=m" (*m
), "=&r" (res
)
374 : "r" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
)
378 } else if (cpu_has_llsc
) {
379 unsigned long *m
= ((unsigned long *) addr
) + (nr
>> SZLONG_LOG
);
380 unsigned long temp
, res
;
382 __asm__
__volatile__(
386 "1: " __LL
"%0, %1 # test_and_change_bit \n"
388 " " __SC
"\t%2, %1 \n"
395 : "=&r" (temp
), "=m" (*m
), "=&r" (res
)
396 : "r" (1UL << (nr
& SZLONG_MASK
)), "m" (*m
)
401 volatile unsigned long *a
= addr
;
402 unsigned long mask
, retval
;
405 a
+= nr
>> SZLONG_LOG
;
406 mask
= 1UL << (nr
& SZLONG_MASK
);
407 __bi_local_irq_save(flags
);
408 retval
= (mask
& *a
) != 0;
410 __bi_local_irq_restore(flags
);
417 #undef __bi_local_irq_save
418 #undef __bi_local_irq_restore
420 #include <asm-generic/bitops/non-atomic.h>
/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	/* sizeof() is a compile-time constant; the dead branch is elided */
	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push					\n"
	"	.set	mips64					\n"
	"	dclz	%0, %1					\n"
	"	.set	pop					\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
455 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
458 * __ffs - find first bit in word.
459 * @word: The word to search
461 * Returns 0..SZLONG-1
462 * Undefined if no bit exists, so code should check against 0 first.
464 static inline unsigned long __ffs(unsigned long word
)
466 return __ilog2(word
& -word
);
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */

static inline unsigned long ffs(unsigned long word)
{
	if (!word)
		return 0;

	return __ffs(word) + 1;
}
/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffs (~word);
}
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * Returns 1..SZLONG
 * Returns 0 if no bit exists
 */
static inline unsigned long fls(unsigned long word)
{
#ifdef CONFIG_CPU_MIPS32
	/* clz counts leading zeroes; clz(0) == 32, so fls(0) == 0 */
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
#endif

#ifdef CONFIG_CPU_MIPS64
	/* dclz is the 64-bit count-leading-zeroes */
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
#endif
}
520 #include <asm-generic/bitops/__ffs.h>
521 #include <asm-generic/bitops/ffs.h>
522 #include <asm-generic/bitops/ffz.h>
523 #include <asm-generic/bitops/fls.h>
525 #endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
527 #include <asm-generic/bitops/fls64.h>
528 #include <asm-generic/bitops/find.h>
532 #include <asm-generic/bitops/sched.h>
533 #include <asm-generic/bitops/hweight.h>
534 #include <asm-generic/bitops/ext2-non-atomic.h>
535 #include <asm-generic/bitops/ext2-atomic.h>
536 #include <asm-generic/bitops/minix.h>
538 #endif /* __KERNEL__ */
540 #endif /* _ASM_BITOPS_H */