#ifndef _ASM_M32R_BITOPS_H
#define _ASM_M32R_BITOPS_H

/*
 * linux/include/asm-m32r/bitops.h
 *
 * Copyright 1992, Linus Torvalds.
 *
 * M32R version:
 *   Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *   Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/assembler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/types.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

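/*
 * Illustrative sketch (not part of the original header): a bit number
 * decomposes into a 32-bit word index (nr >> 5) and a bit offset within
 * that word (nr & 0x1F), so e.g. nr == 35 addresses bit 3 of the second
 * word:
 *
 *	word index = 35 >> 5;		// 1
 *	mask       = 1 << (35 & 0x1F);	// 1 << 3 == 0x08
 */
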
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1; \n\t"
		"or	%0, %2; \n\t"
		M32R_UNLOCK" %0, @%1; \n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a |= mask;
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1; \n\t"
		"and	%0, %2; \n\t"
		M32R_UNLOCK" %0, @%1; \n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (~mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

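/*
 * Illustrative usage sketch (assumed, not from the original header):
 * releasing a lock bit with clear_bit() needs an explicit barrier so
 * that earlier stores are visible before the bit appears cleared:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_LOCK_BIT, &my_flags);	// hypothetical names
 */
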
static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask;
	volatile unsigned long *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a &= ~mask;
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	*a ^= mask;
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__u32 mask;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "r6", "%1")
		M32R_LOCK" %0, @%1; \n\t"
		"xor	%0, %2; \n\t"
		M32R_UNLOCK" %0, @%1; \n\t"
		: "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
	local_irq_restore(flags);
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2; \n\t"
		"mv	%1, %0; \n\t"
		"and	%0, %3; \n\t"
		"or	%1, %3; \n\t"
		M32R_UNLOCK" %1, @%2; \n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

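/*
 * Illustrative usage sketch (assumed, not from the original header):
 * the atomic read-modify-write makes a one-bit trylock possible:
 *
 *	while (test_and_set_bit(0, &my_lock_word))	// hypothetical word
 *		cpu_relax();		// spin until the old bit was 0
 */
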
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a |= mask;

	return (oldbit != 0);
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);

	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%3")
		M32R_LOCK" %0, @%3; \n\t"
		"mv	%1, %0; \n\t"
		"and	%0, %2; \n\t"
		"not	%2, %2; \n\t"
		"and	%1, %2; \n\t"
		M32R_UNLOCK" %1, @%3; \n\t"
		: "=&r" (oldbit), "=&r" (tmp), "+r" (mask)
		: "r" (a)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a &= ~mask;

	return (oldbit != 0);
}

/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));
	oldbit = (*a & mask);
	*a ^= mask;

	return (oldbit != 0);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	__u32 mask, oldbit;
	volatile __u32 *a = addr;
	unsigned long flags;
	unsigned long tmp;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	local_irq_save(flags);
	__asm__ __volatile__ (
		DCACHE_CLEAR("%0", "%1", "%2")
		M32R_LOCK" %0, @%2; \n\t"
		"mv	%1, %0; \n\t"
		"and	%0, %3; \n\t"
		"xor	%1, %3; \n\t"
		M32R_UNLOCK" %1, @%2; \n\t"
		: "=&r" (oldbit), "=&r" (tmp)
		: "r" (a), "r" (mask)
		: "memory"
	);
	local_irq_restore(flags);

	return (oldbit != 0);
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void * addr)
{
	__u32 mask;
	const volatile __u32 *a = addr;

	a += (nr >> 5);
	mask = (1 << (nr & 0x1F));

	return ((*a & mask) != 0);
}

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	int k = 0;

	word = ~word;
	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
	if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
	if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
	if (!(word & 0x00000003)) { k += 2; word >>= 2; }
	if (!(word & 0x00000001)) { k += 1; }

	return k;
}

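/*
 * Worked example (illustrative): for word == 0x0000ffff the low 16
 * bits are all ones, so the first zero is bit 16 and ffz() returns 16.
 * ffz(~0UL) has no zero to find and is undefined by contract, hence
 * the check against ~0UL noted above.
 */
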
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(const unsigned long *addr,
					 int size, int offset)
{
	const unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

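/*
 * Illustrative usage sketch (assumed, not from the original header):
 * scanning every zero bit of a fixed-size bitmap:
 *
 *	int bit = find_first_zero_bit(map, NBITS);	// hypothetical names
 *	while (bit < NBITS) {
 *		// ... claim 'bit' ...
 *		bit = find_next_zero_bit(map, NBITS, bit + 1);
 *	}
 */
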
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	int k = 0;

	if (!(word & 0x0000ffff)) { k += 16; word >>= 16; }
	if (!(word & 0x000000ff)) { k += 8; word >>= 8; }
	if (!(word & 0x0000000f)) { k += 4; word >>= 4; }
	if (!(word & 0x00000003)) { k += 2; word >>= 2; }
	if (!(word & 0x00000001)) { k += 1; }

	return k;
}

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}

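/*
 * Worked example (illustrative): with only priority 70 set in the
 * five-word bitmap, b[0] == b[1] == 0 and b[2] == (1 << 6), so the
 * function returns __ffs(b[2]) + 64 == 6 + 64 == 70.
 */
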
/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;
		if (size < 32)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if ((tmp = *p++) != 0)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (32 - size);
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

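/*
 * Illustrative note: unlike __ffs(), which is 0-based and undefined
 * for 0, ffs() follows the libc convention: ffs(0) == 0 and
 * ffs(x) == __ffs(x) + 1 for nonzero x, e.g. ffs(0x10) == 5.
 */
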
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

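/*
 * Worked example (illustrative): hweight8(0xA5) counts the set bits
 * of 1010 0101 and returns 4; likewise hweight32(0xF0F0F0F0) returns 16.
 */
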
#endif /* __KERNEL__ */

#ifdef __KERNEL__

/*
 * ext2 bitmap functions
 * orig: include/asm-sh/bitops.h
 */

#ifdef __LITTLE_ENDIAN__
#define ext2_set_bit			test_and_set_bit
#define ext2_clear_bit			__test_and_clear_bit
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit
#else
static inline int ext2_set_bit(int nr, volatile void * addr)
{
	__u8 mask, oldbit;
	volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));
	oldbit = (*a & mask);
	*a |= mask;

	return (oldbit != 0);
}

static inline int ext2_clear_bit(int nr, volatile void * addr)
{
	__u8 mask, oldbit;
	volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));
	oldbit = (*a & mask);
	*a &= ~mask;

	return (oldbit != 0);
}

static inline int ext2_test_bit(int nr, const volatile void * addr)
{
	__u8 mask;
	const volatile __u8 *a = addr;

	a += (nr >> 3);
	mask = (1 << (nr & 0x07));

	return ((mask & *a) != 0);
}

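/*
 * Illustrative sketch (not part of the original header): the ext2
 * helpers address bits within bytes (nr >> 3, nr & 0x07), which keeps
 * the on-disk layout endian-neutral; e.g. ext2_set_bit(11, map) sets
 * bit 3 of byte 1:
 *
 *	byte index = 11 >> 3;		// 1
 *	mask       = 1 << (11 & 0x07);	// 1 << 3 == 0x08
 */
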
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static inline unsigned long ext2_find_next_zero_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#endif	/* __LITTLE_ENDIAN__ */

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

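/*
 * Illustrative usage sketch (assumed, not from the original header):
 * the caller supplies the spinlock that serializes bitmap updates:
 *
 *	int was_set = ext2_set_bit_atomic(&sbi->s_lock, bit, bitmap);
 *
 * where sbi->s_lock, bit and bitmap are hypothetical names.
 */
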
/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)		__test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			__set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	__test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_M32R_BITOPS_H */