/*
 * include/asm-mips/bitops.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997 Ralf Baechle (ralf@gnu.org)
 */
10 #ifndef __ASM_MIPS_BITOPS_H
11 #define __ASM_MIPS_BITOPS_H
13 #include <linux/types.h>
14 #include <linux/byteorder/swab.h> /* sigh ... */
18 #include <asm/sgidefs.h>
19 #include <asm/system.h>
/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
25 #define __bi_flags unsigned long flags
26 #define __bi_cli() __cli()
27 #define __bi_save_flags(x) __save_flags(x)
28 #define __bi_restore_flags(x) __restore_flags(x)
32 #define __bi_save_flags(x)
33 #define __bi_restore_flags(x)
34 #endif /* __KERNEL__ */
/*
 * Note that the bit operations are defined on arrays of 32 bit sized
 * elements. With respect to a future 64 bit implementation it is
 * wrong to use long *. Use u32 * or int *.
 */
41 extern __inline__
void set_bit(int nr
, void *addr
);
42 extern __inline__
void clear_bit(int nr
, void *addr
);
43 extern __inline__
void change_bit(int nr
, void *addr
);
44 extern __inline__
int test_and_set_bit(int nr
, void *addr
);
45 extern __inline__
int test_and_clear_bit(int nr
, void *addr
);
46 extern __inline__
int test_and_change_bit(int nr
, void *addr
);
48 extern __inline__
int test_bit(int nr
, const void *addr
);
50 extern __inline__
int find_first_zero_bit (void *addr
, unsigned size
);
52 extern __inline__
int find_next_zero_bit (void * addr
, int size
, int offset
);
53 extern __inline__
unsigned long ffz(unsigned long word
);
55 #if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) || \
56 (_MIPS_ISA == _MIPS_ISA_MIPS4) || (_MIPS_ISA == _MIPS_ISA_MIPS5)
/*
 * These functions for MIPS ISA > 1 are interrupt and SMP proof and
 * interrupt friendly
 */
62 #include <asm/mipsregs.h>
/*
 * The following functions will only work for the R4000!
 */
68 extern __inline__
void set_bit(int nr
, void *addr
)
72 addr
+= ((nr
>> 3) & ~3);
73 mask
= 1 << (nr
& 0x1f);
75 mw
= load_linked(addr
);
76 } while (!store_conditional(addr
, mw
|mask
));
79 extern __inline__
void clear_bit(int nr
, void *addr
)
83 addr
+= ((nr
>> 3) & ~3);
84 mask
= 1 << (nr
& 0x1f);
86 mw
= load_linked(addr
);
88 while (!store_conditional(addr
, mw
& ~mask
));
91 extern __inline__
void change_bit(int nr
, void *addr
)
95 addr
+= ((nr
>> 3) & ~3);
96 mask
= 1 << (nr
& 0x1f);
98 mw
= load_linked(addr
);
99 } while (!store_conditional(addr
, mw
^ mask
));
102 extern __inline__
int test_and_set_bit(int nr
, void *addr
)
104 int mask
, retval
, mw
;
106 addr
+= ((nr
>> 3) & ~3);
107 mask
= 1 << (nr
& 0x1f);
109 mw
= load_linked(addr
);
110 retval
= (mask
& mw
) != 0;
111 } while (!store_conditional(addr
, mw
|mask
));
116 extern __inline__
int test_and_clear_bit(int nr
, void *addr
)
118 int mask
, retval
, mw
;
120 addr
+= ((nr
>> 3) & ~3);
121 mask
= 1 << (nr
& 0x1f);
123 mw
= load_linked(addr
);
124 retval
= (mask
& mw
) != 0;
126 while (!store_conditional(addr
, mw
& ~mask
));
131 extern __inline__
int test_and_change_bit(int nr
, void *addr
)
133 int mask
, retval
, mw
;
135 addr
+= ((nr
>> 3) & ~3);
136 mask
= 1 << (nr
& 0x1f);
138 mw
= load_linked(addr
);
139 retval
= (mask
& mw
) != 0;
140 } while (!store_conditional(addr
, mw
^ mask
));
147 extern __inline__
void set_bit(int nr
, void * addr
)
154 mask
= 1 << (nr
& 0x1f);
155 __bi_save_flags(flags
);
158 __bi_restore_flags(flags
);
161 extern __inline__
void clear_bit(int nr
, void * addr
)
168 mask
= 1 << (nr
& 0x1f);
169 __bi_save_flags(flags
);
172 __bi_restore_flags(flags
);
175 extern __inline__
void change_bit(int nr
, void * addr
)
182 mask
= 1 << (nr
& 0x1f);
183 __bi_save_flags(flags
);
186 __bi_restore_flags(flags
);
189 extern __inline__
int test_and_set_bit(int nr
, void * addr
)
196 mask
= 1 << (nr
& 0x1f);
197 __bi_save_flags(flags
);
199 retval
= (mask
& *a
) != 0;
201 __bi_restore_flags(flags
);
206 extern __inline__
int test_and_clear_bit(int nr
, void * addr
)
213 mask
= 1 << (nr
& 0x1f);
214 __bi_save_flags(flags
);
216 retval
= (mask
& *a
) != 0;
218 __bi_restore_flags(flags
);
223 extern __inline__
int test_and_change_bit(int nr
, void * addr
)
230 mask
= 1 << (nr
& 0x1f);
231 __bi_save_flags(flags
);
233 retval
= (mask
& *a
) != 0;
235 __bi_restore_flags(flags
);
242 #undef __bi_save_flags(x)
243 #undef __bi_restore_flags(x)
247 extern __inline__
int test_bit(int nr
, const void *addr
)
249 return ((1UL << (nr
& 31)) & (((const unsigned int *) addr
)[nr
>> 5])) != 0;
254 /* Little endian versions. */
/*
 * find_first_zero_bit - scan the little-endian bitmap at 'addr' for
 * the first zero bit within 'size' bits and return its index (it is
 * used that way by find_next_zero_bit below).
 *
 * NOTE(review): the inline-assembler body of this function is
 * truncated in this copy of the file -- only fragments of the template
 * string, the ISA-specific #if and some operand constraints survive.
 * Restore the body from a pristine include/asm-mips/bitops.h; do not
 * try to infer the missing instructions from what is left here.
 */
256 extern __inline__
int find_first_zero_bit (void *addr
, unsigned size
)
264 __asm__ (".set\tnoreorder\n\t"
266 "1:\tsubu\t$1,%6,%0\n\t"
270 #if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) || \
271 (_MIPS_ISA == _MIPS_ISA_MIPS4) || (_MIPS_ISA == _MIPS_ISA_MIPS5)
281 #error "Fix this for big endian"
282 #endif /* __MIPSEB__ */
284 "1:\tand\t%2,$1,%1\n\t"
295 : "0" ((signed int) 0),
296 "1" ((unsigned int) 0xffffffff),
/*
 * find_next_zero_bit - find the first zero bit at or after bit
 * 'offset' in the little-endian bitmap at 'addr' ('size' bits total).
 *
 * Strategy visible in the surviving fragments: scan the partial first
 * word with inline assembler, then fall back to find_first_zero_bit()
 * for the remaining whole words.
 *
 * NOTE(review): the asm template and several statements are truncated
 * in this copy; restore from a pristine include/asm-mips/bitops.h.
 */
304 extern __inline__
int find_next_zero_bit (void * addr
, int size
, int offset
)
/* p: first word to examine; bit: starting position within that word. */
306 unsigned int *p
= ((unsigned int *) addr
) + (offset
>> 5);
307 int set
= 0, bit
= offset
& 31, res
;
312 * Look for zero in first byte
315 #error "Fix this for big endian byte order"
317 __asm__(".set\tnoreorder\n\t"
319 "1:\tand\t$1,%4,%1\n\t"
/* Found a zero inside the first (partial) word? */
333 if (set
< (32 - bit
))
339 * No zero yet, search remaining full bytes for a zero
341 res
= find_first_zero_bit(p
, size
- 32 * (p
- (unsigned int *) addr
));
342 return offset
+ set
+ res
;
345 #endif /* !(__MIPSEB__) */
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
/*
 * ffz - Find First Zero bit in 'word'.  The surviving fragments show
 * an asm loop that ANDs a walking one-bit mask (starting at 1) against
 * the word; the result is undefined when word == ~0UL (see the comment
 * above).
 *
 * NOTE(review): most of the asm template is truncated in this copy
 * (only the noreorder prologue, the and-loop head and the operand
 * constraints survive); restore from a pristine bitops.h.
 */
351 extern __inline__
unsigned long ffz(unsigned long word
)
354 unsigned int mask
= 1;
357 ".set\tnoreorder\n\t"
360 "1:\tand\t$1,%2,%1\n\t"
368 : "=&r" (__res
), "=r" (mask
)
369 : "r" (word
), "1" (mask
)
/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
383 #define ffs(x) generic_ffs(x)
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
390 #define hweight32(x) generic_hweight32(x)
391 #define hweight16(x) generic_hweight16(x)
392 #define hweight8(x) generic_hweight8(x)
394 #endif /* __KERNEL__ */
/* For now I steal the Sparc C versions, no need for speed, just need to
 * get it working.
 */
/* find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'. This is largely based
 * on Linus's ALPHA routines, which are pretty portable BTW.
 */
405 extern __inline__
int find_next_zero_bit(void *addr
, int size
, int offset
)
407 unsigned long *p
= ((unsigned long *) addr
) + (offset
>> 5);
408 unsigned long result
= offset
& ~31UL;
417 tmp
|= ~0UL >> (32-offset
);
425 while (size
& ~31UL) {
438 return result
+ ffz(tmp
);
/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */
445 #define find_first_zero_bit(addr, size) \
446 find_next_zero_bit((addr), (size), 0)
448 #endif /* (__MIPSEB__) */
450 /* Now for the ext2 filesystem bit operations and helper routines. */
453 extern __inline__
int ext2_set_bit(int nr
,void * addr
)
455 int mask
, retval
, flags
;
456 unsigned char *ADDR
= (unsigned char *) addr
;
459 mask
= 1 << (nr
& 0x07);
460 save_flags(flags
); cli();
461 retval
= (mask
& *ADDR
) != 0;
463 restore_flags(flags
);
467 extern __inline__
int ext2_clear_bit(int nr
, void * addr
)
469 int mask
, retval
, flags
;
470 unsigned char *ADDR
= (unsigned char *) addr
;
473 mask
= 1 << (nr
& 0x07);
474 save_flags(flags
); cli();
475 retval
= (mask
& *ADDR
) != 0;
477 restore_flags(flags
);
481 extern __inline__
int ext2_test_bit(int nr
, const void * addr
)
484 const unsigned char *ADDR
= (const unsigned char *) addr
;
487 mask
= 1 << (nr
& 0x07);
488 return ((mask
& *ADDR
) != 0);
491 #define ext2_find_first_zero_bit(addr, size) \
492 ext2_find_next_zero_bit((addr), (size), 0)
494 extern __inline__
unsigned long ext2_find_next_zero_bit(void *addr
, unsigned long size
, unsigned long offset
)
496 unsigned long *p
= ((unsigned long *) addr
) + (offset
>> 5);
497 unsigned long result
= offset
& ~31UL;
505 /* We hold the little endian value in tmp, but then the
506 * shift is illegal. So we could keep a big endian value
509 * tmp = __swab32(*(p++));
510 * tmp |= ~0UL >> (32-offset);
512 * but this would decrease preformance, so we change the
516 tmp
|= __swab32(~0UL >> (32-offset
));
524 while(size
& ~31UL) {
535 /* tmp is little endian, so we would have to swab the shift,
536 * see above. But then we have to swab tmp below for ffz, so
537 * we might as well do this here.
539 return result
+ ffz(__swab32(tmp
) | (~0UL << size
));
541 return result
+ ffz(__swab32(tmp
));
543 #else /* !(__MIPSEB__) */
545 /* Native ext2 byte ordering, just collapse using defines. */
546 #define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
547 #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
548 #define ext2_test_bit(nr, addr) test_bit((nr), (addr))
549 #define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
550 #define ext2_find_next_zero_bit(addr, size, offset) \
551 find_next_zero_bit((addr), (size), (offset))
553 #endif /* !(__MIPSEB__) */
/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
560 #define minix_set_bit(nr,addr) test_and_set_bit(nr,addr)
561 #define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
562 #define minix_test_bit(nr,addr) test_bit(nr,addr)
563 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
565 #endif /* __ASM_MIPS_BITOPS_H */