#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */
#include <linux/config.h>
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif
#define ADDR (*(volatile long *) addr)
/*
 * set_bit() is atomic and may not be reordered.  See __set_bit() if you
 * do not require the atomic guarantees.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
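/*
 * Example: a minimal usage sketch; the flag word and bit name below are
 * hypothetical, not part of this header:
 *
 *	static unsigned long dev_flags;
 *	#define DEV_RUNNING 0
 *
 *	set_bit(DEV_RUNNING, &dev_flags);
 *
 * On SMP kernels the LOCK_PREFIX above makes the btsl atomic with
 * respect to other processors.
 */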
/* WARNING: non-atomic and can be reordered! */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
/*
 * clear_bit() doesn't provide any barrier for the compiler; callers that
 * need ordering must use smp_mb__before_clear_bit() and/or
 * smp_mb__after_clear_bit() explicitly.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
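/*
 * Example: releasing a lock-like bit, a sketch assuming a hypothetical
 * lock word:
 *
 *	static unsigned long lock_word;
 *	#define LOCK_BIT 0
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &lock_word);
 *
 * The barrier keeps the compiler from sinking critical-section stores
 * past the clear_bit() that publishes the release.
 */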
/* change_bit() is atomic and may not be reordered. */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
/*
 * test_and_set_bit() also implies a memory barrier, so it must clobber
 * memory to make sure to reload anything that was cached in registers
 * outside _this_ critical section.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
/* WARNING: non-atomic and can be reordered! */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
/* WARNING: non-atomic and can be reordered! */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
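/*
 * The macro dispatches on whether nr is a compile-time constant: a
 * constant index is tested with plain C (a shift, an and, a compare),
 * while a variable index uses the btl instruction. With a hypothetical
 * flag word:
 *
 *	test_bit(3, &flags);	constant nr, uses constant_test_bit()
 *	test_bit(i, &flags);	variable nr, uses variable_test_bit()
 */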
/*
 * Find-bit routines..
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
	return res;
}
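/*
 * Example: claiming the first free slot in an allocation bitmap (names
 * hypothetical). Callers must provide their own locking, since the find
 * and the set are not one atomic step:
 *
 *	static unsigned long inuse_map[4];	128 slots
 *
 *	int slot = find_first_zero_bit(inuse_map, 128);
 *	if (slot < 128)
 *		__set_bit(slot, inuse_map);
 */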
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for zero in the first 32-bit word
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (~(*p >> bit)));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + set + res);
}
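/*
 * Example: walking every clear bit in a bitmap (names hypothetical):
 *
 *	int i;
 *	for (i = find_first_zero_bit(map, NBITS);
 *	     i < NBITS;
 *	     i = find_next_zero_bit(map, NBITS, i + 1)) {
 *		...bit i is clear...
 *	}
 */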
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
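/*
 * Example: ffz(0xfffffffe) == 0 and ffz(0x0000ffff) == 16. Guard the
 * call, since the result is undefined for ~0UL:
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);
 */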
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, and therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r+1;
}
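/*
 * Example: ffs(0) == 0, ffs(1) == 1, ffs(0x10) == 5. Note the 1-based
 * result, versus the 0-based index returned by ffz() above.
 */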
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
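/*
 * Example: hweight32(0xf0f0f0f0) == 16 and hweight8(0x0f) == 4. The
 * generic_hweight* helpers are the portable C versions from
 * <linux/bitops.h>.
 */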
#endif /* __KERNEL__ */
#ifdef __KERNEL__

/* ext2 bitmaps are little-endian, so on i386 they map straight onto
 * the native bit operations. */
#define ext2_set_bit                 __test_and_set_bit
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */
#endif /* _I386_BITOPS_H */