#ifndef _X86_64_BITOPS_H
#define _X86_64_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr) : "memory");
}
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr) : "memory");
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr));
}
static __inline__ void __clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr));
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
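/*
 * Usage sketch: releasing a hypothetical bit lock. clear_bit() is not a
 * full barrier, so the release ordering must be added explicitly:
 *
 *	smp_mb__before_clear_bit();	// order the critical section before the release
 *	clear_bit(MY_LOCK_BIT, &my_lock_word);
 */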
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"dIr" (nr));
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr));
	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr));
	return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"dIr" (nr) : "memory");
	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile const void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"dIr" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
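/*
 * Usage sketch: test_bit() dispatches at compile time; my_flags and n
 * are hypothetical:
 *
 *	test_bit(3, my_flags);		// constant nr: constant_test_bit()
 *	test_bit(n, my_flags);		// variable nr: variable_test_bit()
 */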
extern long find_first_zero_bit(const unsigned long * addr, unsigned long size);
extern long find_next_zero_bit(const unsigned long * addr, long size, long offset);
extern long find_first_bit(const unsigned long * addr, unsigned long size);
extern long find_next_bit(const unsigned long * addr, long size, long offset);
/* return index of first bit set in val, or max when no bit is set */
static inline unsigned long __scanbit(unsigned long val, unsigned long max)
{
	asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
	return val;
}
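/*
 * Worked example of the __scanbit() contract (values easily checked by
 * hand):
 *
 *	__scanbit(0x8UL, 64) == 3	// bsfq finds the lowest set bit
 *	__scanbit(0x0UL, 64) == 64	// ZF set, cmovz substitutes max
 */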
#define find_first_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(*(unsigned long *)addr,(size))) : \
  find_first_bit(addr,size)))

#define find_next_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
  find_next_bit(addr,size,off)))

#define find_first_zero_bit(addr,size) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  (__scanbit(~*(unsigned long *)addr,(size))) : \
  find_first_zero_bit(addr,size)))

#define find_next_zero_bit(addr,size,off) \
((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
  find_next_zero_bit(addr,size,off)))
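/*
 * Usage sketch: with a compile-time-constant size <= BITS_PER_LONG the
 * macros above reduce to a single __scanbit(); map is hypothetical:
 *
 *	unsigned long map = 0xf0;
 *	find_first_bit(&map, 64);	// inline path, yields 4
 *	find_first_zero_bit(&map, 64);	// inline path, yields 0
 */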
/*
 * Find a string of zero bits in a bitmap; returns -1 when not found.
 */
extern unsigned long
find_next_zero_string(unsigned long *bitmap, long start, long nbits, int len);
static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
				  int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__set_bit(i, bitmap);
		i++;
	}
}
static inline void __clear_bit_string(unsigned long *bitmap, unsigned long i,
				      int len)
{
	unsigned long end = i + len;
	while (i < end) {
		__clear_bit(i, bitmap);
		i++;
	}
}
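/*
 * Usage sketch: a minimal range allocator, pairing the search with
 * set_bit_string(); my_map and MY_MAP_BITS are hypothetical:
 *
 *	long pos = find_next_zero_string(my_map, 0, MY_MAP_BITS, 4);
 *	if (pos != -1)
 *		set_bit_string(my_map, pos, 4);	// claim the 4-bit run
 */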
/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __ffs(unsigned long word)
{
	__asm__("bsfq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
/*
 * __fls: find last bit set.
 * @word: The word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
static __inline__ unsigned long __fls(unsigned long word)
{
	__asm__("bsrq %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
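/*
 * Worked example for the word-scan primitives (values easily checked
 * by hand):
 *
 *	ffz(0xffUL) == 8	// first zero bit
 *	__ffs(0xf0UL) == 4	// first set bit
 *	__fls(0xf0UL) == 7	// last set bit
 */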
#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=r" (r) : "rm" (x), "r" (-1));
	return r+1;
}
/**
 * fls64 - find last bit set in 64 bit word
 * @x: the word to search
 *
 * This is defined the same way as fls.
 */
static __inline__ int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 */
static __inline__ int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"cmovzl %2,%0"
		: "=&r" (r) : "rm" (x), "rm" (-1));
	return r+1;
}
#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */
#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock,nr,addr) \
	test_and_set_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit_atomic(lock,nr,addr) \
	test_and_clear_bit((nr),(unsigned long*)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */
#endif /* _X86_64_BITOPS_H */