bitops kernel-doc: inline instead of macro
[linux-2.6/kvm.git] / include/asm-x86/bitops_32.h
blob 0b40f6d20bea207081fd739f8eae5b9bb3ac12f4
#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *) addr)
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * do not rely on its ordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	__asm__(
		"btsl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
/**
 * clear_bit_unlock - Clears a bit in memory with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit_unlock() is atomic and implies release semantics before the
 * memory operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}
static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
/**
 * __clear_bit_unlock - Clears a bit in memory with release semantics
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit_unlock() is non-atomic and implies release semantics before
 * the memory operation. It can be used for an unlock if no other CPUs can
 * concurrently modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
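
/*
 * Usage sketch (illustrative only): clearing a "busy" flag that another
 * CPU may be polling. The barrier ensures the critical-section stores
 * are visible before the flag clears. my_release_busy and the bit
 * number 0 standing for a MY_BUSY flag are hypothetical.
 */
#if 0
static void my_release_busy(unsigned long *flags)
{
	smp_mb__before_clear_bit();	/* order prior stores before the clear */
	clear_bit(0, flags);		/* 0 = hypothetical MY_BUSY bit */
}
#endif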
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; on other
 * architectures it may be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"+m" (ADDR)
		:"Ir" (nr));
}
/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; on other
 * architectures it may be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit() on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}
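
/*
 * Usage sketch (illustrative only): a minimal bit spinlock built from
 * test_and_set_bit_lock()/clear_bit_unlock(), in the spirit of
 * <linux/bit_spinlock.h>. my_lock_word, MY_LOCK_BIT, my_bit_lock and
 * my_bit_unlock are hypothetical.
 */
#if 0
static unsigned long my_lock_word;
#define MY_LOCK_BIT	0

static void my_bit_lock(void)
{
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
		cpu_relax();		/* spin until the old value was 0 */
}

static void my_bit_unlock(void)
{
	clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
}
#endif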
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; on
 * architectures other than x86 it can be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}
/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"+m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif

static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr, addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr), (addr)) : \
	 variable_test_bit((nr), (addr)))

#undef ADDR
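
/*
 * Usage sketch (illustrative only): test_bit() dispatches at compile time.
 * With a constant bit number gcc can fold constant_test_bit() down to a
 * mask test; a runtime bit number goes through the btl instruction in
 * variable_test_bit() instead. my_map and my_check are hypothetical.
 */
#if 0
static unsigned long my_map[2];			/* 64 bits on i386 */

static int my_check(int nr)
{
	if (test_bit(5, my_map))		/* constant nr -> constant_test_bit() */
		return 1;
	return test_bit(nr, my_map);		/* variable nr -> variable_test_bit() */
}
#endif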
/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr) : "memory");
	return res;
}
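
/*
 * Usage sketch (illustrative only): allocating a free slot from a small
 * bitmap, a common pattern for ID allocators. MY_NR_SLOTS, my_slot_map
 * and my_alloc_slot are hypothetical; a real allocator would also need
 * locking around the find/set pair, since the two steps together are
 * not atomic.
 */
#if 0
#define MY_NR_SLOTS	64
static unsigned long my_slot_map[MY_NR_SLOTS / 32];	/* 32-bit words on i386 */

static int my_alloc_slot(void)
{
	int slot = find_first_zero_bit(my_slot_map, MY_NR_SLOTS);

	if (slot >= MY_NR_SLOTS)
		return -1;			/* bitmap full */
	__set_bit(slot, my_slot_map);		/* caller holds a lock (assumed) */
	return slot;
}
#endif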
/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);
/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit is set, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"rm" (word));
	return word;
}
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += (sizeof(*addr)<<3);
	}
	return x;
}
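
/*
 * Usage sketch (illustrative only): walking every set bit in a bitmap
 * with find_first_bit()/find_next_bit(), the open-coded form of what
 * for_each_set_bit-style helpers do. MY_BITS, my_map and my_walk_bits
 * are hypothetical.
 */
#if 0
#define MY_BITS		64
static unsigned long my_map[MY_BITS / 32];

static void my_walk_bits(void)
{
	unsigned bit;

	for (bit = find_first_bit(my_map, MY_BITS);
	     bit < MY_BITS;
	     bit = find_next_bit(my_map, MY_BITS, bit + 1))
		/* handle_bit(bit); -- hypothetical per-bit work */ ;
}
#endif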
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);
/**
 * ffz - find first zero in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
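
/*
 * Illustrative note: __ffs() and ffz() are duals, ffz(x) == __ffs(~x).
 * Both are 0-based and undefined when no qualifying bit exists, hence
 * the checks against 0 and ~0UL above. A hypothetical sanity check
 * (my_ffs_ffz_demo is made up):
 */
#if 0
static void my_ffs_ffz_demo(void)
{
	unsigned long w = 0x58;		/* binary 0101 1000 */

	BUG_ON(__ffs(w) != 3);		/* lowest set bit is bit 3 */
	BUG_ON(ffz(~w) != 3);		/* lowest zero of ~w is the same bit */
}
#endif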
#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from the above ffz()
 * (man ffs): it returns a 1-based bit number, or 0 if no bit is set.
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs(): it returns the 1-based
 * position of the most significant set bit, or 0 if no bit is set.
 */
static inline int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
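
/*
 * Illustrative note: ffs() and fls() are 1-based and return 0 for a zero
 * argument, unlike the 0-based __ffs()/ffz() above. For example,
 * ffs(0x18) == 4 and fls(0x18) == 5. fls() is handy for rounding up to
 * a power of two, as sketched below (my_roundup_pow2 is hypothetical).
 */
#if 0
static unsigned long my_roundup_pow2(unsigned int x)
{
	/* fls(x - 1) is the number of bits needed to represent x - 1 */
	return x <= 1 ? 1UL : 1UL << fls(x - 1);
}
#endif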
#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock, nr, addr) \
	test_and_set_bit((nr), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
	test_and_clear_bit((nr), (unsigned long *)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */