[davej-history.git] / include/asm-i386/bitops.h

#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)
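
/*
 * Example of the numbering convention above (illustrative snippet, not
 * kernel code): with 32-bit longs, bit 35 lives in the second word of a
 * bitmap, at bit position 3:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(35, map);	afterwards map[1] == 0x00000008
 */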

static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/* WARNING: non atomic and it can be reordered! */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
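
/*
 * Example: since clear_bit() is not a compiler barrier, callers that use
 * a bit as a lock typically add the barrier explicitly before releasing
 * it (MY_LOCK_BIT and flags are made-up names for illustration):
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_LOCK_BIT, &flags);
 */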

static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/*
 * It will also imply a memory barrier, thus it must clobber memory
 * to make sure to reload anything that was cached into registers
 * outside _this_ critical section.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
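
/*
 * Example: the returned old value makes test_and_set_bit() usable as a
 * simple acquire primitive (MY_LOCK_BIT and flags are made-up names):
 *
 *	while (test_and_set_bit(MY_LOCK_BIT, &flags))
 *		;	bit already owned, spin until we get it
 */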

/* WARNING: non atomic and it can be reordered! */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/* WARNING: non atomic and it can be reordered! */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
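
/*
 * Example of the dispatch above: test_bit(5, &flags) has a compile-time
 * constant bit number, so __builtin_constant_p() evaluates to 1 and the
 * plain C constant_test_bit() is used; test_bit(i, &flags) with a
 * variable i falls back to the btl-based variable_test_bit().
 */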

/*
 * Find-bit routines..
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
	return res;
}

static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for zero in first byte
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (~(*p >> bit)));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full bytes for a zero
	 */
	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + set + res);
}
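
/*
 * Example: walking every zero (free) bit in a bitmap of 'size' bits
 * (map, size and bit are made-up names for illustration):
 *
 *	for (bit = find_first_zero_bit(map, size);
 *	     bit < size;
 *	     bit = find_next_zero_bit(map, size, bit + 1))
 *		...	bit is free here ...
 */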

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
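
/*
 * Example: ffz(0x0000ffff) == 16, since bits 0-15 are set and bit 16 is
 * the first zero. A caller is expected to rule out the all-ones case
 * first:
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);
 */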

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "g" (x));
	return r+1;
}
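
/*
 * Example: ffs(0) == 0 and ffs(0x10) == 5, i.e. the result is 1-based
 * and zero means "no bits set", matching the libc ffs().
 */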

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
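
/*
 * Example: hweight8(0x5A) == 4, since 0x5A = 01011010b has four bits
 * set. The generic_hweight* helpers are expected to be supplied by
 * <linux/bitops.h>, the usual entry point that includes this header.
 */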

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit __test_and_set_bit
#define ext2_clear_bit __test_and_clear_bit
#define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */