m68knommu: use generic find_next_bit_le()
arch/m68k/include/asm/bitops_no.h
#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
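
/*
 * ColdFire cores with the ISA A+ or ISA C instruction set provide bitrev
 * and ff1: bitrev reverses the bit order of the register and ff1 then
 * finds the first set bit scanning from the most significant bit, so the
 * pair yields the index of the lowest set bit of the original value.
 */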
#if defined (__mcfisaaplus__) || defined (__mcfisac__)
static inline int ffs(unsigned int val)
{
	if (!val)
		return 0;

	asm volatile(
			"bitrev %0\n\t"
			"ff1 %0\n\t"
			: "=d" (val)
			: "0" (val)
		    );
	val++;
	return val;
}

static inline int __ffs(unsigned int val)
{
	asm volatile(
			"bitrev %0\n\t"
			"ff1 %0\n\t"
			: "=d" (val)
			: "0" (val)
		    );
	return val;
}

#else
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#endif

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
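
/*
 * The bset/bclr/bchg instructions used by set_bit() and friends below
 * operate on a single byte in memory and take the bit number modulo 8
 * from the source operand.  Bit nr of the bitmap is bit (nr & 31) of the
 * 32-bit word at addr[nr >> 5]; on this big-endian CPU that bit lives in
 * byte ((nr ^ 31) >> 3) from addr, which is the index computed in the
 * memory operands below.
 */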
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)
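
/*
 * The double-underscore (non-atomic) variants here and below simply
 * alias the instruction-based versions: each operation is a single
 * read-modify-write bit instruction and these parts are uniprocessor,
 * so nothing stronger is needed.
 */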

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}
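
/*
 * test_bit() takes the constant-nr path when the bit number is known at
 * compile time so the word index and mask fold to constants; both paths
 * are plain C loads and are not atomic.
 */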
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
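
/*
 * With 32-bit longs BITOP_LE_SWIZZLE is 24, so XORing the bit number with
 * it swaps the byte order within each long.  Combined with the big-endian
 * (nr ^ 31) mapping in the helpers above, the *_le() wrappers below end up
 * touching bit (nr & 7) of byte (nr >> 3), i.e. a little-endian bitmap
 * layout.
 */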
static inline void __set_bit_le(int nr, void *addr)
{
	__set_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}

static inline void __clear_bit_le(int nr, void *addr)
{
	__clear_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}
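
/*
 * The assembler *_le() variants below index byte (nr >> 3) directly: the
 * little-endian bitmap layout needs no byte swizzle, and the instruction
 * again takes the bit within the byte from the low three bits of nr.
 */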
static inline int __test_and_set_bit_le(int nr, volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

static inline int __test_and_clear_bit_le(int nr, volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#include <asm-generic/bitops/ext2-atomic.h>

static inline int test_bit_le(int nr, const volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define find_first_zero_bit_le(addr, size)	\
	find_next_zero_bit_le((addr), (size), 0)

static inline unsigned long find_next_zero_bit_le(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));

found_middle:
	return result + ffz(__swab32(tmp));
}
#define find_next_zero_bit_le find_next_zero_bit_le
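
/*
 * find_next_bit_le() is no longer open-coded here; per the change this
 * file belongs to, the declaration below refers to the generic library
 * implementation.
 */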
extern unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset);

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */