[linux-2.6/mini2440.git] / include / asm-m68knommu / bitops.h
#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
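/*
 * Note on the addressing below: bit nr is counted from the LSB of a
 * 32-bit long, so (nr ^ 31) >> 3 selects the byte holding that bit on
 * this big-endian CPU, and the memory forms of bset/bclr/bchg/btst use
 * the bit number modulo 8 within the addressed byte.  ColdFire's bit
 * instructions accept fewer effective addressing modes than the full
 * 68000 family, which is why the CONFIG_COLDFIRE paths first load the
 * byte address into %a0 with lea.
 */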
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
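/*
 * On this uniprocessor, non-MMU target the two macros above are pure
 * compiler barriers: clear_bit() itself orders nothing, so callers
 * that need ordering around it must place these barriers explicitly.
 */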
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
	     : "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     : "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)
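/*
 * Usage sketch (illustrative): bits index into an array of longs, so
 * bit 35 is bit 3 of the second word.
 *
 *	static unsigned long map[2];
 *
 *	set_bit(35, map);		// map[1] |= 1UL << 3
 *	change_bit(35, map);		// map[1] ^= 1UL << 3
 *	clear_bit(35, map);		// map[1] &= ~(1UL << 3)
 */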
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
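/*
 * The test_and_*_bit() helpers work because bset/bclr/bchg set the
 * condition-code Z flag from the *old* value of the bit; "sne" then
 * stores 0xff into retval when Z is clear, so the return value is
 * nonzero exactly when the bit was already set before the operation.
 */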
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)
/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}
static __inline__ int __test_bit(int nr, const volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}
#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
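/*
 * test_bit() needs no asm: it only reads a word, and an aligned 32-bit
 * load is already atomic here.  The __builtin_constant_p() split lets
 * the compiler fold the mask and word index when nr is a compile-time
 * constant.
 */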
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
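/*
 * The ext2_*() operations below act on little-endian on-disk bitmaps,
 * so the byte index is a plain nr >> 3 with no big-endian swizzle; the
 * bit position within a byte (nr & 7) is the same for either
 * endianness, which is why the same bset/bclr/btst instructions can be
 * reused.
 */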
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}
static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
	     : "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
	     : "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}
#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
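/*
 * Usage sketch (illustrative; the lock and bitmap names are examples
 * only, not part of this header):
 *
 *	static DEFINE_SPINLOCK(bitmap_lock);
 *	static unsigned long bitmap[4];
 *
 *	if (!ext2_set_bit_atomic(&bitmap_lock, 10, bitmap))
 *		...;	// bit 10 was previously clear, now set
 */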
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
	     : "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
	     : "=d" (retval)
	     : "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
	     /* No clobber */);
#endif

	return retval;
}
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
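/*
 * Worked example: if the first bitmap byte on disk is 0x07 (ext2 bits
 * 0-2 set) and the following three bytes are zero, a big-endian load
 * of that word gives tmp == 0x07000000.  __swab32(tmp) == 0x00000007,
 * and ffz() of that is 3: the first zero bit in ext2 numbering, as
 * required.
 */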
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */