#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__
/*
 * Generic ffs().
 */
static inline int ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}
/*
 * Generic __ffs().
 */
static inline int __ffs(int x)
{
	int r = 0;

	if (!x)
		return 0;
	if (!(x & 0xffff)) {
		x >>= 16;
		r += 16;
	}
	if (!(x & 0xff)) {
		x >>= 8;
		r += 8;
	}
	if (!(x & 0xf)) {
		x >>= 4;
		r += 4;
	}
	if (!(x & 3)) {
		x >>= 2;
		r += 2;
	}
	if (!(x & 1)) {
		x >>= 1;
		r += 1;
	}
	return r;
}
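
/*
 * Example (illustrative, not part of the original header): ffs() uses
 * 1-based bit numbers and returns 0 for a zero argument, while __ffs()
 * is 0-based (this version also returns 0 for a zero argument), e.g.:
 *
 *	ffs(0x00000010)   == 5
 *	__ffs(0x00000010) == 4
 */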
/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
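
/*
 * Usage sketch (illustrative; 'prio_bitmap' is a made-up name): the
 * scheduler keeps its 140-bit priority bitmap in five 32-bit words and
 * asks for the lowest set bit:
 *
 *	unsigned long prio_bitmap[5];
 *	int prio;
 *
 *	__set_bit(100, prio_bitmap);
 *	prio = sched_find_first_bit(prio_bitmap);	== 100
 */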
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
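
/*
 * Example (illustrative): ffz() is the complement of __ffs(), so
 * ffz(0x0000000f) == 4.  Per the comment above, callers should reject
 * an all-ones word first:
 *
 *	if (word != ~0UL)
 *		bit = ffz(word);
 */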
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

static __inline__ void __set_bit(int nr, volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
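
/*
 * Typical ordering pattern (illustrative): publish data with a barrier
 * before clearing a "busy" flag, since clear_bit() itself does not
 * order the surrounding accesses.  'shared', 'value' and BUSY_BIT are
 * made-up names for the example:
 *
 *	shared->data = value;
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUSY_BIT, &shared->flags);
 */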
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save(flags);
	*ADDR ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile unsigned long * addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *) addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}
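
/*
 * Usage sketch (illustrative): test_and_set_bit() can serve as a simple
 * claim/ownership flag, since it returns the old value atomically with
 * respect to interrupts on this CPU.  'device_busy', start_transfer()
 * and queue_request() are made-up names for the example:
 *
 *	if (!test_and_set_bit(0, &device_busy))
 *		start_transfer();	we won the flag
 *	else
 *		queue_request();	already busy
 */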
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *) addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}
/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const unsigned long * addr)
{
	int * a = (int *) addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)))
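
/*
 * Example (illustrative): test_bit() picks an implementation at compile
 * time.  A constant bit number such as test_bit(5, &flags) resolves to
 * __constant_test_bit(), while a variable one such as test_bit(nr, &flags)
 * goes through __test_bit().  'flags' and 'nr' are made-up names for
 * the example.
 */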
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;	/* mask out the invalid high bits */
found_middle:
	return result + ffz(tmp);
}
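
/*
 * Usage sketch (illustrative): scanning a bitmap for a free slot.
 * 'map' and NR_SLOTS are made-up names for the example:
 *
 *	int slot = find_first_zero_bit(map, NR_SLOTS);
 *	if (slot < NR_SLOTS)
 *		set_bit(slot, map);	claim the free slot
 */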
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
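
/*
 * Example (illustrative): hweight32(0xf0f0f0f0) == 16 and
 * hweight8(0x0f) == 4, i.e. each macro counts the set bits in its
 * operand width.
 */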
static __inline__ int ext2_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void * addr)
{
	int mask, retval;
	unsigned long flags;
	volatile unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	local_irq_save(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	local_irq_restore(flags);
	return retval;
}
#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
static __inline__ int ext2_test_bit(int nr, const volatile void * addr)
{
	int mask;
	const volatile unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
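
/*
 * Illustrative note on the ext2_ bit numbering used above: ext2 bitmaps
 * are little-endian on disk, so bit 0 is the least significant bit of
 * byte 0 regardless of CPU endianness.  For example, a bitmap whose
 * first byte is 0x01 has ext2 bit 0 set; on this big-endian CPU a
 * 32-bit word read therefore has to go through __swab32() before ffz()
 * so the returned bit index matches the on-disk numbering.
 */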
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif /* __KERNEL__ */

#endif /* _M68KNOMMU_BITOPS_H */