/*
 * [PARISC] Update bitops from parisc tree
 * include/asm-parisc/bitops.h
 */
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#include <linux/compiler.h>
#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
#include <asm/atomic.h>

/*
 * HP-PARISC specific bit operations
 * for a detailed description of the functions please refer
 * to include/asm-i386/bitops.h or kerneldoc
 */

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on use of volatile and __*_bit() (set/clear/change):
 * the *_bit() forms want the volatile qualifier.
 * the __*_bit() forms are "relaxed" and use neither spinlock nor volatile.
 */
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m |= 1UL << CHOP_SHIFTCOUNT(nr);
}
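/*
 * Illustrative usage (not part of the original header): use the locked
 * *_bit() forms when other CPUs or interrupt context may touch the same
 * word; the __*_bit() forms only when the caller already serializes
 * access. "example_bitmap" and the helpers below are hypothetical names.
 */
#if 0
static unsigned long example_bitmap[4];	/* 4 * BITS_PER_LONG bits */

static void example_mark_busy(int nr)
{
	set_bit(nr, example_bitmap);	/* atomic: safe concurrently */
}

static void example_mark_busy_locked(int nr)
{
	/* non-atomic: caller must hold its own lock */
	__set_bit(nr, example_bitmap);
}
#endif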
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m ^= 1UL << CHOP_SHIFTCOUNT(nr);
}
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit | mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);

	oldbit = *addr;
	*addr = oldbit | mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit & ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
	unsigned long oldbit;

	oldbit = *addr;
	*addr = oldbit & ~mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
	unsigned long oldbit;

	oldbit = *addr;
	*addr = oldbit ^ mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_bit(int nr, const volatile unsigned long *address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);

	return !!(*addr & mask);
}
#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of "extr" instructions is that they set the PSW[N] bit.
 * How PSW[N] (nullify next insn) gets set is determined by the
 * "condition" field (e.g. "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insns takes 2 cycles on PA8x00, vs 16 or so cycles for
 * each mispredicted branch.
 */
static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	__asm__(
#ifdef __LP64__
		" ldi		63,%1\n"
		" extrd,u,*<>	%0,63,32,%%r0\n"
		" extrd,u,*TR	%0,31,32,%0\n"	/* move top 32-bits down */
		" addi		-32,%1,%1\n"
#else
		" ldi		31,%1\n"
#endif
		" extru,<>	%0,31,16,%%r0\n"
		" extru,TR	%0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
		" addi		-16,%1,%1\n"
		" extru,<>	%0,31,8,%%r0\n"
		" extru,TR	%0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
		" addi		-8,%1,%1\n"
		" extru,<>	%0,31,4,%%r0\n"
		" extru,TR	%0,27,4,%0\n"	/* 000000x0 -> 0000000x */
		" addi		-4,%1,%1\n"
		" extru,<>	%0,31,2,%%r0\n"
		" extru,TR	%0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
		" addi		-2,%1,%1\n"
		" extru,=	%0,31,1,%%r0\n"	/* check last bit */
		" addi		-1,%1,%1\n"
		: "+r" (x), "=r" (ret) );
	return ret;
}
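/*
 * Illustrative only (not part of the original header): a portable C
 * rendering of the same branch-free binary search the asm above performs
 * with nullification. "__ffs_portable" is a hypothetical name.
 */
#if 0
static inline unsigned long __ffs_portable(unsigned long x)
{
	unsigned long ret = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (x & 0xffffffffUL)
		ret -= 32;		/* lowest set bit is in the low half */
	else
		x >>= 32;		/* move top 32 bits down */
#endif
	if (x & 0xffffUL) ret -= 16; else x >>= 16;
	if (x & 0xffUL)   ret -= 8;  else x >>= 8;
	if (x & 0xfUL)    ret -= 4;  else x >>= 4;
	if (x & 0x3UL)    ret -= 2;  else x >>= 2;
	if (x & 0x1UL)    ret -= 1;	/* undefined if x was 0, as above */
	return ret;
}
#endif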
/* Undefined if no bit is zero. */
#define ffz(x)	__ffs(~(x))

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}
/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(int x)
{
	int ret;

	if (!x)
		return 0;

	__asm__(
		" ldi		1,%1\n"
		" extru,<>	%0,15,16,%%r0\n"
		" zdep,TR	%0,15,16,%0\n"	/* xxxx0000 */
		" addi		16,%1,%1\n"
		" extru,<>	%0,7,8,%%r0\n"
		" zdep,TR	%0,23,24,%0\n"	/* xx000000 */
		" addi		8,%1,%1\n"
		" extru,<>	%0,3,4,%%r0\n"
		" zdep,TR	%0,27,28,%0\n"	/* x0000000 */
		" addi		4,%1,%1\n"
		" extru,<>	%0,1,2,%%r0\n"
		" zdep,TR	%0,29,30,%0\n"	/* y0000000 (y&3 = 0) */
		" addi		2,%1,%1\n"
		" extru,=	%0,0,1,%%r0\n"
		" addi		1,%1,%1\n"	/* if y & 8, add 1 */
		: "+r" (x), "=r" (ret) );

	return ret;
}
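/*
 * Illustrative only (not part of the original header): a portable C
 * rendering of the shift-and-test sequence the fls() asm above performs.
 * "fls_portable" is a hypothetical name.
 */
#if 0
static inline int fls_portable(int x)
{
	unsigned int v = (unsigned int) x;
	int ret = 1;

	if (!v)
		return 0;
	if (v & 0xffff0000u) ret += 16; else v <<= 16;
	if (v & 0xff000000u) ret += 8;  else v <<= 8;
	if (v & 0xf0000000u) ret += 4;  else v <<= 4;
	if (v & 0xc0000000u) ret += 2;  else v <<= 2;
	if (v & 0x80000000u) ret += 1;
	return ret;
}
#endif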
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
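/*
 * Illustrative only (not part of the original header): the mask-and-add
 * style of popcount that the generic_hweight*() helpers implement, shown
 * for 32 bits; e.g. it returns 16 for 0xf0f0f0f0. "hweight32_sketch" is a
 * hypothetical name.
 */
#if 0
static inline unsigned int hweight32_sketch(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);		/* 2-bit sums */
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);	/* 4-bit sums */
	res = (res + (res >> 4)) & 0x0f0f0f0f;			/* 8-bit sums */
	res = res + (res >> 8);					/* 16-bit sums */
	return (res + (res >> 16)) & 0xff;			/* 32-bit total */
}
#endif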
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef __LP64__
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#else
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
}
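/*
 * Illustrative usage (not part of the original header): the 140 bits are
 * the scheduler's per-priority run queue flags, one bit per priority
 * level. "prio_bitmap" and "highest_ready_prio" are hypothetical names.
 */
#if 0
static unsigned long prio_bitmap[(140 + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int highest_ready_prio(void)
{
	/* only valid while at least one of the 140 bits is set */
	return sched_find_first_bit(prio_bitmap);
}
#endif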
#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
{
	const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (BITS_PER_LONG-offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1)) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
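/*
 * Illustrative usage (not part of the original header): finding a free
 * slot in a fixed-size allocation bitmap. "slots", "NSLOTS" and
 * "alloc_slot" are hypothetical names.
 */
#if 0
#define NSLOTS 128
static unsigned long slots[NSLOTS / BITS_PER_LONG];

static int alloc_slot(void)
{
	unsigned long idx = find_first_zero_bit(slots, NSLOTS);

	if (idx >= NSLOTS)
		return -1;	/* all slots taken */
	__set_bit(idx, slots);	/* caller must serialize, see above */
	return (int) idx;
}
#endif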
static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

#define _EXT2_HAVE_ASM_BITOPS_
#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit are atomic; callers do not need to
 * disable interrupts around them.
 */

/* '3' is log2(bits per byte): shifting the word's highest byte index
 * left by 3 converts it to a bit offset, giving the XOR mask that remaps
 * little-endian bit numbers onto this big-endian machine. */
#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
#define ext2_test_bit(nr, addr) \
		test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_set_bit(nr, addr) \
		__test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) \
		__test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)

#define ext2_set_bit_atomic(l,nr,addr) \
		test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) \
		test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
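/*
 * Worked example (not part of the original header, assuming a 64-bit
 * kernel): LE_BYTE_ADDR is 7 << 3 == 56. Ext2 numbers bits within
 * little-endian bytes, so ext2 bit 0 lives in the lowest-addressed byte,
 * which a big-endian load places in native bits 56..63; indeed
 * 0 ^ 56 == 56. Conversely, ext2 bit 57 (bit 1 of byte 7) maps to
 * 57 ^ 56 == 1. The XOR only flips the byte-index bits of nr, leaving
 * the bit-within-byte bits intact.
 */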
#endif	/* __KERNEL__ */

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(unsigned long * x)
{
#ifdef __LP64__
	return (unsigned long) __swab64p((u64 *) x);
#else
	return (unsigned long) __swab32p((u32 *) x);
#endif
}

/* include/linux/byteorder doesn't support "unsigned long" type */
static inline unsigned long ext2_swab(unsigned long y)
{
#ifdef __LP64__
	return (unsigned long) __swab64((u64) y);
#else
	return (unsigned long) __swab32((u32) y);
#endif
}
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1UL);
	if (offset) {
		tmp = ext2_swabp(p++);
		tmp |= (~0UL >> (BITS_PER_LONG - offset));
		if (size < BITS_PER_LONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}

	while (size & ~(BITS_PER_LONG - 1)) {
		if (~(tmp = *(p++)))
			goto found_middle_swap;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = ext2_swabp(p);
found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. Skip ffz */
found_middle:
	return result + ffz(tmp);

found_middle_swap:
	return result + ffz(ext2_swab(tmp));
}
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PARISC_BITOPS_H */