#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#include <linux/compiler.h>
#include <asm/spinlock.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
/*
 * HP-PARISC specific bit operations
 * for a detailed description of the functions please refer
 * to include/asm-i386/bitops.h or kerneldoc
 */
#ifdef __LP64__
#   define SHIFT_PER_LONG	6
#ifndef BITS_PER_LONG
#   define BITS_PER_LONG	64
#endif
#else
#   define SHIFT_PER_LONG	5
#ifndef BITS_PER_LONG
#   define BITS_PER_LONG	32
#endif
#endif

#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))
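
/*
 * Illustrative sketch (not part of the original header): how a bit number
 * is split into a word index and an in-word mask by the helpers above,
 * assuming a 64-bit kernel where SHIFT_PER_LONG is 6:
 *
 *	unsigned long bitmap[3] = { 0, 0, 0 };
 *	int nr = 75;
 *	unsigned long *word = bitmap + (nr >> SHIFT_PER_LONG);	// &bitmap[1]
 *	unsigned long mask  = 1L << CHOP_SHIFTCOUNT(nr);	// bit 11 of that word
 *	*word |= mask;						// what __set_bit(75, bitmap) does
 */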
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
static __inline__ void set_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void __set_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	*addr |= mask;
}
static __inline__ void clear_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	*addr &= ~mask;
}
static __inline__ void change_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}
static __inline__ void __change_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	*addr ^= mask;
}
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	int oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return oldbit;
}
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	int oldbit;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr |= mask;

	return oldbit;
}
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	int oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr &= ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return oldbit;
}
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	int oldbit;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr &= ~mask;

	return oldbit;
}
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	int oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return oldbit;
}
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask;
	unsigned long *addr = (unsigned long *) address;
	int oldbit;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);
	oldbit = (*addr & mask) ? 1 : 0;
	*addr ^= mask;

	return oldbit;
}
static __inline__ int test_bit(int nr, const volatile unsigned long *address)
{
	unsigned long mask;
	const unsigned long *addr = (const unsigned long *)address;

	addr += (nr >> SHIFT_PER_LONG);
	mask = 1L << CHOP_SHIFTCOUNT(nr);

	return !!(*addr & mask);
}
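
/*
 * Usage sketch (not from the original header): set_bit/test_and_clear_bit
 * are the locked variants; the __-prefixed ones may only be used when the
 * caller already excludes concurrent updates of the same word:
 *
 *	static unsigned long irq_pending[2];		// hypothetical 128-bit map
 *
 *	set_bit(5, irq_pending);			// atomically set bit 5
 *	if (test_and_clear_bit(5, irq_pending))
 *		;					// bit was set, now cleared
 *	if (!test_bit(70, irq_pending))
 *		__set_bit(70, irq_pending);		// non-atomic, needs external locking
 */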
#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of "extr" instructions is it sets PSW[N] bit.
 * How PSW[N] (nullify next insn) gets set is determined by the
 * "condition" field (eg "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so
 * cycles for each mispredicted branch.
 */
static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	__asm__(
#if BITS_PER_LONG > 32
		" ldi       63,%1\n"
		" extrd,u,*<>  %0,63,32,%%r0\n"
		" extrd,u,*TR  %0,31,32,%0\n"	/* move top 32-bits down */
		" addi    -32,%1,%1\n"
#else
		" ldi       31,%1\n"
#endif
		" extru,<>  %0,31,16,%%r0\n"
		" extru,TR  %0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
		" addi    -16,%1,%1\n"
		" extru,<>  %0,31,8,%%r0\n"
		" extru,TR  %0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
		" addi    -8,%1,%1\n"
		" extru,<>  %0,31,4,%%r0\n"
		" extru,TR  %0,27,4,%0\n"	/* 000000x0 -> 0000000x */
		" addi    -4,%1,%1\n"
		" extru,<>  %0,31,2,%%r0\n"
		" extru,TR  %0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
		" addi    -2,%1,%1\n"
		" extru,=  %0,31,1,%%r0\n"	/* check last bit */
		" addi    -1,%1,%1\n"
			: "+r" (x), "=r" (ret) );
	return ret;
}
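
/*
 * Rough C equivalent of the asm above (illustration only, not from the
 * original header): a binary search that halves the window and adjusts the
 * running bit index, shown here for the 32-bit case:
 *
 *	static unsigned long ffs_sketch(unsigned long x)
 *	{
 *		unsigned long ret = 31;
 *		if (x & 0xffffUL) ret -= 16; else x >>= 16;
 *		if (x & 0x00ffUL) ret -=  8; else x >>=  8;
 *		if (x & 0x000fUL) ret -=  4; else x >>=  4;
 *		if (x & 0x0003UL) ret -=  2; else x >>=  2;
 *		if (x & 0x0001UL) ret -=  1;
 *		return ret;
 *	}
 *
 * The asm gets the same effect without branches: each "extr" tests a field
 * and nullifies either the shift or the index adjustment that follows it.
 */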
/* Undefined if no bit is zero. */
#define ffz(x)	__ffs(~x)
/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}
/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(int x)
{
	int ret;

	if (!x)
		return 0;

	__asm__(
	"	ldi		1,%1\n"
	"	extru,<>	%0,15,16,%%r0\n"
	"	zdep,TR		%0,15,16,%0\n"	/* xxxx0000 */
	"	addi		16,%1,%1\n"
	"	extru,<>	%0,7,8,%%r0\n"
	"	zdep,TR		%0,23,24,%0\n"	/* xx000000 */
	"	addi		8,%1,%1\n"
	"	extru,<>	%0,3,4,%%r0\n"
	"	zdep,TR		%0,27,28,%0\n"	/* x0000000 */
	"	addi		4,%1,%1\n"
	"	extru,<>	%0,1,2,%%r0\n"
	"	zdep,TR		%0,29,30,%0\n"	/* y0000000 (y&3 = 0) */
	"	addi		2,%1,%1\n"
	"	extru,=		%0,0,1,%%r0\n"
	"	addi		1,%1,%1\n"	/* if y & 8, add 1 */
		: "+r" (x), "=r" (ret) );

	return ret;
}
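
/*
 * Rough C equivalent of the fls() asm above (illustration only, not from
 * the original header): shift the value left until the top of the word is
 * populated, adding the skipped widths to a running count:
 *
 *	static int fls_sketch(unsigned int x)
 *	{
 *		int ret = 1;
 *		if (!x)
 *			return 0;
 *		if (!(x & 0xffff0000U)) { x <<= 16; } else ret += 16;
 *		if (!(x & 0xff000000U)) { x <<=  8; } else ret +=  8;
 *		if (!(x & 0xf0000000U)) { x <<=  4; } else ret +=  4;
 *		if (!(x & 0xc0000000U)) { x <<=  2; } else ret +=  2;
 *		if (x & 0x80000000U)
 *			ret += 1;
 *		return ret;
 *	}
 */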
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight64(x)						\
({								\
	unsigned long __x = (x);				\
	unsigned int __w;					\
	__w = generic_hweight32((unsigned int) __x);		\
	__w += generic_hweight32((unsigned int) (__x>>32));	\
	__w;							\
})
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
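
/*
 * Example (not from the original header): hweight8(0xF0) is 4 and
 * hweight64(0x8000000000000001UL) is 2; the 64-bit case is the sum of the
 * popcounts of the two 32-bit halves, as the macro above shows.
 */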
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifndef __LP64__
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#else
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(((unsigned int)b[1])))
		return __ffs(b[1]) + 64;
	if (b[1] >> 32)
		return __ffs(b[1] >> 32) + 96;
	return __ffs(b[2]) + 128;
#endif
}
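
/*
 * Worked example (not from the original header): with a 140-bit priority
 * bitmap in which only bit 100 is set, the 32-bit variant skips words 0-2
 * (all zero), finds b[3] non-zero, and returns __ffs(b[3]) + 96 = 100.
 */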
#endif /* __KERNEL__ */
/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
{
	const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (BITS_PER_LONG-offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1)) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
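
/*
 * Usage sketch (not from the original header): scan a small bitmap for free
 * slots, the way allocation bitmaps typically use these helpers:
 *
 *	unsigned long map[2] = { ~0UL, 0UL };		// first word full
 *	unsigned long bits = 2 * BITS_PER_LONG;
 *	unsigned long free = find_first_zero_bit(map, bits);
 *	// free == BITS_PER_LONG: first clear bit is the start of map[1]
 *	unsigned long next = find_next_zero_bit(map, bits, free + 1);
 *	// next == free + 1: the scan resumes at the given offset
 */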
static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 */
#ifdef __LP64__
#define ext2_set_bit(nr, addr)		__test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
#define ext2_set_bit_atomic(l,nr,addr)	test_and_set_bit((nr) ^ 0x38, (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x38, (unsigned long *)addr)
#else
#define ext2_set_bit(nr, addr)		__test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
#define ext2_set_bit_atomic(l,nr,addr)	test_and_set_bit((nr) ^ 0x18, (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)	__test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) test_and_clear_bit((nr) ^ 0x18, (unsigned long *)addr)
#endif
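
/*
 * Note on the (nr) ^ 0x38 / ^ 0x18 trick above (explanatory, not from the
 * original header): ext2 numbers bits little-endian within the bytes of the
 * on-disk bitmap, while the native word operations number bits from the
 * other end of the big-endian 64-bit (or 32-bit) word. XOR-ing the bit
 * number with 0x38 (or 0x18) flips the byte index within the word while
 * keeping the bit index within the byte, e.g. on a 64-bit kernel:
 *
 *	ext2_set_bit(0, map)	// __test_and_set_bit(0 ^ 0x38, map) == native bit 56,
 *				// i.e. the lowest bit of the first byte in memory
 *	ext2_set_bit(13, map)	// __test_and_set_bit(13 ^ 0x38, map) == native bit 53
 */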
#endif	/* __KERNEL__ */
static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;

	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}
/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = cpu_to_le32p(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if ((tmp = cpu_to_le32p(p++)) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = cpu_to_le32p(p);
found_first:
	tmp |= ~0U << size;
found_middle:
	return result + ffz(tmp);
}
/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PARISC_BITOPS_H */