#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#include <linux/compiler.h>
#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
#include <asm/atomic.h>

/*
 * HP-PARISC specific bit operations
 * for a detailed description of the functions please refer
 * to include/asm-i386/bitops.h or kerneldoc
 */

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
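
/*
 * Illustrative example (not part of the original header): with
 * BITS_PER_LONG == 64 (so SHIFT_PER_LONG == 6), bit number nr == 75
 * decomposes into
 *	word index: nr >> SHIFT_PER_LONG == 75 >> 6 == 1
 *	bit index:  CHOP_SHIFTCOUNT(75)  == 75 & 63 == 11
 * i.e. bit 75 of a bitmap is bit 11 of addr[1]. Every routine below
 * performs exactly this decomposition.
 */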

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on use of volatile and __*_bit() (set/clear/change):
 * the *_bit() forms want use of volatile;
 * the __*_bit() forms are "relaxed" and use neither the spinlock nor volatile.
 */
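
/*
 * Minimal usage sketch (illustrative; flags_map and do_something() are
 * hypothetical, not part of this header):
 *
 *	static unsigned long flags_map[4];
 *
 *	set_bit(42, flags_map);			atomic: safe vs. concurrent updaters
 *	if (test_and_clear_bit(42, flags_map))	atomic read-and-clear
 *		do_something();
 *	__set_bit(7, flags_map);		"relaxed": caller must serialize
 */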

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m |= 1UL << CHOP_SHIFTCOUNT(nr);
}

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m ^= 1UL << CHOP_SHIFTCOUNT(nr);
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit | mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);

	oldbit = *addr;
	*addr = oldbit | mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit & ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
	unsigned long oldbit;

	oldbit = *addr;
	*addr = oldbit & ~mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
	unsigned long oldbit;

	oldbit = *addr;
	*addr = oldbit ^ mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_bit(int nr, const volatile unsigned long *address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);

	return !!(*addr & mask);
}

#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of the "extr" instructions is that they set the PSW[N] bit.
 * How PSW[N] (nullify next insn) gets set is determined by the
 * "condition" field (eg "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insns executes in 2 cycles on PA8x00 vs 16 or so
 * cycles for each mispredicted branch.
 */

static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	__asm__(
#ifdef __LP64__
		" ldi       63,%1\n"
		" extrd,u,*<> %0,63,32,%%r0\n"
		" extrd,u,*TR %0,31,32,%0\n"	/* move top 32-bits down */
		" addi    -32,%1,%1\n"
#else
		" ldi       31,%1\n"
#endif
		" extru,<> %0,31,16,%%r0\n"
		" extru,TR %0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
		" addi    -16,%1,%1\n"
		" extru,<> %0,31,8,%%r0\n"
		" extru,TR %0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
		" addi    -8,%1,%1\n"
		" extru,<> %0,31,4,%%r0\n"
		" extru,TR %0,27,4,%0\n"	/* 000000x0 -> 0000000x */
		" addi    -4,%1,%1\n"
		" extru,<> %0,31,2,%%r0\n"
		" extru,TR %0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
		" addi    -2,%1,%1\n"
		" extru,= %0,31,1,%%r0\n"	/* check last bit */
		" addi    -1,%1,%1\n"
			: "+r" (x), "=r" (ret) );
	return ret;
}
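
/*
 * Worked example (illustrative, not part of the original header):
 * __ffs(0x40) on 32-bit, i.e. only bit 6 set. ret starts at 31:
 *	low 16 bits nonzero -> keep low half,	ret = 31 - 16 = 15
 *	low  8 bits nonzero -> keep low half,	ret = 15 -  8 =  7
 *	low  4 bits zero    -> shift down 4	(x = 0x4, ret still 7)
 *	low  2 bits zero    -> shift down 2	(x = 0x1, ret still 7)
 *	last bit set	    ->			ret =  7 -  1 =  6
 */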

/* Undefined if no bit is zero. */
#define ffz(x)	__ffs(~x)

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}
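
/*
 * Example (illustrative): for x = 0x10, __ffs(0x10) == 4 (zero-based)
 * while ffs(0x10) == 5 (one-based, libc convention); ffs(0) == 0,
 * whereas __ffs(0) and ffz(~0UL) are undefined.
 */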

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(int x)
{
	int ret;

	if (!x)
		return 0;

	__asm__(
	" ldi      1,%1\n"
	" extru,<> %0,15,16,%%r0\n"
	" zdep,TR  %0,15,16,%0\n"	/* xxxx0000 */
	" addi     16,%1,%1\n"
	" extru,<> %0,7,8,%%r0\n"
	" zdep,TR  %0,23,24,%0\n"	/* xx000000 */
	" addi     8,%1,%1\n"
	" extru,<> %0,3,4,%%r0\n"
	" zdep,TR  %0,27,28,%0\n"	/* x0000000 */
	" addi     4,%1,%1\n"
	" extru,<> %0,1,2,%%r0\n"
	" zdep,TR  %0,29,30,%0\n"	/* y0000000 (y&3 = 0) */
	" addi     2,%1,%1\n"
	" extru,=  %0,0,1,%%r0\n"
	" addi     1,%1,%1\n"		/* if y & 8, add 1 */
		: "+r" (x), "=r" (ret) );

	return ret;
}
#define fls64(x)   generic_fls64(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
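
/* Example (illustrative): hweight8(0xA5) == 4, since 0xA5 == 10100101 in binary. */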

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef __LP64__
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#else
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
}
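
/*
 * Illustrative example (not part of the original header): on a 64-bit
 * kernel, with b[0] == 0 and b[1] == 0x10, the first set bit is bit 4
 * of word 1, so sched_find_first_bit() returns __ffs(0x10) + 64 == 68.
 */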

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
{
	const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (BITS_PER_LONG-offset);	/* treat bits before offset as set */
		if (size < BITS_PER_LONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1)) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;	/* treat bits past size as set */
found_middle:
	return result + ffz(tmp);
}
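
/*
 * Usage sketch (illustrative; map is a hypothetical allocation bitmap,
 * not part of this header):
 *
 *	unsigned long map[2] = { ~0UL, ~0UL ^ (1UL << 3) };
 *	unsigned long bit = find_next_zero_bit(map, 2 * BITS_PER_LONG, 0);
 *
 * Word 0 is all ones, so the scan falls through to word 1, whose bit 3
 * is clear: bit == BITS_PER_LONG + 3. If no zero bit existed, the
 * return value would equal the size argument.
 */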

static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG-1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG-1);
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);	/* mask off bits before offset */
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG-1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));	/* mask off bits past size */
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 */

/* '<< 3' converts the byte index to a bit index (8 bits per byte) */
#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)
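
/*
 * Worked example (illustrative, not part of the original header): on a
 * 64-bit kernel LE_BYTE_ADDR == 56, so ext2 bit 0 becomes bit 0 ^ 56 ==
 * 56 of the big-endian word: the low bit of its most significant byte,
 * which is byte 0 on disk, exactly where a little-endian CPU keeps
 * bit 0. The XOR reverses the byte index within the word and leaves
 * the bit-within-byte index (low 3 bits) untouched.
 */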

#define ext2_test_bit(nr, addr) \
		test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_set_bit(nr, addr) \
		__test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) \
		__test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)

#define ext2_set_bit_atomic(l,nr,addr) \
		test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) \
		test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)

#endif	/* __KERNEL__ */

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(unsigned long * x)
{
#ifdef __LP64__
	return (unsigned long) __swab64p((u64 *) x);
#else
	return (unsigned long) __swab32p((u32 *) x);
#endif
}

/* include/linux/byteorder doesn't support "unsigned long" type */
static inline unsigned long ext2_swab(unsigned long y)
{
#ifdef __LP64__
	return (unsigned long) __swab64((u64) y);
#else
	return (unsigned long) __swab32((u32) y);
#endif
}

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1UL);
	if (offset) {
		tmp = ext2_swabp(p++);
		tmp |= (~0UL >> (BITS_PER_LONG - offset));	/* treat bits before offset as set */
		if (size < BITS_PER_LONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}

	while (size & ~(BITS_PER_LONG - 1)) {
		if (~(tmp = *(p++)))
			goto found_middle_swap;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = ext2_swabp(p);
found_first:
	tmp |= ~0UL << size;	/* treat bits past size as set */
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. Skip ffz */
found_middle:
	return result + ffz(tmp);

found_middle_swap:
	return result + ffz(ext2_swab(tmp));
}
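
/*
 * Note on the loop above: middle words are tested unswapped, since
 * "all bits set" looks the same in either byte order, and a word is
 * only byte-swapped (via found_middle_swap) once it is known to
 * contain a zero bit, saving a swab on every full word scanned.
 */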

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PARISC_BITOPS_H */