#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#include <linux/config.h>
#include <asm/compiler.h>

/*
 * Copyright 1994, Linus Torvalds.
 */
/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
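/*
 * Example: bit numbers decompose as nr = 32*(nr >> 5) + (nr & 31) for
 * the int-sized accessors below, so set_bit(40, addr) operates on
 * ((int *) addr)[1], bit 8.
 */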
static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"	/* load-locked the word */
	"	bis %0,%2,%0\n"	/* OR in the mask */
	"	stl_c %0,%1\n"	/* store-conditional */
	"	beq %0,2f\n"	/* on failure, retry out of line */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}
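/*
 * Example (illustrative caller; `map` is a hypothetical bitmap not
 * yet visible to other CPUs, so the non-atomic variant suffices):
 *
 *	for (i = 0; i < n; i++)
 *		__set_bit(i, map);
 */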
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"	/* clear the mask bits */
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
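/*
 * Example (sketch of a typical caller; `flags` is a hypothetical
 * shared word): the barrier macros above order a clear_bit against
 * the accesses around it:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(0, &flags);
 *	smp_mb__after_clear_bit();
 */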
/*
 * WARNING: non atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}
static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"	/* toggle the mask bits */
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * WARNING: non atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}
static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"	/* capture the old bit */
	"	bne %2,2f\n"	/* already set: nothing to store */
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}
static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"	/* capture the old bit */
	"	beq %2,2f\n"	/* already clear: nothing to store */
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}
static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"	/* capture the old bit */
	"	xor %0,%3,%0\n"	/* toggle unconditionally */
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
/*
 * WARNING: non atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}
static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
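/*
 * Worked example: for x = 0x07 the first expression isolates bit 3
 * (the lowest clear bit); that bit is covered by the 0xCC and 0xAA
 * masks but not 0xF0, so sum = 2 + 1 = 3 and ffz_b(0x07) == 3.
 */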
static inline unsigned long ffz(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee. EV67 can calculate it directly. */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);	/* flag the all-ones bytes */
	qofs = ffz_b(bits);			/* first byte with a zero */
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);			/* zero bit within that byte */

	return qofs*8 + bofs;
#endif
}
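/*
 * Example caller, honoring the ~0UL caveat above (`map` is a
 * hypothetical 64-bit bitmap):
 *
 *	if (map != ~0UL)
 *		slot = ffz(map);
 *
 * e.g. ffz(0x0fUL) == 4, the first clear bit.
 */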
/*
 * __ffs = Find First set bit in word. Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee. EV67 can calculate it directly. */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);	/* flag the all-zero bytes */
	qofs = ffz_b(bits);			/* first nonzero byte */
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);			/* set bit within that byte */

	return qofs*8 + bofs;
#endif
}
#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above __ffs.
 */
static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
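/*
 * Example of the difference: __ffs(8) == 3 (a bit index), while
 * ffs(8) == 4 (1-based, libc convention) and ffs(0) == 0; __ffs(0)
 * is undefined.
 */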
/*
 * fls: find last bit set.
 */
#if defined(__alpha_cix__) && defined(__alpha_fix__)
static inline int fls(int word)
{
	return 64 - __kernel_ctlz(word & 0xffffffff);
}
#else
#define fls	generic_fls
#endif
/* Compute powers of two for the given integer. */
static inline long floor_log2(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	return 63 - __kernel_ctlz(word);
#else
	long bit;

	for (bit = -1; word; bit++)
		word >>= 1;
	return bit;
#endif
}
static inline long ceil_log2(unsigned long word)
{
	long bit = floor_log2(word);
	return bit + (word > (1UL << bit));
}
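/*
 * Example: floor_log2(5) == 2 and ceil_log2(5) == 3 since
 * 2^2 <= 5 < 2^3; for exact powers of two both agree, e.g.
 * floor_log2(8) == ceil_log2(8) == 3.
 */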
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee. EV67 can calculate it directly. */
static inline unsigned long hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

#define hweight32(x) hweight64((x) & 0xfffffffful)
#define hweight16(x) hweight64((x) & 0xfffful)
#define hweight8(x)  hweight64((x) & 0xfful)
#else
static inline unsigned long hweight64(unsigned long w)
{
	unsigned long result;

	for (result = 0; w; w >>= 1)
		result += (w & 1);
	return result;
}

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
#endif
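/*
 * Example: hweight64(0xf0UL) == 4 and hweight16(0xffff) == 16; the
 * narrower forms simply mask the argument before counting.
 */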
#endif /* __KERNEL__ */
/*
 * Find next zero bit in a bitmap reasonably efficiently.
 */
static inline unsigned long
find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);	/* hide bits before offset */
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
 found_first:
	tmp |= ~0UL << size;		/* hide bits past the end */
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
 found_middle:
	return result + ffz(tmp);
}
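/*
 * Example (illustrative allocator scan; `map` and NBITS are
 * hypothetical): a return value equal to the size means no zero bit
 * was found:
 *
 *	bit = find_next_zero_bit(map, NBITS, 0);
 *	if (bit < NBITS)
 *		__set_bit(bit, map);	... claim the free slot ...
 */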
/*
 * Find next one bit in a bitmap reasonably efficiently.
 */
static inline unsigned long
find_next_bit(const void * addr, unsigned long size, unsigned long offset)
{
	const unsigned long * p = ((const unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;		/* hide bits before offset */
		if (size < 64)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
 found_first:
	tmp &= ~0UL >> (64 - size);	/* hide bits past the end */
	if (!tmp)
		return result + size;
 found_middle:
	return result + __ffs(tmp);
}
/*
 * The optimizer actually does good code for this case.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
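/*
 * Example (illustrative iteration over every set bit; `map` and
 * NBITS are hypothetical):
 *
 *	for (i = find_first_bit(map, NBITS); i < NBITS;
 *	     i = find_next_bit(map, NBITS, i + 1))
 *		process(i);	... process() is a hypothetical handler ...
 */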
#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline unsigned long
sched_find_first_bit(unsigned long b[3])
{
	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
	unsigned long ofs;

	ofs = (b1 ? 64 : 128);	/* offset of whichever later word wins */
	b1 = (b1 ? b1 : b2);
	ofs = (b0 ? 0 : ofs);	/* b[0] takes priority if nonzero */
	b0 = (b0 ? b0 : b1);

	return __ffs(b0) + ofs;
}
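/*
 * Worked example: for b = { 0, 0, 4 } the selects above reduce to
 * ofs == 128 and b0 == 4, giving __ffs(4) + 128 == 130, i.e. bit 2
 * of b[2].
 */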
#define ext2_set_bit                 __test_and_set_bit
#define ext2_set_bit_atomic(l,n,a)   test_and_set_bit(n,a)
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */