/* $Id: bitops.h,v 1.67 2001/11/19 18:36:34 davem Exp $
 * bitops.h: Bit string operations on the Sparc.
 *
 * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright 2001 Anton Blanchard (anton@samba.org)
 */
#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H

#include <linux/compiler.h>
#include <asm/byteorder.h>

#ifdef __KERNEL__
/*
 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
 * is in the highest of the four bytes and bit '31' is the high bit
 * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
 * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
 */
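/*
 * For example, with this numbering set_bit(0, map) sets the least
 * significant bit of map[0], and set_bit(33, map) sets bit 1 of map[1]:
 *
 *	unsigned long map[2] = { 0, 0 };
 *	set_bit(0, map);	map[0] is now 0x00000001
 *	set_bit(33, map);	map[1] is now 0x00000002
 */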
static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___set_bit\n\t"
	" add %%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "memory", "cc");

	return mask != 0;
}
static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___set_bit\n\t"
	" add %%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "cc");
}
static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___clear_bit\n\t"
	" add %%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "memory", "cc");

	return mask != 0;
}
static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___clear_bit\n\t"
	" add %%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "cc");
}
static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___change_bit\n\t"
	" add %%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "memory", "cc");

	return mask != 0;
}
static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___change_bit\n\t"
	" add %%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "cc");
}
/*
 * non-atomic versions
 */
static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p |= mask;
}
static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p &= ~mask;
}
static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p ^= mask;
}
static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}
static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}
static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}
#define smp_mb__before_clear_bit()	do { } while(0)
#define smp_mb__after_clear_bit()	do { } while(0)
/* The following routine need not be atomic. */
static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
{
	return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
/* The easy/cheese version for now. */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while (word & 1) {	/* walk over the trailing one bits */
		result++;
		word >>= 1;
	}
	return result;
}
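/*
 * Example: ffz(0x0000000f) == 4 and ffz(0) == 0; for a word of all
 * ones there is no zero bit and the result, 32, is out of range.
 */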
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ int __ffs(unsigned long word)
{
	int num = 0;

	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf) == 0) {
		num += 4;
		word >>= 4;
	}
	if ((word & 0x3) == 0) {
		num += 2;
		word >>= 2;
	}
	if ((word & 0x1) == 0)
		num += 1;
	return num;
}
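/*
 * Example: __ffs(0x00010000) == 16 and __ffs(1) == 0; the successive
 * halvings narrow the search from 16 bits down to a single bit.
 */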
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static __inline__ int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
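/*
 * Example: with b[0] == b[1] == b[2] == 0 and b[3] == 0x00000200,
 * sched_find_first_bit(b) returns 96 + 9 == 105.
 */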
/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs((unsigned long)x) + 1;
}
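/*
 * Example: ffs(0) == 0, ffs(1) == 1 and ffs(0x80000000) == 32,
 * matching the one-based libc convention.
 */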
/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
#define fls(x) generic_fls(x)
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
/*
 * find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'. This is largely based
 * on Linus's ALPHA routines, which are pretty portable BTW.
 */
static __inline__ unsigned long find_next_zero_bit(unsigned long *addr,
    unsigned long size, unsigned long offset)
{
	unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);	/* mask off bits before 'offset' */
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {	/* scan the remaining whole words */
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;	/* mask off bits past 'size' */
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
/*
 * Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
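/*
 * Example: with map[0] == ~0UL and map[1] == 0x0000ffffUL,
 * find_first_zero_bit(map, 64) skips the all-ones word and
 * returns 32 + 16 == 48.
 */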
/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 *
 * Scheduler induced bitop, do not use.
 */
static __inline__ int find_next_bit(unsigned long *addr, int size, int offset)
{
	unsigned long *p = addr + (offset >> 5);
	int num = offset & ~0x1f;
	unsigned long word;

	word = *p++;
	word &= ~((1 << (offset & 0x1f)) - 1);	/* clear bits before 'offset' */
	while (num < size) {
		if (word != 0)
			return __ffs(word) + num;
		word = *p++;
		num += 0x20;
	}
	return num;
}
static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
{
	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;

	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}
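/*
 * In the little-endian bit order used by the *_le_bit routines, bit 0
 * is the least significant bit of the byte at the lowest address, so
 * the same 'nr' addresses the same storage regardless of the CPU's
 * endianness. Example: test_le_bit(9, addr) tests bit 1 of the second
 * byte of the bitmap.
 */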
/*
 * non-atomic versions
 */
static __inline__ void __set_le_bit(int nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	*ADDR |= 1 << (nr & 0x07);
}
static __inline__ void __clear_le_bit(int nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	*ADDR &= ~(1 << (nr & 0x07));
}
static __inline__ int __test_and_set_le_bit(int nr, unsigned long *addr)
{
	int mask, retval;
	unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	return retval;
}
static __inline__ int __test_and_clear_le_bit(int nr, unsigned long *addr)
{
	int mask, retval;
	unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	return retval;
}
static __inline__ unsigned long find_next_zero_le_bit(unsigned long *addr,
    unsigned long size, unsigned long offset)
{
	unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp = __swab32(tmp) | (~0UL << size);
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
	return result + ffz(tmp);

found_middle:
	return result + ffz(__swab32(tmp));
}
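/*
 * Example: on a big-endian sparc a word whose bytes are ff 01 00 00
 * reads as 0xff010000UL, and find_next_zero_le_bit(&map, 32, 0)
 * returns 9: the first zero in little-endian bit order is bit 1 of
 * the second byte.
 */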
#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)
#define ext2_set_bit			__test_and_set_le_bit
#define ext2_clear_bit			__test_and_clear_le_bit
#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
#define ext2_test_bit			test_le_bit
#define ext2_find_first_zero_bit	find_first_zero_le_bit
#define ext2_find_next_zero_bit		find_next_zero_le_bit
/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)
#endif /* __KERNEL__ */

#endif /* defined(_SPARC_BITOPS_H) */