1 #ifndef _ALPHA_BITOPS_H
2 #define _ALPHA_BITOPS_H
/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
/*
 * set_bit - atomically set bit `nr` in the bitmap at `addr`.
 *
 * Load-locked/store-conditional (ldl_l/stl_c) loop on the 32-bit
 * longword containing the bit.  If the bit is already set the store
 * is skipped (branch to 2:).  The stl_c retry path lives out of line
 * so the fall-through main line predicts correctly.
 *
 * NOTE(review): asm instruction strings reconstructed from the
 * surviving constraint lists ("temp", "*m", "oldbit") — verify
 * against the original tree.
 */
extern __inline__ void set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"		/* bit already set: no store needed */
	"	xor %0,%3,%0\n"		/* bit known clear, so xor sets it */
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"		/* stl_c failed: retry out of line */
	"2:\n"
	".section .text2,\"ax\"\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * clear_bit - atomically clear bit `nr` in the bitmap at `addr`.
 *
 * Same ldl_l/stl_c loop as set_bit, but the store is skipped when
 * the bit is already clear (beq on the masked value).
 *
 * NOTE(review): asm instruction strings reconstructed from the
 * surviving constraint lists — verify against the original tree.
 */
extern __inline__ void clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"		/* bit already clear: no store needed */
	"	xor %0,%3,%0\n"		/* bit known set, so xor clears it */
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"		/* stl_c failed: retry out of line */
	"2:\n"
	".section .text2,\"ax\"\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * change_bit - atomically toggle bit `nr` in the bitmap at `addr`.
 *
 * A toggle must always store, so there is no early-out branch and
 * no `oldbit` temp (the surviving constraint list has only two
 * outputs: temp and *m).
 *
 * NOTE(review): asm instruction strings reconstructed — verify
 * against the original tree.
 */
extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"		/* stl_c failed: retry out of line */
	".section .text2,\"ax\"\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
/*
 * test_and_set_bit - atomically set bit `nr` and return its old value.
 *
 * Returns 0 if the bit was clear before the operation, != 0 if it
 * was already set (in which case the store is skipped).
 *
 * NOTE(review): asm instruction strings and the truncated second
 * parameter reconstructed — verify against the original tree.
 */
extern __inline__ int test_and_set_bit(unsigned long nr,
				       volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"		/* already set: return without storing */
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"		/* stl_c failed: retry out of line */
	"2:\n"
	".section .text2,\"ax\"\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));

	return oldbit != 0;
}
/*
 * test_and_clear_bit - atomically clear bit `nr` and return its old
 * value.  Returns 0 if the bit was already clear (store skipped),
 * != 0 if it was set.
 *
 * NOTE(review): asm instruction strings reconstructed — verify
 * against the original tree.
 */
extern __inline__ int test_and_clear_bit(unsigned long nr,
					 volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"		/* already clear: return without storing */
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"		/* stl_c failed: retry out of line */
	"2:\n"
	".section .text2,\"ax\"\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));

	return oldbit != 0;
}
/*
 * test_and_change_bit - atomically toggle bit `nr` and return its
 * old value.  A toggle always stores, so there is no early-out.
 *
 * NOTE(review): asm instruction strings reconstructed — verify
 * against the original tree.
 */
extern __inline__ int test_and_change_bit(unsigned long nr,
					  volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"		/* capture the old bit */
	"	xor %0,%3,%0\n"		/* flip it */
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"		/* stl_c failed: retry out of line */
	".section .text2,\"ax\"\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));

	return oldbit != 0;
}
/*
 * test_bit - non-atomically read bit `nr` of the bitmap at `addr`.
 *
 * Plain load of the 32-bit longword holding the bit; returns 1 if
 * the bit is set, 0 otherwise.  No locking — a concurrent writer
 * may race with this read.
 */
extern __inline__ int test_bit(int nr, volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
/*
 * ffz_b - position (0..7) of the first zero bit in the low byte of x.
 *
 * `~x & -~x` isolates the lowest clear bit of x as a one-hot value;
 * the three masked tests then binary-search its position.  Result is
 * meaningless if all low 8 bits of x are set (see ffz's contract).
 */
extern inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum = 0;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	if (x & 0xF0) sum += 4;
	if (x & 0xCC) sum += 2;
	if (x & 0xAA) sum += 1;

	return sum;
}
/*
 * ffz - position (0..63) of the first zero bit in `word`.
 * Undefined if no zero bit exists; callers check against ~0UL first.
 */
extern inline unsigned long ffz(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee.  EV6 can calculate it directly.  */
	unsigned long result;
	__asm__("cttz %1,%0" : "=r"(result) : "r"(~word));
	return result;
#else
	unsigned long bits, qofs, bofs;

	/* cmpbge against ~0UL sets one mask bit per all-ones byte of
	   word; the first zero in that mask is the first byte that
	   contains a zero bit.  */
	__asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
	qofs = ffz_b(bits);
	/* Extract that byte and search within it.  */
	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
/*
 * ffs - find first set bit, libc/builtin semantics: returns the
 * 1-based position of the lowest set bit, or 0 if word == 0.
 * Implemented as ffz of the complement, shifted to 1-based.
 */
extern inline int ffs(int word)
{
	int result = ffz(~word);
	return word ? result+1 : 0;
}
/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee.  EV6 can calculate it directly.  */
extern __inline__ unsigned long hweight64(unsigned long w)
{
	unsigned long result;
	__asm__("ctpop %1,%0" : "=r"(result) : "r"(w));
	return result;
}

/* Narrower widths: mask and reuse the 64-bit popcount.  */
#define hweight32(x) hweight64((x) & 0xfffffffful)
#define hweight16(x) hweight64((x) & 0xfffful)
#define hweight8(x) hweight64((x) & 0xfful)
#else
/* Pre-EV6: fall back to the generic table-driven versions.  */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
#endif
232 #endif /* __KERNEL__ */
/*
 * Find next zero bit in a bitmap reasonably efficiently..
 */
/*
 * find_next_zero_bit - index of the first zero bit at or after
 * `offset` in the `size`-bit bitmap at `addr`; returns `size` if
 * every remaining bit is set.  Scans one 64-bit word at a time.
 *
 * NOTE(review): loop skeleton reconstructed around the surviving
 * fragments (prologue, offset mask, tail check) — verify against
 * the original tree.
 */
extern inline unsigned long find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);	/* treat bits below offset as set */
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;		/* treat bits past the end as set */
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
/*
 * The optimizer actually does good code for this case..
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
/* Bitmap functions for the ext2 filesystem: on alpha these map
 * straight onto the native bit operations above. */
#define ext2_set_bit test_and_set_bit
#define ext2_clear_bit test_and_clear_bit
#define ext2_test_bit test_bit
#define ext2_find_first_zero_bit find_first_zero_bit
#define ext2_find_next_zero_bit find_next_zero_bit

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
295 #endif /* __KERNEL__ */
297 #endif /* _ALPHA_BITOPS_H */