/* include/asm-ia64/bitops.h (davej-history.git, import of 2.4.0-test3pre5) */

#ifndef _ASM_IA64_BITOPS_H
#define _ASM_IA64_BITOPS_H

/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 02/04/00 D. Mosberger	Require 64-bit alignment for bitops, per suggestion from davem
 */

#include <asm/system.h>
/*
 * These operations need to be atomic.  The address must be (at least)
 * 32-bit aligned.  Note that there are drivers (e.g., eepro100) which
 * use these operations to operate on hw-defined data-structures, so
 * we can't easily change these operations to force a bigger
 * alignment.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */
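/*
 * Illustrative sketch (not part of the original header): under the
 * numbering above, the word index is nr >> 5 and the bit within that
 * 32-bit word is nr & 31, so for example:
 *
 *	__u32 flags[2] = { 0, 0 };
 *	set_bit(0, flags);	// flags[0] == 0x00000001
 *	set_bit(33, flags);	// flags[1] == 0x00000002
 */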
extern __inline__ void
set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	/* retry until cmpxchg sees the word unchanged since it was read */
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
extern __inline__ void
clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
}
extern __inline__ void
change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
}
extern __inline__ int
test_and_set_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = 1 << (nr & 31);
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old | bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}
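/*
 * Usage sketch (illustrative, not part of the original header): the
 * return value tells the caller whether it was the one that set the
 * bit, which is the usual "claim a flag once" idiom.  `flags' and
 * `MY_FLAG_BUSY' are hypothetical names, not kernel symbols.
 *
 *	if (!test_and_set_bit(MY_FLAG_BUSY, &flags)) {
 *		// we set the bit; do the work, then clear_bit() when done
 *	}
 */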
extern __inline__ int
test_and_clear_bit (int nr, volatile void *addr)
{
	__u32 mask, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	mask = ~(1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old & mask;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & ~mask) != 0;
}
extern __inline__ int
test_and_change_bit (int nr, volatile void *addr)
{
	__u32 bit, old, new;
	volatile __u32 *m;
	CMPXCHG_BUGCHECK_DECL

	m = (volatile __u32 *) addr + (nr >> 5);
	bit = (1 << (nr & 31));
	do {
		CMPXCHG_BUGCHECK(m);
		old = *m;
		new = old ^ bit;
	} while (cmpxchg_acq(m, old, new) != old);
	return (old & bit) != 0;
}
extern __inline__ int
test_bit (int nr, volatile void *addr)
{
	return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
}
/*
 * ffz = Find First Zero in word.  Undefined if no zero exists,
 * so code should check against ~0UL first..
 */
extern inline unsigned long
ffz (unsigned long x)
{
	unsigned long result;

	__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x & (~x - 1)));
	return result;
}
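/*
 * Worked example (illustrative, not part of the original header):
 * x & (~x - 1) keeps exactly the one bits of x below its lowest zero
 * bit, so the popcnt of that value is the index of that zero bit.
 * For x = 0xb (binary ...1011): ~x - 1 = ...0011, x & (~x - 1) = 0x3,
 * popcnt(0x3) = 2, and indeed bit 2 is the first zero bit of x.
 */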
#ifdef __KERNEL__

/*
 * Find the most significant bit that is set (undefined if no bit is
 * set).
 */
static inline unsigned long
ia64_fls (unsigned long x)
{
	double d = x;
	long exp;

	__asm__ ("getf.exp %0=%1" : "=r"(exp) : "f"(d));
	return exp - 0xffff;
}
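/*
 * Illustrative note (not part of the original header): converting x to
 * floating point makes the exponent field encode the position of the
 * most significant set bit.  getf.exp reads that biased exponent, and
 * subtracting the ia64 bias of 0xffff yields the bit index, e.g.
 * ia64_fls(1) == 0, ia64_fls(0x10) == 4, ia64_fls(0x5) == 2.
 */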
/*
 * ffs: find first bit set.  This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
#define ffs(x)	__builtin_ffs(x)
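/*
 * Illustrative comparison (not part of the original header): ffs()
 * returns a 1-based index and defines ffs(0) == 0, whereas ffz()
 * returns a 0-based index and is undefined for ~0UL:
 *
 *	ffs(0x8) == 4		// lowest set bit is bit 3
 *	ffz(0x7) == 3		// lowest clear bit is bit 3
 *	ffs(0)   == 0
 */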
/*
 * hweightN: returns the hamming weight (i.e., the number
 * of bits set) of an N-bit word
 */
extern __inline__ unsigned long
hweight64 (unsigned long x)
{
	unsigned long result;

	__asm__ ("popcnt %0=%1" : "=r" (result) : "r" (x));
	return result;
}

#define hweight32(x) hweight64 ((x) & 0xfffffffful)
#define hweight16(x) hweight64 ((x) & 0xfffful)
#define hweight8(x)  hweight64 ((x) & 0xfful)
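/*
 * Usage sketch (illustrative, not part of the original header): the
 * narrower variants simply mask off the high bits before counting,
 * e.g. hweight8(0xff) == 8 and hweight32(0xf0f0f0f0) == 16, each via
 * a single popcnt instruction.
 */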
#endif /* __KERNEL__ */
/*
 * Find next zero bit in a bitmap reasonably efficiently..
 */
extern inline int
find_next_zero_bit (void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		/* partial first word: treat bits below offset as ones */
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	/* scan full 64-bit words */
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	/* partial last word */
	tmp = *p;
found_first:
	tmp |= ~0UL << size;	/* treat bits past the end as ones */
found_middle:
	return result + ffz(tmp);
}
/*
 * The optimizer actually does good code for this case..
 */
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
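/*
 * Usage sketch (illustrative, not part of the original header):
 * scanning a small bitmap for a free slot.  `map' is a hypothetical
 * name, not a kernel symbol.
 *
 *	unsigned long map[2] = { ~0UL, 0x7 };	// bits 0-66 set
 *	int slot = find_first_zero_bit(map, 128);
 *	// slot == 67; returns `size' (128) if no zero bit exists
 */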
#ifdef __KERNEL__

#define ext2_set_bit			test_and_set_bit
#define ext2_clear_bit			test_and_clear_bit
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit
/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_IA64_BITOPS_H */