#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */
extern __inline__ void set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
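
/*
 * For reference, the ldl_l/stl_c sequence in set_bit() behaves roughly
 * like the C sketch below, except that the hardware guarantees the
 * read-modify-write is atomic.  store_conditional() is a hypothetical
 * stand-in for stl_c, which fails (forcing a retry via the out-of-line
 * "br 1b") if the lock flag set by ldl_l was lost:
 *
 *	do {
 *		old = *m;			// ldl_l: load-locked
 *		if (old & mask)			// bit already set: done
 *			break;
 *	} while (!store_conditional(m, old ^ mask));	// stl_c, retry on failure
 */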

extern __inline__ void clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

extern __inline__ void change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
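
/*
 * Example of the index/mask arithmetic used throughout: for nr = 70,
 * nr >> 5 == 2, so the operation targets the third 32-bit word of the
 * bitmap, and 1UL << (nr & 31) == 1UL << 6 picks bit 6 within that word.
 */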

extern __inline__ int test_and_set_bit(unsigned long nr,
				       volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"	mb\n"
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));

	return oldbit != 0;
}
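
/*
 * Typical usage sketch (BUSY_BIT and flags are hypothetical names, not
 * part of this header): the return value tells the caller whether it
 * won the race to set the bit.
 *
 *	if (!test_and_set_bit(BUSY_BIT, &flags)) {
 *		...			// we set the bit: resource acquired
 *	} else {
 *		...			// bit was already set by someone else
 *	}
 */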

extern __inline__ int test_and_clear_bit(unsigned long nr,
					 volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"	mb\n"
	"2:\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));

	return oldbit != 0;
}

extern __inline__ int test_and_change_bit(unsigned long nr,
					  volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	unsigned int * m = ((unsigned int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"	mb\n"
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m));

	return oldbit != 0;
}

extern __inline__ int test_bit(int nr, volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
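
/*
 * Example: test_bit(37, addr) reads ((const int *) addr)[1] (37 >> 5 == 1)
 * and tests bit 5 of that word (37 & 31 == 5), without any locking.
 */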

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first..
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
extern inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum = 0;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	if (x & 0xF0) sum += 4;
	if (x & 0xCC) sum += 2;
	if (x & 0xAA) sum += 1;

	return sum;
}
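
/*
 * Worked example for ffz_b(): with x = 0xEF (binary 11101111), the first
 * zero is bit 4.  ~x & -~x isolates that bit, giving 0x10; 0x10 & 0xF0 is
 * non-zero (+4), while 0x10 & 0xCC and 0x10 & 0xAA are zero, so the
 * result is 4.
 */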

extern inline unsigned long ffz(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee.  EV6 can calculate it directly.  */
	unsigned long result;
	__asm__("cttz %1,%0" : "=r"(result) : "r"(~word));
	return result;
#else
	unsigned long bits, qofs, bofs;

	__asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
	qofs = ffz_b(bits);
	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
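
/*
 * Sketch of the generic (non-EV6) path: cmpbge against ~0UL yields a byte
 * mask with bit i set iff byte i of word is 0xff, so ffz_b() on that mask
 * picks the first byte containing a zero; extbl then extracts that byte
 * and ffz_b() finds the zero within it.  E.g. for word = 0xffff the mask
 * is 0x03, qofs = 2, byte 2 is 0x00, bofs = 0, result 2*8 + 0 = 16.
 */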

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
extern inline int ffs(int word)
{
	int result = ffz(~word);
	return word ? result+1 : 0;
}
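
/*
 * Example: ffs(0x10) returns 5 (bit 4 is the lowest set bit, counted
 * 1-based), and ffs(0) returns 0, matching the libc convention.
 */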

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */

#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee.  EV6 can calculate it directly.  */
extern __inline__ unsigned long hweight64(unsigned long w)
{
	unsigned long result;
	__asm__("ctpop %1,%0" : "=r"(result) : "r"(w));
	return result;
}

#define hweight32(x) hweight64((x) & 0xfffffffful)
#define hweight16(x) hweight64((x) & 0xfffful)
#define hweight8(x)  hweight64((x) & 0xfful)
#else
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)
#endif
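
/*
 * Example: hweight8(0xB5) is 5, since 0xB5 = 10110101 has five bits set.
 * On EV6 this is a single ctpop; elsewhere it falls back to the
 * generic_hweight* helpers defined outside this header.
 */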

#endif /* __KERNEL__ */

/*
 * Find next zero bit in a bitmap reasonably efficiently..
 */
extern inline unsigned long find_next_zero_bit(void * addr, unsigned long size, unsigned long offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
 found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size; /* Nope. */
 found_middle:
	return result + ffz(tmp);
}

/*
 * The optimizer actually does good code for this case..
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
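
/*
 * Usage sketch (bitmap and NBITS are hypothetical names): scan for a
 * free slot and claim it atomically, rechecking the result because
 * another CPU may set the bit between the scan and the claim.
 *
 *	unsigned long n = find_first_zero_bit(bitmap, NBITS);
 *	if (n < NBITS && !test_and_set_bit(n, bitmap))
 *		...			// slot n is now ours
 */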

#ifdef __KERNEL__

#define ext2_set_bit                 test_and_set_bit
#define ext2_clear_bit               test_and_clear_bit
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */