include/asm-sparc/bitops.h
/* $Id: bitops.h,v 1.67 2001/11/19 18:36:34 davem Exp $
 * bitops.h: Bit string operations on the Sparc.
 *
 * Copyright 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright 2001 Anton Blanchard (anton@samba.org)
 */

#ifndef _SPARC_BITOPS_H
#define _SPARC_BITOPS_H

#include <linux/compiler.h>
#include <asm/byteorder.h>

#ifdef __KERNEL__
/*
 * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
 * is in the highest of the four bytes and bit '31' is the high bit
 * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
 * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
 */
static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___set_bit\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "memory", "cc");

	return mask != 0;
}
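/*
 * Illustrative sketch of how callers typically use the atomic bit
 * operations on a word-aligned bitmap; the bitmap name and size below
 * are made up purely for the example.
 */
#if 0	/* example only, never built */
static unsigned long example_map[2];			/* 64-bit bitmap, example only */

static __inline__ void bitops_example(void)
{
	set_bit(5, example_map);			/* atomically set bit 5 */
	if (test_and_clear_bit(5, example_map))		/* non-zero: bit 5 was set */
		clear_bit(6, example_map);		/* atomically clear bit 6 */
}
#endif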
static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___set_bit\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "cc");
}
static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___clear_bit\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "memory", "cc");

	return mask != 0;
}
static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___clear_bit\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "cc");
}
static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___change_bit\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "memory", "cc");

	return mask != 0;
}
static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	register unsigned long mask asm("g2");
	register unsigned long *ADDR asm("g1");

	ADDR = ((unsigned long *) addr) + (nr >> 5);
	mask = 1 << (nr & 31);

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___change_bit\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (mask)
	: "0" (mask), "r" (ADDR)
	: "g3", "g4", "g5", "g7", "cc");
}
/*
 * non-atomic versions
 */
static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p |= mask;
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p &= ~mask;
}

static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p ^= mask;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}
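/*
 * Illustrative sketch: the double-underscore variants are plain
 * read-modify-write sequences, so they are only safe when the caller
 * already serializes access to the bitmap (under a lock, or on data no
 * other CPU or interrupt touches).  The variable name is made up for
 * the example.
 */
#if 0	/* example only, never built */
static unsigned long example_flags;

static __inline__ void nonatomic_example(void)
{
	/* Safe only while nothing else can modify example_flags. */
	__set_bit(0, &example_flags);
	if (__test_and_clear_bit(0, &example_flags))
		__change_bit(1, &example_flags);
}
#endif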
#define smp_mb__before_clear_bit()	do { } while(0)
#define smp_mb__after_clear_bit()	do { } while(0)

/* The following routine need not be atomic. */
static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
{
	return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
/* The easy/cheese version for now. */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result = 0;

	while(word & 1) {
		result++;
		word >>= 1;
	}
	return result;
}
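/*
 * For reference: ffz() returns the index of the least-significant zero
 * bit, e.g. ffz(0x00000007) == 3 and ffz(0) == 0.  For ~0UL (no zero
 * bit) the loop runs off the end and returns 32, an out-of-range index,
 * so callers must already know at least one zero bit exists.
 */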
/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __inline__ int __ffs(unsigned long word)
{
	int num = 0;

	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf) == 0) {
		num += 4;
		word >>= 4;
	}
	if ((word & 0x3) == 0) {
		num += 2;
		word >>= 2;
	}
	if ((word & 0x1) == 0)
		num += 1;
	return num;
}
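/*
 * For reference: the steps above narrow the search by 16, 8, 4, 2 and
 * 1 bits in turn, so __ffs(0x00010000) == 16 and __ffs(1) == 0, while
 * __ffs(0) is undefined.
 */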
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static __inline__ int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
}
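/*
 * Worked example: for a 140-bit map b[] with b[0] == b[1] == b[2] == 0
 * and b[3] == 0x8, sched_find_first_bit(b) returns
 * __ffs(0x8) + 96 == 3 + 96 == 99.
 */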
/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs((unsigned long)x) + 1;
}
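/*
 * For reference: ffs() is 1-based like the libc routine, so
 * ffs(0) == 0, ffs(1) == 1 and ffs(0x80000000) == 32, whereas the
 * 0-based __ffs(1) == 0.
 */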
/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
#define fls(x) generic_fls(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
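/*
 * For reference: hweight32(0xFF00FF00) == 16, hweight16(0x000F) == 4
 * and hweight8(0x01) == 1.
 */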
/*
 * find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'. This is largely based
 * on Linus's ALPHA routines, which are pretty portable BTW.
 */
static __inline__ unsigned long find_next_zero_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)        /* Are any bits zero? */
		return result + size; /* Nope. */
found_middle:
	return result + ffz(tmp);
}

/*
 * Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
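/*
 * Illustrative sketch of scanning a bitmap for a free slot with
 * find_first_zero_bit(); the bitmap name and size are made up for the
 * example, and the caller is assumed to serialize updates itself.
 */
#if 0	/* example only, never built */
static unsigned long slot_map[4];		/* 128 slots, example only */

static __inline__ int alloc_slot_example(void)
{
	unsigned long slot = find_first_zero_bit(slot_map, 128);

	if (slot >= 128)
		return -1;			/* bitmap is full */
	__set_bit(slot, slot_map);		/* caller must serialize */
	return slot;
}
#endif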
/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 *
 * Scheduler induced bitop, do not use.
 */
static __inline__ int find_next_bit(unsigned long *addr, int size, int offset)
{
	unsigned long *p = addr + (offset >> 5);
	int num = offset & ~0x1f;
	unsigned long word;

	word = *p++;
	word &= ~((1 << (offset & 0x1f)) - 1);
	while (num < size) {
		if (word != 0) {
			return __ffs(word) + num;
		}
		word = *p++;
		num += 0x20;
	}
	return num;
}
static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
{
	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}

/*
 * non-atomic versions
 */
static __inline__ void __set_le_bit(int nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	*ADDR |= 1 << (nr & 0x07);
}

static __inline__ void __clear_le_bit(int nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	*ADDR &= ~(1 << (nr & 0x07));
}

static __inline__ int __test_and_set_le_bit(int nr, unsigned long *addr)
{
	int mask, retval;
	unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	return retval;
}

static __inline__ int __test_and_clear_le_bit(int nr, unsigned long *addr)
{
	int mask, retval;
	unsigned char *ADDR = (unsigned char *)addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	return retval;
}
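/*
 * For reference: the *_le_bit() helpers address bits byte-wise in
 * little-endian order, so on this big-endian CPU bit 0 lands in the
 * lowest-addressed byte: after __set_le_bit(0, addr),
 * ((unsigned char *)addr)[0] == 0x01 and test_le_bit(0, addr) == 1,
 * regardless of the host word layout.
 */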
static __inline__ unsigned long find_next_zero_le_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = addr + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if(offset) {
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if(size < 32)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while(size & ~31UL) {
		if(~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if(!size)
		return result;
	tmp = *p;

found_first:
	tmp = __swab32(tmp) | (~0UL << size);
	if (tmp == ~0UL)        /* Are any bits zero? */
		return result + size; /* Nope. */
	return result + ffz(tmp);

found_middle:
	return result + ffz(__swab32(tmp));
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)
#define ext2_set_bit			__test_and_set_le_bit
#define ext2_clear_bit			__test_and_clear_le_bit

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_test_bit			test_le_bit
#define ext2_find_first_zero_bit	find_first_zero_le_bit
#define ext2_find_next_zero_bit		find_next_zero_le_bit
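/*
 * Illustrative sketch: the *_atomic ext2 helpers wrap the non-atomic
 * little-endian ops in a caller-supplied spinlock.  The lock and
 * bitmap names (and the SPIN_LOCK_UNLOCKED initializer) are assumed
 * for the example only.
 */
#if 0	/* example only, never built; needs <linux/spinlock.h> */
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static unsigned long example_block_map[8];

static __inline__ int claim_block_example(int nr)
{
	/* Returns the old value of bit 'nr'; 0 means we claimed it. */
	return ext2_set_bit_atomic(&example_lock, nr, example_block_map);
}
#endif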
/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* defined(_SPARC_BITOPS_H) */