/*
 * $Id: bitops.h,v 1.12 2000/02/09 03:28:31 davem Exp $
 * bitops.h: Bit string operations on the ppc
 */
6 #ifdef __KERNEL__
7 #ifndef _PPC_BITOPS_H
8 #define _PPC_BITOPS_H
10 #include <linux/config.h>
11 #include <asm/byteorder.h>
/*
 * The test_and_*_bit operations are taken to imply a memory barrier
 * on SMP systems.
 */
17 #ifdef CONFIG_SMP
18 #define SMP_WMB "eieio\n"
19 #define SMP_MB "\nsync"
20 #else
21 #define SMP_WMB
22 #define SMP_MB
23 #endif /* CONFIG_SMP */
25 #define __INLINE_BITOPS 1
27 #if __INLINE_BITOPS
29 * These used to be if'd out here because using : "cc" as a constraint
30 * resulted in errors from egcs. Things may be OK with gcc-2.95.
32 static __inline__ void set_bit(int nr, volatile void * addr)
34 unsigned long old;
35 unsigned long mask = 1 << (nr & 0x1f);
36 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
38 __asm__ __volatile__("\
39 1: lwarx %0,0,%3
40 or %0,%0,%2
41 stwcx. %0,0,%3
42 bne- 1b"
43 : "=&r" (old), "=m" (*p)
44 : "r" (mask), "r" (p), "m" (*p)
45 : "cc" );
/*
 * non-atomic version: plain read-modify-write set of bit 'nr';
 * caller must provide any locking needed.
 */
static __inline__ void __set_bit(int nr, volatile void *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	*p |= mask;
}
60 * clear_bit doesn't imply a memory barrier
62 #define smp_mb__before_clear_bit() smp_mb()
63 #define smp_mb__after_clear_bit() smp_mb()
65 static __inline__ void clear_bit(int nr, volatile void *addr)
67 unsigned long old;
68 unsigned long mask = 1 << (nr & 0x1f);
69 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
71 __asm__ __volatile__("\
72 1: lwarx %0,0,%3
73 andc %0,%0,%2
74 stwcx. %0,0,%3
75 bne- 1b"
76 : "=&r" (old), "=m" (*p)
77 : "r" (mask), "r" (p), "m" (*p)
78 : "cc");
81 static __inline__ void change_bit(int nr, volatile void *addr)
83 unsigned long old;
84 unsigned long mask = 1 << (nr & 0x1f);
85 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
87 __asm__ __volatile__("\
88 1: lwarx %0,0,%3
89 xor %0,%0,%2
90 stwcx. %0,0,%3
91 bne- 1b"
92 : "=&r" (old), "=m" (*p)
93 : "r" (mask), "r" (p), "m" (*p)
94 : "cc");
98 * test_and_*_bit do imply a memory barrier (?)
100 static __inline__ int test_and_set_bit(int nr, volatile void *addr)
102 unsigned int old, t;
103 unsigned int mask = 1 << (nr & 0x1f);
104 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
106 __asm__ __volatile__(SMP_WMB "\
107 1: lwarx %0,0,%4
108 or %1,%0,%3
109 stwcx. %1,0,%4
110 bne 1b"
111 SMP_MB
112 : "=&r" (old), "=&r" (t), "=m" (*p)
113 : "r" (mask), "r" (p), "m" (*p)
114 : "cc", "memory");
116 return (old & mask) != 0;
/*
 * non-atomic version: set bit 'nr' and return its previous value;
 * caller must provide any locking needed.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}
132 static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
134 unsigned int old, t;
135 unsigned int mask = 1 << (nr & 0x1f);
136 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
138 __asm__ __volatile__(SMP_WMB "\
139 1: lwarx %0,0,%4
140 andc %1,%0,%3
141 stwcx. %1,0,%4
142 bne 1b"
143 SMP_MB
144 : "=&r" (old), "=&r" (t), "=m" (*p)
145 : "r" (mask), "r" (p), "m" (*p)
146 : "cc", "memory");
148 return (old & mask) != 0;
/*
 * non-atomic version: clear bit 'nr' and return its previous value;
 * caller must provide any locking needed.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}
164 static __inline__ int test_and_change_bit(int nr, volatile void *addr)
166 unsigned int old, t;
167 unsigned int mask = 1 << (nr & 0x1f);
168 volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);
170 __asm__ __volatile__(SMP_WMB "\
171 1: lwarx %0,0,%4
172 xor %1,%0,%3
173 stwcx. %1,0,%4
174 bne 1b"
175 SMP_MB
176 : "=&r" (old), "=&r" (t), "=m" (*p)
177 : "r" (mask), "r" (p), "m" (*p)
178 : "cc", "memory");
180 return (old & mask) != 0;
182 #else /* __INLINE_BITOPS */
184 extern void set_bit(int nr, volatile void *addr);
185 extern void clear_bit(int nr, volatile void *addr);
186 extern void change_bit(int nr, volatile void *addr);
187 extern int test_and_set_bit(int nr, volatile void *addr);
188 extern int test_and_clear_bit(int nr, volatile void *addr);
189 extern int test_and_change_bit(int nr, volatile void *addr);
191 #endif /* __INLINE_BITOPS */
/* Non-atomic read of bit 'nr'; returns 0 or 1. */
static __inline__ int test_bit(int nr, __const__ volatile void *addr)
{
	__const__ unsigned int *p = (__const__ unsigned int *) addr;

	return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}
200 /* Return the bit position of the most significant 1 bit in a word */
201 static __inline__ int __ilog2(unsigned int x)
203 int lz;
205 asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
206 return 31 - lz;
/*
 * ffz: find the index (0..31) of the first zero bit in x,
 * or 32 if x is all ones.  x & -x isolates the lowest set bit
 * of the complemented word.
 */
static __inline__ int ffz(unsigned int x)
{
	if ((x = ~x) == 0)
		return 32;
	return __ilog2(x & -x);
}
216 #ifdef __KERNEL__
219 * ffs: find first bit set. This is defined the same way as
220 * the libc and compiler builtin ffs routines, therefore
221 * differs in spirit from the above ffz (man ffs).
223 static __inline__ int ffs(int x)
225 return __ilog2(x & -x) + 1;
229 * hweightN: returns the hamming weight (i.e. the number
230 * of bits set) of a N-bit word
233 #define hweight32(x) generic_hweight32(x)
234 #define hweight16(x) generic_hweight16(x)
235 #define hweight8(x) generic_hweight8(x)
237 #endif /* __KERNEL__ */
/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
/*
 * Find the first zero bit at index >= 'offset' in the bitmap at
 * 'addr', which is 'size' bits long.  Returns the bit index, or
 * 'size' if no zero bit is found.
 *
 * Note: masks use ~0U rather than the original's ~0UL so the
 * 32-bit word arithmetic stays correct even if unsigned long is
 * 64 bits (identical behaviour on 32-bit PPC).
 */
static __inline__ unsigned long find_next_zero_bit(void * addr,
				unsigned long size, unsigned long offset)
{
	unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* Partial first word: treat the already-scanned low bits as ones. */
		tmp = *p++;
		tmp |= ~0U >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	/* Partial last word: treat the bits beyond 'size' as ones. */
	tmp |= ~0U << size;
	if (tmp == ~0U)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
285 #define _EXT2_HAVE_ASM_BITOPS_
287 #ifdef __KERNEL__
289 #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, addr)
290 #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, addr)
/*
 * Test bit 'nr' of a little-endian (ext2) bitmap by addressing it
 * byte-wise, which is endian-neutral.  Returns 0 or 1.
 */
static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
{
	__const__ unsigned char *ADDR = (__const__ unsigned char *) addr;

	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}
/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)
/*
 * Find the first zero bit at index >= 'offset' in the little-endian
 * (ext2) bitmap at 'addr', 'size' bits long.  Each word is byte-swapped
 * to host order via cpu_to_le32p before scanning.  Returns the bit
 * index, or 'size' if no zero bit is found.
 *
 * Note: masks use ~0U rather than a mix of ~0U/~0UL so the 32-bit
 * word arithmetic stays correct even if unsigned long is 64 bits
 * (identical behaviour on 32-bit PPC).
 */
static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
		unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* Partial first word: treat the already-scanned low bits as ones. */
		tmp = cpu_to_le32p(p++);
		tmp |= ~0U >> (32-offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = cpu_to_le32p(p++)) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = cpu_to_le32p(p);
found_first:
	/* Partial last word: treat the bits beyond 'size' as ones. */
	tmp |= ~0U << size;
	if (tmp == ~0U)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
/* Bitmap functions for the minix filesystem. */
/* Minix bitmaps share the ext2 little-endian layout, so reuse those ops. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
352 #endif /* __KERNEL__ */
354 #endif /* _PPC_BITOPS_H */
355 #endif /* __KERNEL__ */