/*
 * Copyright (C) 1996 Paul Mackerras.
 */
#include <linux/kernel.h>
#include <asm/bitops.h>
/*
 * If the bitops are not inlined in bitops.h, they are defined here.
 */
13 void set_bit(int nr
, volatile void * addr
)
16 unsigned long mask
= 1 << (nr
& 0x1f);
17 unsigned long *p
= ((unsigned long *)addr
) + (nr
>> 5);
19 __asm__
__volatile__(SMP_WMB
"\n\
26 : "=&r" (old
), "=m" (*p
)
27 : "r" (mask
), "r" (p
), "m" (*p
)
31 void clear_bit(int nr
, volatile void *addr
)
34 unsigned long mask
= 1 << (nr
& 0x1f);
35 unsigned long *p
= ((unsigned long *)addr
) + (nr
>> 5);
37 __asm__
__volatile__(SMP_WMB
"\n\
44 : "=&r" (old
), "=m" (*p
)
45 : "r" (mask
), "r" (p
), "m" (*p
)
49 void change_bit(int nr
, volatile void *addr
)
52 unsigned long mask
= 1 << (nr
& 0x1f);
53 unsigned long *p
= ((unsigned long *)addr
) + (nr
>> 5);
55 __asm__
__volatile__(SMP_WMB
"\n\
62 : "=&r" (old
), "=m" (*p
)
63 : "r" (mask
), "r" (p
), "m" (*p
)
67 int test_and_set_bit(int nr
, volatile void *addr
)
70 unsigned int mask
= 1 << (nr
& 0x1f);
71 volatile unsigned int *p
= ((volatile unsigned int *)addr
) + (nr
>> 5);
73 __asm__
__volatile__(SMP_WMB
"\n\
80 : "=&r" (old
), "=&r" (t
), "=m" (*p
)
81 : "r" (mask
), "r" (p
), "m" (*p
)
84 return (old
& mask
) != 0;
87 int test_and_clear_bit(int nr
, volatile void *addr
)
90 unsigned int mask
= 1 << (nr
& 0x1f);
91 volatile unsigned int *p
= ((volatile unsigned int *)addr
) + (nr
>> 5);
93 __asm__
__volatile__(SMP_WMB
"\n\
100 : "=&r" (old
), "=&r" (t
), "=m" (*p
)
101 : "r" (mask
), "r" (p
), "m" (*p
)
104 return (old
& mask
) != 0;
107 int test_and_change_bit(int nr
, volatile void *addr
)
110 unsigned int mask
= 1 << (nr
& 0x1f);
111 volatile unsigned int *p
= ((volatile unsigned int *)addr
) + (nr
>> 5);
113 __asm__
__volatile__(SMP_WMB
"\n\
120 : "=&r" (old
), "=&r" (t
), "=m" (*p
)
121 : "r" (mask
), "r" (p
), "m" (*p
)
124 return (old
& mask
) != 0;
#endif /* !__INLINE_BITOPS */