#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 *	 need to test for the CMPXCHG8B (CX8) feature in boot_cpu_data.
 */

extern void __xchg_wrong_size(void);
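
/*
 * Illustrative sketch (not part of the original interface): one way a caller
 * could perform the feature test mentioned above before relying on the
 * 64-bit primitives.  boot_cpu_has() and X86_FEATURE_CX8 come from
 * <asm/cpufeature.h>; "shared_stamp" and fallback_locked_store() are made up
 * for the example.
 *
 *	if (boot_cpu_has(X86_FEATURE_CX8))
 *		set_64bit(&shared_stamp, val);		// CMPXCHG8B available
 *	else
 *		fallback_locked_store(&shared_stamp, val); // hypothetical helper
 */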
/*
 * Note: no "lock" prefix even on SMP: xchg always implies a locked access.
 * Note 2: xchg has side effects, so the "volatile" qualifier on the asm is
 *	   necessary; *ptr is an output operand as well as an input. --ANK
 */
/*
 * __xg() casts the pointer to a wide dummy struct so the "m" constraints
 * below cover the whole pointed-to object, not just a single long.
 */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
#define __xchg(x, ptr, size)						\
({									\
	__typeof(*(ptr)) __x = (x);					\
	switch (size) {							\
	case 1:								\
		asm volatile("xchgb %b0,%1"				\
			     : "=q" (__x), "+m" (*__xg(ptr))		\
			     : "0" (__x) : "memory");			\
		break;							\
	case 2:								\
		asm volatile("xchgw %w0,%1"				\
			     : "=r" (__x), "+m" (*__xg(ptr))		\
			     : "0" (__x) : "memory");			\
		break;							\
	case 4:								\
		asm volatile("xchgl %0,%1"				\
			     : "=r" (__x), "+m" (*__xg(ptr))		\
			     : "0" (__x) : "memory");			\
		break;							\
	default:							\
		__xchg_wrong_size();					\
	}								\
	__x;								\
})
#define xchg(ptr, v)							\
	__xchg((v), (ptr), sizeof(*ptr))
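
/*
 * Illustrative sketch (not part of the original interface): xchg() stores the
 * new value and returns the previous contents of *ptr in one atomic step, so
 * it can be used to claim a flag word.  "pending" is a made-up variable.
 *
 *	static unsigned long pending;
 *
 *	if (xchg(&pending, 1UL) == 0) {
 *		// we flipped it from 0 to 1, so we own the pending work
 *	}
 */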
/*
 * CMPXCHG8B only writes to the target if we had the previous
 * value in registers, otherwise it acts as a read and gives us the
 * "new previous" value.  That is why there is a loop.  Preloading
 * EDX:EAX is a performance optimization: in the common case it means
 * we need only one locked operation.
 *
 * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here to allow the
 * instruction to be executed atomically.  We need the reader side to
 * see a coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
	u32 low  = value;
	u32 high = value >> 32;
	u64 prev = *ptr;

	asm volatile("\n1:\t"
		     LOCK_PREFIX "cmpxchg8b %0\n\t"
		     "jnz 1b"
		     : "=m" (*ptr), "+A" (prev)
		     : "b" (low), "c" (high)
		     : "memory");
}
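
/*
 * Illustrative sketch (not part of the original interface): set_64bit() is
 * the writer side of publishing a 64-bit value on 32-bit x86; a reader can
 * fetch it atomically with a cmpxchg8b whose old and new values are equal
 * (the compare either fails or swaps in the same bits, and the current
 * contents come back in EDX:EAX).  "shared_stamp" is a made-up variable.
 *
 *	static volatile u64 shared_stamp;
 *
 *	set_64bit(&shared_stamp, new_stamp);		// writer
 *	cur = __cmpxchg64(&shared_stamp, 0, 0);		// reader: atomic 64-bit load
 */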

extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
		asm volatile(lock "cmpxchgb %b2,%1"			\
			     : "=a" (__ret), "+m" (*__xg(ptr))		\
			     : "q" (__new), "0" (__old) : "memory");	\
		break;							\
	case 2:								\
		asm volatile(lock "cmpxchgw %w2,%1"			\
			     : "=a" (__ret), "+m" (*__xg(ptr))		\
			     : "r" (__new), "0" (__old) : "memory");	\
		break;							\
	case 4:								\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__xg(ptr))		\
			     : "r" (__new), "0" (__old) : "memory");	\
		break;							\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
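
/*
 * Illustrative sketch (not part of the original interface): cmpxchg() is
 * normally used in a read-modify-write retry loop, comparing its return
 * value against the expected old value to see whether the update won.
 * "refcount" is a made-up unsigned long.
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = refcount;
 *		new = old + 1;
 *	} while (cmpxchg(&refcount, old, new) != old);
 */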
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*__xg(ptr))
		     : "b" ((unsigned long)new),
		       "c" ((unsigned long)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
{
	unsigned long long prev;
	asm volatile("cmpxchg8b %1"
		     : "=A" (prev),
		       "+m" (*__xg(ptr))
		     : "b" ((unsigned long)new),
		       "c" ((unsigned long)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary to
 * simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPUs.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
/*
 * If the CPU advertises X86_FEATURE_CX8, alternative_io() patches in the
 * inline "lock; cmpxchg8b"; otherwise the out-of-line cmpxchg8b_emu helper
 * is called.
 */
#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io(LOCK_PREFIX_HERE				\
			"call cmpxchg8b_emu",			\
			"lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret; })
#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	if (likely(boot_cpu_data.x86 > 4))			\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr), \
				(unsigned long long)(o),	\
				(unsigned long long)(n));	\
	else							\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
				(unsigned long long)(o),	\
				(unsigned long long)(n));	\
	__ret;							\
})

#endif

#endif /* _ASM_X86_CMPXCHG_32_H */