#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */
/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
 */
extern void __xchg_wrong_size(void);
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * but in general the primitive is invalid, since *ptr is an output argument. --ANK
 */
/* Dummy type used so the "m" constraints below cover the whole object */
struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))
#define __xchg(x, ptr, size)					\
({								\
	__typeof(*(ptr)) __x = (x);				\
	switch (size) {						\
	case 1:							\
		asm volatile("xchgb %b0,%1"			\
			     : "=q" (__x)			\
			     : "m" (*__xg(ptr)), "0" (__x)	\
			     : "memory");			\
		break;						\
	case 2:							\
		asm volatile("xchgw %w0,%1"			\
			     : "=r" (__x)			\
			     : "m" (*__xg(ptr)), "0" (__x)	\
			     : "memory");			\
		break;						\
	case 4:							\
		asm volatile("xchgl %0,%1"			\
			     : "=r" (__x)			\
			     : "m" (*__xg(ptr)), "0" (__x)	\
			     : "memory");			\
		break;						\
	default:						\
		__xchg_wrong_size();				\
	}							\
	__x;							\
})
#define xchg(ptr, v)						\
	__xchg((v), (ptr), sizeof(*ptr))
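/*
 * Illustrative usage sketch only (not part of this header); the variable
 * and constant names below are hypothetical.  xchg() returns the value
 * that was previously in memory, so the caller can see which transition
 * it performed:
 *
 *	unsigned long prev = xchg(&shared_state, STATE_BUSY);
 *	if (prev == STATE_IDLE)
 *		... this CPU won the transition ...
 *
 * No explicit "lock" prefix is needed: xchg with a memory operand is
 * always locked by the processor.
 */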
/*
 * The semantics of CMPXCHG8B are a bit strange; this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside.  This inlines well in most cases, the cached
 * cost is around ~38 cycles.  (In the future we might want
 * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf.  We need
 * the reader side to see the coherent 64-bit value.
 */
static inline void __set_64bit(unsigned long long *ptr,
			       unsigned int low, unsigned int high)
{
	asm volatile("\n1:\t"
		     "movl (%0), %%eax\n\t"
		     "movl 4(%0), %%edx\n\t"
		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
		     "jnz 1b"
		     : /* no outputs */
		     : "D" (ptr),
		       "b" (low),
		       "c" (high)
		     : "ax", "dx", "memory");
}
static inline void __set_64bit_constant(unsigned long long *ptr,
					unsigned long long value)
{
	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}
#define ll_low(x)	*(((unsigned int *)&(x)) + 0)
#define ll_high(x)	*(((unsigned int *)&(x)) + 1)
static inline void __set_64bit_var(unsigned long long *ptr,
				   unsigned long long value)
{
	__set_64bit(ptr, ll_low(value), ll_high(value));
}
#define set_64bit(ptr, value)			\
	(__builtin_constant_p((value))		\
	 ? __set_64bit_constant((ptr), (value))	\
	 : __set_64bit_var((ptr), (value)))
#define _set_64bit(ptr, value)						\
	(__builtin_constant_p(value)					\
	 ? __set_64bit(ptr, (unsigned int)(value),			\
		       (unsigned int)((value) >> 32))			\
	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
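/*
 * Illustrative usage sketch only; "shared_val" is a hypothetical variable.
 * set_64bit() publishes a full 64-bit value so that a concurrent reader
 * never observes a torn pair of 32-bit halves:
 *
 *	static unsigned long long shared_val;
 *	...
 *	set_64bit(&shared_val, 0x0123456789abcdefULL);
 *
 * The constant/variable split above only changes how the low and high
 * halves are extracted at compile time; both paths end up in the same
 * locked cmpxchg8b loop in __set_64bit().
 */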
extern void __cmpxchg_wrong_size(void);
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
		asm volatile(lock "cmpxchgb %b1,%2"			\
			     : "=a"(__ret)				\
			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile(lock "cmpxchgw %w1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile(lock "cmpxchgl %1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)						\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
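/*
 * Illustrative usage sketch only; "counter" is a hypothetical variable.
 * This is the canonical lock-free update loop built on cmpxchg():
 * success is detected by comparing the returned value with the "old"
 * value that was passed in.
 *
 *	unsigned int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */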
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
		     : "=A"(prev)
		     : "b"((unsigned long)new),
		       "c"((unsigned long)(new >> 32)),
		       "m"(*__xg(ptr)),
		       "0"(old)
		     : "memory");
	return prev;
}
static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
{
	unsigned long long prev;
	asm volatile("cmpxchg8b %3"
		     : "=A"(prev)
		     : "b"((unsigned long)new),
		       "c"((unsigned long)(new >> 32)),
		       "m"(*__xg(ptr)),
		       "0"(old)
		     : "memory");
	return prev;
}
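/*
 * Illustrative usage sketch only; the variables below are hypothetical.
 * cmpxchg64() follows the same convention as cmpxchg(), but operates on
 * a 64-bit quantity via cmpxchg8b:
 *
 *	u64 prev = cmpxchg64(&shared_u64, old_val, new_val);
 *	if (prev == old_val)
 *		... the 64-bit update was applied atomically ...
 *
 * As noted at the top of this file, callers of __cmpxchg64() and its
 * variants must test boot_cpu_data for cmpxchg8b (CX8) support.
 */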
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif
#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or 80486.  It may be
 * necessary to simulate cmpxchg8b on those CPUs.
 */
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new >> 32))	\
		       : "memory");				\
	__ret;							\
})
#define cmpxchg64_local(ptr, o, n)				\
({								\
	__typeof__(*(ptr)) __ret;				\
	if (likely(boot_cpu_data.x86 > 4))			\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),	\
				(unsigned long long)(o),	\
				(unsigned long long)(n));	\
	else							\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
				(unsigned long long)(o),	\
				(unsigned long long)(n));	\
	__ret;							\
})

#endif
#endif /* _ASM_X86_CMPXCHG_32_H */