#ifndef _ASM_X86_CMPXCHG_64_H
#define _ASM_X86_CMPXCHG_64_H

#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
						 (ptr), sizeof(*(ptr))))

#define __xg(x) ((volatile long *)(x))
static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}
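/*
 * Illustrative sketch, not part of the original header: a common use of
 * xchg() is to atomically "take" a value and leave a replacement behind in
 * one step, relying on the implicit lock semantics noted above.  The
 * function name and the caller-supplied pending-flags word are hypothetical.
 */
static inline unsigned long example_take_pending(unsigned long *pending)
{
	/* Grab whatever bits were set and reset the word to 0 atomically. */
	return xchg(pending, 0UL);
}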
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
/*
 * Always use locked operations when touching memory shared with a
 * hypervisor, since the system may be SMP even if the guest kernel
 * thinks it is UP.
 */
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
					   unsigned long old,
					   unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("lock; cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("lock; cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("lock; cmpxchgl %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
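/*
 * Illustrative sketch, not part of the original header: __sync_cmpxchg()
 * keeps the lock prefix unconditionally, which matters for memory a
 * hypervisor may touch concurrently even on a uniprocessor guest.  The
 * scenario here (a 16-bit "slot" claimed by writing an owner id over the
 * free value 0) and the function name are hypothetical; real callers would
 * normally go through the sync_cmpxchg() wrapper defined further down.
 */
static inline int example_claim_shared_slot(volatile unsigned short *slot,
					    unsigned short owner)
{
	/* Nonzero return means we swapped our owner id in over the free value. */
	return __sync_cmpxchg(slot, 0, owner, sizeof(*slot)) == 0;
}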
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		asm volatile("cmpxchgb %b1,%2"
			     : "=a"(prev)
			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 2:
		asm volatile("cmpxchgw %w1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 4:
		asm volatile("cmpxchgl %k1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	case 8:
		asm volatile("cmpxchgq %1,%2"
			     : "=a"(prev)
			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
			     : "memory");
		return prev;
	}
	return old;
}
#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64(ptr, o, n)						\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
})
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
					    (unsigned long)(n),		\
					    sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
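/*
 * Illustrative sketch, not part of the original header: the usual retry loop
 * built on cmpxchg().  Per the comment above __cmpxchg(), success is detected
 * by comparing the returned value with the expected old value.  The function
 * name and the counter parameter are hypothetical.
 */
static inline unsigned long example_counter_add(unsigned long *counter,
						unsigned long delta)
{
	unsigned long old, prev;

	do {
		old = *counter;				/* snapshot the current value */
		prev = cmpxchg(counter, old, old + delta);
	} while (prev != old);				/* another CPU raced us, retry */

	return old;					/* value before the add */
}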
#endif /* _ASM_X86_CMPXCHG_64_H */