#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

#include <linux/bitops.h> /* for LOCK_PREFIX */

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants, you
 * need to test for the feature in boot_cpu_data.
 */

extern void __xchg_wrong_size(void);

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * but generally the primitive is invalid; *ptr is an output argument. --ANK
 */

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size)					\
({								\
	__typeof(*(ptr)) __x = (x);				\
	switch (size) {						\
	case 1:							\
		asm volatile("xchgb %b0,%1"			\
			     : "=q" (__x)			\
			     : "m" (*__xg(ptr)), "0" (__x)	\
			     : "memory");			\
		break;						\
	case 2:							\
		asm volatile("xchgw %w0,%1"			\
			     : "=r" (__x)			\
			     : "m" (*__xg(ptr)), "0" (__x)	\
			     : "memory");			\
		break;						\
	case 4:							\
		asm volatile("xchgl %0,%1"			\
			     : "=r" (__x)			\
			     : "m" (*__xg(ptr)), "0" (__x)	\
			     : "memory");			\
		break;						\
	default:						\
		__xchg_wrong_size();				\
	}							\
	__x;							\
})

#define xchg(ptr, v)						\
	__xchg((v), (ptr), sizeof(*ptr))

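/*
 * Usage sketch (illustrative, not part of the original header; the names
 * below are made up): xchg() atomically swaps a new value into *ptr and
 * returns the previous contents. Since xchg with a memory operand always
 * asserts the bus lock, this gives a simple test-and-set without any
 * explicit "lock" prefix:
 *
 *	static unsigned long taken;
 *
 *	if (xchg(&taken, 1) == 0)
 *		do_first_time_setup();	(old value was 0: we won the race)
 */
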
/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit(unsigned long long *ptr,
			       unsigned int low, unsigned int high)
{
	asm volatile("\n1:\t"
		     "movl (%0), %%eax\n\t"
		     "movl 4(%0), %%edx\n\t"
		     LOCK_PREFIX "cmpxchg8b (%0)\n\t"
		     "jnz 1b"
		     : /* no outputs */
		     : "D"(ptr),
		       "b"(low),
		       "c"(high)
		     : "ax", "dx", "memory");
}

static inline void __set_64bit_constant(unsigned long long *ptr,
					unsigned long long value)
{
	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
}

#define ll_low(x)	*(((unsigned int *)&(x)) + 0)
#define ll_high(x)	*(((unsigned int *)&(x)) + 1)

static inline void __set_64bit_var(unsigned long long *ptr,
				   unsigned long long value)
{
	__set_64bit(ptr, ll_low(value), ll_high(value));
}

#define set_64bit(ptr, value)			\
	(__builtin_constant_p((value))		\
	 ? __set_64bit_constant((ptr), (value))	\
	 : __set_64bit_var((ptr), (value)))

#define _set_64bit(ptr, value)					\
	(__builtin_constant_p(value)				\
	 ? __set_64bit(ptr, (unsigned int)(value),		\
		       (unsigned int)((value) >> 32))		\
	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))

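/*
 * Usage sketch (illustrative only; "slot" is made up): set_64bit() stores
 * a 64-bit value so that a concurrent reader can never observe a torn,
 * half-written result, e.g. when updating a PAE page-table entry. It
 * requires a CPU with cmpxchg8b; see the boot_cpu_data note at the top
 * of this file.
 *
 *	u64 *slot = ...;
 *	set_64bit(slot, 0x0000000100000001ULL);
 */
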
extern void __cmpxchg_wrong_size(void);

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case 1:								\
		asm volatile(lock "cmpxchgb %b1,%2"			\
			     : "=a"(__ret)				\
			     : "q"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 2:								\
		asm volatile(lock "cmpxchgw %w1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	case 4:								\
		asm volatile(lock "cmpxchgl %1,%2"			\
			     : "=a"(__ret)				\
			     : "r"(__new), "m"(*__xg(ptr)), "0"(__old)	\
			     : "memory");				\
		break;							\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)			\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)			\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, old, new)					\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)				\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)				\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif

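/*
 * Usage sketch (illustrative, not part of the original header; "counter"
 * is made up): the canonical compare-and-swap retry loop. As the comment
 * above says, success is detected by comparing the returned value with
 * the expected old one:
 *
 *	static unsigned int counter;
 *
 *	unsigned int old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */
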
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
					 (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
					       (unsigned long long)(n)))
#endif

static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
		     : "=A"(prev)
		     : "b"((unsigned long)new),
		       "c"((unsigned long)(new >> 32)),
		       "m"(*__xg(ptr)),
		       "0"(old)
		     : "memory");
	return prev;
}

static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
						   unsigned long long old,
						   unsigned long long new)
{
	unsigned long long prev;
	asm volatile("cmpxchg8b %3"
		     : "=A"(prev)
		     : "b"((unsigned long)new),
		       "c"((unsigned long)(new >> 32)),
		       "m"(*__xg(ptr)),
		       "0"(old)
		     : "memory");
	return prev;
}

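/*
 * Usage sketch (illustrative only; "stamp" is made up): the same retry-loop
 * pattern works for 64-bit values on 32-bit kernels via cmpxchg8b. With
 * CONFIG_X86_CMPXCHG64 the cmpxchg64() macro above maps straight to
 * __cmpxchg64(); otherwise the emulating definition further down is used.
 *
 *	static u64 stamp;
 *
 *	u64 old, new;
 *	do {
 *		old = stamp;
 *		new = old + 1;
 *	} while (cmpxchg64(&stamp, old, new) != old);
 */
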
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary to
 * simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})

#define cmpxchg_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg_local((ptr),	\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_386((ptr),		\
				(unsigned long)(o), (unsigned long)(n),	\
				sizeof(*(ptr)));			\
	__ret;								\
})
#endif

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on an 80386 or an 80486. It may be
 * necessary to simulate the cmpxchg8b on the 80386 and 80486 CPU.
 */

extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);

#define cmpxchg64(ptr, o, n)					\
({								\
	__typeof__(*(ptr)) __ret;				\
	__typeof__(*(ptr)) __old = (o);				\
	__typeof__(*(ptr)) __new = (n);				\
	alternative_io("call cmpxchg8b_emu",			\
		       "lock; cmpxchg8b (%%esi)",		\
		       X86_FEATURE_CX8,				\
		       "=A" (__ret),				\
		       "S" ((ptr)), "0" (__old),		\
		       "b" ((unsigned int)__new),		\
		       "c" ((unsigned int)(__new>>32))		\
		       : "memory");				\
	__ret; })

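/*
 * Note on the definition above: instead of testing boot_cpu_data at every
 * call site, alternative_io() lets the kernel patch itself once at boot.
 * CPUs advertising X86_FEATURE_CX8 get the inline "lock cmpxchg8b"
 * patched in over the call, while genuine CX8-less parts keep calling
 * the cmpxchg8b_emu software fallback. Pinning the pointer in "S" (%esi)
 * keeps the register contract identical for both variants.
 */
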
#define cmpxchg64_local(ptr, o, n)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 4))				\
		__ret = (__typeof__(*(ptr)))__cmpxchg64_local((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	else								\
		__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr),	\
				(unsigned long long)(o),		\
				(unsigned long long)(n));		\
	__ret;								\
})

#endif

#endif /* _ASM_X86_CMPXCHG_32_H */