//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_X86_H
#define SANITIZER_ATOMIC_CLANG_X86_H
namespace __sanitizer {

INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}
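
// Illustrative sketch (not part of the original header): a bounded spin-wait
// helper built on proc_yield. The helper name, the predicate parameter and
// the backoff constants are assumptions made up for this example.
template<typename Pred>
INLINE bool example_spin_wait(Pred pred, int max_iters) {
  for (int iter = 0; iter < max_iters; iter++) {
    if (pred())
      return true;
    // Issue a handful of PAUSE hints before re-checking to reduce bus
    // traffic and power while another thread makes progress.
    proc_yield(10);
  }
  return false;
}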

template<typename T>
INLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 plain MOV is enough for seq_cst store.
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}
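
// Note: T is expected to be one of the atomic_uintNN_t wrapper structs
// declared in sanitizer_atomic.h, i.e. (roughly) a struct providing a
// `Type` typedef and a `volatile Type val_dont_use` field that the
// templates above and below access directly.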

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}
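
// Illustrative sketch (not part of the original header): the intended pairing
// of the primitives above for a simple flag hand-off. The function name is
// made up for the example; A stands for one of the atomic wrapper types
// (e.g. atomic_uint32_t from sanitizer_atomic.h).
template<typename A>
INLINE void example_wait_for_flag(const volatile A *flag) {
  // The acquire load pairs with a release store done by the producer:
  //   atomic_store(flag, 1, memory_order_release);
  while (atomic_load(flag, memory_order_acquire) == 0)
    proc_yield(10);  // spin politely while the flag is still zero
}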

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_X86_H