//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//
13 #ifndef SANITIZER_ATOMIC_CLANG_H
14 #define SANITIZER_ATOMIC_CLANG_H
16 namespace __sanitizer
{
18 INLINE
void atomic_signal_fence(memory_order
) {
19 __asm__
__volatile__("" ::: "memory");
22 INLINE
void atomic_thread_fence(memory_order
) {
26 INLINE
void proc_yield(int cnt
) {
27 __asm__
__volatile__("" ::: "memory");
28 #if defined(__i386__) || defined(__x86_64__)
29 for (int i
= 0; i
< cnt
; i
++)
30 __asm__
__volatile__("pause");
32 __asm__
__volatile__("" ::: "memory");
36 INLINE typename
T::Type
atomic_load(
37 const volatile T
*a
, memory_order mo
) {
38 DCHECK(mo
& (memory_order_relaxed
| memory_order_consume
39 | memory_order_acquire
| memory_order_seq_cst
));
40 DCHECK(!((uptr
)a
% sizeof(*a
)));
43 // 64-bit atomic operations are not atomic on 32-bit platforms.
44 // The implementation lacks necessary memory fences on ARM/PPC.
45 // We would like to use compiler builtin atomic operations,
46 // but they are mostly broken:
47 // - they lead to vastly inefficient code generation
48 // (http://llvm.org/bugs/show_bug.cgi?id=17281)
49 // - 64-bit atomic operations are not implemented on x86_32
50 // (http://llvm.org/bugs/show_bug.cgi?id=15034)
51 // - they are not implemented on ARM
52 // error: undefined reference to '__atomic_load_4'
53 if (mo
== memory_order_relaxed
) {
56 atomic_signal_fence(memory_order_seq_cst
);
58 atomic_signal_fence(memory_order_seq_cst
);
64 INLINE
void atomic_store(volatile T
*a
, typename
T::Type v
, memory_order mo
) {
65 DCHECK(mo
& (memory_order_relaxed
| memory_order_release
66 | memory_order_seq_cst
));
67 DCHECK(!((uptr
)a
% sizeof(*a
)));
68 if (mo
== memory_order_relaxed
) {
71 atomic_signal_fence(memory_order_seq_cst
);
73 atomic_signal_fence(memory_order_seq_cst
);
75 if (mo
== memory_order_seq_cst
)
76 atomic_thread_fence(memory_order_seq_cst
);
80 INLINE typename
T::Type
atomic_fetch_add(volatile T
*a
,
81 typename
T::Type v
, memory_order mo
) {
83 DCHECK(!((uptr
)a
% sizeof(*a
)));
84 return __sync_fetch_and_add(&a
->val_dont_use
, v
);
88 INLINE typename
T::Type
atomic_fetch_sub(volatile T
*a
,
89 typename
T::Type v
, memory_order mo
) {
91 DCHECK(!((uptr
)a
% sizeof(*a
)));
92 return __sync_fetch_and_add(&a
->val_dont_use
, -v
);
96 INLINE typename
T::Type
atomic_exchange(volatile T
*a
,
97 typename
T::Type v
, memory_order mo
) {
98 DCHECK(!((uptr
)a
% sizeof(*a
)));
99 if (mo
& (memory_order_release
| memory_order_acq_rel
| memory_order_seq_cst
))
100 __sync_synchronize();
101 v
= __sync_lock_test_and_set(&a
->val_dont_use
, v
);
102 if (mo
== memory_order_seq_cst
)
103 __sync_synchronize();
108 INLINE
bool atomic_compare_exchange_strong(volatile T
*a
,
109 typename
T::Type
*cmp
,
110 typename
T::Type xchg
,
112 typedef typename
T::Type Type
;
114 Type prev
= __sync_val_compare_and_swap(&a
->val_dont_use
, cmpv
, xchg
);
122 INLINE
bool atomic_compare_exchange_weak(volatile T
*a
,
123 typename
T::Type
*cmp
,
124 typename
T::Type xchg
,
126 return atomic_compare_exchange_strong(a
, cmp
, xchg
, mo
);
129 } // namespace __sanitizer
133 #endif // SANITIZER_ATOMIC_CLANG_H