//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
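// The __tsan_atomicN_* functions defined below are the runtime entry points
// for atomic operations in instrumented programs.  For example, an
// atomic_load_explicit(&x, memory_order_acquire) on a 32-bit variable is
// expected to reach __tsan_atomic32_load(&x, __tsan_memory_order_acquire);
// the exact redirection is performed by the compiler instrumentation and is
// an assumption here, not something this file enforces.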
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"
using namespace __tsan;  // NOLINT
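// Common prologue for every interface function below: computes the caller pc,
// normalizes the memory order (optionally forcing seq_cst via the
// force_seq_cst_atomics flag), bumps the atomic statistics, and enters an
// RTL scope before dispatching to the corresponding AtomicXXX implementation.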
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    pc = __sanitizer::StackTrace::GetPreviousInstructionPc(pc); \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    CHECK_EQ(thr_->in_rtl, 0);
    ProcessPendingSignals(thr);
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s\n", thr_->tid, func);
    thr_->in_rtl++;
  }
  ~ScopedAtomic() {
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;

const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}
static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}
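// Note: consume ordering is conservatively treated as acquire throughout.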
static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}
static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
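// Values above 100500 encode the memory order as a one-hot mask (1 << order)
// offset by 100500; presumably this comes from an older compiler
// instrumentation ABI.  ConvertOrder maps such values back to the plain
// __tsan_memory_order_* constants.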
static morder ConvertOrder(morder mo) {
  if (mo > (morder)100500) {
    mo = morder(mo - 100500);
    if (mo == morder(1 << 0))
      mo = mo_relaxed;
    else if (mo == morder(1 << 1))
      mo = mo_consume;
    else if (mo == morder(1 << 2))
      mo = mo_acquire;
    else if (mo == morder(1 << 3))
      mo = mo_release;
    else if (mo == morder(1 << 4))
      mo = mo_acq_rel;
    else if (mo == morder(1 << 5))
      mo = mo_seq_cst;
  }
  CHECK_GE(mo, mo_relaxed);
  CHECK_LE(mo, mo_seq_cst);
  return mo;
}
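// Thin wrappers around the __sync builtins; AtomicRMW below is instantiated
// with one of these as the operation.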
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}
template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}
template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}
template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;
  }
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
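// The fast path above assumes that accesses no wider than a pointer
// (sizeof(T) <= sizeof(a)) are naturally atomic, so non-acquire loads skip
// the SyncVar lookup entirely.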
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}
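// Why the trailing barrier above matters: in Dekker-style synchronization
// each thread stores to its own flag and then loads the other thread's flag,
// e.g. (sketch, not code from this file):
//   // Thread 1:                     // Thread 2:
//   atomic_store(&x, 1, seq_cst);    atomic_store(&y, 1, seq_cst);
//   r1 = atomic_load(&y, seq_cst);   r2 = atomic_load(&x, seq_cst);
// Sequential consistency forbids the outcome r1 == 0 && r2 == 0, which
// requires a full store-load barrier after the store; a plain release store
// is not enough.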
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  v = F(a, v);
  s->mtx.Unlock();
  return v;
}
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cc = *c;
  T pr = func_cas(a, cc, v);
  s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
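// Both the _strong and _weak compare_exchange entry points below funnel into
// the same AtomicCAS implementation; the weak/strong distinction (spurious
// failure) is not modeled separately here.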
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
void __tsan_atomic_thread_fence(morder mo) {
  char* a;  // SCOPED_ATOMIC references sizeof(*a) for stats; dummy 1-byte type.
  SCOPED_ATOMIC(Fence, mo);
}

void __tsan_atomic_signal_fence(morder mo) {
}