//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
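//
// For orientation only (hypothetical user code, not part of this file): a
// compiler-instrumented program routes C++11/C11 atomic operations through
// the __tsan_atomicN_* entry points defined below, roughly like this:
//   std::atomic<int> flag;
//   flag.store(1, std::memory_order_release);
//     // -> __tsan_atomic32_store(&flag, 1, mo_release)
//   int v = flag.load(std::memory_order_acquire);
//     // -> __tsan_atomic32_load(&flag, mo_acquire)
// The exact calls are emitted by the compiler; the mapping above is only an
// illustration of the intended correspondence.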

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
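
// Illustrative expansion only (written out by hand for the entry point
// __tsan_atomic32_load(a, mo) defined below): SCOPED_ATOMIC(Load, a, mo)
// expands roughly to
//   const uptr callpc = (uptr)__builtin_return_address(0);
//   uptr pc = __sanitizer::StackTrace::GetCurrentPc();
//   mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo;
//   ThreadState *const thr = cur_thread();
//   if (thr->ignore_interceptors)
//     return NoTsanAtomicLoad(a, mo);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, a, mo, "__tsan_atomic32_load");
//   return AtomicLoad(thr, pc, a, mo);
// i.e. an uninstrumented fallback when interceptors are ignored, a stats
// update, function-entry bookkeeping via ScopedAtomic, then the instrumented
// slow path.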

// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char      a8;
typedef unsigned short     a16;  // NOLINT
typedef unsigned int       a32;
typedef unsigned long long a64;  // NOLINT
#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;

// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
  mo_relaxed,
  mo_consume,
  mo_acquire,
  mo_release,
  mo_acq_rel,
  mo_seq_cst
} morder;

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
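// For illustration: the emulation below is only sound as long as every access
// to an a128 variable goes through these mutex128-protected helpers. A
// hypothetical non-instrumented writer doing a plain
//   *p = v;   // direct 128-bit store, not under mutex128
// concurrently with func_add(p, 1) would race with the read-modify-write
// performed under the lock, and its update could be lost or observed torn.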
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
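
// Rough illustration of the trade-off described above: a 16-byte atomic is
// shadow-checked as an 8-byte access at the same address, e.g. (hypothetical)
//   a128 x;
//   __tsan_atomic128_fetch_add(&x, 1, mo_relaxed);  // checked as 8 bytes at &x
//   ((char *)&x)[12] = 1;  // plain racy write to the upper half
// the plain write does not overlap the recorded 8-byte range, so the race can
// be missed (a false negative); narrowing the range never adds false positives.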

static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return (atomic_uint8_t*)a;
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return (atomic_uint16_t*)a;
}

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return (atomic_uint32_t*)a;
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return (atomic_uint64_t*)a;
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
281 static T
AtomicLoad(ThreadState
*thr
, uptr pc
, const volatile T
*a
,
283 CHECK(IsLoadOrder(mo
));
284 // This fast-path is critical for performance.
285 // Assume the access is atomic.
286 if (!IsAcquireOrder(mo
)) {
287 MemoryReadAtomic(thr
, pc
, (uptr
)a
, SizeLog
<T
>());
288 return NoTsanAtomicLoad(a
, mo
);
290 SyncVar
*s
= ctx
->synctab
.GetOrCreateAndLock(thr
, pc
, (uptr
)a
, false);
291 AcquireImpl(thr
, pc
, &s
->clock
);
292 T v
= NoTsanAtomicLoad(a
, mo
);
294 MemoryReadAtomic(thr
, pc
, (uptr
)a
, SizeLog
<T
>());

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}
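
// Illustration of what the release/acquire paths above model (hypothetical
// user code, not from this file):
//   // Thread 1:
//   data = 42;                                     // plain write
//   __tsan_atomic32_store(&flag, 1, mo_release);   // ReleaseImpl on &flag
//   // Thread 2:
//   while (!__tsan_atomic32_load(&flag, mo_acquire)) { }  // AcquireImpl
//   use(data);                                     // plain read, not a race
// The store publishes thread 1's vector clock into the SyncVar for &flag and
// the load acquires it, so the plain access to `data` in thread 2
// happens-after the write in thread 1 and is not reported as a race.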

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  return NoTsanAtomicCAS(a, &c, v, mo, fmo);
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}