//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
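//
// Orientation (illustrative, not part of the runtime logic): a C++11
// operation such as
//   std::atomic<int> x;
//   int v = x.load(std::memory_order_acquire);
// is lowered by the instrumented compiler into a call to the matching
// interface function defined below, roughly:
//   a32 v = __tsan_atomic32_load((const volatile a32 *)&x, mo_acquire);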
20 #include "sanitizer_common/sanitizer_placement_new.h"
21 #include "sanitizer_common/sanitizer_stacktrace.h"
22 #include "sanitizer_common/sanitizer_mutex.h"
23 #include "tsan_flags.h"
24 #include "tsan_interface.h"
27 using namespace __tsan
; // NOLINT
#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}
static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}
static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}
static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
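
// Sanity illustration for the predicates above (hypothetical checks, not
// compiled here): mo_acq_rel satisfies both directions at once, while
// mo_acquire is a valid load order but not a valid store order.
//   CHECK(IsAcquireOrder(mo_acq_rel) && IsReleaseOrder(mo_acq_rel)
//       && IsAcqRelOrder(mo_acq_rel));
//   CHECK(IsLoadOrder(mo_acquire) && !IsStoreOrder(mo_acquire));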
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}
template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}
template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}
template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
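
// Note on the assumption above (illustrative): the mutex-based emulation is
// atomic only with respect to other callers that take mutex128. If
// non-instrumented code performs a plain 128-bit access to the same variable,
//   *p = x;  // bypasses mutex128
// it can interleave with the locked read-modify-write sequences above and
// tear or lose an update.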
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
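
// Example of the caveat above (illustrative): since a 16-byte atomic is
// recorded as an 8-byte access at its base address, a conflicting plain
// write that touches only bytes 8..15 of the same variable may not be
// matched against it, and such a race may go unreported.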
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}
static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}
#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}
#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}
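
// Illustration (informal) of why the release path matters: in the classic
// message-passing idiom
//   // thread 1:
//   data = 42;
//   __tsan_atomic32_store(&flag, 1, mo_release);
//   // thread 2:
//   while (__tsan_atomic32_load(&flag, mo_acquire) == 0) {}
//   use(data);  // not reported as a race
// ReleaseImpl publishes thread 1's vector clock on flag's SyncVar, and the
// acquire load in AtomicLoad imports it into thread 2.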
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}
#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif
template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
// Interface functions follow.

#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics,
  // since we use __sync_ atomics for actual atomic operations,
  // we can safely ignore it as well. It also subtly affects semantics,
  // but we don't model the difference.
  return (morder)(mo & 0x7fff);
}
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
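
// Sketch of one expansion (illustrative): inside __tsan_atomic32_load below,
// SCOPED_ATOMIC(Load, a, mo) becomes approximately:
//   const uptr callpc = (uptr)__builtin_return_address(0);
//   uptr pc = StackTrace::GetCurrentPc();
//   mo = convert_morder(mo);
//   ThreadState *const thr = cur_thread();
//   if (thr->ignore_interceptors)
//     return NoTsanAtomicLoad(a, mo);
//   AtomicStatInc(thr, sizeof(*a), mo, StatAtomicLoad);
//   ScopedAtomic sa(thr, callpc, a, mo, __func__);
//   return AtomicLoad(thr, pc, a, mo);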
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
#else  // #if !SANITIZER_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
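
// Layout of the Go argument frame `a` (inferred from the casts below): the
// first word holds the address of the atomic variable, followed by the
// operands and, last, the result slot. E.g. for __tsan_go_atomic32_fetch_add:
//   a+0:  a32*  address of the variable
//   a+8:  a32   addend
//   a+16: a32   slot for the returned previous value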
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}
929 void __tsan_go_atomic32_compare_exchange(
930 ThreadState
*thr
, uptr cpc
, uptr pc
, u8
*a
) {
932 a32 cmp
= *(a32
*)(a
+8);
933 ATOMIC_RET(CAS
, cur
, *(a32
**)a
, cmp
, *(a32
*)(a
+12), mo_acq_rel
, mo_acquire
);
934 *(bool*)(a
+16) = (cur
== cmp
);
937 SANITIZER_INTERFACE_ATTRIBUTE
938 void __tsan_go_atomic64_compare_exchange(
939 ThreadState
*thr
, uptr cpc
, uptr pc
, u8
*a
) {
941 a64 cmp
= *(a64
*)(a
+8);
942 ATOMIC_RET(CAS
, cur
, *(a64
**)a
, cmp
, *(a64
*)(a
+16), mo_acq_rel
, mo_acquire
);
943 *(bool*)(a
+24) = (cur
== cmp
);
946 #endif // #if !SANITIZER_GO