//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
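//
// For example, a C++11 program that performs
//   std::atomic<int> x; x.load(std::memory_order_acquire);
// is instrumented by the compiler to call __tsan_atomic32_load(&x, mo_acquire)
// defined below (an illustrative mapping, not an exact compiler transcript).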
20 #include "sanitizer_common/sanitizer_placement_new.h"
21 #include "sanitizer_common/sanitizer_stacktrace.h"
22 #include "sanitizer_common/sanitizer_mutex.h"
23 #include "tsan_flags.h"
26 using namespace __tsan
; // NOLINT

// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char      a8;
typedef unsigned short     a16;  // NOLINT
typedef unsigned int       a32;
typedef unsigned long long a64;  // NOLINT
#if !defined(TSAN_GO) && (defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302))
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

#ifndef TSAN_GO
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
  mo_relaxed,
  mo_consume,
  mo_acquire,
  mo_release,
  mo_acq_rel,
  mo_seq_cst
} morder;

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
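
// The predicates above mirror the C++11 rules for which orders are valid on
// atomic loads/stores and which imply acquire/release semantics; for example,
// AtomicLoad below CHECKs IsLoadOrder(mo), so a load with mo_release fails
// that CHECK.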

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}
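
// Example: for an a8 value 0xF0, func_nand(&v, 0x3C) stores ~(0xF0 & 0x3C),
// i.e. 0xCF, and returns the previous value 0xF0 (fetch-NAND semantics).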

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(TSAN_GO)
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
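
// For example, SizeLog<a32>() yields kSizeLog4 and SizeLog<a128>() yields
// kSizeLog8, i.e. 16-byte atomics are tracked as 8-byte shadow accesses.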

#ifndef TSAN_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return (atomic_uint8_t*)a;
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return (atomic_uint16_t*)a;
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return (atomic_uint32_t*)a;
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return (atomic_uint64_t*)a;
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
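
// Note that a relaxed (non-acquire) load takes only the fast path above: the
// access is recorded via MemoryReadAtomic, but no SyncVar is created and no
// clock acquisition happens.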

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}
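
// All read-modify-write entry points below funnel into AtomicRMW with the
// matching func_* primitive, e.g. AtomicFetchAdd uses AtomicRMW<T, func_add>.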

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}
400 static bool NoTsanAtomicCAS(volatile a128
*a
, a128
*c
, a128 v
,
401 morder mo
, morder fmo
) {
403 a128 cur
= func_cas(a
, old
, v
);

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#ifndef TSAN_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.

#ifndef TSAN_GO

// C/C++

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
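
// In __tsan_atomic32_load below, for example, SCOPED_ATOMIC(Load, a, mo)
// expands to the ignore_interceptors fast path followed by a call to
// AtomicLoad(thr, pc, a, mo) executed under the ScopedAtomic RAII guard.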

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #ifndef TSAN_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
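
// The Go entry points receive their arguments packed into a single byte
// buffer: the address of the atomic variable is at a+0 and operands/results
// follow at fixed offsets, as the casts in the calls below show.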

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}
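
// Buffer layout for the 32-bit compare-exchange above: a+0 address, a+8 old
// value, a+12 new value, a+16 success flag; the 64-bit variant below uses
// a+8/a+16/a+24 respectively.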

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #ifndef TSAN_GO