//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
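//
// For illustration only (the mapping is performed by the compiler
// instrumentation, not by this file): a statement such as
//   x.load(std::memory_order_acquire)   // x is a std::atomic<int>
// is lowered to roughly
//   __tsan_atomic32_load((const volatile a32 *)&x, mo_acquire);
// i.e. to the __tsan_atomic* entry points defined below.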
20 #include "sanitizer_common/sanitizer_placement_new.h"
21 #include "sanitizer_common/sanitizer_stacktrace.h"
22 #include "sanitizer_common/sanitizer_mutex.h"
23 #include "tsan_flags.h"
26 using namespace __tsan
; // NOLINT
// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char      a8;
typedef unsigned short     a16;  // NOLINT
typedef unsigned int       a32;
typedef unsigned long long a64;  // NOLINT
#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
  mo_relaxed, mo_consume, mo_acquire, mo_release,
  mo_acq_rel, mo_seq_cst
} morder;
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}
template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}
template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}
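// Note: the CAS loop above behaves like __sync_fetch_and_nand would:
// it stores ~(old & op) and returns the old value.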
template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
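// When the compiler does provide a 16-byte __sync compare-and-swap
// (__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16), the generic templates above are used
// for a128 as well and this mutex-based emulation is compiled out.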
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
#ifndef SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}
static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
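// An acquire load acquires the clock stored in the SyncVar for this address,
// so it synchronizes-with earlier release stores/RMWs on the same address;
// non-acquire loads take the fast path above and skip the SyncVar lookup.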
template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}
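// A release store publishes the current thread's clock into the SyncVar for
// this address (after advancing the epoch); later acquire operations on the
// same address pick it up in AtomicLoad/AtomicRMW/AtomicCAS.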
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}
#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif
template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
#ifndef SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif
// Interface functions follow.

#ifndef SANITIZER_GO

// C/C++

#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
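// SCOPED_ATOMIC expands inside each __tsan_atomic* entry point below: it
// captures the caller PC, optionally upgrades mo to seq_cst under
// flags()->force_seq_cst_atomics, bypasses instrumentation when interceptors
// are ignored, updates statistics, and dispatches to the Atomic##func template.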
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"
#else  // #ifndef SANITIZER_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

extern "C" {
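// The Go entry points below receive a single pointer 'a' to a packed argument
// block prepared by the Go runtime: the address of the atomic variable sits at
// offset 0, any operands follow it, and the result slot comes last (see the
// offsets used in the casts below).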
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #ifndef SANITIZER_GO