//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
#endif

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
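
// Note: these predicates classify the requested memory order so that the
// instrumented operations below (AtomicLoad/AtomicStore/AtomicRMW/AtomicCAS)
// can pick the matching vector-clock transfer: acquire-like orders import the
// sync object's clock, release-like orders export the thread's clock, and
// acq_rel does both.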

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}
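
// The loop above is a standard CAS retry: it keeps re-reading the current
// value until __sync_val_compare_and_swap manages to install ~(cmp & op),
// then returns the value observed before the successful swap, matching the
// fetch-and-nand contract.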

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template <typename T>
static int AccessSize() {
  if (sizeof(T) <= 1)
    return 1;
  else if (sizeof(T) <= 2)
    return 2;
  else if (sizeof(T) <= 4)
    return 4;
  else
    return 8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  DCHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template <typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  DCHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
  if (s) {
    SlotLocker locker(thr);
    ReadLock lock(&s->mtx);
    thr->clock.Acquire(s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
  }
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
  return v;
}
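
// Illustrative example of what the acquire path above buys us (not part of
// the runtime logic): if thread A writes some data and then release-stores a
// flag, and thread B acquire-loads the flag and reads the data, the
// thr->clock.Acquire(s->clock) call imports A's release clock into B, so B's
// subsequent plain read of the data is not reported as a race.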

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  DCHECK(IsStoreOrder(mo));
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
    NoTsanAtomicStore(a, v, mo);
  }
  IncrementEpoch(thr);
}
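
// The nested scope above ends before IncrementEpoch(thr), so s->mtx is
// already released when the epoch is advanced (presumably to keep the sync
// mutex out of the epoch increment), while SlotLocker stays held for the
// whole slow path.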

template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, IsReleaseOrder(mo));
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
    v = F(a, v);
  }
  if (IsReleaseOrder(mo))
    IncrementEpoch(thr);
  return v;
}
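
// All fetch-and-op entry points below funnel into AtomicRMW with the
// corresponding func_* helper, e.g. __tsan_atomic32_fetch_add ends up as
// AtomicRMW<a32, func_add>. The memory order decides the clock transfer:
// acq_rel -> ReleaseAcquire, release -> Release, acquire/consume -> Acquire,
// relaxed -> no clock operation (fast path above).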

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template <typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                      morder mo, morder fmo) {
  // 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
  // (mo_relaxed) when those are used.
  DCHECK(IsLoadOrder(fmo));

  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
    T cc = *c;
    T pr = func_cas(a, cc, v);
    if (pr == cc)
      return true;
    *c = pr;
    return false;
  }
  SlotLocker locker(thr);
  bool release = IsReleaseOrder(mo);
  bool success;
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, release);
    T cc = *c;
    T pr = func_cas(a, cc, v);
    success = pr == cc;
    if (!success) {
      *c = pr;
      mo = fmo;
    }
    if (success && IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (success && IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
  }
  if (success && release)
    IncrementEpoch(thr);
  return success;
}
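
// On failure the code above downgrades mo to fmo before the clock transfer,
// so a failed compare_exchange performs at most an acquire (per the C++ rule
// that the failure order cannot be release/acq_rel), and the epoch is only
// advanced when a successful exchange actually released.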

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics,
  // since we use __sync_ atomics for actual atomic operations,
  // we can safely ignore it as well. It also subtly affects semantics,
  // but we don't model the difference.
  return (morder)(mo & 0x7fff);
}
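
// For example (illustrative, using the constants listed above): an HLE
// access such as __atomic_exchange_n(p, v, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE)
// arrives here with mo == (1 << 16) | 2; masking with 0x7fff leaves plain
// mo_acquire, i.e. the elision hint is simply dropped.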

#  define ATOMIC_IMPL(func, ...)                                \
    ThreadState *const thr = cur_thread();                      \
    ProcessPendingSignals(thr);                                 \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
      return NoTsanAtomic##func(__VA_ARGS__);                   \
    mo = convert_morder(mo);                                    \
    return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
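
// Roughly, each __tsan_atomic* entry point below expands to: fetch the
// current ThreadState, deliver pending signals, fall back to the plain
// NoTsanAtomic* implementation if synchronization is being ignored, and
// otherwise normalize the memory order and call the instrumented Atomic*
// implementation with the caller's PC.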

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
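
// Note: the body above is empty; a signal fence only orders accesses between
// a thread and a signal handler running in that same thread, so there is no
// cross-thread synchronization for the race detector to model.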
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

#  define ATOMIC(func, ...)               \
    if (thr->ignore_sync) {               \
      NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                              \
      FuncEntry(thr, cpc);                \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                      \
    }

#  define ATOMIC_RET(func, ret, ...)              \
    if (thr->ignore_sync) {                       \
      (ret) = NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                                      \
      FuncEntry(thr, cpc);                        \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                              \
    }
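
// In the Go build all arguments and results are passed through a single
// argument block `a`: the block starts with the pointer to the atomic
// variable, followed by operands and results at the offsets decoded below
// (e.g. the operand at a+8 and, for RMW operations, the result at a+16).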

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}
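
// Layout of the argument block for the compare-exchange entry points below
// (derived from the offsets used in the code): the 32-bit variant is
// {addr:8, old:4, new:4, swapped:1}; the 64-bit variant is
// {addr:8, old:8, new:8, swapped:1}.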

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO