//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT

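// SCOPED_ATOMIC expands into the body of every __tsan_atomic* entry point
// below.  It relies on the enclosing function's `a` and `mo` parameters:
// it captures the caller PC, optionally forces seq_cst ordering
// (force_seq_cst_atomics flag), takes the uninstrumented NoTsanAtomic##func
// path when interceptors are ignored, updates statistics, and otherwise
// dispatches to Atomic##func under a ScopedAtomic RAII guard.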
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

// These should match declarations from public tsan_interface_atomic.h header.
typedef unsigned char      a8;
typedef unsigned short     a16;  // NOLINT
typedef unsigned int       a32;
typedef unsigned long long a64;  // NOLINT
#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
#else
# define __TSAN_HAS_INT128 0
#endif

// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;

// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
typedef enum {
  mo_relaxed,
  mo_consume,
  mo_acquire,
  mo_release,
  mo_acq_rel,
  mo_seq_cst
} morder;

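// RAII helper for one instrumented atomic operation: records function
// entry/exit on the thread's shadow stack and processes pending signals when
// the operation completes.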
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

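// Updates per-thread statistics: operation kind, access size and memory order.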
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

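// Predicates classifying memory orders: which orders are legal for loads and
// stores, and which imply acquire and/or release semantics.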
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

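// Thin wrappers around the __sync builtins that actually perform the memory
// operation; AtomicRMW below is parameterized by one of these.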
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

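// Maps the access size to the kSizeLog* constant expected by the
// MemoryReadAtomic/MemoryWriteAtomic shadow accesses.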
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}

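// Converters from the interface typedefs and morder values to the
// sanitizer_common atomic types and memory orders.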
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return (atomic_uint8_t*)a;
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return (atomic_uint16_t*)a;
}

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return (atomic_uint32_t*)a;
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return (atomic_uint64_t*)a;
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

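// NoTsanAtomic* perform the plain atomic operation without race-detector
// bookkeeping.  They are used when interceptors are ignored; AtomicLoad and
// AtomicStore also use them for the actual memory access.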
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}

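// Instrumented store: release and seq_cst stores synchronize through the
// SyncVar clock for the address; weaker orders take the fast path below.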
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

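// Generic instrumented read-modify-write: records an atomic write in shadow
// memory, performs the acquire/release clock operations implied by mo on the
// address's SyncVar, then executes the operation F.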
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

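// Per-operation wrappers.  The NoTsan* versions call the func_* helpers
// directly; the instrumented versions all funnel into AtomicRMW.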
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

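// Compare-and-swap helpers.  On failure the pointer overloads write the
// observed value back to *c.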
template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  return NoTsanAtomicCAS(a, &c, v, mo, fmo);
}

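// Instrumented CAS.  Acquire/consume orders only need a read lock on the
// SyncVar; other non-relaxed orders take a write lock.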
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

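// Value-returning overload used by the *_compare_exchange_val entry points:
// returns the value observed in *a instead of a success flag.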
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

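// Fences.  The thread fence is not modeled precisely yet (see FIXME below)
// and falls back to a full hardware barrier.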
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}

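// Interface functions declared in tsan_interface_atomic.h.  Each one simply
// expands SCOPED_ATOMIC for the corresponding operation, size and arguments.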
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

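// SCOPED_ATOMIC references a variable `a`, which the fence interface does not
// have, hence the dummy declaration below.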
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"