//===-- tsan_interface_atomic.cc ------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;  // NOLINT
#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}
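// The func_* helpers below perform the actual memory operation for each
// atomic primitive, using the compiler's __sync_* builtins (or, for 128-bit
// operands without a native CAS, the mutex-protected emulation further down).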
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
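
// Maps the operand size to the kSizeLog constant that is passed to
// MemoryReadAtomic/MemoryWriteAtomic below.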
template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access,
  // this leads to false negatives only in very obscure cases.
}
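
// Casts the interface-level integer types to the corresponding
// sanitizer_common atomic wrappers.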
#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}
static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}
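
// Each operation comes in two flavors: NoTsanAtomic* performs only the raw
// memory operation, while Atomic* additionally models the synchronization
// (acquire/release on the SyncVar's clock) and reports the access to the
// race detector.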
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  AcquireImpl(thr, pc, &s->clock);
  T v = NoTsanAtomicLoad(a, mo);
  s->mtx.ReadUnlock();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}
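
// The instrumented RMW operations all funnel into AtomicRMW with the
// matching func_* callback.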
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}
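
// Note: for pure acquire/consume orders the sync object is only read-locked,
// as the acquire path does not modify the SyncVar's clock.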
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++
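
// SCOPED_ATOMIC is the common prologue of every C/C++ entry point: it grabs
// the caller PC, optionally upgrades the memory order to seq_cst (the
// force_seq_cst_atomics flag), bypasses instrumentation when interceptors are
// ignored, updates statistics and dispatches to the Atomic* implementation.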
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_interceptors) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};
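
// Bumps the per-thread statistics counters for the operation kind, operand
// size and memory order.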
static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
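// The compiler instrumentation rewrites C/C++ atomic builtins into calls to
// these entry points, e.g. an __atomic_load_n on a 32-bit object becomes a
// call to __tsan_atomic32_load(addr, mo).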
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"
#else  // #if !SANITIZER_GO

// Go
#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
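
// The Go entry points receive a single packed argument block: the address of
// the atomic variable at offset 0, followed by the operand value(s) and a
// result slot (the exact offsets differ per operation, as the casts below
// show). cpc and pc are the caller and current program counters supplied by
// the caller.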
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO