//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals).
//   - All functions/classes/etc. reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//
#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#ifndef TSAN_GO
struct MapUnmapCallback;
typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker
// FastState (from most significant bit):
//   ignore        : 1
//   tid           : kTidBits
//   unused        : -
//   history_size  : 3
//   epoch         : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  // kFreedBit shares bit 63 with kIgnoreBit; Shadow uses it as the freed marker.
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
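// Usage sketch (illustrative only, not part of the original header): how the
// runtime typically manipulates a FastState word; the concrete values below
// are made up for illustration.
//   FastState fs(/*tid=*/1, /*epoch=*/0);
//   fs.SetHistorySize(2);        // trace history length selector (0..7)
//   fs.IncrementEpoch();         // epoch() == 1, tid() is still 1
//   u64 pos = fs.GetTracePos();  // index into this thread's trace buffer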
// Shadow (from most significant bit):
//   freed     : 1
//   tid       : kTidBits
//   is_atomic : 1
//   is_read   : 1
//   size_log  : 2
//   addr0     : 3
//   epoch     : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1) > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write
  // shadow values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // the tid, so any access races with a shadow value that has the freed bit
  // set (it is as if it were a write from a thread we have never synchronized
  // with). This allows us to detect accesses to freed memory without
  // additional overhead in memory access processing, and at the same time to
  // restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
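// Illustrative sketch (not part of the original header, and only a simplified
// approximation of the real fast path in MemoryAccessImpl): comparing the
// current access 'cur' against an old shadow cell 'old'; 'old', 'cur' and
// 'kAccessSize' are placeholder names.
//   bool maybe_race = !Shadow::TidsAreEqual(old, cur) &&
//                     Shadow::TwoRangesIntersect(old, cur, kAccessSize) &&
//                     !old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic);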
struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same
  // 'synch epoch'. That is, if another memory access does not race with the
  // former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 taken by synch_epoch and 12 by the
  // epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  // Go does not support ignores.
#ifndef TSAN_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

  InternalDeadlockDetector internal_deadlock_detector;
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;

  atomic_uintptr_t in_signal_handler;
  SignalContext *signal_ctx;

  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};
#ifndef TSAN_GO
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif
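// Usage sketch (illustrative only, not part of the original header):
// interceptors and instrumentation entry points fetch the per-thread state
// once and pass it down explicitly; 'pc' and 'addr' are placeholders.
//   ThreadState *thr = cur_thread();
//   MemoryRead(thr, pc, addr, kSizeLog8);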
class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped with an older epoch,
  // the event is from a dead thread that shared the tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
  void OnDetached(void *arg);
};
struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};
struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};
struct Context {
  Context();

  bool initialized;
  bool after_multithreaded_fork;

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large, so keep them in an
  // mmap-backed vector.
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.
struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#ifndef TSAN_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset);

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
}
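// Usage sketch (illustrative only, not part of the original header):
// report-generation code typically captures the current shadow stack into a
// VarSizeStackTrace before attaching it to a report; 'thr' and 'toppc' are
// placeholders (toppc may be 0 when no extra top frame is needed).
//   VarSizeStackTrace trace;
//   ObtainCurrentStack(thr, toppc, &trace);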
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}

void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
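// Illustrative example (not from the original header): the kSizeLog*
// constants encode the access size as log2(bytes), so an instrumented 4-byte
// store would be reported as:
//   MemoryWrite(thr, pc, addr, kSizeLog4);  // log2(4) == 2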
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0 && defined(__x86_64__)
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
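// Usage sketch (illustrative, not part of the original header): the macro is
// invoked with the name of a function that has a matching assembly thunk,
// e.g. the trace-switch slow path used in TraceAddEvent below:
//   HACKY_CALL(__tsan_trace_switch);
// On x86_64 with TSAN_DEBUG == 0 this expands to "call __tsan_trace_switch_thunk"
// around the reserve stack frame; otherwise it is a plain f() call.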
void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, 61), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << 61);
  *evp = ev;
}
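// Illustrative decoding of a stored event (assumption: it simply mirrors the
// encoding above, where the event type occupies the top 3 bits and the
// address/pc the low 61 bits; 'ev' is a placeholder for a trace slot value):
//   EventType typ = (EventType)(ev >> 61);
//   u64 addr_or_pc = ev & ((1ull << 61) - 1);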
}  // namespace __tsan

#endif  // TSAN_RTL_H