//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {
// Descriptor of user's memory block.
struct MBlock {
  /*
  u64 mtx : 1;  // must be first
  u64 lst : 44;
  u64 stk : 31;  // on word boundary
  u64 tid : kTidBits;
  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
  */
  u64 raw[2];
  void Init(uptr siz, u32 tid, u32 stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
    raw[0] |= (u64)stk << (1 + 44);
    raw[1] |= (u64)stk >> (64 - 44 - 1);
    DCHECK_EQ(Size(), siz);
    DCHECK_EQ(Tid(), tid);
    DCHECK_EQ(StackId(), stk);
  }
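  // Illustrative sketch of the round trip (the concrete values are made up):
  // the three fields are packed into the 128-bit raw[] pair and recovered by
  // the accessors below.
  //   MBlock b;
  //   b.Init(/*siz=*/24, /*tid=*/1, /*stk=*/0x2a);
  //   // b.Size() == 24, b.Tid() == 1, b.StackId() == 0x2a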
  u64 Tid() const {
    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
  }

  uptr Size() const {
    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
  }

  u32 StackId() const {
    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
  }
  SyncVar *ListHead() const {
    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
  }
  void ListPush(SyncVar *v) {
    SyncVar *lst = ListHead();
    v->next = lst;
    u64 x = (u64)v ^ (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), v);
  }
  SyncVar *ListPop() {
    SyncVar *lst = ListHead();
    SyncVar *nxt = lst->next;
    lst->next = 0;
    u64 x = (u64)lst ^ (u64)nxt;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), nxt);
    return lst;
  }
  void ListReset() {
    SyncVar *lst = ListHead();
    u64 x = (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), 0);
  }
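  // Note on the update pattern above (a sketch, not normative): the list head
  // lives in bits [1..44] of raw[0] with the low 3 pointer bits dropped, so
  // replacing head A with head B is a single XOR:
  //   raw[0] ^= (((u64)A ^ (u64)B) >> 3) << 1;
  // XOR-ing the old and new encodings leaves all neighboring bitfields
  // intact, which is why ListPush/ListPop/ListReset never mask raw[0].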
  void Lock();
  void Unlock();
  typedef GenericScopedLock<MBlock> ScopedLock;
};
#ifndef TSAN_GO
#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#else
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#endif
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif
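// A minimal usage sketch, assuming the sanitizer_common CombinedAllocator
// interface (the cache comes from the current thread's state):
//   AllocatorCache *cache = ...;  // e.g. &thr->alloc_cache
//   void *p = allocator()->Allocate(cache, /*size=*/64, /*alignment=*/8);
//   allocator()->Deallocate(cache, p);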
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);
const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker
// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   history_size    : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }
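  // Packing sketch (values chosen for illustration only): assuming, say,
  // kTidBits == 13 and kClkBits == 42, FastState(/*tid=*/5, /*epoch=*/100)
  // places tid in the 13 bits just below the ignore msb and epoch in the
  // 42 bits below that, so tid() == 5 and epoch() == 100 read the fields
  // back without any extra state.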
  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }
  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }
  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }
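  // Worked example (illustrative): with hs == 0 the mask is
  // (1ull << (kTracePartSizeBits + 1)) - 1, i.e. the position wraps over
  // 2 trace parts; each extra unit of history size doubles the number of
  // parts kept.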
 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }
  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }
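  // Encoding sketch (illustrative): a 4-byte access at offset 4 within the
  // 8-byte cell has kAccessSizeLog == 2 and addr0 == 4, so the low 5 bits
  // of x_ become (2 << 3) | 4 == 0x14.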
  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }
  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }
  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }
  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }
  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }
  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }
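  // Worked example (illustrative): s1 covers [2,4) (addr0 == 2, size == 2)
  // and s2 is a 4-byte access at addr0 == 0; diff == 2 is non-negative and
  // kS2AccessSize (4) > diff, so the ranges intersect.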
  // The idea behind the offset is as follows.
  // Consider that we have 8 bool's contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the bools
  // from a single thread (which we consider the common case).
  // W/o offsetting each access would have to scan 4 shadow values on average
  // to find the corresponding shadow value for the bool.
  // With offsetting we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow straight off (at least in the
  // expected, optimistic case).
  // This logic works seamlessly for any layout of user data. For example,
  // if user data is {int, short, char, char}, then accesses to the int are
  // offsetted to 0, short - 4, 1st char - 6, 2nd char - 7. Hopefully, accesses
  // from a single thread won't need to scan all 8 shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool IsWrite() const { return !IsRead(); }
  bool IsRead() const { return x_ & kReadBit; }
  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write to
  // the shadow values with tid/epoch related to the free and the freed bit set.
  // During memory access processing the freed bit is treated as the msb of
  // tid, so any access races with a shadow value that has the freed bit set
  // (it is as if it were a write from a thread with which we have never
  // synchronized before). This allows us to detect accesses to freed memory
  // w/o additional overhead in memory access processing and at the same time
  // restore the tid/epoch of the free.

  void MarkAsFreed() {
     x_ |= kFreedBit;
  }
  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }
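  // Sketch of how the freed bit surfaces in race detection (illustrative):
  // on free, the runtime stamps the shadow with the freeing thread's state
  // plus the freed bit, e.g.
  //   Shadow s(thr->fast_state);
  //   s.MarkAsFreed();
  // A later access compares tids including the msb, never matches, and is
  // reported as a race whose "other" event is the free.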
  bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    // analyzes 5-th bit (is_read) and 6-th bit (is_atomic)
    bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
        | (kIsAtomic << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }
  bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }
  bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }
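  // The two comparisons above order accesses by "strength" via the 2-bit
  // value (is_atomic << 1) | is_read: atomic read (3) is weakest, then
  // atomic write (2), plain read (1), plain write (0). E.g. for a plain
  // write argument (kIsWrite == 1, kIsAtomic == 0) the threshold is 0, so
  // only another plain write satisfies IsRWNotWeaker.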
 private:
  static const u64 kReadShift = 5;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> 3) & 3; }
  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  uptr *shadow_stack_pos;
};
// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same
  // 'synch epoch'. That is, if another memory access does not race with the
  // former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits, 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read instead. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  // C/C++ uses a fixed size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
#endif
  u64 stat[StatCnt];

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif
  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};
#ifndef TSAN_GO
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
#endif
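// Usage sketch (illustrative): interceptors obtain the calling thread's
// state through cur_thread(); the placeholder trick avoids running a C++
// constructor for the TLS object, per the ground rules above.
//   ThreadState *thr = cur_thread();
//   if (thr->nomalloc) { /* must not allocate here */ }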
class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
#ifdef TSAN_GO
  StackTrace creation_stack;
#else
  u32 creation_stack_id;
#endif
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
};
struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};
struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  const ReportDesc *rep;
  uptr pc;
};
struct Context {
  Context();

  bool initialized;

  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  InternalMmapVector<FiredSuppression> fired_suppressions;

  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};
class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  void AddMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};
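// Usage sketch (illustrative; locking and error handling elided): a report
// is built incrementally and then emitted:
//   ScopedReport rep(ReportTypeRace);
//   rep.AddMemoryAccess(addr, s, &trace, &mset);
//   rep.AddThread(tctx);
//   OutputReport(ctx, rep);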
void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}
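// Usage sketch (illustrative): hot paths bump per-thread counters that are
// later merged into a global array; with kCollectStats == 0 both helpers
// compile away entirely.
//   StatInc(thr, StatEvents);
//   StatAggregate(ctx->stat, thr->stat);  // assumes Context keeps u64 stat[]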
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1 = 0,
                  const ReportStack *suppress_stack2 = 0,
                  const ReportLocation *suppress_loc = 0);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
u32 CurrentStackId(ThreadState *thr, uptr pc);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);
SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);
const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
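// Usage sketch (illustrative): instrumentation reporting a 4-byte load at
// address a would call
//   MemoryRead(thr, pc, (uptr)a, kSizeLog4);
// unaligned or oddly-sized accesses go through UnalignedMemoryAccess or
// MemoryAccessRange instead.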
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void ThreadIgnoreBegin(ThreadState *thr);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr);
void ThreadIgnoreSyncEnd(ThreadState *thr);
void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
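// Usage sketch (illustrative): a pthread_create interceptor pairs these
// calls roughly as follows (synchronization with the child elided):
//   int tid = ThreadCreate(thr, pc, (uptr)th, /*detached=*/false);
//   // ...in the child thread...
//   ThreadStart(child_thr, tid, GetTid());
//   // ...and in pthread_join...
//   ThreadJoin(thr, pc, ThreadTid(thr, pc, (uptr)th));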
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
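// Usage sketch (illustrative): a pthread_mutex interceptor maps directly
// onto these hooks; rec counts recursive acquisitions and all == true
// releases every recursion level at once:
//   MutexLock(thr, pc, (uptr)m);
//   MutexUnlock(thr, pc, (uptr)m);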
void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
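// Usage sketch (illustrative): a happens-before edge from one thread to
// another is established by pairing Release with a later Acquire on the
// same address, e.g. in a semaphore-style interceptor:
//   Release(thr_sender, pc, (uptr)sem);    // sem_post
//   Acquire(thr_receiver, pc, (uptr)sem);  // sem_wait returns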
// The hacky call uses custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       ".cfi_adjust_cfa_offset 1024;" \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       ".cfi_adjust_cfa_offset -1024;" \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
Trace *ThreadTrace(int tid);
746 extern "C" void __tsan_trace_switch();
747 void ALWAYS_INLINE
TraceAddEvent(ThreadState
*thr
, FastState fs
,
748 EventType typ
, u64 addr
) {
749 DCHECK_GE((int)typ
, 0);
750 DCHECK_LE((int)typ
, 7);
751 DCHECK_EQ(GetLsb(addr
, 61), addr
);
752 StatInc(thr
, StatEvents
);
753 u64 pos
= fs
.GetTracePos();
754 if (UNLIKELY((pos
% kTracePartSize
) == 0)) {
756 HACKY_CALL(__tsan_trace_switch
);
761 Event
*trace
= (Event
*)GetThreadTrace(fs
.tid());
762 Event
*evp
= &trace
[pos
];
763 Event ev
= (u64
)addr
| ((u64
)typ
<< 61);
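// Event encoding sketch (illustrative): each trace Event is a u64 whose top
// 3 bits hold the EventType and whose low 61 bits hold the PC or address;
// e.g. typ == EventTypeMop with addr == 0x400123 encodes as
// 0x400123 | ((u64)EventTypeMop << 61).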
}  // namespace __tsan

#endif  // TSAN_RTL_H