//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals).
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {
struct MapUnmapCallback;
typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker
// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }
  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }
  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }
  void SetHistorySize(int hs) {
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }
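
  // Illustrative sketch (not from the original header): with history size hs,
  // the thread trace is a ring buffer of 2^(hs + 1) parts, each holding
  // 2^kTracePartSizeBits events, and GetTracePos() simply masks the epoch
  // down to an index into that buffer. Assuming kTracePartSizeBits == 13:
  //
  //   FastState fs(/*tid=*/1, /*epoch=*/0);
  //   fs.SetHistorySize(0);   // 2 parts of 8192 events each
  //   // After 20000 IncrementEpoch() calls:
  //   //   20000 & ((1ull << (13 + 0 + 1)) - 1) == 3616
  //
  // A larger history size only widens the mask; the layout stays the same.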
 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }
  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }
  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }
  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }
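
  // Illustrative sketch (not part of the original header): the single
  // subtraction replaces two symmetric range checks. Example: s1 covers
  // addr0 = 2 with size 4 and s2 covers addr0 = 4 with access size 1. Then
  // diff = 2 - 4 = -2, so the first branch runs and s1.size() == 4 > 2,
  // i.e. the ranges [2,6) and [4,5) intersect. Swapping the arguments gives
  // diff = 2 with kS2AccessSize == 4 > 2, the same answer from the other
  // branch, which is the symmetry the two DCHECKs above exercise.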
  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible), we write
  // shadow values with the tid/epoch of the free and the freed bit set.
  // During memory access processing the freed bit is considered
  // as the msb of the tid. So any access races with a shadow value that has
  // the freed bit set (it is as if the write came from a thread with which we
  // have never synchronized before). This allows us to detect accesses to
  // freed memory without additional overhead in memory access processing,
  // and at the same time to restore the tid/epoch of the free.
  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }
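
  // Illustrative sketch (not part of the original header) of how the freed
  // bit is used: on free(), the runtime stamps the region's shadow with the
  // freeing thread's state plus kFreedBit, roughly
  //
  //   Shadow s(thr->fast_state);
  //   s.MarkAsFreed();   // assumed helper that sets kFreedBit
  //   s.SetWrite(true);
  //   // store s into every shadow cell of the freed range
  //
  // Because kFreedBit overlaps the tid's most significant bit, any later
  // access compares unequal on tid and has no happens-before edge to the
  // "free" write, so it is reported as a race, and the stored tid/epoch
  // still identify where the free happened.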
  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }
  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }
  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }
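
  // Illustrative note (not from the original header): bits kReadShift and
  // kAtomicShift form a 2-bit code for the access kind,
  //   0 = plain write, 1 = plain read, 2 = atomic write, 3 = atomic read,
  // ordered from "strongest" (most race-prone) to "weakest". That is why the
  // predicates above reduce to a bit test or an integer compare of this field
  // against u64((kIsWrite ^ 1) | (kIsAtomic << 1)).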
 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
struct SignalContext;

struct JmpBuf {
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};
// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, if fast_synch_epoch=100 and the last write to addr X was at
  // epoch=150, and we are processing a write to X from the same thread at
  // epoch=200, we do nothing, because both writes happen in the same
  // 'synch epoch'. That is, if another memory access does not race with the
  // former write, it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is 44 bits: 32 are taken by synch_epoch and 12 are
  // taken by the epoch between synchs.
  // This way we can save one load from TLS.
  u64 fast_synch_epoch;
  // This is a slow path flag. On the fast path, fast_state.GetIgnoreBit() is
  // read. We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  // Go does not support ignores.
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
  // C/C++ uses a fixed-size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
  InternalDeadlockDetector internal_deadlock_detector;
  DDPhysicalThread *dd_pt;
  DDLogicalThread *dd_lt;
  atomic_uintptr_t in_signal_handler;
  SignalContext *signal_ctx;
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  u32 creation_stack_id;
  // Epoch at which the thread started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared the tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnJoined(void *arg);
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnDetached(void *arg);
};
struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
  Suppression *supp;
};
struct Context {
  Context();

  bool after_multithreaded_fork;

  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  InternalMmapVector<FiredSuppression> fired_suppressions;

  ClockAlloc clock_alloc;

  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};
extern Context *ctx;  // The one and the only global runtime context.
struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
    cur_thread()->ignore_interceptors++;
  }

  ~ScopedIgnoreInterceptors() {
    cur_thread()->ignore_interceptors--;
  }
};
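
// Illustrative usage sketch (not from the original header): placing this RAII
// guard on the stack makes interceptors pass calls straight through to the
// real functions for the duration of the scope, e.g.
//
//   void InternalBookkeeping() {        // hypothetical caller, for illustration
//     ScopedIgnoreInterceptors ignore;  // re-entrant libc calls below are
//     // ... call malloc/free/mmap ...  // not processed by the race detector
//   }                                   // counter restored on scope exit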
class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddMemoryAccess(uptr addr, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  // The symbolizer makes lots of intercepted calls. If we tried to process
  // them, at best it would cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset);
template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
}
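
// Illustrative usage sketch (not from the original header): given a
// ThreadState *thr and the faulting pc, callers capture the current call
// stack from the per-thread shadow stack, optionally placing pc on top; if
// the stack is deeper than kStackTraceMax, the oldest frames are dropped so
// the top of the stack is preserved.
//
//   VarSizeStackTrace trace;
//   ObtainCurrentStack(thr, pc, &trace);  // trace now holds <= kStackTraceMax pcs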
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, const ScopedReport &srep,
                        StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif
u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);
const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;
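
// Illustrative note (not from the original header): the size log is simply
// log2 of the access size in bytes, so e.g. an instrumented 4-byte load maps
// to MemoryRead(thr, pc, addr, kSizeLog4), which the wrappers below forward
// to MemoryAccess with the kAccessIsWrite/kIsAtomic flags filled in.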
void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1,
               bool try_lock = false);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0 && defined(__x86_64__)
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif
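
// Illustrative usage sketch (not from the original header): the slow path of
// a hot inline function is routed through the thunk, e.g.
//
//   if (UNLIKELY((pos % kTracePartSize) == 0))
//     HACKY_CALL(__tsan_trace_switch);  // expands to "call __tsan_trace_switch_thunk"
//
// On the fast path the macro costs nothing beyond the branch, because the
// compiler does not see a call and does not spill registers for it.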
void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
Trace *ThreadTrace(int tid);
703 extern "C" void __tsan_trace_switch();
704 void ALWAYS_INLINE
TraceAddEvent(ThreadState
*thr
, FastState fs
,
705 EventType typ
, u64 addr
) {
706 if (!kCollectHistory
)
708 DCHECK_GE((int)typ
, 0);
709 DCHECK_LE((int)typ
, 7);
710 DCHECK_EQ(GetLsb(addr
, 61), addr
);
711 StatInc(thr
, StatEvents
);
712 u64 pos
= fs
.GetTracePos();
713 if (UNLIKELY((pos
% kTracePartSize
) == 0)) {
715 HACKY_CALL(__tsan_trace_switch
);
720 Event
*trace
= (Event
*)GetThreadTrace(fs
.tid());
721 Event
*evp
= &trace
[pos
];
722 Event ev
= (u64
)addr
| ((u64
)typ
<< 61);
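
// Illustrative note (not from the original header): each trace Event packs
// the event type into the top 3 bits and the pc/addr payload into the low
// 61 bits, which is what the DCHECKs above enforce. For example, a function
// entry at pc = 0x4011a0 is recorded as
//
//   TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, 0x4011a0);
//   // stored word: ((u64)EventTypeFuncEnter << 61) | 0x4011a0
//
// and RestoreStack() later replays these events to reconstruct the stack at
// an arbitrary epoch.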
}  // namespace __tsan