//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only into platform-specific
//     files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

// Descriptor of user's memory block.
struct MBlock {
  /*
  u64 mtx : 1;  // must be first
  u64 lst : 44;
  u64 stk : 31;  // on word boundary
  u64 tid : kTidBits;
  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
  */
  u64 raw[2];

  void Init(uptr siz, u32 tid, u32 stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
    raw[0] |= (u64)stk << (1 + 44);
    raw[1] |= (u64)stk >> (64 - 44 - 1);
    DCHECK_EQ(Size(), siz);
    DCHECK_EQ(Tid(), tid);
    DCHECK_EQ(StackId(), stk);
  }

  u64 Tid() const {
    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
  }

  uptr Size() const {
    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
  }

  u32 StackId() const {
    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
  }

  SyncVar *ListHead() const {
    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
  }

  void ListPush(SyncVar *v) {
    SyncVar *lst = ListHead();
    v->next = lst;
    u64 x = (u64)v ^ (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), v);
  }

  SyncVar *ListPop() {
    SyncVar *lst = ListHead();
    SyncVar *nxt = lst->next;
    lst->next = 0;
    u64 x = (u64)lst ^ (u64)nxt;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), nxt);
    return lst;
  }

  void ListReset() {
    SyncVar *lst = ListHead();
    u64 x = (u64)lst;
    x = (x >> 3) << 1;
    raw[0] ^= x;
    DCHECK_EQ(ListHead(), 0);
  }

  void Lock();
  void Unlock();
  typedef GenericScopedLock<MBlock> ScopedLock;
};
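
// Packing example (illustrative, assuming kTidBits == 13): the 31-bit stk
// field straddles the 64-bit word boundary, so Init() splits it between
// bits 45..63 of raw[0] and bits 0..11 of raw[1]; tid then occupies bits
// 12..24 and siz bits 25..63 of raw[1]:
//   MBlock b;
//   b.Init(/*siz=*/256, /*tid=*/5, /*stk=*/42);
//   // b.Size() == 256, b.Tid() == 5, b.StackId() == 42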

#ifndef TSAN_GO
#if defined(TSAN_COMPAT_SHADOW) && TSAN_COMPAT_SHADOW
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#else
const uptr kAllocatorSpace = 0x7d0000000000ULL;
#endif
const uptr kAllocatorSize  =  0x10000000000ULL;  // 1T.

struct MapUnmapCallback;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
    DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif
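
// Illustrative allocation flow (a sketch, not the actual call sites): small
// blocks are served from the per-thread AllocatorCache backed by
// PrimaryAllocator; oversized requests fall through to SecondaryAllocator:
//   AllocatorCache *cache = &thr->alloc_cache;
//   void *p = allocator()->Allocate(cache, size, /*alignment=*/8);
//   ...
//   allocator()->Deallocate(cache, p);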

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   unused          : -
//   history_size    : 3
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch << kClkShift;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = (x_ << (kTidBits + 1)) >> (64 - kClkBits);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1 << kClkShift;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~7) | hs;
  }

  int GetHistorySize() const {
    return (int)(x_ & 7);
  }

  void ClearHistorySize() {
    x_ &= ~7;
  }

  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }
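
  // Example (illustrative, assuming kTracePartSizeBits == 14): hs == 2 gives
  // mask == (1ull << 17) - 1, so the trace wraps every 2^17 events
  // (2^(hs+1) == 8 parts of 2^14 events each) and GetTracePos() is simply
  // the epoch truncated to that window.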

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const int kClkShift = kTidShift - kClkBits;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  u64 x_;
};
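
// Example (illustrative, assuming kTidBits == 13 and kClkBits == 42, so
// kTidShift == 50 and kClkShift == 8):
//   FastState fs(/*tid=*/3, /*epoch=*/100);  // tid in bits 50..62,
//   fs.IncrementEpoch();                     // epoch in bits 8..49
//   // fs.tid() == 3, fs.epoch() == 101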

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   epoch           : kClkBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ(x_ & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= (kAccessSizeLog << 3) | addr0;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static inline bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = (s1.x_ ^ s2.x_) & 31;
    return masked_xor == 0;
  }

  static inline bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1) > s2.addr0()) return true;
      if (s1.size() > -diff)  res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff) res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSLOW(s2, s1));
    return res;
  }

  // The idea behind the offset is as follows.
  // Consider that we have 8 bool's contained within a single 8-byte block
  // (mapped to a single shadow "cell"). Now consider that we write to the
  // bools from a single thread (which we consider the common case).
  // W/o offsetting each access will have to scan 4 shadow values on average
  // to find the corresponding shadow value for the bool.
  // With offsetting we start scanning the shadow at the offset, so that
  // each access hits the necessary shadow straight away (at least in the
  // expected case).
  // This logic works seamlessly for any layout of user data. For example,
  // if user data is {int, short, char, char}, then accesses to the int are
  // offsetted to 0, short - 4, 1st char - 6, 2nd char - 7. Hopefully, accesses
  // from a single thread won't need to scan all 8 shadow values.
  unsigned ComputeSearchOffset() {
    return x_ & 7;
  }
  u64 addr0() const { return x_ & 7; }
  u64 size() const { return 1ull << size_log(); }
  bool IsWrite() const { return !IsRead(); }
  bool IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise becomes inaccessible) we write to
  // the shadow values with the tid/epoch of the free and with the freed bit
  // set. During memory access processing the freed bit is treated as the msb
  // of the tid, so any access races with a shadow value that has the freed
  // bit set (it is as if the write came from a thread with which we have
  // never synchronized before). This allows us to detect accesses to freed
  // memory w/o additional overhead in memory access processing, and at the
  // same time to restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }
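
  // Example (illustrative): on free(), MemoryRangeFreed() (declared below)
  // stamps the range's shadow with the freeing thread's FastState plus
  // MarkAsFreed(); a later access to the range then reports a race whose
  // "previous access" carries the tid/epoch of the free.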

  bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    // analyzes 5-th bit (is_read) and 6-th bit (is_atomic)
    bool v = x_ & u64(((kIsWrite ^ 1) << kReadShift)
        | (kIsAtomic << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }
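
  // The two bits at kReadShift order accesses by "strength" (illustrative):
  //   0 = plain write, 1 = plain read, 2 = atomic write, 3 = atomic read;
  // a larger value is a weaker access (less race-prone), so the <= / >=
  // comparisons above implement the strength ordering with a single integer
  // compare.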

 private:
  static const u64 kReadShift   = 5;
  static const u64 kReadBit     = 1ull << kReadShift;
  static const u64 kAtomicShift = 6;
  static const u64 kAtomicBit   = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> 3) & 3; }

  static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
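
// Example (illustrative): encoding a plain 4-byte write to the second half
// of an 8-byte block (addr0 == 4; kSizeLog4 is defined further below):
//   Shadow cur(thr->fast_state);
//   cur.SetAddr0AndSizeLog(/*addr0=*/4, kSizeLog4);
//   cur.SetWrite(true);
//   cur.SetAtomic(false);
// This mirrors how MemoryAccess() builds the shadow value for an access.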

struct SignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  uptr *shadow_stack_pos;
};

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is a 44-bit, 32 are taken by synch_epoch and 12 are
  // taken by epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
#ifndef TSAN_GO
  // C/C++ uses an embedded shadow stack of fixed size.
  uptr shadow_stack[kShadowStackSize];
#else
  // Go uses a satellite shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#endif
  MutexSet mset;
  ThreadClock clock;
#ifndef TSAN_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
  Vector<JmpBuf> jmp_bufs;
#endif
  u64 stat[StatCnt];
  const int tid;
  const int unique_id;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;

  DeadlockDetector deadlock_detector;

  bool in_signal_handler;
  SignalContext *signal_ctx;

#ifndef TSAN_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

Context *CTX();

#ifndef TSAN_GO
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState*>(&cur_thread_placeholder);
}
#endif
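
// cur_thread_placeholder is a raw THREADLOCAL byte buffer rather than a
// THREADLOCAL ThreadState object: per the ground rules above, the runtime
// must avoid static constructors, so the ThreadState is placement-constructed
// into this buffer during thread startup.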

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
#ifdef TSAN_GO
  StackTrace creation_stack;
#else
  u32 creation_stack_id;
#endif
  SyncClock sync;
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead();
  void OnJoined(void *arg);
  void OnFinished();
  void OnStarted(void *arg);
  void OnCreated(void *arg);
  void OnReset();
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc;
};

struct Context {
  Context();

  bool initialized;

  SyncTab synctab;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  ThreadRegistry *thread_registry;

  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  InternalMmapVector<FiredSuppression> fired_suppressions;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

class ScopedReport {
 public:
  explicit ScopedReport(ReportType typ);
  ~ScopedReport();

  void AddStack(const StackTrace *stack);
  void AddMemoryAccess(uptr addr, Shadow s, const StackTrace *stack,
                       const MutexSet *mset);
  void AddThread(const ThreadContext *tctx);
  void AddMutex(const SyncVar *s);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 private:
  Context *ctx_;
  ReportDesc *rep_;

  void AddMutex(u64 id);

  ScopedReport(const ScopedReport&);
  void operator = (const ScopedReport&);
};

void RestoreStack(int tid, const u64 epoch, StackTrace *stk, MutexSet *mset);

void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
  if (kCollectStats)
    thr->stat[typ] += n;
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
  if (kCollectStats)
    thr->stat[typ] = n;
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeDynamicAnnotations();

void ReportRace(ThreadState *thr);
bool OutputReport(Context *ctx,
                  const ScopedReport &srep,
                  const ReportStack *suppress_stack1 = 0,
                  const ReportStack *suppress_stack2 = 0,
                  const ReportLocation *suppress_loc = 0);
bool IsFiredSuppression(Context *ctx,
                        const ScopedReport &srep,
                        const StackTrace &trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();
bool FrameIsInternal(const ReportStack *frame);
ReportStack *SkipTsanInternalFrames(ReportStack *ent);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow();  // uses libunwind

void Initialize(ThreadState *thr);
int Finalize(ThreadState *thr);

SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
                     bool write_lock, bool create);
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
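
// Example (illustrative): the __tsan_write4() entry point reduces to roughly
//   MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
// which in turn is MemoryAccess(thr, pc, addr, kSizeLog4, true, false).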

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void ThreadIgnoreBegin(ThreadState *thr);
void ThreadIgnoreEnd(ThreadState *thr);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, uptr os_id);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);

void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
                 bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec = 1);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all = false);
void MutexReadLock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);

// The hacky call uses a custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if TSAN_DEBUG == 0
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024b must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset 1024;*/" \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       "/*.cfi_adjust_cfa_offset -1024;*/" \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
Trace *ThreadTrace(int tid);
737 extern "C" void __tsan_trace_switch();
738 void ALWAYS_INLINE
TraceAddEvent(ThreadState
*thr
, FastState fs
,
739 EventType typ
, u64 addr
) {
740 DCHECK_GE((int)typ
, 0);
741 DCHECK_LE((int)typ
, 7);
742 DCHECK_EQ(GetLsb(addr
, 61), addr
);
743 StatInc(thr
, StatEvents
);
744 u64 pos
= fs
.GetTracePos();
745 if (UNLIKELY((pos
% kTracePartSize
) == 0)) {
747 HACKY_CALL(__tsan_trace_switch
);
752 Event
*trace
= (Event
*)GetThreadTrace(fs
.tid());
753 Event
*evp
= &trace
[pos
];
754 Event ev
= (u64
)addr
| ((u64
)typ
<< 61);
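
// Event layout (illustrative): the top 3 bits of the u64 hold the EventType
// and the low 61 bits hold the PC/address, hence the DCHECKs above; e.g. a
// function entry is encoded as (u64)pc | ((u64)EventTypeFuncEnter << 61).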

}  // namespace __tsan

#endif  // TSAN_RTL_H