1 //===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 //===----------------------------------------------------------------------===//
15 #include "tsan_defs.h"
16 #include "tsan_ilist.h"
17 #include "tsan_mutexset.h"
18 #include "tsan_stack_trace.h"
// Trace geometry: a thread trace holds kTraceSize events, divided into
// kTraceParts equal parts of kTracePartSize events each.
const int kTracePartSizeBits = 13;
const int kTracePartSize = 1 << kTracePartSizeBits;        // 8192 events/part
const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;  // 256 parts
const int kTraceSize = kTracePartSize * kTraceParts;       // 2M events total
27 // Must fit into 3 bits.
38 // Represents a thread event (from most significant bit):
39 // u64 typ : 3; // EventType.
40 // u64 addr : 61; // Associated pc.
43 const uptr kEventPCBits
= 61;
// NOTE(review): two `stack0` declarations appear back to back below; upstream
// they are alternatives selected by a !SANITIZER_GO preprocessor conditional
// that was lost in this excerpt — confirm against the full header.
47 BufferedStackTrace stack0
; // Start stack for the trace.
49 VarSizeStackTrace stack0
;
51 u64 epoch0
; // Start epoch for the trace.
// Default ctor value-initializes the start stack and the start epoch.
54 TraceHeader() : stack0(), epoch0() {}
// NOTE(review): fragment of the (older) Trace struct; the enclosing
// `struct Trace {` line, the `mtx` member used by the ctor below, and the
// closing brace are not visible in this excerpt.
60 // Must be last to catch overflow as paging fault.
61 // Go shadow stack is dynamically allocated.
62 uptr shadow_stack
[kShadowStackSize
];
64 // Must be the last field, because we unmap the unused part in
65 // CreateThreadContext.
66 TraceHeader headers
[kTraceParts
];
// Ctor initializes the trace mutex with its lock-order type.
68 Trace() : mtx(MutexTypeTrace
) {}
// Event type tag stored in the 3-bit `type` bitfields of the Event* structs
// below. NOTE(review): the enumerator list, and the `Event` base struct the
// static_assert below refers to, appear to have been elided from this
// excerpt — confirm against the full header.
73 enum class EventType
: u64
{
82 // "Base" type for all events for type dispatch.
84 // We use variable-length type encoding to give more bits to some event
85 // types that need them. If is_access is set, this is EventAccess.
86 // Otherwise, if is_func is set, this is EventFunc.
87 // Otherwise type denotes the type.
93 static_assert(sizeof(Event
) == 8, "bad Event size");
95 // Nop event used as padding and does not affect state during replay.
96 static constexpr Event NopEvent
= {1, 0, EventType::kAccessExt
, 0};
98 // Compressed memory access can represent only some events with PCs
99 // close enough to each other. Otherwise we fall back to EventAccessExt.
// NOTE(review): the `struct EventAccess {` opener and some bitfields
// (between is_access and pc_delta, judging by the comments) are missing from
// this excerpt; the bitfields below must still total exactly 64 bits per the
// static_assert.
101 static constexpr uptr kPCBits
= 15;
103 u64 is_access
: 1; // = 1
107 u64 pc_delta
: kPCBits
; // signed delta from the previous memory access PC
108 u64 addr
: kCompressedAddrBits
;
110 static_assert(sizeof(EventAccess
) == 8, "bad EventAccess size");
112 // Function entry (pc != 0) or exit (pc == 0).
// NOTE(review): the `struct EventFunc {` opener, the pc field, and the
// closing brace are not visible in this excerpt.
114 u64 is_access
: 1; // = 0
115 u64 is_func
: 1; // = 1
118 static_assert(sizeof(EventFunc
) == 8, "bad EventFunc size");
120 // Extended memory access with full PC.
// NOTE(review): several fields (between `type` and `addr`, plus the full-pc
// word implied by the 16-byte size below) and the closing `};` are elided
// from this excerpt.
121 struct EventAccessExt
{
122 u64 is_access
: 1; // = 0
123 u64 is_func
: 1; // = 0
124 EventType type
: 3; // = EventType::kAccessExt
129 u64 addr
: kCompressedAddrBits
;
132 static_assert(sizeof(EventAccessExt
) == 16, "bad EventAccessExt size");
134 // Access to a memory range.
// The range size is split into size_lo (13 bits) and size_hi so that all
// bitfields pack into the 16 bytes checked below.
// NOTE(review): some fields (between `type` and `size_lo`) and the closing
// `};` are not visible in this excerpt.
135 struct EventAccessRange
{
136 static constexpr uptr kSizeLoBits
= 13;
138 u64 is_access
: 1; // = 0
139 u64 is_func
: 1; // = 0
140 EventType type
: 3; // = EventType::kAccessRange
143 u64 size_lo
: kSizeLoBits
;
144 u64 pc
: kCompressedAddrBits
;
145 u64 addr
: kCompressedAddrBits
;
146 u64 size_hi
: 64 - kCompressedAddrBits
;
148 static_assert(sizeof(EventAccessRange
) == 16, "bad EventAccessRange size");
// Mutex lock event fragment. The acquisition stack id is split into
// stack_lo (15 bits) and stack_hi so the fields pack into 16 bytes.
// NOTE(review): the `struct EventLock {` opener, its descriptive comment,
// and the closing `};` are not visible in this excerpt.
152 static constexpr uptr kStackIDLoBits
= 15;
154 u64 is_access
: 1; // = 0
155 u64 is_func
: 1; // = 0
156 EventType type
: 3; // = EventType::kLock or EventType::kRLock
157 u64 pc
: kCompressedAddrBits
;
158 u64 stack_lo
: kStackIDLoBits
;
159 u64 stack_hi
: sizeof(StackID
) * kByteBits
- kStackIDLoBits
;
161 u64 addr
: kCompressedAddrBits
;
163 static_assert(sizeof(EventLock
) == 16, "bad EventLock size");
// Mutex unlock event fragment: tag bits plus the mutex address.
// NOTE(review): the `struct EventUnlock {` opener, its descriptive comment,
// padding fields, and the closing `};` are not visible in this excerpt.
167 u64 is_access
: 1; // = 0
168 u64 is_func
: 1; // = 0
169 EventType type
: 3; // = EventType::kUnlock
171 u64 addr
: kCompressedAddrBits
;
173 static_assert(sizeof(EventUnlock
) == 8, "bad EventUnlock size");
175 // Time change event.
// Carries a slot id (sid) and an epoch; the trailing `_` bitfield pads the
// record to exactly 64 bits (5 tag bits + sid bits + epoch bits + padding).
// NOTE(review): the `struct EventTime {` opener and closing `};` are not
// visible in this excerpt.
177 u64 is_access
: 1; // = 0
178 u64 is_func
: 1; // = 0
179 EventType type
: 3; // = EventType::kTime
180 u64 sid
: sizeof(Sid
) * kByteBits
;
181 u64 epoch
: kEpochBits
;
182 u64 _
: 64 - 5 - sizeof(Sid
) * kByteBits
- kEpochBits
;
184 static_assert(sizeof(EventTime
) == 8, "bad EventTime size");
// Fragment of the newer TraceHeader: back-pointer to the owning Trace and
// an intrusive-list node linking this part into Trace::parts.
// NOTE(review): the enclosing `struct TraceHeader {` line and closing `};`
// are not visible in this excerpt.
189 Trace
* trace
= nullptr; // back-pointer to Trace containing this part
190 INode trace_parts
; // in Trace::parts
// One fixed-size (256 KiB) page-aligned chunk of a thread's event trace;
// kSize is the event capacity left after the inherited TraceHeader.
// NOTE(review): the `events[kSize]` array the comment below refers to, the
// default ctor, and the closing `};` are not visible in this excerpt.
193 struct TracePart
: TraceHeader
{
194 static constexpr uptr kByteSize
= 256 << 10;
195 static constexpr uptr kSize
=
196 (kByteSize
- sizeof(TraceHeader
)) / sizeof(Event
);
197 // TraceAcquire does a fast event pointer overflow check by comparing
198 // pointer into TracePart::events with kAlignment mask. Since TracePart's
199 // are allocated page-aligned, this check detects end of the array
200 // (it also has false positives in the middle that are filtered separately).
201 // This also requires events to be the last field.
202 static constexpr uptr kAlignment
= 0xff0;
207 static_assert(sizeof(TracePart
) == TracePart::kByteSize
, "bad TracePart size");
// Fragment of the newer Trace: the intrusive list of this thread's trace
// parts, keyed through TraceHeader::trace_parts.
// NOTE(review): the enclosing `struct Trace {` line, the declaration that
// the orphaned `nullptr;` below initializes (presumably an `Event*` final
// position pointer — confirm upstream), the `mtx` member used by the ctor,
// and the closing `};` are not visible in this excerpt.
211 IList
<TraceHeader
, &TraceHeader::trace_parts
, TracePart
> parts
;
213 nullptr; // final position in the last part for finished threads
215 Trace() : mtx(MutexTypeTrace
) {}
220 } // namespace __tsan
222 #endif // TSAN_TRACE_H