//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
  CHECK_EQ(sync.size(), 0);
}

void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->proc()->clock_cache);
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  if (!args->thr)  // GCD workers don't have a parent thread.
    return;
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  ReleaseMemoryToOS(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}

void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState *>(arg);
  sync.Reset(&thr1->proc()->clock_cache);
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs *>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
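  // Worked example (added for exposition, values illustrative only): if
  // kTracePartSize were 0x2000 and the previous reincarnation of this
  // context stopped at epoch1 == 0x2f00, then
  // epoch0 == RoundUp(0x2f01, 0x2000) == 0x4000, i.e. the first epoch of
  // a fresh trace part.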
  epoch1 = (u64)-1;
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#if !SANITIZER_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  StatInc(thr, StatSyncAcquire);
  sync.Reset(&thr->proc()->clock_cache);
  thr->is_inited = true;
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
}

void ThreadContext::OnFinished() {
#if SANITIZER_GO
  internal_free(thr->shadow_stack);
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
#endif
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  thr->~ThreadState();
#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
#endif
  thr = 0;
}

#if !SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
  int tid =
      ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

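// Illustrative call sequence (not part of this file): the pthread_create
// interceptor first registers the thread from the parent, then the child
// announces itself once it is running:
//   int tid = ThreadCreate(parent_thr, pc, (uptr)user_handle,
//                          /*detached=*/false);
//   ...
//   ThreadStart(child_thr, tid, os_id);  // on the new thread
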
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#if !SANITIZER_GO
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) {
      // Check that the thr object is in tls;
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryRangeImitateWrite(thr, /*pc=*/ 2,
          thr_end, tls_addr + tls_size - thr_end);
    }
  }
#endif

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif
}

void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
  ctx->thread_registry->FinishThread(thr->tid);
}

static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid, thr);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}

void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;
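
  // Example of the decomposition below (added for exposition), assuming
  // kShadowCell == 8: an access to [13, 29) is split into a byte-wise
  // head [13, 16), one full 8-byte cell [16, 24), and a byte-wise tail
  // [24, 29).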
  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan