//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

ThreadContext::~ThreadContext() {
}

void ThreadContext::OnDead() {
  CHECK_EQ(sync.size(), 0);
}
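
// Called when another thread joins this one: the joiner (caller_thr) acquires
// the vector clock that the finished thread released into sync, so everything
// the joined thread did happens-before the join.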
void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->clock_cache);
}
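
// Thread creation: the creating thread releases its clock into this context's
// sync variable, so that the new thread can acquire it in OnStarted and see
// everything that was done before the create.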
struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

void ThreadContext::OnCreated(void *arg) {
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  if (!args->thr)  // GCD workers don't have a parent thread.
    return;
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  StatInc(args->thr, StatThreadMaxTid);
}
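
// Called when this thread context is recycled for reuse: the per-thread trace
// memory is not needed anymore and is released back to the OS.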
void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}

void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState *>(arg);
  sync.Reset(&thr1->clock_cache);
}
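
// Called when the new thread actually starts running: (re)constructs the
// ThreadState in place, sets up the shadow call stack, and acquires the clock
// that the creator released in OnCreated.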
struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs *>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifndef SANITIZER_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
  AllocatorThreadStart(thr);
  if (common_flags()->detect_deadlocks) {
    thr->dd_pt = ctx->dd->CreatePhysicalThread();
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  }
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  StatInc(thr, StatSyncAcquire);
  sync.Reset(&thr->clock_cache);
  thr->is_inited = true;
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
}
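
// Called when the thread finishes: it releases its clock into sync (so that a
// later join can acquire it in OnJoined) and tears down per-thread state.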
void ThreadContext::OnFinished() {
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, 0, &sync);
  epoch1 = thr->fast_state.epoch();
  if (common_flags()->detect_deadlocks) {
    ctx->dd->DestroyPhysicalThread(thr->dd_pt);
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  }
  ctx->clock_alloc.FlushCache(&thr->clock_cache);
  ctx->metamap.OnThreadIdle(thr);
  AllocatorThreadFinish(thr);
#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
#endif
}
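
// Thread leak detection: a thread that has finished but was neither joined
// nor detached still holds its tid slot. Finished threads are grouped by
// creation stack id, with a count per unique creation stack.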
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
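
// If a thread exits with ignores still enabled (a begin without a matching
// end), print where each ignore was enabled so the missing end can be found.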
#ifndef SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}
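
// Registers a new thread in the thread registry; the registry invokes
// OnCreated (above) with these args, which performs the release from the
// creating thread.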
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
  int tid =
      ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}
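
// Runs in the context of the new thread itself: imitates writes to the stack
// and TLS ranges (so that stale shadow values left there do not produce false
// races), then starts the thread in the registry, which runs OnStarted.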
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (stk_addr && stk_size)
    MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

  if (tls_addr && tls_size) {
    // Check that the thr object is in tls;
    const uptr thr_beg = (uptr)thr;
    const uptr thr_end = (uptr)thr + sizeof(*thr);
    CHECK_GE(thr_beg, tls_addr);
    CHECK_LE(thr_beg, tls_addr + tls_size);
    CHECK_GE(thr_end, tls_addr);
    CHECK_LE(thr_end, tls_addr + tls_size);
    // Since the thr object is huge, skip it.
    MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
    MemoryRangeImitateWrite(thr, /*pc=*/ 2,
        thr_end, tls_addr + tls_size - thr_end);
  }

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();

  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
}
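
// Called when the thread is about to terminate: shadow memory for its stack
// and TLS is not needed anymore, and FinishThread switches the context to the
// finished state, running OnFinished.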
void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  ctx->thread_registry->FinishThread(thr->tid);
}

static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid, thr);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}
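
// Processes a range access [addr, addr + size) as a sequence of individual
// shadow accesses: an unaligned prefix one byte at a time, then whole
// kShadowCell-sized cells, then an unaligned suffix one byte at a time.
// For example (with kShadowCell == 8), an 11-byte write starting at an
// address ending in ...6 is handled as two 1-byte accesses, one 8-byte
// access, and one final 1-byte access.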
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan