//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

ThreadContext::~ThreadContext() {
}

void ThreadContext::OnDead() {
  CHECK_EQ(sync.size(), 0);
}

void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->clock_cache);
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};
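
// Runs in the creator thread (via ThreadCreate() below): the creator's
// current vector clock is released into this context's sync clock so that
// the child thread can acquire it in OnStarted().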
void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}

void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState*>(arg);
  sync.Reset(&thr1->clock_cache);
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};
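
// Runs in the context of the starting thread (via ThreadStart() below).
// (Re)constructs the ThreadState for this tid slot, sets up the shadow
// stack and trace, and acquires the sync clock published by the creator in
// OnCreated(). epoch0 is rounded up to a trace part boundary so that a
// single trace part never mixes events from two threads (e.g. if
// kTracePartSize were 16K, epoch1 == 40000 would give epoch0 == 49152).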
void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifndef SANITIZER_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
#ifndef SANITIZER_GO
  AllocatorThreadStart(thr);
#endif
  if (common_flags()->detect_deadlocks) {
    thr->dd_pt = ctx->dd->CreatePhysicalThread();
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  }
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  StatInc(thr, StatSyncAcquire);
  sync.Reset(&thr->clock_cache);
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
}

void ThreadContext::OnFinished() {
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

  if (common_flags()->detect_deadlocks) {
    ctx->dd->DestroyPhysicalThread(thr->dd_pt);
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  }
  ctx->clock_alloc.FlushCache(&thr->clock_cache);
  ctx->metamap.OnThreadIdle(thr);
#ifndef SANITIZER_GO
  AllocatorThreadFinish(thr);
#endif
  thr->~ThreadState();
  StatAggregate(ctx->stat, thr->stat);
  thr = 0;
}
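
// Thread leak detection: finished but never-joined (and not detached)
// threads are grouped by creation stack, so each leaky creation site is
// reported once together with a count.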
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}

static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

#ifndef SANITIZER_GO
static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif
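
// Final per-process thread accounting: checks for dangling ignores on the
// current thread and, if report_thread_leaks is set, reports threads that
// finished but were never joined.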
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}
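
// Called on the thread that is starting (including the main thread during
// startup). Stack and TLS ranges are "imitate-written" so that shadow left
// over from a previous occupant of this memory does not produce false
// races; the huge ThreadState object living in TLS is skipped.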
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) {
      // Check that the thr object is in tls.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryRangeImitateWrite(thr, /*pc=*/ 2,
          thr_end, tls_addr + tls_size - thr_end);
    }
  }

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();

  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
}

void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  ctx->thread_registry->FinishThread(thr->tid);
}

static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid, thr);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}
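
// Instruments an access to the range [addr, addr + size). The range is
// split into an unaligned head, a run of whole 8-byte shadow cells and an
// unaligned tail; each piece is passed to MemoryAccessImpl with the
// matching access-size log (0 for single bytes, 3 for full cells).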
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan