//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
    : ThreadContextBase(tid), thr(), sync(), epoch0(), epoch1() {
}

#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
  CHECK_EQ(sync.size(), 0);
}

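// Called on thread join (under the thread registry lock): the joiner
// acquires the vector clock that the finished thread released into `sync`
// in OnFinished, which establishes the join happens-before edge.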
void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->proc()->clock_cache);
}

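// Thread creation is the release half of the create/start synchronization:
// OnCreated releases the parent's clock into this context's `sync` clock,
// and OnStarted acquires it in the child. As a consequence, e.g. a value
// written by the parent before pthread_create is visible to the child
// without a race report.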
struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  if (!args->thr)  // GCD workers don't have a parent thread.
    return;
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  uptr trace_p = GetThreadTrace(tid);
  ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
  //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}

void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState *>(arg);
  sync.Reset(&thr1->proc()->clock_cache);
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

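// Runs in the context of the new thread: (re)constructs the possibly reused
// ThreadState in place, points it at a fresh trace part, and acquires the
// creator's clock that OnCreated released into `sync`.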
void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs *>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#if !SANITIZER_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr *)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  StatInc(thr, StatSyncAcquire);
  sync.Reset(&thr->proc()->clock_cache);
  thr->is_inited = true;
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
}

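// Runs when the thread finishes: unless the thread is detached, its final
// clock is released into `sync` here so that a later join (OnJoined) can
// acquire it.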
void ThreadContext::OnFinished() {
#if SANITIZER_GO
  internal_free(thr->shadow_stack);
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
#endif
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  thr->clock.ResetCached(&thr->proc()->clock_cache);
#if !SANITIZER_GO
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
#endif
  thr->~ThreadState();
#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
#endif
  thr = 0;
}

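// Thread leak detection: a thread that has finished but was neither joined
// nor detached is reported as leaked. Leaks are deduplicated by creation
// stack, so N threads leaked from the same spawn site yield one report with
// count N.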
#if !SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak> *)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
           " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
         " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

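// Called at process shutdown: complains about ignores that are still
// enabled and, unless report_thread_leaks is disabled, reports leaked
// threads.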
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
  int tid =
      ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

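// Binds the new OS thread to its tsan tid. Stack and TLS ranges are
// imitation-written so that stale shadow values left by a previous owner of
// that memory do not produce false reports.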
void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#if !SANITIZER_GO
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, workerthread, &args);

  tr->Lock();
  thr->tctx = (ThreadContext *)tr->GetThreadLocked(tid);
  tr->Unlock();

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif
}

void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
  ctx->thread_registry->FinishThread(thr->tid);
}

static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid, thr);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}

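// Instruments a range access as a sequence of cell-sized accesses: 1-byte
// accesses up to the first shadow-cell boundary, whole 8-byte cells in the
// middle, then 1-byte accesses for the tail. For example (assuming
// kShadowCell == 8): a 13-byte access starting at addr % 8 == 6 becomes two
// 1-byte head accesses, one 8-byte cell access, and three 1-byte tail
// accesses.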
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64 *)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
           thr->tid, (void *)pc, (void *)addr, (int)size, is_write);

#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    DCHECK(!is_write);
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan