//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

#ifndef TSAN_GO
ThreadContext::~ThreadContext() {
}
#endif

void ThreadContext::OnDead() {
  sync.Reset();
}

void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset();
}

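// Note: OnCreated() runs on the creator thread while the new context is
// being registered. Releasing the creator's clock into this context's sync
// variable here, and acquiring it in OnStarted() on the new thread, is what
// establishes the happens-before edge from the code preceding
// pthread_create() to the start of the new thread.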
struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
#ifdef TSAN_GO
  creation_stack.ObtainCurrent(args->thr, args->pc);
#else
  creation_stack_id = CurrentStackId(args->thr, args->pc);
#endif
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

void ThreadContext::OnReset() {
  sync.Reset();
  FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
}

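// OnStarted() runs on the new thread itself, with the stack/TLS ranges that
// ThreadStart() collected. It places the thread at a fresh trace part (see
// the RoundUp below), placement-constructs the ThreadState, and acquires
// the clock that OnCreated() released.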
struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(CTX(), tid, unique_id,
      epoch0, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#ifdef TSAN_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
#ifndef TSAN_GO
  AllocatorThreadStart(thr);
#endif
  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  thr->fast_state.SetHistorySize(flags()->history_size);
  const uptr trace = (epoch0 / kTracePartSize) % TraceParts();
  Trace *thr_trace = ThreadTrace(thr->tid);
  thr_trace->headers[trace].epoch0 = epoch0;
  StatInc(thr, StatSyncAcquire);
  sync.Reset();
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
  thr->is_alive = true;
}

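// OnFinished() is the release half of thread join: everything the thread
// did up to its final epoch is released into sync here and acquired by
// OnJoined() on the joining thread.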
void ThreadContext::OnFinished() {
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

#ifndef TSAN_GO
  AllocatorThreadFinish(thr);
#endif
  thr->~ThreadState();
  StatAggregate(CTX()->stat, thr->stat);
  thr = 0;
}

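// Thread-leak reporting: at process exit, finished but unjoined threads are
// collected and deduplicated by creation stack, so that N leaked threads
// created from the same place produce a single report with a count.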
#ifndef TSAN_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

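// A thread must not finish with ignores still enabled: an unmatched
// ignore-begin would mean races were being silently suppressed, so this is
// reported and treated as a fatal usage error (note the Die() below).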
#ifndef TSAN_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
        " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
      " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

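// Called once at process shutdown, not per thread: checks the main thread's
// ignores and, unless report_thread_leaks is disabled, reports finished but
// unjoined threads.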
void ThreadFinalize(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  ThreadCheckIgnore(thr);
#ifndef TSAN_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(CTX()->thread_registry);
  Vector<ThreadLeak> leaks(MBlockScopedBuf);
  CTX()->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx);
    rep.SetCount(leaks[i].count);
    OutputReport(CTX(), rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

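// How the interceptors drive these functions (illustrative sketch only, not
// verbatim interceptor code; names like new_pthread_t/child_thr are made up
// and error handling is omitted):
//
//   // Parent, inside the pthread_create() interceptor:
//   int tid = ThreadCreate(thr, pc, (uptr)new_pthread_t, detached);
//   // Child, first thing inside the thread start wrapper:
//   ThreadStart(child_thr, tid, /*os_id=*/GetTid());
//   // ... user start routine runs ...
//   // Child, when the start routine returns:
//   ThreadFinish(child_thr);
//   // Joiner, inside the pthread_join() interceptor:
//   ThreadJoin(thr, pc, ThreadTid(thr, pc, (uptr)joined_pthread_t));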
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadCreate);
  Context *ctx = CTX();
  OnCreatedArgs args = { thr, pc };
  int tid = ctx->thread_registry->CreateThread(uid, detached, thr->tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) {
      // Check that the thr object is in tls.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryRangeImitateWrite(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryRangeImitateWrite(thr, /*pc=*/ 2,
          thr_end, tls_addr + tls_size - thr_end);
    }
  }

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();
}

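// ThreadFinish() runs on the exiting thread: it releases the shadow for the
// thread's stack and TLS (those ranges may be reused by future threads) and
// marks the context finished; the ThreadState itself is torn down in
// OnFinished().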
void ThreadFinish(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_alive = false;
  Context *ctx = CTX();
  ctx->thread_registry->FinishThread(thr->tid);
}

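// Registry callback used by ThreadTid() below: matches a context by the
// user-visible id (the pthread_t). Clearing user_id on a match ensures the
// same context cannot be found twice, e.g. if the pthread_t value is later
// reused.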
static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  Context *ctx = CTX();
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  Context *ctx = CTX();
  ctx->thread_registry->DetachThread(tid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  CHECK_GT(thr->in_rtl, 0);
  CTX()->thread_registry->SetThreadName(thr->tid, name);
}

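// Instruments a range access (memset/memcpy/etc.) by splitting it along
// 8-byte shadow cells (kShadowCell) into up to three parts: an unaligned
// head of byte accesses, a body of whole-cell 8-byte accesses, and an
// unaligned tail of byte accesses. E.g. with kShadowCell == 8, addr=0x1006
// and size=20 split into head bytes 0x1006-0x1007, body cells at 0x1008 and
// 0x1010, and tail bytes 0x1018-0x1019.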
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan