1 //===-- asan_thread.cc ----------------------------------------------------===//
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
6 //===----------------------------------------------------------------------===//
8 // This file is a part of AddressSanitizer, an address sanity checker.
10 // Thread-related code.
11 //===----------------------------------------------------------------------===//
12 #include "asan_allocator.h"
13 #include "asan_interceptors.h"
14 #include "asan_poisoning.h"
15 #include "asan_stack.h"
16 #include "asan_thread.h"
17 #include "asan_mapping.h"
18 #include "sanitizer_common/sanitizer_common.h"
19 #include "sanitizer_common/sanitizer_placement_new.h"
20 #include "sanitizer_common/sanitizer_stackdepot.h"
21 #include "sanitizer_common/sanitizer_tls_get_addr.h"
22 #include "lsan/lsan_common.h"
// AsanThreadContext implementation.
28 void AsanThreadContext::OnCreated(void *arg
) {
29 CreateThreadContextArgs
*args
= static_cast<CreateThreadContextArgs
*>(arg
);
31 stack_id
= StackDepotPut(*args
->stack
);
32 thread
= args
->thread
;
33 thread
->set_context(this);
36 void AsanThreadContext::OnFinished() {
37 // Drop the link to the AsanThread object.
41 // MIPS requires aligned address
42 static ALIGNED(16) char thread_registry_placeholder
[sizeof(ThreadRegistry
)];
43 static ThreadRegistry
*asan_thread_registry
;
45 static BlockingMutex
mu_for_thread_context(LINKER_INITIALIZED
);
46 static LowLevelAllocator allocator_for_thread_context
;
48 static ThreadContextBase
*GetAsanThreadContext(u32 tid
) {
49 BlockingMutexLock
lock(&mu_for_thread_context
);
50 return new(allocator_for_thread_context
) AsanThreadContext(tid
);
53 ThreadRegistry
&asanThreadRegistry() {
54 static bool initialized
;
55 // Don't worry about thread_safety - this should be called when there is
58 // Never reuse ASan threads: we store pointer to AsanThreadContext
59 // in TSD and can't reliably tell when no more TSD destructors will
60 // be called. It would be wrong to reuse AsanThreadContext for another
61 // thread before all TSD destructors will be called for it.
62 asan_thread_registry
= new(thread_registry_placeholder
) ThreadRegistry(
63 GetAsanThreadContext
, kMaxNumberOfThreads
, kMaxNumberOfThreads
);
66 return *asan_thread_registry
;
69 AsanThreadContext
*GetThreadContextByTidLocked(u32 tid
) {
70 return static_cast<AsanThreadContext
*>(
71 asanThreadRegistry().GetThreadLocked(tid
));
// AsanThread implementation.
76 AsanThread
*AsanThread::Create(thread_callback_t start_routine
, void *arg
,
77 u32 parent_tid
, StackTrace
*stack
,
79 uptr PageSize
= GetPageSizeCached();
80 uptr size
= RoundUpTo(sizeof(AsanThread
), PageSize
);
81 AsanThread
*thread
= (AsanThread
*)MmapOrDie(size
, __func__
);
82 thread
->start_routine_
= start_routine
;
84 AsanThreadContext::CreateThreadContextArgs args
= {thread
, stack
};
85 asanThreadRegistry().CreateThread(*reinterpret_cast<uptr
*>(thread
), detached
,
91 void AsanThread::TSDDtor(void *tsd
) {
92 AsanThreadContext
*context
= (AsanThreadContext
*)tsd
;
93 VReport(1, "T%d TSDDtor\n", context
->tid
);
95 context
->thread
->Destroy();
98 void AsanThread::Destroy() {
99 int tid
= this->tid();
100 VReport(1, "T%d exited\n", tid
);
102 malloc_storage().CommitBack();
103 if (common_flags()->use_sigaltstack
) UnsetAlternateSignalStack();
104 asanThreadRegistry().FinishThread(tid
);
105 FlushToDeadThreadStats(&stats_
);
106 // We also clear the shadow on thread destruction because
107 // some code may still be executing in later TSD destructors
108 // and we don't want it to have any poisoned stack.
109 ClearShadowForThreadStackAndTLS();
110 DeleteFakeStack(tid
);
111 uptr size
= RoundUpTo(sizeof(AsanThread
), GetPageSizeCached());
112 UnmapOrDie(this, size
);
116 void AsanThread::StartSwitchFiber(FakeStack
**fake_stack_save
, uptr bottom
,
118 if (atomic_load(&stack_switching_
, memory_order_relaxed
)) {
119 Report("ERROR: starting fiber switch while in fiber switch\n");
123 next_stack_bottom_
= bottom
;
124 next_stack_top_
= bottom
+ size
;
125 atomic_store(&stack_switching_
, 1, memory_order_release
);
127 FakeStack
*current_fake_stack
= fake_stack_
;
129 *fake_stack_save
= fake_stack_
;
130 fake_stack_
= nullptr;
131 SetTLSFakeStack(nullptr);
132 // if fake_stack_save is null, the fiber will die, delete the fakestack
133 if (!fake_stack_save
&& current_fake_stack
)
134 current_fake_stack
->Destroy(this->tid());
137 void AsanThread::FinishSwitchFiber(FakeStack
*fake_stack_save
,
140 if (!atomic_load(&stack_switching_
, memory_order_relaxed
)) {
141 Report("ERROR: finishing a fiber switch that has not started\n");
145 if (fake_stack_save
) {
146 SetTLSFakeStack(fake_stack_save
);
147 fake_stack_
= fake_stack_save
;
151 *bottom_old
= stack_bottom_
;
153 *size_old
= stack_top_
- stack_bottom_
;
154 stack_bottom_
= next_stack_bottom_
;
155 stack_top_
= next_stack_top_
;
156 atomic_store(&stack_switching_
, 0, memory_order_release
);
158 next_stack_bottom_
= 0;
161 inline AsanThread::StackBounds
AsanThread::GetStackBounds() const {
162 if (!atomic_load(&stack_switching_
, memory_order_acquire
)) {
163 // Make sure the stack bounds are fully initialized.
164 if (stack_bottom_
>= stack_top_
) return {0, 0};
165 return {stack_bottom_
, stack_top_
};
168 const uptr cur_stack
= (uptr
)&local
;
169 // Note: need to check next stack first, because FinishSwitchFiber
170 // may be in process of overwriting stack_top_/bottom_. But in such case
171 // we are already on the next stack.
172 if (cur_stack
>= next_stack_bottom_
&& cur_stack
< next_stack_top_
)
173 return {next_stack_bottom_
, next_stack_top_
};
174 return {stack_bottom_
, stack_top_
};
177 uptr
AsanThread::stack_top() {
178 return GetStackBounds().top
;
181 uptr
AsanThread::stack_bottom() {
182 return GetStackBounds().bottom
;
185 uptr
AsanThread::stack_size() {
186 const auto bounds
= GetStackBounds();
187 return bounds
.top
- bounds
.bottom
;
190 // We want to create the FakeStack lazyly on the first use, but not eralier
191 // than the stack size is known and the procedure has to be async-signal safe.
192 FakeStack
*AsanThread::AsyncSignalSafeLazyInitFakeStack() {
193 uptr stack_size
= this->stack_size();
194 if (stack_size
== 0) // stack_size is not yet available, don't use FakeStack.
197 // fake_stack_ has 3 states:
198 // 0 -- not initialized
199 // 1 -- being initialized
200 // ptr -- initialized
201 // This CAS checks if the state was 0 and if so changes it to state 1,
202 // if that was successful, it initializes the pointer.
203 if (atomic_compare_exchange_strong(
204 reinterpret_cast<atomic_uintptr_t
*>(&fake_stack_
), &old_val
, 1UL,
205 memory_order_relaxed
)) {
206 uptr stack_size_log
= Log2(RoundUpToPowerOfTwo(stack_size
));
207 CHECK_LE(flags()->min_uar_stack_size_log
, flags()->max_uar_stack_size_log
);
209 Min(stack_size_log
, static_cast<uptr
>(flags()->max_uar_stack_size_log
));
211 Max(stack_size_log
, static_cast<uptr
>(flags()->min_uar_stack_size_log
));
212 fake_stack_
= FakeStack::Create(stack_size_log
);
213 SetTLSFakeStack(fake_stack_
);
219 void AsanThread::Init(const InitOptions
*options
) {
220 next_stack_top_
= next_stack_bottom_
= 0;
221 atomic_store(&stack_switching_
, false, memory_order_release
);
222 CHECK_EQ(this->stack_size(), 0U);
223 SetThreadStackAndTls(options
);
224 CHECK_GT(this->stack_size(), 0U);
225 CHECK(AddrIsInMem(stack_bottom_
));
226 CHECK(AddrIsInMem(stack_top_
- 1));
227 ClearShadowForThreadStackAndTLS();
228 fake_stack_
= nullptr;
229 if (__asan_option_detect_stack_use_after_return
)
230 AsyncSignalSafeLazyInitFakeStack();
232 VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
233 (void *)stack_bottom_
, (void *)stack_top_
, stack_top_
- stack_bottom_
,
// Fuchsia and RTEMS don't use ThreadStart.
// asan_fuchsia.c/asan_rtems.c define CreateMainThread and
// SetThreadStackAndTls.
240 #if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
242 thread_return_t
AsanThread::ThreadStart(
243 tid_t os_id
, atomic_uintptr_t
*signal_thread_is_registered
) {
245 asanThreadRegistry().StartThread(tid(), os_id
, /*workerthread*/ false,
247 if (signal_thread_is_registered
)
248 atomic_store(signal_thread_is_registered
, 1, memory_order_release
);
250 if (common_flags()->use_sigaltstack
) SetAlternateSignalStack();
252 if (!start_routine_
) {
253 // start_routine_ == 0 if we're on the main thread or on one of the
254 // OS X libdispatch worker threads. But nobody is supposed to call
255 // ThreadStart() for the worker threads.
260 thread_return_t res
= start_routine_(arg_
);
262 // On POSIX systems we defer this to the TSD destructor. LSan will consider
263 // the thread's memory as non-live from the moment we call Destroy(), even
264 // though that memory might contain pointers to heap objects which will be
265 // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
266 // the TSD destructors have run might cause false positives in LSan.
267 if (!SANITIZER_POSIX
)
273 AsanThread
*CreateMainThread() {
274 AsanThread
*main_thread
= AsanThread::Create(
275 /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
276 /* stack */ nullptr, /* detached */ true);
277 SetCurrentThread(main_thread
);
278 main_thread
->ThreadStart(internal_getpid(),
279 /* signal_thread_is_registered */ nullptr);
283 // This implementation doesn't use the argument, which is just passed down
284 // from the caller of Init (which see, above). It's only there to support
285 // OS-specific implementations that need more information passed through.
286 void AsanThread::SetThreadStackAndTls(const InitOptions
*options
) {
287 DCHECK_EQ(options
, nullptr);
290 GetThreadStackAndTls(tid() == 0, const_cast<uptr
*>(&stack_bottom_
),
291 const_cast<uptr
*>(&stack_size
), &tls_begin_
, &tls_size
);
292 stack_top_
= stack_bottom_
+ stack_size
;
293 tls_end_
= tls_begin_
+ tls_size
;
297 CHECK(AddrIsInStack((uptr
)&local
));
300 #endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
302 void AsanThread::ClearShadowForThreadStackAndTLS() {
303 PoisonShadow(stack_bottom_
, stack_top_
- stack_bottom_
, 0);
304 if (tls_begin_
!= tls_end_
) {
305 uptr tls_begin_aligned
= RoundDownTo(tls_begin_
, SHADOW_GRANULARITY
);
306 uptr tls_end_aligned
= RoundUpTo(tls_end_
, SHADOW_GRANULARITY
);
307 FastPoisonShadowPartialRightRedzone(tls_begin_aligned
,
308 tls_end_
- tls_begin_aligned
,
309 tls_end_aligned
- tls_end_
, 0);
313 bool AsanThread::GetStackFrameAccessByAddr(uptr addr
,
314 StackFrameAccess
*access
) {
316 if (AddrIsInStack(addr
)) {
317 bottom
= stack_bottom();
318 } else if (has_fake_stack()) {
319 bottom
= fake_stack()->AddrIsInFakeStack(addr
);
321 access
->offset
= addr
- bottom
;
322 access
->frame_pc
= ((uptr
*)bottom
)[2];
323 access
->frame_descr
= (const char *)((uptr
*)bottom
)[1];
326 uptr aligned_addr
= RoundDownTo(addr
, SANITIZER_WORDSIZE
/ 8); // align addr.
327 uptr mem_ptr
= RoundDownTo(aligned_addr
, SHADOW_GRANULARITY
);
328 u8
*shadow_ptr
= (u8
*)MemToShadow(aligned_addr
);
329 u8
*shadow_bottom
= (u8
*)MemToShadow(bottom
);
331 while (shadow_ptr
>= shadow_bottom
&&
332 *shadow_ptr
!= kAsanStackLeftRedzoneMagic
) {
334 mem_ptr
-= SHADOW_GRANULARITY
;
337 while (shadow_ptr
>= shadow_bottom
&&
338 *shadow_ptr
== kAsanStackLeftRedzoneMagic
) {
340 mem_ptr
-= SHADOW_GRANULARITY
;
343 if (shadow_ptr
< shadow_bottom
) {
347 uptr
* ptr
= (uptr
*)(mem_ptr
+ SHADOW_GRANULARITY
);
348 CHECK(ptr
[0] == kCurrentStackFrameMagic
);
349 access
->offset
= addr
- (uptr
)ptr
;
350 access
->frame_pc
= ptr
[2];
351 access
->frame_descr
= (const char*)ptr
[1];
355 uptr
AsanThread::GetStackVariableShadowStart(uptr addr
) {
357 if (AddrIsInStack(addr
)) {
358 bottom
= stack_bottom();
359 } else if (has_fake_stack()) {
360 bottom
= fake_stack()->AddrIsInFakeStack(addr
);
365 uptr aligned_addr
= RoundDownTo(addr
, SANITIZER_WORDSIZE
/ 8); // align addr.
366 u8
*shadow_ptr
= (u8
*)MemToShadow(aligned_addr
);
367 u8
*shadow_bottom
= (u8
*)MemToShadow(bottom
);
369 while (shadow_ptr
>= shadow_bottom
&&
370 (*shadow_ptr
!= kAsanStackLeftRedzoneMagic
&&
371 *shadow_ptr
!= kAsanStackMidRedzoneMagic
&&
372 *shadow_ptr
!= kAsanStackRightRedzoneMagic
))
375 return (uptr
)shadow_ptr
+ 1;
378 bool AsanThread::AddrIsInStack(uptr addr
) {
379 const auto bounds
= GetStackBounds();
380 return addr
>= bounds
.bottom
&& addr
< bounds
.top
;
383 static bool ThreadStackContainsAddress(ThreadContextBase
*tctx_base
,
385 AsanThreadContext
*tctx
= static_cast<AsanThreadContext
*>(tctx_base
);
386 AsanThread
*t
= tctx
->thread
;
387 if (!t
) return false;
388 if (t
->AddrIsInStack((uptr
)addr
)) return true;
389 if (t
->has_fake_stack() && t
->fake_stack()->AddrIsInFakeStack((uptr
)addr
))
394 AsanThread
*GetCurrentThread() {
395 if (SANITIZER_RTEMS
&& !asan_inited
)
398 AsanThreadContext
*context
=
399 reinterpret_cast<AsanThreadContext
*>(AsanTSDGet());
401 if (SANITIZER_ANDROID
) {
402 // On Android, libc constructor is called _after_ asan_init, and cleans up
403 // TSD. Try to figure out if this is still the main thread by the stack
404 // address. We are not entirely sure that we have correct main thread
405 // limits, so only do this magic on Android, and only if the found thread
406 // is the main thread.
407 AsanThreadContext
*tctx
= GetThreadContextByTidLocked(0);
408 if (tctx
&& ThreadStackContainsAddress(tctx
, &context
)) {
409 SetCurrentThread(tctx
->thread
);
415 return context
->thread
;
418 void SetCurrentThread(AsanThread
*t
) {
420 VReport(2, "SetCurrentThread: %p for thread %p\n", t
->context(),
421 (void *)GetThreadSelf());
422 // Make sure we do not reset the current AsanThread.
423 CHECK_EQ(0, AsanTSDGet());
424 AsanTSDSet(t
->context());
425 CHECK_EQ(t
->context(), AsanTSDGet());
428 u32
GetCurrentTidOrInvalid() {
429 AsanThread
*t
= GetCurrentThread();
430 return t
? t
->tid() : kInvalidTid
;
433 AsanThread
*FindThreadByStackAddress(uptr addr
) {
434 asanThreadRegistry().CheckLocked();
435 AsanThreadContext
*tctx
= static_cast<AsanThreadContext
*>(
436 asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress
,
438 return tctx
? tctx
->thread
: nullptr;
441 void EnsureMainThreadIDIsCorrect() {
442 AsanThreadContext
*context
=
443 reinterpret_cast<AsanThreadContext
*>(AsanTSDGet());
444 if (context
&& (context
->tid
== 0))
445 context
->os_id
= GetTid();
448 __asan::AsanThread
*GetAsanThreadByOsIDLocked(tid_t os_id
) {
449 __asan::AsanThreadContext
*context
= static_cast<__asan::AsanThreadContext
*>(
450 __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id
));
451 if (!context
) return nullptr;
452 return context
->thread
;
454 } // namespace __asan
456 // --- Implementation of LSan-specific functions --- {{{1
458 bool GetThreadRangesLocked(tid_t os_id
, uptr
*stack_begin
, uptr
*stack_end
,
459 uptr
*tls_begin
, uptr
*tls_end
, uptr
*cache_begin
,
460 uptr
*cache_end
, DTLS
**dtls
) {
461 __asan::AsanThread
*t
= __asan::GetAsanThreadByOsIDLocked(os_id
);
462 if (!t
) return false;
463 *stack_begin
= t
->stack_bottom();
464 *stack_end
= t
->stack_top();
465 *tls_begin
= t
->tls_begin();
466 *tls_end
= t
->tls_end();
467 // ASan doesn't keep allocator caches in TLS, so these are unused.
474 void ForEachExtraStackRange(tid_t os_id
, RangeIteratorCallback callback
,
476 __asan::AsanThread
*t
= __asan::GetAsanThreadByOsIDLocked(os_id
);
477 if (t
&& t
->has_fake_stack())
478 t
->fake_stack()->ForEachFakeFrame(callback
, arg
);
481 void LockThreadRegistry() {
482 __asan::asanThreadRegistry().Lock();
485 void UnlockThreadRegistry() {
486 __asan::asanThreadRegistry().Unlock();
489 ThreadRegistry
*GetThreadRegistryLocked() {
490 __asan::asanThreadRegistry().CheckLocked();
491 return &__asan::asanThreadRegistry();
494 void EnsureMainThreadIDIsCorrect() {
495 __asan::EnsureMainThreadIDIsCorrect();
497 } // namespace __lsan
499 // ---------------------- Interface ---------------- {{{1
500 using namespace __asan
; // NOLINT
503 SANITIZER_INTERFACE_ATTRIBUTE
504 void __sanitizer_start_switch_fiber(void **fakestacksave
, const void *bottom
,
506 AsanThread
*t
= GetCurrentThread();
508 VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
511 t
->StartSwitchFiber((FakeStack
**)fakestacksave
, (uptr
)bottom
, size
);
514 SANITIZER_INTERFACE_ATTRIBUTE
515 void __sanitizer_finish_switch_fiber(void* fakestack
,
516 const void **bottom_old
,
518 AsanThread
*t
= GetCurrentThread();
520 VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
523 t
->FinishSwitchFiber((FakeStack
*)fakestack
,