//===-- asan_thread.cc ----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs *>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}
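
// AsanThreadContexts are never recycled (see asanThreadRegistry() below), so
// an error report can still refer to a thread that has already exited; only
// the link back to the live AsanThread is dropped in OnFinished().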

// MIPS requires aligned address
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this should be called when there is
  // a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
    initialized = true;
  }
  return *asan_thread_registry;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.
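
// Lifecycle summary: Create() mmaps the AsanThread object and registers it;
// the new thread then runs ThreadStart(), and teardown happens in Destroy(),
// reached either directly (non-POSIX) or from the TSD destructor (TSDDtor)
// so that user-defined TSD destructors run first.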

AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread *)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
                                    parent_tid, &args);

  return thread;
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext *)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  malloc_storage().CommitBack();
  if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
  asanThreadRegistry().FinishThread(tid);
  FlushToDeadThreadStats(&stats_);
  // We also clear the shadow on thread destruction because
  // some code may still be executing in later TSD destructors
  // and we don't want it to have any poisoned stack.
  ClearShadowForThreadStackAndTLS();
  DeleteFakeStack(tid);
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}
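
// Fiber support: the public __sanitizer_start_switch_fiber /
// __sanitizer_finish_switch_fiber entry points (bottom of this file) call the
// two methods below. A sketch of the intended pairing; the context switch
// itself (e.g. swapcontext) is up to the caller's fiber library, and the
// names here are illustrative:
//   void *fake_stack = nullptr;
//   __sanitizer_start_switch_fiber(&fake_stack, target_bottom, target_size);
//   /* switch to the target fiber */
//   __sanitizer_finish_switch_fiber(fake_stack, &old_bottom, &old_size);
// Passing a null fake_stack_save to the start call tells ASan the current
// fiber is about to die, so its fake stack is destroyed.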

void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber will die; delete its fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/bottom_. But in that case
  // we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}
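
// The FakeStack keeps a function's frame alive after the function returns so
// that use-after-return accesses hit poisoned memory and can be reported
// (a summary; see asan_fake_stack.h for the authoritative description).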
// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known; the procedure also has to be async-signal
// safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1;
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}

void AsanThread::Init(const InitOptions *options) {
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  fake_stack_ = nullptr;  // Will be initialized lazily if needed.
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  CHECK_GT(this->stack_size(), 0U);
  CHECK(AddrIsInMem(stack_bottom_));
  CHECK(AddrIsInMem(stack_top_ - 1));
  ClearShadowForThreadStackAndTLS();
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_,
          stack_top_ - stack_bottom_, &local);
}

// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA

thread_return_t AsanThread::ThreadStart(
    tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
                                   nullptr);
  if (signal_thread_is_registered)
    atomic_store(signal_thread_is_registered, 1, memory_order_release);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
  // the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid(),
                           /* signal_thread_is_registered */ nullptr);
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (which see, above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
                       const_cast<uptr *>(&stack_size), &tls_begin_, &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  int local;
  CHECK(AddrIsInStack((uptr)&local));
}

#endif  // !SANITIZER_FUCHSIA

void AsanThread::ClearShadowForThreadStackAndTLS() {
  PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_)
    PoisonShadow(tls_begin_, tls_end_ - tls_begin_, 0);
}
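
// The base of an instrumented stack frame holds three words written by the
// function prologue: [0] kCurrentStackFrameMagic, [1] a pointer to the frame
// description string, and [2] the function PC; the lookups below rely on
// that layout.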
bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr *)bottom)[2];
    access->frame_descr = (const char *)((uptr *)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr *ptr = (uptr *)(mem_ptr + SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char *)ptr[1];
  return true;
}
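
// Scans the shadow leftwards from `addr` until it hits a redzone marker and
// returns the address one past that shadow byte, i.e. the first shadow byte
// of the variable's own region.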
uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
  } else {
    return 0;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8 *)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8 *)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t) return false;
  if (t->AddrIsInStack((uptr)addr)) return true;
  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
    return true;
  return false;
}

AsanThread *GetCurrentThread() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init, and
      // cleans up TSD. Try to figure out if this is still the main thread by
      // the stack address. We are not entirely sure that we have the correct
      // main thread limits, so only do this magic on Android, and only if the
      // found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == 0))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
}  // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
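
// LSan treats thread stacks and TLS as root regions when scanning for leaked
// heap blocks; the hooks in this namespace expose those ranges (plus fake
// stack frames) from the ASan thread registry.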
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
    t->fake_stack()->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
}  // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack **)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void *fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack *)fakestack,
                       (uptr *)bottom_old,
                       (uptr *)size_old);
}
}