//===-- asan_thread.cc ----------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"
namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}
// MIPS requires an aligned address.
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}
ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this should be called when there is
  // a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
    initialized = true;
  }
  return *asan_thread_registry;
}
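
// Note: the registry is built with placement new into static, 16-byte-aligned
// storage, so obtaining it never allocates from the heap; the price is that
// initialization is not thread-safe, which is fine per the comment above.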
AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.
AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
                                    parent_tid, &args);

  return thread;
}
void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext*)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}
void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  malloc_storage().CommitBack();
  if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
  asanThreadRegistry().FinishThread(tid);
  FlushToDeadThreadStats(&stats_);
  // We also clear the shadow on thread destruction because
  // some code may still be executing in later TSD destructors,
  // and we don't want it to have any poisoned stack.
  ClearShadowForThreadStackAndTLS();
  DeleteFakeStack(tid);
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}
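
// Note the teardown order above: stats are flushed and the fake stack deleted
// before UnmapOrDie releases the AsanThread object itself; only DTLS_Destroy,
// which does not touch `this`, may run after the unmap.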
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is about to die, so delete its
  // fake stack.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}
void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}
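
// Ordering note: StartSwitchFiber writes next_stack_bottom_/next_stack_top_
// and then release-stores stack_switching_; GetStackBounds() below acquires
// stack_switching_ before reading them, so a reader that observes a switch
// in progress also observes fully written next-stack bounds.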
inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: we need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/stack_bottom_. But in
  // that case we are already on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}
uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}
// We want to create the FakeStack lazily on first use, but not earlier than
// the stack size is known, and the procedure has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and if so changes it to state 1,
  // if that was successful, it initializes the pointer.
  if (atomic_compare_exchange_strong(
          reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
          memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}
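
// The tri-state idiom above, reduced to a self-contained sketch (hypothetical
// names; uses the same sanitizer atomics; not the actual FakeStack code):
//
//   static atomic_uintptr_t g_state;  // 0 = empty, 1 = initializing, else ptr
//   Obj *LazyGet() {
//     uptr cmp = 0;
//     if (atomic_compare_exchange_strong(&g_state, &cmp, 1UL,
//                                        memory_order_relaxed))
//       atomic_store(&g_state, (uptr)new Obj, memory_order_release);
//     uptr v = atomic_load(&g_state, memory_order_acquire);
//     return v > 1 ? (Obj *)v : nullptr;  // nullptr while still initializing
//   }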
void AsanThread::Init(const InitOptions *options) {
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  CHECK_GT(this->stack_size(), 0U);
  CHECK(AddrIsInMem(stack_bottom_));
  CHECK(AddrIsInMem(stack_top_ - 1));
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return)
    AsyncSignalSafeLazyInitFakeStack();
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          &local);
}
// Fuchsia and RTEMS don't use ThreadStart.
// asan_fuchsia.c/asan_rtems.c define CreateMainThread and
// SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

thread_return_t AsanThread::ThreadStart(
    tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
                                   nullptr);
  if (signal_thread_is_registered)
    atomic_store(signal_thread_is_registered, 1, memory_order_release);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy()
  // before the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}
AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid(),
                           /* signal_thread_is_registered */ nullptr);
  return main_thread;
}
// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (see above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
                       const_cast<uptr *>(&stack_size), &tls_begin_, &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  int local;
  CHECK(AddrIsInStack((uptr)&local));
}

#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
void AsanThread::ClearShadowForThreadStackAndTLS() {
  PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
    FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
                                        tls_end_ - tls_begin_aligned,
                                        tls_end_aligned - tls_end_, 0);
  }
}
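
// Worked example of the alignment math above (assuming SHADOW_GRANULARITY ==
// 8): a TLS range [0x1005, 0x1013) gives tls_begin_aligned = 0x1000 and
// tls_end_aligned = 0x1018, i.e. a size of 0x13 bytes measured from the
// aligned base plus a 5-byte tail up to the next granule boundary.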
bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr*)bottom)[2];
    access->frame_descr = (const char *)((uptr*)bottom)[1];
    return true;
  }

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

  uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char*)ptr[1];
  return true;
}
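
// The scan above walks the shadow downwards to the left redzone of the
// enclosing frame and then past it, landing on the frame base, which ASan
// instrumentation lays out as:
//   ptr[0] -- kCurrentStackFrameMagic
//   ptr[1] -- pointer to the frame description string
//   ptr[2] -- PC of the function owning the frame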
uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
  } else
    return 0;

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}
bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t) return false;
  if (t->AddrIsInStack((uptr)addr)) return true;
  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
    return true;
  return false;
}
AsanThread *GetCurrentThread() {
  if (SANITIZER_RTEMS && !asan_inited)
    return nullptr;

  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init, and
      // cleans up TSD. Try to figure out whether this is still the main
      // thread by the stack address. We are not entirely sure that we have
      // the correct main thread limits, so only do this magic on Android,
      // and only if the found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}
void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}
u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}
AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}
void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == 0))
    context->os_id = GetTid();
}
__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
} // namespace __asan
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}
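
// LSan scans the ranges reported here (stack, TLS, DTLS) as root regions when
// it looks for live pointers during leak detection.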
void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
    t->fake_stack()->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

ThreadRegistry *GetThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack*)fakestack,
                       (uptr*)bottom_old,
                       (uptr*)size_old);
}
} // extern "C"
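
// Typical annotation pattern for a fiber/coroutine library (a sketch; all
// names other than the two __sanitizer_* entry points are hypothetical):
//
//   void *fake_stack = nullptr;
//   // Just before switching away, announce the destination stack:
//   __sanitizer_start_switch_fiber(&fake_stack, dest_stack_bottom,
//                                  dest_stack_size);
//   switch_to(dest_fiber);  // the actual context switch
//   // First thing after control returns to this stack:
//   const void *old_bottom;
//   uptr old_size;
//   __sanitizer_finish_switch_fiber(fake_stack, &old_bottom, &old_size);
//
// A fiber that is about to terminate passes nullptr as the first argument of
// __sanitizer_start_switch_fiber so that its fake stack frames are destroyed
// (see the comment in AsanThread::StartSwitchFiber above).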