//===-- asan_fake_stack.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
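
// Illustrative sketch (not part of this file's logic) of the bug class this
// machinery detects:
//
//   char *f() { char buf[16]; return buf; }  // escapes a stack address
//   void g() { *f() = 42; }                  // use after f() has returned
//
// With fake stacks enabled, `buf` lives in a heap-like fake frame whose
// shadow is poisoned when f() returns, so the store in g() is reported
// instead of silently corrupting the real stack.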
14 #include "asan_allocator.h"
15 #include "asan_poisoning.h"
16 #include "asan_thread.h"
18 namespace __asan {

static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
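
// Note (added for clarity): kMagic1 is the 1-byte use-after-return shadow
// magic, and the shifts above replicate it into all 8 bytes of a u64, so a
// single 8-byte store in SetShadow() poisons 8 shadow bytes at once
// (e.g. a magic byte of 0xf5 becomes 0xf5f5f5f5f5f5f5f5).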

// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (class_id <= 6) {
    for (uptr i = 0; i < (1U << class_id); i++)
      shadow[i] = magic;
  } else {
    // The size class is too big, it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}
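
// Why (1U << class_id) stores suffice above, assuming the default
// kMinStackFrameSizeLog of 6: a class_id frame spans
// 2^(kMinStackFrameSizeLog + class_id) bytes, i.e. 2^(3 + class_id) shadow
// bytes at SHADOW_SCALE=3, which is exactly (1U << class_id) u64 stores.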

FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d \n",
          GetCurrentTidOrInvalid(), p,
          p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}
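
// Rough sizing intuition (a sketch; exact numbers depend on RequiredSize()'s
// layout in asan_fake_stack.h): each of the kNumberOfSizeClasses size classes
// gets its own 2^stack_size_log-byte run of frames plus per-frame flag bytes,
// so the 32-bit cap of stack_size_log == 24 maps on the order of 11 * 16MB of
// address space, which is why flags()->uar_noreserve can matter.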

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (common_flags()->verbosity >= 2) {
    InternalScopedString str(kNumberOfSizeClasses * 50);
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                 NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

ALWAYS_INLINE USED
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with a regular non-atomic load and store (at least I was not able to
    // make this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return 0;  // We are out of fake stack.
}
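
// Note on the scan above: hint_position_ makes allocation a circular
// round-robin over the class's frames instead of always starting at frame 0,
// so recently freed frames are reused as late as possible and stale
// use-after-return accesses keep hitting poisoned memory for longer.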

uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (1UL << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}
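
// Worked example for the arithmetic above, assuming stack_size_log == 16 and
// the default kMinStackFrameSizeLog == 6: each class occupies a 64KB run
// starting at `beg`, so ptr == beg + 0x10090 falls in class_id 1 (frames of
// 64 << 1 == 128 bytes), pos == 0x90 >> 7 == 1, and the frame begins at
// beg + 0x10000 + 128.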

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When a throw, longjmp, or similar event happens, we don't call OnFree()
// and as a result may leak one or more fake frames. The good news is that
// we are notified about all such events by HandleNoReturn().
// If we recently had such a no-return event, we need to collect garbage
// frames. We do it based on their 'real_stack' values: everything lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  uptr collected = 0;
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack) {
        flags[i] = 0;
        collected++;
      }
    }
  }
  needs_gc_ = false;
}

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // SANITIZER_LINUX && !SANITIZER_ANDROID

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return 0;
  return t->fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return 0;
  return GetFakeStack();
}
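
// GetFakeStackFast() sits on the instrumented-function hot path: the TLS
// cache dodges the costlier GetCurrentThread() lookup, and the
// __asan_option_detect_stack_use_after_return check keeps the feature cheap
// when it is disabled at runtime.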

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return real_stack;
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff)
    return real_stack;  // Out of fake stack, return the real one.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
  if (ptr == real_stack)
    return;
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                       \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                                \
      __asan_stack_malloc_##class_id(uptr size, uptr real_stack) {             \
    return OnMalloc(class_id, size, real_stack);                               \
  }                                                                            \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id(  \
      uptr ptr, uptr size, uptr real_stack) {                                  \
    OnFree(ptr, class_id, size, real_stack);                                   \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
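
// Illustrative only -- a rough sketch of how compiler instrumentation uses
// these entry points (real codegen lives in the compiler, not in this file):
//
//   void foo() {
//     char frame[64];                                   // real stack frame
//     uptr base = __asan_stack_malloc_0(64, (uptr)frame);
//     // ... locals are addressed relative to `base` ...
//     __asan_stack_free_0(base, 64, (uptr)frame);
//   }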
236 extern "C" {
237 SANITIZER_INTERFACE_ATTRIBUTE
238 void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }

SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end) {
  FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs) return 0;
  uptr frame_beg, frame_end;
  FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame) return 0;
  if (frame->magic != kCurrentStackFrameMagic)
    return 0;
  if (beg) *beg = reinterpret_cast<void*>(frame_beg);
  if (end) *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}
}  // extern "C"
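
// A minimal usage sketch for the interface above (hypothetical caller;
// the public declarations live in sanitizer/asan_interface.h):
//
//   void *beg, *end;
//   void *fs = __asan_get_current_fake_stack();
//   if (__asan_addr_is_in_fake_stack(fs, addr, &beg, &end)) {
//     // `addr` points into a live fake frame covering [beg, end).
//   }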