//===-- asan_fake_stack.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
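
// How it works, in brief (illustrative sketch, not part of the original
// header): with detect_stack_use_after_return enabled, the compiler places a
// function's locals in a heap-backed "fake frame" instead of the real stack;
// after the function returns, the frame's shadow stays poisoned, so a
// dangling pointer to a local is caught:
//
//   int *p;
//   void f() { int x; p = &x; }  // x lives in a fake frame
//   void g() { f(); *p = 1; }    // frame is now poisoned => ASan reports UAR
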
#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;

static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;
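
// kMagic8 replicates the 1-byte after-return magic across a u64 so that a
// single 8-byte store poisons eight shadow bytes at once (see SetShadow
// below). Assuming kAsanStackAfterReturnMagic == 0xf5 (its value in
// asan_internal.h), the chain is:
//   kMagic1 == 0xf5, kMagic2 == 0xf5f5, kMagic4 == 0xf5f5f5f5,
//   kMagic8 == 0xf5f5f5f5f5f5f5f5.
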
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (SHADOW_SCALE == 3 && class_id <= 6) {
    // This code expects SHADOW_SCALE=3.
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big, it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}
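
// Why class_id <= 6 above: assuming kMinStackFrameSizeLog == 6 (as in
// asan_fake_stack.h), a class_id frame spans 64 << class_id bytes, which
// maps to 8 << class_id shadow bytes, i.e. exactly 1 << class_id u64 stores
// in the loop. At class_id == 6 that is already 64 stores; for larger
// classes the PoisonShadow (memset-style) path is cheaper.
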
FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1,
          "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d \n",
          GetCurrentTidOrInvalid(), (void *)p,
          (void *)(p + FakeStack::RequiredSize(stack_size_log)), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}
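
// Layout note: each of the kNumberOfSizeClasses size classes (11 of them,
// matching the DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0..10) instantiations
// below) owns a contiguous (1 << stack_size_log)-byte region of frames, so
// the mapping is roughly kNumberOfSizeClasses << stack_size_log bytes plus a
// flags header; e.g. with the minimum stack_size_log of 16 that is about
// 11 * 64 KiB = 704 KiB per thread. See RequiredSize() in asan_fake_stack.h
// for the exact formula.
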
void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (Verbosity() >= 2) {
    InternalScopedString str;
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                 NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with a regular non-atomic load and store (at least I was not able to
    // make this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return nullptr; // We are out of fake stack.
}
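
// Note on SavedFlagPtr: each live frame stores a pointer back to its own
// flag byte, which lets Deallocate() release the frame with a single store
// instead of recomputing the frame's slot in the flags array.
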
uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}
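
// Worked example with hypothetical numbers: each size class owns a
// contiguous 1 << stack_size_log byte region, so for stack_size_log == 16
// and ptr - beg == 0x24000, class_id == 0x24000 >> 16 == 2. Within class 2,
// a frame spans 1 << (6 + 2) == 256 bytes (assuming
// kMinStackFrameSizeLog == 6), so the in-class offset 0x4000 falls in frame
// pos == 0x40.
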
void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When a throw, longjmp or some such happens, we don't call OnFree() and as
// a result may leak one or more fake frames. The good news is that we are
// notified about all such events by HandleNoReturn().
// If we recently had such a no-return event, we need to collect garbage
// frames. We do this based on their 'real_stack' values: everything lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  uptr collected = 0;
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue; // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack) {
        flags[i] = 0;
        collected++;
      }
    }
  }
  needs_gc_ = false;
}

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue; // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return nullptr;
  return t->get_or_create_fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return nullptr;
  return GetFakeStack();
}

static FakeStack *GetFakeStackFastAlways() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  return GetFakeStack();
}
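
// GetFakeStackFast() honors the runtime flag
// __asan_option_detect_stack_use_after_return, while GetFakeStackFastAlways()
// skips that check; it backs the __asan_stack_malloc_always_* entry points,
// which the compiler emits when use-after-return detection is forced on at
// compile time rather than selected at runtime.
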
static ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff) return 0; // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}
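
// The address of local_stack approximates the caller's stack pointer at the
// time of the call; FakeStack::GC() later uses these recorded 'real_stack'
// watermarks to reclaim frames owned by functions that have since returned.
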
static ALWAYS_INLINE uptr OnMallocAlways(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFastAlways();
  if (!fs)
    return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff)
    return 0; // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

static ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}

} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                      \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_##class_id(uptr size) {                             \
    return OnMalloc(class_id, size);                                          \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_always_##class_id(uptr size) {                      \
    return OnMallocAlways(class_id, size);                                    \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size) {                                                  \
    OnFree(ptr, class_id, size);                                              \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
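
// Illustrative sketch (names and the 96-byte frame size are hypothetical) of
// how compiler-instrumented code is expected to use these entry points:
//
//   void foo() {
//     uptr fake = __asan_stack_malloc_1(96);  // class 1: frames <= 128 bytes
//     char *frame =
//         fake ? (char *)fake : (char *)alloca(96);  // 0 => use real stack
//     /* ... locals of foo live inside `frame` ... */
//     if (fake) __asan_stack_free_1(fake, 96);
//   }
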
269 extern "C" {
270 // TODO: remove this method and fix tests that use it by setting
271 // -asan-use-after-return=never, after modal UAR flag lands
272 // (https://github.com/google/sanitizers/issues/1394)
273 SANITIZER_INTERFACE_ATTRIBUTE
274 void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
276 SANITIZER_INTERFACE_ATTRIBUTE
277 void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
278 void **end) {
279 FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
280 if (!fs) return nullptr;
281 uptr frame_beg, frame_end;
282 FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
283 reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
284 if (!frame) return nullptr;
285 if (frame->magic != kCurrentStackFrameMagic)
286 return nullptr;
287 if (beg) *beg = reinterpret_cast<void*>(frame_beg);
288 if (end) *end = reinterpret_cast<void*>(frame_end);
289 return reinterpret_cast<void*>(frame->real_stack);
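
// Typical (illustrative) use from a debugger or crash reporter:
//
//   void *beg, *end;
//   void *fs = __asan_get_current_fake_stack();
//   if (__asan_addr_is_in_fake_stack(fs, suspect_addr, &beg, &end)) {
//     // suspect_addr lies inside a live fake frame [beg, end).
//   }
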
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}
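
// Worked example with hypothetical values: for addr == 0x1000 and
// size == 42 (0x2a), the left redzone covers [0xfe0, 0x1000); the partial
// right redzone begins at PartialRzAddr == 0x102a and is rounded up to the
// 32-byte-aligned RightRzAddr == 0x1040, after which the full right redzone
// covers [0x1040, 0x1060).
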
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom)) return;
  REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
               (bottom - top) / SHADOW_GRANULARITY);
}
} // extern "C"