//===-- asan_fake_stack.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
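
// The constants above replicate the one-byte magic into 2, 4 and 8 bytes so
// that SetShadow() below can fill shadow memory with whole u64 stores.  For
// example, with kAsanStackAfterReturnMagic == 0xf5 this yields
// kMagic8 == 0xf5f5f5f5f5f5f5f5.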

// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (class_id <= 6) {
    for (uptr i = 0; i < (1U << class_id); i++)
      shadow[i] = magic;
  } else {
    // The size class is too big, it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}
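
// A note on the fast path above: with SHADOW_SCALE == 3 one u64 store covers
// 8 shadow bytes, i.e. 64 application bytes.  Assuming the minimal frame size
// of 64 bytes (so frames of class_id span 64 << class_id bytes), a frame
// needs exactly (1 << class_id) u64 stores, which is the loop bound used for
// class_id <= 6 (frames up to 4096 bytes).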

FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d \n",
          GetCurrentTidOrInvalid(), p,
          p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}
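
// Illustration of the sizing policy above: stack_size_log is clamped to
// [16, 24] on 32-bit and [16, 28] on 64-bit targets, i.e. the fake stack
// backs thread stacks between 64K and 16M/256M.  Each of the
// kNumberOfSizeClasses class regions then occupies (1 << stack_size_log)
// bytes of the mapping (see RequiredSize() in asan_fake_stack.h).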

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (common_flags()->verbosity >= 2) {
    InternalScopedString str(kNumberOfSizeClasses * 50);
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                 NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}
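
// Note: FlushUnneededASanShadowMemory() releases the shadow pages of the
// dead fake stack back to the OS before the region itself is unmapped;
// the shadow is a fixed global mapping, so its pages would otherwise stay
// resident after the thread exits.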

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

ALWAYS_INLINE USED
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return 0;  // We are out of fake stack.
}
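
// Allocation above is a round-robin scan of the per-class flags array
// starting at hint_position_, so freed frames tend to be recycled as late
// as possible, which widens the window in which a use-after-return access
// to a stale frame can still be detected.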

uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (1UL << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}
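
// Worked example for the arithmetic above (illustrative, assuming the
// minimal frame size of 64 bytes): with stack_size_log == 16 each class
// region spans 64K, so for ptr == beg + 0x23456 we get class_id == 2,
// base == beg + 0x20000, frame size 64 << 2 == 256 bytes,
// pos == 0x3456 >> 8 == 0x34 and res == base + 0x3400.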

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When a throw, longjmp or some such happens, we don't call OnFree() and as
// a result may leak one or more fake frames; the good news is that we are
// notified about all such events by HandleNoReturn().
// If we recently had such a no-return event we need to collect garbage frames.
// We do it based on their 'real_stack' values -- everything that is lower
// than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  uptr collected = 0;
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack) {
        flags[i] = 0;
        collected++;
      }
    }
  }
  needs_gc_ = false;
}
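
// The comparison ff->real_stack < real_stack relies on the stack growing
// down: a fake frame whose recorded real stack address is below the current
// one belongs to a call that has already been unwound past, so its flag can
// be cleared safely.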

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // SANITIZER_LINUX && !SANITIZER_ANDROID

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return 0;
  return t->fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return 0;
  return GetFakeStack();
}
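
// GetFakeStackFast() sits on the hot path of every instrumented function:
// the TLS cache avoids a GetCurrentThread() lookup, and checking
// __asan_option_detect_stack_use_after_return lets the whole machinery be
// bypassed when use-after-return detection is disabled at runtime.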

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return real_stack;
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff)
    return real_stack;  // Out of fake stack, return the real one.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) {
  if (ptr == real_stack)
    return;
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}
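
// Poisoning summary: OnMalloc() clears the frame's shadow (magic 0) so the
// function can use it, while OnFree() refills the shadow with kMagic8, the
// replicated stack-after-return magic, so a later access through a dangling
// pointer into this frame is reported as stack-use-after-return.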

} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                       \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                                \
  __asan_stack_malloc_##class_id(uptr size, uptr real_stack) {                 \
    return OnMalloc(class_id, size, real_stack);                               \
  }                                                                            \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id(  \
      uptr ptr, uptr size, uptr real_stack) {                                  \
    OnFree(ptr, class_id, size, real_stack);                                   \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
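
// These entry points are emitted by compiler instrumentation rather than
// called by hand.  Roughly (an illustrative sketch, not the exact generated
// sequence), a function with a 256-byte frame (class_id 2, since
// 64 << 2 == 256) gets:
//   uptr fake = __asan_stack_malloc_2(256, real_frame_address);
//   ... locals live at fake ...
//   __asan_stack_free_2(fake, 256, real_frame_address);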
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }

SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end) {
  FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs) return 0;
  uptr frame_beg, frame_end;
  FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame) return 0;
  if (frame->magic != kCurrentStackFrameMagic)
    return 0;
  if (beg) *beg = reinterpret_cast<void*>(frame_beg);
  if (end) *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}
} // extern "C"
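
// Illustrative use of the two interface functions above by an external tool
// (e.g. a debugger unwinding through fake frames):
//   void *beg, *end;
//   void *fs = __asan_get_current_fake_stack();
//   void *real = fs ? __asan_addr_is_in_fake_stack(fs, addr, &beg, &end) : 0;
// A non-null result is the corresponding real stack address, with
// [beg, end) bounding the fake frame that contains addr.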