//===-- asan_fake_stack.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
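
// Each doubling above replicates the poison byte: kMagic2 holds two copies
// of kMagic1, kMagic4 four, and kMagic8 eight. With the default
// kAsanStackAfterReturnMagic of 0xf5 (see asan_internal.h), kMagic8 is
// 0xf5f5f5f5f5f5f5f5, so a single 8-byte store poisons 8 shadow bytes,
// i.e. 64 application bytes at SHADOW_SCALE=3.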

static const u64 kAllocaRedzoneSize = 32UL;
static const u64 kAllocaRedzoneMask = 31UL;

// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (class_id <= 6) {
    for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
      shadow[i] = magic;
      // Make sure this does not become memset.
      SanitizerBreakOptimization(nullptr);
    }
  } else {
    // The size class is too big, it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}
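
// A worked example of the geometry above: frames in size class class_id are
// BytesInSizeClass(class_id) == 64 << class_id bytes (asan_fake_stack.h), so
// their shadow is (64 << class_id) >> SHADOW_SCALE == 8 << class_id bytes,
// i.e. exactly (1 << class_id) u64 stores -- hence the loop bound above.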

FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  uptr size = RequiredSize(stack_size_log);
  FakeStack *res = reinterpret_cast<FakeStack *>(
      flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
                             : MmapOrDie(size, "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  u8 *p = reinterpret_cast<u8 *>(res);
  VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
          "mmapped %zdK, noreserve=%d \n",
          GetCurrentTidOrInvalid(), p,
          p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
          size >> 10, flags()->uar_noreserve);
  return res;
}
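
// With the clamping above, each size class gets a 2^stack_size_log byte
// frame region (see AddrIsInFakeStack below), i.e. between 64 KiB and
// 256 MiB on 64-bit targets (16 MiB max on 32-bit); RequiredSize() also
// accounts for the per-frame flag arrays described in asan_fake_stack.h.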

void FakeStack::Destroy(int tid) {
  PoisonAll(0);
  if (Verbosity() >= 2) {
    InternalScopedString str(kNumberOfSizeClasses * 50);
    for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
      str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
                 NumberOfFrames(stack_size_log(), class_id));
    Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
  }
  uptr size = RequiredSize(stack_size_log_);
  FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
  UnmapOrDie(this, size);
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

#if !defined(_MSC_VER) || defined(__clang__)
ALWAYS_INLINE USED
#endif
FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return nullptr;  // We are out of fake stack.
}
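
// The SavedFlagPtr store above is what makes deallocation O(1):
// FakeStack::Deallocate (asan_fake_stack.h) simply clears the flag byte
// through this saved pointer, with no search over the flags array.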

uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  uptr res = base + pos * BytesInSizeClass(class_id);
  *frame_end = res + BytesInSizeClass(class_id);
  *frame_beg = res + sizeof(FakeFrame);
  return res;
}
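
// A worked example, assuming stack_size_log == 16: for ptr == beg + 0x30150,
// class_id == 0x30150 >> 16 == 3 (512-byte frames), base == beg + 0x30000,
// pos == 0x150 >> (6 + 3) == 0, so the frame is [base, base + 512) and
// frame_beg skips the FakeFrame header at its start.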

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When a throw, longjmp, or similar happens, we don't call OnFree() and as
// a result may leak one or more fake frames; the good news is that we are
// notified about all such events by HandleNoReturn().
// If we recently had such a no-return event, we need to collect garbage
// frames. We do that based on their 'real_stack' values -- everything that
// is lower than the current real_stack is garbage.
NOINLINE void FakeStack::GC(uptr real_stack) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack)
        flags[i] = 0;
    }
  }
  needs_gc_ = false;
}
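
// This works because the stack grows down: a frame whose recorded real_stack
// is below the current real_stack was created by a call that has already
// unwound (we just never saw its OnFree), so its flag can be safely cleared.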

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // SANITIZER_LINUX && !SANITIZER_ANDROID

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return nullptr;
  return t->fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return nullptr;
  return GetFakeStack();
}
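
// The TLS cache keeps the hot instrumentation path free of the
// GetCurrentThread() lookup; the __asan_option_detect_stack_use_after_return
// check makes the whole feature a cheap no-op when UAR detection is off.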

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return 0;
  uptr local_stack;
  uptr real_stack = reinterpret_cast<uptr>(&local_stack);
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff) return 0;  // Out of fake stack.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}

ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}
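
// Note the pairing: OnMalloc clears the frame's shadow (magic 0, addressable)
// before handing it out, while OnFree repoisons it with kMagic8, so any later
// access through a dangling pointer into the frame reports
// stack-use-after-return.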

} // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                      \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
      __asan_stack_malloc_##class_id(uptr size) {                             \
    return OnMalloc(class_id, size);                                          \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size) {                                                  \
    OnFree(ptr, class_id, size);                                              \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
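
// For reference, a sketch of how the compiler uses these entry points when
// detect_stack_use_after_return is on (illustrative, not exact codegen): for
// a function whose frame fits size class 1 (128 bytes), it emits roughly
//   uptr fake = __asan_stack_malloc_1(128);   // 0 => fake stack unavailable
//   char *frame = fake ? (char *)fake : (char *)alloca(128);
//   ... body uses 'frame' instead of the real stack frame ...
//   if (fake) __asan_stack_free_1(fake, 128);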

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }

SANITIZER_INTERFACE_ATTRIBUTE
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                   void **end) {
  FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
  if (!fs) return nullptr;
  uptr frame_beg, frame_end;
  FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
      reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
  if (!frame) return nullptr;
  if (frame->magic != kCurrentStackFrameMagic)
    return nullptr;
  if (beg) *beg = reinterpret_cast<void*>(frame_beg);
  if (end) *end = reinterpret_cast<void*>(frame_end);
  return reinterpret_cast<void*>(frame->real_stack);
}
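
// Example use from a debugger or tool (a sketch; both entry points are
// declared in sanitizer/asan_interface.h):
//   void *beg, *end;
//   void *fs = __asan_get_current_fake_stack();
//   if (__asan_addr_is_in_fake_stack(fs, addr, &beg, &end)) {
//     // addr points into a live fake frame occupying [beg, end).
//   }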

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_alloca_poison(uptr addr, uptr size) {
  uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
  uptr PartialRzAddr = addr + size;
  uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
  uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
  FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
  FastPoisonShadowPartialRightRedzone(
      PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
      RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
  FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
}
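
// Resulting shadow layout around an instrumented alloca of 'size' bytes at
// 'addr' (derived from the code above):
//   [addr - 32, addr)           left redzone, kAsanAllocaLeftMagic
//   [addr, addr + size)         the alloca itself, addressable
//   [addr + size, RightRzAddr)  partial right redzone up to the next
//                               32-byte boundary, kAsanAllocaRightMagic
//   [RightRzAddr, + 32)         full right redzone, kAsanAllocaRightMagic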

SANITIZER_INTERFACE_ATTRIBUTE
void __asan_allocas_unpoison(uptr top, uptr bottom) {
  if ((!top) || (top > bottom)) return;
  REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
               (bottom - top) / SHADOW_GRANULARITY);
}
} // extern "C"