//===-- asan_fake_stack.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// FakeStack is used to detect use-after-return bugs.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_poisoning.h"
#include "asan_thread.h"

namespace __asan {

static const u64 kMagic1 = kAsanStackAfterReturnMagic;
static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
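
// For example, assuming kAsanStackAfterReturnMagic is 0xf5, kMagic8 is
// 0xf5f5f5f5f5f5f5f5: the magic byte replicated across a u64, so a single
// 8-byte store in SetShadow below fills eight shadow bytes at once.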

// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
  CHECK_EQ(SHADOW_SCALE, 3);  // This code expects SHADOW_SCALE=3.
  u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
  if (class_id <= 6) {
    for (uptr i = 0; i < (1U << class_id); i++)
      shadow[i] = magic;
  } else {
    // The size class is too big, it's cheaper to poison only size bytes.
    PoisonShadow(ptr, size, static_cast<u8>(magic));
  }
}
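
// Note on SetShadow above: with SHADOW_SCALE=3, a frame of
// 2^(kMinStackFrameSizeLog + class_id) bytes maps to 2^class_id u64-sized
// shadow words, which is exactly what the inlined loop writes (assuming
// kMinStackFrameSizeLog == 6, i.e. 64-byte minimal frames).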

FakeStack *FakeStack::Create(uptr stack_size_log) {
  static uptr kMinStackSizeLog = 16;
  static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
  if (stack_size_log < kMinStackSizeLog)
    stack_size_log = kMinStackSizeLog;
  if (stack_size_log > kMaxStackSizeLog)
    stack_size_log = kMaxStackSizeLog;
  FakeStack *res = reinterpret_cast<FakeStack *>(
      MmapOrDie(RequiredSize(stack_size_log), "FakeStack"));
  res->stack_size_log_ = stack_size_log;
  if (common_flags()->verbosity) {
    u8 *p = reinterpret_cast<u8 *>(res);
    Report("T%d: FakeStack created: %p -- %p stack_size_log: %zd\n",
           GetCurrentTidOrInvalid(), p,
           p + FakeStack::RequiredSize(stack_size_log), stack_size_log);
  }
  return res;
}
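
// Create clamps stack_size_log so that each size-class region spans between
// 64 KB (2^16) and 16 MB (2^24) on 32-bit targets, or 256 MB (2^28) on
// 64-bit ones.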

void FakeStack::Destroy() {
  PoisonAll(0);
  UnmapOrDie(this, RequiredSize(stack_size_log_));
}

void FakeStack::PoisonAll(u8 magic) {
  PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
               magic);
}

FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
                               uptr real_stack) {
  CHECK_LT(class_id, kNumberOfSizeClasses);
  if (needs_gc_)
    GC(real_stack);
  uptr &hint_position = hint_position_[class_id];
  const int num_iter = NumberOfFrames(stack_size_log, class_id);
  u8 *flags = GetFlags(stack_size_log, class_id);
  for (int i = 0; i < num_iter; i++) {
    uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
    // This part is tricky. On one hand, checking and setting flags[pos]
    // should be atomic to ensure async-signal safety. But on the other hand,
    // if the signal arrives between checking and setting flags[pos], the
    // signal handler's fake stack will start from a different hint_position
    // and so will not touch this particular byte. So, it is safe to do this
    // with regular non-atomic load and store (at least I was not able to make
    // this code crash).
    if (flags[pos]) continue;
    flags[pos] = 1;
    FakeFrame *res = reinterpret_cast<FakeFrame *>(
        GetFrame(stack_size_log, class_id, pos));
    res->real_stack = real_stack;
    *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
    return res;
  }
  return 0;  // We are out of fake stack.
}
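
// Allocate stores a pointer to the frame's flag byte inside the frame itself
// (via SavedFlagPtr), so Deallocate can release the frame by clearing that
// byte without recomputing its position.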

uptr FakeStack::AddrIsInFakeStack(uptr ptr) {
  uptr stack_size_log = this->stack_size_log();
  uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
  uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
  if (ptr < beg || ptr >= end) return 0;
  uptr class_id = (ptr - beg) >> stack_size_log;
  uptr base = beg + (class_id << stack_size_log);
  CHECK_LE(base, ptr);
  CHECK_LT(ptr, base + (1UL << stack_size_log));
  uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
  return base + pos * BytesInSizeClass(class_id);
}
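
// Example for AddrIsInFakeStack: each size class occupies a contiguous
// 2^stack_size_log region, so (ptr - beg) >> stack_size_log picks the class,
// and the in-class offset divided by the frame size picks the frame. E.g.
// with stack_size_log == 16 and kMinStackFrameSizeLog == 6 (an assumption
// here), class 3 frames are 512 bytes, so ptr == beg + 3 * 65536 + 1000
// resolves to frame 1 of class 3, i.e. base + 512.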

void FakeStack::HandleNoReturn() {
  needs_gc_ = true;
}

// When throw, longjmp or some such happens, we don't call OnFree() and
// as a result may leak one or more fake frames. The good news is that
// we are notified about all such events by HandleNoReturn().
// If we recently had such a no-return event we need to collect garbage frames.
// We do it based on their 'real_stack' values: everything that is lower
// than the current real_stack is garbage.
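// (On the usual downward-growing stack, a frame whose real_stack is below the
// current one belongs to a call deeper than where we are executing now, so
// that call must have already exited.)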
NOINLINE void FakeStack::GC(uptr real_stack) {
  uptr collected = 0;
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      if (ff->real_stack < real_stack) {
        flags[i] = 0;
        collected++;
      }
    }
  }
  needs_gc_ = false;
}

void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
  for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
    u8 *flags = GetFlags(stack_size_log(), class_id);
    for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
         i++) {
      if (flags[i] == 0) continue;  // not allocated.
      FakeFrame *ff = reinterpret_cast<FakeFrame *>(
          GetFrame(stack_size_log(), class_id, i));
      uptr begin = reinterpret_cast<uptr>(ff);
      callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
    }
  }
}
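
// ForEachFakeFrame reports every live fake frame as an address range;
// presumably this is how tools such as the leak checker learn to scan fake
// frames the way they scan the real stack.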

#if SANITIZER_LINUX && !SANITIZER_ANDROID
static THREADLOCAL FakeStack *fake_stack_tls;

FakeStack *GetTLSFakeStack() {
  return fake_stack_tls;
}
void SetTLSFakeStack(FakeStack *fs) {
  fake_stack_tls = fs;
}
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif  // SANITIZER_LINUX && !SANITIZER_ANDROID

static FakeStack *GetFakeStack() {
  AsanThread *t = GetCurrentThread();
  if (!t) return 0;
  return t->fake_stack();
}

static FakeStack *GetFakeStackFast() {
  if (FakeStack *fs = GetTLSFakeStack())
    return fs;
  if (!__asan_option_detect_stack_use_after_return)
    return 0;
  return GetFakeStack();
}
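
// GetFakeStackFast orders its checks by cost: a TLS hit is cheapest, the
// global flag check lets us bail out when use-after-return detection is
// disabled, and only then do we fall back to the full thread lookup.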

ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) {
  FakeStack *fs = GetFakeStackFast();
  if (!fs) return real_stack;
  FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
  if (!ff)
    return real_stack;  // Out of fake stack, return the real one.
  uptr ptr = reinterpret_cast<uptr>(ff);
  SetShadow(ptr, size, class_id, 0);
  return ptr;
}
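
// SetShadow(ptr, size, class_id, 0) marks the returned fake frame as
// addressable; the instrumented function is then expected to poison its own
// redzones inside that frame, just as it would on the real stack.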

ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size,
                          uptr real_stack) {
  if (ptr == real_stack)
    return;
  FakeStack::Deallocate(ptr, class_id);
  SetShadow(ptr, size, class_id, kMagic8);
}
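
// After OnFree the frame's shadow is filled with kMagic8, i.e. the
// stack-after-return magic byte, so any later access through a dangling
// pointer into this frame is reported as a use-after-return.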

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id)                      \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr                               \
  __asan_stack_malloc_##class_id(uptr size, uptr real_stack) {                \
    return __asan::OnMalloc(class_id, size, real_stack);                      \
  }                                                                           \
  extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
      uptr ptr, uptr size, uptr real_stack) {                                 \
    __asan::OnFree(ptr, class_id, size, real_stack);                          \
  }

DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
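
// A hypothetical sketch of how compiler-inserted instrumentation might use
// these entry points for a function whose frame falls into class 3 (the
// exact protocol lives in the instrumentation pass, not here; kFrameSize
// and `local` are illustrative names only):
//
//   uptr real_stack = reinterpret_cast<uptr>(&local);
//   uptr frame = __asan_stack_malloc_3(kFrameSize, real_stack);
//   // ... the function body addresses its locals relative to `frame` ...
//   __asan_stack_free_3(frame, kFrameSize, real_stack);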