//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"

// May be overridden by front-end.
SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}

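// Note: because the two hooks above are weak default implementations
// (SANITIZER_WEAK_DEFAULT_IMPL), a front-end can observe heap activity by
// providing strong definitions, e.g. (sketch, defined outside this file):
//   extern "C" void __sanitizer_malloc_hook(void *ptr, uptr size) { /* ... */ }
//   extern "C" void __sanitizer_free_hook(void *ptr) { /* ... */ }
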
namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    ReleaseMemoryToOS((uptr)MemToMeta(p), size / kMetaRatio);
  }
};

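// Storage for the user-memory allocator. It lives in a static buffer, is
// accessed through allocator(), and is set up in InitializeAllocator() below.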
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

struct GlobalProc {
  Mutex mtx;
  Processor *proc;

  GlobalProc()
      : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
      , proc(ProcCreate()) {
  }
};

static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
GlobalProc *global_proc() {
  return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
}

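// ScopedGlobalProcessor temporarily wires the current thread to the global
// Processor when the thread does not have a Processor of its own (see the
// comment in the constructor for the known code paths).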
ScopedGlobalProcessor::ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc())
    return;
  // If we don't have a proc, use the global one.
  // There are currently only two known cases where this path is triggered:
  //   __interceptor_free
  //   __nptl_deallocate_tsd
  //   start_thread
  //   clone
  // and:
  //   ResetRange
  //   __interceptor_munmap
  //   __deallocate_stack
  //   start_thread
  //   clone
  // Ideally, we destroy thread state (and unwire proc) when a thread actually
  // exits (i.e. when we join/wait it). Then we would not need the global proc.
  gp->mtx.Lock();
  ProcWire(gp->proc, thr);
}

ScopedGlobalProcessor::~ScopedGlobalProcessor() {
  GlobalProc *gp = global_proc();
  ThreadState *thr = cur_thread();
  if (thr->proc() != gp->proc)
    return;
  ProcUnwire(gp->proc, thr);
  gp->mtx.Unlock();
}

void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}

void InitializeAllocatorLate() {
  new(global_proc()) GlobalProc();
}

void AllocatorProcStart(Processor *proc) {
  allocator()->InitCache(&proc->alloc_cache);
  internal_allocator()->InitCache(&proc->internal_alloc_cache);
}

void AllocatorProcFinish(Processor *proc) {
  allocator()->DestroyCache(&proc->alloc_cache);
  internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}

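// Reports a "signal-unsafe call inside a signal handler" if the current thread
// is executing a signal handler and the report_signal_unsafe flag is set
// (subject to suppressions).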
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}

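// user_alloc/user_calloc/user_free wrap the underlying allocator and, once the
// runtime is initialized, notify the race detector via OnUserAlloc/OnUserFree.
// Requests with size or alignment of 2^40 bytes or more are rejected up front.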
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDieOnBadRequest();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}

void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDieOnBadRequest();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}

void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  ScopedGlobalProcessor sgp;
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->proc()->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}

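// OnUserAlloc/OnUserFree record the block in the metadata map and, unless the
// thread is ignoring accesses, imitate a write to (or mark as freed) the whole
// range, presumably so that racing accesses to the block are reported.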
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}

void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}

void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  void *p2 = user_alloc(thr, pc, sz);
  if (p2 == 0)
    return 0;
  if (p) {
    uptr oldsz = user_alloc_usable_size(p);
    internal_memcpy(p2, p, min(oldsz, sz));
    user_free(thr, pc, p);
  }
  return p2;
}

uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  if (!b)
    return 0;  // Not a valid pointer.
  if (b->siz == 0)
    return 1;  // Zero-sized allocations are actually 1 byte.
  return b->siz;
}

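// invoke_malloc_hook/invoke_free_hook forward to both the weak
// __sanitizer_*_hook functions above and the hooks registered via
// RunMallocHooks/RunFreeHooks, but only after the runtime is initialized and
// when the thread is not ignoring interceptors.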
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
  RunMallocHooks(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
  RunFreeHooks(ptr);
}

void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->proc()->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

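// Public C interface: allocator statistics and ownership queries declared in
// sanitizer_common/sanitizer_allocator_interface.h.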
258 extern "C" {
259 uptr __sanitizer_get_current_allocated_bytes() {
260 uptr stats[AllocatorStatCount];
261 allocator()->GetStats(stats);
262 return stats[AllocatorStatAllocated];
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

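// Free and unmapped byte counts are not computed here; a constant placeholder
// value is returned instead.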
uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}

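// Called when a thread becomes idle: drains the per-Processor allocator caches
// and lets the metadata map release memory associated with this Processor.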
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->proc()->alloc_cache);
  internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
  ctx->metamap.OnProcIdle(thr->proc());
}
}  // extern "C"