//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
// May be overridden by front-end.
extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
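// Illustration (not part of this file): because the hooks above are WEAK, a
// client can observe allocations by linking in strong definitions of its own,
// for example:
//
//   extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
//     fprintf(stderr, "allocated %zu bytes at %p\n", size, ptr);
//   }
//
// Keep in mind that such a hook runs on every heap allocation in the program.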
namespace __tsan {

struct MapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }
  void OnUnmap(uptr p, uptr size) const {
    // We are about to unmap a chunk of user memory.
    // Mark the corresponding shadow memory as not needed.
    DontNeedShadowFor(p, size);
    // Mark the corresponding meta shadow memory as not needed.
    // Note the block does not contain any meta info at this point
    // (this happens after free).
    const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
    const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
    // Block came from LargeMmapAllocator, so must be large.
    // We rely on this in the calculations below.
    CHECK_GE(size, 2 * kPageSize);
    uptr diff = RoundUp(p, kPageSize) - p;
    if (diff != 0) {
      p += diff;
      size -= diff;
    }
    diff = p + size - RoundDown(p + size, kPageSize);
    if (diff != 0)
      size -= diff;
    FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
  }
};
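// Illustration of the rounding in OnUnmap above (hypothetical numbers, not the
// real TSan constants): suppose kPageSize == 0x2000 and the callback is given
// p == 0x7f0000001234, size == 0x6000.  The first adjustment advances p to
// 0x7f0000002000 and shrinks size to 0x5234; the second trims the tail so that
// p + size == 0x7f0000006000, leaving size == 0x4000.  Only the meta shadow
// for the fully covered pages [0x7f0000002000, 0x7f0000006000) is flushed; the
// partially covered head and tail pages are left untouched.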
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}

void InitializeAllocator() {
  allocator()->Init(common_flags()->allocator_may_return_null);
}
void AllocatorThreadStart(ThreadState *thr) {
  allocator()->InitCache(&thr->alloc_cache);
  internal_allocator()->InitCache(&thr->internal_alloc_cache);
}

void AllocatorThreadFinish(ThreadState *thr) {
  allocator()->DestroyCache(&thr->alloc_cache);
  internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
}

void AllocatorPrintStats() {
  allocator()->PrintStats();
}
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
      !flags()->report_signal_unsafe)
    return;
  VarSizeStackTrace stack;
  ObtainCurrentStack(thr, pc, &stack);
  if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeSignalUnsafe);
  rep.AddStack(stack, true);
  OutputReport(thr, rep);
}
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
    return allocator()->ReturnNullOrDie();
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  if (ctx && ctx->initialized)
    OnUserAlloc(thr, pc, (uptr)p, sz, true);
  if (signal)
    SignalUnsafeCall(thr, pc);
  return p;
}
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
    return allocator()->ReturnNullOrDie();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
  return p;
}
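// Note on the overflow check above: on a 64-bit target, a request such as
// calloc(1ull << 32, 1ull << 32) has a mathematical product of 2^64, which
// wraps to 0 in uptr arithmetic.  Without CallocShouldReturnNullDueToOverflow
// the call would "succeed" with a block far smaller than the caller expects.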
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
  if (ctx && ctx->initialized)
    OnUserFree(thr, pc, (uptr)p, true);
  allocator()->Deallocate(&thr->alloc_cache, p);
  if (signal)
    SignalUnsafeCall(thr, pc);
}
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  ctx->metamap.AllocBlock(thr, pc, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  else
    MemoryResetRange(thr, pc, (uptr)p, sz);
}
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
  CHECK_NE(p, (void*)0);
  uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
  DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
  if (write && thr->ignore_reads_and_writes == 0)
    MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      uptr oldsz = user_alloc_usable_size(p);
      internal_memcpy(p2, p, min(oldsz, sz));
    }
  }
  if (p)
    user_free(thr, pc, p);
  return p2;
}
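// As written, user_realloc follows the usual realloc contract: a null p acts
// as malloc, sz == 0 frees p and returns null, and otherwise min(oldsz, sz)
// bytes are copied into the new block before the old one is released.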
uptr user_alloc_usable_size(const void *p) {
  if (p == 0)
    return 0;
  MBlock *b = ctx->metamap.GetBlock((uptr)p);
  return b ? b->siz : 0;
}
void invoke_malloc_hook(void *ptr, uptr size) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
    return;
  __sanitizer_free_hook(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz, &thr->internal_alloc_cache);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p, &thr->internal_alloc_cache);
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatAllocated];
}

uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator()->GetStats(stats);
  return stats[AllocatorStatMapped];
}

uptr __sanitizer_get_free_bytes() {
  return 1;
}

uptr __sanitizer_get_unmapped_bytes() {
  return 1;
}

uptr __sanitizer_get_estimated_allocated_size(uptr size) {
  return size;
}

int __sanitizer_get_ownership(const void *p) {
  return allocator()->GetBlockBegin(p) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return user_alloc_usable_size(p);
}
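// Illustration (not part of this file): a program running under TSan can query
// these entry points, typically through the public header
// <sanitizer/allocator_interface.h>, e.g.
//
//   void *p = malloc(100);
//   size_t sz = __sanitizer_get_allocated_size(p);  // size recorded at
//                                                   // allocation (100 here)
//   size_t live = __sanitizer_get_current_allocated_bytes();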
void __tsan_on_thread_idle() {
  ThreadState *thr = cur_thread();
  allocator()->SwallowCache(&thr->alloc_cache);
  internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
  ctx->metamap.OnThreadIdle(thr);
}
}  // extern "C"