//===-- tsan_mman.cc ------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_mman.h"
#include "tsan_rtl.h"
#include "tsan_report.h"
#include "tsan_flags.h"
// May be overridden by front-end.
extern "C" void WEAK __tsan_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}

extern "C" void WEAK __tsan_free_hook(void *ptr) {
  (void)ptr;
}
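// Because the hooks are declared WEAK, a front-end or the application can
// provide strong definitions to observe allocator activity.  A hypothetical
// override (user code, not part of this file) could look like:
//
//   extern "C" void __tsan_malloc_hook(void *ptr, uptr size) {
//     /* record or log the allocation */
//   }
//
// The run-time invokes these through invoke_malloc_hook() and
// invoke_free_hook() below.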
namespace __tsan {
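// The user-memory allocator is kept in static storage and initialized
// explicitly by InitializeAllocator() rather than by a global constructor.
// Each thread keeps a local cache (thr->alloc_cache) that
// AlloctorThreadFinish() returns to the central allocator when the thread
// finishes.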
static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
Allocator *allocator() {
  return reinterpret_cast<Allocator*>(&allocator_placeholder);
}
void InitializeAllocator() {
  allocator()->Init();
}

void AlloctorThreadFinish(ThreadState *thr) {
  allocator()->SwallowCache(&thr->alloc_cache);
}
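// Reports a signal-unsafe call (an allocation or deallocation) performed
// inside a signal handler, if the report_signal_unsafe flag is set and the
// report is not suppressed.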
static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
  if (!thr->in_signal_handler || !flags()->report_signal_unsafe)
    return;
  Context *ctx = CTX();
  StackTrace stack;
  stack.ObtainCurrent(thr, pc);
  Lock l(&ctx->thread_mtx);
  ScopedReport rep(ReportTypeSignalUnsafe);
  if (!IsFiredSuppression(ctx, rep, stack)) {
    rep.AddStack(&stack);
    OutputReport(ctx, rep, rep.GetReport()->stacks[0]);
  }
}
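// Allocates user memory and constructs an MBlock (size, allocating thread
// and allocation stack) in the allocator's per-block metadata.  Once the
// run-time is initialized, the new range is imitated as a write by the
// current thread (MemoryRangeImitateWrite).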
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align) {
  CHECK_GT(thr->in_rtl, 0);
  void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
  if (p == 0)
    return 0;
  MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
  b->size = sz;
  b->head = 0;
  b->alloc_tid = thr->unique_id;
  b->alloc_stack_id = CurrentStackId(thr, pc);
  if (CTX() && CTX()->initialized) {
    MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
  }
  DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
  SignalUnsafeCall(thr, pc);
  return p;
}
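// Frees a user block: any SyncVar objects attached to the block's MBlock
// are destroyed first, then the range is marked as freed (so that later
// accesses to it can be diagnosed) and the memory is returned to the
// allocator.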
void user_free(ThreadState *thr, uptr pc, void *p) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_NE(p, (void*)0);
  DPrintf("#%d: free(%p)\n", thr->tid, p);
  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
  if (b->head) {
    Lock l(&b->mtx);
    for (SyncVar *s = b->head; s;) {
      SyncVar *res = s;
      s = s->next;
      StatInc(thr, StatSyncDestroyed);
      res->mtx.Lock();
      res->mtx.Unlock();
      DestroyAndFree(res);
    }
    b->head = 0;
  }
  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
    MemoryRangeFreed(thr, pc, (uptr)p, b->size);
  }
  b->~MBlock();
  allocator()->Deallocate(&thr->alloc_cache, p);
  SignalUnsafeCall(thr, pc);
}
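// realloc is implemented as allocate + copy + free: a new block of the
// requested size is allocated, min(old size, new size) bytes are copied
// over, and the old block is released.  realloc(p, 0) frees p and
// returns 0.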
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
  CHECK_GT(thr->in_rtl, 0);
  void *p2 = 0;
  // FIXME: Handle "shrinking" more efficiently,
  // it seems that some software actually does this.
  if (sz) {
    p2 = user_alloc(thr, pc, sz);
    if (p2 == 0)
      return 0;
    if (p) {
      MBlock *b = user_mblock(thr, p);
      internal_memcpy(p2, p, min(b->size, sz));
    }
  }
  if (p) {
    user_free(thr, pc, p);
  }
  return p2;
}
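// Returns the MBlock metadata for a pointer that may point anywhere inside
// an allocated block; GetBlockBegin() locates the start of the block first.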
MBlock *user_mblock(ThreadState *thr, void *p) {
  CHECK_NE(p, (void*)0);
  Allocator *a = allocator();
  void *b = a->GetBlockBegin(p);
  CHECK_NE(b, 0);
  return (MBlock*)a->GetMetaData(b);
}
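// The hook wrappers do nothing until the run-time is initialized, and skip
// calls that originate from inside the run-time itself (thr->in_rtl), so
// user-supplied hooks observe only the application's own allocations.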
void invoke_malloc_hook(void *ptr, uptr size) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_malloc_hook(ptr, size);
}

void invoke_free_hook(void *ptr) {
  Context *ctx = CTX();
  ThreadState *thr = cur_thread();
  if (ctx == 0 || !ctx->initialized || thr->in_rtl)
    return;
  __tsan_free_hook(ptr);
}
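// Allocation routines for the run-time's own data structures.  A thread
// with thr->nomalloc set must not allocate here; nomalloc is cleared before
// CHECK(0) fires because the CHECK machinery itself allocates.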
void *internal_alloc(MBlockType typ, uptr sz) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  return InternalAlloc(sz);
}

void internal_free(void *p) {
  ThreadState *thr = cur_thread();
  CHECK_GT(thr->in_rtl, 0);
  if (thr->nomalloc) {
    thr->nomalloc = 0;  // CHECK calls internal_malloc().
    CHECK(0);
  }
  InternalFree(p);
}

}  // namespace __tsan