[official-gcc.git] / libsanitizer / tsan / tsan_platform_posix.cc
//===-- tsan_platform_posix.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// POSIX-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

#if !SANITIZER_GO
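// Maps the fixed shadow and meta-shadow address ranges for this platform and
// applies madvise-based tuning (no huge pages, exclusion from core dumps).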
void InitializeShadowMemory() {
  // Map memory shadow.
  uptr shadow =
      (uptr)MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
                               "shadow");
  if (shadow != ShadowBeg()) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and "
           "to link with -pie (%p, %p).\n", shadow, ShadowBeg());
    Die();
  }
  // This memory range is used for thread stacks and large user mmaps.
  // Frequently a thread uses only a small part of stack and similarly
  // a program uses a small part of large mmap. On some programs
  // we see 20% memory usage reduction without huge pages for this range.
  // FIXME: don't use constants here.
#if defined(__x86_64__)
  const uptr kMadviseRangeBeg = 0x7f0000000000ull;
  const uptr kMadviseRangeSize = 0x010000000000ull;
#elif defined(__mips64)
  const uptr kMadviseRangeBeg = 0xff00000000ull;
  const uptr kMadviseRangeSize = 0x0100000000ull;
#elif defined(__aarch64__) && defined(__APPLE__)
  uptr kMadviseRangeBeg = LoAppMemBeg();
  uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg();
#elif defined(__aarch64__)
  uptr kMadviseRangeBeg = 0;
  uptr kMadviseRangeSize = 0;
  if (vmaSize == 39) {
    kMadviseRangeBeg = 0x7d00000000ull;
    kMadviseRangeSize = 0x0300000000ull;
  } else if (vmaSize == 42) {
    kMadviseRangeBeg = 0x3f000000000ull;
    kMadviseRangeSize = 0x01000000000ull;
  } else {
    DCHECK(0);
  }
#elif defined(__powerpc64__)
  uptr kMadviseRangeBeg = 0;
  uptr kMadviseRangeSize = 0;
  if (vmaSize == 44) {
    kMadviseRangeBeg = 0x0f60000000ull;
    kMadviseRangeSize = 0x0010000000ull;
  } else if (vmaSize == 46) {
    kMadviseRangeBeg = 0x3f0000000000ull;
    kMadviseRangeSize = 0x010000000000ull;
  } else {
    DCHECK(0);
  }
#endif
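  // The shadow of an application range is kShadowMultiplier times larger than
  // the range itself, hence the multiplication below.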
  NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
                      kMadviseRangeSize * kShadowMultiplier);
  // Meta shadow is compressing and we don't flush it,
  // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory.
  // On one program it reduces memory consumption from 5GB to 2.5GB.
  NoHugePagesInRegion(MetaShadowBeg(), MetaShadowEnd() - MetaShadowBeg());
  if (common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
      ShadowBeg(), ShadowEnd(),
      (ShadowEnd() - ShadowBeg()) >> 30);
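
  // The meta shadow holds per-address references to heap-block and
  // synchronization-object metadata; it is mapped at a fixed address chosen
  // by the platform memory layout, just like the regular shadow above.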
  // Map meta shadow.
  uptr meta_size = MetaShadowEnd() - MetaShadowBeg();
  uptr meta =
      (uptr)MmapFixedNoReserve(MetaShadowBeg(), meta_size, "meta shadow");
  if (meta != MetaShadowBeg()) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and "
           "to link with -pie (%p, %p).\n", meta, MetaShadowBeg());
    Die();
  }
  if (common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(meta, meta_size);
  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
      meta, meta + meta_size, meta_size >> 30);

  InitializeShadowMemoryPlatform();
}
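
// Reserve [beg, end) with an inaccessible mapping so that nothing else can be
// placed in address ranges that must remain unused by the application.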
static void ProtectRange(uptr beg, uptr end) {
  CHECK_LE(beg, end);
  if (beg == end)
    return;
  if (beg != (uptr)MmapFixedNoAccess(beg, end - beg)) {
    Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
    Printf("FATAL: Make sure you are not using unlimited stack\n");
    Die();
  }
}
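
// Scan existing mappings to verify that nothing (e.g. a non-PIE executable)
// already occupies TSan's reserved ranges, then protect the gaps between the
// application, shadow, meta-shadow and trace regions.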
void CheckAndProtect() {
  // Ensure that the binary is indeed compiled with -pie.
  MemoryMappingLayout proc_maps(true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (IsAppMem(segment.start)) continue;
    if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
    if (segment.protection == 0)  // Zero page or mprotected.
      continue;
    if (segment.start >= VdsoBeg())  // vdso
      break;
    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
           segment.start, segment.end);
    Die();
  }

#if defined(__aarch64__) && defined(__APPLE__)
  ProtectRange(HeapMemEnd(), ShadowBeg());
  ProtectRange(ShadowEnd(), MetaShadowBeg());
  ProtectRange(MetaShadowEnd(), TraceMemBeg());
#else
  ProtectRange(LoAppMemEnd(), ShadowBeg());
  ProtectRange(ShadowEnd(), MetaShadowBeg());
#ifdef TSAN_MID_APP_RANGE
  ProtectRange(MetaShadowEnd(), MidAppMemBeg());
  ProtectRange(MidAppMemEnd(), TraceMemBeg());
#else
  ProtectRange(MetaShadowEnd(), TraceMemBeg());
#endif
  // Memory for traces is mapped lazily in MapThreadTrace.
  // Protect the whole range for now, so that user does not map something here.
  ProtectRange(TraceMemBeg(), TraceMemEnd());
  ProtectRange(TraceMemEnd(), HeapMemBeg());
  ProtectRange(HeapEnd(), HiAppMemBeg());
#endif
}
#endif

}  // namespace __tsan

#endif  // SANITIZER_POSIX