//===-- tsan_platform_posix.cc --------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// POSIX-specific code.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_POSIX

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

namespace __tsan {

#ifndef SANITIZER_GO
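// Maps the fixed shadow and meta-shadow regions (base addresses come from
// tsan_platform.h) at process start-up. Application addresses are translated
// into the shadow region via MemToShadow(), so both mappings must land
// exactly at their expected base addresses.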
void InitializeShadowMemory() {
  // Map memory shadow.
  uptr shadow =
      (uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg, "shadow");
  if (shadow != kShadowBeg) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and "
           "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
    Die();
  }
  // This memory range is used for thread stacks and large user mmaps.
  // Frequently a thread uses only a small part of stack and similarly
  // a program uses a small part of large mmap. On some programs
  // we see 20% memory usage reduction without huge pages for this range.
  // FIXME: don't use constants here.
#if defined(__x86_64__)
  const uptr kMadviseRangeBeg  = 0x7f0000000000ull;
  const uptr kMadviseRangeSize = 0x010000000000ull;
#elif defined(__mips64)
  const uptr kMadviseRangeBeg  = 0xff00000000ull;
  const uptr kMadviseRangeSize = 0x0100000000ull;
#elif defined(__aarch64__)
  const uptr kMadviseRangeBeg  = 0x7e00000000ull;
  const uptr kMadviseRangeSize = 0x0100000000ull;
#endif
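  // NoHugePagesInRegion() (a sanitizer_common helper) is expected to madvise
  // the shadow of this range with MADV_NOHUGEPAGE where the kernel supports
  // it. The shadow for an app range is kShadowMultiplier times larger, hence
  // the scaled size below.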
  NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
                      kMadviseRangeSize * kShadowMultiplier);
  // Meta shadow is compressing and we don't flush it,
  // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory.
  // On one program it reduces memory consumption from 5GB to 2.5GB.
  NoHugePagesInRegion(kMetaShadowBeg, kMetaShadowEnd - kMetaShadowBeg);
  if (common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
      kShadowBeg, kShadowEnd,
      (kShadowEnd - kShadowBeg) >> 30);
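
  // The meta shadow holds per-address metadata (heap block and sync object
  // descriptors used by the MetaMap) and is mapped the same way as the
  // primary shadow.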
  // Map meta shadow.
  uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
  uptr meta =
      (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size, "meta shadow");
  if (meta != kMetaShadowBeg) {
    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
    Printf("FATAL: Make sure to compile with -fPIE and "
           "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
    Die();
  }
  if (common_flags()->use_madv_dontdump)
    DontDumpShadowMemory(meta, meta_size);
  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
      meta, meta + meta_size, meta_size >> 30);

  InitializeShadowMemoryPlatform();
}
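
// Reserves [beg, end) with a no-access mapping so that any attempt to map or
// touch memory in the gaps between the fixed TSan regions fails early instead
// of silently corrupting shadow state.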
static void ProtectRange(uptr beg, uptr end) {
  CHECK_LE(beg, end);
  if (beg == end)
    return;
  if (beg != (uptr)MmapNoAccess(beg, end - beg)) {
    Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
    Printf("FATAL: Make sure you are not using unlimited stack\n");
    Die();
  }
}
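
// Walks the existing memory mappings (/proc/self/maps on Linux, via
// MemoryMappingLayout) to verify that everything already mapped falls into
// the expected application ranges, i.e. that the binary was really linked
// with -pie, and then reserves the remaining address-space gaps with
// no-access mappings.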
void CheckAndProtect() {
  // Ensure that the binary is indeed compiled with -pie.
  MemoryMappingLayout proc_maps(true);
  uptr p, end, prot;
  while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) {
    if (IsAppMem(p))
      continue;
    if (p >= kHeapMemEnd &&
        p < HeapEnd())
      continue;
    if (prot == 0)  // Zero page or mprotected.
      continue;
    if (p >= kVdsoBeg)  // vdso
      break;
    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
    Die();
  }
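
  // Reserve the unused gaps between the app, shadow, meta shadow, trace and
  // heap regions so that nothing else can be mapped there.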
  ProtectRange(kLoAppMemEnd, kShadowBeg);
  ProtectRange(kShadowEnd, kMetaShadowBeg);
  ProtectRange(kMetaShadowEnd, kTraceMemBeg);
  // Memory for traces is mapped lazily in MapThreadTrace.
  // Protect the whole range for now, so that user does not map something here.
  ProtectRange(kTraceMemBeg, kTraceMemEnd);
  ProtectRange(kTraceMemEnd, kHeapMemBeg);
  ProtectRange(HeapEnd(), kHiAppMemBeg);
}

#endif

}  // namespace __tsan

#endif  // SANITIZER_POSIX