From c8a5013045b5aff8e45418925688ca670545980f Mon Sep 17 00:00:00 2001
From: Mike Hommey <mh@glandium.org>
Date: Fri, 18 Mar 2022 17:58:28 +0900
Subject: [PATCH] Revert "[lsan] Move out suppression of invalid PCs from
 StopTheWorld"

This reverts commit f86deb18cab6479a0961ade3807e4729f3a27bdf
because of permafail for a sizable amount of ASan test jobs, where the
worker would die without even leaving any logs.

---
 compiler-rt/lib/lsan/lsan_common.cpp | 108 +++++++++++++++++----------
 1 file changed, 67 insertions(+), 41 deletions(-)

diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
index 9101c704e5ff..2a83f62bd8dc 100644
--- a/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/compiler-rt/lib/lsan/lsan_common.cpp
@@ -81,11 +81,9 @@ class LeakSuppressionContext {
   SuppressionContext context;
   bool suppressed_stacks_sorted = true;
   InternalMmapVector<u32> suppressed_stacks;
-  const LoadedModule *suppress_module = nullptr;
 
-  void LazyInit();
   Suppression *GetSuppressionForAddr(uptr addr);
-  bool SuppressInvalid(const StackTrace &stack);
+  void LazyInit();
   bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
 
  public:
@@ -136,8 +134,6 @@ void LeakSuppressionContext::LazyInit() {
     if (&__lsan_default_suppressions)
       context.Parse(__lsan_default_suppressions());
     context.Parse(kStdSuppressions);
-    if (flags()->use_tls && flags()->use_ld_allocations)
-      suppress_module = GetLinker();
   }
 }
 
@@ -163,13 +159,6 @@ Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
   return s;
 }
 
-static uptr GetCallerPC(const StackTrace &stack) {
-  // The top frame is our malloc/calloc/etc. The next frame is the caller.
-  if (stack.size >= 2)
-    return stack.trace[1];
-  return 0;
-}
-
 # if SANITIZER_APPLE
 // Several pointers in the Objective-C runtime (method cache and class_rw_t,
 // for example) are tagged with additional bits we need to strip.
@@ -179,34 +168,6 @@ static inline void *TransformPointer(void *p) {
 }
 # endif
 
-// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
-// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
-// modules accounting etc.
-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
-// They are allocated with a __libc_memalign() call in allocate_and_init()
-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
-// blocks, but we can make sure they come from our own allocator by intercepting
-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
-// addresses are stored in a dynamically allocated array (the DTV) which is
-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
-// being reachable from the static TLS, and the dynamic TLS being reachable from
-// the DTV. This is because the initial DTV is allocated before our interception
-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
-// can't special-case it either, since we don't know its size.
-// Our solution is to include in the root set all allocations made from
-// ld-linux.so (which is where allocate_and_init() is implemented). This is
-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
-// which we don't care about).
-// On all other platforms, this simply checks to ensure that the caller pc is
-// valid before reporting chunks as leaked.
-bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
-  uptr caller_pc = GetCallerPC(stack);
-  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
-  // it as reachable, as we can't properly report its allocation stack anyway.
-  return !caller_pc ||
-         (suppress_module && suppress_module->containsAddress(caller_pc));
-}
-
 bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                             uptr hit_count, uptr total_size) {
   for (uptr i = 0; i < stack.size; i++) {
@@ -225,7 +186,7 @@ bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                       uptr total_size) {
   LazyInit();
   StackTrace stack = StackDepotGet(stack_trace_id);
-  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
+  if (!SuppressByRule(stack, hit_count, total_size))
     return false;
   suppressed_stacks_sorted = false;
   suppressed_stacks.push_back(stack_trace_id);
@@ -617,6 +578,68 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
   }
 }
 
+static uptr GetCallerPC(const StackTrace &stack) {
+  // The top frame is our malloc/calloc/etc. The next frame is the caller.
+  if (stack.size >= 2)
+    return stack.trace[1];
+  return 0;
+}
+
+struct InvalidPCParam {
+  Frontier *frontier;
+  bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+    u32 stack_id = m.stack_trace_id();
+    uptr caller_pc = 0;
+    if (stack_id > 0)
+      caller_pc = GetCallerPC(StackDepotGet(stack_id));
+    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+    // it as reachable, as we can't properly report its allocation stack anyway.
+    if (caller_pc == 0 || (param->skip_linker_allocations &&
+                           GetLinker()->containsAddress(caller_pc))) {
+      m.set_tag(kIgnored);
+      param->frontier->push_back(chunk);
+    }
+  }
+}
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+static void ProcessPC(Frontier *frontier) {
+  InvalidPCParam arg;
+  arg.frontier = frontier;
+  arg.skip_linker_allocations =
+      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+  ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
 // Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                               Frontier *frontier, tid_t caller_tid,
@@ -633,6 +656,9 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
   ProcessRootRegions(frontier);
   FloodFillTag(frontier, kReachable);
 
+  CHECK_EQ(0, frontier->size());
+  ProcessPC(frontier);
+
   // The check here is relatively expensive, so we do this in a separate flood
   // fill. That way we can skip the check for chunks that are reachable
   // otherwise.
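
For readers following the control flow this patch restores, here is a minimal
self-contained sketch (not part of the patch) of the technique: walk every
allocation, recover the caller PC from frame 1 of its allocation stack, and
mark the chunk ignored when the PC is unknown or falls inside the dynamic
linker's address range. The types Chunk and ModuleRange are hypothetical
stand-ins for compiler-rt's LsanMetadata and LoadedModule; only the caller-PC
selection mirrors the patched code.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum Tag { kDirectlyLeaked, kReachable, kIgnored };

    struct Chunk {
      std::vector<std::uintptr_t> stack;  // allocation stack, frame 0 = interceptor
      Tag tag = kDirectlyLeaked;
    };

    // Hypothetical stand-in for LoadedModule::containsAddress() on ld-linux.so.
    struct ModuleRange {
      std::uintptr_t begin, end;
      bool contains(std::uintptr_t pc) const { return pc >= begin && pc < end; }
    };

    // Frame 0 is the malloc/calloc interceptor itself; frame 1 is its caller.
    static std::uintptr_t GetCallerPC(const std::vector<std::uintptr_t> &stack) {
      return stack.size() >= 2 ? stack[1] : 0;
    }

    // Mirrors the restored MarkInvalidPCCb logic: an unknown caller (e.g. a
    // chunk allocated on a coroutine stack) or a caller inside the linker
    // means the chunk is treated as reachable rather than reported as a leak.
    static void MarkInvalidPC(std::vector<Chunk> &chunks,
                              const ModuleRange &linker,
                              bool skip_linker_allocations) {
      for (Chunk &c : chunks) {
        if (c.tag == kReachable || c.tag == kIgnored)
          continue;
        std::uintptr_t pc = GetCallerPC(c.stack);
        if (pc == 0 || (skip_linker_allocations && linker.contains(pc)))
          c.tag = kIgnored;
      }
    }

    int main() {
      ModuleRange linker{0x7000, 0x8000};  // pretend ld-linux.so lives here
      std::vector<Chunk> chunks = {
          {{0x1000, 0x2000}, kDirectlyLeaked},  // ordinary caller: stays a leak
          {{0x1000, 0x7abc}, kDirectlyLeaked},  // caller inside linker: ignored
          {{0x1000}, kDirectlyLeaked},          // no caller PC: ignored
      };
      MarkInvalidPC(chunks, linker, /*skip_linker_allocations=*/true);
      for (const Chunk &c : chunks)
        std::printf("tag = %d\n", c.tag);  // prints 0, 2, 2
    }

The key difference the revert reinstates is where this pass runs: as a
ForEachChunk sweep inside ClassifyAllChunks (after the reachability flood
fill), rather than per-report inside LeakSuppressionContext::Suppress().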