Don't skip check for warning at line 411 in Wattributes.c on hppa*64*-*-*
[official-gcc.git] / libsanitizer / sanitizer_common / sanitizer_stackdepot.cpp
bloba746d4621936c62555caa89efcf533804f4716d7
1 //===-- sanitizer_stackdepot.cpp ------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is shared between AddressSanitizer and ThreadSanitizer
10 // run-time libraries.
11 //===----------------------------------------------------------------------===//
13 #include "sanitizer_stackdepot.h"
15 #include "sanitizer_atomic.h"
16 #include "sanitizer_common.h"
17 #include "sanitizer_hash.h"
18 #include "sanitizer_mutex.h"
19 #include "sanitizer_stack_store.h"
20 #include "sanitizer_stackdepotbase.h"
22 namespace __sanitizer {
// One node of the global stack-depot hash table.  The actual frames live in
// `stackStore`; the node keeps only the 64-bit hash (used for equality) and
// the StackStore id needed to load the trace back.
struct StackDepotNode {
  using hash_type = u64;
  hash_type stack_hash;     // full 64-bit MurMur2 hash of the trace
  u32 link;                 // next node in the hash-bucket chain
  StackStore::Id store_id;  // where the frames live inside stackStore

  // Log2 of the hash-table size; smaller on Android to save memory.
  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;

  typedef StackTrace args_type;

  // The full 64-bit hash is stored in the node, so comparing hashes alone is
  // considered sufficient; the frames themselves are never re-compared.
  bool eq(hash_type hash, const args_type &args) const {
    return hash == stack_hash;
  }

  static uptr allocated();

  // Hashes the frame PCs plus the trace tag.
  static hash_type hash(const args_type &args) {
    MurMur2Hash64Builder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    H.add(args.tag);
    return H.get();
  }

  // Only non-empty traces with a frame array are accepted into the depot.
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }

  void store(u32 id, const args_type &args, hash_type hash);
  args_type load(u32 id) const;
  static StackDepotHandle get_handle(u32 id);

  typedef StackDepotHandle handle_type;
};
// Shared storage for the actual stack frames of every interned trace.
static StackStore stackStore;

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
// Keep mutable data out of frequently access nodes to improve caching
// efficiency.  Use counters are stored in a parallel map indexed by the same
// node ids as theDepot.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
                   StackDepot::kNodesSize2>
    useCounts;
65 int StackDepotHandle::use_count() const {
66 return atomic_load_relaxed(&useCounts[id_]);
69 void StackDepotHandle::inc_use_count_unsafe() {
70 atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
73 uptr StackDepotNode::allocated() {
74 return stackStore.Allocated() + useCounts.MemoryUsage();
77 static void CompressStackStore() {
78 u64 start = Verbosity() >= 1 ? MonotonicNanoTime() : 0;
79 uptr diff = stackStore.Pack(static_cast<StackStore::Compression>(
80 Abs(common_flags()->compress_stack_depot)));
81 if (!diff)
82 return;
83 if (Verbosity() >= 1) {
84 u64 finish = MonotonicNanoTime();
85 uptr total_before = theDepot.GetStats().allocated + diff;
86 VPrintf(1, "%s: StackDepot released %zu KiB out of %zu KiB in %llu ms\n",
87 SanitizerToolName, diff >> 10, total_before >> 10,
88 (finish - start) / 1000000);
92 namespace {
// Background thread that compresses the stack store on demand.  The thread is
// started lazily on the first NewWorkNotify() and torn down either with
// Stop() or with the LockAndStop()/Unlock() pair used by
// StackDepotLockAll()/StackDepotUnlockAll().
class CompressThread {
 public:
  constexpr CompressThread() = default;
  void NewWorkNotify();
  void Stop();
  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;

 private:
  enum class State {
    NotStarted = 0,
    Started,
    Failed,   // internal_start_thread() returned null
    Stopped,
  };

  void Run();

  // Blocks until new work is posted; returns false once run_ has been
  // cleared, telling Run() to exit its loop.
  bool WaitForWork() {
    semaphore_.Wait();
    return atomic_load(&run_, memory_order_acquire);
  }

  Semaphore semaphore_ = {};
  StaticSpinMutex mutex_ = {};
  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
  atomic_uint8_t run_ = {};
};

static CompressThread compress_thread;
// Called when the stack store has new data worth compressing.  Behavior is
// driven by the compress_stack_depot flag: 0 - do nothing; > 0 - hand the
// work to the background thread, starting it lazily on first use; otherwise
// (negative value, or the thread failed to start) fall through and compress
// synchronously on the calling thread.
void CompressThread::NewWorkNotify() {
  int compress = common_flags()->compress_stack_depot;
  if (!compress)
    return;
  if (compress > 0 /* for testing or debugging */) {
    SpinMutexLock l(&mutex_);
    if (state_ == State::NotStarted) {
      // run_ must be set before the thread starts so WaitForWork() sees it.
      atomic_store(&run_, 1, memory_order_release);
      CHECK_EQ(nullptr, thread_);
      thread_ = internal_start_thread(
          [](void *arg) -> void * {
            reinterpret_cast<CompressThread *>(arg)->Run();
            return nullptr;
          },
          this);
      state_ = thread_ ? State::Started : State::Failed;
    }
    if (state_ == State::Started) {
      semaphore_.Post();
      return;
    }
  }
  CompressStackStore();
}
151 void CompressThread::Run() {
152 VPrintf(1, "%s: StackDepot compression thread started\n", SanitizerToolName);
153 while (WaitForWork()) CompressStackStore();
154 VPrintf(1, "%s: StackDepot compression thread stopped\n", SanitizerToolName);
// Stops and joins the background thread.  The thread handle is copied out
// under the mutex and joined outside of it, so we never block on the join
// while holding the spin lock.
void CompressThread::Stop() {
  void *t = nullptr;
  {
    SpinMutexLock l(&mutex_);
    if (state_ != State::Started)
      return;
    state_ = State::Stopped;
    CHECK_NE(nullptr, thread_);
    t = thread_;
    thread_ = nullptr;
  }
  // Clearing run_ makes WaitForWork() return false; the Post() wakes the
  // thread so it can observe that and exit.
  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(t);
}
// Stops the thread like Stop(), but intentionally returns with mutex_ still
// held (on every path, including the early return) — the matching Unlock()
// releases it later.  Unlike Stop(), the state is reset to NotStarted so the
// thread can be restarted afterwards.
void CompressThread::LockAndStop() {
  mutex_.Lock();
  if (state_ != State::Started)
    return;
  CHECK_NE(nullptr, thread_);

  atomic_store(&run_, 0, memory_order_release);
  semaphore_.Post();
  internal_join_thread(thread_);
  // Allow to restart after Unlock() if needed.
  state_ = State::NotStarted;
  thread_ = nullptr;
}
187 void CompressThread::Unlock() { mutex_.Unlock(); }
189 } // namespace
191 void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
192 stack_hash = hash;
193 uptr pack = 0;
194 store_id = stackStore.Store(args, &pack);
195 if (LIKELY(!pack))
196 return;
197 compress_thread.NewWorkNotify();
200 StackDepotNode::args_type StackDepotNode::load(u32 id) const {
201 if (!store_id)
202 return {};
203 return stackStore.Load(store_id);
206 StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
208 u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }
210 StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
211 return StackDepotNode::get_handle(theDepot.Put(stack));
214 StackTrace StackDepotGet(u32 id) {
215 return theDepot.Get(id);
// Locks every piece of the depot (presumably around fork — confirm with
// callers).  The compression thread is stopped (with its mutex kept held) so
// it cannot touch the store while everything is locked.  The acquisition
// order here is the exact reverse of the release order in
// StackDepotUnlockAll().
void StackDepotLockAll() {
  theDepot.LockAll();
  compress_thread.LockAndStop();
  stackStore.LockAll();
}
// Reverses StackDepotLockAll(): releases the locks in the opposite order
// they were acquired.
void StackDepotUnlockAll() {
  stackStore.UnlockAll();
  compress_thread.Unlock();
  theDepot.UnlockAll();
}
// Dumps all stored stacks; compiled out in the Go runtime build.
void StackDepotPrintAll() {
#if !SANITIZER_GO
  theDepot.PrintAll();
#endif
}
236 void StackDepotStopBackgroundThread() { compress_thread.Stop(); }
238 StackDepotHandle StackDepotNode::get_handle(u32 id) {
239 return StackDepotHandle(&theDepot.nodes[id], id);
// Releases all depot memory (hash table and frame storage); tests only.
void StackDepotTestOnlyUnmap() {
  theDepot.TestOnlyUnmap();
  stackStore.TestOnlyUnmap();
}
247 } // namespace __sanitizer