//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of Sanitizer runtime.
// The deadlock detector maintains a directed graph of lock acquisitions.
// When a lock event happens, the detector checks if the locks already held by
// the current thread are reachable from the newly acquired lock.
//
// The detector can handle only a fixed number of simultaneously live locks
// (a lock is alive if it has been locked at least once and has not been
// destroyed). When the maximum number of locks is reached, the entire graph
// is flushed and a new lock epoch is started. The node ids from the old
// epochs cannot be used with any of the detector methods except for
// nodeBelongsToCurrentEpoch().
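//
// A worked example of the node ids (a sketch, assuming BV::kSize == 64,
// i.e. size() == 64): the first epoch is 64 and its node ids run from 64
// to 127; for node 100, nodeToIndexUnchecked(100) == 100 % 64 == 36 and
// nodeToEpoch(100) == 100 / 64 * 64 == 64. After the next flush the epoch
// becomes 128, so the stale node 100 fails nodeBelongsToCurrentEpoch().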
//
// FIXME: this is work in progress, nothing really works yet.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_DEADLOCK_DETECTOR_H
#define SANITIZER_DEADLOCK_DETECTOR_H

#include "sanitizer_common.h"
#include "sanitizer_bvgraph.h"

namespace __sanitizer {

// Thread-local state for DeadlockDetector.
// It contains the locks currently held by the owning thread.
template <class BV>
class DeadlockDetectorTLS {
 public:
  // No CTOR.
  void clear() {
    bv_.clear();
    epoch_ = 0;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  bool empty() const { return bv_.empty(); }

  void ensureCurrentEpoch(uptr current_epoch) {
    if (epoch_ == current_epoch) return;
    bv_.clear();
    epoch_ = current_epoch;
    n_recursive_locks = 0;
    n_all_locks_ = 0;
  }

  uptr getEpoch() const { return epoch_; }

  // Returns true if this is the first (non-recursive) acquisition of this lock.
  bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
    // Printf("addLock: %zx %zx stk %u\n", lock_id, current_epoch, stk);
    CHECK_EQ(epoch_, current_epoch);
    if (!bv_.setBit(lock_id)) {
      // The lock is already held by this thread, it must be recursive.
      CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
      recursive_locks[n_recursive_locks++] = lock_id;
      return false;
    }
    CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
    // lock_id < BV::kSize, can cast to a smaller int.
    u32 lock_id_short = static_cast<u32>(lock_id);
    LockWithContext l = {lock_id_short, stk};
    all_locks_with_contexts_[n_all_locks_++] = l;
    return true;
  }

  void removeLock(uptr lock_id) {
    if (n_recursive_locks) {
      for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
        if (recursive_locks[i] == lock_id) {
          n_recursive_locks--;
          Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
          return;
        }
      }
    }
    // Printf("remLock: %zx %zx\n", lock_id, epoch_);
    if (!bv_.clearBit(lock_id))
      return;  // probably addLock happened before flush
    if (n_all_locks_) {
      for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
        if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
          Swap(all_locks_with_contexts_[i],
               all_locks_with_contexts_[n_all_locks_ - 1]);
          n_all_locks_--;
          break;
        }
      }
    }
  }

  u32 findLockContext(uptr lock_id) {
    for (uptr i = 0; i < n_all_locks_; i++)
      if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
        return all_locks_with_contexts_[i].stk;
    return 0;
  }

  const BV &getLocks(uptr current_epoch) const {
    CHECK_EQ(epoch_, current_epoch);
    return bv_;
  }

  uptr getNumLocks() const { return n_all_locks_; }
  uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }

 private:
  BV bv_;
  uptr epoch_;
  uptr recursive_locks[64];
  uptr n_recursive_locks;
  struct LockWithContext {
    u32 lock;
    u32 stk;
  };
  LockWithContext all_locks_with_contexts_[64];
  uptr n_all_locks_;
};
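
// A sketch of the recursive-lock bookkeeping above (lock id 5 is a
// hypothetical value): the first addLock(5, epoch, stk) returns true and
// records a LockWithContext entry; a second addLock(5, epoch, stk) returns
// false and pushes 5 onto recursive_locks; the first removeLock(5) pops
// that recursive entry, and the second removeLock(5) clears bit 5 and
// drops the context record.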

// DeadlockDetector.
// For deadlock detection to work we need one global DeadlockDetector object
// and one DeadlockDetectorTLS object per thread.
// Most of the methods of this class are not thread-safe (i.e. should
// be protected by an external lock) unless explicitly noted otherwise.
template <class BV>
class DeadlockDetector {
 public:
  typedef BV BitVector;

  uptr size() const { return g_.size(); }

  // No CTOR.
  void clear() {
    current_epoch_ = 0;
    available_nodes_.clear();
    recycled_nodes_.clear();
    g_.clear();
    n_edges_ = 0;
  }

  // Allocate new deadlock detector node.
  // If we are out of available nodes, first try to recycle some.
  // If there is nothing to recycle, flush the graph and increment the epoch.
  // Associate 'data' (opaque user's object) with the new node.
  uptr newNode(uptr data) {
    if (!available_nodes_.empty())
      return getAvailableNode(data);
    if (!recycled_nodes_.empty()) {
      // Printf("recycling: n_edges_ %zd\n", n_edges_);
      for (sptr i = n_edges_ - 1; i >= 0; i--) {
        if (recycled_nodes_.getBit(edges_[i].from) ||
            recycled_nodes_.getBit(edges_[i].to)) {
          Swap(edges_[i], edges_[n_edges_ - 1]);
          n_edges_--;
        }
      }
      CHECK(available_nodes_.empty());
      // removeEdgesFrom was called in removeNode.
      g_.removeEdgesTo(recycled_nodes_);
      available_nodes_.setUnion(recycled_nodes_);
      recycled_nodes_.clear();
      return getAvailableNode(data);
    }
    // We are out of vacant nodes. Flush and increment the current_epoch_.
    current_epoch_ += size();
    recycled_nodes_.clear();
    available_nodes_.setAll();
    g_.clear();
    n_edges_ = 0;
    return getAvailableNode(data);
  }

  // Get data associated with the node created by newNode().
  uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }

  bool nodeBelongsToCurrentEpoch(uptr node) {
    return node && (node / size() * size()) == current_epoch_;
  }

  void removeNode(uptr node) {
    uptr idx = nodeToIndex(node);
    CHECK(!available_nodes_.getBit(idx));
    CHECK(recycled_nodes_.setBit(idx));
    g_.removeEdgesFrom(idx);
  }

  void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
    dtls->ensureCurrentEpoch(current_epoch_);
  }

  // Returns true if there is a cycle in the graph after this lock event.
  // Ideally should be called before the lock is acquired so that we can
  // report a deadlock before a real deadlock happens.
  bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
  }

  u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    return dtls->findLockContext(nodeToIndex(node));
  }

  // Add cur_node to the set of locks held currently by dtls.
  void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
  }

  // Experimental *racy* fast path function.
  // Returns true if all edges from the currently held locks to cur_node exist.
  bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
    uptr local_epoch = dtls->getEpoch();
    // Read from current_epoch_ is racy.
    if (cur_node && local_epoch == current_epoch_ &&
        local_epoch == nodeToEpoch(cur_node)) {
      uptr cur_idx = nodeToIndexUnchecked(cur_node);
      for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
        if (!g_.hasEdge(dtls->getLock(i), cur_idx))
          return false;
      }
      return true;
    }
    return false;
  }

  // Adds edges from currently held locks to cur_node,
  // returns the number of added edges, and puts the sources of added edges
  // into added_edges[].
  // Should be called before onLockAfter.
  uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
                int unique_tid) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    uptr added_edges[40];
    uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
                                     added_edges, ARRAY_SIZE(added_edges));
    for (uptr i = 0; i < n_added_edges; i++) {
      if (n_edges_ < ARRAY_SIZE(edges_)) {
        Edge e = {(u16)added_edges[i], (u16)cur_idx,
                  dtls->findLockContext(added_edges[i]), stk,
                  unique_tid};
        edges_[n_edges_++] = e;
      }
      // Printf("Edge%zd: %u %zd=>%zd in T%d\n",
      //        n_edges_, stk, added_edges[i], cur_idx, unique_tid);
    }
    return n_added_edges;
  }

  bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
                int *unique_tid) {
    uptr from_idx = nodeToIndex(from_node);
    uptr to_idx = nodeToIndex(to_node);
    for (uptr i = 0; i < n_edges_; i++) {
      if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
        *stk_from = edges_[i].stk_from;
        *stk_to = edges_[i].stk_to;
        *unique_tid = edges_[i].unique_tid;
        return true;
      }
    }
    return false;
  }

  // Test-only function. Handles the before/after lock events,
  // returns true if there is a cycle.
  bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
    addEdges(dtls, cur_node, stk, 0);
    onLockAfter(dtls, cur_node, stk);
    return is_reachable;
  }

  // Handles the try_lock event, returns false.
  // When a try_lock event happens (i.e. a try_lock call succeeds) we need
  // to add this lock to the currently held locks, but we should not try to
  // change the lock graph or to detect a cycle. We may want to investigate
  // whether a more aggressive strategy is possible for try_lock.
  bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
    ensureCurrentEpoch(dtls);
    uptr cur_idx = nodeToIndex(cur_node);
    dtls->addLock(cur_idx, current_epoch_, stk);
    return false;
  }

  // Returns true iff dtls is empty (no locks are currently held) and we can
  // add the node to the currently held locks w/o changing the global state.
  // This operation is thread-safe as it only touches the dtls.
  bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (!dtls->empty()) return false;
    if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }

  // Finds a path between the lock 'cur_node' (currently not held in dtls)
  // and some currently held lock, returns the length of the path
  // or 0 on failure.
  uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
                      uptr path_size) {
    tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
    uptr idx = nodeToIndex(cur_node);
    CHECK(!tmp_bv_.getBit(idx));
    uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
    for (uptr i = 0; i < res; i++)
      path[i] = indexToNode(path[i]);
    if (res)
      CHECK_EQ(path[0], cur_node);
    return res;
  }

  // Handle the unlock event.
  // This operation is thread-safe as it only touches the dtls.
  void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
    if (dtls->getEpoch() == nodeToEpoch(node))
      dtls->removeLock(nodeToIndexUnchecked(node));
  }

  // Tries to handle the lock event w/o writing to global state.
  // Returns true on success.
  // This operation is thread-safe as it only touches the dtls
  // (modulo racy nature of hasAllEdges).
  bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
    if (hasAllEdges(dtls, node)) {
      dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
      return true;
    }
    return false;
  }
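
  // A typical lock-event sequence (a sketch; 'mu', 'ReportDeadlock' and
  // 'unique_tid' are illustrative assumptions, not part of this header):
  //   if (!detector.onLockFast(&dtls, node, stk)) {
  //     SpinMutexLock lock(&mu);  // the external lock guarding the detector
  //     if (detector.onLockBefore(&dtls, node))
  //       ReportDeadlock(...);    // a cycle was found
  //     detector.addEdges(&dtls, node, stk, unique_tid);
  //     detector.onLockAfter(&dtls, node, stk);
  //   }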

  bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
    return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
  }

  uptr testOnlyGetEpoch() const { return current_epoch_; }
  bool testOnlyHasEdge(uptr l1, uptr l2) {
    return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
  }

  // idx1 and idx2 are raw indices to g_, not lock IDs.
  bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
    return g_.hasEdge(idx1, idx2);
  }

  void Print() {
    for (uptr from = 0; from < size(); from++)
      for (uptr to = 0; to < size(); to++)
        if (g_.hasEdge(from, to))
          Printf(" %zx => %zx\n", from, to);
  }

 private:
  void check_idx(uptr idx) const { CHECK_LT(idx, size()); }

  void check_node(uptr node) const {
    CHECK_GE(node, size());
    CHECK_EQ(current_epoch_, nodeToEpoch(node));
  }

  uptr indexToNode(uptr idx) const {
    check_idx(idx);
    return idx + current_epoch_;
  }

  uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }

  uptr nodeToIndex(uptr node) const {
    check_node(node);
    return nodeToIndexUnchecked(node);
  }

  uptr nodeToEpoch(uptr node) const { return node / size() * size(); }

  uptr getAvailableNode(uptr data) {
    uptr idx = available_nodes_.getAndClearFirstOne();
    data_[idx] = data;
    return indexToNode(idx);
  }

  struct Edge {
    u16 from;
    u16 to;
    u32 stk_from;
    u32 stk_to;
    int unique_tid;
  };

  uptr current_epoch_;
  BV available_nodes_;
  BV recycled_nodes_;
  BV tmp_bv_;
  BVGraph<BV> g_;
  uptr data_[BV::kSize];
  Edge edges_[BV::kSize * 32];
  uptr n_edges_;
};
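
// Minimal setup sketch (the BV choice and THREADLOCAL placement are
// illustrative assumptions, not mandated by this header):
//   typedef TwoLevelBitVector<> DDBV;
//   static DeadlockDetector<DDBV> g_detector;           // one global object
//   static THREADLOCAL DeadlockDetectorTLS<DDBV> dtls;  // one per thread
//   // Under the external lock:
//   uptr node = g_detector.newNode(reinterpret_cast<uptr>(mutex_addr));
//   ...
//   g_detector.onUnlock(&dtls, node);  // thread-safe, touches only dtls
//   g_detector.removeNode(node);       // under the external lock again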

} // namespace __sanitizer

#endif // SANITIZER_DEADLOCK_DETECTOR_H