//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count;  // hash_bits : 12; use_count : 20;
  u32 size;
  u32 tag;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
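  // Concretely, with kTabSizeLog == 20 the low 20 bits of
  // hash_and_use_count hold the per-stack use counter and the high 12 bits
  // cache the top bits of the hash, so one atomic u32 serves both purposes.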

  typedef StackTrace args_type;
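  // A candidate matches this node only if the cached hash bits, size and
  // tag all agree and every stored frame equals the incoming one.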
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
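  // Bytes needed to store a node holding args.size frames; the trailing
  // stack[1] member already accounts for one of them.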
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
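  // Hashes the raw PC array with MurmurHash2 using a fixed seed.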
  static u32 hash(const args_type &args) {
    // murmur2
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) {
      u32 k = args.trace[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
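// "Unsafe" in the sense that the counter shares a word with the hash bits;
// the CHECK below ensures the relaxed increment cannot overflow into them.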
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
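// The single process-wide depot instance behind the functions below.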

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}
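
// Typical round trip through the depot (a sketch, not part of this file;
// the PC values are made up for illustration):
//   uptr pcs[3] = {0x401000, 0x401234, 0x401500};
//   StackTrace trace(pcs, 3);
//   u32 id = StackDepotPut(trace);        // identical traces dedup to one id
//   StackTrace back = StackDepotGet(id);  // back.size == 3, frames match pcs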

bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}
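
// Builds the id -> node map in one pass over the depot's hash table. The
// low pointer bit in each bucket is reserved (see the FIXME above), so it
// is masked off before the bucket's chain is walked.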
StackDepotReverseMap::StackDepotReverseMap()
    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    StackDepotNode *s = (StackDepotNode*)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}
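
// Binary search over the sorted map; returns an empty trace when the id is
// not present.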
StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, nullptr};
  uptr idx =
      InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
  if (idx == map_.size() || map_[idx].id != id)
    return StackTrace();
  return map_[idx].desc->load();
}

}  // namespace __sanitizer