//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {

struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
  uptr size;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
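  // With kTabSizeLog == 20 the 32-bit word is thus split as
  // [ 12 hash bits | 20 use-count bits ]: the bucket index already fixes
  // the low 20 bits of the hash, so only the high 12 bits need to be kept
  // to tell entries within one bucket apart.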

  typedef StackTrace args_type;
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size) return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
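  // The node carries its trace in a trailing array: stack[1] really holds
  // stack[size], so the allocation is the header plus (size - 1) extra
  // frames.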
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
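  // MurmurHash2 over the PCs, seeded with the byte length of the trace;
  // only the low 32 bits of each PC take part in the hash.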
  static u32 hash(const args_type &args) {
    // murmur2
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) {
      u32 k = args.trace[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
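  // store() runs before the node is published into its bucket, i.e. before
  // any other thread can observe it, so relaxed ordering suffices here.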
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  args_type load() const {
    return args_type(&stack[0], size);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};
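
// Compile-time guard: the internal limit must stay in sync with the public
// kStackDepotMaxUseCount constant.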
COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
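// "Unsafe" because the counter shares its word with the hash bits: the
// CHECK below fires before an increment could carry into them.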
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}
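
// Id 0 is reserved as the "invalid" value, so callers can treat a zero
// return as "the trace was not stored".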
u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}
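
// StackDepotReverseMap builds a sorted snapshot of all (id, node) pairs,
// giving O(log n) id -> trace lookups without re-walking the depot's hash
// table on every query.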
bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}

StackDepotReverseMap::StackDepotReverseMap()
    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    // The low bit of the bucket pointer is reserved (see the StackDepot
    // typedef above); mask it off before chasing the chain.
    StackDepotNode *s = (StackDepotNode*)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}
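
// Lookup by binary search over the sorted snapshot. Ids stored into the
// depot after the map was built are not visible here.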
StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, 0};
  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
                                  IdDescPair::IdComparator);
  if (idx > map_.size())
    return StackTrace();
  return map_[idx].desc->load();
}

}  // namespace __sanitizer