//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//

#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"

namespace __sanitizer {
struct StackDepotDesc {
  const uptr *stack;
  uptr size;
  u32 hash() const {
    // murmur2
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (size * sizeof(uptr));
    for (uptr i = 0; i < size; i++) {
      u32 k = stack[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
  bool is_valid() { return size > 0 && stack; }
};

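// Illustrative usage (commentary added for exposition; the frame values below
// are made up): a descriptor wraps a caller-owned frame array, and identical
// frame sequences always produce the same murmur2 hash.
//
//   uptr frames[3] = {0x401000, 0x401a2c, 0x402f10};
//   StackDepotDesc desc = {frames, 3};
//   u32 h = desc.is_valid() ? desc.hash() : 0;
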
struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count;  // hash_bits : 12; use_count : 20;
  uptr size;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
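
  // Added commentary: with kTabSizeLog == 20 the 32-bit word splits as
  //   [ 12 hash bits | 20 use-count bits ]
  // i.e. hash_and_use_count == (hash & kHashMask) | (use_count & kUseCountMask).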

  typedef StackDepotDesc args_type;
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size) return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.stack[i]) return false;
    }
    return true;
  }
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    internal_memcpy(stack, args.stack, size * sizeof(uptr));
  }
  args_type load() const {
    args_type ret = {&stack[0], size};
    return ret;
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
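// Note (added commentary): the use counter shares its 32-bit word with the
// hash bits, so the CHECK above aborts once the counter saturates rather than
// letting further increments carry into kHashMask territory and corrupt the
// stored hash.
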
uptr StackDepotHandle::size() { return node_->size; }
uptr *StackDepotHandle::stack() { return &node_->stack[0]; }

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

u32 StackDepotPut(const uptr *stack, uptr size) {
  StackDepotDesc desc = {stack, size};
  StackDepotHandle h = theDepot.Put(desc);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) {
  StackDepotDesc desc = {stack, size};
  return theDepot.Put(desc);
}

const uptr *StackDepotGet(u32 id, uptr *size) {
  StackDepotDesc desc = theDepot.Get(id);
  *size = desc.size;
  return desc.stack;
}

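// Typical round trip (illustrative sketch; the frame values are made up):
//
//   uptr frames[2] = {0x401000, 0x401a2c};
//   u32 id = StackDepotPut(frames, 2);   // returns 0 for an invalid descriptor
//   uptr size = 0;
//   const uptr *trace = StackDepotGet(id, &size);
//   // trace points into the depot's deduplicated storage; here size == 2.
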
bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}

StackDepotReverseMap::StackDepotReverseMap()
    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
    StackDepotNode *s = (StackDepotNode*)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  InternalSort(&map_, map_.size(), IdDescPair::IdComparator);
}

const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
  if (!map_.size()) return 0;
  IdDescPair pair = {id, 0};
  uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair,
                                  IdDescPair::IdComparator);
  if (idx > map_.size()) {
    *size = 0;
    return 0;
  }
  StackDepotNode *desc = map_[idx].desc;
  *size = desc->size;
  return desc->stack;
}

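// Illustrative sketch (added commentary): the reverse map snapshots the depot
// once at construction time, so repeated id-to-stack lookups cost a binary
// search over the sorted id array instead of a depot lookup. Stacks deposited
// after construction are not visible to the snapshot.
//
//   StackDepotReverseMap rmap;   // build after the traces of interest exist
//   uptr size = 0;
//   const uptr *trace = rmap.Get(id, &size);
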
}  // namespace __sanitizer