//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
#include "sanitizer_stackdepot.h"

#include "sanitizer_common.h"
#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {

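// One stored stack trace. Nodes live in a hash table of singly linked
// buckets (see StackDepotBase); `link` chains the nodes within a bucket.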
struct StackDepotNode {
  StackDepotNode *link;
  u32 id;
  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
  u32 size;
  u32 tag;
  uptr stack[1];  // [size]

  static const u32 kTabSizeLog = 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;
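  // With kTabSizeLog == 20 this gives kUseCountMask == 0x000fffff and
  // kHashMask == 0xfff00000: a 20-bit use counter in the low bits and the
  // top 12 bits of the hash above it.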

  typedef StackTrace args_type;
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
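  // Nodes are allocated with storage_size() bytes, so the trailing stack[]
  // array really holds `size` frames; the -1 below accounts for the one
  // element already included in sizeof(StackDepotNode).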
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
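  // Hash of the raw PC array. Only the top 12 bits of the result survive in
  // hash_and_use_count; the low kTabSizeLog bits select the bucket and are
  // therefore equal for every node sharing one.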
  static u32 hash(const args_type &args) {
    // murmur2
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) {
      u32 k = args.trace[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};

COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
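// kStackDepotMaxUseCount is the public constant from sanitizer_stackdepot.h;
// this check keeps the node's internal bit budget in sync with it.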

u32 StackDepotHandle::id() { return node_->id; }
int StackDepotHandle::use_count() {
  return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
         StackDepotNode::kUseCountMask;
}
void StackDepotHandle::inc_use_count_unsafe() {
  u32 prev =
      atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
      StackDepotNode::kUseCountMask;
  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
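// "unsafe" because the blind fetch_add would carry into the stored hash bits
// if the counter ever reached kUseCountMask; the CHECK catches saturation.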

// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;

StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}

u32 StackDepotPut(StackTrace stack) {
  StackDepotHandle h = theDepot.Put(stack);
  return h.valid() ? h.id() : 0;
}

StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}

StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}
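
// Typical round trip (sketch; `st` is a hypothetical StackTrace captured by
// the caller, e.g. via the unwinder):
//   u32 id = StackDepotPut(st);          // deduplicating store; 0 == failed
//   StackTrace same = StackDepotGet(id); // yields the same frames back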

void StackDepotLockAll() {
  theDepot.LockAll();
}

void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}

bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}

StackDepotReverseMap::StackDepotReverseMap() {
  map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
    atomic_uintptr_t *p = &theDepot.tab[idx];
    uptr v = atomic_load(p, memory_order_consume);
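    // Bit 0 of a bucket pointer is reserved by StackDepotBase (it doubles as
    // the bucket lock), so mask it off before treating it as a node pointer.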
    StackDepotNode *s = (StackDepotNode *)(v & ~1);
    for (; s; s = s->link) {
      IdDescPair pair = {s->id, s};
      map_.push_back(pair);
    }
  }
  Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
}
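
// Note: the reverse map is a snapshot. Stacks added to the depot after the
// constructor ran are not in map_ and will not be found by Get().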
StackTrace StackDepotReverseMap::Get(u32 id) {
  if (!map_.size())
    return StackTrace();
  IdDescPair pair = {id, nullptr};
  uptr idx =
      InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
  // InternalLowerBound returns map_.size() when id is greater than every
  // stored id, so that index must be rejected before map_[idx] is touched.
  if (idx >= map_.size() || map_[idx].id != id)
    return StackTrace();
  return map_[idx].desc->load();
}

}  // namespace __sanitizer