1 //===-- sanitizer_stackdepot.cc -------------------------------------------===//
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
6 //===----------------------------------------------------------------------===//
8 // This file is shared between AddressSanitizer and ThreadSanitizer
10 //===----------------------------------------------------------------------===//
12 #include "sanitizer_stackdepot.h"
14 #include "sanitizer_common.h"
15 #include "sanitizer_stackdepotbase.h"
17 namespace __sanitizer
{
19 struct StackDepotNode
{
22 atomic_uint32_t hash_and_use_count
; // hash_bits : 12; use_count : 20;
24 uptr stack
[1]; // [size]
26 static const u32 kTabSizeLog
= 20;
27 // Lower kTabSizeLog bits are equal for all items in one bucket.
28 // We use these bits to store the per-stack use counter.
29 static const u32 kUseCountBits
= kTabSizeLog
;
30 static const u32 kMaxUseCount
= 1 << kUseCountBits
;
31 static const u32 kUseCountMask
= (1 << kUseCountBits
) - 1;
32 static const u32 kHashMask
= ~kUseCountMask
;
34 typedef StackTrace args_type
;
35 bool eq(u32 hash
, const args_type
&args
) const {
37 atomic_load(&hash_and_use_count
, memory_order_relaxed
) & kHashMask
;
38 if ((hash
& kHashMask
) != hash_bits
|| args
.size
!= size
) return false;
40 for (; i
< size
; i
++) {
41 if (stack
[i
] != args
.trace
[i
]) return false;
45 static uptr
storage_size(const args_type
&args
) {
46 return sizeof(StackDepotNode
) + (args
.size
- 1) * sizeof(uptr
);
48 static u32
hash(const args_type
&args
) {
50 const u32 m
= 0x5bd1e995;
51 const u32 seed
= 0x9747b28c;
53 u32 h
= seed
^ (args
.size
* sizeof(uptr
));
54 for (uptr i
= 0; i
< args
.size
; i
++) {
55 u32 k
= args
.trace
[i
];
67 static bool is_valid(const args_type
&args
) {
68 return args
.size
> 0 && args
.trace
;
70 void store(const args_type
&args
, u32 hash
) {
71 atomic_store(&hash_and_use_count
, hash
& kHashMask
, memory_order_relaxed
);
73 internal_memcpy(stack
, args
.trace
, size
* sizeof(uptr
));
75 args_type
load() const {
76 return args_type(&stack
[0], size
);
78 StackDepotHandle
get_handle() { return StackDepotHandle(this); }
80 typedef StackDepotHandle handle_type
;
// The per-stack use counter must cover exactly kStackDepotMaxUseCount values
// (presumably declared in the depot header — TODO confirm against
// sanitizer_stackdepot.h).
COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
85 u32
StackDepotHandle::id() { return node_
->id
; }
86 int StackDepotHandle::use_count() {
87 return atomic_load(&node_
->hash_and_use_count
, memory_order_relaxed
) &
88 StackDepotNode::kUseCountMask
;
90 void StackDepotHandle::inc_use_count_unsafe() {
92 atomic_fetch_add(&node_
->hash_and_use_count
, 1, memory_order_relaxed
) &
93 StackDepotNode::kUseCountMask
;
94 CHECK_LT(prev
+ 1, StackDepotNode::kMaxUseCount
);
97 // FIXME(dvyukov): this single reserved bit is used in TSan.
98 typedef StackDepotBase
<StackDepotNode
, 1, StackDepotNode::kTabSizeLog
>
100 static StackDepot theDepot
;
102 StackDepotStats
*StackDepotGetStats() {
103 return theDepot
.GetStats();
106 u32
StackDepotPut(StackTrace stack
) {
107 StackDepotHandle h
= theDepot
.Put(stack
);
108 return h
.valid() ? h
.id() : 0;
111 StackDepotHandle
StackDepotPut_WithHandle(StackTrace stack
) {
112 return theDepot
.Put(stack
);
115 StackTrace
StackDepotGet(u32 id
) {
116 return theDepot
.Get(id
);
119 void StackDepotLockAll() {
123 void StackDepotUnlockAll() {
124 theDepot
.UnlockAll();
127 bool StackDepotReverseMap::IdDescPair::IdComparator(
128 const StackDepotReverseMap::IdDescPair
&a
,
129 const StackDepotReverseMap::IdDescPair
&b
) {
133 StackDepotReverseMap::StackDepotReverseMap()
134 : map_(StackDepotGetStats()->n_uniq_ids
+ 100) {
135 for (int idx
= 0; idx
< StackDepot::kTabSize
; idx
++) {
136 atomic_uintptr_t
*p
= &theDepot
.tab
[idx
];
137 uptr v
= atomic_load(p
, memory_order_consume
);
138 StackDepotNode
*s
= (StackDepotNode
*)(v
& ~1);
139 for (; s
; s
= s
->link
) {
140 IdDescPair pair
= {s
->id
, s
};
141 map_
.push_back(pair
);
144 InternalSort(&map_
, map_
.size(), IdDescPair::IdComparator
);
147 StackTrace
StackDepotReverseMap::Get(u32 id
) {
150 IdDescPair pair
= {id
, 0};
151 uptr idx
= InternalBinarySearch(map_
, 0, map_
.size(), pair
,
152 IdDescPair::IdComparator
);
153 if (idx
> map_
.size())
155 return map_
[idx
].desc
->load();
158 } // namespace __sanitizer