1 //===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
// Implementation of a mapping from arbitrary values to unique 32-bit
// identifiers.
11 //===----------------------------------------------------------------------===//
13 #ifndef SANITIZER_STACKDEPOTBASE_H
14 #define SANITIZER_STACKDEPOTBASE_H
18 #include "sanitizer_atomic.h"
19 #include "sanitizer_internal_defs.h"
20 #include "sanitizer_mutex.h"
21 #include "sanitizer_persistent_allocator.h"
23 namespace __sanitizer
{
25 template <class Node
, int kReservedBits
, int kTabSizeLog
>
26 class StackDepotBase
{
28 typedef typename
Node::args_type args_type
;
29 typedef typename
Node::handle_type handle_type
;
30 // Maps stack trace to an unique id.
31 handle_type
Put(args_type args
, bool *inserted
= nullptr);
32 // Retrieves a stored stack trace by the id.
33 args_type
Get(u32 id
);
35 StackDepotStats
*GetStats() { return &stats
; }
42 static Node
*find(Node
*s
, args_type args
, u32 hash
);
43 static Node
*lock(atomic_uintptr_t
*p
);
44 static void unlock(atomic_uintptr_t
*p
, Node
*s
);
46 static const int kTabSize
= 1 << kTabSizeLog
; // Hash table size.
47 static const int kPartBits
= 8;
48 static const int kPartShift
= sizeof(u32
) * 8 - kPartBits
- kReservedBits
;
49 static const int kPartCount
=
50 1 << kPartBits
; // Number of subparts in the table.
51 static const int kPartSize
= kTabSize
/ kPartCount
;
52 static const int kMaxId
= 1 << kPartShift
;
54 atomic_uintptr_t tab
[kTabSize
]; // Hash table of Node's.
55 atomic_uint32_t seq
[kPartCount
]; // Unique id generators.
57 StackDepotStats stats
;
59 friend class StackDepotReverseMap
;
62 template <class Node
, int kReservedBits
, int kTabSizeLog
>
63 Node
*StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::find(Node
*s
,
66 // Searches linked list s for the stack, returns its id.
67 for (; s
; s
= s
->link
) {
68 if (s
->eq(hash
, args
)) {
75 template <class Node
, int kReservedBits
, int kTabSizeLog
>
76 Node
*StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::lock(
77 atomic_uintptr_t
*p
) {
78 // Uses the pointer lsb as mutex.
79 for (int i
= 0;; i
++) {
80 uptr cmp
= atomic_load(p
, memory_order_relaxed
);
82 atomic_compare_exchange_weak(p
, &cmp
, cmp
| 1, memory_order_acquire
))
87 internal_sched_yield();
91 template <class Node
, int kReservedBits
, int kTabSizeLog
>
92 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::unlock(
93 atomic_uintptr_t
*p
, Node
*s
) {
94 DCHECK_EQ((uptr
)s
& 1, 0);
95 atomic_store(p
, (uptr
)s
, memory_order_release
);
98 template <class Node
, int kReservedBits
, int kTabSizeLog
>
99 typename StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::handle_type
100 StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::Put(args_type args
,
102 if (inserted
) *inserted
= false;
103 if (!Node::is_valid(args
)) return handle_type();
104 uptr h
= Node::hash(args
);
105 atomic_uintptr_t
*p
= &tab
[h
% kTabSize
];
106 uptr v
= atomic_load(p
, memory_order_consume
);
107 Node
*s
= (Node
*)(v
& ~1);
108 // First, try to find the existing stack.
109 Node
*node
= find(s
, args
, h
);
110 if (node
) return node
->get_handle();
111 // If failed, lock, retry and insert new.
114 node
= find(s2
, args
, h
);
117 return node
->get_handle();
120 uptr part
= (h
% kTabSize
) / kPartSize
;
121 u32 id
= atomic_fetch_add(&seq
[part
], 1, memory_order_relaxed
) + 1;
123 CHECK_LT(id
, kMaxId
);
124 id
|= part
<< kPartShift
;
126 CHECK_EQ(id
& (((u32
)-1) >> kReservedBits
), id
);
127 uptr memsz
= Node::storage_size(args
);
128 s
= (Node
*)PersistentAlloc(memsz
);
129 stats
.allocated
+= memsz
;
134 if (inserted
) *inserted
= true;
135 return s
->get_handle();
138 template <class Node
, int kReservedBits
, int kTabSizeLog
>
139 typename StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::args_type
140 StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::Get(u32 id
) {
144 CHECK_EQ(id
& (((u32
)-1) >> kReservedBits
), id
);
145 // High kPartBits contain part id, so we need to scan at most kPartSize lists.
146 uptr part
= id
>> kPartShift
;
147 for (int i
= 0; i
!= kPartSize
; i
++) {
148 uptr idx
= part
* kPartSize
+ i
;
149 CHECK_LT(idx
, kTabSize
);
150 atomic_uintptr_t
*p
= &tab
[idx
];
151 uptr v
= atomic_load(p
, memory_order_consume
);
152 Node
*s
= (Node
*)(v
& ~1);
153 for (; s
; s
= s
->link
) {
162 template <class Node
, int kReservedBits
, int kTabSizeLog
>
163 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::LockAll() {
164 for (int i
= 0; i
< kTabSize
; ++i
) {
169 template <class Node
, int kReservedBits
, int kTabSizeLog
>
170 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::UnlockAll() {
171 for (int i
= 0; i
< kTabSize
; ++i
) {
172 atomic_uintptr_t
*p
= &tab
[i
];
173 uptr s
= atomic_load(p
, memory_order_relaxed
);
174 unlock(p
, (Node
*)(s
& ~1UL));
178 template <class Node
, int kReservedBits
, int kTabSizeLog
>
179 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::PrintAll() {
180 for (int i
= 0; i
< kTabSize
; ++i
) {
181 atomic_uintptr_t
*p
= &tab
[i
];
183 uptr v
= atomic_load(p
, memory_order_relaxed
);
184 Node
*s
= (Node
*)(v
& ~1UL);
185 for (; s
; s
= s
->link
) {
186 Printf("Stack for id %u:\n", s
->id
);
193 } // namespace __sanitizer
195 #endif // SANITIZER_STACKDEPOTBASE_H