//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementation of a mapping from arbitrary values to unique 32-bit
// identifiers.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_persistent_allocator.h"
23 namespace __sanitizer
{
25 template <class Node
, int kReservedBits
, int kTabSizeLog
>
26 class StackDepotBase
{
28 typedef typename
Node::args_type args_type
;
29 typedef typename
Node::handle_type handle_type
;
30 typedef typename
Node::hash_type hash_type
;
31 // Maps stack trace to an unique id.
32 handle_type
Put(args_type args
, bool *inserted
= nullptr);
33 // Retrieves a stored stack trace by the id.
34 args_type
Get(u32 id
);
36 StackDepotStats
GetStats() const { return stats
; }
43 static Node
*find(Node
*s
, args_type args
, hash_type hash
);
44 static Node
*lock(atomic_uintptr_t
*p
);
45 static void unlock(atomic_uintptr_t
*p
, Node
*s
);
47 static const int kTabSize
= 1 << kTabSizeLog
; // Hash table size.
48 static const int kPartBits
= 8;
49 static const int kPartShift
= sizeof(u32
) * 8 - kPartBits
- kReservedBits
;
50 static const int kPartCount
=
51 1 << kPartBits
; // Number of subparts in the table.
52 static const int kPartSize
= kTabSize
/ kPartCount
;
53 static const int kMaxId
= 1 << kPartShift
;
55 atomic_uintptr_t tab
[kTabSize
]; // Hash table of Node's.
56 atomic_uint32_t seq
[kPartCount
]; // Unique id generators.
58 StackDepotStats stats
;
60 friend class StackDepotReverseMap
;
63 template <class Node
, int kReservedBits
, int kTabSizeLog
>
64 Node
*StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::find(Node
*s
,
67 // Searches linked list s for the stack, returns its id.
68 for (; s
; s
= s
->link
) {
69 if (s
->eq(hash
, args
)) {
76 template <class Node
, int kReservedBits
, int kTabSizeLog
>
77 Node
*StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::lock(
78 atomic_uintptr_t
*p
) {
79 // Uses the pointer lsb as mutex.
80 for (int i
= 0;; i
++) {
81 uptr cmp
= atomic_load(p
, memory_order_relaxed
);
83 atomic_compare_exchange_weak(p
, &cmp
, cmp
| 1, memory_order_acquire
))
88 internal_sched_yield();
92 template <class Node
, int kReservedBits
, int kTabSizeLog
>
93 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::unlock(
94 atomic_uintptr_t
*p
, Node
*s
) {
95 DCHECK_EQ((uptr
)s
& 1, 0);
96 atomic_store(p
, (uptr
)s
, memory_order_release
);
99 template <class Node
, int kReservedBits
, int kTabSizeLog
>
100 typename StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::handle_type
101 StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::Put(args_type args
,
103 if (inserted
) *inserted
= false;
104 if (!Node::is_valid(args
)) return handle_type();
105 hash_type h
= Node::hash(args
);
106 atomic_uintptr_t
*p
= &tab
[h
% kTabSize
];
107 uptr v
= atomic_load(p
, memory_order_consume
);
108 Node
*s
= (Node
*)(v
& ~1);
109 // First, try to find the existing stack.
110 Node
*node
= find(s
, args
, h
);
111 if (node
) return node
->get_handle();
112 // If failed, lock, retry and insert new.
115 node
= find(s2
, args
, h
);
118 return node
->get_handle();
121 uptr part
= (h
% kTabSize
) / kPartSize
;
122 u32 id
= atomic_fetch_add(&seq
[part
], 1, memory_order_relaxed
) + 1;
124 CHECK_LT(id
, kMaxId
);
125 id
|= part
<< kPartShift
;
127 CHECK_EQ(id
& (((u32
)-1) >> kReservedBits
), id
);
128 uptr memsz
= Node::storage_size(args
);
129 s
= (Node
*)PersistentAlloc(memsz
);
130 stats
.allocated
+= memsz
;
135 if (inserted
) *inserted
= true;
136 return s
->get_handle();
139 template <class Node
, int kReservedBits
, int kTabSizeLog
>
140 typename StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::args_type
141 StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::Get(u32 id
) {
145 CHECK_EQ(id
& (((u32
)-1) >> kReservedBits
), id
);
146 // High kPartBits contain part id, so we need to scan at most kPartSize lists.
147 uptr part
= id
>> kPartShift
;
148 for (int i
= 0; i
!= kPartSize
; i
++) {
149 uptr idx
= part
* kPartSize
+ i
;
150 CHECK_LT(idx
, kTabSize
);
151 atomic_uintptr_t
*p
= &tab
[idx
];
152 uptr v
= atomic_load(p
, memory_order_consume
);
153 Node
*s
= (Node
*)(v
& ~1);
154 for (; s
; s
= s
->link
) {
163 template <class Node
, int kReservedBits
, int kTabSizeLog
>
164 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::LockAll() {
165 for (int i
= 0; i
< kTabSize
; ++i
) {
170 template <class Node
, int kReservedBits
, int kTabSizeLog
>
171 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::UnlockAll() {
172 for (int i
= 0; i
< kTabSize
; ++i
) {
173 atomic_uintptr_t
*p
= &tab
[i
];
174 uptr s
= atomic_load(p
, memory_order_relaxed
);
175 unlock(p
, (Node
*)(s
& ~1UL));
179 template <class Node
, int kReservedBits
, int kTabSizeLog
>
180 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::PrintAll() {
181 for (int i
= 0; i
< kTabSize
; ++i
) {
182 atomic_uintptr_t
*p
= &tab
[i
];
184 uptr v
= atomic_load(p
, memory_order_relaxed
);
185 Node
*s
= (Node
*)(v
& ~1UL);
186 for (; s
; s
= s
->link
) {
187 Printf("Stack for id %u:\n", s
->id
);
194 } // namespace __sanitizer
196 #endif // SANITIZER_STACKDEPOTBASE_H