//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementation of a mapping from arbitrary values to unique 32-bit
// identifiers.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_STACKDEPOTBASE_H
#define SANITIZER_STACKDEPOTBASE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_persistent_allocator.h"
namespace __sanitizer {
22 template <class Node
, int kReservedBits
, int kTabSizeLog
>
23 class StackDepotBase
{
25 typedef typename
Node::args_type args_type
;
26 typedef typename
Node::handle_type handle_type
;
27 // Maps stack trace to an unique id.
28 handle_type
Put(args_type args
, bool *inserted
= nullptr);
29 // Retrieves a stored stack trace by the id.
30 args_type
Get(u32 id
);
32 StackDepotStats
*GetStats() { return &stats
; }
38 static Node
*find(Node
*s
, args_type args
, u32 hash
);
39 static Node
*lock(atomic_uintptr_t
*p
);
40 static void unlock(atomic_uintptr_t
*p
, Node
*s
);
42 static const int kTabSize
= 1 << kTabSizeLog
; // Hash table size.
43 static const int kPartBits
= 8;
44 static const int kPartShift
= sizeof(u32
) * 8 - kPartBits
- kReservedBits
;
45 static const int kPartCount
=
46 1 << kPartBits
; // Number of subparts in the table.
47 static const int kPartSize
= kTabSize
/ kPartCount
;
48 static const int kMaxId
= 1 << kPartShift
;
50 atomic_uintptr_t tab
[kTabSize
]; // Hash table of Node's.
51 atomic_uint32_t seq
[kPartCount
]; // Unique id generators.
53 StackDepotStats stats
;
55 friend class StackDepotReverseMap
;
58 template <class Node
, int kReservedBits
, int kTabSizeLog
>
59 Node
*StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::find(Node
*s
,
62 // Searches linked list s for the stack, returns its id.
63 for (; s
; s
= s
->link
) {
64 if (s
->eq(hash
, args
)) {
71 template <class Node
, int kReservedBits
, int kTabSizeLog
>
72 Node
*StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::lock(
73 atomic_uintptr_t
*p
) {
74 // Uses the pointer lsb as mutex.
75 for (int i
= 0;; i
++) {
76 uptr cmp
= atomic_load(p
, memory_order_relaxed
);
78 atomic_compare_exchange_weak(p
, &cmp
, cmp
| 1, memory_order_acquire
))
83 internal_sched_yield();
87 template <class Node
, int kReservedBits
, int kTabSizeLog
>
88 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::unlock(
89 atomic_uintptr_t
*p
, Node
*s
) {
90 DCHECK_EQ((uptr
)s
& 1, 0);
91 atomic_store(p
, (uptr
)s
, memory_order_release
);
94 template <class Node
, int kReservedBits
, int kTabSizeLog
>
95 typename StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::handle_type
96 StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::Put(args_type args
,
98 if (inserted
) *inserted
= false;
99 if (!Node::is_valid(args
)) return handle_type();
100 uptr h
= Node::hash(args
);
101 atomic_uintptr_t
*p
= &tab
[h
% kTabSize
];
102 uptr v
= atomic_load(p
, memory_order_consume
);
103 Node
*s
= (Node
*)(v
& ~1);
104 // First, try to find the existing stack.
105 Node
*node
= find(s
, args
, h
);
106 if (node
) return node
->get_handle();
107 // If failed, lock, retry and insert new.
110 node
= find(s2
, args
, h
);
113 return node
->get_handle();
116 uptr part
= (h
% kTabSize
) / kPartSize
;
117 u32 id
= atomic_fetch_add(&seq
[part
], 1, memory_order_relaxed
) + 1;
119 CHECK_LT(id
, kMaxId
);
120 id
|= part
<< kPartShift
;
122 CHECK_EQ(id
& (((u32
)-1) >> kReservedBits
), id
);
123 uptr memsz
= Node::storage_size(args
);
124 s
= (Node
*)PersistentAlloc(memsz
);
125 stats
.allocated
+= memsz
;
130 if (inserted
) *inserted
= true;
131 return s
->get_handle();
134 template <class Node
, int kReservedBits
, int kTabSizeLog
>
135 typename StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::args_type
136 StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::Get(u32 id
) {
140 CHECK_EQ(id
& (((u32
)-1) >> kReservedBits
), id
);
141 // High kPartBits contain part id, so we need to scan at most kPartSize lists.
142 uptr part
= id
>> kPartShift
;
143 for (int i
= 0; i
!= kPartSize
; i
++) {
144 uptr idx
= part
* kPartSize
+ i
;
145 CHECK_LT(idx
, kTabSize
);
146 atomic_uintptr_t
*p
= &tab
[idx
];
147 uptr v
= atomic_load(p
, memory_order_consume
);
148 Node
*s
= (Node
*)(v
& ~1);
149 for (; s
; s
= s
->link
) {
158 template <class Node
, int kReservedBits
, int kTabSizeLog
>
159 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::LockAll() {
160 for (int i
= 0; i
< kTabSize
; ++i
) {
165 template <class Node
, int kReservedBits
, int kTabSizeLog
>
166 void StackDepotBase
<Node
, kReservedBits
, kTabSizeLog
>::UnlockAll() {
167 for (int i
= 0; i
< kTabSize
; ++i
) {
168 atomic_uintptr_t
*p
= &tab
[i
];
169 uptr s
= atomic_load(p
, memory_order_relaxed
);
170 unlock(p
, (Node
*)(s
& ~1UL));
}  // namespace __sanitizer

#endif  // SANITIZER_STACKDEPOTBASE_H