//===-- sanitizer_stackdepot.cc -------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries.
//===----------------------------------------------------------------------===//
12 #include "sanitizer_stackdepot.h"
13 #include "sanitizer_common.h"
14 #include "sanitizer_internal_defs.h"
15 #include "sanitizer_mutex.h"
16 #include "sanitizer_atomic.h"
18 namespace __sanitizer
{
20 const int kTabSize
= 1024 * 1024; // Hash table size.
21 const int kPartBits
= 8;
22 const int kPartShift
= sizeof(u32
) * 8 - kPartBits
- 1;
23 const int kPartCount
= 1 << kPartBits
; // Number of subparts in the table.
24 const int kPartSize
= kTabSize
/ kPartCount
;
25 const int kMaxId
= 1 << kPartShift
;
32 uptr stack
[1]; // [size]
36 StaticSpinMutex mtx
; // Protects alloc of new blocks for region allocator.
37 atomic_uintptr_t region_pos
; // Region allocator for StackDesc's.
38 atomic_uintptr_t region_end
;
39 atomic_uintptr_t tab
[kTabSize
]; // Hash table of StackDesc's.
40 atomic_uint32_t seq
[kPartCount
]; // Unique id generators.
43 static u32
hash(const uptr
*stack
, uptr size
) {
45 const u32 m
= 0x5bd1e995;
46 const u32 seed
= 0x9747b28c;
48 u32 h
= seed
^ (size
* sizeof(uptr
));
49 for (uptr i
= 0; i
< size
; i
++) {
63 static StackDesc
*tryallocDesc(uptr memsz
) {
64 // Optimisic lock-free allocation, essentially try to bump the region ptr.
66 uptr cmp
= atomic_load(&depot
.region_pos
, memory_order_acquire
);
67 uptr end
= atomic_load(&depot
.region_end
, memory_order_acquire
);
68 if (cmp
== 0 || cmp
+ memsz
> end
)
70 if (atomic_compare_exchange_weak(
71 &depot
.region_pos
, &cmp
, cmp
+ memsz
,
72 memory_order_acquire
))
73 return (StackDesc
*)cmp
;
77 static StackDesc
*allocDesc(uptr size
) {
78 // Frist, try to allocate optimisitically.
79 uptr memsz
= sizeof(StackDesc
) + (size
- 1) * sizeof(uptr
);
80 StackDesc
*s
= tryallocDesc(memsz
);
83 // If failed, lock, retry and alloc new superblock.
84 SpinMutexLock
l(&depot
.mtx
);
86 s
= tryallocDesc(memsz
);
89 atomic_store(&depot
.region_pos
, 0, memory_order_relaxed
);
90 uptr allocsz
= 64 * 1024;
93 uptr mem
= (uptr
)MmapOrDie(allocsz
, "stack depot");
94 atomic_store(&depot
.region_end
, mem
+ allocsz
, memory_order_release
);
95 atomic_store(&depot
.region_pos
, mem
, memory_order_release
);
99 static u32
find(StackDesc
*s
, const uptr
*stack
, uptr size
, u32 hash
) {
100 // Searches linked list s for the stack, returns its id.
101 for (; s
; s
= s
->link
) {
102 if (s
->hash
== hash
&& s
->size
== size
) {
104 for (; i
< size
; i
++) {
105 if (stack
[i
] != s
->stack
[i
])
115 static StackDesc
*lock(atomic_uintptr_t
*p
) {
116 // Uses the pointer lsb as mutex.
117 for (int i
= 0;; i
++) {
118 uptr cmp
= atomic_load(p
, memory_order_relaxed
);
120 && atomic_compare_exchange_weak(p
, &cmp
, cmp
| 1,
121 memory_order_acquire
))
122 return (StackDesc
*)cmp
;
126 internal_sched_yield();
130 static void unlock(atomic_uintptr_t
*p
, StackDesc
*s
) {
131 DCHECK_EQ((uptr
)s
& 1, 0);
132 atomic_store(p
, (uptr
)s
, memory_order_release
);
135 u32
StackDepotPut(const uptr
*stack
, uptr size
) {
136 if (stack
== 0 || size
== 0)
138 uptr h
= hash(stack
, size
);
139 atomic_uintptr_t
*p
= &depot
.tab
[h
% kTabSize
];
140 uptr v
= atomic_load(p
, memory_order_consume
);
141 StackDesc
*s
= (StackDesc
*)(v
& ~1);
142 // First, try to find the existing stack.
143 u32 id
= find(s
, stack
, size
, h
);
146 // If failed, lock, retry and insert new.
147 StackDesc
*s2
= lock(p
);
149 id
= find(s2
, stack
, size
, h
);
155 uptr part
= (h
% kTabSize
) / kPartSize
;
156 id
= atomic_fetch_add(&depot
.seq
[part
], 1, memory_order_relaxed
) + 1;
157 CHECK_LT(id
, kMaxId
);
158 id
|= part
<< kPartShift
;
160 CHECK_EQ(id
& (1u << 31), 0);
165 internal_memcpy(s
->stack
, stack
, size
* sizeof(uptr
));
171 const uptr
*StackDepotGet(u32 id
, uptr
*size
) {
174 CHECK_EQ(id
& (1u << 31), 0);
175 // High kPartBits contain part id, so we need to scan at most kPartSize lists.
176 uptr part
= id
>> kPartShift
;
177 for (int i
= 0; i
!= kPartSize
; i
++) {
178 uptr idx
= part
* kPartSize
+ i
;
179 CHECK_LT(idx
, kTabSize
);
180 atomic_uintptr_t
*p
= &depot
.tab
[idx
];
181 uptr v
= atomic_load(p
, memory_order_consume
);
182 StackDesc
*s
= (StackDesc
*)(v
& ~1);
183 for (; s
; s
= s
->link
) {
194 } // namespace __sanitizer