//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
//===----------------------------------------------------------------------===//

// HwasanThreadList is a registry for live threads, as well as an allocator for
// HwasanThread objects and their stack history ring buffers. There are
// constraints on memory layout of the shadow region and CompactRingBuffer that
// are part of the ABI contract between compiler-rt and llvm.
//
// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
// * All stack ring buffers are located within (2**kShadowBaseAlignment)
//   sized region below and adjacent to the shadow region.
// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
//   aligned to twice its size. The value of N can be different for each buffer.
//
// These constraints guarantee that, given an address A of any element of the
// ring buffer,
//     A_next = (A + sizeof(uptr)) & ~((2**N) * 4096)
//   is the address of the next element of that ring buffer (with wrap-around).
//   The mask clears only bit N+12; that bit is 0 for every in-buffer address
//   (the buffer is aligned to twice its size), and it is set exactly when
//   A + 8 runs one past the end of the buffer, wrapping it back to the base.
// And, with K = kShadowBaseAlignment,
//     S = (A | ((1 << K) - 1)) + 1
//   (align up to kShadowBaseAlignment) is the start of the shadow region.
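//
// For illustration (addresses chosen arbitrarily): take N = 1, i.e. an
// 8192-byte ring buffer aligned to 16384, placed at 0xffffc000, directly
// below a shadow region starting at 2**32 (K = 32):
//     A = 0xffffc000 (first): A_next = (0xffffc000 + 8) & ~0x2000 = 0xffffc008
//     A = 0xffffdff8 (last):  A_next = (0xffffdff8 + 8) & ~0x2000 = 0xffffc000
//     S = (0xffffc008 | 0xffffffff) + 1 = 0x100000000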
//
// These calculations are used in compiler instrumentation to update the ring
// buffer and obtain the base address of shadow using only two inputs: address
// of the current element of the ring buffer, and N (i.e. size of the ring
// buffer). Since the value of N is very limited, we pack both inputs into a
// single thread-local word as
//     (1 << (N + 56)) | A
// See the implementation of class CompactRingBuffer, which is what is stored in
// said thread-local word.
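//
// Roughly (a C-style sketch, not the literal code; the real sequence is
// emitted by LLVM's HWAddressSanitizer pass, and tls_slot / untag / record
// are illustrative names), an instrumented prologue updates that word as:
//     uptr w = *tls_slot;           // (1 << (N + 56)) | A
//     *(uptr *)untag(w) = record;   // store the frame record into the buffer
//     uptr size = (w >> 56) << 12;  // top byte is 1 << N, so this equals
//                                   //   (2**N) * 4096, the buffer size
//     *tls_slot = (w + 8) & ~size;  // advance A with wrap-around; the top
//                                   //   byte is preserved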
//
// Note the unusual way of aligning up the address of the shadow:
//     (A | ((1 << K) - 1)) + 1
// It is only correct if A is not already equal to the shadow base address, but
// it saves 2 instructions on AArch64.
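//
// For example, with K = 4 (illustrative; the real value is
// kShadowBaseAlignment): A = 0x23 gives (0x23 | 0xf) + 1 = 0x30, the same as a
// conventional round-up, while an already-aligned A = 0x30 gives 0x40, not
// 0x30. The ring buffers always lie strictly below the shadow base, so the
// already-aligned case never arises here.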

#include "hwasan.h"
#include "hwasan_allocator.h"
#include "hwasan_flags.h"
#include "hwasan_thread.h"

#include "sanitizer_common/sanitizer_placement_new.h"

namespace __hwasan {

static uptr RingBufferSize() {
  uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
  // FIXME: increase the limit to 8 once this bug is fixed:
  // https://bugs.llvm.org/show_bug.cgi?id=39030
  for (int shift = 1; shift < 7; ++shift) {
    uptr size = 4096 * (1ULL << shift);
    if (size >= desired_bytes)
      return size;
  }
  Printf("stack history size too large: %d\n", flags()->stack_history_size);
  CHECK(0);
  return 0;
}
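
// For example, stack_history_size = 1024 records gives desired_bytes =
// 1024 * 8 = 8192 on a 64-bit target, and the loop above returns 8192
// (shift == 1), the smallest size it considers that fits.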

struct ThreadListHead {
  Thread *list_;

  ThreadListHead() : list_(nullptr) {}

  void Push(Thread *t) {
    t->next_ = list_;
    list_ = t;
  }

  Thread *Pop() {
    Thread *t = list_;
    if (t)
      list_ = t->next_;
    return t;
  }

  void Remove(Thread *t) {
    Thread **cur = &list_;
    while (*cur && *cur != t) cur = &(*cur)->next_;
    CHECK(*cur && "thread not found");
    *cur = (*cur)->next_;
  }

  template <class CB>
  void ForEach(CB cb) {
    Thread *t = list_;
    while (t) {
      cb(t);
      t = t->next_;
    }
  }
};

struct ThreadStats {
  uptr n_live_threads;
  uptr total_stack_size;
};

class HwasanThreadList {
 public:
  HwasanThreadList(uptr storage, uptr size)
      : free_space_(storage), free_space_end_(storage + size) {
    // [storage, storage + size) is used as a vector of
    // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
    // Each element contains
    // * a ring buffer at offset 0,
    // * a Thread object at offset ring_buffer_size_.
    ring_buffer_size_ = RingBufferSize();
    thread_alloc_size_ =
        RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
  }
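
  // For illustration (sizes hypothetical): if RingBufferSize() returns 0x2000
  // and sizeof(Thread) <= 0x2000, thread_alloc_size_ =
  // RoundUpTo(0x2000 + sizeof(Thread), 0x4000) = 0x4000; element i then
  // occupies [storage + i * 0x4000, storage + (i + 1) * 0x4000), with its
  // Thread object at storage + i * 0x4000 + 0x2000.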

  Thread *CreateCurrentThread() {
    Thread *t;
    {
      SpinMutexLock l(&list_mutex_);
      t = free_list_.Pop();
      if (t) {
        // Reuse a previously released slot: wipe its ring buffer and Thread.
        uptr start = (uptr)t - ring_buffer_size_;
        internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
      } else {
        t = AllocThread();
      }
      live_list_.Push(t);
    }
    t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
    AddThreadStats(t);
    return t;
  }

  void DontNeedThread(Thread *t) {
    uptr start = (uptr)t - ring_buffer_size_;
    ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
  }

  void ReleaseThread(Thread *t) {
    RemoveThreadStats(t);
    t->Destroy();
    SpinMutexLock l(&list_mutex_);
    live_list_.Remove(t);
    free_list_.Push(t);
    DontNeedThread(t);
  }

  Thread *GetThreadByBufferAddress(uptr p) {
    return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
                      ring_buffer_size_);
  }

  uptr MemoryUsedPerThread() {
    uptr res = sizeof(Thread) + ring_buffer_size_;
    if (auto sz = flags()->heap_history_size)
      res += HeapAllocationsRingBuffer::SizeInBytes(sz);
    return res;
  }

  template <class CB>
  void VisitAllLiveThreads(CB cb) {
    SpinMutexLock l(&list_mutex_);
    live_list_.ForEach(cb);
  }

  void AddThreadStats(Thread *t) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads++;
    stats_.total_stack_size += t->stack_size();
  }

  void RemoveThreadStats(Thread *t) {
    SpinMutexLock l(&stats_mutex_);
    stats_.n_live_threads--;
    stats_.total_stack_size -= t->stack_size();
  }

  ThreadStats GetThreadStats() {
    SpinMutexLock l(&stats_mutex_);
    return stats_;
  }

 private:
  Thread *AllocThread() {
    uptr align = ring_buffer_size_ * 2;
    CHECK(IsAligned(free_space_, align));
    Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
    free_space_ += thread_alloc_size_;
    CHECK(free_space_ <= free_space_end_ && "out of thread memory");
    return t;
  }

  uptr free_space_;
  uptr free_space_end_;
  uptr ring_buffer_size_;
  uptr thread_alloc_size_;

  ThreadListHead free_list_;
  ThreadListHead live_list_;
  SpinMutex list_mutex_;

  ThreadStats stats_;
  SpinMutex stats_mutex_;
};

void InitThreadList(uptr storage, uptr size);
HwasanThreadList &hwasanThreadList();

} // namespace __hwasan