//=-- lsan_allocator.cc ---------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
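
// Requests larger than kMaxAllowedMallocSize (8 GB) are refused; the primary
// allocator manages a fixed 4 TB region starting at kAllocatorSpace.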
static const uptr kMaxAllowedMallocSize = 8UL << 30;
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
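
// Per-chunk metadata kept in the allocator's out-of-line metadata area.
// allocated, tag and requested_size are bit-fields sharing the first word;
// the allocated flag occupies the first byte so it can be flipped with a
// single atomic byte store.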
struct ChunkMetadata {
  bool allocated : 8;  // Must be first.
  ChunkTag tag : 2;
  uptr requested_size : 54;
  u32 stack_trace_id;
};
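
// The usual sanitizer_common allocator stack: a 64-bit size-class primary
// that reserves sizeof(ChunkMetadata) bytes of metadata per chunk, a
// thread-local cache, and an mmap-based secondary for large allocations.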
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
        SecondaryAllocator> Allocator;

static Allocator allocator;
static THREADLOCAL AllocatorCache cache;

void InitializeAllocator() {
  allocator.Init();
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(&cache);
}

static ChunkMetadata *Metadata(void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}
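
// Records the metadata of a freshly allocated chunk. The allocated flag is
// written last, via an atomic byte store, so the chunk only becomes visible
// to the leak scanner once the rest of its metadata is in place.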
static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}
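
// Allocation entry point used by LSan's malloc/calloc/new interceptors.
// Oversized requests are reported and refused.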
void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
    return 0;
  }
  void *p = allocator.Allocate(&cache, size, alignment, false);
  // Do not rely on the allocator to clear the memory (it's slow).
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  return p;
}

void Deallocate(void *p) {
  RegisterDeallocation(p);
  allocator.Deallocate(&cache, p);
}
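
// An oversized reallocation request frees the original block and returns null.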
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
    allocator.Deallocate(&cache, p);
    return 0;
  }
  p = allocator.Reallocate(&cache, p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)&cache;
  *end = *begin + sizeof(cache);
}

uptr GetMallocUsableSize(void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}
uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}
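
// LsanMetadata is the common LSan module's view of a chunk; the accessors
// below simply forward to the ChunkMetadata stored by this allocator.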
LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}
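
// Marks the chunk containing p as ignored so that it is excluded from leak
// reports; the caller is expected to hold the allocator lock (hence the
// "Locked" suffix).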
IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}

} // namespace __lsan