//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return a 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
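//
// A hypothetical instantiation sketch (the typedefs and parameters below are
// illustrative assumptions, not the exact ones used by any particular
// sanitizer):
//
//   typedef SizeClassAllocator64<AP64> Primary;           // AP64: assumed params
//   typedef SizeClassAllocatorLocalCache<Primary> AllocatorCache;
//   typedef LargeMmapAllocator<NoOpMapUnmapCallback> Secondary;
//   typedef CombinedAllocator<Primary, AllocatorCache, Secondary> Allocator;
//
//   static Allocator allocator;                // linker-initialized global
//   allocator.Init(kReleaseToOSIntervalNever); // or a real interval in ms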
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator>  // NOLINT
class CombinedAllocator {
 public:
  typedef typename SecondaryAllocator::FailureHandler FailureHandler;

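  // Initializer for a global instance residing in zero-initialized
  // (linker-initialized) memory, so no constructor needs to have run.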
  void InitLinkerInitialized(s32 release_to_os_interval_ms) {
    primary_.Init(release_to_os_interval_ms);
    secondary_.InitLinkerInitialized();
    stats_.InitLinkerInitialized();
  }

  void Init(s32 release_to_os_interval_ms) {
    primary_.Init(release_to_os_interval_ms);
    secondary_.Init();
    stats_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
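    // Guard against unsigned overflow of the rounded-up size below: if
    // size + alignment wraps around, the request is bogus.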
    if (size + alignment < size)
      return FailureHandler::OnBadRequest();
    uptr original_size = size;
    // If alignment requirements are to be fulfilled by the frontend allocator
    // rather than by the primary or secondary, passing an alignment lower than
    // or equal to 8 will prevent any further rounding up, as well as the later
    // alignment check.
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    // The primary allocator should return a 2^x aligned allocation when
    // requested 2^x bytes, hence using the rounded up 'size' when being
    // serviced by the primary (this is no longer true when the primary is
    // using a non-fixed base address). The secondary takes care of the
    // alignment without such requirement, and allocating 'size' would use
    // extraneous memory, so we employ 'original_size'.
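    // Worked example (illustrative numbers): Allocate(cache, 24, 32) rounds
    // 'size' up to 32, so a chunk from the primary's 32-byte size class is
    // naturally 32-aligned; a request too large for the primary instead goes
    // to the secondary with 'original_size' (24), since the secondary aligns
    // explicitly and the rounded-up size would only waste memory.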
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, original_size, alignment);
    if (!res)
      return FailureHandler::OnOOM();
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    return res;
  }

  s32 ReleaseToOSIntervalMs() const {
    return primary_.ReleaseToOSIntervalMs();
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
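    // Note: unlike C realloc, the old block is freed even if the new
    // allocation failed, in which case nullptr is returned below.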
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

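  // Releases the primary's mappings; intended for tests only. Note that the
  // secondary's mappings are left untouched.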
  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
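    // Unlock in the reverse of the ForceLock() order to keep the lock
    // hierarchy consistent.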
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};
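
// A minimal usage sketch, continuing the hypothetical 'Allocator'/'allocator'
// typedefs from the comment above (real sanitizers wrap these calls in their
// own metadata handling and thread-local cache management):
//
//   AllocatorCache cache;
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, 128, 8);   // small: primary path
//   p = allocator.Reallocate(&cache, p, 256, 8);
//   allocator.Deallocate(&cache, p);
//   allocator.DestroyCache(&cache);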