1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "ds/LifoAlloc.h"
9 #include "mozilla/Likely.h"
10 #include "mozilla/MathAlgorithms.h"
14 #ifdef LIFO_CHUNK_PROTECT
15 # include "gc/Memory.h"
20 using mozilla::tl::BitSize
;
26 UniquePtr
<BumpChunk
> BumpChunk::newWithCapacity(size_t size
) {
27 MOZ_DIAGNOSTIC_ASSERT(size
>= sizeof(BumpChunk
));
28 void* mem
= js_malloc(size
);
33 UniquePtr
<BumpChunk
> result(new (mem
) BumpChunk(size
));
35 // We assume that the alignment of LIFO_ALLOC_ALIGN is less than that of the
36 // underlying memory allocator -- creating a new BumpChunk should always
37 // satisfy the LIFO_ALLOC_ALIGN alignment constraint.
38 MOZ_ASSERT(AlignPtr(result
->begin()) == result
->begin());
42 #ifdef LIFO_CHUNK_PROTECT
44 static uint8_t* AlignPtrUp(uint8_t* ptr
, uintptr_t align
) {
45 MOZ_ASSERT(mozilla::IsPowerOfTwo(align
));
46 uintptr_t uptr
= uintptr_t(ptr
);
47 uintptr_t diff
= uptr
& (align
- 1);
48 diff
= (align
- diff
) & (align
- 1);
50 return (uint8_t*)uptr
;
53 static uint8_t* AlignPtrDown(uint8_t* ptr
, uintptr_t align
) {
54 MOZ_ASSERT(mozilla::IsPowerOfTwo(align
));
55 uintptr_t uptr
= uintptr_t(ptr
);
56 uptr
= uptr
& ~(align
- 1);
57 return (uint8_t*)uptr
;
60 void BumpChunk::setReadOnly() {
61 uintptr_t pageSize
= gc::SystemPageSize();
62 // The allocated chunks might not be aligned on page boundaries. This code
63 // is used to ensure that we are changing the memory protection of pointers
64 // which are within the range of the BumpChunk, or that the range formed by
67 uint8_t* e
= capacity_
;
68 b
= AlignPtrUp(b
, pageSize
);
69 e
= AlignPtrDown(e
, pageSize
);
73 gc::MakePagesReadOnly(b
, e
- b
);
76 void BumpChunk::setReadWrite() {
77 uintptr_t pageSize
= gc::SystemPageSize();
78 // The allocated chunks might not be aligned on page boundaries. This code
79 // is used to ensure that we are changing the memory protection of pointers
80 // which are within the range of the BumpChunk, or that the range formed by
83 uint8_t* e
= capacity_
;
84 b
= AlignPtrUp(b
, pageSize
);
85 e
= AlignPtrDown(e
, pageSize
);
89 gc::UnprotectPages(b
, e
- b
);
97 void LifoAlloc::reset(size_t defaultChunkSize
) {
98 MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize
));
100 while (!chunks_
.empty()) {
103 while (!oversize_
.empty()) {
104 oversize_
.popFirst();
106 while (!unused_
.empty()) {
109 defaultChunkSize_
= defaultChunkSize
;
110 oversizeThreshold_
= defaultChunkSize
;
113 smallAllocsSize_
= 0;
116 void LifoAlloc::freeAll() {
117 // When free-ing all chunks, we can no longer determine which chunks were
118 // transferred and which were not, so simply clear the heuristic to zero
120 smallAllocsSize_
= 0;
122 while (!chunks_
.empty()) {
123 UniqueBumpChunk bc
= chunks_
.popFirst();
124 decrementCurSize(bc
->computedSizeOfIncludingThis());
126 while (!oversize_
.empty()) {
127 UniqueBumpChunk bc
= oversize_
.popFirst();
128 decrementCurSize(bc
->computedSizeOfIncludingThis());
130 while (!unused_
.empty()) {
131 UniqueBumpChunk bc
= unused_
.popFirst();
132 decrementCurSize(bc
->computedSizeOfIncludingThis());
135 // Nb: maintaining curSize_ correctly isn't easy. Fortunately, this is an
136 // excellent sanity check.
137 MOZ_ASSERT(curSize_
== 0);
// Round at the same page granularity used by malloc: returns the usable size
// the allocator would actually hand back for a request of |aSize| bytes, so
// chunk sizes waste no allocator slack.
static size_t MallocGoodSize(size_t aSize) {
#if defined(MOZ_MEMORY)
  // mozjemalloc is in use: ask it for the size class of this request.
  return malloc_good_size(aSize);
#else
  // Unknown allocator: no rounding information available.
  return aSize;
#endif
}
149 // Heuristic to choose the size of the next BumpChunk for small allocations.
150 // `start` is the size of the first chunk. `used` is the total size of all
151 // BumpChunks in this LifoAlloc so far.
152 static size_t NextSize(size_t start
, size_t used
) {
153 // Double the size, up to 1 MB.
154 const size_t mb
= 1 * 1024 * 1024;
156 return std::max(start
, used
);
159 // After 1 MB, grow more gradually, to waste less memory.
160 // The sequence (in megabytes) begins:
161 // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
162 return RoundUp(used
/ 8, mb
);
165 LifoAlloc::UniqueBumpChunk
LifoAlloc::newChunkWithCapacity(size_t n
,
167 MOZ_ASSERT(fallibleScope_
,
168 "[OOM] Cannot allocate a new chunk in an infallible scope.");
170 // Compute the size which should be requested in order to be able to fit |n|
171 // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.
174 if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n
, &minSize
) ||
175 (minSize
& (size_t(1) << (BitSize
<size_t>::value
- 1))))) {
179 // Note: When computing chunkSize growth, we only are interested in chunks
180 // used for small allocations. This excludes unused chunks, oversized chunks,
181 // and chunks transferred in from another LifoAlloc.
182 MOZ_ASSERT(curSize_
>= smallAllocsSize_
);
183 const size_t chunkSize
= (oversize
|| minSize
> defaultChunkSize_
)
184 ? MallocGoodSize(minSize
)
185 : NextSize(defaultChunkSize_
, smallAllocsSize_
);
187 // Create a new BumpChunk, and allocate space for it.
188 UniqueBumpChunk result
= detail::BumpChunk::newWithCapacity(chunkSize
);
192 MOZ_ASSERT(result
->computedSizeOfIncludingThis() == chunkSize
);
196 LifoAlloc::UniqueBumpChunk
LifoAlloc::getOrCreateChunk(size_t n
) {
197 // Look for existing unused BumpChunks to satisfy the request, and pick the
198 // first one which is large enough, and move it into the list of used
200 if (!unused_
.empty()) {
201 if (unused_
.begin()->canAlloc(n
)) {
202 return unused_
.popFirst();
205 BumpChunkList::Iterator
e(unused_
.end());
206 for (BumpChunkList::Iterator
i(unused_
.begin()); i
->next() != e
.get();
208 detail::BumpChunk
* elem
= i
->next();
209 MOZ_ASSERT(elem
->empty());
210 if (elem
->canAlloc(n
)) {
211 BumpChunkList temp
= unused_
.splitAfter(i
.get());
212 UniqueBumpChunk newChunk
= temp
.popFirst();
213 unused_
.appendAll(std::move(temp
));
219 // Allocate a new BumpChunk with enough space for the next allocation.
220 UniqueBumpChunk newChunk
= newChunkWithCapacity(n
, false);
224 incrementCurSize(newChunk
->computedSizeOfIncludingThis());
228 void* LifoAlloc::allocImplColdPath(size_t n
) {
230 UniqueBumpChunk newChunk
= getOrCreateChunk(n
);
235 // This new chunk is about to be used for small allocations.
236 smallAllocsSize_
+= newChunk
->computedSizeOfIncludingThis();
238 // Since we just created a large enough chunk, this can't fail.
239 chunks_
.append(std::move(newChunk
));
240 result
= chunks_
.last()->tryAlloc(n
);
245 void* LifoAlloc::allocImplOversize(size_t n
) {
247 UniqueBumpChunk newChunk
= newChunkWithCapacity(n
, true);
251 incrementCurSize(newChunk
->computedSizeOfIncludingThis());
253 // Since we just created a large enough chunk, this can't fail.
254 oversize_
.append(std::move(newChunk
));
255 result
= oversize_
.last()->tryAlloc(n
);
260 bool LifoAlloc::ensureUnusedApproximateColdPath(size_t n
, size_t total
) {
261 for (detail::BumpChunk
& bc
: unused_
) {
262 total
+= bc
.unused();
268 UniqueBumpChunk newChunk
= newChunkWithCapacity(n
, false);
272 incrementCurSize(newChunk
->computedSizeOfIncludingThis());
273 unused_
.pushFront(std::move(newChunk
));
277 LifoAlloc::Mark
LifoAlloc::mark() {
280 if (!chunks_
.empty()) {
281 res
.chunk
= chunks_
.last()->mark();
283 if (!oversize_
.empty()) {
284 res
.oversize
= oversize_
.last()->mark();
289 void LifoAlloc::release(Mark mark
) {
292 auto assertIsContained
= [](const detail::BumpChunk::Mark
& m
,
293 BumpChunkList
& list
) {
294 if (m
.markedChunk()) {
295 bool contained
= false;
296 for (const detail::BumpChunk
& chunk
: list
) {
297 if (&chunk
== m
.markedChunk() && chunk
.contains(m
)) {
302 MOZ_ASSERT(contained
);
305 assertIsContained(mark
.chunk
, chunks_
);
306 assertIsContained(mark
.oversize
, oversize_
);
309 BumpChunkList released
;
310 auto cutAtMark
= [&released
](const detail::BumpChunk::Mark
& m
,
311 BumpChunkList
& list
) {
312 // Move the blocks which are after the mark to the set released chunks.
313 if (!m
.markedChunk()) {
314 released
= std::move(list
);
316 released
= list
.splitAfter(m
.markedChunk());
319 // Release everything which follows the mark in the last chunk.
321 list
.last()->release(m
);
325 // Release the content of all the blocks which are after the marks, and keep
327 cutAtMark(mark
.chunk
, chunks_
);
328 for (detail::BumpChunk
& bc
: released
) {
331 // Chunks moved from (after a mark) in chunks_ to unused_ are no longer
332 // considered small allocations.
333 smallAllocsSize_
-= bc
.computedSizeOfIncludingThis();
335 unused_
.appendAll(std::move(released
));
337 // Free the content of all the blocks which are after the marks.
338 cutAtMark(mark
.oversize
, oversize_
);
339 while (!released
.empty()) {
340 UniqueBumpChunk bc
= released
.popFirst();
341 decrementCurSize(bc
->computedSizeOfIncludingThis());
345 void LifoAlloc::steal(LifoAlloc
* other
) {
346 MOZ_ASSERT(!other
->markCount
);
347 MOZ_DIAGNOSTIC_ASSERT(unused_
.empty());
348 MOZ_DIAGNOSTIC_ASSERT(chunks_
.empty());
349 MOZ_DIAGNOSTIC_ASSERT(oversize_
.empty());
351 // Copy everything from |other| to |this| except for |peakSize_|, which
352 // requires some care.
353 chunks_
= std::move(other
->chunks_
);
354 oversize_
= std::move(other
->oversize_
);
355 unused_
= std::move(other
->unused_
);
356 markCount
= other
->markCount
;
357 defaultChunkSize_
= other
->defaultChunkSize_
;
358 oversizeThreshold_
= other
->oversizeThreshold_
;
359 curSize_
= other
->curSize_
;
360 peakSize_
= std::max(peakSize_
, other
->peakSize_
);
361 smallAllocsSize_
= other
->smallAllocsSize_
;
362 #if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
363 fallibleScope_
= other
->fallibleScope_
;
366 other
->reset(defaultChunkSize_
);
369 void LifoAlloc::transferFrom(LifoAlloc
* other
) {
370 MOZ_ASSERT(!markCount
);
371 MOZ_ASSERT(!other
->markCount
);
373 // Transferred chunks are not counted as part of |smallAllocsSize| as this
374 // could introduce bias in the |NextSize| heuristics, leading to
375 // over-allocations in *this* LifoAlloc. As well, to avoid interference with
376 // small allocations made with |this|, the last chunk of the |chunks_| list
377 // should remain the last chunk. Therefore, the transferred chunks are
378 // prepended to the |chunks_| list.
379 incrementCurSize(other
->curSize_
);
381 appendUnused(std::move(other
->unused_
));
382 chunks_
.prependAll(std::move(other
->chunks_
));
383 oversize_
.prependAll(std::move(other
->oversize_
));
385 other
->smallAllocsSize_
= 0;
388 void LifoAlloc::transferUnusedFrom(LifoAlloc
* other
) {
389 MOZ_ASSERT(!markCount
);
392 for (detail::BumpChunk
& bc
: other
->unused_
) {
393 size
+= bc
.computedSizeOfIncludingThis();
396 appendUnused(std::move(other
->unused_
));
397 incrementCurSize(size
);
398 other
->decrementCurSize(size
);
401 #ifdef LIFO_CHUNK_PROTECT
402 void LifoAlloc::setReadOnly() {
403 for (detail::BumpChunk
& bc
: chunks_
) {
406 for (detail::BumpChunk
& bc
: oversize_
) {
409 for (detail::BumpChunk
& bc
: unused_
) {
414 void LifoAlloc::setReadWrite() {
415 for (detail::BumpChunk
& bc
: chunks_
) {
418 for (detail::BumpChunk
& bc
: oversize_
) {
421 for (detail::BumpChunk
& bc
: unused_
) {