/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ds/LifoAlloc.h"

#include "mozilla/Likely.h"
#include "mozilla/MathAlgorithms.h"

#include <algorithm>

#ifdef LIFO_CHUNK_PROTECT
#  include "gc/Memory.h"
#endif

using namespace js;

using mozilla::tl::BitSize;

namespace js {
namespace detail {

/* static */
UniquePtr<BumpChunk> BumpChunk::newWithCapacity(size_t size) {
  MOZ_DIAGNOSTIC_ASSERT(size >= sizeof(BumpChunk));
  void* mem = js_malloc(size);
  if (!mem) {
    return nullptr;
  }

  UniquePtr<BumpChunk> result(new (mem) BumpChunk(size));

  // We assume that the alignment of LIFO_ALLOC_ALIGN is less than that of the
  // underlying memory allocator -- creating a new BumpChunk should always
  // satisfy the LIFO_ALLOC_ALIGN alignment constraint.
  MOZ_ASSERT(AlignPtr(result->begin()) == result->begin());
  return result;
}

#ifdef LIFO_CHUNK_PROTECT

static uint8_t* AlignPtrUp(uint8_t* ptr, uintptr_t align) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
  uintptr_t uptr = uintptr_t(ptr);
  uintptr_t diff = uptr & (align - 1);
  diff = (align - diff) & (align - 1);
  uptr = uptr + diff;
  return (uint8_t*)uptr;
}

static uint8_t* AlignPtrDown(uint8_t* ptr, uintptr_t align) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
  uintptr_t uptr = uintptr_t(ptr);
  uptr = uptr & ~(align - 1);
  return (uint8_t*)uptr;
}
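
// Worked example of the masking arithmetic above (values illustrative): with
// align == 0x1000, AlignPtrUp maps ptr == 0x12345 to 0x13000 and AlignPtrDown
// maps it to 0x12000. Pointers which are already aligned pass through both
// functions unchanged, since (uptr & (align - 1)) is zero for them.
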
void BumpChunk::setReadOnly() {
  uintptr_t pageSize = gc::SystemPageSize();
  // The allocated chunks might not be aligned on page boundaries. This code
  // is used to ensure that we are changing the memory protection of pointers
  // which are within the range of the BumpChunk, or that the range formed by
  // [b .. e] is empty.
  uint8_t* b = base();
  uint8_t* e = capacity_;
  b = AlignPtrUp(b, pageSize);
  e = AlignPtrDown(e, pageSize);
  if (e <= b) {
    return;
  }
  gc::MakePagesReadOnly(b, e - b);
}

void BumpChunk::setReadWrite() {
  uintptr_t pageSize = gc::SystemPageSize();
  // The allocated chunks might not be aligned on page boundaries. This code
  // is used to ensure that we are changing the memory protection of pointers
  // which are within the range of the BumpChunk, or that the range formed by
  // [b .. e] is empty.
  uint8_t* b = base();
  uint8_t* e = capacity_;
  b = AlignPtrUp(b, pageSize);
  e = AlignPtrDown(e, pageSize);
  if (e <= b) {
    return;
  }
  gc::UnprotectPages(b, e - b);
}

#endif

}  // namespace detail
}  // namespace js

void LifoAlloc::reset(size_t defaultChunkSize) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(defaultChunkSize));

  while (!chunks_.empty()) {
    chunks_.popFirst();
  }
  while (!oversize_.empty()) {
    oversize_.popFirst();
  }
  while (!unused_.empty()) {
    unused_.popFirst();
  }
  defaultChunkSize_ = defaultChunkSize;
  oversizeThreshold_ = defaultChunkSize;
  markCount = 0;
  curSize_ = 0;
  smallAllocsSize_ = 0;
}

void LifoAlloc::freeAll() {
  // When freeing all chunks, we can no longer determine which chunks were
  // transferred and which were not, so simply clear the heuristic to zero
  // right away.
  smallAllocsSize_ = 0;

  while (!chunks_.empty()) {
    UniqueBumpChunk bc = chunks_.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }
  while (!oversize_.empty()) {
    UniqueBumpChunk bc = oversize_.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }
  while (!unused_.empty()) {
    UniqueBumpChunk bc = unused_.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }

  // Nb: maintaining curSize_ correctly isn't easy. Fortunately, this is an
  // excellent sanity check.
  MOZ_ASSERT(curSize_ == 0);
}

// Round at the same page granularity used by malloc.
static size_t MallocGoodSize(size_t aSize) {
#if defined(MOZ_MEMORY)
  return malloc_good_size(aSize);
#else
  return aSize;
#endif
}
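
// (With mozjemalloc, malloc_good_size() reports the size the allocator would
// actually reserve for a request, so sizing the chunk to it avoids trailing
// slack; the exact size classes are allocator-specific.)
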
// Heuristic to choose the size of the next BumpChunk for small allocations.
// `start` is the size of the first chunk. `used` is the total size of all
// BumpChunks in this LifoAlloc so far.
static size_t NextSize(size_t start, size_t used) {
  // Double the size, up to 1 MB.
  const size_t mb = 1 * 1024 * 1024;
  if (used < mb) {
    return std::max(start, used);
  }

  // After 1 MB, grow more gradually, to waste less memory.
  // The sequence (in megabytes) begins:
  // 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, ...
  return RoundUp(used / 8, mb);
}
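
// Worked example of the heuristic above: while |used| is below 1 MB the next
// chunk matches the total already used, doubling it each time. Past 1 MB,
// used == 8 MB gives RoundUp(1 MB, 1 MB) == 1 MB, while used == 9 MB gives
// RoundUp(1.125 MB, 1 MB) == 2 MB, producing the eight 1s, four 2s, three 3s,
// ... of the sequence listed above.
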
LifoAlloc::UniqueBumpChunk LifoAlloc::newChunkWithCapacity(size_t n,
                                                           bool oversize) {
  MOZ_ASSERT(fallibleScope_,
             "[OOM] Cannot allocate a new chunk in an infallible scope.");

  // Compute the size which should be requested in order to be able to fit |n|
  // bytes in a newly allocated chunk, or default to |defaultChunkSize_|.
  // Requests whose computed size has the top bit of size_t set are rejected,
  // as later size computations could otherwise overflow.
  size_t minSize;
  if (MOZ_UNLIKELY(!detail::BumpChunk::allocSizeWithRedZone(n, &minSize) ||
                   (minSize & (size_t(1) << (BitSize<size_t>::value - 1))))) {
    return nullptr;
  }

  // Note: When computing chunkSize growth, we are only interested in chunks
  // used for small allocations. This excludes unused chunks, oversized chunks,
  // and chunks transferred in from another LifoAlloc.
  MOZ_ASSERT(curSize_ >= smallAllocsSize_);
  const size_t chunkSize = (oversize || minSize > defaultChunkSize_)
                               ? MallocGoodSize(minSize)
                               : NextSize(defaultChunkSize_, smallAllocsSize_);
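
  // For illustration (sizes hypothetical): with a 4 KiB |defaultChunkSize_|,
  // a small request is sized by NextSize() and so grows with
  // |smallAllocsSize_|, whereas a single 1 MiB request has
  // minSize > defaultChunkSize_ and gets its own MallocGoodSize-rounded
  // chunk.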

  // Create a new BumpChunk, and allocate space for it.
  UniqueBumpChunk result = detail::BumpChunk::newWithCapacity(chunkSize);
  if (!result) {
    return nullptr;
  }
  MOZ_ASSERT(result->computedSizeOfIncludingThis() == chunkSize);
  return result;
}

LifoAlloc::UniqueBumpChunk LifoAlloc::getOrCreateChunk(size_t n) {
  // Look for an existing unused BumpChunk to satisfy the request: pick the
  // first one which is large enough and move it into the list of used chunks.
  if (!unused_.empty()) {
    if (unused_.begin()->canAlloc(n)) {
      return unused_.popFirst();
    }

    BumpChunkList::Iterator e(unused_.end());
    for (BumpChunkList::Iterator i(unused_.begin()); i->next() != e.get();
         ++i) {
      detail::BumpChunk* elem = i->next();
      MOZ_ASSERT(elem->empty());
      if (elem->canAlloc(n)) {
        BumpChunkList temp = unused_.splitAfter(i.get());
        UniqueBumpChunk newChunk = temp.popFirst();
        unused_.appendAll(std::move(temp));
        return newChunk;
      }
    }
  }

  // Allocate a new BumpChunk with enough space for the next allocation.
  UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
  if (!newChunk) {
    return newChunk;
  }

  incrementCurSize(newChunk->computedSizeOfIncludingThis());
  return newChunk;
}

void* LifoAlloc::allocImplColdPath(size_t n) {
  void* result;
  UniqueBumpChunk newChunk = getOrCreateChunk(n);
  if (!newChunk) {
    return nullptr;
  }

  // This new chunk is about to be used for small allocations.
  smallAllocsSize_ += newChunk->computedSizeOfIncludingThis();

  // Since we just created a large enough chunk, this can't fail.
  chunks_.append(std::move(newChunk));
  result = chunks_.last()->tryAlloc(n);
  MOZ_ASSERT(result);
  return result;
}

void* LifoAlloc::allocImplOversize(size_t n) {
  void* result;
  UniqueBumpChunk newChunk = newChunkWithCapacity(n, true);
  if (!newChunk) {
    return nullptr;
  }
  incrementCurSize(newChunk->computedSizeOfIncludingThis());

  // Since we just created a large enough chunk, this can't fail.
  oversize_.append(std::move(newChunk));
  result = oversize_.last()->tryAlloc(n);
  MOZ_ASSERT(result);
  return result;
}

bool LifoAlloc::ensureUnusedApproximateColdPath(size_t n, size_t total) {
  for (detail::BumpChunk& bc : unused_) {
    total += bc.unused();
    if (total >= n) {
      return true;
    }
  }

  UniqueBumpChunk newChunk = newChunkWithCapacity(n, false);
  if (!newChunk) {
    return false;
  }
  incrementCurSize(newChunk->computedSizeOfIncludingThis());
  unused_.pushFront(std::move(newChunk));
  return true;
}

LifoAlloc::Mark LifoAlloc::mark() {
  markCount++;
  Mark res;
  if (!chunks_.empty()) {
    res.chunk = chunks_.last()->mark();
  }
  if (!oversize_.empty()) {
    res.oversize = oversize_.last()->mark();
  }
  return res;
}

void LifoAlloc::release(Mark mark) {
  markCount--;

#ifdef DEBUG
  auto assertIsContained = [](const detail::BumpChunk::Mark& m,
                              BumpChunkList& list) {
    if (m.markedChunk()) {
      bool contained = false;
      for (const detail::BumpChunk& chunk : list) {
        if (&chunk == m.markedChunk() && chunk.contains(m)) {
          contained = true;
          break;
        }
      }
      MOZ_ASSERT(contained);
    }
  };
  assertIsContained(mark.chunk, chunks_);
  assertIsContained(mark.oversize, oversize_);
#endif

  BumpChunkList released;
  auto cutAtMark = [&released](const detail::BumpChunk::Mark& m,
                               BumpChunkList& list) {
    // Move the blocks which are after the mark to the set of released chunks.
    if (!m.markedChunk()) {
      released = std::move(list);
    } else {
      released = list.splitAfter(m.markedChunk());
    }

    // Release everything which follows the mark in the last chunk.
    if (!list.empty()) {
      list.last()->release(m);
    }
  };

  // Release the content of all the blocks which are after the marks, and keep
  // the blocks as unused.
  cutAtMark(mark.chunk, chunks_);
  for (detail::BumpChunk& bc : released) {
    bc.release();

    // Chunks moved (after a mark) from chunks_ to unused_ are no longer
    // considered small allocations.
    smallAllocsSize_ -= bc.computedSizeOfIncludingThis();
  }
  unused_.appendAll(std::move(released));

  // Free the content of all the blocks which are after the marks.
  cutAtMark(mark.oversize, oversize_);
  while (!released.empty()) {
    UniqueBumpChunk bc = released.popFirst();
    decrementCurSize(bc->computedSizeOfIncludingThis());
  }
}
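
// Illustrative mark/release usage (hypothetical call site, not part of this
// file):
//
//   LifoAlloc::Mark m = alloc.mark();
//   void* scratch = alloc.alloc(n);  // temporary allocations ...
//   alloc.release(m);                // ... all reclaimed in one step here
//
// As implemented above, chunks cut off the |chunks_| list by release() are
// kept in |unused_| for reuse, while released oversize chunks are freed
// outright.
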
void LifoAlloc::steal(LifoAlloc* other) {
  MOZ_ASSERT(!other->markCount);
  MOZ_DIAGNOSTIC_ASSERT(unused_.empty());
  MOZ_DIAGNOSTIC_ASSERT(chunks_.empty());
  MOZ_DIAGNOSTIC_ASSERT(oversize_.empty());

  // Copy everything from |other| to |this| except for |peakSize_|, which
  // requires some care.
  chunks_ = std::move(other->chunks_);
  oversize_ = std::move(other->oversize_);
  unused_ = std::move(other->unused_);
  markCount = other->markCount;
  defaultChunkSize_ = other->defaultChunkSize_;
  oversizeThreshold_ = other->oversizeThreshold_;
  curSize_ = other->curSize_;
  peakSize_ = std::max(peakSize_, other->peakSize_);
  smallAllocsSize_ = other->smallAllocsSize_;
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
  fallibleScope_ = other->fallibleScope_;
#endif

  other->reset(defaultChunkSize_);
}

void LifoAlloc::transferFrom(LifoAlloc* other) {
  MOZ_ASSERT(!markCount);
  MOZ_ASSERT(!other->markCount);

  // Transferred chunks are not counted as part of |smallAllocsSize| as this
  // could introduce bias in the |NextSize| heuristics, leading to
  // over-allocations in *this* LifoAlloc. Also, to avoid interference with
  // small allocations made with |this|, the last chunk of the |chunks_| list
  // should remain the last chunk. Therefore, the transferred chunks are
  // prepended to the |chunks_| list.
  incrementCurSize(other->curSize_);

  appendUnused(std::move(other->unused_));
  chunks_.prependAll(std::move(other->chunks_));
  oversize_.prependAll(std::move(other->oversize_));
  other->curSize_ = 0;
  other->smallAllocsSize_ = 0;
}

void LifoAlloc::transferUnusedFrom(LifoAlloc* other) {
  MOZ_ASSERT(!markCount);

  size_t size = 0;
  for (detail::BumpChunk& bc : other->unused_) {
    size += bc.computedSizeOfIncludingThis();
  }

  appendUnused(std::move(other->unused_));
  incrementCurSize(size);
  other->decrementCurSize(size);
}

#ifdef LIFO_CHUNK_PROTECT
void LifoAlloc::setReadOnly() {
  for (detail::BumpChunk& bc : chunks_) {
    bc.setReadOnly();
  }
  for (detail::BumpChunk& bc : oversize_) {
    bc.setReadOnly();
  }
  for (detail::BumpChunk& bc : unused_) {
    bc.setReadOnly();
  }
}

void LifoAlloc::setReadWrite() {
  for (detail::BumpChunk& bc : chunks_) {
    bc.setReadWrite();
  }
  for (detail::BumpChunk& bc : oversize_) {
    bc.setReadWrite();
  }
  for (detail::BumpChunk& bc : unused_) {
    bc.setReadWrite();
  }
}
#endif