js/src/gc/Heap.cpp

/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Tenured heap management.
 *
 * This file contains method definitions for the following classes for code
 * that is not specific to a particular phase of GC:
 *
 *  - Arena
 *  - ArenaList
 *  - FreeLists
 *  - ArenaLists
 *  - TenuredChunk
 *  - ChunkPool
 */

#include "gc/Heap-inl.h"

#include "gc/GCLock.h"
#include "gc/Memory.h"
#include "jit/Assembler.h"
#include "vm/BigIntType.h"
#include "vm/RegExpShared.h"
#include "vm/Scope.h"

#include "gc/ArenaList-inl.h"
#include "gc/PrivateIterators-inl.h"

using namespace js;
using namespace js::gc;

// Check that reserved bits of a Cell are compatible with our typical
// allocators since most derived classes will store a pointer in the first
// word.
static const size_t MinFirstWordAlignment = 1u << CellFlagBitsReservedForGC;
static_assert(js::detail::LIFO_ALLOC_ALIGN >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support LifoAlloc");
static_assert(CellAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support gc::Cell");
static_assert(js::jit::CodeAlignment >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JIT code");
static_assert(js::gc::JSClassAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JSClass pointers");
static_assert(js::ScopeDataAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support scope data pointers");
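// For example, if CellFlagBitsReservedForGC were 3 (the actual value is
// defined elsewhere), MinFirstWordAlignment would be 8: any pointer stored in
// a cell's first word must then be at least 8-byte aligned so its low three
// bits stay free for GC flag bits. Each static_assert above checks this for
// one allocator whose allocations can end up in that first word.
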
#define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType, bgFinal,       \
                         nursery, compact)                                     \
  static_assert(sizeof(sizedType) >= sizeof(FreeSpan),                         \
                #sizedType " is smaller than FreeSpan");                       \
  static_assert(sizeof(sizedType) % CellAlignBytes == 0,                       \
                "Size of " #sizedType " is not a multiple of CellAlignBytes"); \
  static_assert(sizeof(sizedType) >= MinCellSize,                              \
                "Size of " #sizedType " is smaller than the minimum size");
FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
#undef CHECK_THING_SIZE

FreeSpan FreeLists::emptySentinel;

template <typename T>
struct ArenaLayout {
  static constexpr size_t thingSize() { return sizeof(T); }
  static constexpr size_t thingsPerArena() {
    return (ArenaSize - ArenaHeaderSize) / thingSize();
  }
  static constexpr size_t firstThingOffset() {
    return ArenaSize - thingSize() * thingsPerArena();
  }
};
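// Worked example with purely illustrative numbers: if ArenaSize were 4096,
// the arena header 32 bytes, and sizeof(T) 48, then thingsPerArena() would be
// (4096 - 32) / 48 = 84 and firstThingOffset() would be 4096 - 48 * 84 = 64,
// i.e. the 32 bytes that do not divide evenly into 48-byte things become
// padding between the header and the first thing.
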
const uint8_t Arena::ThingSizes[] = {
#define EXPAND_THING_SIZE(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingSize(),
    FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
#undef EXPAND_THING_SIZE
};

const uint8_t Arena::FirstThingOffsets[] = {
#define EXPAND_FIRST_THING_OFFSET(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::firstThingOffset(),
    FOR_EACH_ALLOCKIND(EXPAND_FIRST_THING_OFFSET)
#undef EXPAND_FIRST_THING_OFFSET
};

const uint8_t Arena::ThingsPerArena[] = {
#define EXPAND_THINGS_PER_ARENA(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingsPerArena(),
    FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
#undef EXPAND_THINGS_PER_ARENA
};
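// For every AllocKind the three tables above satisfy
// FirstThingOffsets[kind] + ThingsPerArena[kind] * ThingSizes[kind] ==
// ArenaSize, which checkLookupTables() below asserts in debug builds.
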
void Arena::unmarkAll() {
  MarkBitmapWord* arenaBits = chunk()->markBits.arenaBits(this);
  for (size_t i = 0; i < ArenaBitmapWords; i++) {
    arenaBits[i] = 0;
  }
}

void Arena::unmarkPreMarkedFreeCells() {
  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(cell->isMarkedBlack());
    cell->unmark();
  }
}

#ifdef DEBUG

void Arena::checkNoMarkedFreeCells() {
  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(!cell->isMarkedAny());
  }
}

void Arena::checkAllCellsMarkedBlack() {
  for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(cell->isMarkedBlack());
  }
}

#endif

#if defined(DEBUG) || defined(JS_GC_ZEAL)
void Arena::checkNoMarkedCells() {
  for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(!cell->isMarkedAny());
  }
}
#endif

/* static */
void Arena::staticAsserts() {
  static_assert(size_t(AllocKind::LIMIT) <= 255,
                "All AllocKinds and AllocKind::LIMIT must fit in a uint8_t.");
  static_assert(std::size(ThingSizes) == AllocKindCount,
                "We haven't defined all thing sizes.");
  static_assert(std::size(FirstThingOffsets) == AllocKindCount,
                "We haven't defined all offsets.");
  static_assert(std::size(ThingsPerArena) == AllocKindCount,
                "We haven't defined all counts.");
}

/* static */
void Arena::checkLookupTables() {
#ifdef DEBUG
  for (size_t i = 0; i < AllocKindCount; i++) {
    MOZ_ASSERT(
        FirstThingOffsets[i] + ThingsPerArena[i] * ThingSizes[i] == ArenaSize,
        "Inconsistent arena lookup table data");
  }
#endif
}

#ifdef DEBUG
void js::gc::ArenaList::dump() {
  fprintf(stderr, "ArenaList %p:", this);
  if (cursorp_ == &head_) {
    fprintf(stderr, " *");
  }
  for (Arena* arena = head(); arena; arena = arena->next) {
    fprintf(stderr, " %p", arena);
    if (cursorp_ == &arena->next) {
      fprintf(stderr, " *");
    }
  }
  fprintf(stderr, "\n");
}
#endif

Arena* ArenaList::removeRemainingArenas(Arena** arenap) {
  // This is only ever called to remove arenas that are after the cursor, so
  // we don't need to update it.
#ifdef DEBUG
  for (Arena* arena = *arenap; arena; arena = arena->next) {
    MOZ_ASSERT(cursorp_ != &arena->next);
  }
#endif
  Arena* remainingArenas = *arenap;
  *arenap = nullptr;
  check();
  return remainingArenas;
}

AutoGatherSweptArenas::AutoGatherSweptArenas(JS::Zone* zone, AllocKind kind) {
  GCRuntime& gc = zone->runtimeFromMainThread()->gc;
  sortedList = gc.maybeGetForegroundFinalizedArenas(zone, kind);
  if (!sortedList) {
    return;
  }

  // Link individual sorted arena lists together for iteration, saving the
  // internal state so we can restore it later.
  linked = sortedList->convertToArenaList(bucketLastPointers);
}

AutoGatherSweptArenas::~AutoGatherSweptArenas() {
  if (sortedList) {
    sortedList->restoreFromArenaList(linked, bucketLastPointers);
  }
  linked.clear();
}

Arena* AutoGatherSweptArenas::sweptArenas() const { return linked.head(); }
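// Typical use is RAII-style (illustrative sketch; the variable names are made
// up):
//
//   AutoGatherSweptArenas gather(zone, kind);
//   for (Arena* arena = gather.sweptArenas(); arena; arena = arena->next) {
//     // Inspect arenas that were finalized on the foreground but have not
//     // yet been merged back.
//   }
//
// The destructor restores the SortedArenaList's internal state, so the
// gathered view is only valid within the guard's scope.
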
FreeLists::FreeLists() {
  for (auto i : AllAllocKinds()) {
    freeLists_[i] = &emptySentinel;
  }
}

ArenaLists::ArenaLists(Zone* zone)
    : zone_(zone),
      gcCompactPropMapArenasToUpdate(nullptr),
      gcNormalPropMapArenasToUpdate(nullptr),
      savedEmptyArenas(nullptr) {
  for (auto i : AllAllocKinds()) {
    concurrentUse(i) = ConcurrentUse::None;
  }
}

void ReleaseArenas(JSRuntime* rt, Arena* arena, const AutoLockGC& lock) {
  Arena* next;
  for (; arena; arena = next) {
    next = arena->next;
    rt->gc.releaseArena(arena, lock);
  }
}

void ReleaseArenaList(JSRuntime* rt, ArenaList& arenaList,
                      const AutoLockGC& lock) {
  ReleaseArenas(rt, arenaList.head(), lock);
  arenaList.clear();
}

ArenaLists::~ArenaLists() {
  AutoLockGC lock(runtime());

  for (auto i : AllAllocKinds()) {
    /*
     * We can only call this during shutdown, after the last GC, when
     * background finalization is disabled.
     */
    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
    ReleaseArenaList(runtime(), arenaList(i), lock);
  }
  ReleaseArenas(runtime(), savedEmptyArenas, lock);
}

void ArenaLists::moveArenasToCollectingLists() {
  checkEmptyFreeLists();
  for (AllocKind kind : AllAllocKinds()) {
    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
    collectingArenaList(kind) = std::move(arenaList(kind));
    MOZ_ASSERT(arenaList(kind).isEmpty());
  }
}

void ArenaLists::mergeArenasFromCollectingLists() {
  for (AllocKind kind : AllAllocKinds()) {
    collectingArenaList(kind).insertListWithCursorAtEnd(arenaList(kind));
    arenaList(kind) = std::move(collectingArenaList(kind));
    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
  }
}

Arena* ArenaLists::takeSweptEmptyArenas() {
  Arena* arenas = savedEmptyArenas;
  savedEmptyArenas = nullptr;
  return arenas;
}

void ArenaLists::checkGCStateNotInUse() {
  // Called before and after collection to check the state is as expected.
#ifdef DEBUG
  checkSweepStateNotInUse();
  for (auto i : AllAllocKinds()) {
    MOZ_ASSERT(collectingArenaList(i).isEmpty());
  }
#endif
}

void ArenaLists::checkSweepStateNotInUse() {
#ifdef DEBUG
  checkNoArenasToUpdate();
  MOZ_ASSERT(!savedEmptyArenas);
  for (auto i : AllAllocKinds()) {
    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
  }
#endif
}

void ArenaLists::checkNoArenasToUpdate() {
  MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
  MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
}

void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
#ifdef DEBUG
  switch (kind) {
    case AllocKind::COMPACT_PROP_MAP:
      MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
      break;
    case AllocKind::NORMAL_PROP_MAP:
      MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
      break;
    default:
      break;
  }
#endif
}

inline bool TenuredChunk::canDecommitPage(size_t pageIndex) const {
  if (decommittedPages[pageIndex]) {
    return false;
  }

  size_t arenaIndex = pageIndex * ArenasPerPage;
  for (size_t i = 0; i < ArenasPerPage; i++) {
    if (!freeCommittedArenas[arenaIndex + i]) {
      return false;
    }
  }

  return true;
}

void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
                                      AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (cancel) {
      break;
    }

    if (canDecommitPage(i) && !decommitOneFreePage(gc, i, lock)) {
      break;
    }
  }
}

void TenuredChunk::recycleArena(Arena* arena, SortedArenaList& dest,
                                size_t thingsPerArena) {
  arena->setAsFullyUnused();
  dest.insertAt(arena, thingsPerArena);
}

void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
                                const AutoLockGC& lock) {
  MOZ_ASSERT(!arena->allocated());
  MOZ_ASSERT(!freeCommittedArenas[arenaIndex(arena)]);

  freeCommittedArenas[arenaIndex(arena)] = true;
  ++info.numArenasFreeCommitted;
  ++info.numArenasFree;
  gc->updateOnArenaFree();

  verify();

  updateChunkListAfterFree(gc, 1, lock);
}

bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
                                       AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());
  MOZ_ASSERT(canDecommitPage(pageIndex));
  MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

  // Temporarily mark the page as allocated while we decommit.
  for (size_t i = 0; i < ArenasPerPage; i++) {
    size_t arenaIndex = pageIndex * ArenasPerPage + i;
    MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
    freeCommittedArenas[arenaIndex] = false;
  }
  info.numArenasFreeCommitted -= ArenasPerPage;
  info.numArenasFree -= ArenasPerPage;
  updateChunkListAfterAlloc(gc, lock);

  verify();

  bool ok;
  {
    AutoUnlockGC unlock(lock);
    ok = !oom::ShouldFailWithOOM() &&
         MarkPagesUnusedSoft(pageAddress(pageIndex), PageSize);
  }

  // Mark the page as decommitted if successful, or restore the original free
  // state.
  if (ok) {
    decommittedPages[pageIndex] = true;
  } else {
    for (size_t i = 0; i < ArenasPerPage; i++) {
      size_t arenaIndex = pageIndex * ArenasPerPage + i;
      MOZ_ASSERT(!freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = true;
    }
    info.numArenasFreeCommitted += ArenasPerPage;
  }

  info.numArenasFree += ArenasPerPage;
  updateChunkListAfterFree(gc, ArenasPerPage, lock);

  verify();

  return ok;
}

void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (!canDecommitPage(i)) {
      continue;
    }

    MOZ_ASSERT(!decommittedPages[i]);
    MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

    if (js::oom::ShouldFailWithOOM() ||
        !MarkPagesUnusedSoft(pageAddress(i), SystemPageSize())) {
      break;
    }

    decommittedPages[i] = true;
    for (size_t j = 0; j < ArenasPerPage; ++j) {
      size_t arenaIndex = i * ArenasPerPage + j;
      MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = false;
    }
    info.numArenasFreeCommitted -= ArenasPerPage;
  }

  verify();
}

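// The two helpers below keep a chunk on the right GC chunk list as its
// free-arena count changes. After an allocation, a chunk with no available
// arenas moves from the available list to the full list. After a free there
// are three cases: the chunk was previously full (its free count equals the
// number of arenas just freed) and moves back to the available list; it still
// has some allocated arenas and stays where it is; or it is now entirely
// unused and is handed back via recycleChunk().
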
void TenuredChunk::updateChunkListAfterAlloc(GCRuntime* gc,
                                             const AutoLockGC& lock) {
  if (MOZ_UNLIKELY(!hasAvailableArenas())) {
    gc->availableChunks(lock).remove(this);
    gc->fullChunks(lock).push(this);
  }
}

void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
                                            const AutoLockGC& lock) {
  if (info.numArenasFree == numArenasFree) {
    gc->fullChunks(lock).remove(this);
    gc->availableChunks(lock).push(this);
  } else if (!unused()) {
    MOZ_ASSERT(gc->availableChunks(lock).contains(this));
  } else {
    MOZ_ASSERT(unused());
    gc->availableChunks(lock).remove(this);
    gc->recycleChunk(this, lock);
  }
}

TenuredChunk* ChunkPool::pop() {
  MOZ_ASSERT(bool(head_) == bool(count_));
  if (!count_) {
    return nullptr;
  }
  return remove(head_);
}

void ChunkPool::push(TenuredChunk* chunk) {
  MOZ_ASSERT(!chunk->info.next);
  MOZ_ASSERT(!chunk->info.prev);

  chunk->info.next = head_;
  if (head_) {
    head_->info.prev = chunk;
  }
  head_ = chunk;
  ++count_;
}

TenuredChunk* ChunkPool::remove(TenuredChunk* chunk) {
  MOZ_ASSERT(count_ > 0);
  MOZ_ASSERT(contains(chunk));

  if (head_ == chunk) {
    head_ = chunk->info.next;
  }
  if (chunk->info.prev) {
    chunk->info.prev->info.next = chunk->info.next;
  }
  if (chunk->info.next) {
    chunk->info.next->info.prev = chunk->info.prev;
  }
  chunk->info.next = chunk->info.prev = nullptr;
  --count_;

  return chunk;
}

// We could keep the chunk pool sorted, but that's likely to be more expensive.
// This sort is O(n log n), whereas keeping the pool sorted on every update is
// likely to be O(m * n), with m being the number of operations (likely higher
// than n).
void ChunkPool::sort() {
  // Only sort if the list isn't already sorted.
  if (!isSorted()) {
    head_ = mergeSort(head(), count());

    // Fixup prev pointers.
    TenuredChunk* prev = nullptr;
    for (TenuredChunk* cur = head_; cur; cur = cur->info.next) {
      cur->info.prev = prev;
      prev = cur;
    }
  }

  MOZ_ASSERT(verify());
  MOZ_ASSERT(isSorted());
}
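// mergeSort() below orders chunks by ascending numArenasFree, the same
// invariant isSorted() checks, so chunks with the fewest free arenas end up
// at the head of the pool. Presumably this lets callers prefer nearly-full
// chunks and leave nearly-empty ones free to be decommitted or released.
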
TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
  MOZ_ASSERT(bool(list) == bool(count));
  if (count < 2) {
    return list;
  }

  size_t half = count / 2;

  // Split.
  TenuredChunk* front = list;
  TenuredChunk* back;
  {
    TenuredChunk* cur = list;
    for (size_t i = 0; i < half - 1; i++) {
      MOZ_ASSERT(cur);
      cur = cur->info.next;
    }
    back = cur->info.next;
    cur->info.next = nullptr;
  }

  front = mergeSort(front, half);
  back = mergeSort(back, count - half);

  // Merge.
  list = nullptr;
  TenuredChunk** cur = &list;
  while (front || back) {
    if (!front) {
      *cur = back;
      break;
    }
    if (!back) {
      *cur = front;
      break;
    }

    // Note that the sort is stable due to the <= here. Nothing depends on
    // this but it could.
    if (front->info.numArenasFree <= back->info.numArenasFree) {
      *cur = front;
      front = front->info.next;
      cur = &(*cur)->info.next;
    } else {
      *cur = back;
      back = back->info.next;
      cur = &(*cur)->info.next;
    }
  }

  return list;
}

bool ChunkPool::isSorted() const {
  uint32_t last = 1;
  for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
    if (cursor->info.numArenasFree < last) {
      return false;
    }
    last = cursor->info.numArenasFree;
  }
  return true;
}

#ifdef DEBUG

bool ChunkPool::contains(TenuredChunk* chunk) const {
  verify();
  for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
    if (cursor == chunk) {
      return true;
    }
  }
  return false;
}

bool ChunkPool::verify() const {
  MOZ_ASSERT(bool(head_) == bool(count_));
  uint32_t count = 0;
  for (TenuredChunk* cursor = head_; cursor;
       cursor = cursor->info.next, ++count) {
    MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
    MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
  }
  MOZ_ASSERT(count_ == count);
  return true;
}

void ChunkPool::verifyChunks() const {
  for (TenuredChunk* chunk = head_; chunk; chunk = chunk->info.next) {
    chunk->verify();
  }
}

void TenuredChunk::verify() const {
  // Check the mark bits for each arena are aligned to the cache line size.
  static_assert((offsetof(TenuredChunk, arenas) % ArenaSize) == 0);
  constexpr size_t CellBytesPerMarkByte = CellBytesPerMarkBit * 8;
  static_assert((ArenaSize % CellBytesPerMarkByte) == 0);
  constexpr size_t MarkBytesPerArena = ArenaSize / CellBytesPerMarkByte;
  static_assert((MarkBytesPerArena % TypicalCacheLineSize) == 0);
  static_assert((offsetof(TenuredChunk, markBits) % TypicalCacheLineSize) == 0);

  MOZ_ASSERT(info.numArenasFree <= ArenasPerChunk);
  MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

  size_t decommittedCount = decommittedPages.Count() * ArenasPerPage;
  size_t freeCommittedCount = freeCommittedArenas.Count();
  size_t freeCount = freeCommittedCount + decommittedCount;

  MOZ_ASSERT(freeCount == info.numArenasFree);
  MOZ_ASSERT(freeCommittedCount == info.numArenasFreeCommitted);

  for (size_t i = 0; i < ArenasPerChunk; ++i) {
    MOZ_ASSERT(!(decommittedPages[pageIndex(i)] && freeCommittedArenas[i]));
    MOZ_ASSERT_IF(freeCommittedArenas[i], !arenas[i].allocated());
  }
}

#endif

void ChunkPool::Iter::next() {
  MOZ_ASSERT(!done());
  current_ = current_->info.next;
}