/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/*
 * Tenured heap management.
 *
 * This file contains method definitions for the following classes for code
 * that is not specific to a particular phase of GC:
 *
 *  - Arena
 *  - ArenaList
 *  - FreeLists
 *  - ArenaLists
 *  - TenuredChunk
 *  - ChunkPool
 */

#include "gc/Heap-inl.h"

#include "gc/GCLock.h"
#include "gc/Memory.h"
#include "jit/Assembler.h"
#include "vm/BigIntType.h"
#include "vm/RegExpShared.h"

#include "gc/ArenaList-inl.h"
#include "gc/PrivateIterators-inl.h"

using namespace js;
using namespace js::gc;

// Check that reserved bits of a Cell are compatible with our typical
// allocators since most derived classes will store a pointer in the first
// word.
static const size_t MinFirstWordAlignment = 1u << CellFlagBitsReservedForGC;
static_assert(js::detail::LIFO_ALLOC_ALIGN >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support LifoAlloc");
static_assert(CellAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support gc::Cell");
static_assert(js::jit::CodeAlignment >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JIT code");
static_assert(js::gc::JSClassAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support JSClass pointers");
static_assert(js::ScopeDataAlignBytes >= MinFirstWordAlignment,
              "CellFlagBitsReservedForGC should support scope data pointers");

#define CHECK_THING_SIZE(allocKind, traceKind, type, sizedType, bgFinal,      \
                         nursery, compact)                                    \
  static_assert(sizeof(sizedType) >= SortedArenaList::MinThingSize,           \
                #sizedType " is smaller than SortedArenaList::MinThingSize!");\
  static_assert(sizeof(sizedType) >= sizeof(FreeSpan),                        \
                #sizedType " is smaller than FreeSpan");                      \
  static_assert(sizeof(sizedType) % CellAlignBytes == 0,                      \
                "Size of " #sizedType " is not a multiple of CellAlignBytes");\
  static_assert(sizeof(sizedType) >= MinCellSize,                             \
                "Size of " #sizedType " is smaller than the minimum size");
FOR_EACH_ALLOCKIND(CHECK_THING_SIZE);
#undef CHECK_THING_SIZE

FreeSpan FreeLists::emptySentinel;

template <typename T>
struct ArenaLayout {
  static constexpr size_t thingSize() { return sizeof(T); }
  static constexpr size_t thingsPerArena() {
    return (ArenaSize - ArenaHeaderSize) / thingSize();
  }
  static constexpr size_t firstThingOffset() {
    return ArenaSize - thingSize() * thingsPerArena();
  }
};

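// Worked example with hypothetical sizes (the real ArenaSize and
// ArenaHeaderSize are defined elsewhere): for a 4096-byte arena with a
// 64-byte header and 40-byte things, thingsPerArena() == (4096 - 64) / 40 ==
// 100 and firstThingOffset() == 4096 - 40 * 100 == 96, so the 32 bytes that
// don't fit a whole thing become padding between the header and the first
// thing.
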
const uint8_t Arena::ThingSizes[] = {
#define EXPAND_THING_SIZE(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingSize(),
    FOR_EACH_ALLOCKIND(EXPAND_THING_SIZE)
#undef EXPAND_THING_SIZE
};

const uint8_t Arena::FirstThingOffsets[] = {
#define EXPAND_FIRST_THING_OFFSET(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::firstThingOffset(),
    FOR_EACH_ALLOCKIND(EXPAND_FIRST_THING_OFFSET)
#undef EXPAND_FIRST_THING_OFFSET
};

const uint8_t Arena::ThingsPerArena[] = {
#define EXPAND_THINGS_PER_ARENA(_1, _2, _3, sizedType, _4, _5, _6) \
  ArenaLayout<sizedType>::thingsPerArena(),
    FOR_EACH_ALLOCKIND(EXPAND_THINGS_PER_ARENA)
#undef EXPAND_THINGS_PER_ARENA
};

void Arena::unmarkAll() {
  MarkBitmapWord* arenaBits = chunk()->markBits.arenaBits(this);
  for (size_t i = 0; i < ArenaBitmapWords; i++) {
    arenaBits[i] = 0;
  }
}

void Arena::unmarkPreMarkedFreeCells() {
  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(cell->isMarkedBlack());
    cell->unmark();
  }
}

void Arena::checkNoMarkedFreeCells() {
  for (ArenaFreeCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(!cell->isMarkedAny());
  }
}

void Arena::checkAllCellsMarkedBlack() {
  for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(cell->isMarkedBlack());
  }
}

#if defined(DEBUG) || defined(JS_GC_ZEAL)
void Arena::checkNoMarkedCells() {
  for (ArenaCellIter cell(this); !cell.done(); cell.next()) {
    MOZ_ASSERT(!cell->isMarkedAny());
  }
}
#endif

void Arena::staticAsserts() {
  static_assert(size_t(AllocKind::LIMIT) <= 255,
                "All AllocKinds and AllocKind::LIMIT must fit in a uint8_t.");
  static_assert(std::size(ThingSizes) == AllocKindCount,
                "We haven't defined all thing sizes.");
  static_assert(std::size(FirstThingOffsets) == AllocKindCount,
                "We haven't defined all offsets.");
  static_assert(std::size(ThingsPerArena) == AllocKindCount,
                "We haven't defined all counts.");
}

void Arena::checkLookupTables() {
  for (size_t i = 0; i < AllocKindCount; i++) {
    MOZ_ASSERT(
        FirstThingOffsets[i] + ThingsPerArena[i] * ThingSizes[i] == ArenaSize,
        "Inconsistent arena lookup table data");
  }
}

void js::gc::ArenaList::dump() {
  fprintf(stderr, "ArenaList %p:", this);
  if (cursorp_ == &head_) {
    fprintf(stderr, " *");
  }
  for (Arena* arena = head(); arena; arena = arena->next) {
    fprintf(stderr, " %p", arena);
    if (cursorp_ == &arena->next) {
      fprintf(stderr, " *");
    }
  }
  fprintf(stderr, "\n");
}

Arena* ArenaList::removeRemainingArenas(Arena** arenap) {
  // This is only ever called to remove arenas that are after the cursor, so
  // we don't need to update it.
  for (Arena* arena = *arenap; arena; arena = arena->next) {
    MOZ_ASSERT(cursorp_ != &arena->next);
  }

  Arena* remainingArenas = *arenap;
  *arenap = nullptr;
  return remainingArenas;
}

FreeLists::FreeLists() {
  for (auto i : AllAllocKinds()) {
    freeLists_[i] = &emptySentinel;
  }
}

ArenaLists::ArenaLists(Zone* zone)
    : zone_(zone),
      incrementalSweptArenaKind(AllocKind::LIMIT),
      gcCompactPropMapArenasToUpdate(nullptr),
      gcNormalPropMapArenasToUpdate(nullptr),
      savedEmptyArenas(nullptr) {
  for (auto i : AllAllocKinds()) {
    concurrentUse(i) = ConcurrentUse::None;
  }
}

void ReleaseArenas(JSRuntime* rt, Arena* arena, const AutoLockGC& lock) {
  Arena* next;
  for (; arena; arena = next) {
    // Read the next pointer before the arena is released.
    next = arena->next;
    rt->gc.releaseArena(arena, lock);
  }
}

void ReleaseArenaList(JSRuntime* rt, ArenaList& arenaList,
                      const AutoLockGC& lock) {
  ReleaseArenas(rt, arenaList.head(), lock);
}

ArenaLists::~ArenaLists() {
  AutoLockGC lock(runtime());

  for (auto i : AllAllocKinds()) {
    /*
     * We can only call this during the shutdown after the last GC when
     * the background finalization is disabled.
     */
    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
    ReleaseArenaList(runtime(), arenaList(i), lock);
  }
  ReleaseArenaList(runtime(), incrementalSweptArenas.ref(), lock);

  ReleaseArenas(runtime(), savedEmptyArenas, lock);
}

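// At the start of a collection, arenas are moved from the per-kind arena
// lists to the corresponding collecting lists; once collection is done they
// are merged back together with any arenas allocated in the meantime.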
void ArenaLists::moveArenasToCollectingLists() {
  checkEmptyFreeLists();
  for (AllocKind kind : AllAllocKinds()) {
    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
    collectingArenaList(kind) = std::move(arenaList(kind));
    MOZ_ASSERT(arenaList(kind).isEmpty());
  }
}

void ArenaLists::mergeArenasFromCollectingLists() {
  for (AllocKind kind : AllAllocKinds()) {
    collectingArenaList(kind).insertListWithCursorAtEnd(arenaList(kind));
    arenaList(kind) = std::move(collectingArenaList(kind));
    MOZ_ASSERT(collectingArenaList(kind).isEmpty());
  }
}

Arena* ArenaLists::takeSweptEmptyArenas() {
  Arena* arenas = savedEmptyArenas;
  savedEmptyArenas = nullptr;
  return arenas;
}

void ArenaLists::setIncrementalSweptArenas(AllocKind kind,
                                           SortedArenaList& arenas) {
  incrementalSweptArenaKind = kind;
  incrementalSweptArenas.ref().clear();
  incrementalSweptArenas = arenas.toArenaList();
}

void ArenaLists::clearIncrementalSweptArenas() {
  incrementalSweptArenaKind = AllocKind::LIMIT;
  incrementalSweptArenas.ref().clear();
}

void ArenaLists::checkGCStateNotInUse() {
  // Called before and after collection to check the state is as expected.
  checkSweepStateNotInUse();
  for (auto i : AllAllocKinds()) {
    MOZ_ASSERT(collectingArenaList(i).isEmpty());
  }
}

void ArenaLists::checkSweepStateNotInUse() {
  checkNoArenasToUpdate();
  MOZ_ASSERT(incrementalSweptArenaKind == AllocKind::LIMIT);
  MOZ_ASSERT(incrementalSweptArenas.ref().isEmpty());
  MOZ_ASSERT(!savedEmptyArenas);
  for (auto i : AllAllocKinds()) {
    MOZ_ASSERT(concurrentUse(i) == ConcurrentUse::None);
  }
}

void ArenaLists::checkNoArenasToUpdate() {
  MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
  MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
}

void ArenaLists::checkNoArenasToUpdateForKind(AllocKind kind) {
  switch (kind) {
    case AllocKind::COMPACT_PROP_MAP:
      MOZ_ASSERT(!gcCompactPropMapArenasToUpdate);
      break;
    case AllocKind::NORMAL_PROP_MAP:
      MOZ_ASSERT(!gcNormalPropMapArenasToUpdate);
      break;
    default:
      break;
  }
}

inline bool TenuredChunk::canDecommitPage(size_t pageIndex) const {
  if (decommittedPages[pageIndex]) {
    return false;
  }

  // A page can only be decommitted if all arenas on it are free.
  size_t arenaIndex = pageIndex * ArenasPerPage;
  for (size_t i = 0; i < ArenasPerPage; i++) {
    if (!freeCommittedArenas[arenaIndex + i]) {
      return false;
    }
  }

  return true;
}

void TenuredChunk::decommitFreeArenas(GCRuntime* gc, const bool& cancel,
                                      AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (cancel) {
      break;
    }

    if (canDecommitPage(i) && !decommitOneFreePage(gc, i, lock)) {
      break;
    }
  }
}

void TenuredChunk::recycleArena(Arena* arena, SortedArenaList& dest,
                                size_t thingsPerArena) {
  arena->setAsFullyUnused();
  dest.insertAt(arena, thingsPerArena);
}

void TenuredChunk::releaseArena(GCRuntime* gc, Arena* arena,
                                const AutoLockGC& lock) {
  MOZ_ASSERT(!arena->allocated());
  MOZ_ASSERT(!freeCommittedArenas[arenaIndex(arena)]);

  freeCommittedArenas[arenaIndex(arena)] = true;
  ++info.numArenasFreeCommitted;
  ++info.numArenasFree;
  gc->updateOnArenaFree();

  updateChunkListAfterFree(gc, 1, lock);
}

bool TenuredChunk::decommitOneFreePage(GCRuntime* gc, size_t pageIndex,
                                       AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());
  MOZ_ASSERT(canDecommitPage(pageIndex));
  MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

  // Temporarily mark the page as allocated while we decommit.
  for (size_t i = 0; i < ArenasPerPage; i++) {
    size_t arenaIndex = pageIndex * ArenasPerPage + i;
    MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
    freeCommittedArenas[arenaIndex] = false;
  }
  info.numArenasFreeCommitted -= ArenasPerPage;
  info.numArenasFree -= ArenasPerPage;
  updateChunkListAfterAlloc(gc, lock);

  bool ok;
  {
    // Decommit the page without holding the GC lock.
    AutoUnlockGC unlock(lock);
    ok = !oom::ShouldFailWithOOM() &&
         MarkPagesUnusedSoft(pageAddress(pageIndex), PageSize);
  }

  // Mark the page as decommitted if successful, or restore the original free
  // state otherwise.
  if (ok) {
    decommittedPages[pageIndex] = true;
  } else {
    for (size_t i = 0; i < ArenasPerPage; i++) {
      size_t arenaIndex = pageIndex * ArenasPerPage + i;
      MOZ_ASSERT(!freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = true;
    }
    info.numArenasFreeCommitted += ArenasPerPage;
  }

  info.numArenasFree += ArenasPerPage;
  updateChunkListAfterFree(gc, ArenasPerPage, lock);

  return ok;
}

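// Unlike decommitOneFreePage() above, which drops the GC lock around the
// MarkPagesUnusedSoft() call, this variant performs the whole operation with
// the lock held, for callers that cannot safely unlock.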
void TenuredChunk::decommitFreeArenasWithoutUnlocking(const AutoLockGC& lock) {
  MOZ_ASSERT(DecommitEnabled());

  for (size_t i = 0; i < PagesPerChunk; i++) {
    if (!canDecommitPage(i)) {
      continue;
    }

    MOZ_ASSERT(!decommittedPages[i]);
    MOZ_ASSERT(info.numArenasFreeCommitted >= ArenasPerPage);

    if (js::oom::ShouldFailWithOOM() ||
        !MarkPagesUnusedSoft(pageAddress(i), SystemPageSize())) {
      break;
    }

    decommittedPages[i] = true;
    for (size_t j = 0; j < ArenasPerPage; ++j) {
      size_t arenaIndex = i * ArenasPerPage + j;
      MOZ_ASSERT(freeCommittedArenas[arenaIndex]);
      freeCommittedArenas[arenaIndex] = false;
    }
    info.numArenasFreeCommitted -= ArenasPerPage;
  }
}

void TenuredChunk::updateChunkListAfterAlloc(GCRuntime* gc,
                                             const AutoLockGC& lock) {
  if (MOZ_UNLIKELY(!hasAvailableArenas())) {
    gc->availableChunks(lock).remove(this);
    gc->fullChunks(lock).push(this);
  }
}

void TenuredChunk::updateChunkListAfterFree(GCRuntime* gc, size_t numArenasFree,
                                            const AutoLockGC& lock) {
  if (info.numArenasFree == numArenasFree) {
    gc->fullChunks(lock).remove(this);
    gc->availableChunks(lock).push(this);
  } else if (!unused()) {
    MOZ_ASSERT(gc->availableChunks(lock).contains(this));
  } else {
    MOZ_ASSERT(unused());
    gc->availableChunks(lock).remove(this);
    gc->recycleChunk(this, lock);
  }
}

TenuredChunk* ChunkPool::pop() {
  MOZ_ASSERT(bool(head_) == bool(count_));
  if (!count_) {
    return nullptr;
  }
  return remove(head_);
}

void ChunkPool::push(TenuredChunk* chunk) {
  MOZ_ASSERT(!chunk->info.next);
  MOZ_ASSERT(!chunk->info.prev);

  chunk->info.next = head_;
  if (head_) {
    head_->info.prev = chunk;
  }
  head_ = chunk;
  ++count_;
}

TenuredChunk* ChunkPool::remove(TenuredChunk* chunk) {
  MOZ_ASSERT(count_ > 0);
  MOZ_ASSERT(contains(chunk));

  if (head_ == chunk) {
    head_ = chunk->info.next;
  }
  if (chunk->info.prev) {
    chunk->info.prev->info.next = chunk->info.next;
  }
  if (chunk->info.next) {
    chunk->info.next->info.prev = chunk->info.prev;
  }
  chunk->info.next = chunk->info.prev = nullptr;
  --count_;

  return chunk;
}

// We could keep the chunk pool sorted, but that's likely to be more expensive.
// This sort is O(n log n), but keeping the pool sorted would likely be
// O(m * n), with m being the number of operations (likely higher than n).
void ChunkPool::sort() {
  // Only sort if the list isn't already sorted.
  if (!isSorted()) {
    head_ = mergeSort(head(), count());

    // Fixup prev pointers.
    TenuredChunk* prev = nullptr;
    for (TenuredChunk* cur = head_; cur; cur = cur->info.next) {
      cur->info.prev = prev;
      prev = cur;
    }
  }

  MOZ_ASSERT(verify());
  MOZ_ASSERT(isSorted());
}

TenuredChunk* ChunkPool::mergeSort(TenuredChunk* list, size_t count) {
  MOZ_ASSERT(bool(list) == bool(count));
  if (count < 2) {
    return list;
  }

  size_t half = count / 2;

  // Split the list in two after |half| elements.
  TenuredChunk* front = list;
  TenuredChunk* back;
  {
    TenuredChunk* cur = list;
    for (size_t i = 0; i < half - 1; i++) {
      MOZ_ASSERT(cur);
      cur = cur->info.next;
    }
    back = cur->info.next;
    cur->info.next = nullptr;
  }

  front = mergeSort(front, half);
  back = mergeSort(back, count - half);

  // Merge the sorted halves, ordered by increasing numArenasFree.
  list = nullptr;
  TenuredChunk** cur = &list;
  while (front || back) {
    if (!front || !back) {
      // One half is exhausted; append the remainder of the other.
      *cur = front ? front : back;
      break;
    }

    // Note that the sort is stable due to the <= here. Nothing depends on
    // this but it could.
    if (front->info.numArenasFree <= back->info.numArenasFree) {
      *cur = front;
      front = front->info.next;
      cur = &(*cur)->info.next;
    } else {
      *cur = back;
      back = back->info.next;
      cur = &(*cur)->info.next;
    }
  }

  return list;
}

bool ChunkPool::isSorted() const {
  uint32_t last = 0;
  for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
    if (cursor->info.numArenasFree < last) {
      return false;
    }
    last = cursor->info.numArenasFree;
  }
  return true;
}

bool ChunkPool::contains(TenuredChunk* chunk) const {
  for (TenuredChunk* cursor = head_; cursor; cursor = cursor->info.next) {
    if (cursor == chunk) {
      return true;
    }
  }
  return false;
}

bool ChunkPool::verify() const {
  MOZ_ASSERT(bool(head_) == bool(count_));
  size_t count = 0;
  for (TenuredChunk* cursor = head_; cursor;
       cursor = cursor->info.next, ++count) {
    MOZ_ASSERT_IF(cursor->info.prev, cursor->info.prev->info.next == cursor);
    MOZ_ASSERT_IF(cursor->info.next, cursor->info.next->info.prev == cursor);
  }
  MOZ_ASSERT(count_ == count);
  return true;
}

void ChunkPool::verifyChunks() const {
  for (TenuredChunk* chunk = head_; chunk; chunk = chunk->info.next) {
    chunk->verify();
  }
}

void TenuredChunk::verify() const {
  MOZ_ASSERT(info.numArenasFree <= ArenasPerChunk);
  MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);

  // Free arenas are those on decommitted pages plus the free committed ones.
  size_t decommittedCount = decommittedPages.Count() * ArenasPerPage;
  size_t freeCommittedCount = freeCommittedArenas.Count();
  size_t freeCount = freeCommittedCount + decommittedCount;

  MOZ_ASSERT(freeCount == info.numArenasFree);
  MOZ_ASSERT(freeCommittedCount == info.numArenasFreeCommitted);

  for (size_t i = 0; i < ArenasPerChunk; ++i) {
    MOZ_ASSERT(!(decommittedPages[pageIndex(i)] && freeCommittedArenas[i]));
    MOZ_ASSERT_IF(freeCommittedArenas[i], !arenas[i].allocated());
  }
}

void ChunkPool::Iter::next() {
  current_ = current_->info.next;
}