Bug 1832850 - Part 5: Move the allocateObject definition into gc/Nursery.h r=jandem
[gecko.git] / js / src / gc / Nursery.cpp
blob 8cf942428887897c234ca7b53506ba3058123f92
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sw=2 et tw=80:
4 * This Source Code Form is subject to the terms of the Mozilla Public
5 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
6 * You can obtain one at http://mozilla.org/MPL/2.0/. */
8 #include "gc/Nursery-inl.h"
10 #include "mozilla/DebugOnly.h"
11 #include "mozilla/IntegerPrintfMacros.h"
12 #include "mozilla/ScopeExit.h"
13 #include "mozilla/Sprintf.h"
14 #include "mozilla/TimeStamp.h"
16 #include <algorithm>
17 #include <cmath>
18 #include <utility>
20 #include "builtin/MapObject.h"
21 #include "debugger/DebugAPI.h"
22 #include "gc/GCInternals.h"
23 #include "gc/GCLock.h"
24 #include "gc/GCParallelTask.h"
25 #include "gc/GCProbes.h"
26 #include "gc/Memory.h"
27 #include "gc/PublicIterators.h"
28 #include "gc/Tenuring.h"
29 #include "jit/JitFrames.h"
30 #include "jit/JitRealm.h"
31 #include "js/Printer.h"
32 #include "util/DifferentialTesting.h"
33 #include "util/GetPidProvider.h" // getpid()
34 #include "util/Poison.h"
35 #include "vm/JSONPrinter.h"
36 #include "vm/Realm.h"
37 #include "vm/Time.h"
39 #include "gc/Heap-inl.h"
40 #include "gc/Marking-inl.h"
41 #include "gc/StableCellHasher-inl.h"
42 #include "vm/GeckoProfiler-inl.h"
44 using namespace js;
45 using namespace js::gc;
47 using mozilla::DebugOnly;
48 using mozilla::PodCopy;
49 using mozilla::TimeDuration;
50 using mozilla::TimeStamp;
52 namespace js {
54 struct NurseryChunk : public ChunkBase {
55 char data[Nursery::NurseryChunkUsableSize];
57 static NurseryChunk* fromChunk(gc::TenuredChunk* chunk);
59 explicit NurseryChunk(JSRuntime* runtime)
60 : ChunkBase(runtime, &runtime->gc.storeBuffer()) {}
62 void poisonAndInit(JSRuntime* rt, size_t size = ChunkSize);
63 void poisonRange(size_t from, size_t size, uint8_t value,
64 MemCheckKind checkKind);
65 void poisonAfterEvict(size_t extent = ChunkSize);
67 // Mark pages from startOffset to the end of the chunk as unused. The start
68 // offset must be after the first page, which contains the chunk header and is
69 // not marked as unused.
70 void markPagesUnusedHard(size_t startOffset);
72 // Mark pages from the second page of the chunk to endOffset as in use,
73 // following a call to markPagesUnusedHard.
74 [[nodiscard]] bool markPagesInUseHard(size_t endOffset);
76 uintptr_t start() const { return uintptr_t(&data); }
77 uintptr_t end() const { return uintptr_t(this) + ChunkSize; }
79 static_assert(sizeof(js::NurseryChunk) == gc::ChunkSize,
80 "Nursery chunk size must match gc::Chunk size.");
82 class NurseryDecommitTask : public GCParallelTask {
83 public:
84 explicit NurseryDecommitTask(gc::GCRuntime* gc);
85 bool reserveSpaceForBytes(size_t nbytes);
87 bool isEmpty(const AutoLockHelperThreadState& lock) const;
89 void queueChunk(NurseryChunk* chunk, const AutoLockHelperThreadState& lock);
90 void queueRange(size_t newCapacity, NurseryChunk& chunk,
91 const AutoLockHelperThreadState& lock);
93 private:
94 using NurseryChunkVector = Vector<NurseryChunk*, 0, SystemAllocPolicy>;
96 void run(AutoLockHelperThreadState& lock) override;
98 NurseryChunkVector& chunksToDecommit() { return chunksToDecommit_.ref(); }
99 const NurseryChunkVector& chunksToDecommit() const {
100 return chunksToDecommit_.ref();
103 MainThreadOrGCTaskData<NurseryChunkVector> chunksToDecommit_;
105 MainThreadOrGCTaskData<NurseryChunk*> partialChunk;
106 MainThreadOrGCTaskData<size_t> partialCapacity;
109 } // namespace js
111 inline void js::NurseryChunk::poisonAndInit(JSRuntime* rt, size_t size) {
112 MOZ_ASSERT(size >= sizeof(ChunkBase));
113 MOZ_ASSERT(size <= ChunkSize);
114 poisonRange(0, size, JS_FRESH_NURSERY_PATTERN, MemCheckKind::MakeUndefined);
115 new (this) NurseryChunk(rt);
118 inline void js::NurseryChunk::poisonRange(size_t from, size_t size,
119 uint8_t value,
120 MemCheckKind checkKind) {
121 MOZ_ASSERT(from + size <= ChunkSize);
123 auto* start = reinterpret_cast<uint8_t*>(this) + from;
125 // We can poison the same chunk more than once, so first make sure memory
126 // sanitizers will let us poison it.
127 MOZ_MAKE_MEM_UNDEFINED(start, size);
128 Poison(start, value, size, checkKind);
131 inline void js::NurseryChunk::poisonAfterEvict(size_t extent) {
132 MOZ_ASSERT(extent <= ChunkSize);
133 poisonRange(sizeof(ChunkBase), extent - sizeof(ChunkBase),
134 JS_SWEPT_NURSERY_PATTERN, MemCheckKind::MakeNoAccess);
137 inline void js::NurseryChunk::markPagesUnusedHard(size_t startOffset) {
138 MOZ_ASSERT(startOffset >= sizeof(ChunkBase)); // Don't touch the header.
139 MOZ_ASSERT(startOffset >= SystemPageSize());
140 MOZ_ASSERT(startOffset <= ChunkSize);
141 uintptr_t start = uintptr_t(this) + startOffset;
142 size_t length = ChunkSize - startOffset;
143 MarkPagesUnusedHard(reinterpret_cast<void*>(start), length);
146 inline bool js::NurseryChunk::markPagesInUseHard(size_t endOffset) {
147 MOZ_ASSERT(endOffset >= sizeof(ChunkBase));
148 MOZ_ASSERT(endOffset >= SystemPageSize());
149 MOZ_ASSERT(endOffset <= ChunkSize);
150 uintptr_t start = uintptr_t(this) + SystemPageSize();
151 size_t length = endOffset - SystemPageSize();
152 return MarkPagesInUseHard(reinterpret_cast<void*>(start), length);
155 // static
156 inline js::NurseryChunk* js::NurseryChunk::fromChunk(TenuredChunk* chunk) {
157 return reinterpret_cast<NurseryChunk*>(chunk);
160 js::NurseryDecommitTask::NurseryDecommitTask(gc::GCRuntime* gc)
161 : GCParallelTask(gc, gcstats::PhaseKind::NONE) {
162 // This can occur outside GCs so doesn't have a stats phase.
165 bool js::NurseryDecommitTask::isEmpty(
166 const AutoLockHelperThreadState& lock) const {
167 return chunksToDecommit().empty() && !partialChunk;
170 bool js::NurseryDecommitTask::reserveSpaceForBytes(size_t nbytes) {
171 MOZ_ASSERT(isIdle());
172 size_t nchunks = HowMany(nbytes, ChunkSize);
173 return chunksToDecommit().reserve(nchunks);
176 void js::NurseryDecommitTask::queueChunk(
177 NurseryChunk* chunk, const AutoLockHelperThreadState& lock) {
178 MOZ_ASSERT(isIdle(lock));
179 MOZ_ALWAYS_TRUE(chunksToDecommit().append(chunk));
182 void js::NurseryDecommitTask::queueRange(
183 size_t newCapacity, NurseryChunk& newChunk,
184 const AutoLockHelperThreadState& lock) {
185 MOZ_ASSERT(isIdle(lock));
186 MOZ_ASSERT(!partialChunk);
187 MOZ_ASSERT(newCapacity < ChunkSize);
188 MOZ_ASSERT(newCapacity % SystemPageSize() == 0);
190 partialChunk = &newChunk;
191 partialCapacity = newCapacity;
194 void js::NurseryDecommitTask::run(AutoLockHelperThreadState& lock) {
195 while (!chunksToDecommit().empty()) {
196 NurseryChunk* nurseryChunk = chunksToDecommit().popCopy();
197 AutoUnlockHelperThreadState unlock(lock);
198 nurseryChunk->~NurseryChunk();
199 TenuredChunk* tenuredChunk = TenuredChunk::emplace(
200 nurseryChunk, gc, /* allMemoryCommitted = */ false);
201 AutoLockGC lock(gc);
202 gc->recycleChunk(tenuredChunk, lock);
205 if (partialChunk) {
207 AutoUnlockHelperThreadState unlock(lock);
208 partialChunk->markPagesUnusedHard(partialCapacity);
210 partialChunk = nullptr;
211 partialCapacity = 0;
215 js::Nursery::Nursery(GCRuntime* gc)
216 : position_(0),
217 currentEnd_(0),
218 currentStringEnd_(0),
219 currentBigIntEnd_(0),
220 gc(gc),
221 currentChunk_(0),
222 currentStartChunk_(0),
223 currentStartPosition_(0),
224 capacity_(0),
225 timeInChunkAlloc_(0),
226 enableProfiling_(false),
227 profileThreshold_(0),
228 canAllocateStrings_(true),
229 canAllocateBigInts_(true),
230 reportDeduplications_(false),
231 reportPretenuring_(false),
232 reportPretenuringThreshold_(0),
233 minorGCTriggerReason_(JS::GCReason::NO_REASON),
234 hasRecentGrowthData(false),
235 smoothedTargetSize(0.0) {
236 const char* env = getenv("MOZ_NURSERY_STRINGS");
237 if (env && *env) {
238 canAllocateStrings_ = (*env == '1');
240 env = getenv("MOZ_NURSERY_BIGINTS");
241 if (env && *env) {
242 canAllocateBigInts_ = (*env == '1');
// Print |message| and terminate. Used both for "help" output and for
// rejecting malformed environment variable values.
static void PrintAndExit(const char* message) {
  fprintf(stderr, "%s", message);
  exit(0);
}

// Return the value of environment variable |name|, or nullptr if it is not
// set. A value of "help" prints |helpMessage| and exits.
static const char* GetEnvVar(const char* name, const char* helpMessage) {
  const char* value = getenv(name);
  if (!value) {
    return nullptr;
  }

  if (strcmp(value, "help") == 0) {
    PrintAndExit(helpMessage);
  }

  return value;
}

// Interpret environment variable |name| as a boolean: unset or numeric zero
// is false, any other numeric value is true.
static bool GetBoolEnvVar(const char* name, const char* helpMessage) {
  const char* env = GetEnvVar(name, helpMessage);
  return env && bool(atoi(env));
}

// Parse environment variable |name| as a decimal threshold. On success
// *enabled is true and *threshold holds the parsed value; if the variable is
// unset both outputs are cleared; a malformed value prints the help text and
// exits.
static void ReadReportPretenureEnv(const char* name, const char* helpMessage,
                                   bool* enabled, size_t* threshold) {
  *enabled = false;
  *threshold = 0;

  const char* env = GetEnvVar(name, helpMessage);
  if (!env) {
    return;
  }

  char* end;
  *threshold = strtol(env, &end, 10);
  if (end == env || *end) {
    PrintAndExit(helpMessage);
  }

  *enabled = true;
}
288 bool js::Nursery::init(AutoLockGCBgAlloc& lock) {
289 ReadProfileEnv("JS_GC_PROFILE_NURSERY",
290 "Report minor GCs taking at least N microseconds.\n",
291 &enableProfiling_, &profileWorkers_, &profileThreshold_);
293 reportDeduplications_ = GetBoolEnvVar(
294 "JS_GC_REPORT_STATS",
295 "JS_GC_REPORT_STATS=1\n"
296 "\tAfter a minor GC, report how many strings were deduplicated.\n");
298 ReadReportPretenureEnv(
299 "JS_GC_REPORT_PRETENURE",
300 "JS_GC_REPORT_PRETENURE=N\n"
301 "\tAfter a minor GC, report information about pretenuring, including\n"
302 "\tallocation sites with at least N allocations.\n",
303 &reportPretenuring_, &reportPretenuringThreshold_);
305 decommitTask = MakeUnique<NurseryDecommitTask>(gc);
306 if (!decommitTask) {
307 return false;
310 if (!gc->storeBuffer().enable()) {
311 return false;
314 return initFirstChunk(lock);
317 js::Nursery::~Nursery() { disable(); }
319 void js::Nursery::enable() {
320 MOZ_ASSERT(isEmpty());
321 MOZ_ASSERT(!gc->isVerifyPreBarriersEnabled());
322 if (isEnabled()) {
323 return;
327 AutoLockGCBgAlloc lock(gc);
328 if (!initFirstChunk(lock)) {
329 // If we fail to allocate memory, the nursery will not be enabled.
330 return;
334 #ifdef JS_GC_ZEAL
335 if (gc->hasZealMode(ZealMode::GenerationalGC)) {
336 enterZealMode();
338 #endif
340 updateAllZoneAllocFlags();
342 // This should always succeed after the first time it's called.
343 MOZ_ALWAYS_TRUE(gc->storeBuffer().enable());
346 bool js::Nursery::initFirstChunk(AutoLockGCBgAlloc& lock) {
347 MOZ_ASSERT(!isEnabled());
349 capacity_ = tunables().gcMinNurseryBytes();
351 if (!decommitTask->reserveSpaceForBytes(capacity_) ||
352 !allocateNextChunk(0, lock)) {
353 capacity_ = 0;
354 return false;
357 setCurrentChunk(0);
358 setStartPosition();
359 poisonAndInitCurrentChunk();
361 // Clear any information about previous collections.
362 clearRecentGrowthData();
364 return true;
367 void js::Nursery::disable() {
368 MOZ_ASSERT(isEmpty());
369 if (!isEnabled()) {
370 return;
373 // Free all chunks.
374 decommitTask->join();
375 freeChunksFrom(0);
376 decommitTask->runFromMainThread();
378 capacity_ = 0;
380 // We must reset currentEnd_ so that there is no space for anything in the
381 // nursery. JIT'd code uses this even if the nursery is disabled.
382 currentEnd_ = 0;
383 currentStringEnd_ = 0;
384 currentBigIntEnd_ = 0;
385 position_ = 0;
386 gc->storeBuffer().disable();
388 updateAllZoneAllocFlags();
391 void js::Nursery::enableStrings() {
392 MOZ_ASSERT(isEmpty());
393 canAllocateStrings_ = true;
394 currentStringEnd_ = currentEnd_;
395 updateAllZoneAllocFlags();
398 void js::Nursery::disableStrings() {
399 MOZ_ASSERT(isEmpty());
400 canAllocateStrings_ = false;
401 currentStringEnd_ = 0;
402 updateAllZoneAllocFlags();
405 void js::Nursery::enableBigInts() {
406 MOZ_ASSERT(isEmpty());
407 canAllocateBigInts_ = true;
408 currentBigIntEnd_ = currentEnd_;
409 updateAllZoneAllocFlags();
412 void js::Nursery::disableBigInts() {
413 MOZ_ASSERT(isEmpty());
414 canAllocateBigInts_ = false;
415 currentBigIntEnd_ = 0;
416 updateAllZoneAllocFlags();
419 void js::Nursery::updateAllZoneAllocFlags() {
420 for (AllZonesIter zone(gc); !zone.done(); zone.next()) {
421 updateAllocFlagsForZone(zone);
425 void js::Nursery::updateAllocFlagsForZone(JS::Zone* zone) {
426 bool prevAllocObjects = zone->allocNurseryObjects();
427 bool prevAllocStrings = zone->allocNurseryStrings();
428 bool prevAllocBigInts = zone->allocNurseryBigInts();
430 zone->updateNurseryAllocFlags(*this);
432 if (zone->allocNurseryObjects() != prevAllocObjects ||
433 zone->allocNurseryStrings() != prevAllocStrings ||
434 zone->allocNurseryBigInts() != prevAllocBigInts) {
435 discardJitCodeForZone(zone);
439 void js::Nursery::discardJitCodeForZone(JS::Zone* zone) {
440 CancelOffThreadIonCompile(zone);
442 zone->forceDiscardJitCode(runtime()->gcContext());
444 for (RealmsInZoneIter r(zone); !r.done(); r.next()) {
445 if (jit::JitRealm* jitRealm = r->jitRealm()) {
446 jitRealm->discardStubs();
447 jitRealm->setStringsCanBeInNursery(zone->allocNurseryStrings());
452 bool js::Nursery::isEmpty() const {
453 if (!isEnabled()) {
454 return true;
457 if (!gc->hasZealMode(ZealMode::GenerationalGC)) {
458 MOZ_ASSERT(currentStartChunk_ == 0);
459 MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
461 return position() == currentStartPosition_;
#ifdef JS_GC_ZEAL
// Grow the nursery to its maximum size for generational-GC zeal testing.
void js::Nursery::enterZealMode() {
  if (!isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());

  decommitTask->join();

  AutoEnterOOMUnsafeRegion oomUnsafe;

  if (isSubChunkMode()) {
    // Re-commit the decommitted tail of the first chunk.
    if (!chunk(0).markPagesInUseHard(ChunkSize)) {
      oomUnsafe.crash("Out of memory trying to extend chunk for zeal mode");
    }

    // It'd be simpler to poison the whole chunk, but we can't do that
    // because the nursery might be partially used.
    chunk(0).poisonRange(capacity_, ChunkSize - capacity_,
                         JS_FRESH_NURSERY_PATTERN,
                         MemCheckKind::MakeUndefined);
  }

  capacity_ = RoundUp(tunables().gcMaxNurseryBytes(), ChunkSize);

  if (!decommitTask->reserveSpaceForBytes(capacity_)) {
    oomUnsafe.crash("Nursery::enterZealMode");
  }

  setCurrentEnd();
}

void js::Nursery::leaveZealMode() {
  if (!isEnabled()) {
    return;
  }

  MOZ_ASSERT(isEmpty());

  setCurrentChunk(0);
  setStartPosition();
  poisonAndInitCurrentChunk();
}
#endif  // JS_GC_ZEAL
511 void* js::Nursery::allocateCell(gc::AllocSite* site, size_t size,
512 JS::TraceKind kind) {
513 // Ensure there's enough space to replace the contents with a
514 // RelocationOverlay.
515 MOZ_ASSERT(size >= sizeof(RelocationOverlay));
516 MOZ_ASSERT(size % CellAlignBytes == 0);
517 MOZ_ASSERT(size_t(kind) < NurseryTraceKinds);
519 void* ptr = allocate(sizeof(NurseryCellHeader) + size);
520 if (!ptr) {
521 return nullptr;
524 new (ptr) NurseryCellHeader(site, kind);
526 void* cell =
527 reinterpret_cast<void*>(uintptr_t(ptr) + sizeof(NurseryCellHeader));
529 // Update the allocation site. This code is also inlined in
530 // MacroAssembler::updateAllocSite.
531 uint32_t allocCount = site->incAllocCount();
532 if (allocCount == 1) {
533 pretenuringNursery.insertIntoAllocatedList(site);
534 } else {
535 MOZ_ASSERT_IF(site->isNormal(), site->isInAllocatedList());
538 gcprobes::NurseryAlloc(cell, kind);
539 return cell;
542 inline void* js::Nursery::allocate(size_t size) {
543 MOZ_ASSERT(isEnabled());
544 MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
545 MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime()));
546 MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_,
547 position() >= currentStartPosition_);
548 MOZ_ASSERT(position() % CellAlignBytes == 0);
549 MOZ_ASSERT(size % CellAlignBytes == 0);
551 if (MOZ_UNLIKELY(currentEnd() < position() + size)) {
552 return moveToNextChunkAndAllocate(size);
555 void* thing = (void*)position();
556 position_ = position() + size;
558 DebugOnlyPoison(thing, JS_ALLOCATED_NURSERY_PATTERN, size,
559 MemCheckKind::MakeUndefined);
561 return thing;
564 void* Nursery::moveToNextChunkAndAllocate(size_t size) {
565 MOZ_ASSERT(currentEnd() < position() + size);
567 unsigned chunkno = currentChunk_ + 1;
568 MOZ_ASSERT(chunkno <= maxChunkCount());
569 MOZ_ASSERT(chunkno <= allocatedChunkCount());
570 if (chunkno == maxChunkCount()) {
571 return nullptr;
573 if (chunkno == allocatedChunkCount()) {
574 TimeStamp start = TimeStamp::Now();
576 AutoLockGCBgAlloc lock(gc);
577 if (!allocateNextChunk(chunkno, lock)) {
578 return nullptr;
581 timeInChunkAlloc_ += TimeStamp::Now() - start;
582 MOZ_ASSERT(chunkno < allocatedChunkCount());
584 setCurrentChunk(chunkno);
585 poisonAndInitCurrentChunk();
587 // We know there's enough space to allocate now so we can call allocate()
588 // recursively.
589 MOZ_ASSERT(currentEnd() >= position() + size);
590 return allocate(size);
592 void* js::Nursery::allocateBuffer(Zone* zone, size_t nbytes) {
593 MOZ_ASSERT(nbytes > 0);
595 if (nbytes <= MaxNurseryBufferSize) {
596 void* buffer = allocate(nbytes);
597 if (buffer) {
598 return buffer;
602 void* buffer = zone->pod_malloc<uint8_t>(nbytes);
603 if (buffer && !registerMallocedBuffer(buffer, nbytes)) {
604 js_free(buffer);
605 return nullptr;
607 return buffer;
610 void* js::Nursery::allocateBuffer(Zone* zone, JSObject* obj, size_t nbytes) {
611 MOZ_ASSERT(obj);
612 MOZ_ASSERT(nbytes > 0);
614 if (!IsInsideNursery(obj)) {
615 return zone->pod_malloc<uint8_t>(nbytes);
618 return allocateBuffer(zone, nbytes);
621 void* js::Nursery::allocateBufferSameLocation(JSObject* obj, size_t nbytes) {
622 MOZ_ASSERT(obj);
623 MOZ_ASSERT(nbytes > 0);
624 MOZ_ASSERT(nbytes <= MaxNurseryBufferSize);
626 if (!IsInsideNursery(obj)) {
627 return obj->zone()->pod_malloc<uint8_t>(nbytes);
630 return allocate(nbytes);
633 void* js::Nursery::allocateZeroedBuffer(
634 Zone* zone, size_t nbytes, arena_id_t arena /*= js::MallocArena*/) {
635 MOZ_ASSERT(nbytes > 0);
637 if (nbytes <= MaxNurseryBufferSize) {
638 void* buffer = allocate(nbytes);
639 if (buffer) {
640 memset(buffer, 0, nbytes);
641 return buffer;
645 void* buffer = zone->pod_arena_calloc<uint8_t>(arena, nbytes);
646 if (buffer && !registerMallocedBuffer(buffer, nbytes)) {
647 js_free(buffer);
648 return nullptr;
650 return buffer;
653 void* js::Nursery::allocateZeroedBuffer(
654 JSObject* obj, size_t nbytes, arena_id_t arena /*= js::MallocArena*/) {
655 MOZ_ASSERT(obj);
656 MOZ_ASSERT(nbytes > 0);
658 if (!IsInsideNursery(obj)) {
659 return obj->zone()->pod_arena_calloc<uint8_t>(arena, nbytes);
661 return allocateZeroedBuffer(obj->zone(), nbytes, arena);
664 void* js::Nursery::reallocateBuffer(Zone* zone, Cell* cell, void* oldBuffer,
665 size_t oldBytes, size_t newBytes) {
666 if (!IsInsideNursery(cell)) {
667 MOZ_ASSERT(!isInside(oldBuffer));
668 return zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
671 if (!isInside(oldBuffer)) {
672 MOZ_ASSERT(mallocedBufferBytes >= oldBytes);
673 void* newBuffer =
674 zone->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
675 if (newBuffer) {
676 if (oldBuffer != newBuffer) {
677 MOZ_ALWAYS_TRUE(
678 mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
680 mallocedBufferBytes -= oldBytes;
681 mallocedBufferBytes += newBytes;
683 return newBuffer;
686 // The nursery cannot make use of the returned slots data.
687 if (newBytes < oldBytes) {
688 return oldBuffer;
691 void* newBuffer = allocateBuffer(zone, newBytes);
692 if (newBuffer) {
693 PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
695 return newBuffer;
698 void* js::Nursery::allocateBuffer(JS::BigInt* bi, size_t nbytes) {
699 MOZ_ASSERT(bi);
700 MOZ_ASSERT(nbytes > 0);
702 if (!IsInsideNursery(bi)) {
703 return bi->zone()->pod_malloc<uint8_t>(nbytes);
705 return allocateBuffer(bi->zone(), nbytes);
708 void js::Nursery::freeBuffer(void* buffer, size_t nbytes) {
709 if (!isInside(buffer)) {
710 removeMallocedBuffer(buffer, nbytes);
711 js_free(buffer);
#ifdef DEBUG
/* static */
inline bool Nursery::checkForwardingPointerLocation(void* ptr,
                                                    bool expectedInside) {
  if (isInside(ptr) == expectedInside) {
    return true;
  }

  // If a zero-capacity elements header lands right at the end of a chunk then
  // elements data will appear to be in the next chunk. If we have a pointer to
  // the very start of a chunk, check the previous chunk.
  if ((uintptr_t(ptr) & ChunkMask) == 0 &&
      isInside(reinterpret_cast<uint8_t*>(ptr) - 1) == expectedInside) {
    return true;
  }

  return false;
}
#endif
735 void Nursery::setIndirectForwardingPointer(void* oldData, void* newData) {
736 MOZ_ASSERT(checkForwardingPointerLocation(oldData, true));
737 MOZ_ASSERT(checkForwardingPointerLocation(newData, false));
739 AutoEnterOOMUnsafeRegion oomUnsafe;
740 #ifdef DEBUG
741 if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData)) {
742 MOZ_ASSERT(p->value() == newData);
744 #endif
745 if (!forwardedBuffers.put(oldData, newData)) {
746 oomUnsafe.crash("Nursery::setForwardingPointer");
#ifdef DEBUG
// Sanity-check that |ptr| is writable by writing its current value back.
static bool IsWriteableAddress(void* ptr) {
  auto* vPtr = reinterpret_cast<volatile uint64_t*>(ptr);
  *vPtr = *vPtr;
  return true;
}
#endif
758 void js::Nursery::forwardBufferPointer(uintptr_t* pSlotsElems) {
759 // Read the current pointer value which may be one of:
760 // - Non-nursery pointer
761 // - Nursery-allocated buffer
762 // - A BufferRelocationOverlay inside the nursery
764 // Note: The buffer has already be relocated. We are just patching stale
765 // pointers now.
766 auto* buffer = reinterpret_cast<void*>(*pSlotsElems);
768 if (!isInside(buffer)) {
769 return;
772 // The new location for this buffer is either stored inline with it or in
773 // the forwardedBuffers table.
774 if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(buffer)) {
775 buffer = p->value();
776 // It's not valid to assert IsWriteableAddress for indirect forwarding
777 // pointers because the size of the allocation could be less than a word.
778 } else {
779 BufferRelocationOverlay* reloc =
780 static_cast<BufferRelocationOverlay*>(buffer);
781 buffer = *reloc;
782 MOZ_ASSERT(IsWriteableAddress(buffer));
785 MOZ_ASSERT(!isInside(buffer));
786 *pSlotsElems = reinterpret_cast<uintptr_t>(buffer);
789 inline double js::Nursery::calcPromotionRate(bool* validForTenuring) const {
790 MOZ_ASSERT(validForTenuring);
792 if (previousGC.nurseryUsedBytes == 0) {
793 *validForTenuring = false;
794 return 0.0;
797 double used = double(previousGC.nurseryUsedBytes);
798 double capacity = double(previousGC.nurseryCapacity);
799 double tenured = double(previousGC.tenuredBytes);
801 // We should only use the promotion rate to make tenuring decisions if it's
802 // likely to be valid. The criterion we use is that the nursery was at least
803 // 90% full.
804 *validForTenuring = used > capacity * 0.9;
806 return tenured / used;
809 void js::Nursery::renderProfileJSON(JSONPrinter& json) const {
810 if (!isEnabled()) {
811 json.beginObject();
812 json.property("status", "nursery disabled");
813 json.endObject();
814 return;
817 if (previousGC.reason == JS::GCReason::NO_REASON) {
818 // If the nursery was empty when the last minorGC was requested, then
819 // no nursery collection will have been performed but JSON may still be
820 // requested. (And as a public API, this function should not crash in
821 // such a case.)
822 json.beginObject();
823 json.property("status", "nursery empty");
824 json.endObject();
825 return;
828 json.beginObject();
830 json.property("status", "complete");
832 json.property("reason", JS::ExplainGCReason(previousGC.reason));
833 json.property("bytes_tenured", previousGC.tenuredBytes);
834 json.property("cells_tenured", previousGC.tenuredCells);
835 json.property("strings_tenured",
836 stats().getStat(gcstats::STAT_STRINGS_TENURED));
837 json.property("strings_deduplicated",
838 stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED));
839 json.property("bigints_tenured",
840 stats().getStat(gcstats::STAT_BIGINTS_TENURED));
841 json.property("bytes_used", previousGC.nurseryUsedBytes);
842 json.property("cur_capacity", previousGC.nurseryCapacity);
843 const size_t newCapacity = capacity();
844 if (newCapacity != previousGC.nurseryCapacity) {
845 json.property("new_capacity", newCapacity);
847 if (previousGC.nurseryCommitted != previousGC.nurseryCapacity) {
848 json.property("lazy_capacity", previousGC.nurseryCommitted);
850 if (!timeInChunkAlloc_.IsZero()) {
851 json.property("chunk_alloc_us", timeInChunkAlloc_, json.MICROSECONDS);
854 // These counters only contain consistent data if the profiler is enabled,
855 // and then there's no guarentee.
856 if (runtime()->geckoProfiler().enabled()) {
857 json.property("cells_allocated_nursery",
858 pretenuringNursery.totalAllocCount());
859 json.property("cells_allocated_tenured",
860 stats().allocsSinceMinorGCTenured());
863 json.beginObjectProperty("phase_times");
865 #define EXTRACT_NAME(name, text) #name,
866 static const char* const names[] = {
867 FOR_EACH_NURSERY_PROFILE_TIME(EXTRACT_NAME)
868 #undef EXTRACT_NAME
869 ""};
871 size_t i = 0;
872 for (auto time : profileDurations_) {
873 json.property(names[i++], time, json.MICROSECONDS);
876 json.endObject(); // timings value
878 json.endObject();
// The following macros define nursery GC profile metadata fields that are
// printed before the timing information defined by
// FOR_EACH_NURSERY_PROFILE_TIME. Each entry is _(name, width, format, value).

#define FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(_) \
  _("PID", 7, "%7zu", pid)                          \
  _("Runtime", 14, "0x%12p", runtime)

#define FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(_)    \
  _("Timestamp", 10, "%10.6f", timestamp.ToSeconds()) \
  _("Reason", 20, "%-20.20s", reasonStr)              \
  _("PRate", 6, "%5.1f%%", promotionRatePercent)      \
  _("OldKB", 6, "%6zu", oldSizeKB)                    \
  _("NewKB", 6, "%6zu", newSizeKB)                    \
  _("Dedup", 6, "%6zu", dedupCount)

#define FOR_EACH_NURSERY_PROFILE_METADATA(_)  \
  FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(_) \
  FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(_)
901 void js::Nursery::printCollectionProfile(JS::GCReason reason,
902 double promotionRate) {
903 stats().maybePrintProfileHeaders();
905 Sprinter sprinter;
906 if (!sprinter.init() || !sprinter.put(gcstats::MinorGCProfilePrefix)) {
907 return;
910 size_t pid = getpid();
911 JSRuntime* runtime = gc->rt;
912 TimeDuration timestamp = collectionStartTime() - stats().creationTime();
913 const char* reasonStr = ExplainGCReason(reason);
914 double promotionRatePercent = promotionRate * 100;
915 size_t oldSizeKB = previousGC.nurseryCapacity / 1024;
916 size_t newSizeKB = capacity() / 1024;
917 size_t dedupCount = stats().getStat(gcstats::STAT_STRINGS_DEDUPLICATED);
919 #define PRINT_FIELD_VALUE(_1, _2, format, value) \
920 if (!sprinter.jsprintf(" " format, value)) { \
921 return; \
923 FOR_EACH_NURSERY_PROFILE_METADATA(PRINT_FIELD_VALUE)
924 #undef PRINT_FIELD_VALUE
926 printProfileDurations(profileDurations_, sprinter);
928 fputs(sprinter.string(), stats().profileFile());
931 void js::Nursery::printProfileHeader() {
932 Sprinter sprinter;
933 if (!sprinter.init() || !sprinter.put(gcstats::MinorGCProfilePrefix)) {
934 return;
937 #define PRINT_FIELD_NAME(name, width, _1, _2) \
938 if (!sprinter.jsprintf(" %-*s", width, name)) { \
939 return; \
941 FOR_EACH_NURSERY_PROFILE_METADATA(PRINT_FIELD_NAME)
942 #undef PRINT_FIELD_NAME
944 #define PRINT_PROFILE_NAME(_1, text) \
945 if (!sprinter.jsprintf(" %-6.6s", text)) { \
946 return; \
948 FOR_EACH_NURSERY_PROFILE_TIME(PRINT_PROFILE_NAME)
949 #undef PRINT_PROFILE_NAME
951 if (!sprinter.put("\n")) {
952 return;
955 fputs(sprinter.string(), stats().profileFile());
958 // static
959 bool js::Nursery::printProfileDurations(const ProfileDurations& times,
960 Sprinter& sprinter) {
961 for (auto time : times) {
962 int64_t micros = int64_t(time.ToMicroseconds());
963 if (!sprinter.jsprintf(" %6" PRIi64, micros)) {
964 return false;
968 return sprinter.put("\n");
971 static constexpr size_t NurserySliceMetadataFormatWidth() {
972 size_t fieldCount = 0;
973 size_t totalWidth = 0;
975 #define UPDATE_COUNT_AND_WIDTH(_1, width, _2, _3) \
976 fieldCount++; \
977 totalWidth += width;
978 FOR_EACH_NURSERY_PROFILE_SLICE_METADATA(UPDATE_COUNT_AND_WIDTH)
979 #undef UPDATE_COUNT_AND_WIDTH
981 // Add padding between fields.
982 totalWidth += fieldCount - 1;
984 return totalWidth;
987 void js::Nursery::printTotalProfileTimes() {
988 if (!enableProfiling_) {
989 return;
992 Sprinter sprinter;
993 if (!sprinter.init() || !sprinter.put(gcstats::MinorGCProfilePrefix)) {
994 return;
997 size_t pid = getpid();
998 JSRuntime* runtime = gc->rt;
1000 char collections[32];
1001 DebugOnly<int> r = SprintfLiteral(
1002 collections, "TOTALS: %7" PRIu64 " collections:", gc->minorGCCount());
1003 MOZ_ASSERT(r > 0 && r < int(sizeof(collections)));
1005 #define PRINT_FIELD_VALUE(_1, _2, format, value) \
1006 if (!sprinter.jsprintf(" " format, value)) { \
1007 return; \
1009 FOR_EACH_NURSERY_PROFILE_COMMON_METADATA(PRINT_FIELD_VALUE)
1010 #undef PRINT_FIELD_VALUE
1012 // Use whole width of per-slice metadata to print total slices so the profile
1013 // totals that follow line up.
1014 size_t width = NurserySliceMetadataFormatWidth();
1015 if (!sprinter.jsprintf(" %-*s", int(width), collections)) {
1016 return;
1019 if (!printProfileDurations(totalDurations_, sprinter)) {
1020 return;
1023 fputs(sprinter.string(), stats().profileFile());
1026 void js::Nursery::maybeClearProfileDurations() {
1027 for (auto& duration : profileDurations_) {
1028 duration = mozilla::TimeDuration();
1032 inline void js::Nursery::startProfile(ProfileKey key) {
1033 startTimes_[key] = TimeStamp::Now();
1036 inline void js::Nursery::endProfile(ProfileKey key) {
1037 profileDurations_[key] = TimeStamp::Now() - startTimes_[key];
1038 totalDurations_[key] += profileDurations_[key];
1041 inline TimeStamp js::Nursery::collectionStartTime() const {
1042 return startTimes_[ProfileKey::Total];
1045 inline TimeStamp js::Nursery::lastCollectionEndTime() const {
1046 return previousGC.endTime;
1049 bool js::Nursery::shouldCollect() const {
1050 if (!isEnabled()) {
1051 return false;
1054 if (isEmpty() && capacity() == tunables().gcMinNurseryBytes()) {
1055 return false;
1058 if (minorGCRequested()) {
1059 return true;
1062 // Eagerly collect the nursery in idle time if it's nearly full.
1063 if (isNearlyFull()) {
1064 return true;
1067 // If the nursery is not being collected often then it may be taking up more
1068 // space than necessary.
1069 return isUnderused();
1072 inline bool js::Nursery::isNearlyFull() const {
1073 bool belowBytesThreshold =
1074 freeSpace() < tunables().nurseryFreeThresholdForIdleCollection();
1075 bool belowFractionThreshold =
1076 double(freeSpace()) / double(capacity()) <
1077 tunables().nurseryFreeThresholdForIdleCollectionFraction();
1079 // We want to use belowBytesThreshold when the nursery is sufficiently large,
1080 // and belowFractionThreshold when it's small.
1082 // When the nursery is small then belowBytesThreshold is a lower threshold
1083 // (triggered earlier) than belowFractionThreshold. So if the fraction
1084 // threshold is true, the bytes one will be true also. The opposite is true
1085 // when the nursery is large.
1087 // Therefore, by the time we cross the threshold we care about, we've already
1088 // crossed the other one, and we can boolean AND to use either condition
1089 // without encoding any "is the nursery big/small" test/threshold. The point
1090 // at which they cross is when the nursery is: BytesThreshold /
1091 // FractionThreshold large.
1093 // With defaults that's:
1095 // 1MB = 256KB / 0.25
1097 return belowBytesThreshold && belowFractionThreshold;
1100 inline bool js::Nursery::isUnderused() const {
1101 if (js::SupportDifferentialTesting() || !previousGC.endTime) {
1102 return false;
1105 if (capacity() == tunables().gcMinNurseryBytes()) {
1106 return false;
1109 // If the nursery is above its minimum size, collect it every so often if we
1110 // have idle time. This allows the nursery to shrink when it's not being
1111 // used. There are other heuristics we could use for this, but this is the
1112 // simplest.
1113 TimeDuration timeSinceLastCollection = TimeStamp::Now() - previousGC.endTime;
1114 return timeSinceLastCollection > tunables().nurseryTimeoutForIdleCollection();
1117 // typeReason is the gcReason for specified type, for example,
1118 // FULL_CELL_PTR_OBJ_BUFFER is the gcReason for JSObject.
1119 static inline bool IsFullStoreBufferReason(JS::GCReason reason,
1120 JS::GCReason typeReason) {
1121 return reason == typeReason ||
1122 reason == JS::GCReason::FULL_WHOLE_CELL_BUFFER ||
1123 reason == JS::GCReason::FULL_GENERIC_BUFFER ||
1124 reason == JS::GCReason::FULL_VALUE_BUFFER ||
1125 reason == JS::GCReason::FULL_SLOT_BUFFER ||
1126 reason == JS::GCReason::FULL_SHAPE_BUFFER;
// Perform a minor (nursery) collection: evacuate live nursery cells to the
// tenured heap via doCollection(), run pretenuring heuristics, resize the
// nursery, and report statistics/telemetry. Safe to call when the nursery is
// disabled or empty; in that case only the store buffer is cleared.
void js::Nursery::collect(JS::GCOptions options, JS::GCReason reason) {
  JSRuntime* rt = runtime();
  MOZ_ASSERT(!rt->mainContextFromOwnThread()->suppressGC);

  if (!isEnabled() || isEmpty()) {
    // Our barriers are not always exact, and there may be entries in the
    // storebuffer even when the nursery is disabled or empty. It's not safe
    // to keep these entries as they may refer to tenured cells which may be
    // freed after this point.
    gc->storeBuffer().clear();

    MOZ_ASSERT(!pretenuringNursery.hasAllocatedSites());
  }

  if (!isEnabled()) {
    return;
  }

  AutoGCSession session(gc, JS::HeapState::MinorCollecting);

  stats().beginNurseryCollection(reason);
  gcprobes::MinorGCStart();

  maybeClearProfileDurations();
  startProfile(ProfileKey::Total);

  // Snapshot the pre-collection state; the tenured counts are filled in below
  // if a collection actually runs.
  previousGC.reason = JS::GCReason::NO_REASON;
  previousGC.nurseryUsedBytes = usedSpace();
  previousGC.nurseryCapacity = capacity();
  previousGC.nurseryCommitted = committed();
  previousGC.nurseryUsedChunkCount = currentChunk_ + 1;
  previousGC.tenuredBytes = 0;
  previousGC.tenuredCells = 0;

  // If it isn't empty, it will call doCollection, and possibly after that
  // isEmpty() will become true, so use another variable to keep track of the
  // old empty state.
  bool wasEmpty = isEmpty();
  if (!wasEmpty) {
    CollectionResult result = doCollection(session, options, reason);
    // Don't include chunk headers when calculating nursery space, since this
    // space does not represent data that can be tenured
    MOZ_ASSERT(result.tenuredBytes <=
               (previousGC.nurseryUsedBytes -
                (sizeof(ChunkBase) * previousGC.nurseryUsedChunkCount)));

    previousGC.reason = reason;
    previousGC.tenuredBytes = result.tenuredBytes;
    previousGC.tenuredCells = result.tenuredCells;
    previousGC.nurseryUsedChunkCount = currentChunk_ + 1;
  }

  // Resize the nursery.
  maybeResizeNursery(options, reason);

  // Poison/initialise the first chunk.
  if (previousGC.nurseryUsedBytes) {
    // In most cases Nursery::clear() has not poisoned this chunk or marked it
    // as NoAccess; so we only need to poison the region used during the last
    // cycle. Also, if the heap was recently expanded we don't want to
    // re-poison the new memory. In both cases we only need to poison until
    // previousGC.nurseryUsedBytes.
    //
    // In cases where this is not true, like generational zeal mode or subchunk
    // mode, poisonAndInitCurrentChunk() will ignore its parameter. It will
    // also clamp the parameter.
    poisonAndInitCurrentChunk(previousGC.nurseryUsedBytes);
  }

  bool validPromotionRate;
  const double promotionRate = calcPromotionRate(&validPromotionRate);

  startProfile(ProfileKey::Pretenure);
  size_t sitesPretenured = 0;
  if (!wasEmpty) {
    sitesPretenured =
        doPretenuring(rt, reason, validPromotionRate, promotionRate);
  }
  endProfile(ProfileKey::Pretenure);

  // We ignore gcMaxBytes when allocating for minor collection. However, if we
  // overflowed, we disable the nursery. The next time we allocate, we'll fail
  // because bytes >= gcMaxBytes.
  if (gc->heapSize.bytes() >= tunables().gcMaxBytes()) {
    disable();
  }

  previousGC.endTime =
      TimeStamp::Now();  // Must happen after maybeResizeNursery.
  endProfile(ProfileKey::Total);
  gc->incMinorGcNumber();

  TimeDuration totalTime = profileDurations_[ProfileKey::Total];
  sendTelemetry(reason, totalTime, wasEmpty, promotionRate, sitesPretenured);

  stats().endNurseryCollection(reason);  // Calls GCNurseryCollectionCallback.
  gcprobes::MinorGCEnd();

  timeInChunkAlloc_ = mozilla::TimeDuration();

  // Roll the per-zone string statistics into the runtime-wide totals for this
  // cycle, keeping the previous totals for delta reporting below.
  js::StringStats prevStats = gc->stringStats;
  js::StringStats& currStats = gc->stringStats;
  currStats = js::StringStats();
  for (ZonesIter zone(gc, WithAtoms); !zone.done(); zone.next()) {
    currStats += zone->stringStats;
    zone->previousGCStringStats = zone->stringStats;
  }
  stats().setStat(
      gcstats::STAT_STRINGS_DEDUPLICATED,
      currStats.deduplicatedStrings - prevStats.deduplicatedStrings);
  if (ShouldPrintProfile(runtime(), enableProfiling_, profileWorkers_,
                         profileThreshold_, totalTime)) {
    printCollectionProfile(reason, promotionRate);
  }

  if (reportDeduplications_) {
    printDeduplicationData(prevStats, currStats);
  }
}
1249 void js::Nursery::sendTelemetry(JS::GCReason reason, TimeDuration totalTime,
1250 bool wasEmpty, double promotionRate,
1251 size_t sitesPretenured) {
1252 JSRuntime* rt = runtime();
1253 rt->metrics().GC_MINOR_REASON(uint32_t(reason));
1255 // Long minor GCs are those that take more than 1ms.
1256 bool wasLongMinorGC = totalTime.ToMilliseconds() > 1.0;
1257 if (wasLongMinorGC) {
1258 rt->metrics().GC_MINOR_REASON_LONG(uint32_t(reason));
1260 rt->metrics().GC_MINOR_US(totalTime);
1261 rt->metrics().GC_NURSERY_BYTES_2(committed());
1263 if (!wasEmpty) {
1264 rt->metrics().GC_PRETENURE_COUNT_2(sitesPretenured);
1265 rt->metrics().GC_NURSERY_PROMOTION_RATE(promotionRate * 100);
1269 void js::Nursery::printDeduplicationData(js::StringStats& prev,
1270 js::StringStats& curr) {
1271 if (curr.deduplicatedStrings > prev.deduplicatedStrings) {
1272 fprintf(stderr,
1273 "pid %zu: deduplicated %" PRIi64 " strings, %" PRIu64
1274 " chars, %" PRIu64 " malloc bytes\n",
1275 size_t(getpid()),
1276 curr.deduplicatedStrings - prev.deduplicatedStrings,
1277 curr.deduplicatedChars - prev.deduplicatedChars,
1278 curr.deduplicatedBytes - prev.deduplicatedBytes);
void js::Nursery::freeTrailerBlocks(void) {
  // This routine frees those blocks denoted by the set
  //
  //   trailersAdded_ (all of it)
  //     - trailersRemoved_ (entries with index below trailersRemovedUsed_)
  //
  // For each block, places it back on the nursery's small-malloced-block pool
  // by calling mallocedBlockCache_.free.

  MOZ_ASSERT(trailersAdded_.length() == trailersRemoved_.length());
  MOZ_ASSERT(trailersRemovedUsed_ <= trailersRemoved_.length());

  // Sort the removed entries (only the first trailersRemovedUsed_ are live)
  // so they can be consumed by binary search or a merge walk below.
  std::sort(trailersRemoved_.begin(),
            trailersRemoved_.begin() + trailersRemovedUsed_,
            [](const void* block1, const void* block2) {
              return uintptr_t(block1) < uintptr_t(block2);
            });

  // Use one of two schemes to enumerate the set subtraction.
  if (trailersRemovedUsed_ < 1000) {
    // If the number of removed items is relatively small, it isn't worth the
    // cost of sorting `trailersAdded_`. Instead, walk through the vector in
    // whatever order it is and use binary search to establish whether each
    // item is present in trailersRemoved_[0 .. trailersRemovedUsed_ - 1].
    const size_t nAdded = trailersAdded_.length();
    for (size_t i = 0; i < nAdded; i++) {
      const PointerAndUint7 block = trailersAdded_[i];
      const void* blockPointer = block.pointer();
      if (!std::binary_search(trailersRemoved_.begin(),
                              trailersRemoved_.begin() + trailersRemovedUsed_,
                              blockPointer)) {
        mallocedBlockCache_.free(block);
      }
    }
  } else {
    // The general case, which is algorithmically safer for large inputs.
    // Sort the added entries, and then walk through both them and the removed
    // entries in lockstep.
    std::sort(trailersAdded_.begin(), trailersAdded_.end(),
              [](const PointerAndUint7& block1, const PointerAndUint7& block2) {
                return uintptr_t(block1.pointer()) <
                       uintptr_t(block2.pointer());
              });

    // Enumerate the set subtraction. This is somewhat simplified by the fact
    // that all elements of the removed set must also be present in the added
    // set. (the "inclusion property").
    const size_t nAdded = trailersAdded_.length();
    const size_t nRemoved = trailersRemovedUsed_;
    size_t iAdded;
    size_t iRemoved = 0;
    for (iAdded = 0; iAdded < nAdded; iAdded++) {
      if (iRemoved == nRemoved) {
        // We've run out of items to skip, so move on to the next loop.
        break;
      }
      const PointerAndUint7 blockAdded = trailersAdded_[iAdded];
      const void* blockRemoved = trailersRemoved_[iRemoved];
      if (blockAdded.pointer() < blockRemoved) {
        // Present only in the added set: free it.
        mallocedBlockCache_.free(blockAdded);
        continue;
      }
      // If this doesn't hold
      // (that is, if `blockAdded.pointer() > blockRemoved`),
      // then the abovementioned inclusion property doesn't hold.
      MOZ_RELEASE_ASSERT(blockAdded.pointer() == blockRemoved);
      iRemoved++;
    }
    MOZ_ASSERT(iRemoved == nRemoved);
    // We've used up the removed set, so now finish up the remainder of the
    // added set.
    for (/*keep going*/; iAdded < nAdded; iAdded++) {
      const PointerAndUint7 block = trailersAdded_[iAdded];
      mallocedBlockCache_.free(block);
    }
  }

  // And empty out both sets, but preserve the underlying storage.
  trailersAdded_.clear();
  trailersRemoved_.clear();
  trailersRemovedUsed_ = 0;
  trailerBytes_ = 0;

  // Discard blocks from the cache at 0.05% per megabyte of nursery capacity,
  // that is, 0.8% of blocks for a 16-megabyte nursery. This allows the cache
  // to gradually discard unneeded blocks in long running applications.
  mallocedBlockCache_.preen(0.05 * float(capacity() / (1024 * 1024)));
}
// Run the evacuation phase of a minor GC: trace roots, move reachable cells
// out of the nursery to a fixed point, sweep/update caches, then reset the
// nursery and store buffer. Returns the bytes and cell count promoted to the
// tenured heap.
js::Nursery::CollectionResult js::Nursery::doCollection(AutoGCSession& session,
                                                        JS::GCOptions options,
                                                        JS::GCReason reason) {
  JSRuntime* rt = runtime();
  AutoSetThreadIsPerformingGC performingGC(rt->gcContext());
  AutoStopVerifyingBarriers av(rt, false);
  AutoDisableProxyCheck disableStrictProxyChecking;
  mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;

  // Move objects pointed to by roots from the nursery to the major heap.
  TenuringTracer mover(rt, this);

  // Trace everything considered as a root by a minor GC.
  traceRoots(session, mover);

  startProfile(ProfileKey::SweepCaches);
  gc->purgeRuntimeForMinorGC();
  endProfile(ProfileKey::SweepCaches);

  // Most of the work is done here. This loop iterates over objects that have
  // been moved to the major heap. If these objects have any outgoing pointers
  // to the nursery, then those nursery objects get moved as well, until no
  // objects are left to move. That is, we iterate to a fixed point.
  startProfile(ProfileKey::CollectToObjFP);
  mover.collectToObjectFixedPoint();
  endProfile(ProfileKey::CollectToObjFP);

  startProfile(ProfileKey::CollectToStrFP);
  mover.collectToStringFixedPoint();
  endProfile(ProfileKey::CollectToStrFP);

  // Sweep to update any pointers to nursery objects that have now been
  // tenured.
  startProfile(ProfileKey::Sweep);
  sweep();
  endProfile(ProfileKey::Sweep);

  // Update any slot or element pointers whose destination has been tenured.
  startProfile(ProfileKey::UpdateJitActivations);
  js::jit::UpdateJitActivationsForMinorGC(rt);
  forwardedBuffers.clearAndCompact();
  endProfile(ProfileKey::UpdateJitActivations);

  startProfile(ProfileKey::ObjectsTenuredCallback);
  gc->callObjectsTenuredCallback();
  endProfile(ProfileKey::ObjectsTenuredCallback);

  // Sweep.
  startProfile(ProfileKey::FreeMallocedBuffers);
  gc->queueBuffersForFreeAfterMinorGC(mallocedBuffers);
  mallocedBufferBytes = 0;
  endProfile(ProfileKey::FreeMallocedBuffers);

  // Give trailer blocks associated with non-tenured Wasm{Struct,Array}Objects
  // back to our `mallocedBlockCache_`.
  startProfile(ProfileKey::FreeTrailerBlocks);
  freeTrailerBlocks();
  if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason)) {
    // Under memory pressure, release the cached blocks entirely.
    mallocedBlockCache_.clear();
  }
  endProfile(ProfileKey::FreeTrailerBlocks);

  startProfile(ProfileKey::ClearNursery);
  clear();
  endProfile(ProfileKey::ClearNursery);

  startProfile(ProfileKey::ClearStoreBuffer);
  gc->storeBuffer().clear();
  endProfile(ProfileKey::ClearStoreBuffer);

  // Purge the StringToAtomCache. This has to happen at the end because the
  // cache is used when tenuring strings.
  startProfile(ProfileKey::PurgeStringToAtomCache);
  runtime()->caches().stringToAtomCache.purge();
  endProfile(ProfileKey::PurgeStringToAtomCache);

  // Make sure hashtables have been updated after the collection.
  startProfile(ProfileKey::CheckHashTables);
#ifdef JS_GC_ZEAL
  if (gc->hasZealMode(ZealMode::CheckHashTablesOnMinorGC)) {
    runtime()->caches().checkEvalCacheAfterMinorGC();
    gc->checkHashTablesAfterMovingGC();
  }
#endif
  endProfile(ProfileKey::CheckHashTables);

  return {mover.getTenuredSize(), mover.getTenuredCells()};
}
// Trace everything a minor GC treats as a root. The ordering here is
// load-bearing: the store buffer must be traced before the runtime roots,
// and within the store buffer the whole-cell entries go first (see below).
void js::Nursery::traceRoots(AutoGCSession& session, TenuringTracer& mover) {
  {
    // Suppress the sampling profiler to prevent it observing moved functions.
    AutoSuppressProfilerSampling suppressProfiler(
        runtime()->mainContextFromOwnThread());

    // Trace the store buffer. This must happen first.
    StoreBuffer& sb = gc->storeBuffer();

    // Strings in the whole cell buffer must be traced first, in order to mark
    // tenured dependent strings' bases as non-deduplicatable. The rest of
    // nursery collection (whole non-string cells, edges, etc.) can happen
    // later.
    startProfile(ProfileKey::TraceWholeCells);
    sb.traceWholeCells(mover);
    endProfile(ProfileKey::TraceWholeCells);

    startProfile(ProfileKey::TraceValues);
    sb.traceValues(mover);
    endProfile(ProfileKey::TraceValues);

    startProfile(ProfileKey::TraceCells);
    sb.traceCells(mover);
    endProfile(ProfileKey::TraceCells);

    startProfile(ProfileKey::TraceSlots);
    sb.traceSlots(mover);
    endProfile(ProfileKey::TraceSlots);

    startProfile(ProfileKey::TraceGenericEntries);
    sb.traceGenericEntries(&mover);
    endProfile(ProfileKey::TraceGenericEntries);

    startProfile(ProfileKey::MarkRuntime);
    gc->traceRuntimeForMinorGC(&mover, session);
    endProfile(ProfileKey::MarkRuntime);
  }

  startProfile(ProfileKey::MarkDebugger);
  {
    gcstats::AutoPhase ap(stats(), gcstats::PhaseKind::MARK_ROOTS);
    DebugAPI::traceAllForMovingGC(&mover);
  }
  endProfile(ProfileKey::MarkDebugger);
}
// Run pretenuring heuristics after a collection: let the pretenuring nursery
// process its allocation sites, then decide per zone whether nursery
// allocation of strings and/or BigInts should be disabled. Returns the number
// of sites that were pretenured.
size_t js::Nursery::doPretenuring(JSRuntime* rt, JS::GCReason reason,
                                  bool validPromotionRate,
                                  double promotionRate) {
  size_t sitesPretenured = pretenuringNursery.doPretenuring(
      gc, reason, validPromotionRate, promotionRate, reportPretenuring_,
      reportPretenuringThreshold_);

  bool highPromotionRate =
      validPromotionRate && promotionRate > tunables().pretenureThreshold();

  bool pretenureStr = false;
  bool pretenureBigInt = false;
  if (tunables().attemptPretenuring()) {
    // Should we check for pretenuring regardless of GCReason?
    // Use 3MB as the threshold so the pretenuring can be applied on Android.
    bool pretenureAll =
        highPromotionRate && previousGC.nurseryUsedBytes >= 3 * 1024 * 1024;

    pretenureStr =
        pretenureAll ||
        IsFullStoreBufferReason(reason, JS::GCReason::FULL_CELL_PTR_STR_BUFFER);
    pretenureBigInt =
        pretenureAll || IsFullStoreBufferReason(
                            reason, JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER);
  }

  uint32_t numStringsTenured = 0;
  uint32_t numBigIntsTenured = 0;
  for (ZonesIter zone(gc, SkipAtoms); !zone.done(); zone.next()) {
    // For some tests in JetStream2 and Kraken, the tenuredRate is high but the
    // number of allocated strings is low. So we calculate the tenuredRate only
    // if the number of string allocations is enough.
    uint32_t zoneNurseryStrings =
        zone->nurseryAllocCount(JS::TraceKind::String);
    bool allocThreshold = zoneNurseryStrings > 30000;
    uint64_t zoneTenuredStrings =
        zone->stringStats.ref().liveNurseryStrings -
        zone->previousGCStringStats.ref().liveNurseryStrings;
    double tenuredRate =
        allocThreshold ? double(zoneTenuredStrings) / double(zoneNurseryStrings)
                       : 0.0;
    bool disableNurseryStrings =
        pretenureStr && zone->allocNurseryStrings() &&
        tenuredRate > tunables().pretenureStringThreshold();
    bool disableNurseryBigInts = pretenureBigInt &&
                                 zone->allocNurseryBigInts() &&
                                 zone->tenuredBigInts >= 30 * 1000;
    if (disableNurseryStrings || disableNurseryBigInts) {
      if (disableNurseryStrings) {
        zone->nurseryStringsDisabled = true;
      }
      if (disableNurseryBigInts) {
        zone->nurseryBigIntsDisabled = true;
      }
      // Propagate the disabled flags into the zone's allocator state.
      updateAllocFlagsForZone(zone);
    }
    numStringsTenured += zoneTenuredStrings;
    numBigIntsTenured += zone->tenuredBigInts;
    // Reset the per-cycle counter for the next collection.
    zone->tenuredBigInts = 0;
  }
  stats().setStat(gcstats::STAT_STRINGS_TENURED, numStringsTenured);
  stats().setStat(gcstats::STAT_BIGINTS_TENURED, numBigIntsTenured);

  return sitesPretenured;
}
1572 bool js::Nursery::registerMallocedBuffer(void* buffer, size_t nbytes) {
1573 MOZ_ASSERT(buffer);
1574 MOZ_ASSERT(nbytes > 0);
1575 if (!mallocedBuffers.putNew(buffer)) {
1576 return false;
1579 mallocedBufferBytes += nbytes;
1580 if (MOZ_UNLIKELY(mallocedBufferBytes > capacity() * 8)) {
1581 requestMinorGC(JS::GCReason::NURSERY_MALLOC_BUFFERS);
1584 return true;
// Sweep data structures that reference nursery cells after those cells have
// been moved: unique IDs, per-zone tables, Map/Set objects and runtime
// caches.
void js::Nursery::sweep() {
  // It's important that the context's GCUse is not Finalizing at this point,
  // otherwise we will miscount memory attached to nursery objects with
  // CellAllocPolicy.
  AutoSetThreadIsSweeping setThreadSweeping(runtime()->gcContext());

  MinorSweepingTracer trc(runtime());

  // Sweep unique IDs first before we sweep any tables that may be keyed based
  // on them.
  for (Cell* cell : cellsWithUid_) {
    auto* obj = static_cast<JSObject*>(cell);
    if (!IsForwarded(obj)) {
      // The object died: drop its unique ID.
      gc::RemoveUniqueId(obj);
    } else {
      // The object was promoted: move its unique ID to the tenured copy.
      JSObject* dst = Forwarded(obj);
      gc::TransferUniqueId(dst, obj);
    }
  }
  cellsWithUid_.clear();

  for (ZonesIter zone(runtime(), SkipAtoms); !zone.done(); zone.next()) {
    zone->sweepAfterMinorGC(&trc);
  }

  sweepMapAndSetObjects();

  runtime()->caches().sweepAfterMinorGC(&trc);
}
// Reset the nursery after a collection, poisoning the memory used during the
// last cycle so that touching a freed object crashes immediately.
void js::Nursery::clear() {
  // Poison the nursery contents so touching a freed object will crash.
  unsigned firstClearChunk;
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    // Poison all the chunks used in this cycle. The new start chunk is
    // repositioned in Nursery::collect() but there's no point optimising that
    // in this case.
    firstClearChunk = currentStartChunk_;
  } else {
    // In normal mode we start at the second chunk, the first one will be used
    // in the next cycle and poisoned in Nursery::collect();
    MOZ_ASSERT(currentStartChunk_ == 0);
    firstClearChunk = 1;
  }
  for (unsigned i = firstClearChunk; i < currentChunk_; ++i) {
    chunk(i).poisonAfterEvict();
  }
  // Clear only the used part of the chunk because that's the part we touched,
  // but only if it's not going to be re-used immediately (>= firstClearChunk).
  if (currentChunk_ >= firstClearChunk) {
    chunk(currentChunk_)
        .poisonAfterEvict(position() - chunk(currentChunk_).start());
  }

  // Reset the start chunk & position if we're not in this zeal mode, or we're
  // in it and close to the end of the nursery.
  MOZ_ASSERT(maxChunkCount() > 0);
  if (!gc->hasZealMode(ZealMode::GenerationalGC) ||
      (gc->hasZealMode(ZealMode::GenerationalGC) &&
       currentChunk_ + 1 == maxChunkCount())) {
    setCurrentChunk(0);
  }

  // Set current start position for isEmpty checks.
  setStartPosition();
}
// Return the number of bytes between the current start position and the end
// of chunk `chunkCount - 1`. Used for space accounting; returns 0 for an
// empty chunk range.
size_t js::Nursery::spaceToEnd(unsigned chunkCount) const {
  if (chunkCount == 0) {
    return 0;
  }

  unsigned lastChunk = chunkCount - 1;

  MOZ_ASSERT(lastChunk >= currentStartChunk_);
  MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <=
             NurseryChunkUsableSize);

  size_t bytes;

  if (chunkCount != 1) {
    // In the general case we have to add:
    //  + the bytes used in the first
    //    chunk which may be less than the total size of a chunk since in some
    //    zeal modes we start the first chunk at some later position
    //    (currentStartPosition_).
    //  + the size of all the other chunks.
    bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
            ((lastChunk - currentStartChunk_) * ChunkSize);
  } else {
    // In sub-chunk mode, but it also works whenever chunkCount == 1, we need
    // to use currentEnd_ since it may not refer to a full chunk.
    bytes = currentEnd_ - currentStartPosition_;
  }

  MOZ_ASSERT(bytes <= maxChunkCount() * ChunkSize);

  return bytes;
}
// Switch allocation to the given (already-allocated) chunk, resetting the
// bump pointer to its start. currentChunk_ must be updated before
// setCurrentEnd(), which reads it.
MOZ_ALWAYS_INLINE void js::Nursery::setCurrentChunk(unsigned chunkno) {
  MOZ_ASSERT(chunkno < allocatedChunkCount());

  currentChunk_ = chunkno;
  position_ = chunk(chunkno).start();
  setCurrentEnd();
}
1695 void js::Nursery::poisonAndInitCurrentChunk(size_t extent) {
1696 if (gc->hasZealMode(ZealMode::GenerationalGC) || !isSubChunkMode()) {
1697 chunk(currentChunk_).poisonAndInit(runtime());
1698 } else {
1699 extent = std::min(capacity_, extent);
1700 chunk(currentChunk_).poisonAndInit(runtime(), extent);
1704 MOZ_ALWAYS_INLINE void js::Nursery::setCurrentEnd() {
1705 MOZ_ASSERT_IF(isSubChunkMode(),
1706 currentChunk_ == 0 && currentEnd_ <= chunk(0).end());
1707 currentEnd_ =
1708 uintptr_t(&chunk(currentChunk_)) + std::min(capacity_, ChunkSize);
1709 if (canAllocateStrings_) {
1710 currentStringEnd_ = currentEnd_;
1712 if (canAllocateBigInts_) {
1713 currentBigIntEnd_ = currentEnd_;
1717 bool js::Nursery::allocateNextChunk(const unsigned chunkno,
1718 AutoLockGCBgAlloc& lock) {
1719 const unsigned priorCount = allocatedChunkCount();
1720 const unsigned newCount = priorCount + 1;
1722 MOZ_ASSERT((chunkno == currentChunk_ + 1) ||
1723 (chunkno == 0 && allocatedChunkCount() == 0));
1724 MOZ_ASSERT(chunkno == allocatedChunkCount());
1725 MOZ_ASSERT(chunkno < HowMany(capacity(), ChunkSize));
1727 if (!chunks_.resize(newCount)) {
1728 return false;
1731 TenuredChunk* newChunk;
1732 newChunk = gc->getOrAllocChunk(lock);
1733 if (!newChunk) {
1734 chunks_.shrinkTo(priorCount);
1735 return false;
1738 chunks_[chunkno] = NurseryChunk::fromChunk(newChunk);
1739 return true;
// Record the current chunk and position as the logical start of the nursery;
// isEmpty checks and space accounting compare against these values.
MOZ_ALWAYS_INLINE void js::Nursery::setStartPosition() {
  currentStartChunk_ = currentChunk_;
  currentStartPosition_ = position();
}
// Grow or shrink the nursery based on targetSize(), clamped to the tunable
// min/max, then kick off any queued background decommit work.
void js::Nursery::maybeResizeNursery(JS::GCOptions options,
                                     JS::GCReason reason) {
#ifdef JS_GC_ZEAL
  // This zeal mode disables nursery resizing.
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    return;
  }
#endif

  // Wait for any in-flight decommit work before changing the layout.
  decommitTask->join();

  size_t newCapacity = mozilla::Clamp(targetSize(options, reason),
                                      tunables().gcMinNurseryBytes(),
                                      tunables().gcMaxNurseryBytes());

  MOZ_ASSERT(roundSize(newCapacity) == newCapacity);

  if (newCapacity > capacity()) {
    growAllocableSpace(newCapacity);
  } else if (newCapacity < capacity()) {
    shrinkAllocableSpace(newCapacity);
  }

  AutoLockHelperThreadState lock;
  if (!decommitTask->isEmpty(lock)) {
    decommitTask->startOrRunIfIdle(lock);
  }
}
1776 static inline bool ClampDouble(double* value, double min, double max) {
1777 MOZ_ASSERT(!std::isnan(*value) && !std::isnan(min) && !std::isnan(max));
1778 MOZ_ASSERT(max >= min);
1780 if (*value <= min) {
1781 *value = min;
1782 return true;
1785 if (*value >= max) {
1786 *value = max;
1787 return true;
1790 return false;
// Compute the desired nursery capacity for the next cycle from promotion
// rate, GC duty factor and collection time, with exponential smoothing over
// recent collections. Returns an unclamped byte count (the caller clamps it
// to the tunable min/max); 0 means "as small as possible".
size_t js::Nursery::targetSize(JS::GCOptions options, JS::GCReason reason) {
  // Shrink the nursery as much as possible if purging was requested or in low
  // memory situations.
  if (options == JS::GCOptions::Shrink || gc::IsOOMReason(reason) ||
      gc->systemHasLowMemory()) {
    clearRecentGrowthData();
    return 0;
  }

  // Don't resize the nursery during shutdown.
  if (options == JS::GCOptions::Shutdown) {
    clearRecentGrowthData();
    return capacity();
  }

  TimeStamp now = TimeStamp::Now();

  // If the nursery is completely unused then minimise it.
  if (hasRecentGrowthData && previousGC.nurseryUsedBytes == 0 &&
      now - lastCollectionEndTime() >
          tunables().nurseryTimeoutForIdleCollection() &&
      !js::SupportDifferentialTesting()) {
    clearRecentGrowthData();
    return 0;
  }

  // Calculate the fraction of the nursery promoted out of its entire
  // capacity. This gives better results than using the promotion rate (based on
  // the amount of nursery used) in cases where we collect before the nursery is
  // full.
  double fractionPromoted =
      double(previousGC.tenuredBytes) / double(previousGC.nurseryCapacity);

  // Calculate the duty factor, the fraction of time spent collecting the
  // nursery.
  double dutyFactor = 0.0;
  TimeDuration collectorTime = now - collectionStartTime();
  if (hasRecentGrowthData && !js::SupportDifferentialTesting()) {
    TimeDuration totalTime = now - lastCollectionEndTime();
    dutyFactor = collectorTime.ToSeconds() / totalTime.ToSeconds();
  }

  // Calculate a growth factor to try to achieve target promotion rate and duty
  // factor goals.
  static const double PromotionGoal = 0.02;
  static const double DutyFactorGoal = 0.01;
  double promotionGrowth = fractionPromoted / PromotionGoal;
  double dutyGrowth = dutyFactor / DutyFactorGoal;
  double growthFactor = std::max(promotionGrowth, dutyGrowth);

  // Decrease the growth factor to try to keep collections shorter than a target
  // maximum time. Don't do this during page load.
  static const double MaxTimeGoalMs = 4.0;
  if (!gc->isInPageLoad() && !js::SupportDifferentialTesting()) {
    double timeGrowth = MaxTimeGoalMs / collectorTime.ToMilliseconds();
    growthFactor = std::min(growthFactor, timeGrowth);
  }

  // Limit the range of the growth factor to prevent transient high promotion
  // rates from affecting the nursery size too far into the future.
  static const double GrowthRange = 2.0;
  bool wasClamped = ClampDouble(&growthFactor, 1.0 / GrowthRange, GrowthRange);

  // Calculate the target size based on data from this collection.
  double target = double(capacity()) * growthFactor;

  // Use exponential smoothing on the target size to take into account data from
  // recent previous collections.
  if (hasRecentGrowthData &&
      now - lastCollectionEndTime() < TimeDuration::FromMilliseconds(200) &&
      !js::SupportDifferentialTesting()) {
    // Pay more attention to large changes.
    double fraction = wasClamped ? 0.5 : 0.25;
    smoothedTargetSize =
        (1 - fraction) * smoothedTargetSize + fraction * target;
  } else {
    smoothedTargetSize = target;
  }
  hasRecentGrowthData = true;

  // Leave size untouched if we are close to the target.
  static const double GoalWidth = 1.5;
  growthFactor = smoothedTargetSize / double(capacity());
  if (growthFactor > (1.0 / GoalWidth) && growthFactor < GoalWidth) {
    return capacity();
  }

  return roundSize(size_t(smoothedTargetSize));
}
1883 void js::Nursery::clearRecentGrowthData() {
1884 if (js::SupportDifferentialTesting()) {
1885 return;
1888 hasRecentGrowthData = false;
1889 smoothedTargetSize = 0.0;
1892 /* static */
1893 size_t js::Nursery::roundSize(size_t size) {
1894 size_t step = size >= ChunkSize ? ChunkSize : SystemPageSize();
1895 size = Round(size, step);
1897 MOZ_ASSERT(size >= SystemPageSize());
1899 return size;
// Increase capacity to newCapacity. May fail silently (leaving the old
// capacity) if space can't be reserved for decommit bookkeeping or the OS
// refuses to recommit pages in sub-chunk mode.
void js::Nursery::growAllocableSpace(size_t newCapacity) {
  MOZ_ASSERT_IF(!isSubChunkMode(), newCapacity > currentChunk_ * ChunkSize);
  MOZ_ASSERT(newCapacity <= tunables().gcMaxNurseryBytes());
  MOZ_ASSERT(newCapacity > capacity());

  if (!decommitTask->reserveSpaceForBytes(newCapacity)) {
    return;
  }

  if (isSubChunkMode()) {
    MOZ_ASSERT(currentChunk_ == 0);

    // The remainder of the chunk may have been decommitted.
    if (!chunk(0).markPagesInUseHard(std::min(newCapacity, ChunkSize))) {
      // The OS won't give us the memory we need, we can't grow.
      return;
    }

    // The capacity has changed and since we were in sub-chunk mode we need to
    // update the poison values / asan information for the now-valid region of
    // this chunk.
    size_t size = std::min(newCapacity, ChunkSize) - capacity();
    chunk(0).poisonRange(capacity(), size, JS_FRESH_NURSERY_PATTERN,
                         MemCheckKind::MakeUndefined);
  }

  capacity_ = newCapacity;

  setCurrentEnd();
}
// Queue every chunk from firstFreeChunk onward for background decommit and
// shrink the chunk vector accordingly.
void js::Nursery::freeChunksFrom(const unsigned firstFreeChunk) {
  MOZ_ASSERT(firstFreeChunk < chunks_.length());

  // The loop below may need to skip the first chunk, so take a mutable copy
  // of the starting index.
  unsigned firstChunkToDecommit = firstFreeChunk;

  if ((firstChunkToDecommit == 0) && isSubChunkMode()) {
    // Part of the first chunk may be hard-decommitted, un-decommit it so that
    // the GC's normal chunk-handling doesn't segfault.
    MOZ_ASSERT(currentChunk_ == 0);
    if (!chunk(0).markPagesInUseHard(ChunkSize)) {
      // Free the chunk if we can't allocate its pages.
      UnmapPages(static_cast<void*>(&chunk(0)), ChunkSize);
      firstChunkToDecommit = 1;
    }
  }

  {
    AutoLockHelperThreadState lock;
    for (size_t i = firstChunkToDecommit; i < chunks_.length(); i++) {
      decommitTask->queueChunk(chunks_[i], lock);
    }
  }

  chunks_.shrinkTo(firstFreeChunk);
}
// Decrease capacity to newCapacity, freeing whole chunks where possible and
// poisoning/decommitting the tail of chunk 0 in sub-chunk mode.
void js::Nursery::shrinkAllocableSpace(size_t newCapacity) {
#ifdef JS_GC_ZEAL
  // Generational zeal mode keeps the nursery at a fixed size.
  if (gc->hasZealMode(ZealMode::GenerationalGC)) {
    return;
  }
#endif

  // Don't shrink the nursery to zero (use Nursery::disable() instead)
  // This can't happen due to the rounding-down performed above because of the
  // clamping in maybeResizeNursery().
  MOZ_ASSERT(newCapacity != 0);
  // Don't attempt to shrink it to the same size.
  if (newCapacity == capacity_) {
    return;
  }
  MOZ_ASSERT(newCapacity < capacity_);

  unsigned newCount = HowMany(newCapacity, ChunkSize);
  if (newCount < allocatedChunkCount()) {
    freeChunksFrom(newCount);
  }

  size_t oldCapacity = capacity_;
  capacity_ = newCapacity;

  setCurrentEnd();

  if (isSubChunkMode()) {
    MOZ_ASSERT(currentChunk_ == 0);
    // Poison the newly out-of-bounds tail of chunk 0 and hand it to the
    // decommit task.
    size_t size = std::min(oldCapacity, ChunkSize) - newCapacity;
    chunk(0).poisonRange(newCapacity, size, JS_SWEPT_NURSERY_PATTERN,
                         MemCheckKind::MakeNoAccess);

    AutoLockHelperThreadState lock;
    decommitTask->queueRange(capacity_, chunk(0), lock);
  }
}
// Return the current allocation limit, with sanity checks on its relation to
// the current chunk's bounds.
uintptr_t js::Nursery::currentEnd() const {
  // These are separate asserts because it can be useful to see which one
  // failed.
  MOZ_ASSERT_IF(isSubChunkMode(), currentChunk_ == 0);
  MOZ_ASSERT_IF(isSubChunkMode(), currentEnd_ <= chunk(currentChunk_).end());
  MOZ_ASSERT_IF(!isSubChunkMode(), currentEnd_ == chunk(currentChunk_).end());
  MOZ_ASSERT(currentEnd_ != chunk(currentChunk_).start());
  return currentEnd_;
}
// The nursery shares the GC's statistics object; there is no nursery-local
// stats instance.
gcstats::Statistics& js::Nursery::stats() const { return gc->stats(); }
// Scheduling tunables are owned by the GC; expose them read-only here.
MOZ_ALWAYS_INLINE const js::gc::GCSchedulingTunables& js::Nursery::tunables()
    const {
  return gc->tunables;
}
// The nursery is in sub-chunk mode when its whole capacity fits inside the
// usable area of a single chunk.
bool js::Nursery::isSubChunkMode() const {
  return capacity() <= NurseryChunkUsableSize;
}
2020 void js::Nursery::sweepMapAndSetObjects() {
2021 auto gcx = runtime()->gcContext();
2023 for (auto mapobj : mapsWithNurseryMemory_) {
2024 MapObject::sweepAfterMinorGC(gcx, mapobj);
2026 mapsWithNurseryMemory_.clearAndFree();
2028 for (auto setobj : setsWithNurseryMemory_) {
2029 SetObject::sweepAfterMinorGC(gcx, setobj);
2031 setsWithNurseryMemory_.clearAndFree();
// Wait for the background decommit task to finish any queued work.
void js::Nursery::joinDecommitTask() { decommitTask->join(); }
// Public API: enable nursery allocation of strings. The nursery is evicted
// and all JIT code is discarded first, since both may depend on the current
// setting.
JS_PUBLIC_API void JS::EnableNurseryStrings(JSContext* cx) {
  AutoEmptyNursery empty(cx);
  ReleaseAllJITCode(cx->gcContext());
  cx->runtime()->gc.nursery().enableStrings();
}
// Public API: disable nursery allocation of strings. The nursery is evicted
// and all JIT code is discarded first, since both may depend on the current
// setting.
JS_PUBLIC_API void JS::DisableNurseryStrings(JSContext* cx) {
  AutoEmptyNursery empty(cx);
  ReleaseAllJITCode(cx->gcContext());
  cx->runtime()->gc.nursery().disableStrings();
}
// Public API: enable nursery allocation of BigInts. The nursery is evicted
// and all JIT code is discarded first, since both may depend on the current
// setting.
JS_PUBLIC_API void JS::EnableNurseryBigInts(JSContext* cx) {
  AutoEmptyNursery empty(cx);
  ReleaseAllJITCode(cx->gcContext());
  cx->runtime()->gc.nursery().enableBigInts();
}
2054 JS_PUBLIC_API void JS::DisableNurseryBigInts(JSContext* cx) {
2055 AutoEmptyNursery empty(cx);
2056 ReleaseAllJITCode(cx->gcContext());
2057 cx->runtime()->gc.nursery().disableBigInts();