Bug 1874684 - Part 17: Fix uninitialised variable warnings from clang-tidy. r=allstarschh
memory/build/PHC.cpp
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 // PHC is a probabilistic heap checker. A tiny fraction of randomly chosen heap
8 // allocations are subject to some expensive checking via the use of OS page
9 // access protection. A failed check triggers a crash, whereupon useful
10 // information about the failure is put into the crash report. The cost and
11 // coverage for each user are minimal, but spread over the entire user base the
12 // coverage becomes significant.
14 // The idea comes from Chromium, where it is called GWP-ASAN. (Firefox uses PHC
15 // as the name because GWP-ASAN is long, awkward, and doesn't have any
16 // particular meaning.)
18 // In the current implementation up to 64 allocations per process can become
19 // PHC allocations. These allocations must be page-sized or smaller. Each PHC
20 // allocation gets its own page, and when the allocation is freed its page is
21 // marked inaccessible until the page is reused for another allocation. This
22 // means that a use-after-free defect (which includes double-frees) will be
23 // caught if the use occurs before the page is reused for another allocation.
24 // The crash report will contain stack traces for the allocation site, the free
25 // site, and the use-after-free site, which is often enough to diagnose the
26 // defect.
28 // Also, each PHC allocation is followed by a guard page. The PHC allocation is
29 // positioned so that its end abuts the guard page (or as close as possible,
30 // given alignment constraints). This means that a bounds violation at the end
31 // of the allocation (overflow) will be caught. The crash report will contain
32 // stack traces for the allocation site and the bounds violation use site,
33 // which is often enough to diagnose the defect.
35 // (A bounds violation at the start of the allocation (underflow) will not be
36 // caught, unless it is sufficiently large to hit the preceding allocation's
37 // guard page, which is not that likely. It would be possible to look more
38 // assiduously for underflow by randomly placing some allocations at the end of
39 // the page and some at the start of the page, and GWP-ASAN does this. PHC does
40 // not, however, because overflow is likely to be much more common than
41 // underflow in practice.)
43 // We use a simple heuristic to categorize a guard page access as overflow or
44 // underflow: if the address falls in the lower half of the guard page, we
45 // assume it is overflow; otherwise we assume it is underflow. More
46 // sophisticated heuristics are possible, but this one is very simple, and it is
47 // likely that most overflows/underflows in practice are very close to the page
48 // boundary.
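// For example (an illustrative sketch, not code from this file; it assumes
// 4 KiB pages and that PHC happened to sample the allocation, so `a` sits
// just before a guard page):
//
//   char* a = (char*)malloc(24);
//   a[40] = 0;     // a few bytes past the end: lands in the lower half of
//                  // the following guard page -> reported as an overflow
//   a[-4100] = 0;  // far before the start: lands in the upper half of the
//                  // preceding guard page -> reported as an underflow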
50 // The design space for the randomization strategy is large. The current
51 // implementation has a large random delay before it starts operating, and a
52 // small random delay between each PHC allocation attempt. Each freed PHC
53 // allocation is quarantined for a medium random delay before being reused, in
54 // order to increase the chance of catching UAFs.
56 // The basic cost of PHC's operation is as follows.
58 // - The physical memory cost is 64 pages plus some metadata (including stack
59 // traces) for each page. This amounts to 256 KiB per process on
60 // architectures with 4 KiB pages and 1024 KiB on macOS/AArch64 which uses
61 // 16 KiB pages.
63 // - The virtual memory cost is the physical memory cost plus the guard pages:
64 // another 64 pages. This amounts to another 256 KiB per process on
65 // architectures with 4 KiB pages and 1024 KiB on macOS/AArch64 which uses
66 // 16 KiB pages. PHC is currently only enabled on 64-bit platforms so the
67 // impact of the virtual memory usage is negligible.
69 // - Every allocation requires a size check and a decrement-and-check of an
70 // atomic counter. When the counter reaches zero a PHC allocation can occur,
71 // which involves marking a page as accessible and getting a stack trace for
72 // the allocation site. Otherwise, mozjemalloc performs the allocation.
74 // - Every deallocation requires a range check on the pointer to see if it
75 // involves a PHC allocation. (The choice to only do PHC allocations that are
76 // a page or smaller enables this range check, because the 64 pages are
77 // contiguous. Allowing larger allocations would make this more complicated,
78 // and we definitely don't want something as slow as a hash table lookup on
79 // every deallocation.) PHC deallocations involve marking a page as
80 // inaccessible and getting a stack trace for the deallocation site.
82 // Note that calls to realloc(), free(), and malloc_usable_size() will
83 // immediately crash if the given pointer falls within a page allocation's
84 // page, but does not point to the start of the allocation itself.
86 // void* p = malloc(64);
87 // free(p + 1); // p+1 doesn't point to the allocation start; crash
89 // Such crashes will not have the PHC fields in the crash report.
91 // PHC-specific tests can be run with the following commands:
92 // - gtests: `./mach gtest '*PHC*'`
93 // - xpcshell-tests: `./mach test toolkit/crashreporter/test/unit`
94 // - This runs some non-PHC tests as well.
96 #include "PHC.h"
98 #include <stdlib.h>
99 #include <time.h>
101 #include <algorithm>
103 #ifdef XP_WIN
104 # include <process.h>
105 #else
106 # include <sys/mman.h>
107 # include <sys/types.h>
108 # include <pthread.h>
109 # include <unistd.h>
110 #endif
112 #include "mozjemalloc.h"
115 #include "FdPrintf.h"
116 #include "Mutex.h"
117 #include "mozilla/Assertions.h"
118 #include "mozilla/Atomics.h"
119 #include "mozilla/Attributes.h"
120 #include "mozilla/CheckedInt.h"
121 #include "mozilla/Maybe.h"
122 #include "mozilla/StackWalk.h"
123 #include "mozilla/ThreadLocal.h"
124 #include "mozilla/XorShift128PlusRNG.h"
126 using namespace mozilla;
128 //---------------------------------------------------------------------------
129 // Utilities
130 //---------------------------------------------------------------------------
132 #ifdef ANDROID
133 // Android doesn't have pthread_atfork defined in pthread.h.
134 extern "C" MOZ_EXPORT int pthread_atfork(void (*)(void), void (*)(void),
135 void (*)(void));
136 #endif
138 #ifndef DISALLOW_COPY_AND_ASSIGN
139 # define DISALLOW_COPY_AND_ASSIGN(T) \
140 T(const T&); \
141 void operator=(const T&)
142 #endif
144 // This class provides infallible operations for the small number of heap
145 // allocations that PHC does for itself. It would be nice if we could use the
146 // InfallibleAllocPolicy from mozalloc, but PHC cannot use mozalloc.
147 class InfallibleAllocPolicy {
148 public:
149 static void AbortOnFailure(const void* aP) {
150 if (!aP) {
151 MOZ_CRASH("PHC failed to allocate");
155 template <class T>
156 static T* new_() {
157 void* p = MozJemalloc::malloc(sizeof(T));
158 AbortOnFailure(p);
159 return new (p) T;
163 //---------------------------------------------------------------------------
164 // Stack traces
165 //---------------------------------------------------------------------------
167 // This code is similar to the equivalent code within DMD.
169 class StackTrace : public phc::StackTrace {
170 public:
171 StackTrace() = default;
173 void Clear() { mLength = 0; }
175 void Fill();
177 private:
178 static void StackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
179 void* aClosure) {
180 StackTrace* st = (StackTrace*)aClosure;
181 MOZ_ASSERT(st->mLength < kMaxFrames);
182 st->mPcs[st->mLength] = aPc;
183 st->mLength++;
184 MOZ_ASSERT(st->mLength == aFrameNumber);
188 // WARNING WARNING WARNING: this function must only be called when GMut::sMutex
189 // is *not* locked, otherwise we might get deadlocks.
191 // How? On Windows, MozStackWalk() can lock a mutex, M, from the shared library
192 // loader. Another thread might call malloc() while holding M locked (when
193 // loading a shared library) and try to lock GMut::sMutex, causing a deadlock.
194 // So GMut::sMutex can't be locked during the call to MozStackWalk(). (For
195 // details, see https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8. On
196 // Linux, something similar can happen; see bug 824340. So we just disallow it
197 // on all platforms.)
199 // In DMD, to avoid this problem we temporarily unlock the equivalent mutex for
200 // the MozStackWalk() call. But that's grotty, and things are a bit different
201 // here, so we just require that stack traces be obtained before locking
202 // GMut::sMutex.
204 // Unfortunately, there is no reliable way at compile-time or run-time to ensure
205 // this pre-condition. Hence this large comment.
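// In code form, callers must follow the pattern used later in this file
// (e.g. in MaybePageAlloc()):
//
//   StackTrace stack;
//   stack.Fill();                      // walk the stack *first*...
//   MutexAutoLock lock(GMut::sMutex);  // ...and only then lock sMutex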
207 void StackTrace::Fill() {
208 mLength = 0;
210 // These ifdefs should be kept in sync with the conditions in
211 // phc_implies_frame_pointers in build/moz.configure/memory.configure
212 #if defined(XP_WIN) && defined(_M_IX86)
213 // This avoids MozStackWalk(), which causes unusably slow startup on Win32
214 // when it is called during static initialization (see bug 1241684).
216 // This code is cribbed from the Gecko Profiler, which also uses
217 // FramePointerStackWalk() on Win32: Registers::SyncPopulate() for the
218 // frame pointer, and GetStackTop() for the stack end.
219 CONTEXT context;
220 RtlCaptureContext(&context);
221 void** fp = reinterpret_cast<void**>(context.Ebp);
223 PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
224 void* stackEnd = static_cast<void*>(pTib->StackBase);
225 FramePointerStackWalk(StackWalkCallback, kMaxFrames, this, fp, stackEnd);
226 #elif defined(XP_DARWIN)
227 // This avoids MozStackWalk(), which has become unusably slow on Mac due to
228 // changes in libunwind.
230 // This code is cribbed from the Gecko Profiler, which also uses
231 // FramePointerStackWalk() on Mac: Registers::SyncPopulate() for the frame
232 // pointer, and GetStackTop() for the stack end.
233 # pragma GCC diagnostic push
234 # pragma GCC diagnostic ignored "-Wframe-address"
235 void** fp = reinterpret_cast<void**>(__builtin_frame_address(1));
236 # pragma GCC diagnostic pop
237 void* stackEnd = pthread_get_stackaddr_np(pthread_self());
238 FramePointerStackWalk(StackWalkCallback, kMaxFrames, this, fp, stackEnd);
239 #else
240 MozStackWalk(StackWalkCallback, nullptr, kMaxFrames, this);
241 #endif
244 //---------------------------------------------------------------------------
245 // Logging
246 //---------------------------------------------------------------------------
248 // Change this to 1 to enable some PHC logging. Useful for debugging.
249 #define PHC_LOGGING 0
251 #if PHC_LOGGING
253 static size_t GetPid() { return size_t(getpid()); }
255 static size_t GetTid() {
256 # if defined(XP_WIN)
257 return size_t(GetCurrentThreadId());
258 # else
259 return size_t(pthread_self());
260 # endif
263 # if defined(XP_WIN)
264 # define LOG_STDERR \
265 reinterpret_cast<intptr_t>(GetStdHandle(STD_ERROR_HANDLE))
266 # else
267 # define LOG_STDERR 2
268 # endif
269 # define LOG(fmt, ...) \
270 FdPrintf(LOG_STDERR, "PHC[%zu,%zu,~%zu] " fmt, GetPid(), GetTid(), \
271 size_t(GAtomic::Now()), ##__VA_ARGS__)
273 #else
275 # define LOG(fmt, ...)
277 #endif // PHC_LOGGING
279 //---------------------------------------------------------------------------
280 // Global state
281 //---------------------------------------------------------------------------
283 // Throughout this entire file time is measured as the number of sub-page
284 // allocations performed (by PHC and mozjemalloc combined). `Time` is 64-bit
285 // because we could have more than 2**32 allocations in a long-running session.
286 // `Delay` is 32-bit because the delays used within PHC are always much smaller
287 // than 2**32.
288 using Time = uint64_t; // A moment in time.
289 using Delay = uint32_t; // A time duration.
291 // PHC only runs if the page size is 4 KiB; anything more is uncommon and would
292 // use too much memory. So we hardwire this size for all platforms but macOS
293 // on ARM processors. For the latter we make an exception because the minimum
294 // page size supported is 16 KiB so there's no way to go below that.
295 static const size_t kPageSize =
296 #if defined(XP_DARWIN) && defined(__aarch64__)
297 16384
298 #else
299 4096
300 #endif
303 // We align the PHC area to a multiple of the jemalloc and JS GC chunk size
304 // (both use 1MB aligned chunks) so that their address computations don't lead
305 // from non-PHC memory into PHC memory causing misleading PHC stacks to be
306 // attached to a crash report.
307 static const size_t kPhcAlign = 1024 * 1024;
309 static_assert(IsPowerOfTwo(kPhcAlign));
310 static_assert((kPhcAlign % kPageSize) == 0);
312 // There are two kinds of page.
313 // - Allocation pages, from which allocations are made.
314 // - Guard pages, which are never touched by PHC.
316 // These page kinds are interleaved; each allocation page has a guard page on
317 // either side.
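// Laid out in memory, the region therefore looks like this (A<i> is the
// allocation page with index i, G is a guard page, and
// kNumAllPages == 2 * kNumAllocPages + 1):
//
//   | G | A0 | G | A1 | G | A2 | ... | A(kNumAllocPages-1) | G |
//
// so allocation page i starts at (2 * i + 1) * kPageSize from the start of
// the region, as computed by GConst::AllocPagePtr() below.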
318 #ifdef EARLY_BETA_OR_EARLIER
319 static const size_t kNumAllocPages = kPageSize == 4096 ? 4096 : 1024;
320 #else
321 // This will use between 82KiB and 1.1MiB per process (depending on how many
322 // objects are currently allocated). We will tune this in the future.
323 static const size_t kNumAllocPages = kPageSize == 4096 ? 256 : 64;
324 #endif
325 static const size_t kNumAllPages = kNumAllocPages * 2 + 1;
327 // The total size of the allocation pages and guard pages.
328 static const size_t kAllPagesSize = kNumAllPages * kPageSize;
330 // jemalloc adds a guard page to the end of our allocation, see the comment in
331 // AllocAllPages() for more information.
332 static const size_t kAllPagesJemallocSize = kAllPagesSize - kPageSize;
334 // The default state for PHC. Either Enabled or OnlyFree.
335 #define DEFAULT_STATE mozilla::phc::OnlyFree
337 // The maximum time.
338 static const Time kMaxTime = ~(Time(0));
340 // Truncate aRnd to the range (1 .. aAvgDelay*2). If aRnd is random, this
341 // results in an average value of aAvgDelay + 0.5, which is close enough to
342 // aAvgDelay. aAvgDelay must be a power-of-two for speed.
343 constexpr Delay Rnd64ToDelay(Delay aAvgDelay, uint64_t aRnd) {
344 MOZ_ASSERT(IsPowerOfTwo(aAvgDelay), "must be a power of two");
346 return (aRnd & (uint64_t(aAvgDelay) * 2 - 1)) + 1;
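// A worked example with the default mAvgAllocDelay of 16 * 1024 (defined in
// GMut below): the mask keeps the low 15 bits of aRnd, giving a value in
// 0..32767, so the returned delay lies in 1..32768 with a mean of 16384.5.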
349 static Delay CheckProbability(int64_t aProb) {
350 // Limit delays calculated from prefs to 0x80000000; this is the largest
351 // power-of-two that fits in a Delay, since Delay is a uint32_t.
352 // The minimum is 2, so that not every allocation goes straight to PHC.
353 return RoundUpPow2(
354 std::min(std::max(aProb, int64_t(2)), int64_t(0x80000000)));
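// For example: a pref value of 1000 becomes 1024, a value of 0 or 1 is
// clamped up to 2, and anything larger than 0x80000000 is clamped down to
// 0x80000000 before rounding.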
357 // Maps a pointer to a PHC-specific structure:
358 // - Nothing
359 // - A guard page (it is unspecified which one)
360 // - An allocation page (with an index < kNumAllocPages)
362 // The standard way of handling a PtrKind is to check IsNothing(), and if that
363 // fails, to check IsGuardPage(), and if that fails, to call AllocPageIndex().
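// In code form, that idiom (as used by the deallocation paths later in this
// file) looks like:
//
//   PtrKind pk = gConst->PtrKind(aPtr);
//   if (pk.IsNothing()) {
//     // not a PHC pointer; defer to mozjemalloc
//   } else if (pk.IsGuardPage()) {
//     GMut::CrashOnGuardPage(aPtr);
//   } else {
//     uintptr_t index = pk.AllocPageIndex();
//     // ... operate on allocation page `index` ...
//   }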
364 class PtrKind {
365 private:
366 enum class Tag : uint8_t {
367 Nothing,
368 GuardPage,
369 AllocPage,
372 Tag mTag;
373 uintptr_t mIndex; // Only used if mTag == Tag::AllocPage.
375 public:
376 // Detect what a pointer points to. This constructor must be fast because it
377 // is called for every call to free(), realloc(), malloc_usable_size(), and
378 // jemalloc_ptr_info().
379 PtrKind(const void* aPtr, const uint8_t* aPagesStart,
380 const uint8_t* aPagesLimit) {
381 if (!(aPagesStart <= aPtr && aPtr < aPagesLimit)) {
382 mTag = Tag::Nothing;
383 } else {
384 uintptr_t offset = static_cast<const uint8_t*>(aPtr) - aPagesStart;
385 uintptr_t allPageIndex = offset / kPageSize;
386 MOZ_ASSERT(allPageIndex < kNumAllPages);
387 if (allPageIndex & 1) {
388 // Odd-indexed pages are allocation pages.
389 uintptr_t allocPageIndex = allPageIndex / 2;
390 MOZ_ASSERT(allocPageIndex < kNumAllocPages);
391 mTag = Tag::AllocPage;
392 mIndex = allocPageIndex;
393 } else {
394 // Even-numbered pages are guard pages.
395 mTag = Tag::GuardPage;
400 bool IsNothing() const { return mTag == Tag::Nothing; }
401 bool IsGuardPage() const { return mTag == Tag::GuardPage; }
403 // This should only be called after IsNothing() and IsGuardPage() have been
404 // checked and failed.
405 uintptr_t AllocPageIndex() const {
406 MOZ_RELEASE_ASSERT(mTag == Tag::AllocPage);
407 return mIndex;
411 // Shared, atomic, mutable global state.
412 class GAtomic {
413 public:
414 static void Init(Delay aFirstDelay) {
415 sAllocDelay = aFirstDelay;
417 LOG("Initial sAllocDelay <- %zu\n", size_t(aFirstDelay));
420 static Time Now() { return sNow; }
422 static void IncrementNow() { sNow++; }
424 // Decrements the delay and returns the decremented value.
425 static int32_t DecrementDelay() { return --sAllocDelay; }
427 static void SetAllocDelay(Delay aAllocDelay) { sAllocDelay = aAllocDelay; }
429 static bool AllocDelayHasWrapped(Delay aAvgAllocDelay,
430 Delay aAvgFirstAllocDelay) {
431 // Delay is unsigned so we can't test for less than zero. Instead test if
432 // it has wrapped around by comparing with the maximum value we ever use.
433 return sAllocDelay > 2 * std::max(aAvgAllocDelay, aAvgFirstAllocDelay);
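// For example, with the default delays defined in GMut below
// (mAvgAllocDelay = 16 * 1024, mAvgFirstAllocDelay = 64 * 1024) the
// threshold is 131072, so a decrement that wraps sAllocDelay around to a
// value near 0xFFFFFFFF is easily detected.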
436 private:
437 // The current time. Relaxed semantics because it's primarily used for
438 // determining if an allocation can be recycled yet and therefore it doesn't
439 // need to be exact.
440 static Atomic<Time, Relaxed> sNow;
442 // Delay until the next attempt at a page allocation. See the comment in
443 // MaybePageAlloc() for an explanation of why it uses ReleaseAcquire
444 // semantics.
445 static Atomic<Delay, ReleaseAcquire> sAllocDelay;
448 Atomic<Time, Relaxed> GAtomic::sNow;
449 Atomic<Delay, ReleaseAcquire> GAtomic::sAllocDelay;
451 // Shared, immutable global state. Initialized by replace_init() and never
452 // changed after that. replace_init() runs early enough that no synchronization
453 // is needed.
454 class GConst {
455 private:
456 // The bounds of the allocated pages.
457 uint8_t* const mPagesStart;
458 uint8_t* const mPagesLimit;
460 // Allocates the allocation pages and the guard pages, contiguously.
461 uint8_t* AllocAllPages() {
462 // The memory allocated here is never freed, because it would happen at
463 // process termination when it would be of little use.
465 // We can rely on jemalloc's behaviour that when it allocates memory aligned
466 // with its own chunk size it will over-allocate and guarantee that the
467 // memory after the end of our allocation, but before the next chunk, is
468 // decommitted and inaccessible. Elsewhere in PHC we assume that we own
469 // that page (so that memory errors in it get caught by PHC) but here we
470 // use kAllPagesJemallocSize which subtracts jemalloc's guard page.
471 void* pages = MozJemalloc::memalign(kPhcAlign, kAllPagesJemallocSize);
472 if (!pages) {
473 MOZ_CRASH();
476 // Make the pages inaccessible.
477 #ifdef XP_WIN
478 if (!VirtualFree(pages, kAllPagesJemallocSize, MEM_DECOMMIT)) {
479 MOZ_CRASH("VirtualFree failed");
481 #else
482 if (mmap(pages, kAllPagesJemallocSize, PROT_NONE,
483 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == MAP_FAILED) {
484 MOZ_CRASH("mmap failed");
486 #endif
488 return static_cast<uint8_t*>(pages);
491 public:
492 GConst()
493 : mPagesStart(AllocAllPages()), mPagesLimit(mPagesStart + kAllPagesSize) {
494 LOG("AllocAllPages at %p..%p\n", mPagesStart, mPagesLimit);
497 class PtrKind PtrKind(const void* aPtr) {
498 class PtrKind pk(aPtr, mPagesStart, mPagesLimit);
499 return pk;
502 bool IsInFirstGuardPage(const void* aPtr) {
503 return mPagesStart <= aPtr && aPtr < mPagesStart + kPageSize;
506 // Get the address of the allocation page referred to via an index. Used when
507 // marking the page as accessible/inaccessible.
508 uint8_t* AllocPagePtr(uintptr_t aIndex) {
509 MOZ_ASSERT(aIndex < kNumAllocPages);
510 // Multiply by two and add one to account for allocation pages *and* guard
511 // pages.
512 return mPagesStart + (2 * aIndex + 1) * kPageSize;
516 static GConst* gConst;
518 // This type is used as a proof-of-lock token, to make it clear which functions
519 // require sMutex to be locked.
520 using GMutLock = const MutexAutoLock&;
522 // Shared, mutable global state. Protected by sMutex; all accessing functions
523 // take a GMutLock as proof that sMutex is held.
524 class GMut {
525 enum class AllocPageState {
526 NeverAllocated = 0,
527 InUse = 1,
528 Freed = 2,
531 // Metadata for each allocation page.
532 class AllocPageInfo {
533 public:
534 AllocPageInfo()
535 : mState(AllocPageState::NeverAllocated),
536 mBaseAddr(nullptr),
537 mReuseTime(0) {}
539 // The current allocation page state.
540 AllocPageState mState;
542 // The arena that the allocation is nominally from. This isn't meaningful
543 // within PHC, which has no arenas. But it is necessary for reallocation of
544 // page allocations as normal allocations, such as in this code:
546 // p = moz_arena_malloc(arenaId, 4096);
547 // realloc(p, 8192);
549 // The realloc is more than one page, and thus too large for PHC to handle.
550 // Therefore, if PHC handles the first allocation, it must ask mozjemalloc
551 // to allocate the 8192 bytes in the correct arena, and to do that, it must
552 // call MozJemalloc::moz_arena_malloc with the correct arenaId under the
553 // covers. Therefore it must record that arenaId.
555 // This field is also needed for jemalloc_ptr_info() to work, because it
556 // also returns the arena ID (but only in debug builds).
558 // - NeverAllocated: must be 0.
559 // - InUse | Freed: can be any valid arena ID value.
560 Maybe<arena_id_t> mArenaId;
562 // The starting address of the allocation. Will not be the same as the page
563 // address unless the allocation is a full page.
564 // - NeverAllocated: must be 0.
565 // - InUse | Freed: must be within the allocation page.
566 uint8_t* mBaseAddr;
568 // Usable size is computed as the number of bytes between the pointer and
569 // the end of the allocation page. This might be bigger than the requested
570 // size, especially if an outsized alignment is requested.
571 size_t UsableSize() const {
572 return mState == AllocPageState::NeverAllocated
573 ? 0
574 : kPageSize - (reinterpret_cast<uintptr_t>(mBaseAddr) &
575 (kPageSize - 1));
578 // The internal fragmentation for this allocation.
579 size_t FragmentationBytes() const {
580 MOZ_ASSERT(kPageSize >= UsableSize());
581 return mState == AllocPageState::InUse ? kPageSize - UsableSize() : 0;
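// A worked example (assuming 4 KiB pages): if mBaseAddr is
// pagePtr + kPageSize - 128, then UsableSize() is 128 and, while the page
// is InUse, FragmentationBytes() is 4096 - 128 = 3968.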
584 // The allocation stack.
585 // - NeverAllocated: Nothing.
586 // - InUse | Freed: Some.
587 Maybe<StackTrace> mAllocStack;
589 // The free stack.
590 // - NeverAllocated | InUse: Nothing.
591 // - Freed: Some.
592 Maybe<StackTrace> mFreeStack;
594 // The time at which the page is available for reuse, as measured against
595 // GAtomic::sNow. When the page is in use this value will be kMaxTime.
596 // - NeverAllocated: must be 0.
597 // - InUse: must be kMaxTime.
598 // - Freed: must be > 0 and < kMaxTime.
599 Time mReuseTime;
602 public:
603 // The mutex that protects the other members.
604 static Mutex sMutex MOZ_UNANNOTATED;
606 // The RNG seeds here are poor, but non-reentrant since this can be called
607 // from malloc(). SetState() will reset the RNG later.
608 GMut() : mRNG(RandomSeed<1>(), RandomSeed<2>()) { sMutex.Init(); }
610 uint64_t Random64(GMutLock) { return mRNG.next(); }
612 bool IsPageInUse(GMutLock, uintptr_t aIndex) {
613 return mAllocPages[aIndex].mState == AllocPageState::InUse;
616 // Is the page free? And if so, has enough time passed that we can use it?
617 bool IsPageAllocatable(GMutLock, uintptr_t aIndex, Time aNow) {
618 const AllocPageInfo& page = mAllocPages[aIndex];
619 return page.mState != AllocPageState::InUse && aNow >= page.mReuseTime;
622 // Get the address of the allocation page referred to via an index. Used
623 // when checking pointers against page boundaries.
624 uint8_t* AllocPageBaseAddr(GMutLock, uintptr_t aIndex) {
625 return mAllocPages[aIndex].mBaseAddr;
628 Maybe<arena_id_t> PageArena(GMutLock aLock, uintptr_t aIndex) {
629 const AllocPageInfo& page = mAllocPages[aIndex];
630 AssertAllocPageInUse(aLock, page);
632 return page.mArenaId;
635 size_t PageUsableSize(GMutLock aLock, uintptr_t aIndex) {
636 const AllocPageInfo& page = mAllocPages[aIndex];
637 AssertAllocPageInUse(aLock, page);
639 return page.UsableSize();
642 // The total fragmentation in PHC
643 size_t FragmentationBytes() const {
644 size_t sum = 0;
645 for (const auto& page : mAllocPages) {
646 sum += page.FragmentationBytes();
648 return sum;
651 void SetPageInUse(GMutLock aLock, uintptr_t aIndex,
652 const Maybe<arena_id_t>& aArenaId, uint8_t* aBaseAddr,
653 const StackTrace& aAllocStack) {
654 AllocPageInfo& page = mAllocPages[aIndex];
655 AssertAllocPageNotInUse(aLock, page);
657 page.mState = AllocPageState::InUse;
658 page.mArenaId = aArenaId;
659 page.mBaseAddr = aBaseAddr;
660 page.mAllocStack = Some(aAllocStack);
661 page.mFreeStack = Nothing();
662 page.mReuseTime = kMaxTime;
665 #if PHC_LOGGING
666 Time GetFreeTime(uintptr_t aIndex) const { return mFreeTime[aIndex]; }
667 #endif
669 void ResizePageInUse(GMutLock aLock, uintptr_t aIndex,
670 const Maybe<arena_id_t>& aArenaId, uint8_t* aNewBaseAddr,
671 const StackTrace& aAllocStack) {
672 AllocPageInfo& page = mAllocPages[aIndex];
673 AssertAllocPageInUse(aLock, page);
675 // page.mState is not changed.
676 if (aArenaId.isSome()) {
677 // Crash if the arenas don't match.
678 MOZ_RELEASE_ASSERT(page.mArenaId == aArenaId);
680 page.mBaseAddr = aNewBaseAddr;
681 // We could just keep the original alloc stack, but the realloc stack is
682 // more recent and therefore seems more useful.
683 page.mAllocStack = Some(aAllocStack);
684 // page.mFreeStack is not changed.
685 // page.mReuseTime is not changed.
688 void SetPageFreed(GMutLock aLock, uintptr_t aIndex,
689 const Maybe<arena_id_t>& aArenaId,
690 const StackTrace& aFreeStack, Delay aReuseDelay) {
691 AllocPageInfo& page = mAllocPages[aIndex];
692 AssertAllocPageInUse(aLock, page);
694 page.mState = AllocPageState::Freed;
696 // page.mArenaId is left unchanged, for jemalloc_ptr_info() calls that
697 // occur after freeing (e.g. in the PtrInfo test in TestJemalloc.cpp).
698 if (aArenaId.isSome()) {
699 // Crash if the arenas don't match.
700 MOZ_RELEASE_ASSERT(page.mArenaId == aArenaId);
703 // page.mBaseAddr, and hence the page's usable size, is left unchanged, for
704 // reporting on UAF, and for jemalloc_ptr_info() calls that occur after
705 // freeing (e.g. in the PtrInfo test in TestJemalloc.cpp).
707 // page.mAllocStack is left unchanged, for reporting on UAF.
709 page.mFreeStack = Some(aFreeStack);
710 Time now = GAtomic::Now();
711 #if PHC_LOGGING
712 mFreeTime[aIndex] = now;
713 #endif
714 page.mReuseTime = now + aReuseDelay;
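// For example, if GAtomic::Now() is 1000 when a page is freed and
// aReuseDelay is 500, mReuseTime becomes 1500, and IsPageAllocatable()
// will not hand the page out again until the global allocation count
// reaches 1500.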
717 static void CrashOnGuardPage(void* aPtr) {
718 // An operation on a guard page? This is a bounds violation. Deliberately
719 // touch the page in question to cause a crash that triggers the usual PHC
720 // machinery.
721 LOG("CrashOnGuardPage(%p), bounds violation\n", aPtr);
722 *static_cast<uint8_t*>(aPtr) = 0;
723 MOZ_CRASH("unreachable");
726 void EnsureValidAndInUse(GMutLock, void* aPtr, uintptr_t aIndex)
727 MOZ_REQUIRES(sMutex) {
728 const AllocPageInfo& page = mAllocPages[aIndex];
730 // The pointer must point to the start of the allocation.
731 MOZ_RELEASE_ASSERT(page.mBaseAddr == aPtr);
733 if (page.mState == AllocPageState::Freed) {
734 LOG("EnsureValidAndInUse(%p), use-after-free\n", aPtr);
735 // An operation on a freed page? This is a particular kind of
736 // use-after-free. Deliberately touch the page in question, in order to
737 // cause a crash that triggers the usual PHC machinery. But unlock sMutex
738 // first, because that self-same PHC machinery needs to re-lock it, and
739 // the crash causes non-local control flow so sMutex won't be unlocked
740 // the normal way in the caller.
741 sMutex.Unlock();
742 *static_cast<uint8_t*>(aPtr) = 0;
743 MOZ_CRASH("unreachable");
747 // This expects GMut::sMutex to be locked but can't check it with a parameter
748 // since we try-lock it.
749 void FillAddrInfo(uintptr_t aIndex, const void* aBaseAddr, bool isGuardPage,
750 phc::AddrInfo& aOut) {
751 const AllocPageInfo& page = mAllocPages[aIndex];
752 if (isGuardPage) {
753 aOut.mKind = phc::AddrInfo::Kind::GuardPage;
754 } else {
755 switch (page.mState) {
756 case AllocPageState::NeverAllocated:
757 aOut.mKind = phc::AddrInfo::Kind::NeverAllocatedPage;
758 break;
760 case AllocPageState::InUse:
761 aOut.mKind = phc::AddrInfo::Kind::InUsePage;
762 break;
764 case AllocPageState::Freed:
765 aOut.mKind = phc::AddrInfo::Kind::FreedPage;
766 break;
768 default:
769 MOZ_CRASH();
772 aOut.mBaseAddr = page.mBaseAddr;
773 aOut.mUsableSize = page.UsableSize();
774 aOut.mAllocStack = page.mAllocStack;
775 aOut.mFreeStack = page.mFreeStack;
778 void FillJemallocPtrInfo(GMutLock, const void* aPtr, uintptr_t aIndex,
779 jemalloc_ptr_info_t* aInfo) {
780 const AllocPageInfo& page = mAllocPages[aIndex];
781 switch (page.mState) {
782 case AllocPageState::NeverAllocated:
783 break;
785 case AllocPageState::InUse: {
786 // Only return TagLiveAlloc if the pointer is within the bounds of the
787 // allocation's usable size.
788 uint8_t* base = page.mBaseAddr;
789 uint8_t* limit = base + page.UsableSize();
790 if (base <= aPtr && aPtr < limit) {
791 *aInfo = {TagLiveAlloc, page.mBaseAddr, page.UsableSize(),
792 page.mArenaId.valueOr(0)};
793 return;
795 break;
798 case AllocPageState::Freed: {
799 // Only return TagFreedAlloc if the pointer is within the bounds of the
800 // former allocation's usable size.
801 uint8_t* base = page.mBaseAddr;
802 uint8_t* limit = base + page.UsableSize();
803 if (base <= aPtr && aPtr < limit) {
804 *aInfo = {TagFreedAlloc, page.mBaseAddr, page.UsableSize(),
805 page.mArenaId.valueOr(0)};
806 return;
808 break;
811 default:
812 MOZ_CRASH();
815 // Pointers into guard pages will end up here, as will pointers into
816 // allocation pages that aren't within the allocation's bounds.
817 *aInfo = {TagUnknown, nullptr, 0, 0};
820 #ifndef XP_WIN
821 static void prefork() MOZ_NO_THREAD_SAFETY_ANALYSIS { sMutex.Lock(); }
822 static void postfork_parent() MOZ_NO_THREAD_SAFETY_ANALYSIS {
823 sMutex.Unlock();
825 static void postfork_child() { sMutex.Init(); }
826 #endif
828 #if PHC_LOGGING
829 void IncPageAllocHits(GMutLock) { mPageAllocHits++; }
830 void IncPageAllocMisses(GMutLock) { mPageAllocMisses++; }
831 #else
832 void IncPageAllocHits(GMutLock) {}
833 void IncPageAllocMisses(GMutLock) {}
834 #endif
836 phc::PHCStats GetPageStats(GMutLock) {
837 phc::PHCStats stats;
839 for (const auto& page : mAllocPages) {
840 stats.mSlotsAllocated += page.mState == AllocPageState::InUse ? 1 : 0;
841 stats.mSlotsFreed += page.mState == AllocPageState::Freed ? 1 : 0;
843 stats.mSlotsUnused =
844 kNumAllocPages - stats.mSlotsAllocated - stats.mSlotsFreed;
846 return stats;
849 #if PHC_LOGGING
850 size_t PageAllocHits(GMutLock) { return mPageAllocHits; }
851 size_t PageAllocAttempts(GMutLock) {
852 return mPageAllocHits + mPageAllocMisses;
855 // This is an integer because FdPrintf only supports integer printing.
856 size_t PageAllocHitRate(GMutLock) {
857 return mPageAllocHits * 100 / (mPageAllocHits + mPageAllocMisses);
859 #endif
861 // Should we make new PHC allocations?
862 bool ShouldMakeNewAllocations() const {
863 return mPhcState == mozilla::phc::Enabled;
866 using PHCState = mozilla::phc::PHCState;
867 void SetState(PHCState aState) {
868 if (mPhcState != PHCState::Enabled && aState == PHCState::Enabled) {
869 MutexAutoLock lock(GMut::sMutex);
870 // Reset the RNG at this point with a better seed.
871 ResetRNG();
872 GAtomic::Init(Rnd64ToDelay(mAvgFirstAllocDelay, Random64(lock)));
875 mPhcState = aState;
878 void ResetRNG() {
879 mRNG = non_crypto::XorShift128PlusRNG(RandomSeed<0>(), RandomSeed<1>());
882 void SetProbabilities(int64_t aAvgDelayFirst, int64_t aAvgDelayNormal,
883 int64_t aAvgDelayPageReuse) {
884 MutexAutoLock lock(GMut::sMutex);
886 mAvgFirstAllocDelay = CheckProbability(aAvgDelayFirst);
887 mAvgAllocDelay = CheckProbability(aAvgDelayNormal);
888 mAvgPageReuseDelay = CheckProbability(aAvgDelayPageReuse);
891 private:
892 template <int N>
893 uint64_t RandomSeed() {
894 // An older version of this code used RandomUint64() here, but on Mac that
895 // function uses arc4random(), which can allocate, which would cause
896 // re-entry, which would be bad. So we just use time(), a local variable
897 // address and a global variable address. These are mediocre sources of
898 // entropy, but good enough for PHC.
899 static_assert(N == 0 || N == 1 || N == 2, "must be 0, 1 or 2");
900 uint64_t seed;
901 if (N == 0) {
902 time_t t = time(nullptr);
903 seed = t ^ (t << 32);
904 } else if (N == 1) {
905 seed = uintptr_t(&seed) ^ (uintptr_t(&seed) << 32);
906 } else {
907 seed = uintptr_t(&gConst) ^ (uintptr_t(&gConst) << 32);
909 return seed;
912 void AssertAllocPageInUse(GMutLock, const AllocPageInfo& aPage) {
913 MOZ_ASSERT(aPage.mState == AllocPageState::InUse);
914 // There is nothing to assert about aPage.mArenaId.
915 MOZ_ASSERT(aPage.mBaseAddr);
916 MOZ_ASSERT(aPage.UsableSize() > 0);
917 MOZ_ASSERT(aPage.mAllocStack.isSome());
918 MOZ_ASSERT(aPage.mFreeStack.isNothing());
919 MOZ_ASSERT(aPage.mReuseTime == kMaxTime);
922 void AssertAllocPageNotInUse(GMutLock, const AllocPageInfo& aPage) {
923 // We can assert a lot about `NeverAllocated` pages, but not much about
924 // `Freed` pages.
925 #ifdef DEBUG
926 bool isFresh = aPage.mState == AllocPageState::NeverAllocated;
927 MOZ_ASSERT(isFresh || aPage.mState == AllocPageState::Freed);
928 MOZ_ASSERT_IF(isFresh, aPage.mArenaId == Nothing());
929 MOZ_ASSERT(isFresh == (aPage.mBaseAddr == nullptr));
930 MOZ_ASSERT(isFresh == (aPage.mAllocStack.isNothing()));
931 MOZ_ASSERT(isFresh == (aPage.mFreeStack.isNothing()));
932 MOZ_ASSERT(aPage.mReuseTime != kMaxTime);
933 #endif
936 // RNG for deciding which allocations to treat specially. It doesn't need to
937 // be high quality.
941 non_crypto::XorShift128PlusRNG mRNG;
943 AllocPageInfo mAllocPages[kNumAllocPages];
944 #if PHC_LOGGING
945 Time mFreeTime[kNumAllocPages];
947 // How many allocations that could have been page allocs actually were? As
948 // constrained by kNumAllocPages. If the hit ratio isn't close to 100% it's
949 // likely that the global constants are poorly chosen.
950 size_t mPageAllocHits = 0;
951 size_t mPageAllocMisses = 0;
952 #endif
954 // This will only ever be updated from one thread. The other threads should
955 // eventually get the update.
956 Atomic<PHCState, Relaxed> mPhcState =
957 Atomic<PHCState, Relaxed>(DEFAULT_STATE);
959 // The average delay before doing any page allocations at the start of a
960 // process. Note that roughly 1 million allocations occur in the main process
961 // while starting the browser. The delay range is 1..mAvgFirstAllocDelay*2.
962 Delay mAvgFirstAllocDelay = 64 * 1024;
964 // The average delay until the next attempted page allocation, once we get
965 // past the first delay. The delay range is 1..mAvgAllocDelay*2.
966 Delay mAvgAllocDelay = 16 * 1024;
968 // The average delay before reusing a freed page. Should be significantly
969 // larger than mAvgAllocDelay, otherwise there's not much point in having it.
970 // The delay range is (mAvgPageReuseDelay / 2)..(mAvgPageReuseDelay / 2 * 3).
971 // This is different to the other delay ranges in not having a minimum of 1,
972 // because that's such a short delay that there is a high likelihood of bad
973 // stacks in any crash report.
974 Delay mAvgPageReuseDelay = 256 * 1024;
976 public:
977 Delay GetAvgAllocDelay(const MutexAutoLock&) { return mAvgAllocDelay; }
978 Delay GetAvgFirstAllocDelay(const MutexAutoLock&) {
979 return mAvgFirstAllocDelay;
981 Delay GetAvgPageReuseDelay(const MutexAutoLock&) {
982 return mAvgPageReuseDelay;
986 Mutex GMut::sMutex;
988 static GMut* gMut;
990 // When PHC wants to crash we first have to unlock so that the crash reporter
991 // can call into PHC to look up its pointer. That also means that before calling
992 // PHCCrash please ensure that state is consistent. Because this can report an
993 // arbitrary string, use of it must be reviewed by Firefox data stewards.
994 static void PHCCrash(GMutLock, const char* aMessage)
995 MOZ_REQUIRES(GMut::sMutex) {
996 GMut::sMutex.Unlock();
997 MOZ_CRASH_UNSAFE(aMessage);
1000 // On MacOS, the first __thread/thread_local access calls malloc, which leads
1001 // to an infinite loop. So we use pthread-based TLS instead, which somehow
1002 // doesn't have this problem.
1003 #if !defined(XP_DARWIN)
1004 # define PHC_THREAD_LOCAL(T) MOZ_THREAD_LOCAL(T)
1005 #else
1006 # define PHC_THREAD_LOCAL(T) \
1007 detail::ThreadLocal<T, detail::ThreadLocalKeyStorage>
1008 #endif
1010 // Thread-local state.
1011 class GTls {
1012 public:
1013 GTls(const GTls&) = delete;
1015 const GTls& operator=(const GTls&) = delete;
1017 // When true, PHC does as little as possible.
1019 // (a) It does not allocate any new page allocations.
1021 // (b) It avoids doing any operations that might call malloc/free/etc., which
1022 // would cause re-entry into PHC. (In practice, MozStackWalk() is the
1023 // only such operation.) Note that calls to the functions in MozJemalloc
1024 // are ok.
1026 // For example, replace_malloc() will just fall back to mozjemalloc. However,
1027 // operations involving existing allocations are more complex, because those
1028 // existing allocations may be page allocations. For example, if
1029 // replace_free() is passed a page allocation on a PHC-disabled thread, it
1030 // will free the page allocation in the usual way, but it will get a dummy
1031 // freeStack in order to avoid calling MozStackWalk(), as per (b) above.
1033 // This single disabling mechanism has two distinct uses.
1035 // - It's used to prevent re-entry into PHC, which can cause correctness
1036 // problems. For example, consider this sequence.
1038 // 1. enter replace_free()
1039 // 2. which calls PageFree()
1040 // 3. which calls MozStackWalk()
1041 // 4. which locks a mutex M, and then calls malloc
1042 // 5. enter replace_malloc()
1043 // 6. which calls MaybePageAlloc()
1044 // 7. which calls MozStackWalk()
1045 // 8. which (re)locks a mutex M --> deadlock
1047 // We avoid this sequence by "disabling" the thread in PageFree() (at step
1048 // 2), which causes MaybePageAlloc() to fail, avoiding the call to
1049 // MozStackWalk() (at step 7).
1051 // In practice, realloc or free of a PHC allocation is unlikely on a thread
1052 // that is disabled because of this use: MozStackWalk() will probably only
1053 // realloc/free allocations that it allocated itself, but those won't be
1054 // page allocations because PHC is disabled before calling MozStackWalk().
1056 // (Note that MaybePageAlloc() could safely do a page allocation so long as
1057 // it avoided calling MozStackWalk() by getting a dummy allocStack. But it
1058 // wouldn't be useful, and it would prevent the second use below.)
1060 // - It's used to prevent PHC allocations in some tests that rely on
1061 // mozjemalloc's exact allocation behaviour, which PHC does not replicate
1062 // exactly. (Note that (b) isn't necessary for this use -- MozStackWalk()
1063 // could be safely called -- but it is necessary for the first use above.)
1066 static void Init() {
1067 if (!tlsIsDisabled.init()) {
1068 MOZ_CRASH();
1072 static void DisableOnCurrentThread() {
1073 MOZ_ASSERT(!GTls::tlsIsDisabled.get());
1074 tlsIsDisabled.set(true);
1077 static void EnableOnCurrentThread() {
1078 MOZ_ASSERT(GTls::tlsIsDisabled.get());
1079 MutexAutoLock lock(GMut::sMutex);
1080 Delay avg_delay = gMut->GetAvgAllocDelay(lock);
1081 Delay avg_first_delay = gMut->GetAvgFirstAllocDelay(lock);
1082 if (GAtomic::AllocDelayHasWrapped(avg_delay, avg_first_delay)) {
1083 GAtomic::SetAllocDelay(Rnd64ToDelay(avg_delay, gMut->Random64(lock)));
1085 tlsIsDisabled.set(false);
1088 static bool IsDisabledOnCurrentThread() { return tlsIsDisabled.get(); }
1090 private:
1091 static PHC_THREAD_LOCAL(bool) tlsIsDisabled;
1094 PHC_THREAD_LOCAL(bool) GTls::tlsIsDisabled;
1096 class AutoDisableOnCurrentThread {
1097 public:
1098 AutoDisableOnCurrentThread(const AutoDisableOnCurrentThread&) = delete;
1100 const AutoDisableOnCurrentThread& operator=(
1101 const AutoDisableOnCurrentThread&) = delete;
1103 explicit AutoDisableOnCurrentThread() { GTls::DisableOnCurrentThread(); }
1104 ~AutoDisableOnCurrentThread() { GTls::EnableOnCurrentThread(); }
1107 //---------------------------------------------------------------------------
1108 // Initialisation
1109 //---------------------------------------------------------------------------
1111 // WARNING: this function runs *very* early -- before all static initializers
1112 // have run. For this reason, non-scalar globals (gConst, gMut) are allocated
1113 // dynamically (so we can guarantee their construction in this function) rather
1114 // than statically. GAtomic and GTls contain simple static data that doesn't
1115 // involve static initializers so they don't need to be allocated dynamically.
1116 static bool phc_init() {
1117 if (GetKernelPageSize() != kPageSize) {
1118 return false;
1121 // gConst and gMut are never freed. They live for the life of the process.
1122 gConst = InfallibleAllocPolicy::new_<GConst>();
1124 GTls::Init();
1125 gMut = InfallibleAllocPolicy::new_<GMut>();
1127 #ifndef XP_WIN
1128 // Avoid deadlocks when forking by acquiring our state lock prior to forking
1129 // and releasing it after forking. See |LogAlloc|'s |replace_init| for
1130 // in-depth details.
1131 pthread_atfork(GMut::prefork, GMut::postfork_parent, GMut::postfork_child);
1132 #endif
1134 return true;
1137 static inline bool maybe_init() {
1138 static bool sInitSuccess = []() { return phc_init(); }();
1139 return sInitSuccess;
1142 //---------------------------------------------------------------------------
1143 // Page allocation operations
1144 //---------------------------------------------------------------------------
1146 // Attempt a page allocation if the time and the size are right. Allocated
1147 // memory is zeroed if aZero is true. On failure, the caller should attempt a
1148 // normal allocation via MozJemalloc. Must not be called in a context where
1149 // GMut::sMutex is already locked, because it locks that mutex itself.
1150 static void* MaybePageAlloc(const Maybe<arena_id_t>& aArenaId, size_t aReqSize,
1151 size_t aAlignment, bool aZero) {
1152 MOZ_ASSERT(IsPowerOfTwo(aAlignment));
1154 if (!maybe_init()) {
1155 return nullptr;
1158 if (aReqSize > kPageSize) {
1159 return nullptr;
1162 MOZ_ASSERT(gMut);
1163 if (!gMut->ShouldMakeNewAllocations()) {
1164 return nullptr;
1167 GAtomic::IncrementNow();
1169 // Decrement the delay. If it's zero, we do a page allocation and reset the
1170 // delay to a random number. Because the assignment to the random number isn't
1171 // atomic w.r.t. the decrement, we might have a sequence like this:
1173 // Thread 1 Thread 2 Thread 3
1174 // -------- -------- --------
1175 // (a) newDelay = --sAllocDelay (-> 0)
1176 // (b) --sAllocDelay (-> -1)
1177 // (c) (newDelay != 0) fails
1178 // (d) --sAllocDelay (-> -2)
1179 // (e) sAllocDelay = new_random_number()
1181 // It's critical that sAllocDelay has ReleaseAcquire semantics, because that
1182 // guarantees that exactly one thread will see sAllocDelay have the value 0.
1183 // (Relaxed semantics wouldn't guarantee that.)
1185 // Note that sAllocDelay is unsigned and we expect that it will wrap after
1186 // being decremented "below" zero. It must be unsigned so that IsPowerOfTwo()
1187 // can work on some Delay values.
1189 // Finally, note that the decrements that occur between (a) and (e) above are
1190 // effectively ignored, because (e) clobbers them. This shouldn't be a
1191 // problem; it effectively just adds a little more randomness to
1192 // new_random_number(). An early version of this code tried to account for
1193 // these decrements by doing `sAllocDelay += new_random_number()`. However, if
1194 // new_random_value() is small, the number of decrements between (a) and (e)
1195 // can easily exceed it, whereupon sAllocDelay ends up negative after
1196 // `sAllocDelay += new_random_number()`, and the zero-check never succeeds
1197 // again. (At least, not until sAllocDelay wraps around on overflow, which
1198 // would take a very long time indeed.)
1200 int32_t newDelay = GAtomic::DecrementDelay();
1201 if (newDelay != 0) {
1202 return nullptr;
1205 if (GTls::IsDisabledOnCurrentThread()) {
1206 return nullptr;
1209 // Disable on this thread *before* getting the stack trace.
1210 AutoDisableOnCurrentThread disable;
1212 // Get the stack trace *before* locking the mutex. If we return nullptr then
1213 // it was a waste, but it's not so frequent, and doing a stack walk while
1214 // the mutex is locked is problematic (see the big comment on
1215 // StackTrace::Fill() for details).
1216 StackTrace allocStack;
1217 allocStack.Fill();
1219 MutexAutoLock lock(GMut::sMutex);
1221 Time now = GAtomic::Now();
1222 Delay newAllocDelay =
1223 Rnd64ToDelay(gMut->GetAvgAllocDelay(lock), gMut->Random64(lock));
1225 // We start at a random page alloc and wrap around, to ensure pages get even
1226 // amounts of use.
1227 uint8_t* ptr = nullptr;
1228 uint8_t* pagePtr = nullptr;
1229 for (uintptr_t n = 0, i = size_t(gMut->Random64(lock)) % kNumAllocPages;
1230 n < kNumAllocPages; n++, i = (i + 1) % kNumAllocPages) {
1231 if (!gMut->IsPageAllocatable(lock, i, now)) {
1232 continue;
1235 #if PHC_LOGGING
1236 Time lifetime = 0;
1237 #endif
1238 pagePtr = gConst->AllocPagePtr(i);
1239 MOZ_ASSERT(pagePtr);
1240 bool ok =
1241 #ifdef XP_WIN
1242 !!VirtualAlloc(pagePtr, kPageSize, MEM_COMMIT, PAGE_READWRITE);
1243 #else
1244 mprotect(pagePtr, kPageSize, PROT_READ | PROT_WRITE) == 0;
1245 #endif
1247 if (!ok) {
1248 pagePtr = nullptr;
1249 continue;
1252 size_t usableSize = MozJemalloc::malloc_good_size(aReqSize);
1253 MOZ_ASSERT(usableSize > 0);
1255 // Put the allocation as close to the end of the page as possible,
1256 // allowing for alignment requirements.
1257 ptr = pagePtr + kPageSize - usableSize;
1258 if (aAlignment != 1) {
1259 ptr = reinterpret_cast<uint8_t*>(
1260 (reinterpret_cast<uintptr_t>(ptr) & ~(aAlignment - 1)));
1263 #if PHC_LOGGING
1264 Time then = gMut->GetFreeTime(i);
1265 lifetime = then != 0 ? now - then : 0;
1266 #endif
1268 gMut->SetPageInUse(lock, i, aArenaId, ptr, allocStack);
1270 if (aZero) {
1271 memset(ptr, 0, usableSize);
1272 } else {
1273 #ifdef DEBUG
1274 memset(ptr, kAllocJunk, usableSize);
1275 #endif
1278 gMut->IncPageAllocHits(lock);
1279 #if PHC_LOGGING
1280 phc::PHCStats stats = gMut->GetPageStats(lock);
1281 #endif
1282 LOG("PageAlloc(%zu, %zu) -> %p[%zu]/%p (%zu) (z%zu), sAllocDelay <- %zu, "
1283 "fullness %zu/%zu/%zu, hits %zu/%zu (%zu%%), lifetime %zu\n",
1284 aReqSize, aAlignment, pagePtr, i, ptr, usableSize, size_t(aZero),
1285 size_t(newAllocDelay), stats.mSlotsAllocated, stats.mSlotsFreed,
1286 kNumAllocPages, gMut->PageAllocHits(lock),
1287 gMut->PageAllocAttempts(lock), gMut->PageAllocHitRate(lock), lifetime);
1288 break;
1291 if (!pagePtr) {
1292 // No pages are available, or VirtualAlloc/mprotect failed.
1293 gMut->IncPageAllocMisses(lock);
1294 #if PHC_LOGGING
1295 phc::PHCStats stats = gMut->GetPageStats(lock);
1296 #endif
1297 LOG("No PageAlloc(%zu, %zu), sAllocDelay <- %zu, fullness %zu/%zu/%zu, "
1298 "hits %zu/%zu (%zu%%)\n",
1299 aReqSize, aAlignment, size_t(newAllocDelay), stats.mSlotsAllocated,
1300 stats.mSlotsFreed, kNumAllocPages, gMut->PageAllocHits(lock),
1301 gMut->PageAllocAttempts(lock), gMut->PageAllocHitRate(lock));
1304 // Set the new alloc delay.
1305 GAtomic::SetAllocDelay(newAllocDelay);
1307 return ptr;
1310 static void FreePage(GMutLock aLock, uintptr_t aIndex,
1311 const Maybe<arena_id_t>& aArenaId,
1312 const StackTrace& aFreeStack, Delay aReuseDelay)
1313 MOZ_REQUIRES(GMut::sMutex) {
1314 void* pagePtr = gConst->AllocPagePtr(aIndex);
1316 #ifdef XP_WIN
1317 if (!VirtualFree(pagePtr, kPageSize, MEM_DECOMMIT)) {
1318 PHCCrash(aLock, "VirtualFree failed");
1320 #else
1321 if (mmap(pagePtr, kPageSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON,
1322 -1, 0) == MAP_FAILED) {
1323 PHCCrash(aLock, "mmap failed");
1325 #endif
1327 gMut->SetPageFreed(aLock, aIndex, aArenaId, aFreeStack, aReuseDelay);
1330 //---------------------------------------------------------------------------
1331 // replace-malloc machinery
1332 //---------------------------------------------------------------------------
1334 // This handles malloc, moz_arena_malloc, and realloc-with-a-nullptr.
1335 MOZ_ALWAYS_INLINE static void* PageMalloc(const Maybe<arena_id_t>& aArenaId,
1336 size_t aReqSize) {
1337 void* ptr = MaybePageAlloc(aArenaId, aReqSize, /* aAlignment */ 1,
1338 /* aZero */ false);
1339 return ptr ? ptr
1340 : (aArenaId.isSome()
1341 ? MozJemalloc::moz_arena_malloc(*aArenaId, aReqSize)
1342 : MozJemalloc::malloc(aReqSize));
1345 inline void* MozJemallocPHC::malloc(size_t aReqSize) {
1346 return PageMalloc(Nothing(), aReqSize);
1349 static Delay ReuseDelay(GMutLock aLock) {
1350 Delay avg_reuse_delay = gMut->GetAvgPageReuseDelay(aLock);
1351 return (avg_reuse_delay / 2) +
1352 Rnd64ToDelay(avg_reuse_delay / 2, gMut->Random64(aLock));
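// A worked example with the default mAvgPageReuseDelay of 256 * 1024: this
// returns 131072 + (1..262144), i.e. a delay in 131073..393216, matching the
// (mAvgPageReuseDelay / 2)..(mAvgPageReuseDelay / 2 * 3) range described
// above the member's definition.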
1355 // This handles both calloc and moz_arena_calloc.
1356 MOZ_ALWAYS_INLINE static void* PageCalloc(const Maybe<arena_id_t>& aArenaId,
1357 size_t aNum, size_t aReqSize) {
1358 CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aReqSize;
1359 if (!checkedSize.isValid()) {
1360 return nullptr;
1363 void* ptr = MaybePageAlloc(aArenaId, checkedSize.value(), /* aAlignment */ 1,
1364 /* aZero */ true);
1365 return ptr ? ptr
1366 : (aArenaId.isSome()
1367 ? MozJemalloc::moz_arena_calloc(*aArenaId, aNum, aReqSize)
1368 : MozJemalloc::calloc(aNum, aReqSize));
1371 inline void* MozJemallocPHC::calloc(size_t aNum, size_t aReqSize) {
1372 return PageCalloc(Nothing(), aNum, aReqSize);
1375 // This function handles both realloc and moz_arena_realloc.
1377 // As always, realloc is complicated, and doubly so when there are two
1378 // different kinds of allocations in play. Here are the possible transitions,
1379 // and what we do in practice.
1381 // - normal-to-normal: This is straightforward and obviously necessary.
1383 // - normal-to-page: This is disallowed because it would require getting the
1384 // arenaId of the normal allocation, which isn't possible in non-DEBUG builds
1385 // for security reasons.
1387 // - page-to-page: This is done whenever possible, i.e. whenever the new size
1388 // is less than or equal to one page (kPageSize). This choice counterbalances the
1389 // disallowing of normal-to-page allocations, in order to avoid biasing
1390 // towards or away from page allocations. It always occurs in-place.
1392 // - page-to-normal: This is done only when necessary, i.e. only when the new
1393 // size is larger than a page (kPageSize). This choice naturally flows from the
1394 // prior choice on page-to-page transitions.
1396 // In summary: realloc doesn't change the allocation kind unless it must.
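// For example (an illustrative sketch, assuming 4 KiB pages), if `p` is a
// PHC page allocation and `q` is a normal mozjemalloc allocation:
//
//   realloc(p, 100);   // page-to-page: p stays on its allocation page
//   realloc(p, 8192);  // page-to-normal: moved to mozjemalloc, page freed
//   realloc(q, 100);   // normal-to-normal: never becomes a page allocation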
1398 MOZ_ALWAYS_INLINE static void* MaybePageRealloc(
1399 const Maybe<arena_id_t>& aArenaId, void* aOldPtr, size_t aNewSize) {
1400 if (!aOldPtr) {
1401 // Null pointer. Treat like malloc(aNewSize).
1402 return PageMalloc(aArenaId, aNewSize);
1405 if (!maybe_init()) {
1406 return nullptr;
1409 PtrKind pk = gConst->PtrKind(aOldPtr);
1410 if (pk.IsNothing()) {
1411 // A normal-to-normal transition.
1412 return nullptr;
1415 if (pk.IsGuardPage()) {
1416 GMut::CrashOnGuardPage(aOldPtr);
1419 // At this point we know we have an allocation page.
1420 uintptr_t index = pk.AllocPageIndex();
1422 // A page-to-something transition.
1424 // Note that `disable` has no effect unless it is emplaced below.
1425 Maybe<AutoDisableOnCurrentThread> disable;
1426 // Get the stack trace *before* locking the mutex.
1427 StackTrace stack;
1428 if (GTls::IsDisabledOnCurrentThread()) {
1429 // PHC is disabled on this thread. Leave the stack empty.
1430 } else {
1431 // Disable on this thread *before* getting the stack trace.
1432 disable.emplace();
1433 stack.Fill();
1436 MutexAutoLock lock(GMut::sMutex);
1438 // Check for realloc() of a freed block.
1439 gMut->EnsureValidAndInUse(lock, aOldPtr, index);
1441 if (aNewSize <= kPageSize && gMut->ShouldMakeNewAllocations()) {
1442 // A page-to-page transition. Just keep using the page allocation. We do
1443 // this even if the thread is disabled, because it doesn't create a new
1444 // page allocation. Note that ResizePageInUse() checks aArenaId.
1446 // Move the bytes with memmove(), because the old allocation and the new
1447 // allocation overlap. Move the usable size rather than the requested size,
1448 // because the user might have used malloc_usable_size() and filled up the
1449 // usable size.
1450 size_t oldUsableSize = gMut->PageUsableSize(lock, index);
1451 size_t newUsableSize = MozJemalloc::malloc_good_size(aNewSize);
1452 uint8_t* pagePtr = gConst->AllocPagePtr(index);
1453 uint8_t* newPtr = pagePtr + kPageSize - newUsableSize;
1454 memmove(newPtr, aOldPtr, std::min(oldUsableSize, aNewSize));
1455 gMut->ResizePageInUse(lock, index, aArenaId, newPtr, stack);
1456 LOG("PageRealloc-Reuse(%p, %zu) -> %p\n", aOldPtr, aNewSize, newPtr);
1457 return newPtr;
1460 // A page-to-normal transition (with the new size greater than page-sized).
1461 // (Note that aArenaId is checked below.)
1462 void* newPtr;
1463 if (aArenaId.isSome()) {
1464 newPtr = MozJemalloc::moz_arena_malloc(*aArenaId, aNewSize);
1465 } else {
1466 Maybe<arena_id_t> oldArenaId = gMut->PageArena(lock, index);
1467 newPtr = (oldArenaId.isSome()
1468 ? MozJemalloc::moz_arena_malloc(*oldArenaId, aNewSize)
1469 : MozJemalloc::malloc(aNewSize));
1471 if (!newPtr) {
1472 return nullptr;
1475 Delay reuseDelay = ReuseDelay(lock);
1477 // Copy the usable size rather than the requested size, because the user
1478 // might have used malloc_usable_size() and filled up the usable size. Note
1479 // that FreePage() checks aArenaId (via SetPageFreed()).
1480 size_t oldUsableSize = gMut->PageUsableSize(lock, index);
1481 memcpy(newPtr, aOldPtr, std::min(oldUsableSize, aNewSize));
1482 FreePage(lock, index, aArenaId, stack, reuseDelay);
1483 LOG("PageRealloc-Free(%p[%zu], %zu) -> %p, %zu delay, reuse at ~%zu\n",
1484 aOldPtr, index, aNewSize, newPtr, size_t(reuseDelay),
1485 size_t(GAtomic::Now()) + reuseDelay);
1487 return newPtr;
1490 MOZ_ALWAYS_INLINE static void* PageRealloc(const Maybe<arena_id_t>& aArenaId,
1491 void* aOldPtr, size_t aNewSize) {
1492 void* ptr = MaybePageRealloc(aArenaId, aOldPtr, aNewSize);
1494 return ptr ? ptr
1495 : (aArenaId.isSome() ? MozJemalloc::moz_arena_realloc(
1496 *aArenaId, aOldPtr, aNewSize)
1497 : MozJemalloc::realloc(aOldPtr, aNewSize));
1500 inline void* MozJemallocPHC::realloc(void* aOldPtr, size_t aNewSize) {
1501 return PageRealloc(Nothing(), aOldPtr, aNewSize);
// This handles both free and moz_arena_free.
MOZ_ALWAYS_INLINE static bool MaybePageFree(const Maybe<arena_id_t>& aArenaId,
                                            void* aPtr) {
  if (!maybe_init()) {
    return false;
  }

  PtrKind pk = gConst->PtrKind(aPtr);
  if (pk.IsNothing()) {
    // Not a page allocation.
    return false;
  }

  if (pk.IsGuardPage()) {
    GMut::CrashOnGuardPage(aPtr);
  }

  // At this point we know we have an allocation page.
  uintptr_t index = pk.AllocPageIndex();

  // Note that `disable` has no effect unless it is emplaced below.
  Maybe<AutoDisableOnCurrentThread> disable;
  // Get the stack trace *before* locking the mutex.
  StackTrace freeStack;
  if (GTls::IsDisabledOnCurrentThread()) {
    // PHC is disabled on this thread. Leave the stack empty.
  } else {
    // Disable on this thread *before* getting the stack trace.
    disable.emplace();
    freeStack.Fill();
  }

  MutexAutoLock lock(GMut::sMutex);

  // Check for a double-free.
  gMut->EnsureValidAndInUse(lock, aPtr, index);

  // Note that FreePage() checks aArenaId (via SetPageFreed()).
  Delay reuseDelay = ReuseDelay(lock);
  FreePage(lock, index, aArenaId, freeStack, reuseDelay);

#if PHC_LOGGING
  phc::PHCStats stats = gMut->GetPageStats(lock);
#endif
  LOG("PageFree(%p[%zu]), %zu delay, reuse at ~%zu, fullness %zu/%zu/%zu\n",
      aPtr, index, size_t(reuseDelay), size_t(GAtomic::Now()) + reuseDelay,
      stats.mSlotsAllocated, stats.mSlotsFreed, kNumAllocPages);

  return true;
}
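// A minimal sketch of the defect that the EnsureValidAndInUse() check above
// is meant to catch (the size is hypothetical):
//
//   void* p = malloc(64);  // suppose PHC sampled this allocation
//   free(p);               // the page is marked freed and queued for reuse
//   free(p);               // double-free: the page is no longer in use, so
//                          // PHC deliberately crashes here and the crash
//                          // report carries the alloc and free stacks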
MOZ_ALWAYS_INLINE static void PageFree(const Maybe<arena_id_t>& aArenaId,
                                       void* aPtr) {
  bool res = MaybePageFree(aArenaId, aPtr);
  if (!res) {
    aArenaId.isSome() ? MozJemalloc::moz_arena_free(*aArenaId, aPtr)
                      : MozJemalloc::free(aPtr);
  }
}
inline void MozJemallocPHC::free(void* aPtr) { PageFree(Nothing(), aPtr); }
// This handles memalign and moz_arena_memalign.
MOZ_ALWAYS_INLINE static void* PageMemalign(const Maybe<arena_id_t>& aArenaId,
                                            size_t aAlignment,
                                            size_t aReqSize) {
  MOZ_RELEASE_ASSERT(IsPowerOfTwo(aAlignment));

  // PHC can't satisfy an alignment greater than a page size, so fall back to
  // mozjemalloc in that case.
  void* ptr = nullptr;
  if (aAlignment <= kPageSize) {
    ptr = MaybePageAlloc(aArenaId, aReqSize, aAlignment, /* aZero */ false);
  }
  return ptr ? ptr
             : (aArenaId.isSome()
                    ? MozJemalloc::moz_arena_memalign(*aArenaId, aAlignment,
                                                      aReqSize)
                    : MozJemalloc::memalign(aAlignment, aReqSize));
}
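// A minimal sketch of the two alignment cases above (the numbers assume a
// hypothetical 4 KiB kPageSize; the real value is platform-dependent):
//
//   void* a = memalign(64, 256);     // 64 <= kPageSize: PHC may sample this
//   void* b = memalign(16384, 256);  // alignment > kPageSize: always served
//                                    // by mozjemalloc, never by PHC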
inline void* MozJemallocPHC::memalign(size_t aAlignment, size_t aReqSize) {
  return PageMemalign(Nothing(), aAlignment, aReqSize);
}
inline size_t MozJemallocPHC::malloc_usable_size(usable_ptr_t aPtr) {
  if (!maybe_init()) {
    return MozJemalloc::malloc_usable_size(aPtr);
  }

  PtrKind pk = gConst->PtrKind(aPtr);
  if (pk.IsNothing()) {
    // Not a page allocation. Measure it normally.
    return MozJemalloc::malloc_usable_size(aPtr);
  }

  if (pk.IsGuardPage()) {
    GMut::CrashOnGuardPage(const_cast<void*>(aPtr));
  }

  // At this point we know aPtr lands within an allocation page, due to the
  // math done in the PtrKind constructor. But if aPtr points to memory
  // before the base address of the allocation, we return 0.
  uintptr_t index = pk.AllocPageIndex();

  MutexAutoLock lock(GMut::sMutex);

  void* pageBaseAddr = gMut->AllocPageBaseAddr(lock, index);

  if (MOZ_UNLIKELY(aPtr < pageBaseAddr)) {
    return 0;
  }

  return gMut->PageUsableSize(lock, index);
}
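// A minimal sketch of the "before the base address" case above, using made-up
// addresses and a 4 KiB page:
//
//   allocation page:  [0x1000 ............ 0x1f40 ............ 0x2000)
//                                          ^ base of a 192-byte allocation
//   malloc_usable_size((void*)0x1f40)  -> 192 (the usable size)
//   malloc_usable_size((void*)0x1200)  -> 0   (inside the page, but before
//                                              the allocation's base address)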
static size_t metadata_size() {
  return MozJemalloc::malloc_usable_size(gConst) +
         MozJemalloc::malloc_usable_size(gMut);
}
inline void MozJemallocPHC::jemalloc_stats_internal(
    jemalloc_stats_t* aStats, jemalloc_bin_stats_t* aBinStats) {
  MozJemalloc::jemalloc_stats_internal(aStats, aBinStats);

  if (!maybe_init()) {
    // If we're not initialised, then we're not using any additional memory and
    // have nothing to add to the report.
    return;
  }

  // We allocate our memory from jemalloc, so it has already been counted in
  // "mapped" and "allocated". Subtract the memory we obtained from jemalloc
  // from "allocated" before adding back only the parts that we have actually
  // handed out to Firefox.
  aStats->allocated -= kAllPagesJemallocSize;

  size_t allocated = 0;
  {
    MutexAutoLock lock(GMut::sMutex);

    // Add usable space of in-use allocations to `allocated`.
    for (size_t i = 0; i < kNumAllocPages; i++) {
      if (gMut->IsPageInUse(lock, i)) {
        allocated += gMut->PageUsableSize(lock, i);
      }
    }
  }
  aStats->allocated += allocated;

  // `guards` is the gap between `allocated` and `mapped`. In some ways this
  // almost fits into aStats->wasted, since it feels like wasted memory.
  // However, wasted should only include committed memory, and these guard
  // pages are uncommitted. Therefore we don't include it anywhere.
  // size_t guards = mapped - allocated;

  // aStats.page_cache and aStats.bin_unused are left unchanged because PHC
  // doesn't have anything corresponding to those.

  // The metadata is stored in normal heap allocations, so it's measured by
  // mozjemalloc as `allocated`. Move it into `bookkeeping`. It's also
  // reported under explicit/heap-overhead/phc/fragmentation in about:memory.
  size_t bookkeeping = metadata_size();
  aStats->allocated -= bookkeeping;
  aStats->bookkeeping += bookkeeping;
}
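// A worked example of the adjustments above, with made-up numbers: suppose
// kAllPagesJemallocSize is 512 KiB, the in-use PHC allocations have a combined
// usable size of 3000 bytes, and metadata_size() returns 4096.
//
//   aStats->allocated   -= 512 * 1024;  // remove PHC's whole reservation
//   aStats->allocated   += 3000;        // add back only what is handed out
//   aStats->allocated   -= 4096;        // metadata isn't a user allocation...
//   aStats->bookkeeping += 4096;        // ...it's allocator overhead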
inline void MozJemallocPHC::jemalloc_ptr_info(const void* aPtr,
                                              jemalloc_ptr_info_t* aInfo) {
  if (!maybe_init()) {
    return MozJemalloc::jemalloc_ptr_info(aPtr, aInfo);
  }

  // We need to implement this properly, because various code locations do
  // things like checking that allocations are in the expected arena.
  PtrKind pk = gConst->PtrKind(aPtr);
  if (pk.IsNothing()) {
    // Not a page allocation.
    return MozJemalloc::jemalloc_ptr_info(aPtr, aInfo);
  }

  if (pk.IsGuardPage()) {
    // Treat a guard page as unknown because there's no better alternative.
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  // At this point we know we have an allocation page.
  uintptr_t index = pk.AllocPageIndex();

  MutexAutoLock lock(GMut::sMutex);

  gMut->FillJemallocPtrInfo(lock, aPtr, index, aInfo);
#if DEBUG
  LOG("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu, %zu}\n", aPtr, index,
      size_t(aInfo->tag), aInfo->addr, aInfo->size, aInfo->arenaId);
#else
  LOG("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu}\n", aPtr, index,
      size_t(aInfo->tag), aInfo->addr, aInfo->size);
#endif
}
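// A minimal sketch of the kind of caller this matters for (`somePtr` is
// hypothetical):
//
//   jemalloc_ptr_info_t info;
//   jemalloc_ptr_info(somePtr, &info);
//   // For a live PHC allocation, info.tag describes a live block, info.addr
//   // is the allocation's base address within its page, and info.size is its
//   // usable size. Where the arena field is present it names the arena the
//   // allocation was requested from, so arena-membership checks keep passing.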
inline void* MozJemallocPHC::moz_arena_malloc(arena_id_t aArenaId,
                                              size_t aReqSize) {
  return PageMalloc(Some(aArenaId), aReqSize);
}

inline void* MozJemallocPHC::moz_arena_calloc(arena_id_t aArenaId, size_t aNum,
                                              size_t aReqSize) {
  return PageCalloc(Some(aArenaId), aNum, aReqSize);
}

inline void* MozJemallocPHC::moz_arena_realloc(arena_id_t aArenaId,
                                               void* aOldPtr, size_t aNewSize) {
  return PageRealloc(Some(aArenaId), aOldPtr, aNewSize);
}

inline void MozJemallocPHC::moz_arena_free(arena_id_t aArenaId, void* aPtr) {
  return PageFree(Some(aArenaId), aPtr);
}

inline void* MozJemallocPHC::moz_arena_memalign(arena_id_t aArenaId,
                                                size_t aAlignment,
                                                size_t aReqSize) {
  return PageMemalign(Some(aArenaId), aAlignment, aReqSize);
}
namespace mozilla::phc {
bool IsPHCAllocation(const void* aPtr, AddrInfo* aOut) {
  if (!maybe_init()) {
    return false;
  }

  PtrKind pk = gConst->PtrKind(aPtr);
  if (pk.IsNothing()) {
    return false;
  }

  bool isGuardPage = false;
  if (pk.IsGuardPage()) {
    if ((uintptr_t(aPtr) % kPageSize) < (kPageSize / 2)) {
      // The address is in the lower half of a guard page, so it's probably an
      // overflow. But first check that it is not on the very first guard
      // page, in which case it cannot be an overflow, and we ignore it.
      if (gConst->IsInFirstGuardPage(aPtr)) {
        return false;
      }

      // Get the allocation page preceding this guard page.
      pk = gConst->PtrKind(static_cast<const uint8_t*>(aPtr) - kPageSize);

    } else {
      // The address is in the upper half of a guard page, so it's probably an
      // underflow. Get the allocation page following this guard page.
      pk = gConst->PtrKind(static_cast<const uint8_t*>(aPtr) + kPageSize);
    }

    // Make a note of the fact that we hit a guard page.
    isGuardPage = true;
  }

  // At this point we know we have an allocation page.
  uintptr_t index = pk.AllocPageIndex();

  if (aOut) {
    if (GMut::sMutex.TryLock()) {
      gMut->FillAddrInfo(index, aPtr, isGuardPage, *aOut);
      LOG("IsPHCAllocation: %zu, %p, %zu, %zu, %zu\n", size_t(aOut->mKind),
          aOut->mBaseAddr, aOut->mUsableSize,
          aOut->mAllocStack.isSome() ? aOut->mAllocStack->mLength : 0,
          aOut->mFreeStack.isSome() ? aOut->mFreeStack->mLength : 0);
      GMut::sMutex.Unlock();
    } else {
      LOG("IsPHCAllocation: PHC is locked\n");
      aOut->mPhcWasLocked = true;
    }
  }

  return true;
}
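// A minimal sketch of the guard-page classification above, using made-up
// addresses and a 4 KiB page size. Suppose a guard page spans [0x5000, 0x6000):
//
//   aPtr = 0x5010:  0x5010 % 0x1000 = 0x010 < 0x800, so it is treated as an
//                   overflow off the allocation page ending at 0x5000.
//   aPtr = 0x5ff0:  0x5ff0 % 0x1000 = 0xff0 >= 0x800, so it is treated as an
//                   underflow off the allocation page starting at 0x6000.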
void DisablePHCOnCurrentThread() {
  GTls::DisableOnCurrentThread();
  LOG("DisablePHCOnCurrentThread: %zu\n", 0ul);
}
void ReenablePHCOnCurrentThread() {
  GTls::EnableOnCurrentThread();
  LOG("ReenablePHCOnCurrentThread: %zu\n", 0ul);
}
bool IsPHCEnabledOnCurrentThread() {
  bool enabled = !GTls::IsDisabledOnCurrentThread();
  LOG("IsPHCEnabledOnCurrentThread: %zu\n", size_t(enabled));
  return enabled;
}
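// A minimal sketch of how the two calls above are intended to be paired by a
// caller that does not want its allocations sampled for a stretch of code
// (the surrounding function is hypothetical):
//
//   void DoSomethingWithoutPHCSampling() {
//     mozilla::phc::DisablePHCOnCurrentThread();
//     // ... allocations made on this thread are not sampled by PHC here ...
//     mozilla::phc::ReenablePHCOnCurrentThread();
//   }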
void PHCMemoryUsage(MemoryUsage& aMemoryUsage) {
  if (!maybe_init()) {
    aMemoryUsage = MemoryUsage();
    return;
  }

  aMemoryUsage.mMetadataBytes = metadata_size();
  if (gMut) {
    MutexAutoLock lock(GMut::sMutex);
    aMemoryUsage.mFragmentationBytes = gMut->FragmentationBytes();
  } else {
    aMemoryUsage.mFragmentationBytes = 0;
  }
}
void GetPHCStats(PHCStats& aStats) {
  if (!maybe_init()) {
    aStats = PHCStats();
    return;
  }

  MutexAutoLock lock(GMut::sMutex);

  aStats = gMut->GetPageStats(lock);
}
// Enable or disable PHC at runtime. If PHC is disabled it will still trap
// bad uses of previous allocations, but won't track any new allocations.
void SetPHCState(PHCState aState) {
  if (!maybe_init()) {
    return;
  }

  gMut->SetState(aState);
}
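// A minimal sketch of the intended use: a caller (for example pref-observing
// code; the function name here is hypothetical) computes the desired PHCState
// and pushes it down:
//
//   void ApplyPhcSetting(mozilla::phc::PHCState aDesired) {
//     mozilla::phc::SetPHCState(aDesired);  // returns early if maybe_init()
//                                           // fails, as shown above
//   }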
void SetPHCProbabilities(int64_t aAvgDelayFirst, int64_t aAvgDelayNormal,
                         int64_t aAvgDelayPageReuse) {
  if (!maybe_init()) {
    return;
  }

  gMut->SetProbabilities(aAvgDelayFirst, aAvgDelayNormal, aAvgDelayPageReuse);
}

}  // namespace mozilla::phc