/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

// PHC is a probabilistic heap checker. A tiny fraction of randomly chosen heap
// allocations are subject to some expensive checking via the use of OS page
// access protection. A failed check triggers a crash, whereupon useful
// information about the failure is put into the crash report. The cost and
// coverage for each user is minimal, but spread over the entire user base the
// coverage becomes significant.
//
// The idea comes from Chromium, where it is called GWP-ASAN. (Firefox uses PHC
// as the name because GWP-ASAN is long, awkward, and doesn't have any
// particular meaning.)
//
// In the current implementation up to 64 allocations per process can become
// PHC allocations. These allocations must be page-sized or smaller. Each PHC
// allocation gets its own page, and when the allocation is freed its page is
// marked inaccessible until the page is reused for another allocation. This
// means that a use-after-free defect (which includes double-frees) will be
// caught if the use occurs before the page is reused for another allocation.
// The crash report will contain stack traces for the allocation site, the free
// site, and the use-after-free site, which is often enough to diagnose the
// defect.
//
// Also, each PHC allocation is followed by a guard page. The PHC allocation is
// positioned so that its end abuts the guard page (or as close as possible,
// given alignment constraints). This means that a bounds violation at the end
// of the allocation (overflow) will be caught. The crash report will contain
// stack traces for the allocation site and the bounds violation use site,
// which is often enough to diagnose the defect.
//
// (A bounds violation at the start of the allocation (underflow) will not be
// caught, unless it is sufficiently large to hit the preceding allocation's
// guard page, which is not that likely. It would be possible to look more
// assiduously for underflow by randomly placing some allocations at the end of
// the page and some at the start of the page, and GWP-ASAN does this. PHC does
// not, however, because overflow is likely to be much more common than
// underflow in practice.)
//
// We use a simple heuristic to categorize a guard page access as overflow or
// underflow: if the address falls in the lower half of the guard page, we
// assume it is overflow, otherwise we assume it is underflow. More
// sophisticated heuristics are possible, but this one is very simple, and it
// is likely that most overflows/underflows in practice are very close to the
// page boundary.
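//
// For illustration, a minimal sketch of that heuristic as used by
// PHCBridge::IsPHCAllocation() further below (assuming the faulting address
// is already known to fall inside a guard page):
//
//   bool looksLikeOverflow = (uintptr_t(aPtr) % kPageSize) < (kPageSize / 2);
//
// i.e. an access just past the end of the preceding allocation page lands in
// the lower half of the guard page and is reported as an overflow.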
//
// The design space for the randomization strategy is large. The current
// implementation has a large random delay before it starts operating, and a
// small random delay between each PHC allocation attempt. Each freed PHC
// allocation is quarantined for a medium random delay before being reused, in
// order to increase the chance of catching UAFs.
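//
// As a concrete illustration using the constants defined further below
// (kAvgFirstAllocDelay = 64 * 1024, kAvgAllocDelay = 16 * 1024,
// kAvgPageReuseDelay = 256 * 1024): on average a process performs ~64k
// sub-page allocations before the first PHC attempt, attempts a page
// allocation roughly once every ~16k allocations after that, and keeps a
// freed page quarantined for ~256k allocations before it may be reused.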
//
// The basic cost of PHC's operation is as follows.
//
// - The physical memory cost is 64 pages plus some metadata (including stack
//   traces) for each page. This amounts to 256 KiB per process on
//   architectures with 4 KiB pages and 1024 KiB on macOS/AArch64, which uses
//   16 KiB pages.
//
// - The virtual memory cost is the physical memory cost plus the guard pages:
//   another 64 pages. This amounts to another 256 KiB per process on
//   architectures with 4 KiB pages and 1024 KiB on macOS/AArch64, which uses
//   16 KiB pages. PHC is currently only enabled on 64-bit platforms so the
//   impact of the virtual memory usage is negligible.
//
// - Every allocation requires a size check and a decrement-and-check of an
//   atomic counter. When the counter reaches zero a PHC allocation can occur,
//   which involves marking a page as accessible and getting a stack trace for
//   the allocation site. Otherwise, mozjemalloc performs the allocation.
//
// - Every deallocation requires a range check on the pointer to see if it
//   involves a PHC allocation. (The choice to only do PHC allocations that are
//   a page or smaller enables this range check, because the 64 pages are
//   contiguous. Allowing larger allocations would make this more complicated,
//   and we definitely don't want something as slow as a hash table lookup on
//   every deallocation.) PHC deallocations involve marking a page as
//   inaccessible and getting a stack trace for the deallocation site.
//
// Note that calls to realloc(), free(), and malloc_usable_size() will
// immediately crash if the given pointer falls within a page allocation's
// page, but does not point to the start of the allocation itself.
//
//   void* p = malloc(64);
//   free(p + 1); // p+1 doesn't point to the allocation start; crash
//
// Such crashes will not have the PHC fields in the crash report.
//
// PHC-specific tests can be run with the following commands:
// - gtests: `./mach gtest '*PHC*'`
// - xpcshell-tests: `./mach test toolkit/crashreporter/test/unit`
//   - This runs some non-PHC tests as well.

#include <new>

#ifdef XP_WIN
#  include <process.h>
#else
#  include <sys/mman.h>
#  include <sys/types.h>
#  include <pthread.h>
#endif

#include "replace_malloc.h"
#include "FdPrintf.h"

#include "mozilla/Assertions.h"
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/Maybe.h"
#include "mozilla/StackWalk.h"
#include "mozilla/ThreadLocal.h"
#include "mozilla/XorShift128PlusRNG.h"

using namespace mozilla;

//---------------------------------------------------------------------------
// Utilities
//---------------------------------------------------------------------------

#ifdef ANDROID
// Android doesn't have pthread_atfork defined in pthread.h.
extern "C" MOZ_EXPORT int pthread_atfork(void (*)(void), void (*)(void),
                                          void (*)(void));
#endif

#ifndef DISALLOW_COPY_AND_ASSIGN
#  define DISALLOW_COPY_AND_ASSIGN(T) \
    T(const T&);                      \
    void operator=(const T&)
#endif

static malloc_table_t sMallocTable;

// This class provides infallible operations for the small number of heap
// allocations that PHC does for itself. It would be nice if we could use the
// InfallibleAllocPolicy from mozalloc, but PHC cannot use mozalloc.
class InfallibleAllocPolicy {
 public:
  static void AbortOnFailure(const void* aP) {
    if (!aP) {
      MOZ_CRASH("PHC failed to allocate");
    }
  }

  template <class T>
  static T* new_() {
    void* p = sMallocTable.malloc(sizeof(T));
    AbortOnFailure(p);
    return new (p) T;
  }
};

//---------------------------------------------------------------------------
// Stack traces
//---------------------------------------------------------------------------

// This code is similar to the equivalent code within DMD.

class StackTrace : public phc::StackTrace {
 public:
  StackTrace() : phc::StackTrace() {}

  void Clear() { mLength = 0; }

  void Fill();

 private:
  static void StackWalkCallback(uint32_t aFrameNumber, void* aPc, void* aSp,
                                void* aClosure) {
    StackTrace* st = (StackTrace*)aClosure;
    MOZ_ASSERT(st->mLength < kMaxFrames);
    st->mPcs[st->mLength] = aPc;
    st->mLength++;
    MOZ_ASSERT(st->mLength == aFrameNumber);
  }
};

// WARNING WARNING WARNING: this function must only be called when GMut::sMutex
// is *not* locked, otherwise we might get deadlocks.
//
// How? On Windows, MozStackWalk() can lock a mutex, M, from the shared library
// loader. Another thread might call malloc() while holding M locked (when
// loading a shared library) and try to lock GMut::sMutex, causing a deadlock.
// So GMut::sMutex can't be locked during the call to MozStackWalk(). (For
// details, see https://bugzilla.mozilla.org/show_bug.cgi?id=374829#c8. On
// Linux, something similar can happen; see bug 824340. So we just disallow it
// on all platforms.)
//
// In DMD, to avoid this problem we temporarily unlock the equivalent mutex for
// the MozStackWalk() call. But that's grotty, and things are a bit different
// here, so we just require that stack traces be obtained before locking
// GMut::sMutex.
//
// Unfortunately, there is no reliable way at compile-time or run-time to
// ensure this pre-condition. Hence this large comment.
void StackTrace::Fill() {
  mLength = 0;

#if defined(XP_WIN) && defined(_M_IX86)
  // This avoids MozStackWalk(), which causes unusably slow startup on Win32
  // when it is called during static initialization (see bug 1241684).
  //
  // This code is cribbed from the Gecko Profiler, which also uses
  // FramePointerStackWalk() on Win32: Registers::SyncPopulate() for the
  // frame pointer, and GetStackTop() for the stack end.
  CONTEXT context;
  RtlCaptureContext(&context);
  void** fp = reinterpret_cast<void**>(context.Ebp);

  PNT_TIB pTib = reinterpret_cast<PNT_TIB>(NtCurrentTeb());
  void* stackEnd = static_cast<void*>(pTib->StackBase);
  FramePointerStackWalk(StackWalkCallback, kMaxFrames, this, fp, stackEnd);
#elif defined(XP_MACOSX)
  // This avoids MozStackWalk(), which has become unusably slow on Mac due to
  // changes in libunwind.
  //
  // This code is cribbed from the Gecko Profiler, which also uses
  // FramePointerStackWalk() on Mac: Registers::SyncPopulate() for the frame
  // pointer, and GetStackTop() for the stack end.
#  pragma GCC diagnostic push
#  pragma GCC diagnostic ignored "-Wframe-address"
  void** fp = reinterpret_cast<void**>(__builtin_frame_address(1));
#  pragma GCC diagnostic pop
  void* stackEnd = pthread_get_stackaddr_np(pthread_self());
  FramePointerStackWalk(StackWalkCallback, kMaxFrames, this, fp, stackEnd);
#else
  MozStackWalk(StackWalkCallback, nullptr, kMaxFrames, this);
#endif
}

//---------------------------------------------------------------------------
// Logging
//---------------------------------------------------------------------------

// Change this to 1 to enable some PHC logging. Useful for debugging.
#define PHC_LOGGING 0

#if PHC_LOGGING

static size_t GetPid() { return size_t(getpid()); }

static size_t GetTid() {
#  if defined(XP_WIN)
  return size_t(GetCurrentThreadId());
#  else
  return size_t(pthread_self());
#  endif
}

#  if defined(XP_WIN)
#    define LOG_STDERR \
      reinterpret_cast<intptr_t>(GetStdHandle(STD_ERROR_HANDLE))
#  else
#    define LOG_STDERR 2
#  endif
#  define LOG(fmt, ...)                                                \
    FdPrintf(LOG_STDERR, "PHC[%zu,%zu,~%zu] " fmt, GetPid(), GetTid(), \
             size_t(GAtomic::Now()), __VA_ARGS__)

#else

#  define LOG(fmt, ...)

#endif  // PHC_LOGGING

//---------------------------------------------------------------------------
// Global state
//---------------------------------------------------------------------------

// Throughout this entire file time is measured as the number of sub-page
// allocations performed (by PHC and mozjemalloc combined). `Time` is 64-bit
// because we could have more than 2**32 allocations in a long-running session.
// `Delay` is 32-bit because the delays used within PHC are always much smaller
// than 2**32.
using Time = uint64_t;   // A moment in time.
using Delay = uint32_t;  // A time duration.

// PHC only runs if the page size is 4 KiB; anything more is uncommon and would
// use too much memory. So we hardwire this size for all platforms but macOS
// on ARM processors. For the latter we make an exception because the minimum
// page size supported is 16 KiB so there's no way to go below that.
static const size_t kPageSize =
#if defined(XP_MACOSX) && defined(__aarch64__)
    16384
#else
    4096
#endif
    ;

// There are two kinds of page.
// - Allocation pages, from which allocations are made.
// - Guard pages, which are never touched by PHC.
//
// These page kinds are interleaved; each allocation page has a guard page on
// either side.
static const size_t kNumAllocPages = kPageSize == 4096 ? 4096 : 1024;
static const size_t kNumAllPages = kNumAllocPages * 2 + 1;

// The total size of the allocation pages and guard pages.
static const size_t kAllPagesSize = kNumAllPages * kPageSize;

// The junk value used to fill new allocations in debug builds. It's the same
// value as the one used by mozjemalloc. PHC applies it unconditionally in
// debug builds. Unlike mozjemalloc, PHC doesn't consult the MALLOC_OPTIONS
// environment variable to possibly change that behaviour.
//
// Also note that, unlike mozjemalloc, PHC doesn't have a poison value for
// freed allocations because freed allocations are protected by OS page
// protection.
const uint8_t kAllocJunk = 0xe4;

static const Time kMaxTime = ~(Time(0));

// The average delay before doing any page allocations at the start of a
// process. Note that roughly 1 million allocations occur in the main process
// while starting the browser. The delay range is 1..kAvgFirstAllocDelay*2.
static const Delay kAvgFirstAllocDelay = 64 * 1024;

// The average delay until the next attempted page allocation, once we get past
// the first delay. The delay range is 1..kAvgAllocDelay*2.
static const Delay kAvgAllocDelay = 16 * 1024;

// The average delay before reusing a freed page. Should be significantly
// larger than kAvgAllocDelay, otherwise there's not much point in having it.
// The delay range is (kAvgPageReuseDelay / 2)..(kAvgPageReuseDelay / 2 * 3).
// This is different to the other delay ranges in not having a minimum of 1,
// because that's such a short delay that there is a high likelihood of bad
// stacks in any crash report.
static const Delay kAvgPageReuseDelay = 256 * 1024;

// Truncate aRnd to the range (1 .. AvgDelay*2). If aRnd is random, this
// results in an average value of AvgDelay + 0.5, which is close enough to
// AvgDelay. AvgDelay must be a power-of-two (otherwise it will crash) for
// speed.
template <Delay AvgDelay>
constexpr Delay Rnd64ToDelay(uint64_t aRnd) {
  static_assert(IsPowerOfTwo(AvgDelay), "must be a power of two");

  return aRnd % (AvgDelay * 2) + 1;
}
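
// For example, Rnd64ToDelay<kAvgAllocDelay>() with kAvgAllocDelay = 16 * 1024
// returns a value in the range 1..32768, whose average is 16384.5.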

// Maps a pointer to a PHC-specific structure:
// - Nothing
// - A guard page (it is unspecified which one)
// - An allocation page (with an index < kNumAllocPages)
//
// The standard way of handling a PtrKind is to check IsNothing(), and if that
// fails, to check IsGuardPage(), and if that fails, to call AllocPageIndex().
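//
// For illustration, the typical call-site pattern looks roughly like this
// (see PageFree() and replace_malloc_usable_size() below for real uses):
//
//   PtrKind pk = gConst->PtrKind(aPtr);
//   if (pk.IsNothing()) {
//     // not a PHC pointer; defer to mozjemalloc
//   } else if (pk.IsGuardPage()) {
//     // bounds violation; crash deliberately
//   } else {
//     uintptr_t index = pk.AllocPageIndex();  // an allocation page
//   }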
class PtrKind {
 private:
  enum class Tag : uint8_t {
    Nothing,
    GuardPage,
    AllocPage,
  };

  Tag mTag;
  uintptr_t mIndex;  // Only used if mTag == Tag::AllocPage.

 public:
  // Detect what a pointer points to. This constructor must be fast because it
  // is called for every call to free(), realloc(), malloc_usable_size(), and
  // jemalloc_ptr_info().
  PtrKind(const void* aPtr, const uint8_t* aPagesStart,
          const uint8_t* aPagesLimit) {
    if (!(aPagesStart <= aPtr && aPtr < aPagesLimit)) {
      mTag = Tag::Nothing;
    } else {
      uintptr_t offset = static_cast<const uint8_t*>(aPtr) - aPagesStart;
      uintptr_t allPageIndex = offset / kPageSize;
      MOZ_ASSERT(allPageIndex < kNumAllPages);
      if (allPageIndex & 1) {
        // Odd-indexed pages are allocation pages.
        uintptr_t allocPageIndex = allPageIndex / 2;
        MOZ_ASSERT(allocPageIndex < kNumAllocPages);
        mTag = Tag::AllocPage;
        mIndex = allocPageIndex;
      } else {
        // Even-indexed pages are guard pages.
        mTag = Tag::GuardPage;
      }
    }
  }

  bool IsNothing() const { return mTag == Tag::Nothing; }
  bool IsGuardPage() const { return mTag == Tag::GuardPage; }

  // This should only be called after IsNothing() and IsGuardPage() have been
  // checked and failed.
  uintptr_t AllocPageIndex() const {
    MOZ_RELEASE_ASSERT(mTag == Tag::AllocPage);
    return mIndex;
  }
};

// Shared, atomic, mutable global state.
class GAtomic {
 public:
  static void Init(Delay aFirstDelay) {
    sAllocDelay = aFirstDelay;

    LOG("Initial sAllocDelay <- %zu\n", size_t(aFirstDelay));
  }

  static Time Now() { return sNow; }
  static void IncrementNow() { sNow++; }

  // Decrements the delay and returns the decremented value.
  static int32_t DecrementDelay() { return --sAllocDelay; }

  static void SetAllocDelay(Delay aAllocDelay) { sAllocDelay = aAllocDelay; }

 private:
  // The current time. Relaxed semantics because it's primarily used for
  // determining if an allocation can be recycled yet and therefore it doesn't
  // need to be exact.
  static Atomic<Time, Relaxed> sNow;

  // Delay until the next attempt at a page allocation. See the comment in
  // MaybePageAlloc() for an explanation of why it is a signed integer, and why
  // it uses ReleaseAcquire semantics.
  static Atomic<Delay, ReleaseAcquire> sAllocDelay;
};

Atomic<Time, Relaxed> GAtomic::sNow;
Atomic<Delay, ReleaseAcquire> GAtomic::sAllocDelay;

// Shared, immutable global state. Initialized by replace_init() and never
// changed after that. replace_init() runs early enough that no synchronization
// is needed.
class GConst {
 private:
  // The bounds of the allocated pages.
  uint8_t* const mPagesStart;
  uint8_t* const mPagesLimit;

  // Allocates the allocation pages and the guard pages, contiguously.
  uint8_t* AllocAllPages() {
    // Allocate the pages so that they are inaccessible. They are never freed,
    // because it would happen at process termination when it would be of
    // little use.
    void* pages =
#ifdef XP_WIN
        VirtualAlloc(nullptr, kAllPagesSize, MEM_RESERVE, PAGE_NOACCESS);
#else
        mmap(nullptr, kAllPagesSize, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1,
             0);
#endif
    if (!pages) {
      MOZ_CRASH();
    }

    return static_cast<uint8_t*>(pages);
  }

 public:
  GConst()
      : mPagesStart(AllocAllPages()), mPagesLimit(mPagesStart + kAllPagesSize) {
    LOG("AllocAllPages at %p..%p\n", mPagesStart, mPagesLimit);
  }

  class PtrKind PtrKind(const void* aPtr) {
    class PtrKind pk(aPtr, mPagesStart, mPagesLimit);
    return pk;
  }

  bool IsInFirstGuardPage(const void* aPtr) {
    return mPagesStart <= aPtr && aPtr < mPagesStart + kPageSize;
  }

  // Get the address of the allocation page referred to via an index. Used when
  // marking the page as accessible/inaccessible.
  uint8_t* AllocPagePtr(uintptr_t aIndex) {
    MOZ_ASSERT(aIndex < kNumAllocPages);
    // Multiply by two and add one to account for allocation pages *and* guard
    // pages.
    return mPagesStart + (2 * aIndex + 1) * kPageSize;
  }
};

static GConst* gConst;

// On MacOS, the first __thread/thread_local access calls malloc, which leads
// to an infinite loop. So we use pthread-based TLS instead, which somehow
// doesn't have this problem.
#if !defined(XP_DARWIN)
#  define PHC_THREAD_LOCAL(T) MOZ_THREAD_LOCAL(T)
#else
#  define PHC_THREAD_LOCAL(T) \
    detail::ThreadLocal<T, detail::ThreadLocalKeyStorage>
#endif

// Thread-local state.
class GTls {
  GTls(const GTls&) = delete;

  const GTls& operator=(const GTls&) = delete;

  // When true, PHC does as little as possible.
  //
  // (a) It does not allocate any new page allocations.
  //
  // (b) It avoids doing any operations that might call malloc/free/etc., which
  //     would cause re-entry into PHC. (In practice, MozStackWalk() is the
  //     only such operation.) Note that calls to the functions in sMallocTable
  //     are ok.
  //
  // For example, replace_malloc() will just fall back to mozjemalloc. However,
  // operations involving existing allocations are more complex, because those
  // existing allocations may be page allocations. For example, if
  // replace_free() is passed a page allocation on a PHC-disabled thread, it
  // will free the page allocation in the usual way, but it will get a dummy
  // freeStack in order to avoid calling MozStackWalk(), as per (b) above.
  //
  // This single disabling mechanism has two distinct uses.
  //
  // - It's used to prevent re-entry into PHC, which can cause correctness
  //   problems. For example, consider this sequence.
  //
  //   1. enter replace_free()
  //   2. which calls PageFree()
  //   3. which calls MozStackWalk()
  //   4. which locks a mutex M, and then calls malloc
  //   5. enter replace_malloc()
  //   6. which calls MaybePageAlloc()
  //   7. which calls MozStackWalk()
  //   8. which (re)locks a mutex M --> deadlock
  //
  //   We avoid this sequence by "disabling" the thread in PageFree() (at step
  //   2), which causes MaybePageAlloc() to fail, avoiding the call to
  //   MozStackWalk() (at step 7).
  //
  //   In practice, realloc or free of a PHC allocation is unlikely on a thread
  //   that is disabled because of this use: MozStackWalk() will probably only
  //   realloc/free allocations that it allocated itself, but those won't be
  //   page allocations because PHC is disabled before calling MozStackWalk().
  //
  //   (Note that MaybePageAlloc() could safely do a page allocation so long as
  //   it avoided calling MozStackWalk() by getting a dummy allocStack. But it
  //   wouldn't be useful, and it would prevent the second use below.)
  //
  // - It's used to prevent PHC allocations in some tests that rely on
  //   mozjemalloc's exact allocation behaviour, which PHC does not replicate
  //   exactly. (Note that (b) isn't necessary for this use -- MozStackWalk()
  //   could be safely called -- but it is necessary for the first use above.)
  //
  static PHC_THREAD_LOCAL(bool) tlsIsDisabled;

 public:
  static void Init() {
    if (!tlsIsDisabled.init()) {
      MOZ_CRASH();
    }
  }

  static void DisableOnCurrentThread() {
    MOZ_ASSERT(!GTls::tlsIsDisabled.get());
    tlsIsDisabled.set(true);
  }

  static void EnableOnCurrentThread() {
    MOZ_ASSERT(GTls::tlsIsDisabled.get());
    tlsIsDisabled.set(false);
  }

  static bool IsDisabledOnCurrentThread() { return tlsIsDisabled.get(); }
};

PHC_THREAD_LOCAL(bool) GTls::tlsIsDisabled;

class AutoDisableOnCurrentThread {
  AutoDisableOnCurrentThread(const AutoDisableOnCurrentThread&) = delete;

  const AutoDisableOnCurrentThread& operator=(
      const AutoDisableOnCurrentThread&) = delete;

 public:
  explicit AutoDisableOnCurrentThread() { GTls::DisableOnCurrentThread(); }
  ~AutoDisableOnCurrentThread() { GTls::EnableOnCurrentThread(); }
};
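
// For illustration, a minimal sketch of how the disabling mechanism is used
// (PageFree() and PageRealloc() below do essentially this before walking the
// stack):
//
//   {
//     AutoDisableOnCurrentThread disable;  // no new page allocs on this thread
//     // ... work that may re-enter malloc()/free(), e.g. MozStackWalk() ...
//   }  // re-enabled when `disable` goes out of scope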

// This type is used as a proof-of-lock token, to make it clear which functions
// require sMutex to be locked.
using GMutLock = const MutexAutoLock&;

// Shared, mutable global state. Protected by sMutex; all accessing functions
// take a GMutLock as proof that sMutex is held.
class GMut {
  enum class AllocPageState {
    NeverAllocated = 0,
    InUse = 1,
    Freed = 2,
  };

  // Metadata for each allocation page.
  class AllocPageInfo {
   public:
    AllocPageInfo()
        : mState(AllocPageState::NeverAllocated),
          mArenaId(),
          mBaseAddr(nullptr),
          mAllocStack(),
          mFreeStack(),
          mReuseTime(0) {}

    // The current allocation page state.
    AllocPageState mState;

    // The arena that the allocation is nominally from. This isn't meaningful
    // within PHC, which has no arenas. But it is necessary for reallocation of
    // page allocations as normal allocations, such as in this code:
    //
    //   p = moz_arena_malloc(arenaId, 4096);
    //   realloc(p, 8192);
    //
    // The realloc is more than one page, and thus too large for PHC to handle.
    // Therefore, if PHC handles the first allocation, it must ask mozjemalloc
    // to allocate the 8192 bytes in the correct arena, and to do that, it must
    // call sMallocTable.moz_arena_malloc with the correct arenaId under the
    // covers. Therefore it must record that arenaId.
    //
    // This field is also needed for jemalloc_ptr_info() to work, because it
    // also returns the arena ID (but only in debug builds).
    //
    // - NeverAllocated: must be 0.
    // - InUse | Freed: can be any valid arena ID value.
    Maybe<arena_id_t> mArenaId;

    // The starting address of the allocation. Will not be the same as the page
    // address unless the allocation is a full page.
    // - NeverAllocated: must be 0.
    // - InUse | Freed: must be within the allocation page.
    uint8_t* mBaseAddr;

    // Usable size is computed as the number of bytes between the pointer and
    // the end of the allocation page. This might be bigger than the requested
    // size, especially if an outsized alignment is requested.
    size_t UsableSize() const {
      return mState == AllocPageState::NeverAllocated
                 ? 0
                 : kPageSize - (reinterpret_cast<uintptr_t>(mBaseAddr) &
                                (kPageSize - 1));
    }

    // The internal fragmentation for this allocation.
    size_t FragmentationBytes() const {
      MOZ_ASSERT(kPageSize >= UsableSize());
      return mState == AllocPageState::InUse ? kPageSize - UsableSize() : 0;
    }

    // The allocation stack.
    // - NeverAllocated: Nothing.
    // - InUse | Freed: Some.
    Maybe<StackTrace> mAllocStack;

    // The free stack.
    // - NeverAllocated | InUse: Nothing.
    // - Freed: Some.
    Maybe<StackTrace> mFreeStack;

    // The time at which the page is available for reuse, as measured against
    // GAtomic::sNow. When the page is in use this value will be kMaxTime.
    // - NeverAllocated: must be 0.
    // - InUse: must be kMaxTime.
    // - Freed: must be > 0 and < kMaxTime.
    Time mReuseTime;
  };

 public:
  // The mutex that protects the other members.
  static Mutex sMutex MOZ_UNANNOTATED;

  GMut() : mRNG(RandomSeed<0>(), RandomSeed<1>()), mAllocPages() {
    sMutex.Init();
  }

  uint64_t Random64(GMutLock) { return mRNG.next(); }
693 bool IsPageInUse(GMutLock
, uintptr_t aIndex
) {
694 return mAllocPages
[aIndex
].mState
== AllocPageState::InUse
;
697 // Is the page free? And if so, has enough time passed that we can use it?
698 bool IsPageAllocatable(GMutLock
, uintptr_t aIndex
, Time aNow
) {
699 const AllocPageInfo
& page
= mAllocPages
[aIndex
];
700 return page
.mState
!= AllocPageState::InUse
&& aNow
>= page
.mReuseTime
;
703 // Get the address of the allocation page referred to via an index. Used
704 // when checking pointers against page boundaries.
705 uint8_t* AllocPageBaseAddr(GMutLock
, uintptr_t aIndex
) {
706 return mAllocPages
[aIndex
].mBaseAddr
;
709 Maybe
<arena_id_t
> PageArena(GMutLock aLock
, uintptr_t aIndex
) {
710 const AllocPageInfo
& page
= mAllocPages
[aIndex
];
711 AssertAllocPageInUse(aLock
, page
);
713 return page
.mArenaId
;
716 size_t PageUsableSize(GMutLock aLock
, uintptr_t aIndex
) {
717 const AllocPageInfo
& page
= mAllocPages
[aIndex
];
718 AssertAllocPageInUse(aLock
, page
);
720 return page
.UsableSize();
723 // The total fragmentation in PHC
724 size_t FragmentationBytes() const {
726 for (const auto& page
: mAllocPages
) {
727 sum
+= page
.FragmentationBytes();
732 void SetPageInUse(GMutLock aLock
, uintptr_t aIndex
,
733 const Maybe
<arena_id_t
>& aArenaId
, uint8_t* aBaseAddr
,
734 const StackTrace
& aAllocStack
) {
735 AllocPageInfo
& page
= mAllocPages
[aIndex
];
736 AssertAllocPageNotInUse(aLock
, page
);
738 page
.mState
= AllocPageState::InUse
;
739 page
.mArenaId
= aArenaId
;
740 page
.mBaseAddr
= aBaseAddr
;
741 page
.mAllocStack
= Some(aAllocStack
);
742 page
.mFreeStack
= Nothing();
743 page
.mReuseTime
= kMaxTime
;
747 Time
GetFreeTime(uintptr_t aIndex
) const { return mFreeTime
[aIndex
]; }
750 void ResizePageInUse(GMutLock aLock
, uintptr_t aIndex
,
751 const Maybe
<arena_id_t
>& aArenaId
, uint8_t* aNewBaseAddr
,
752 const StackTrace
& aAllocStack
) {
753 AllocPageInfo
& page
= mAllocPages
[aIndex
];
754 AssertAllocPageInUse(aLock
, page
);
756 // page.mState is not changed.
757 if (aArenaId
.isSome()) {
758 // Crash if the arenas don't match.
759 MOZ_RELEASE_ASSERT(page
.mArenaId
== aArenaId
);
761 page
.mBaseAddr
= aNewBaseAddr
;
762 // We could just keep the original alloc stack, but the realloc stack is
763 // more recent and therefore seems more useful.
764 page
.mAllocStack
= Some(aAllocStack
);
765 // page.mFreeStack is not changed.
766 // page.mReuseTime is not changed.
769 void SetPageFreed(GMutLock aLock
, uintptr_t aIndex
,
770 const Maybe
<arena_id_t
>& aArenaId
,
771 const StackTrace
& aFreeStack
, Delay aReuseDelay
) {
772 AllocPageInfo
& page
= mAllocPages
[aIndex
];
773 AssertAllocPageInUse(aLock
, page
);
775 page
.mState
= AllocPageState::Freed
;
777 // page.mArenaId is left unchanged, for jemalloc_ptr_info() calls that
778 // occur after freeing (e.g. in the PtrInfo test in TestJemalloc.cpp).
779 if (aArenaId
.isSome()) {
780 // Crash if the arenas don't match.
781 MOZ_RELEASE_ASSERT(page
.mArenaId
== aArenaId
);
    // The usable size is left unchanged (it follows from mBaseAddr, which is
    // not modified here), for reporting on UAF, and for jemalloc_ptr_info()
    // calls that occur after freeing (e.g. in the PtrInfo test in
    // TestJemalloc.cpp).
788 // page.mAllocStack is left unchanged, for reporting on UAF.
790 page
.mFreeStack
= Some(aFreeStack
);
791 Time now
= GAtomic::Now();
793 mFreeTime
[aIndex
] = now
;
795 page
.mReuseTime
= now
+ aReuseDelay
;
798 static void CrashOnGuardPage(void* aPtr
) {
    // An operation on a guard page? This is a bounds violation. Deliberately
    // touch the page in question, to cause a crash that triggers the usual PHC
    // machinery.
802 LOG("CrashOnGuardPage(%p), bounds violation\n", aPtr
);
803 *static_cast<uint8_t*>(aPtr
) = 0;
804 MOZ_CRASH("unreachable");
807 void EnsureValidAndInUse(GMutLock
, void* aPtr
, uintptr_t aIndex
)
808 MOZ_REQUIRES(sMutex
) {
809 const AllocPageInfo
& page
= mAllocPages
[aIndex
];
811 // The pointer must point to the start of the allocation.
812 MOZ_RELEASE_ASSERT(page
.mBaseAddr
== aPtr
);
814 if (page
.mState
== AllocPageState::Freed
) {
815 LOG("EnsureValidAndInUse(%p), use-after-free\n", aPtr
);
816 // An operation on a freed page? This is a particular kind of
817 // use-after-free. Deliberately touch the page in question, in order to
818 // cause a crash that triggers the usual PHC machinery. But unlock sMutex
819 // first, because that self-same PHC machinery needs to re-lock it, and
820 // the crash causes non-local control flow so sMutex won't be unlocked
821 // the normal way in the caller.
823 *static_cast<uint8_t*>(aPtr
) = 0;
824 MOZ_CRASH("unreachable");
828 void FillAddrInfo(GMutLock
, uintptr_t aIndex
, const void* aBaseAddr
,
829 bool isGuardPage
, phc::AddrInfo
& aOut
) {
830 const AllocPageInfo
& page
= mAllocPages
[aIndex
];
832 aOut
.mKind
= phc::AddrInfo::Kind::GuardPage
;
834 switch (page
.mState
) {
835 case AllocPageState::NeverAllocated
:
836 aOut
.mKind
= phc::AddrInfo::Kind::NeverAllocatedPage
;
839 case AllocPageState::InUse
:
840 aOut
.mKind
= phc::AddrInfo::Kind::InUsePage
;
843 case AllocPageState::Freed
:
844 aOut
.mKind
= phc::AddrInfo::Kind::FreedPage
;
851 aOut
.mBaseAddr
= page
.mBaseAddr
;
852 aOut
.mUsableSize
= page
.UsableSize();
853 aOut
.mAllocStack
= page
.mAllocStack
;
854 aOut
.mFreeStack
= page
.mFreeStack
;
857 void FillJemallocPtrInfo(GMutLock
, const void* aPtr
, uintptr_t aIndex
,
858 jemalloc_ptr_info_t
* aInfo
) {
859 const AllocPageInfo
& page
= mAllocPages
[aIndex
];
860 switch (page
.mState
) {
861 case AllocPageState::NeverAllocated
:
864 case AllocPageState::InUse
: {
865 // Only return TagLiveAlloc if the pointer is within the bounds of the
866 // allocation's usable size.
867 uint8_t* base
= page
.mBaseAddr
;
868 uint8_t* limit
= base
+ page
.UsableSize();
869 if (base
<= aPtr
&& aPtr
< limit
) {
870 *aInfo
= {TagLiveAlloc
, page
.mBaseAddr
, page
.UsableSize(),
871 page
.mArenaId
.valueOr(0)};
877 case AllocPageState::Freed
: {
878 // Only return TagFreedAlloc if the pointer is within the bounds of the
879 // former allocation's usable size.
880 uint8_t* base
= page
.mBaseAddr
;
881 uint8_t* limit
= base
+ page
.UsableSize();
882 if (base
<= aPtr
&& aPtr
< limit
) {
883 *aInfo
= {TagFreedAlloc
, page
.mBaseAddr
, page
.UsableSize(),
884 page
.mArenaId
.valueOr(0)};
894 // Pointers into guard pages will end up here, as will pointers into
895 // allocation pages that aren't within the allocation's bounds.
896 *aInfo
= {TagUnknown
, nullptr, 0, 0};
900 static void prefork() MOZ_NO_THREAD_SAFETY_ANALYSIS
{ sMutex
.Lock(); }
901 static void postfork_parent() MOZ_NO_THREAD_SAFETY_ANALYSIS
{
904 static void postfork_child() { sMutex
.Init(); }
908 void IncPageAllocHits(GMutLock
) { mPageAllocHits
++; }
909 void IncPageAllocMisses(GMutLock
) { mPageAllocMisses
++; }
911 void IncPageAllocHits(GMutLock
) {}
912 void IncPageAllocMisses(GMutLock
) {}
917 size_t mNumAlloced
= 0;
918 size_t mNumFreed
= 0;
921 PageStats
GetPageStats(GMutLock
) {
924 for (const auto& page
: mAllocPages
) {
925 stats
.mNumAlloced
+= page
.mState
== AllocPageState::InUse
? 1 : 0;
926 stats
.mNumFreed
+= page
.mState
== AllocPageState::Freed
? 1 : 0;
932 size_t PageAllocHits(GMutLock
) { return mPageAllocHits
; }
933 size_t PageAllocAttempts(GMutLock
) {
934 return mPageAllocHits
+ mPageAllocMisses
;
937 // This is an integer because FdPrintf only supports integer printing.
938 size_t PageAllocHitRate(GMutLock
) {
939 return mPageAllocHits
* 100 / (mPageAllocHits
+ mPageAllocMisses
);
945 uint64_t RandomSeed() {
946 // An older version of this code used RandomUint64() here, but on Mac that
947 // function uses arc4random(), which can allocate, which would cause
948 // re-entry, which would be bad. So we just use time() and a local variable
949 // address. These are mediocre sources of entropy, but good enough for PHC.
950 static_assert(N
== 0 || N
== 1, "must be 0 or 1");
953 time_t t
= time(nullptr);
954 seed
= t
^ (t
<< 32);
956 seed
= uintptr_t(&seed
) ^ (uintptr_t(&seed
) << 32);
961 void AssertAllocPageInUse(GMutLock
, const AllocPageInfo
& aPage
) {
962 MOZ_ASSERT(aPage
.mState
== AllocPageState::InUse
);
963 // There is nothing to assert about aPage.mArenaId.
964 MOZ_ASSERT(aPage
.mBaseAddr
);
965 MOZ_ASSERT(aPage
.UsableSize() > 0);
966 MOZ_ASSERT(aPage
.mAllocStack
.isSome());
967 MOZ_ASSERT(aPage
.mFreeStack
.isNothing());
968 MOZ_ASSERT(aPage
.mReuseTime
== kMaxTime
);
971 void AssertAllocPageNotInUse(GMutLock
, const AllocPageInfo
& aPage
) {
972 // We can assert a lot about `NeverAllocated` pages, but not much about
975 bool isFresh
= aPage
.mState
== AllocPageState::NeverAllocated
;
976 MOZ_ASSERT(isFresh
|| aPage
.mState
== AllocPageState::Freed
);
977 MOZ_ASSERT_IF(isFresh
, aPage
.mArenaId
== Nothing());
978 MOZ_ASSERT(isFresh
== (aPage
.mBaseAddr
== nullptr));
979 MOZ_ASSERT(isFresh
== (aPage
.mAllocStack
.isNothing()));
980 MOZ_ASSERT(isFresh
== (aPage
.mFreeStack
.isNothing()));
981 MOZ_ASSERT(aPage
.mReuseTime
!= kMaxTime
);
985 // RNG for deciding which allocations to treat specially. It doesn't need to
988 // This is a raw pointer for the reason explained in the comment above
989 // GMut's constructor. Don't change it to UniquePtr or anything like that.
990 non_crypto::XorShift128PlusRNG mRNG
;
992 AllocPageInfo mAllocPages
[kNumAllocPages
];
994 Time mFreeTime
[kNumAllocPages
];
  // How many allocations that could have been page allocs actually were? As
  // constrained by kNumAllocPages. If the hit ratio isn't close to 100% it's
  // likely that the global constants are poorly chosen.
999 size_t mPageAllocHits
= 0;
1000 size_t mPageAllocMisses
= 0;
1008 //---------------------------------------------------------------------------
1009 // Page allocation operations
1010 //---------------------------------------------------------------------------
1012 // Attempt a page allocation if the time and the size are right. Allocated
1013 // memory is zeroed if aZero is true. On failure, the caller should attempt a
1014 // normal allocation via sMallocTable. Can be called in a context where
1015 // GMut::sMutex is locked.
1016 static void* MaybePageAlloc(const Maybe
<arena_id_t
>& aArenaId
, size_t aReqSize
,
1017 size_t aAlignment
, bool aZero
) {
1018 MOZ_ASSERT(IsPowerOfTwo(aAlignment
));
1020 if (aReqSize
> kPageSize
) {
1024 GAtomic::IncrementNow();
1026 // Decrement the delay. If it's zero, we do a page allocation and reset the
1027 // delay to a random number. Because the assignment to the random number isn't
1028 // atomic w.r.t. the decrement, we might have a sequence like this:
1030 // Thread 1 Thread 2 Thread 3
1031 // -------- -------- --------
1032 // (a) newDelay = --sAllocDelay (-> 0)
1033 // (b) --sAllocDelay (-> -1)
1034 // (c) (newDelay != 0) fails
1035 // (d) --sAllocDelay (-> -2)
1036 // (e) sAllocDelay = new_random_number()
1038 // It's critical that sAllocDelay has ReleaseAcquire semantics, because that
1039 // guarantees that exactly one thread will see sAllocDelay have the value 0.
1040 // (Relaxed semantics wouldn't guarantee that.)
1042 // It's also nice that sAllocDelay is signed, given that we can decrement to
1043 // below zero. (Strictly speaking, an unsigned integer would also work due
1044 // to wrapping, but a signed integer is conceptually cleaner.)
1046 // Finally, note that the decrements that occur between (a) and (e) above are
1047 // effectively ignored, because (e) clobbers them. This shouldn't be a
1048 // problem; it effectively just adds a little more randomness to
1049 // new_random_number(). An early version of this code tried to account for
1050 // these decrements by doing `sAllocDelay += new_random_number()`. However, if
1051 // new_random_value() is small, the number of decrements between (a) and (e)
1052 // can easily exceed it, whereupon sAllocDelay ends up negative after
1053 // `sAllocDelay += new_random_number()`, and the zero-check never succeeds
1054 // again. (At least, not until sAllocDelay wraps around on overflow, which
1055 // would take a very long time indeed.)
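  //
  // For example, if sAllocDelay is currently 3, the next three allocations
  // (across all threads) just decrement it (3 -> 2 -> 1 -> 0); the allocation
  // that observes 0 attempts a page allocation and then installs a fresh
  // random delay via GAtomic::SetAllocDelay() at the end of this function.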
1057 int32_t newDelay
= GAtomic::DecrementDelay();
1058 if (newDelay
!= 0) {
1062 if (GTls::IsDisabledOnCurrentThread()) {
1066 // Disable on this thread *before* getting the stack trace.
1067 AutoDisableOnCurrentThread disable
;
1069 // Get the stack trace *before* locking the mutex. If we return nullptr then
1070 // it was a waste, but it's not so frequent, and doing a stack walk while
1071 // the mutex is locked is problematic (see the big comment on
1072 // StackTrace::Fill() for details).
1073 StackTrace allocStack
;
1076 MutexAutoLock
lock(GMut::sMutex
);
1078 Time now
= GAtomic::Now();
1079 Delay newAllocDelay
= Rnd64ToDelay
<kAvgAllocDelay
>(gMut
->Random64(lock
));
1081 // We start at a random page alloc and wrap around, to ensure pages get even
1083 uint8_t* ptr
= nullptr;
1084 uint8_t* pagePtr
= nullptr;
1085 for (uintptr_t n
= 0, i
= size_t(gMut
->Random64(lock
)) % kNumAllocPages
;
1086 n
< kNumAllocPages
; n
++, i
= (i
+ 1) % kNumAllocPages
) {
1087 if (!gMut
->IsPageAllocatable(lock
, i
, now
)) {
1094 pagePtr
= gConst
->AllocPagePtr(i
);
1095 MOZ_ASSERT(pagePtr
);
1098 !!VirtualAlloc(pagePtr
, kPageSize
, MEM_COMMIT
, PAGE_READWRITE
);
1100 mprotect(pagePtr
, kPageSize
, PROT_READ
| PROT_WRITE
) == 0;
1108 size_t usableSize
= sMallocTable
.malloc_good_size(aReqSize
);
1109 MOZ_ASSERT(usableSize
> 0);
1111 // Put the allocation as close to the end of the page as possible,
1112 // allowing for alignment requirements.
1113 ptr
= pagePtr
+ kPageSize
- usableSize
;
1114 if (aAlignment
!= 1) {
1115 ptr
= reinterpret_cast<uint8_t*>(
1116 (reinterpret_cast<uintptr_t>(ptr
) & ~(aAlignment
- 1)));
1120 Time then
= gMut
->GetFreeTime(i
);
1121 lifetime
= then
!= 0 ? now
- then
: 0;
1124 gMut
->SetPageInUse(lock
, i
, aArenaId
, ptr
, allocStack
);
1127 memset(ptr
, 0, usableSize
);
1130 memset(ptr
, kAllocJunk
, usableSize
);
1134 gMut
->IncPageAllocHits(lock
);
1136 GMut::PageStats stats
= gMut
->GetPageStats(lock
);
1138 LOG("PageAlloc(%zu, %zu) -> %p[%zu]/%p (%zu) (z%zu), sAllocDelay <- %zu, "
1139 "fullness %zu/%zu/%zu, hits %zu/%zu (%zu%%), lifetime %zu\n",
1140 aReqSize
, aAlignment
, pagePtr
, i
, ptr
, usableSize
, size_t(aZero
),
1141 size_t(newAllocDelay
), stats
.mNumAlloced
, stats
.mNumFreed
,
1142 kNumAllocPages
, gMut
->PageAllocHits(lock
),
1143 gMut
->PageAllocAttempts(lock
), gMut
->PageAllocHitRate(lock
), lifetime
);
1148 // No pages are available, or VirtualAlloc/mprotect failed.
1149 gMut
->IncPageAllocMisses(lock
);
1151 GMut::PageStats stats
= gMut
->GetPageStats(lock
);
1153 LOG("No PageAlloc(%zu, %zu), sAllocDelay <- %zu, fullness %zu/%zu/%zu, "
1154 "hits %zu/%zu (%zu%%)\n",
1155 aReqSize
, aAlignment
, size_t(newAllocDelay
), stats
.mNumAlloced
,
1156 stats
.mNumFreed
, kNumAllocPages
, gMut
->PageAllocHits(lock
),
1157 gMut
->PageAllocAttempts(lock
), gMut
->PageAllocHitRate(lock
));
1160 // Set the new alloc delay.
1161 GAtomic::SetAllocDelay(newAllocDelay
);
1166 static void FreePage(GMutLock aLock
, uintptr_t aIndex
,
1167 const Maybe
<arena_id_t
>& aArenaId
,
1168 const StackTrace
& aFreeStack
, Delay aReuseDelay
) {
1169 void* pagePtr
= gConst
->AllocPagePtr(aIndex
);
1172 if (!VirtualFree(pagePtr
, kPageSize
, MEM_DECOMMIT
)) {
1173 MOZ_CRASH("VirtualFree failed");
1176 if (mmap(pagePtr
, kPageSize
, PROT_NONE
, MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
,
1177 -1, 0) == MAP_FAILED
) {
1178 MOZ_CRASH("mmap failed");
1182 gMut
->SetPageFreed(aLock
, aIndex
, aArenaId
, aFreeStack
, aReuseDelay
);
1185 //---------------------------------------------------------------------------
1186 // replace-malloc machinery
1187 //---------------------------------------------------------------------------
1189 // This handles malloc, moz_arena_malloc, and realloc-with-a-nullptr.
1190 MOZ_ALWAYS_INLINE
static void* PageMalloc(const Maybe
<arena_id_t
>& aArenaId
,
1192 void* ptr
= MaybePageAlloc(aArenaId
, aReqSize
, /* aAlignment */ 1,
1195 : (aArenaId
.isSome()
1196 ? sMallocTable
.moz_arena_malloc(*aArenaId
, aReqSize
)
1197 : sMallocTable
.malloc(aReqSize
));
1200 static void* replace_malloc(size_t aReqSize
) {
1201 return PageMalloc(Nothing(), aReqSize
);
1204 static Delay
ReuseDelay(GMutLock aLock
) {
1205 return (kAvgPageReuseDelay
/ 2) +
1206 Rnd64ToDelay
<kAvgPageReuseDelay
/ 2>(gMut
->Random64(aLock
));
1209 // This handles both calloc and moz_arena_calloc.
1210 MOZ_ALWAYS_INLINE
static void* PageCalloc(const Maybe
<arena_id_t
>& aArenaId
,
1211 size_t aNum
, size_t aReqSize
) {
1212 CheckedInt
<size_t> checkedSize
= CheckedInt
<size_t>(aNum
) * aReqSize
;
1213 if (!checkedSize
.isValid()) {
1217 void* ptr
= MaybePageAlloc(aArenaId
, checkedSize
.value(), /* aAlignment */ 1,
1220 : (aArenaId
.isSome()
1221 ? sMallocTable
.moz_arena_calloc(*aArenaId
, aNum
, aReqSize
)
1222 : sMallocTable
.calloc(aNum
, aReqSize
));
1225 static void* replace_calloc(size_t aNum
, size_t aReqSize
) {
1226 return PageCalloc(Nothing(), aNum
, aReqSize
);
// This function handles both realloc and moz_arena_realloc.
//
// As always, realloc is complicated, and doubly so when there are two
// different kinds of allocations in play. Here are the possible transitions,
// and what we do in practice.
//
// - normal-to-normal: This is straightforward and obviously necessary.
//
// - normal-to-page: This is disallowed because it would require getting the
//   arenaId of the normal allocation, which isn't possible in non-DEBUG builds
//   for security reasons.
//
// - page-to-page: This is done whenever possible, i.e. whenever the new size
//   is less than or equal to one page (kPageSize). This choice counterbalances
//   the disallowing of normal-to-page allocations, in order to avoid biasing
//   towards or away from page allocations. It always occurs in-place.
//
// - page-to-normal: This is done only when necessary, i.e. only when the new
//   size is greater than one page. This choice naturally flows from the prior
//   choice on page-to-page transitions.
//
// In summary: realloc doesn't change the allocation kind unless it must.
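//
// For illustration (hypothetical sizes, assuming 4 KiB pages):
//
//   void* p = malloc(128);   // may be picked as a PHC page allocation
//   p = realloc(p, 3000);    // page-to-page: stays in its PHC page, in place
//   p = realloc(p, 8192);    // page-to-normal: handed back to mozjemalloc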
1252 MOZ_ALWAYS_INLINE
static void* PageRealloc(const Maybe
<arena_id_t
>& aArenaId
,
1253 void* aOldPtr
, size_t aNewSize
) {
1255 // Null pointer. Treat like malloc(aNewSize).
1256 return PageMalloc(aArenaId
, aNewSize
);
1259 PtrKind pk
= gConst
->PtrKind(aOldPtr
);
1260 if (pk
.IsNothing()) {
1261 // A normal-to-normal transition.
1262 return aArenaId
.isSome()
1263 ? sMallocTable
.moz_arena_realloc(*aArenaId
, aOldPtr
, aNewSize
)
1264 : sMallocTable
.realloc(aOldPtr
, aNewSize
);
1267 if (pk
.IsGuardPage()) {
1268 GMut::CrashOnGuardPage(aOldPtr
);
1271 // At this point we know we have an allocation page.
1272 uintptr_t index
= pk
.AllocPageIndex();
1274 // A page-to-something transition.
1276 // Note that `disable` has no effect unless it is emplaced below.
1277 Maybe
<AutoDisableOnCurrentThread
> disable
;
1278 // Get the stack trace *before* locking the mutex.
1280 if (GTls::IsDisabledOnCurrentThread()) {
1281 // PHC is disabled on this thread. Leave the stack empty.
1283 // Disable on this thread *before* getting the stack trace.
1288 MutexAutoLock
lock(GMut::sMutex
);
1290 // Check for realloc() of a freed block.
1291 gMut
->EnsureValidAndInUse(lock
, aOldPtr
, index
);
1293 if (aNewSize
<= kPageSize
) {
1294 // A page-to-page transition. Just keep using the page allocation. We do
1295 // this even if the thread is disabled, because it doesn't create a new
1296 // page allocation. Note that ResizePageInUse() checks aArenaId.
1298 // Move the bytes with memmove(), because the old allocation and the new
1299 // allocation overlap. Move the usable size rather than the requested size,
1300 // because the user might have used malloc_usable_size() and filled up the
1302 size_t oldUsableSize
= gMut
->PageUsableSize(lock
, index
);
1303 size_t newUsableSize
= sMallocTable
.malloc_good_size(aNewSize
);
1304 uint8_t* pagePtr
= gConst
->AllocPagePtr(index
);
1305 uint8_t* newPtr
= pagePtr
+ kPageSize
- newUsableSize
;
1306 memmove(newPtr
, aOldPtr
, std::min(oldUsableSize
, aNewSize
));
1307 gMut
->ResizePageInUse(lock
, index
, aArenaId
, newPtr
, stack
);
1308 LOG("PageRealloc-Reuse(%p, %zu) -> %p\n", aOldPtr
, aNewSize
, newPtr
);
1312 // A page-to-normal transition (with the new size greater than page-sized).
1313 // (Note that aArenaId is checked below.)
1315 if (aArenaId
.isSome()) {
1316 newPtr
= sMallocTable
.moz_arena_malloc(*aArenaId
, aNewSize
);
1318 Maybe
<arena_id_t
> oldArenaId
= gMut
->PageArena(lock
, index
);
1319 newPtr
= (oldArenaId
.isSome()
1320 ? sMallocTable
.moz_arena_malloc(*oldArenaId
, aNewSize
)
1321 : sMallocTable
.malloc(aNewSize
));
1327 MOZ_ASSERT(aNewSize
> kPageSize
);
1329 Delay reuseDelay
= ReuseDelay(lock
);
1331 // Copy the usable size rather than the requested size, because the user
1332 // might have used malloc_usable_size() and filled up the usable size. Note
1333 // that FreePage() checks aArenaId (via SetPageFreed()).
1334 size_t oldUsableSize
= gMut
->PageUsableSize(lock
, index
);
1335 memcpy(newPtr
, aOldPtr
, std::min(oldUsableSize
, aNewSize
));
1336 FreePage(lock
, index
, aArenaId
, stack
, reuseDelay
);
1337 LOG("PageRealloc-Free(%p[%zu], %zu) -> %p, %zu delay, reuse at ~%zu\n",
1338 aOldPtr
, index
, aNewSize
, newPtr
, size_t(reuseDelay
),
1339 size_t(GAtomic::Now()) + reuseDelay
);
1344 static void* replace_realloc(void* aOldPtr
, size_t aNewSize
) {
1345 return PageRealloc(Nothing(), aOldPtr
, aNewSize
);
1348 // This handles both free and moz_arena_free.
1349 MOZ_ALWAYS_INLINE
static void PageFree(const Maybe
<arena_id_t
>& aArenaId
,
1351 PtrKind pk
= gConst
->PtrKind(aPtr
);
1352 if (pk
.IsNothing()) {
1353 // Not a page allocation.
1354 return aArenaId
.isSome() ? sMallocTable
.moz_arena_free(*aArenaId
, aPtr
)
1355 : sMallocTable
.free(aPtr
);
1358 if (pk
.IsGuardPage()) {
1359 GMut::CrashOnGuardPage(aPtr
);
1362 // At this point we know we have an allocation page.
1363 uintptr_t index
= pk
.AllocPageIndex();
1365 // Note that `disable` has no effect unless it is emplaced below.
1366 Maybe
<AutoDisableOnCurrentThread
> disable
;
1367 // Get the stack trace *before* locking the mutex.
1368 StackTrace freeStack
;
1369 if (GTls::IsDisabledOnCurrentThread()) {
1370 // PHC is disabled on this thread. Leave the stack empty.
1372 // Disable on this thread *before* getting the stack trace.
1377 MutexAutoLock
lock(GMut::sMutex
);
1379 // Check for a double-free.
1380 gMut
->EnsureValidAndInUse(lock
, aPtr
, index
);
1382 // Note that FreePage() checks aArenaId (via SetPageFreed()).
1383 Delay reuseDelay
= ReuseDelay(lock
);
1384 FreePage(lock
, index
, aArenaId
, freeStack
, reuseDelay
);
1387 GMut::PageStats stats
= gMut
->GetPageStats(lock
);
1389 LOG("PageFree(%p[%zu]), %zu delay, reuse at ~%zu, fullness %zu/%zu/%zu\n",
1390 aPtr
, index
, size_t(reuseDelay
), size_t(GAtomic::Now()) + reuseDelay
,
1391 stats
.mNumAlloced
, stats
.mNumFreed
, kNumAllocPages
);
1394 static void replace_free(void* aPtr
) { return PageFree(Nothing(), aPtr
); }
1396 // This handles memalign and moz_arena_memalign.
1397 MOZ_ALWAYS_INLINE
static void* PageMemalign(const Maybe
<arena_id_t
>& aArenaId
,
1400 MOZ_RELEASE_ASSERT(IsPowerOfTwo(aAlignment
));
1402 // PHC can't satisfy an alignment greater than a page size, so fall back to
1403 // mozjemalloc in that case.
1404 void* ptr
= nullptr;
1405 if (aAlignment
<= kPageSize
) {
1406 ptr
= MaybePageAlloc(aArenaId
, aReqSize
, aAlignment
, /* aZero */ false);
1409 : (aArenaId
.isSome()
1410 ? sMallocTable
.moz_arena_memalign(*aArenaId
, aAlignment
,
1412 : sMallocTable
.memalign(aAlignment
, aReqSize
));
1415 static void* replace_memalign(size_t aAlignment
, size_t aReqSize
) {
1416 return PageMemalign(Nothing(), aAlignment
, aReqSize
);
1419 static size_t replace_malloc_usable_size(usable_ptr_t aPtr
) {
1420 PtrKind pk
= gConst
->PtrKind(aPtr
);
1421 if (pk
.IsNothing()) {
1422 // Not a page allocation. Measure it normally.
1423 return sMallocTable
.malloc_usable_size(aPtr
);
1426 if (pk
.IsGuardPage()) {
1427 GMut::CrashOnGuardPage(const_cast<void*>(aPtr
));
1430 // At this point we know aPtr lands within an allocation page, due to the
1431 // math done in the PtrKind constructor. But if aPtr points to memory
1432 // before the base address of the allocation, we return 0.
1433 uintptr_t index
= pk
.AllocPageIndex();
1435 MutexAutoLock
lock(GMut::sMutex
);
1437 void* pageBaseAddr
= gMut
->AllocPageBaseAddr(lock
, index
);
1439 if (MOZ_UNLIKELY(aPtr
< pageBaseAddr
)) {
1443 return gMut
->PageUsableSize(lock
, index
);
1446 static size_t metadata_size() {
1447 return sMallocTable
.malloc_usable_size(gConst
) +
1448 sMallocTable
.malloc_usable_size(gMut
);
1451 void replace_jemalloc_stats(jemalloc_stats_t
* aStats
,
1452 jemalloc_bin_stats_t
* aBinStats
) {
1453 sMallocTable
.jemalloc_stats_internal(aStats
, aBinStats
);
1455 // Add all the pages to `mapped`.
1456 size_t mapped
= kAllPagesSize
;
1457 aStats
->mapped
+= mapped
;
1459 size_t allocated
= 0;
1461 MutexAutoLock
lock(GMut::sMutex
);
1463 // Add usable space of in-use allocations to `allocated`.
1464 for (size_t i
= 0; i
< kNumAllocPages
; i
++) {
1465 if (gMut
->IsPageInUse(lock
, i
)) {
1466 allocated
+= gMut
->PageUsableSize(lock
, i
);
1470 aStats
->allocated
+= allocated
;
1472 // guards is the gap between `allocated` and `mapped`. In some ways this
1473 // almost fits into aStats->wasted since it feels like wasted memory. However
1474 // wasted should only include committed memory and these guard pages are
1475 // uncommitted. Therefore we don't include it anywhere.
1476 // size_t guards = mapped - allocated;
1478 // aStats.page_cache and aStats.bin_unused are left unchanged because PHC
1479 // doesn't have anything corresponding to those.
  // The metadata is stored in normal heap allocations, so it's measured by
  // mozjemalloc as `allocated`. Move it into `bookkeeping`.
  // It's also reported under explicit/heap-overhead/phc/fragmentation in
  // about:memory.
1485 size_t bookkeeping
= metadata_size();
1486 aStats
->allocated
-= bookkeeping
;
1487 aStats
->bookkeeping
+= bookkeeping
;
1490 void replace_jemalloc_ptr_info(const void* aPtr
, jemalloc_ptr_info_t
* aInfo
) {
1491 // We need to implement this properly, because various code locations do
1492 // things like checking that allocations are in the expected arena.
1493 PtrKind pk
= gConst
->PtrKind(aPtr
);
1494 if (pk
.IsNothing()) {
1495 // Not a page allocation.
1496 return sMallocTable
.jemalloc_ptr_info(aPtr
, aInfo
);
1499 if (pk
.IsGuardPage()) {
1500 // Treat a guard page as unknown because there's no better alternative.
1501 *aInfo
= {TagUnknown
, nullptr, 0, 0};
1505 // At this point we know we have an allocation page.
1506 uintptr_t index
= pk
.AllocPageIndex();
1508 MutexAutoLock
lock(GMut::sMutex
);
1510 gMut
->FillJemallocPtrInfo(lock
, aPtr
, index
, aInfo
);
1512 LOG("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu, %zu}\n", aPtr
, index
,
1513 size_t(aInfo
->tag
), aInfo
->addr
, aInfo
->size
, aInfo
->arenaId
);
1515 LOG("JemallocPtrInfo(%p[%zu]) -> {%zu, %p, %zu}\n", aPtr
, index
,
1516 size_t(aInfo
->tag
), aInfo
->addr
, aInfo
->size
);
1520 arena_id_t
replace_moz_create_arena_with_params(arena_params_t
* aParams
) {
1521 // No need to do anything special here.
1522 return sMallocTable
.moz_create_arena_with_params(aParams
);
1525 void replace_moz_dispose_arena(arena_id_t aArenaId
) {
1526 // No need to do anything special here.
1527 return sMallocTable
.moz_dispose_arena(aArenaId
);
1530 void replace_moz_set_max_dirty_page_modifier(int32_t aModifier
) {
1531 // No need to do anything special here.
1532 return sMallocTable
.moz_set_max_dirty_page_modifier(aModifier
);
1535 void* replace_moz_arena_malloc(arena_id_t aArenaId
, size_t aReqSize
) {
1536 return PageMalloc(Some(aArenaId
), aReqSize
);
1539 void* replace_moz_arena_calloc(arena_id_t aArenaId
, size_t aNum
,
1541 return PageCalloc(Some(aArenaId
), aNum
, aReqSize
);
1544 void* replace_moz_arena_realloc(arena_id_t aArenaId
, void* aOldPtr
,
1546 return PageRealloc(Some(aArenaId
), aOldPtr
, aNewSize
);
1549 void replace_moz_arena_free(arena_id_t aArenaId
, void* aPtr
) {
1550 return PageFree(Some(aArenaId
), aPtr
);
1553 void* replace_moz_arena_memalign(arena_id_t aArenaId
, size_t aAlignment
,
1555 return PageMemalign(Some(aArenaId
), aAlignment
, aReqSize
);
1558 class PHCBridge
: public ReplaceMallocBridge
{
1559 virtual bool IsPHCAllocation(const void* aPtr
, phc::AddrInfo
* aOut
) override
{
1560 PtrKind pk
= gConst
->PtrKind(aPtr
);
1561 if (pk
.IsNothing()) {
1565 bool isGuardPage
= false;
1566 if (pk
.IsGuardPage()) {
1567 if ((uintptr_t(aPtr
) % kPageSize
) < (kPageSize
/ 2)) {
1568 // The address is in the lower half of a guard page, so it's probably an
1569 // overflow. But first check that it is not on the very first guard
1570 // page, in which case it cannot be an overflow, and we ignore it.
1571 if (gConst
->IsInFirstGuardPage(aPtr
)) {
1575 // Get the allocation page preceding this guard page.
1576 pk
= gConst
->PtrKind(static_cast<const uint8_t*>(aPtr
) - kPageSize
);
1579 // The address is in the upper half of a guard page, so it's probably an
1580 // underflow. Get the allocation page following this guard page.
1581 pk
= gConst
->PtrKind(static_cast<const uint8_t*>(aPtr
) + kPageSize
);
1584 // Make a note of the fact that we hit a guard page.
1588 // At this point we know we have an allocation page.
1589 uintptr_t index
= pk
.AllocPageIndex();
1592 MutexAutoLock
lock(GMut::sMutex
);
1593 gMut
->FillAddrInfo(lock
, index
, aPtr
, isGuardPage
, *aOut
);
1594 LOG("IsPHCAllocation: %zu, %p, %zu, %zu, %zu\n", size_t(aOut
->mKind
),
1595 aOut
->mBaseAddr
, aOut
->mUsableSize
,
1596 aOut
->mAllocStack
.isSome() ? aOut
->mAllocStack
->mLength
: 0,
1597 aOut
->mFreeStack
.isSome() ? aOut
->mFreeStack
->mLength
: 0);
1602 virtual void DisablePHCOnCurrentThread() override
{
1603 GTls::DisableOnCurrentThread();
1604 LOG("DisablePHCOnCurrentThread: %zu\n", 0ul);
1607 virtual void ReenablePHCOnCurrentThread() override
{
1608 GTls::EnableOnCurrentThread();
1609 LOG("ReenablePHCOnCurrentThread: %zu\n", 0ul);
1612 virtual bool IsPHCEnabledOnCurrentThread() override
{
1613 bool enabled
= !GTls::IsDisabledOnCurrentThread();
1614 LOG("IsPHCEnabledOnCurrentThread: %zu\n", size_t(enabled
));
1618 virtual void PHCMemoryUsage(
1619 mozilla::phc::MemoryUsage
& aMemoryUsage
) override
{
1620 aMemoryUsage
.mMetadataBytes
= metadata_size();
1622 MutexAutoLock
lock(GMut::sMutex
);
1623 aMemoryUsage
.mFragmentationBytes
= gMut
->FragmentationBytes();
1625 aMemoryUsage
.mFragmentationBytes
= 0;
1630 // WARNING: this function runs *very* early -- before all static initializers
1631 // have run. For this reason, non-scalar globals (gConst, gMut) are allocated
1632 // dynamically (so we can guarantee their construction in this function) rather
1633 // than statically. GAtomic and GTls contain simple static data that doesn't
1634 // involve static initializers so they don't need to be allocated dynamically.
1635 void replace_init(malloc_table_t
* aMallocTable
, ReplaceMallocBridge
** aBridge
) {
1636 // Don't run PHC if the page size isn't 4 KiB.
1637 jemalloc_stats_t stats
;
1638 aMallocTable
->jemalloc_stats_internal(&stats
, nullptr);
1639 if (stats
.page_size
!= kPageSize
) {
1643 sMallocTable
= *aMallocTable
;
1645 // The choices of which functions to replace are complex enough that we set
1646 // them individually instead of using MALLOC_FUNCS/malloc_decls.h.
1648 aMallocTable
->malloc
= replace_malloc
;
1649 aMallocTable
->calloc
= replace_calloc
;
1650 aMallocTable
->realloc
= replace_realloc
;
1651 aMallocTable
->free
= replace_free
;
1652 aMallocTable
->memalign
= replace_memalign
;
1654 // posix_memalign, aligned_alloc & valloc: unset, which means they fall back
1655 // to replace_memalign.
1656 aMallocTable
->malloc_usable_size
= replace_malloc_usable_size
;
1657 // default malloc_good_size: the default suffices.
1659 aMallocTable
->jemalloc_stats_internal
= replace_jemalloc_stats
;
1660 // jemalloc_purge_freed_pages: the default suffices.
1661 // jemalloc_free_dirty_pages: the default suffices.
1662 // jemalloc_thread_local_arena: the default suffices.
1663 aMallocTable
->jemalloc_ptr_info
= replace_jemalloc_ptr_info
;
1665 aMallocTable
->moz_create_arena_with_params
=
1666 replace_moz_create_arena_with_params
;
1667 aMallocTable
->moz_dispose_arena
= replace_moz_dispose_arena
;
1668 aMallocTable
->moz_arena_malloc
= replace_moz_arena_malloc
;
1669 aMallocTable
->moz_arena_calloc
= replace_moz_arena_calloc
;
1670 aMallocTable
->moz_arena_realloc
= replace_moz_arena_realloc
;
1671 aMallocTable
->moz_arena_free
= replace_moz_arena_free
;
1672 aMallocTable
->moz_arena_memalign
= replace_moz_arena_memalign
;
1674 static PHCBridge bridge
;
1678 // Avoid deadlocks when forking by acquiring our state lock prior to forking
1679 // and releasing it after forking. See |LogAlloc|'s |replace_init| for
1680 // in-depth details.
1682 // Note: This must run after attempting an allocation so as to give the
1683 // system malloc a chance to insert its own atfork handler.
1684 sMallocTable
.malloc(-1);
1685 pthread_atfork(GMut::prefork
, GMut::postfork_parent
, GMut::postfork_child
);
1688 // gConst and gMut are never freed. They live for the life of the process.
1689 gConst
= InfallibleAllocPolicy::new_
<GConst
>();
1691 gMut
= InfallibleAllocPolicy::new_
<GMut
>();
1693 MutexAutoLock
lock(GMut::sMutex
);
1694 Delay firstAllocDelay
=
1695 Rnd64ToDelay
<kAvgFirstAllocDelay
>(gMut
->Random64(lock
));
1696 GAtomic::Init(firstAllocDelay
);