1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 // Portions of this file were originally under the following license:
9 // Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
10 // All rights reserved.
11 // Copyright (C) 2007-2017 Mozilla Foundation.
13 // Redistribution and use in source and binary forms, with or without
14 // modification, are permitted provided that the following conditions
16 // 1. Redistributions of source code must retain the above copyright
17 // notice(s), this list of conditions and the following disclaimer as
18 // the first lines of this file unmodified other than the possible
19 // addition of one or more copyright notices.
20 // 2. Redistributions in binary form must reproduce the above copyright
21 // notice(s), this list of conditions and the following disclaimer in
22 // the documentation and/or other materials provided with the
25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
29 // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
33 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
34 // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
35 // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 // *****************************************************************************
39 // This allocator implementation is designed to provide scalable performance
40 // for multi-threaded programs on multi-processor systems. The following
41 // features are included for this purpose:
43 // + Multiple arenas are used if there are multiple CPUs, which reduces lock
44 // contention and cache sloshing.
46 // + Cache line sharing between arenas is avoided for internal data structures.
49 // + Memory is managed in chunks and runs (chunks can be split into runs),
50 // rather than as individual pages. This provides a constant-time
51 // mechanism for associating allocations with particular arenas.
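//
//   For illustration: because chunks are kChunkSize-aligned, the chunk that
//   owns an allocation (and through it the arena) is found with a single
//   mask, as GetChunkForPtr() further down does:
//
//     arena_chunk_t* chunk =
//         (arena_chunk_t*)(uintptr_t(aPtr) & ~kChunkSizeMask);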
53 // Allocation requests are rounded up to the nearest size class, and no record
54 // of the original request size is maintained. Allocations are broken into
55 // categories according to size class. Assuming runtime defaults, the size
56 // classes in each category are as follows (for x86, x86_64 and Apple Silicon):
58 // |=========================================================|
59 // | Category | Subcategory | x86 | x86_64 | Mac ARM |
60 // |---------------------------+---------+---------+---------|
61 // | Word size | 32 bit | 64 bit | 64 bit |
62 // | Page size | 4 Kb | 4 Kb | 16 Kb |
63 // |=========================================================|
64 // | Small | Tiny | 4/-w | -w | - |
65 // | | | 8 | 8/-w | 8 |
66 // | |----------------+---------|---------|---------|
67 // | | Quantum-spaced | 16 | 16 | 16 |
68 // | | | 32 | 32 | 32 |
69 // | | | 48 | 48 | 48 |
70 // | | | ... | ... | ... |
71 // | | | 480 | 480 | 480 |
72 // | | | 496 | 496 | 496 |
73 // | |----------------+---------|---------|---------|
74 // | | Quantum-wide- | 512 | 512 | 512 |
75 // | | spaced | 768 | 768 | 768 |
76 // | | | ... | ... | ... |
77 // | | | 3584 | 3584 | 3584 |
78 // | | | 3840 | 3840 | 3840 |
79 // | |----------------+---------|---------|---------|
80 // | | Sub-page | - | - | 4 kB |
81 // | | | - | - | 8 kB |
82 // |=========================================================|
83 // | Large | 4 kB | 4 kB | - |
84 // | | 8 kB | 8 kB | - |
85 // | | 12 kB | 12 kB | - |
86 // | | 16 kB | 16 kB | 16 kB |
87 // | | ... | ... | - |
88 // | | 32 kB | 32 kB | 32 kB |
89 // | | ... | ... | ... |
90 // | | 1008 kB | 1008 kB | 1008 kB |
91 // | | 1012 kB | 1012 kB | - |
92 // | | 1016 kB | 1016 kB | - |
93 // | | 1020 kB | 1020 kB | - |
94 // |=========================================================|
95 // | Huge | 1 MB | 1 MB | 1 MB |
96 // | | 2 MB | 2 MB | 2 MB |
97 // | | 3 MB | 3 MB | 3 MB |
98 // | | ... | ... | ... |
99 // |=========================================================|
102 // n: Size class exists for this platform.
103 // n/-w: This size class doesn't exist on Windows (see kMinTinyClass).
104 // -: This size class doesn't exist for this platform.
105 // ...: Size classes follow a pattern here.
107 // NOTE: Due to Mozilla bug 691003, we cannot reserve less than one word for an
108 // allocation on Linux or Mac. So on 32-bit *nix, the smallest bucket size is
109 // 4 bytes, and on 64-bit, the smallest bucket size is 8 bytes.
111 // A different mechanism is used for each category:
113 // Small : Each size class is segregated into its own set of runs. Each run
114 // maintains a bitmap of which regions are free/allocated.
116 // Large : Each allocation is backed by a dedicated run. Metadata are stored
117 // in the associated arena chunk header maps.
119 // Huge : Each allocation is backed by a dedicated contiguous set of chunks.
120 // Metadata are stored in a separate red-black tree.
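//
// For illustration, with runtime defaults on a 64-bit platform with 4 KiB
// pages: malloc(1) is served from the 8-byte tiny class, malloc(20) from the
// 32-byte quantum-spaced class, malloc(600) from the 768-byte
// quantum-wide-spaced class, malloc(5000) from an 8 kB large run, and a
// 2 MB request is a huge allocation rounded to a multiple of the 1 MB chunk
// size.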
122 // *****************************************************************************
124 #include "mozmemory_wrap.h"
125 #include "mozjemalloc.h"
126 #include "mozjemalloc_types.h"
131 #include <type_traits>
134 # include <windows.h>
136 # include <sys/mman.h>
140 # include <libkern/OSAtomic.h>
141 # include <mach/mach_init.h>
142 # include <mach/vm_map.h>
145 #include "mozilla/Atomics.h"
146 #include "mozilla/Alignment.h"
147 #include "mozilla/ArrayUtils.h"
148 #include "mozilla/Assertions.h"
149 #include "mozilla/CheckedInt.h"
150 #include "mozilla/DoublyLinkedList.h"
151 #include "mozilla/HelperMacros.h"
152 #include "mozilla/Likely.h"
153 #include "mozilla/Literals.h"
154 #include "mozilla/MathAlgorithms.h"
155 #include "mozilla/RandomNum.h"
156 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
157 // instead of the one defined here; use only MozTagAnonymousMemory().
158 #include "mozilla/TaggedAnonymousMemory.h"
159 #include "mozilla/ThreadLocal.h"
160 #include "mozilla/UniquePtr.h"
161 #include "mozilla/Unused.h"
162 #include "mozilla/XorShift128PlusRNG.h"
163 #include "mozilla/fallible.h"
170 # include "mozmemory_utils.h"
173 // For GetGeckoProcessType(), when it's used.
174 #if defined(XP_WIN) && !defined(JS_STANDALONE)
175 # include "mozilla/ProcessType.h"
178 using namespace mozilla;
180 // On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
181 // operating system. If we release 1MB of live pages with MADV_DONTNEED, our
182 // RSS will decrease by 1MB (almost) immediately.
184 // On Mac, we use madvise(MADV_FREE). Unlike MADV_DONTNEED on Linux, MADV_FREE
185 // on Mac doesn't cause the OS to release the specified pages immediately; the
186 // OS keeps them in our process until the machine comes under memory pressure.
188 // It's therefore difficult to measure the process's RSS on Mac, since, in the
189 // absence of memory pressure, the contribution from the heap to RSS will not
190 // decrease due to our madvise calls.
192 // We therefore define MALLOC_DOUBLE_PURGE on Mac. This causes jemalloc to
193 // track which pages have been MADV_FREE'd. You can then call
194 // jemalloc_purge_freed_pages(), which will force the OS to release those
195 // MADV_FREE'd pages, making the process's RSS reflect its true memory usage.
197 // The jemalloc_purge_freed_pages definition in memory/build/mozmemory.h needs
198 // to be adjusted if MALLOC_DOUBLE_PURGE is ever enabled on Linux.
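//
// A minimal illustration (not this allocator's code) of the two strategies
// described above, assuming a POSIX system where both flags exist:
//
//   madvise(addr, len, MADV_DONTNEED);  // Linux: RSS drops (almost) at once.
//   madvise(addr, len, MADV_FREE);      // Mac: pages linger until the OS is
//                                       // under memory pressure, which is why
//                                       // MALLOC_DOUBLE_PURGE tracks them so
//                                       // jemalloc_purge_freed_pages() can
//                                       // force them out.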
201 # define MALLOC_DOUBLE_PURGE
205 # define MALLOC_DECOMMIT
208 // Define MALLOC_RUNTIME_CONFIG depending on MOZ_DEBUG. Overriding this as
209 // a build option allows us to build mozjemalloc/firefox without runtime asserts
210 // but with runtime configuration, which makes some testing easier.
213 # define MALLOC_RUNTIME_CONFIG
216 // When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
217 // compile-time for better performance, as opposed to determined at
218 // runtime. Some platforms can have different page sizes at runtime
219 // depending on kernel configuration, so they are opted out by default.
220 // Debug builds are opted out too, for test coverage.
221 #ifndef MALLOC_RUNTIME_CONFIG
222 # if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && \
223 !defined(__aarch64__) && !defined(__powerpc__) && !defined(XP_MACOSX) && \
224 !defined(__loongarch__)
225 # define MALLOC_STATIC_PAGESIZE 1
230 # define STDERR_FILENO 2
232 // Implement getenv without using malloc.
233 static char mozillaMallocOptionsBuf[64];
235 # define getenv xgetenv
236 static char* getenv(const char* name) {
237   if (GetEnvironmentVariableA(name, mozillaMallocOptionsBuf,
238                               sizeof(mozillaMallocOptionsBuf)) > 0) {
239     return mozillaMallocOptionsBuf;
247 // Newer Linux systems support MADV_FREE, but we're not supporting
248 // that properly; see bug #1406304.
249 # if defined(XP_LINUX) && defined(MADV_FREE)
253 # define MADV_FREE MADV_DONTNEED
257 // Some tools, such as /dev/dsp wrappers, are LD_PRELOAD libraries that
258 // happen to override mmap() and call dlsym() from their overridden
259 // mmap(). The problem is that dlsym() calls malloc(), which ends
260 // up in a deadlock in jemalloc.
261 // On these systems, we prefer to directly use the system call.
262 // We do that for Linux systems and kfreebsd with GNU userland.
263 // Note that sanity checks (alignment of offset, ...) are not done because
264 // the uses of mmap in jemalloc are pretty limited.
266 // On Alpha, glibc has a bug that prevents syscall() from working for system
267 // calls with 6 arguments.
268 #if (defined(XP_LINUX) && !defined(__alpha__)) || \
269 (defined(__FreeBSD_kernel__) && defined(__GLIBC__))
270 # include <sys/syscall.h>
271 # if defined(SYS_mmap) || defined(SYS_mmap2)
272 static inline void* _mmap(void* addr, size_t length, int prot, int flags,
273                           int fd, off_t offset) {
274 // S390 only passes one argument to the mmap system call, which is a
275 // pointer to a structure containing the arguments.
284   } args = {addr, length, prot, flags, fd, offset};
285   return (void*)syscall(SYS_mmap, &args);
287 #  if defined(ANDROID) && defined(__aarch64__) && defined(SYS_mmap2)
288 // Android NDK defines SYS_mmap2 for AArch64 despite it not supporting mmap2.
292   return (void*)syscall(SYS_mmap2, addr, length, prot, flags, fd, offset >> 12);
294   return (void*)syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
299 # define munmap(a, l) syscall(SYS_munmap, a, l)
303 // ***************************************************************************
304 // Structures for chunk headers for chunks used for non-huge allocations.
308 // Each element of the chunk map corresponds to one page within the chunk.
309 struct arena_chunk_map_t {
310   // Linkage for run trees. There are two disjoint uses:
312   //   1) arena_t's tree of available runs.
313   //   2) arena_run_t conceptually uses this linkage for in-use non-full
314   //      runs, rather than directly embedding linkage.
315   RedBlackTreeNode<arena_chunk_map_t> link;
317 // Run address (or size) and various flags are stored together. The bit
318 // layout looks like (assuming 32-bit system):
320 // ???????? ???????? ????---- -mckdzla
322 // ? : Unallocated: Run address for first/last pages, unset for internal pages.
324 // Small: Run address.
325 // Large: Run size for first page, unset for trailing pages.
327 // m : MADV_FREE/MADV_DONTNEED'ed?
335 // Following are example bit patterns for the three types of runs.
//
// Unallocated:
344 //   ssssssss ssssssss ssss---- --c-----
345 //   xxxxxxxx xxxxxxxx xxxx---- ----d---
346 //   ssssssss ssssssss ssss---- -----z--
//
// Small:
349 //   rrrrrrrr rrrrrrrr rrrr---- -------a
350 //   rrrrrrrr rrrrrrrr rrrr---- -------a
351 //   rrrrrrrr rrrrrrrr rrrr---- -------a
//
// Large:
354 //   ssssssss ssssssss ssss---- ------la
355 //   -------- -------- -------- ------la
356 //   -------- -------- -------- ------la
359 // Note that CHUNK_MAP_DECOMMITTED's meaning varies depending on whether
360 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are defined.
362 // If MALLOC_DECOMMIT is defined, a page which is CHUNK_MAP_DECOMMITTED must be
363 // re-committed with pages_commit() before it may be touched. If
364 // MALLOC_DECOMMIT is defined, MALLOC_DOUBLE_PURGE may not be defined.
366 // If neither MALLOC_DECOMMIT nor MALLOC_DOUBLE_PURGE is defined, pages which
367 // are madvised (with either MADV_DONTNEED or MADV_FREE) are marked with
368 // CHUNK_MAP_MADVISED.
370 // Otherwise, if MALLOC_DECOMMIT is not defined and MALLOC_DOUBLE_PURGE is
371 // defined, then a page which is madvised is marked as CHUNK_MAP_MADVISED.
372 // When it's finally freed with jemalloc_purge_freed_pages, the page is marked
373 // as CHUNK_MAP_DECOMMITTED.
374 #define CHUNK_MAP_MADVISED ((size_t)0x40U)
375 #define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
376 #define CHUNK_MAP_MADVISED_OR_DECOMMITTED \
377 (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
378 #define CHUNK_MAP_KEY ((size_t)0x10U)
379 #define CHUNK_MAP_DIRTY ((size_t)0x08U)
380 #define CHUNK_MAP_ZEROED ((size_t)0x04U)
381 #define CHUNK_MAP_LARGE ((size_t)0x02U)
382 #define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
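// For illustration (not exhaustive), given a map entry's bits for a page:
//
//   if ((bits & CHUNK_MAP_ALLOCATED) && (bits & CHUNK_MAP_LARGE)) {
//     // Page of a large run; on the first page, bits & ~gPageSizeMask is the
//     // run size.
//   } else if (bits & CHUNK_MAP_ALLOCATED) {
//     // Page of a small run; the high bits hold the run address.
//   } else {
//     // Free page; DIRTY/ZEROED/MADVISED/DECOMMITTED describe its state.
//   }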
385 // Arena chunk header.
386 struct arena_chunk_t {
387   // Arena that owns the chunk.
390   // Linkage for the arena's tree of dirty chunks.
391   RedBlackTreeNode<arena_chunk_t> link_dirty;
393 #ifdef MALLOC_DOUBLE_PURGE
394 // If we're double-purging, we maintain a linked list of chunks which
395 // have pages which have been madvise(MADV_FREE)'d but not explicitly
398 // We're currently lazy and don't remove a chunk from this list when
399 // all its madvised pages are recommitted.
400   DoublyLinkedListElement<arena_chunk_t> chunks_madvised_elem;
403 // Number of dirty pages.
406 // Map of pages within chunk that keeps track of free/large/small.
407   arena_chunk_map_t map[1];  // Dynamically sized.
410 // ***************************************************************************
411 // Constants defining allocator size classes and behavior.
413 // Maximum size of L1 cache line. This is used to avoid cache line aliasing,
414 // so over-estimates are okay (up to a point), but under-estimates will
415 // negatively affect performance.
416 static const size_t kCacheLineSize = 64;
418 // Our size classes are inclusive ranges of memory sizes. By describing the
419 // minimums and how memory is allocated in each range, the maximums can be determined.
422 // Smallest size class to support. On Windows the smallest allocation size
423 // must be 8 bytes on 32-bit, 16 bytes on 64-bit. On Linux and Mac, even
424 // malloc(1) must reserve a word's worth of memory (see Mozilla bug 691003).
426 static const size_t kMinTinyClass = sizeof(void*) * 2;
428 static const size_t kMinTinyClass = sizeof(void*);
431 // Maximum tiny size class.
432 static const size_t kMaxTinyClass = 8;
434 // Smallest quantum-spaced size classes. It could actually also be labelled a
435 // tiny allocation, and is spaced as such from the largest tiny size class.
436 // Tiny classes being powers of 2, this is twice as large as the largest of them.
438 static const size_t kMinQuantumClass = kMaxTinyClass * 2;
439 static const size_t kMinQuantumWideClass = 512;
440 static const size_t kMinSubPageClass = 4_KiB;
442 // Amount (quantum) separating quantum-spaced size classes.
443 static const size_t kQuantum = 16;
444 static const size_t kQuantumMask = kQuantum - 1;
445 static const size_t kQuantumWide = 256;
446 static const size_t kQuantumWideMask = kQuantumWide - 1;
448 static const size_t kMaxQuantumClass = kMinQuantumWideClass - kQuantum;
449 static const size_t kMaxQuantumWideClass = kMinSubPageClass - kQuantumWide;
451 // We can optimise some divisions to shifts if these are powers of two.
452 static_assert(mozilla::IsPowerOfTwo(kQuantum),
453               "kQuantum is not a power of two");
454 static_assert(mozilla::IsPowerOfTwo(kQuantumWide),
455               "kQuantumWide is not a power of two");
457 static_assert(kMaxQuantumClass % kQuantum == 0,
458               "kMaxQuantumClass is not a multiple of kQuantum");
459 static_assert(kMaxQuantumWideClass % kQuantumWide == 0,
460               "kMaxQuantumWideClass is not a multiple of kQuantumWide");
461 static_assert(kQuantum < kQuantumWide,
462               "kQuantum must be smaller than kQuantumWide");
463 static_assert(mozilla::IsPowerOfTwo(kMinSubPageClass),
464               "kMinSubPageClass is not a power of two");
466 // Number of (2^n)-spaced tiny classes.
467 static const size_t kNumTinyClasses =
468     LOG2(kMaxTinyClass) - LOG2(kMinTinyClass) + 1;
470 // Number of quantum-spaced classes. We add kQuantum(Max) before subtracting to
471 // avoid underflow when a class is empty (Max<Min).
472 static const size_t kNumQuantumClasses =
473     (kMaxQuantumClass + kQuantum - kMinQuantumClass) / kQuantum;
474 static const size_t kNumQuantumWideClasses =
475     (kMaxQuantumWideClass + kQuantumWide - kMinQuantumWideClass) / kQuantumWide;
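// For example, with the defaults above, the quantum-spaced classes are
// 16, 32, ..., 496, i.e. (496 + 16 - 16) / 16 = 31 classes, and the
// quantum-wide-spaced classes are 512, 768, ..., 3840, i.e.
// (3840 + 256 - 512) / 256 = 14 classes.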
477 // Size and alignment of memory chunks that are allocated by the OS's virtual memory system.
479 static const size_t kChunkSize = 1_MiB;
480 static const size_t kChunkSizeMask = kChunkSize - 1;
482 #ifdef MALLOC_STATIC_PAGESIZE
483 // VM page size. It must divide the runtime CPU page size or the code
485 // Platform specific page size conditions copied from js/public/HeapAPI.h
486 # if defined(__powerpc64__)
487 static const size_t gPageSize = 64_KiB;
488 # elif defined(__loongarch64)
489 static const size_t gPageSize = 16_KiB;
491 static const size_t gPageSize = 4_KiB;
493 static const size_t gRealPageSize = gPageSize;
496 // When MALLOC_OPTIONS contains one or several `P`s, the page size used
497 // across the allocator is multiplied by 2 for each `P`, but we also keep
498 // the real page size for code paths that need it. gPageSize is thus a
499 // power of two greater or equal to gRealPageSize.
500 static size_t gRealPageSize;
501 static size_t gPageSize;
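// For example (illustrative): with a 4 KiB real page size, MALLOC_OPTIONS
// containing "PP" doubles the page size twice, so gPageSize becomes 16 KiB
// while gRealPageSize stays 4 KiB.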
504 #ifdef MALLOC_STATIC_PAGESIZE
505 # define DECLARE_GLOBAL(type, name)
506 # define DEFINE_GLOBALS
508 # define DEFINE_GLOBAL(type) static const type
509 # define GLOBAL_LOG2 LOG2
510 # define GLOBAL_ASSERT_HELPER1(x) static_assert(x, #x)
511 # define GLOBAL_ASSERT_HELPER2(x, y) static_assert(x, y)
512 # define GLOBAL_ASSERT(...) \
514 MOZ_PASTE_PREFIX_AND_ARG_COUNT(GLOBAL_ASSERT_HELPER, __VA_ARGS__), \
516 # define GLOBAL_CONSTEXPR constexpr
518 # define DECLARE_GLOBAL(type, name) static type name;
519 # define DEFINE_GLOBALS static void DefineGlobals() {
520 # define END_GLOBALS }
521 # define DEFINE_GLOBAL(type)
522 # define GLOBAL_LOG2 FloorLog2
523 # define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
524 # define GLOBAL_CONSTEXPR
527 DECLARE_GLOBAL(size_t, gMaxSubPageClass)
528 DECLARE_GLOBAL(uint8_t, gNumSubPageClasses)
529 DECLARE_GLOBAL(uint8_t, gPageSize2Pow)
530 DECLARE_GLOBAL(size_t, gPageSizeMask)
531 DECLARE_GLOBAL(size_t, gChunkNumPages)
532 DECLARE_GLOBAL(size_t, gChunkHeaderNumPages)
533 DECLARE_GLOBAL(size_t, gMaxLargeClass)
537 // Largest sub-page size class, or zero if there are none
538 DEFINE_GLOBAL(size_t)
539 gMaxSubPageClass = gPageSize / 2 >= kMinSubPageClass ? gPageSize / 2 : 0;
541 // Max size class for bins.
542 #define gMaxBinClass \
543 (gMaxSubPageClass ? gMaxSubPageClass : kMaxQuantumWideClass)
545 // Number of sub-page bins.
546 DEFINE_GLOBAL(uint8_t)
547 gNumSubPageClasses = []() GLOBAL_CONSTEXPR -> uint8_t {
548   if GLOBAL_CONSTEXPR (gMaxSubPageClass != 0) {
549     return FloorLog2(gMaxSubPageClass) - LOG2(kMinSubPageClass) + 1;
554 DEFINE_GLOBAL(uint8_t) gPageSize2Pow = GLOBAL_LOG2(gPageSize);
555 DEFINE_GLOBAL(size_t) gPageSizeMask = gPageSize - 1;
557 // Number of pages in a chunk.
558 DEFINE_GLOBAL(size_t) gChunkNumPages = kChunkSize >> gPageSize2Pow;
560 // Number of pages necessary for a chunk header plus a guard page.
561 DEFINE_GLOBAL(size_t)
562 gChunkHeaderNumPages =
563     1 + (((sizeof(arena_chunk_t) +
564            sizeof(arena_chunk_map_t) * (gChunkNumPages - 1) + gPageSizeMask) &
568 // One chunk, minus the header, minus a guard page.
569 DEFINE_GLOBAL(size_t)
gMaxLargeClass =
571     kChunkSize - gPageSize - (gChunkHeaderNumPages << gPageSize2Pow);
573 // Various sanity checks that regard configuration.
574 GLOBAL_ASSERT(1ULL << gPageSize2Pow == gPageSize,
575               "Page size is not a power of two");
576 GLOBAL_ASSERT(kQuantum >= sizeof(void*));
577 GLOBAL_ASSERT(kQuantum <= kQuantumWide);
578 GLOBAL_ASSERT(!kNumQuantumWideClasses ||
579               kQuantumWide <= (kMinSubPageClass - kMaxQuantumClass));
581 GLOBAL_ASSERT(kQuantumWide <= kMaxQuantumClass);
583 GLOBAL_ASSERT(gMaxSubPageClass >= kMinSubPageClass || gMaxSubPageClass == 0);
584 GLOBAL_ASSERT(gMaxLargeClass >= gMaxSubPageClass);
585 GLOBAL_ASSERT(kChunkSize >= gPageSize);
586 GLOBAL_ASSERT(kQuantum * 4 <= kChunkSize);
590 // Recycle at most 128 MiB of chunks. This means we retain at most
591 // 6.25% of the process address space on a 32-bit OS for later use.
592 static const size_t gRecycleLimit = 128_MiB;
594 // The current amount of recycled bytes, updated atomically.
595 static Atomic<size_t, ReleaseAcquire> gRecycledSize;
597 // Maximum number of dirty pages per arena.
598 #define DIRTY_MAX_DEFAULT (1U << 8)
600 static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
602 // Return the smallest chunk multiple that is >= s.
603 #define CHUNK_CEILING(s) (((s) + kChunkSizeMask) & ~kChunkSizeMask)
605 // Return the smallest cacheline multiple that is >= s.
606 #define CACHELINE_CEILING(s) \
607 (((s) + (kCacheLineSize - 1)) & ~(kCacheLineSize - 1))
609 // Return the smallest quantum multiple that is >= a.
610 #define QUANTUM_CEILING(a) (((a) + (kQuantumMask)) & ~(kQuantumMask))
611 #define QUANTUM_WIDE_CEILING(a) \
612 (((a) + (kQuantumWideMask)) & ~(kQuantumWideMask))
614 // Return the smallest sub page-size that is >= a.
615 #define SUBPAGE_CEILING(a) (RoundUpPow2(a))
617 // Return the smallest pagesize multiple that is >= s.
618 #define PAGE_CEILING(s) (((s) + gPageSizeMask) & ~gPageSizeMask)
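// For example: QUANTUM_CEILING(33) = (33 + 15) & ~15 = 48,
// QUANTUM_WIDE_CEILING(1000) = (1000 + 255) & ~255 = 1024, and, with 4 KiB
// pages, PAGE_CEILING(5000) = 8192.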
620 // Number of all the small-allocated classes
621 #define NUM_SMALL_CLASSES \
622 (kNumTinyClasses + kNumQuantumClasses + kNumQuantumWideClasses + \
625 // ***************************************************************************
626 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
627 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
628 # error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
631 static void* base_alloc(size_t aSize);
633 // Set to true once the allocator has been initialized.
634 #if defined(_MSC_VER) && !defined(__clang__)
635 // MSVC may create a static initializer for an Atomic<bool>, which may actually
636 // run after `malloc_init` has been called once, which triggers multiple initializations.
638 // We work around the problem by not using an Atomic<bool> at all. There is a
639 // theoretical problem with using `malloc_initialized` non-atomically, but
640 // practically, this is only true if `malloc_init` is never called before
641 // threads are created.
642 static bool malloc_initialized;
644 static Atomic<bool, MemoryOrdering::ReleaseAcquire> malloc_initialized;
647 static StaticMutex gInitLock MOZ_UNANNOTATED = {STATIC_MUTEX_INIT};
649 // ***************************************************************************
650 // Statistics data structures.
652 struct arena_stats_t {
653   // Number of bytes currently mapped.
656   // Current number of committed pages.
659   // Per-size-category statistics.
660   size_t allocated_small;
662   size_t allocated_large;
665 // ***************************************************************************
666 // Extent data structures.
670   ZEROED_CHUNK,    // chunk only contains zeroes.
671   ARENA_CHUNK,     // used to back arena runs created by arena_t::AllocRun.
672   HUGE_CHUNK,      // used to back huge allocations (e.g. arena_t::MallocHuge).
673   RECYCLED_CHUNK,  // chunk has been stored for future use by chunk_recycle.
677 struct extent_node_t {
679   // Linkage for the size/address-ordered tree for chunk recycling.
680   RedBlackTreeNode<extent_node_t> mLinkBySize;
681   // Arena id for huge allocations. It's meant to match mArena->mId,
682   // which only holds true when the arena hasn't been disposed of.
686   // Linkage for the address-ordered tree.
687   RedBlackTreeNode<extent_node_t> mLinkByAddr;
689   // Pointer to the extent that this tree node is responsible for.
692   // Total region size.
696   // What type of chunk this is; used for chunk recycling.
697   ChunkType mChunkType;
699   // A pointer to the associated arena, for huge allocations.
704 struct ExtentTreeSzTrait {
705   static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis) {
706     return aThis->mLinkBySize;
709   static inline Order Compare(extent_node_t* aNode, extent_node_t* aOther) {
710     Order ret = CompareInt(aNode->mSize, aOther->mSize);
711     return (ret != Order::eEqual) ? ret
712                                   : CompareAddr(aNode->mAddr, aOther->mAddr);
716 struct ExtentTreeTrait {
717   static RedBlackTreeNode<extent_node_t>& GetTreeNode(extent_node_t* aThis) {
718     return aThis->mLinkByAddr;
721   static inline Order Compare(extent_node_t* aNode, extent_node_t* aOther) {
722     return CompareAddr(aNode->mAddr, aOther->mAddr);
726 struct ExtentTreeBoundsTrait : public ExtentTreeTrait {
727   static inline Order Compare(extent_node_t* aKey, extent_node_t* aNode) {
728     uintptr_t key_addr = reinterpret_cast<uintptr_t>(aKey->mAddr);
729     uintptr_t node_addr = reinterpret_cast<uintptr_t>(aNode->mAddr);
730     size_t node_size = aNode->mSize;
732     // Is aKey within aNode?
733     if (node_addr <= key_addr && key_addr < node_addr + node_size) {
734       return Order::eEqual;
737     return CompareAddr(aKey->mAddr, aNode->mAddr);
741 // Describe the size classes to which allocations are rounded up.
742 // TODO: add large and huge types when the arena allocation code
743 // changes in a way that allows it to be beneficial.
754   explicit inline SizeClass(size_t aSize) {
755     if (aSize <= kMaxTinyClass) {
757       mSize = std::max(RoundUpPow2(aSize), kMinTinyClass);
758     } else if (aSize <= kMaxQuantumClass) {
760       mSize = QUANTUM_CEILING(aSize);
761     } else if (aSize <= kMaxQuantumWideClass) {
763       mSize = QUANTUM_WIDE_CEILING(aSize);
764     } else if (aSize <= gMaxSubPageClass) {
766       mSize = SUBPAGE_CEILING(aSize);
767     } else if (aSize <= gMaxLargeClass) {
769       mSize = PAGE_CEILING(aSize);
771       MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid size");
775   SizeClass& operator=(const SizeClass& aOther) = default;
777   bool operator==(const SizeClass& aOther) { return aOther.mSize == mSize; }
779   size_t Size() { return mSize; }
781   ClassType Type() { return mType; }
783   SizeClass Next() { return SizeClass(mSize + 1); }
792 // During deallocation we want to divide by the size class. This class
793 // provides a routine and sets up a constant as follows.
795 // To divide by a number D that is not a power of two we multiply by (2^17 /
796 // D) and then right shift by 17 positions.
804 // Where m is calculated during the FastDivisor constructor similarly to:
808 template <typename T>
811   // The shift amount (p) is chosen to minimise the size of m while
812   // working for divisors up to 65536 in steps of 16. I arrived at 17
813   // experimentally. I wanted a low number to minimise the range of m
814   // so it can fit in a uint16_t, 16 didn't work but 17 worked perfectly.
816   // We'd need to increase this if we allocated memory on smaller boundaries
818   static const unsigned p = 17;
820 // We can fit the inverted divisor in 16 bits, but we template it here for
825 // Needed so mBins can be constructed.
826 FastDivisor() : m(0) {}
828   FastDivisor(unsigned div, unsigned max) {
829     MOZ_ASSERT(div <= max);
831     // divide_inv_shift is large enough.
832     MOZ_ASSERT((1U << p) >= div);
834     // The calculation here for m is formula 26 from Section
835     // 10-9 "Unsigned Division by Divisors >= 1" in
836     // Henry S. Warren, Jr.'s Hacker's Delight, 2nd Ed.
837     unsigned m_ = ((1U << p) + div - 1 - (((1U << p) - 1) % div)) / div;
839     // Make sure that max * m does not overflow.
840     MOZ_DIAGNOSTIC_ASSERT(max < UINT_MAX / m_);
842     MOZ_ASSERT(m_ <= std::numeric_limits<T>::max());
843     m = static_cast<T>(m_);
845     // Initialisation made m non-zero.
848     // Test that all the divisions in the range we expected would work.
850     for (unsigned num = 0; num < max; num += div) {
851       MOZ_ASSERT(num / div == divide(num));
856   // Note that this always occurs in uint32_t regardless of m's type. If m is
857   // a uint16_t it will be zero-extended before the multiplication. We also use
858   // uint32_t rather than something that could possibly be larger because it is
859   // most-likely the cheapest multiplication.
860   inline uint32_t divide(uint32_t num) const {
861     // Check that m was initialised.
863     return (num * m) >> p;
867 template <typename T>
868 unsigned inline operator/(unsigned num, FastDivisor<T> divisor) {
869   return divisor.divide(num);
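// For example (illustrative): FastDivisor<uint16_t>(48, 65536) computes
// m = ((1 << 17) + 48 - 1 - ((1 << 17) - 1) % 48) / 48 = 2731, and then
// divide(96) = (96 * 2731) >> 17 = 262176 >> 17 = 2, i.e. 96 / 48.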
872 // ***************************************************************************
873 // Radix tree data structures.
875 // The number of bits passed to the template is the number of significant bits
876 // in an address to do a radix lookup with.
878 // An address is looked up by splitting it in kBitsPerLevel bit chunks, except
879 // the most significant bits, where the bit chunk is kBitsAtLevel1 which can be
880 // different if Bits is not a multiple of kBitsPerLevel.
882 // With e.g. sizeof(void*)=4, Bits=16 and kBitsPerLevel=8, an address is split
883 // like the following:
884 // 0x12345678 -> mRoot[0x12][0x34]
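// For illustration: on a 64-bit build kNodeSize is kCacheLineSize (64 bytes),
// so kBitsPerLevel = LOG2(64) - LOG2(8) = 3. gChunkRTree below uses
// Bits = 64 - LOG2(kChunkSize) = 44, giving kHeight = 15 levels with
// kBitsAtLevel1 = 44 % 3 = 2 bits consumed at the first level
// (2 + 14 * 3 = 44).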
885 template <size_t Bits>
886 class AddressRadixTree {
887   // Size of each radix tree node (as a power of 2).
888   // This impacts tree depth.
889 #ifdef HAVE_64BIT_BUILD
890   static const size_t kNodeSize = kCacheLineSize;
892   static const size_t kNodeSize = 16_KiB;
894   static const size_t kBitsPerLevel = LOG2(kNodeSize) - LOG2(sizeof(void*));
895   static const size_t kBitsAtLevel1 =
896       (Bits % kBitsPerLevel) ? Bits % kBitsPerLevel : kBitsPerLevel;
897   static const size_t kHeight = (Bits + kBitsPerLevel - 1) / kBitsPerLevel;
898   static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
899                 "AddressRadixTree parameters don't work out");
901   Mutex mLock MOZ_UNANNOTATED;
907   inline void* Get(void* aAddr);
909   // Returns whether the value was properly set.
910   inline bool Set(void* aAddr, void* aValue);
912   inline bool Unset(void* aAddr) { return Set(aAddr, nullptr); }
915   inline void** GetSlot(void* aAddr, bool aCreate = false);
918 // ***************************************************************************
919 // Arena data structures.
923 struct ArenaChunkMapLink {
924   static RedBlackTreeNode<arena_chunk_map_t>& GetTreeNode(
925       arena_chunk_map_t* aThis) {
930 struct ArenaRunTreeTrait : public ArenaChunkMapLink {
931   static inline Order Compare(arena_chunk_map_t* aNode,
932                               arena_chunk_map_t* aOther) {
935     return CompareAddr(aNode, aOther);
939 struct ArenaAvailTreeTrait : public ArenaChunkMapLink {
940   static inline Order Compare(arena_chunk_map_t* aNode,
941                               arena_chunk_map_t* aOther) {
942     size_t size1 = aNode->bits & ~gPageSizeMask;
943     size_t size2 = aOther->bits & ~gPageSizeMask;
944     Order ret = CompareInt(size1, size2);
945     return (ret != Order::eEqual)
947                : CompareAddr((aNode->bits & CHUNK_MAP_KEY) ? nullptr : aNode,
952 struct ArenaDirtyChunkTrait {
953   static RedBlackTreeNode<arena_chunk_t>& GetTreeNode(arena_chunk_t* aThis) {
954     return aThis->link_dirty;
957   static inline Order Compare(arena_chunk_t* aNode, arena_chunk_t* aOther) {
960     return CompareAddr(aNode, aOther);
964 #ifdef MALLOC_DOUBLE_PURGE
968 struct GetDoublyLinkedListElement<arena_chunk_t> {
969   static DoublyLinkedListElement<arena_chunk_t>& Get(arena_chunk_t* aThis) {
970     return aThis->chunks_madvised_elem;
973 } // namespace mozilla
977 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
979 # define ARENA_RUN_MAGIC 0x384adf93
981 // On 64-bit platforms, having the arena_bin_t pointer following
982 // the mMagic field means there's padding between both fields, making
983 // the run header larger than necessary.
984 // But when MOZ_DIAGNOSTIC_ASSERT_ENABLED is not set, starting the
985 // header with this field followed by the arena_bin_t pointer yields
986 // the same padding. We do want the mMagic field to appear first, so
987 // depending on whether MOZ_DIAGNOSTIC_ASSERT_ENABLED is set or not, we
988 // move some field to avoid padding.
990 // Number of free regions in run.
994 // Bin this run is associated with.
997 // Index of first element that might have a free region.
998   unsigned mRegionsMinElement;
1000 #if !defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
1001 // Number of free regions in run.
1005 // Bitmask of in-use regions (0: in use, 1: free).
1006   unsigned mRegionsMask[1];  // Dynamically sized.
1009 struct arena_bin_t {
1010   // Current run being used to service allocations of this bin's size class.
1012   arena_run_t* mCurrentRun;
1014   // Tree of non-full runs. This tree is used when looking for an
1015   // existing run when mCurrentRun is no longer usable. We choose the
1016   // non-full run that is lowest in memory; this policy tends to keep
1017   // objects packed well, and it can also help reduce the number of
1018   // almost-empty chunks.
1019   RedBlackTree<arena_chunk_map_t, ArenaRunTreeTrait> mNonFullRuns;
1021   // Bin's size class.
1024   // Total number of regions in a run for this bin's size class.
1025   uint32_t mRunNumRegions;
1027   // Number of elements in a run's mRegionsMask for this bin's size class.
1028   uint32_t mRunNumRegionsMask;
1030   // Offset of first region in a run for this bin's size class.
1031   uint32_t mRunFirstRegionOffset;
1033   // Current number of runs in this bin, full or otherwise.
1036   // A constant for fast division by size class. This value is 16 bits wide so
1037   // it is placed last.
1038   FastDivisor<uint16_t> mSizeDivisor;
1040   // Total number of pages in a run for this bin's size class.
1041   uint8_t mRunSizePages;
1043 // Amount of overhead runs are allowed to have.
1044   static constexpr double kRunOverhead = 1.6_percent;
1045   static constexpr double kRunRelaxedOverhead = 2.4_percent;
1047 // Initialize a bin for the given size class.
1048 // The generated run sizes, for a page size of 4 KiB, are:
1049 // size|run size|run size|run size|run
1050 // class|size class|size class|size class|size
1051 // 4 4 KiB 8 4 KiB 16 4 KiB 32 4 KiB
1052 // 48 4 KiB 64 4 KiB 80 4 KiB 96 4 KiB
1053 // 112 4 KiB 128 8 KiB 144 4 KiB 160 8 KiB
1054 // 176 4 KiB 192 4 KiB 208 8 KiB 224 4 KiB
1055 // 240 8 KiB 256 16 KiB 272 8 KiB 288 4 KiB
1056 // 304 12 KiB 320 12 KiB 336 4 KiB 352 8 KiB
1057 // 368 4 KiB 384 8 KiB 400 20 KiB 416 16 KiB
1058 // 432 12 KiB 448 4 KiB 464 16 KiB 480 8 KiB
1059 // 496 20 KiB 512 32 KiB 768 16 KiB 1024 64 KiB
1060 // 1280 24 KiB 1536 32 KiB 1792 16 KiB 2048 128 KiB
1061 // 2304 16 KiB 2560 48 KiB 2816 36 KiB 3072 64 KiB
1062 // 3328 36 KiB 3584 32 KiB 3840 64 KiB
1063   inline void Init(SizeClass aSizeClass);
1066 // We try to keep the above structure aligned with common cache line sizes;
1067 // often that's 64 bytes on x86 and ARM. We don't make assumptions for other
1069 #if defined(__x86_64__) || defined(__aarch64__)
1070 // On 64bit platforms this structure is often 48 bytes
1071 // long, which means every other array element will be properly aligned.
1072 static_assert(sizeof(arena_bin_t) == 48);
1073 #elif defined(__x86__) || defined(__arm__)
1074 static_assert(sizeof(arena_bin_t) == 32);
1078 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
1080 # define ARENA_MAGIC 0x947d3d24
1083 // Linkage for the tree of arenas by id.
1084 RedBlackTreeNode
<arena_t
> mLink
;
1086 // Arena id, that we keep away from the beginning of the struct so that
1087 // free list pointers in TypedBaseAlloc<arena_t> don't overflow in it,
1088 // and it keeps the value it had after the destructor.
1091 // All operations on this arena require that lock be locked. The MaybeMutex
1092 // class will elude locking if the arena is accessed from a single thread
1094 MaybeMutex mLock MOZ_UNANNOTATED
;
1096 arena_stats_t mStats
;
1099 // Tree of dirty-page-containing chunks this arena manages.
1100 RedBlackTree
<arena_chunk_t
, ArenaDirtyChunkTrait
> mChunksDirty
;
1102 #ifdef MALLOC_DOUBLE_PURGE
1103 // Head of a linked list of MADV_FREE'd-page-containing chunks this
1105 DoublyLinkedList
<arena_chunk_t
> mChunksMAdvised
;
1108 // In order to avoid rapid chunk allocation/deallocation when an arena
1109 // oscillates right on the cusp of needing a new chunk, cache the most
1110 // recently freed chunk. The spare is left in the arena's chunk trees
1111 // until it is deleted.
1113 // There is one spare chunk per arena, rather than one spare total, in
1114 // order to avoid interactions between multiple threads that could make
1115 // a single spare inadequate.
1116 arena_chunk_t
* mSpare
;
1118 // A per-arena opt-in to randomize the offset of small allocations
1119 bool mRandomizeSmallAllocations
;
1121 // Whether this is a private arena. Multiple public arenas are just a
1122 // performance optimization and not a safety feature.
1124 // Since, for example, we don't want thread-local arenas to grow too much, we
1125 // use the default arena for bigger allocations. We use this member to allow
1126 // realloc() to switch out of our arena if needed (which is not allowed for
1127 // private arenas for security).
1130 // A pseudorandom number generator. Initially null, it gets initialized
1131 // on first use to avoid recursive malloc initialization (e.g. on OSX
1132 // arc4random allocates memory).
1133 mozilla::non_crypto::XorShift128PlusRNG
* mPRNG
;
1136 // Current count of pages within unused runs that are potentially
1137 // dirty, and for which madvise(... MADV_FREE) has not been called. By
1138 // tracking this, we can institute a limit on how much dirty unused
1139 // memory is mapped for each arena.
1142 // Maximum value allowed for mNumDirty.
1145 int32_t mMaxDirtyIncreaseOverride
;
1146 int32_t mMaxDirtyDecreaseOverride
;
1149 // Size/address-ordered tree of this arena's available runs. This tree
1150 // is used for first-best-fit run allocation.
1151 RedBlackTree
<arena_chunk_map_t
, ArenaAvailTreeTrait
> mRunsAvail
;
1154 // mBins is used to store rings of free regions of the following sizes,
1155 // assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
1157 // | mBins[i] | size |
1158 // +----------+------+
1162 // +----------+------+
1171 // +----------+------+
1178 // +----------+------+
1179 arena_bin_t mBins
[1]; // Dynamically sized.
1181 explicit arena_t(arena_params_t
* aParams
, bool aIsPrivate
);
1185 void InitChunk(arena_chunk_t
* aChunk
);
1187 // This may return a chunk that should be destroyed with chunk_dealloc outside
1188 // of the arena lock. It is not the same chunk as was passed in (since that
1189 // chunk now becomes mSpare).
1190 [[nodiscard
]] arena_chunk_t
* DeallocChunk(arena_chunk_t
* aChunk
);
1192 arena_run_t
* AllocRun(size_t aSize
, bool aLarge
, bool aZero
);
1194 arena_chunk_t
* DallocRun(arena_run_t
* aRun
, bool aDirty
);
1196 [[nodiscard
]] bool SplitRun(arena_run_t
* aRun
, size_t aSize
, bool aLarge
,
1199 void TrimRunHead(arena_chunk_t
* aChunk
, arena_run_t
* aRun
, size_t aOldSize
,
1202 void TrimRunTail(arena_chunk_t
* aChunk
, arena_run_t
* aRun
, size_t aOldSize
,
1203 size_t aNewSize
, bool dirty
);
1205 arena_run_t
* GetNonFullBinRun(arena_bin_t
* aBin
);
1207 inline uint8_t FindFreeBitInMask(uint32_t aMask
, uint32_t& aRng
);
1209 inline void* ArenaRunRegAlloc(arena_run_t
* aRun
, arena_bin_t
* aBin
);
1211 inline void* MallocSmall(size_t aSize
, bool aZero
);
1213 void* MallocLarge(size_t aSize
, bool aZero
);
1215 void* MallocHuge(size_t aSize
, bool aZero
);
1217 void* PallocLarge(size_t aAlignment
, size_t aSize
, size_t aAllocSize
);
1219 void* PallocHuge(size_t aSize
, size_t aAlignment
, bool aZero
);
1221 void RallocShrinkLarge(arena_chunk_t
* aChunk
, void* aPtr
, size_t aSize
,
1224 bool RallocGrowLarge(arena_chunk_t
* aChunk
, void* aPtr
, size_t aSize
,
1227 void* RallocSmallOrLarge(void* aPtr
, size_t aSize
, size_t aOldSize
);
1229 void* RallocHuge(void* aPtr
, size_t aSize
, size_t aOldSize
);
1232 inline void* Malloc(size_t aSize
, bool aZero
);
1234 void* Palloc(size_t aAlignment
, size_t aSize
);
1236 // This may return a chunk that should be destroyed with chunk_dealloc outside
1237 // of the arena lock. It is not the same chunk as was passed in (since that
1238 // chunk now becomes mSpare).
1239 [[nodiscard
]] inline arena_chunk_t
* DallocSmall(arena_chunk_t
* aChunk
,
1241 arena_chunk_map_t
* aMapElm
);
1243 [[nodiscard
]] arena_chunk_t
* DallocLarge(arena_chunk_t
* aChunk
, void* aPtr
);
1245 void* Ralloc(void* aPtr
, size_t aSize
, size_t aOldSize
);
1247 size_t EffectiveMaxDirty();
1249 // Passing one means purging all.
1250 void Purge(size_t aMaxDirty
);
1254 bool IsMainThreadOnly() const { return !mLock
.LockIsEnabled(); }
1256 void* operator new(size_t aCount
) = delete;
1258 void* operator new(size_t aCount
, const fallible_t
&) noexcept
;
1260 void operator delete(void*);
1263 struct ArenaTreeTrait
{
1264 static RedBlackTreeNode
<arena_t
>& GetTreeNode(arena_t
* aThis
) {
1265 return aThis
->mLink
;
1268 static inline Order
Compare(arena_t
* aNode
, arena_t
* aOther
) {
1271 return CompareInt(aNode
->mId
, aOther
->mId
);
1275 // Bookkeeping for all the arenas used by the allocator.
1276 // Arenas are separated in two categories:
1277 // - "private" arenas, used through the moz_arena_* API
1278 // - all the other arenas: the default arena, and thread-local arenas,
1279 // used by the standard API.
1280 class ArenaCollection
{
1284 mPrivateArenas
.Init();
1285 mMainThreadArenas
.Init();
1286 arena_params_t params
;
1287 // The main arena allows more dirty pages than the default for other arenas.
1288 params
.mMaxDirty
= opt_dirty_max
;
1290 mLock
.Init() ? CreateArena(/* aIsPrivate = */ false, ¶ms
) : nullptr;
1291 return bool(mDefaultArena
);
1294 inline arena_t
* GetById(arena_id_t aArenaId
, bool aIsPrivate
);
1296 arena_t
* CreateArena(bool aIsPrivate
, arena_params_t
* aParams
);
1298 void DisposeArena(arena_t
* aArena
) {
1299 MutexAutoLock
lock(mLock
);
1301 aArena
->IsMainThreadOnly() ? mMainThreadArenas
: mPrivateArenas
;
1303 MOZ_RELEASE_ASSERT(tree
.Search(aArena
), "Arena not in tree");
1304 tree
.Remove(aArena
);
1308 void SetDefaultMaxDirtyPageModifier(int32_t aModifier
) {
1309 mDefaultMaxDirtyPageModifier
= aModifier
;
1311 int32_t DefaultMaxDirtyPageModifier() { return mDefaultMaxDirtyPageModifier
; }
1313 using Tree
= RedBlackTree
<arena_t
, ArenaTreeTrait
>;
1315 struct Iterator
: Tree::Iterator
{
1316 explicit Iterator(Tree
* aTree
, Tree
* aSecondTree
,
1317 Tree
* aThirdTree
= nullptr)
1318 : Tree::Iterator(aTree
),
1319 mSecondTree(aSecondTree
),
1320 mThirdTree(aThirdTree
) {}
1322 Item
<Iterator
> begin() {
1323 return Item
<Iterator
>(this, *Tree::Iterator::begin());
1326 Item
<Iterator
> end() { return Item
<Iterator
>(this, nullptr); }
1329 arena_t
* result
= Tree::Iterator::Next();
1330 if (!result
&& mSecondTree
) {
1331 new (this) Iterator(mSecondTree
, mThirdTree
);
1332 result
= *Tree::Iterator::begin();
1343 if (IsOnMainThreadWeak()) {
1344 return Iterator(&mArenas
, &mPrivateArenas
, &mMainThreadArenas
);
1346 return Iterator(&mArenas
, &mPrivateArenas
);
1349 inline arena_t
* GetDefault() { return mDefaultArena
; }
1351 Mutex mLock MOZ_UNANNOTATED
;
1353 // We're running on the main thread which is set by a call to SetMainThread().
1354 bool IsOnMainThread() const {
1355 return mMainThreadId
.isSome() && mMainThreadId
.value() == GetThreadId();
1358 // We're running on the main thread or SetMainThread() has never been called.
1359 bool IsOnMainThreadWeak() const {
1360 return mMainThreadId
.isNothing() || IsOnMainThread();
1363 // After a fork set the new thread ID in the child.
1364 void PostForkFixMainThread() {
1365 if (mMainThreadId
.isSome()) {
1366 // Only if the main thread has been defined.
1367 mMainThreadId
= Some(GetThreadId());
1371 void SetMainThread() {
1372 MutexAutoLock
lock(mLock
);
1373 MOZ_ASSERT(mMainThreadId
.isNothing());
1374 mMainThreadId
= Some(GetThreadId());
1378 const static arena_id_t MAIN_THREAD_ARENA_BIT
= 0x1;
1380 inline arena_t
* GetByIdInternal(Tree
& aTree
, arena_id_t aArenaId
);
1382 arena_id_t
MakeRandArenaId(bool aIsMainThreadOnly
) const;
1383 static bool ArenaIdIsMainThreadOnly(arena_id_t aArenaId
) {
1384 return aArenaId
& MAIN_THREAD_ARENA_BIT
;
1387 arena_t
* mDefaultArena
;
1388 arena_id_t mLastPublicArenaId
;
1390 // Accessing mArenas and mPrivateArenas can only be done while holding mLock.
1391 // Since mMainThreadArenas can only be used from the main thread, it can be
1392 // accessed without a lock, which is why it is a separate tree.
1394 Tree mPrivateArenas
;
1395 Tree mMainThreadArenas
;
1396 Atomic
<int32_t, MemoryOrdering::Relaxed
> mDefaultMaxDirtyPageModifier
;
1397 Maybe
<ThreadId
> mMainThreadId
;
1400 static ArenaCollection gArenas
;
1404 static AddressRadixTree
<(sizeof(void*) << 3) - LOG2(kChunkSize
)> gChunkRTree
;
1406 // Protects chunk-related data structures.
1407 static Mutex chunks_mtx
;
1409 // Trees of chunks that were previously allocated (trees differ only in node
1410 // ordering). These are used when allocating chunks, in an attempt to re-use
1411 // address space. Depending on function, different tree orderings are needed,
1412 // which is why there are two trees with the same contents.
1413 static RedBlackTree
<extent_node_t
, ExtentTreeSzTrait
> gChunksBySize
1414 MOZ_GUARDED_BY(chunks_mtx
);
1415 static RedBlackTree
<extent_node_t
, ExtentTreeTrait
> gChunksByAddress
1416 MOZ_GUARDED_BY(chunks_mtx
);
1418 // Protects huge allocation-related data structures.
1419 static Mutex huge_mtx
;
1421 // Tree of chunks that are stand-alone huge allocations.
1422 static RedBlackTree
<extent_node_t
, ExtentTreeTrait
> huge
1423 MOZ_GUARDED_BY(huge_mtx
);
1425 // Huge allocation statistics.
1426 static size_t huge_allocated
MOZ_GUARDED_BY(huge_mtx
);
1427 static size_t huge_mapped
MOZ_GUARDED_BY(huge_mtx
);
1429 // **************************
1430 // base (internal allocation).
1432 static Mutex base_mtx
;
1434 // Current pages that are being used for internal memory allocations. These
1435 // pages are carved up in cacheline-size quanta, so that there is no chance of
1436 // false cache line sharing.
1437 static void* base_pages
MOZ_GUARDED_BY(base_mtx
);
1438 static void* base_next_addr
MOZ_GUARDED_BY(base_mtx
);
1439 static void* base_next_decommitted
MOZ_GUARDED_BY(base_mtx
);
1440 // Address immediately past base_pages.
1441 static void* base_past_addr
MOZ_GUARDED_BY(base_mtx
);
1442 static size_t base_mapped
MOZ_GUARDED_BY(base_mtx
);
1443 static size_t base_committed
MOZ_GUARDED_BY(base_mtx
);
1448 // The arena associated with the current thread (per
1449 // jemalloc_thread_local_arena). On OSX, __thread/thread_local circles back to
1450 // calling malloc to allocate storage on first access on each thread, which
1451 // leads to an infinite loop, but pthread-based TLS somehow doesn't have this
1453 #if !defined(XP_DARWIN)
1454 static MOZ_THREAD_LOCAL(arena_t
*) thread_arena
;
1456 static detail::ThreadLocal
<arena_t
*, detail::ThreadLocalKeyStorage
>
1460 // *****************************
1461 // Runtime configuration options.
1463 #ifdef MALLOC_RUNTIME_CONFIG
1464 # define MALLOC_RUNTIME_VAR static
1466 # define MALLOC_RUNTIME_VAR static const
1475 MALLOC_RUNTIME_VAR
bool opt_junk
= false;
1476 MALLOC_RUNTIME_VAR
bool opt_zero
= false;
1478 #ifdef EARLY_BETA_OR_EARLIER
1479 MALLOC_RUNTIME_VAR PoisonType opt_poison
= ALL
;
1481 MALLOC_RUNTIME_VAR PoisonType opt_poison
= SOME
;
1484 MALLOC_RUNTIME_VAR
size_t opt_poison_size
= kCacheLineSize
* 4;
1486 static bool opt_randomize_small
= true;
1488 // ***************************************************************************
1489 // Begin forward declarations.
1491 static void* chunk_alloc(size_t aSize
, size_t aAlignment
, bool aBase
);
1492 static void chunk_dealloc(void* aChunk
, size_t aSize
, ChunkType aType
);
1494 static void chunk_assert_zero(void* aPtr
, size_t aSize
);
1496 static void huge_dalloc(void* aPtr
, arena_t
* aArena
);
1497 static bool malloc_init_hard();
1501 # define FORK_HOOK extern "C"
1503 # define FORK_HOOK static
1505 FORK_HOOK
void _malloc_prefork(void);
1506 FORK_HOOK
void _malloc_postfork_parent(void);
1507 FORK_HOOK
void _malloc_postfork_child(void);
1510 // End forward declarations.
1511 // ***************************************************************************
1513 // FreeBSD's pthreads implementation calls malloc(3), so the malloc
1514 // implementation has to take pains to avoid infinite recursion during initialization.
1516 // Returns whether the allocator was successfully initialized.
1517 static inline bool malloc_init() {
1518 if (!malloc_initialized
) {
1519 return malloc_init_hard();
1524 static void _malloc_message(const char* p
) {
1525 #if !defined(XP_WIN)
1526 # define _write write
1528 // Pretend to check _write() errors to suppress gcc warnings about
1529 // warn_unused_result annotations in some versions of glibc headers.
1530 if (_write(STDERR_FILENO
, p
, (unsigned int)strlen(p
)) < 0) {
1535 template <typename
... Args
>
1536 static void _malloc_message(const char* p
, Args
... args
) {
1538 _malloc_message(args
...);
1542 // Android's pthread.h does not declare pthread_atfork() until SDK 21.
1543 extern "C" MOZ_EXPORT
int pthread_atfork(void (*)(void), void (*)(void),
1547 // ***************************************************************************
1548 // Begin Utility functions/macros.
1550 // Return the chunk address for allocation address a.
1551 static inline arena_chunk_t* GetChunkForPtr(const void* aPtr) {
1552   return (arena_chunk_t*)(uintptr_t(aPtr) & ~kChunkSizeMask);
1555 // Return the chunk offset of address a.
1556 static inline size_t GetChunkOffsetForPtr(const void* aPtr) {
1557   return (size_t)(uintptr_t(aPtr) & kChunkSizeMask);
1560 static inline const char* _getprogname(void) { return "<jemalloc>"; }
1562 static inline void MaybePoison(void* aPtr
, size_t aSize
) {
1564 switch (opt_poison
) {
1568 size
= std::min(aSize
, opt_poison_size
);
1574 MOZ_ASSERT(size
!= 0 && size
<= aSize
);
1575 memset(aPtr
, kAllocPoison
, size
);
1578 // Fill the given range of memory with zeroes or junk depending on opt_junk and
1580 static inline void ApplyZeroOrJunk(void* aPtr
, size_t aSize
) {
1582 memset(aPtr
, kAllocJunk
, aSize
);
1583 } else if (opt_zero
) {
1584 memset(aPtr
, 0, aSize
);
1588 // On Windows, delay crashing on OOM.
1591 // Implementation of VirtualAlloc wrapper (bug 1716727).
1592 namespace MozAllocRetries
{
1594 // Maximum retry count on OOM.
1595 constexpr size_t kMaxAttempts
= 10;
1596 // Minimum delay time between retries. (The actual delay time may be larger. See
1597 // Microsoft's documentation for ::Sleep() for details.)
1598 constexpr size_t kDelayMs
= 50;
1600 using StallSpecs
= ::mozilla::StallSpecs
;
1602 static constexpr StallSpecs maxStall
= {.maxAttempts
= kMaxAttempts
,
1603 .delayMs
= kDelayMs
};
1605 static inline StallSpecs
GetStallSpecs() {
1606 # if defined(JS_STANDALONE)
1607 // GetGeckoProcessType() isn't available in this configuration. (SpiderMonkey
1608 // on Windows mostly skips this in favor of directly calling ::VirtualAlloc(),
1609 // though, so it's probably not going to matter whether we stall here or not.)
1612 switch (GetGeckoProcessType()) {
1613 // For the main process, stall for the maximum permissible time period. (The
1614 // main process is the most important one to keep alive.)
1615 case GeckoProcessType::GeckoProcessType_Default
:
1618 // For all other process types, stall for at most half as long.
1620 return {.maxAttempts
= maxStall
.maxAttempts
/ 2,
1621 .delayMs
= maxStall
.delayMs
};
1626 // Drop-in wrapper around VirtualAlloc. When out of memory, may attempt to stall
1627 // and retry rather than returning immediately, in hopes that the page file is
1628 // about to be expanded by Windows.
1631 // https://docs.microsoft.com/en-us/troubleshoot/windows-client/performance/slow-page-file-growth-memory-allocation-errors
1632 [[nodiscard
]] void* MozVirtualAlloc(LPVOID lpAddress
, SIZE_T dwSize
,
1633 DWORD flAllocationType
, DWORD flProtect
) {
1634 DWORD
const lastError
= ::GetLastError();
1636 constexpr auto IsOOMError
= [] {
1637 switch (::GetLastError()) {
1638 // This is the usual error result from VirtualAlloc for OOM.
1639 case ERROR_COMMITMENT_LIMIT
:
1640 // Although rare, this has also been observed in low-memory situations.
1641 // (Presumably this means Windows can't allocate enough kernel-side space
1642 // for its own internal representation of the process's virtual address
1644 case ERROR_NOT_ENOUGH_MEMORY
:
1651 void* ptr
= ::VirtualAlloc(lpAddress
, dwSize
, flAllocationType
, flProtect
);
1652 if (MOZ_LIKELY(ptr
)) return ptr
;
1654 // We can't do anything for errors other than OOM...
1655 if (!IsOOMError()) return nullptr;
1656 // ... or if this wasn't a request to commit memory in the first place.
1657 // (This function has no strategy for resolving MEM_RESERVE failures.)
1658 if (!(flAllocationType
& MEM_COMMIT
)) return nullptr;
1661 // Retry as many times as desired (possibly zero).
1662 const StallSpecs stallSpecs
= GetStallSpecs();
1665 stallSpecs
.StallAndRetry(&::Sleep
, [&]() -> std::optional
<void*> {
1667 ::VirtualAlloc(lpAddress
, dwSize
, flAllocationType
, flProtect
);
1670 // The OOM status has been handled, and should not be reported to
1673 ::SetLastError(lastError
);
1678 // Failure for some reason other than OOM.
1679 if (!IsOOMError()) {
1683 return std::nullopt
;
1686 return ret
.value_or(nullptr);
1688 } // namespace MozAllocRetries
1690 using MozAllocRetries::MozVirtualAlloc
;
1693 MOZ_JEMALLOC_API StallSpecs
GetAllocatorStallSpecs() {
1694 return ::MozAllocRetries::GetStallSpecs();
1696 } // namespace mozilla
1700 // ***************************************************************************
1702 static inline void pages_decommit(void* aAddr
, size_t aSize
) {
1704 // The region starting at addr may have been allocated in multiple calls
1705 // to VirtualAlloc and recycled, so decommitting the entire region in one
1706 // go may not be valid. However, since we allocate at least a chunk at a
1707 // time, we may touch any region in chunksized increments.
1708 size_t pages_size
= std::min(aSize
, kChunkSize
- GetChunkOffsetForPtr(aAddr
));
1710 // This will cause Access Violation on read and write and thus act as a
1711 // guard page or region as well.
1712 if (!VirtualFree(aAddr
, pages_size
, MEM_DECOMMIT
)) {
1715 aAddr
= (void*)((uintptr_t)aAddr
+ pages_size
);
1716 aSize
-= pages_size
;
1717 pages_size
= std::min(aSize
, kChunkSize
);
1720 if (mmap(aAddr
, aSize
, PROT_NONE
, MAP_FIXED
| MAP_PRIVATE
| MAP_ANON
, -1,
1722 // We'd like to report the OOM for our tooling, but we can't allocate
1723 // memory at this point, so avoid the use of printf.
1724 const char out_of_mappings
[] =
1725 "[unhandlable oom] Failed to mmap, likely no more mappings "
1726 "available " __FILE__
" : " MOZ_STRINGIFY(__LINE__
);
1727 if (errno
== ENOMEM
) {
1729 fputs(out_of_mappings
, stderr
);
1732 MOZ_CRASH_ANNOTATE(out_of_mappings
);
1734 MOZ_REALLY_CRASH(__LINE__
);
1736 MozTagAnonymousMemory(aAddr
, aSize
, "jemalloc-decommitted");
// Commit pages. Returns whether pages were committed.
[[nodiscard]] static inline bool pages_commit(void* aAddr, size_t aSize) {
#ifdef XP_WIN
  // The region starting at addr may have been allocated in multiple calls
  // to VirtualAlloc and recycled, so committing the entire region in one
  // go may not be valid. However, since we allocate at least a chunk at a
  // time, we may touch any region in chunksized increments.
  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
  while (aSize > 0) {
    if (!MozVirtualAlloc(aAddr, pages_size, MEM_COMMIT, PAGE_READWRITE)) {
      return false;
    }
    aAddr = (void*)((uintptr_t)aAddr + pages_size);
    aSize -= pages_size;
    pages_size = std::min(aSize, kChunkSize);
  }
#else
  if (mmap(aAddr, aSize, PROT_READ | PROT_WRITE,
           MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == MAP_FAILED) {
    return false;
  }
  MozTagAnonymousMemory(aAddr, aSize, "jemalloc");
#endif
  return true;
}
static bool base_pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx) {
  size_t csize;
  size_t pminsize;

  MOZ_ASSERT(minsize != 0);
  csize = CHUNK_CEILING(minsize);
  base_pages = chunk_alloc(csize, kChunkSize, true);
  base_next_addr = base_pages;
  base_past_addr = (void*)((uintptr_t)base_pages + csize);
  // Leave enough pages for minsize committed, since otherwise they would
  // have to be immediately recommitted.
  pminsize = PAGE_CEILING(minsize);
  base_next_decommitted = (void*)((uintptr_t)base_pages + pminsize);
  if (pminsize < csize) {
    pages_decommit(base_next_decommitted, csize - pminsize);
  }
  base_mapped += csize;
  base_committed += pminsize;
}
static void* base_alloc(size_t aSize) {
  void* ret;
  size_t csize;

  // Round size up to nearest multiple of the cacheline size.
  csize = CACHELINE_CEILING(aSize);

  MutexAutoLock lock(base_mtx);
  // Make sure there's enough space for the allocation.
  if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
    if (base_pages_alloc(csize)) {
      return nullptr;
    }
  }
  // Allocate.
  ret = base_next_addr;
  base_next_addr = (void*)((uintptr_t)base_next_addr + csize);
  // Make sure enough pages are committed for the new allocation.
  if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
    void* pbase_next_addr = (void*)(PAGE_CEILING((uintptr_t)base_next_addr));

    if (!pages_commit(
            base_next_decommitted,
            (uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted)) {
      return nullptr;
    }

    base_committed +=
        (uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted;
    base_next_decommitted = pbase_next_addr;
  }

  return ret;
}
static void* base_calloc(size_t aNumber, size_t aSize) {
  void* ret = base_alloc(aNumber * aSize);
  if (ret) {
    memset(ret, 0, aNumber * aSize);
  }
  return ret;
}
// A specialization of the base allocator with a free list.
template <typename T>
struct TypedBaseAlloc {
  static T* sFirstFree;

  static size_t size_of() { return sizeof(T); }

  static T* alloc() {
    T* ret;

    base_mtx.Lock();
    if (sFirstFree) {
      // Pop the head of the free list; the next free node's address is
      // stored in the first word of the node itself.
      ret = sFirstFree;
      sFirstFree = *(T**)ret;
      base_mtx.Unlock();
    } else {
      // base_alloc() takes base_mtx itself, so release it first.
      base_mtx.Unlock();
      ret = (T*)base_alloc(size_of());
    }

    return ret;
  }

  static void dealloc(T* aNode) {
    MutexAutoLock lock(base_mtx);
    *(T**)aNode = sFirstFree;
    sFirstFree = aNode;
  }
};

using ExtentAlloc = TypedBaseAlloc<extent_node_t>;

template <>
extent_node_t* ExtentAlloc::sFirstFree = nullptr;

template <>
arena_t* TypedBaseAlloc<arena_t>::sFirstFree = nullptr;

template <>
size_t TypedBaseAlloc<arena_t>::size_of() {
  // Allocate enough space for trailing bins.
  return sizeof(arena_t) + (sizeof(arena_bin_t) * (NUM_SMALL_CLASSES - 1));
}

template <typename T>
struct BaseAllocFreePolicy {
  void operator()(T* aPtr) { TypedBaseAlloc<T>::dealloc(aPtr); }
};

using UniqueBaseNode =
    UniquePtr<extent_node_t, BaseAllocFreePolicy<extent_node_t>>;
// End Utility functions/macros.
// ***************************************************************************
// Begin chunk management functions.

#ifdef XP_WIN
static void* pages_map(void* aAddr, size_t aSize) {
  void* ret = nullptr;
  ret = MozVirtualAlloc(aAddr, aSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
  return ret;
}
static void pages_unmap(void* aAddr, size_t aSize) {
  if (VirtualFree(aAddr, 0, MEM_RELEASE) == 0) {
    _malloc_message(_getprogname(), ": (malloc) Error in VirtualFree()\n");
  }
}
#else
static void pages_unmap(void* aAddr, size_t aSize) {
  if (munmap(aAddr, aSize) == -1) {
    if (strerror_r(errno, buf, sizeof(buf)) == 0) {
      _malloc_message(_getprogname(), ": (malloc) Error in munmap(): ", buf,
                      "\n");
    }
  }
}
static void* pages_map(void* aAddr, size_t aSize) {
  void* ret;
#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  // The JS engine assumes that all allocated pointers have their high 17 bits
  // clear, which ia64's mmap doesn't support directly. However, we can emulate
  // it by passing mmap an "addr" parameter with those bits clear. The mmap will
  // return that address, or the nearest available memory above that address,
  // providing a near-guarantee that those bits are clear. If they are not, we
  // return nullptr below to indicate out-of-memory.
  //
  // The addr is chosen as 0x0000070000000000, which still allows about 120TB of
  // virtual address space.
  //
  // See Bug 589735 for more information.
  bool check_placement = true;
  if (!aAddr) {
    aAddr = (void*)0x0000070000000000;
    check_placement = false;
  }
#  endif

#  if defined(__sparc__) && defined(__arch64__) && defined(__linux__)
  const uintptr_t start = 0x0000070000000000ULL;
  const uintptr_t end = 0x0000800000000000ULL;

  // Copied from js/src/gc/Memory.cpp and adapted for this source.
  uintptr_t hint;
  void* region = MAP_FAILED;
  for (hint = start; region == MAP_FAILED && hint + aSize <= end;
       hint += kChunkSize) {
    region = mmap((void*)hint, aSize, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANON, -1, 0);
    if (region != MAP_FAILED) {
      if (((size_t)region + (aSize - 1)) & 0xffff800000000000) {
        if (munmap(region, aSize)) {
          MOZ_ASSERT(errno == ENOMEM);
        }
        region = MAP_FAILED;
      }
    }
  }
  ret = region;
#  else
  // We don't use MAP_FIXED here, because it can cause the *replacement*
  // of existing mappings, and we only want to create new mappings.
  ret =
      mmap(aAddr, aSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#  endif

  if (ret == MAP_FAILED) {
    ret = nullptr;
  }
#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  // If the allocated memory doesn't have its upper 17 bits clear, consider it
  // as out of memory.
  else if ((long long)ret & 0xffff800000000000) {
    munmap(ret, aSize);
    ret = nullptr;
  }
  // If the caller requested a specific memory location, verify that's what mmap
  // returned.
  else if (check_placement && ret != aAddr) {
#  else
  else if (aAddr && ret != aAddr) {
#  endif
    // We succeeded in mapping memory, but not in the right place.
    pages_unmap(ret, aSize);
    ret = nullptr;
  }
  if (ret) {
    MozTagAnonymousMemory(ret, aSize, "jemalloc");
  }

#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  MOZ_ASSERT(!ret || (!check_placement && ret) ||
             (check_placement && ret == aAddr));
#  else
  MOZ_ASSERT(!ret || (!aAddr && ret != aAddr) || (aAddr && ret == aAddr));
#  endif
  return ret;
}
#endif
#  define VM_COPY_MIN kChunkSize

static inline void pages_copy(void* dest, const void* src, size_t n) {
  MOZ_ASSERT((void*)((uintptr_t)dest & ~gPageSizeMask) == dest);
  MOZ_ASSERT(n >= VM_COPY_MIN);
  MOZ_ASSERT((void*)((uintptr_t)src & ~gPageSizeMask) == src);

  kern_return_t r = vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
                            (vm_address_t)dest);
  if (r != KERN_SUCCESS) {
    MOZ_CRASH("vm_copy() failed");
  }
}
template <size_t Bits>
bool AddressRadixTree<Bits>::Init() {
  mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
  return mRoot;
}
template <size_t Bits>
void** AddressRadixTree<Bits>::GetSlot(void* aKey, bool aCreate) {
  uintptr_t key = reinterpret_cast<uintptr_t>(aKey);
  uintptr_t subkey;
  unsigned i, lshift, height, bits;
  void** node;
  void** child;

  for (i = lshift = 0, height = kHeight, node = mRoot; i < height - 1;
       i++, lshift += bits, node = child) {
    bits = i ? kBitsPerLevel : kBitsAtLevel1;
    subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
    child = (void**)node[subkey];
    if (!child && aCreate) {
      child = (void**)base_calloc(1 << kBitsPerLevel, sizeof(void*));
      if (!child) {
        return nullptr;
      }
      node[subkey] = child;
    }
    if (!child) {
      return nullptr;
    }
  }

  // node is a leaf, so it contains values rather than node pointers.
  bits = i ? kBitsPerLevel : kBitsAtLevel1;
  subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
  return &node[subkey];
}
template <size_t Bits>
void* AddressRadixTree<Bits>::Get(void* aKey) {
  void* ret = nullptr;

  void** slot = GetSlot(aKey);
  if (slot) {
    ret = *slot;
  }

  MutexAutoLock lock(mLock);

  // Suppose that it were possible for a jemalloc-allocated chunk to be
  // munmap()ped, followed by a different allocator in another thread re-using
  // overlapping virtual memory, all without invalidating the cached rtree
  // value. The result would be a false positive (the rtree would claim that
  // jemalloc owns memory that it had actually discarded). I don't think this
  // scenario is possible, but the following assertion is a prudent sanity
  // check.
  if (!slot) {
    // In case a slot has been created in the meantime.
    slot = GetSlot(aKey);
  }
  if (slot) {
    // The MutexAutoLock above should act as a memory barrier, forcing
    // the compiler to emit a new read instruction for *slot.
    MOZ_ASSERT(ret == *slot);
  } else {
    MOZ_ASSERT(ret == nullptr);
  }

  return ret;
}
template <size_t Bits>
bool AddressRadixTree<Bits>::Set(void* aKey, void* aValue) {
  MutexAutoLock lock(mLock);
  void** slot = GetSlot(aKey, /* aCreate = */ true);
  if (slot) {
    *slot = aValue;
  }
  return slot;
}
// pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
// from upstream jemalloc 3.4.1 to fix Mozilla bug 956501.

// Return the offset between a and the nearest aligned address at or below a.
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
  ((size_t)((uintptr_t)(a) & ((alignment)-1)))

// Return the smallest alignment multiple that is >= s.
#define ALIGNMENT_CEILING(s, alignment) \
  (((s) + ((alignment)-1)) & (~((alignment)-1)))
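
// Both macros assume `alignment` is a power of two. For example:
//   ALIGNMENT_ADDR2OFFSET(0x12345, 0x1000) == 0x345  (offset past the previous
//                                                     4 KiB boundary)
//   ALIGNMENT_CEILING(100, 64)             == 128    (100 rounded up to the
//                                                     next multiple of 64)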
static void* pages_trim(void* addr, size_t alloc_size, size_t leadsize,
                        size_t size) {
  void* ret = (void*)((uintptr_t)addr + leadsize);

  MOZ_ASSERT(alloc_size >= leadsize + size);
#ifdef XP_WIN
  {
    void* new_addr;

    pages_unmap(addr, alloc_size);
    new_addr = pages_map(ret, size);
    if (new_addr == ret) {
      return ret;
    }
    if (new_addr) {
      pages_unmap(new_addr, size);
    }
    return nullptr;
  }
#else
  {
    size_t trailsize = alloc_size - leadsize - size;

    if (leadsize != 0) {
      pages_unmap(addr, leadsize);
    }
    if (trailsize != 0) {
      pages_unmap((void*)((uintptr_t)ret + size), trailsize);
    }
    return ret;
  }
#endif
}
static void* chunk_alloc_mmap_slow(size_t size, size_t alignment) {
  void *ret, *pages;
  size_t alloc_size, leadsize;

  alloc_size = size + alignment - gRealPageSize;
  // Beware size_t wrap-around.
  if (alloc_size < size) {
    return nullptr;
  }
  pages = pages_map(nullptr, alloc_size);
  if (!pages) {
    return nullptr;
  }
  leadsize =
      ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages;
  ret = pages_trim(pages, alloc_size, leadsize, size);

  return ret;
}
static void* chunk_alloc_mmap(size_t size, size_t alignment) {
  void* ret;
  size_t offset;

  // Ideally, there would be a way to specify alignment to mmap() (like
  // NetBSD has), but in the absence of such a feature, we have to work
  // hard to efficiently create aligned mappings. The reliable, but
  // slow method is to create a mapping that is over-sized, then trim the
  // excess. However, that always results in one or two calls to
  // pages_unmap().
  //
  // Optimistically try mapping precisely the right amount before falling
  // back to the slow method, with the expectation that the optimistic
  // approach works most of the time.
  ret = pages_map(nullptr, size);
  if (!ret) {
    return nullptr;
  }
  offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
  if (offset != 0) {
    pages_unmap(ret, size);
    return chunk_alloc_mmap_slow(size, alignment);
  }

  return ret;
}
// Purge and release the pages in the chunk of length `length` at `addr` to
// the operating system.
// Returns whether the pages are guaranteed to be full of zeroes when the
// function returns.
// The force_zero argument explicitly requests that the memory is guaranteed
// to be full of zeroes when the function returns.
static bool pages_purge(void* addr, size_t length, bool force_zero) {
  pages_decommit(addr, length);
  return true;
}
static void* chunk_recycle(size_t aSize, size_t aAlignment) {
  extent_node_t key;

  size_t alloc_size = aSize + aAlignment - kChunkSize;
  // Beware size_t wrap-around.
  if (alloc_size < aSize) {
    return nullptr;
  }
  key.mAddr = nullptr;
  key.mSize = alloc_size;
  chunks_mtx.Lock();
  extent_node_t* node = gChunksBySize.SearchOrNext(&key);
  if (!node) {
    chunks_mtx.Unlock();
    return nullptr;
  }
  size_t leadsize = ALIGNMENT_CEILING((uintptr_t)node->mAddr, aAlignment) -
                    (uintptr_t)node->mAddr;
  MOZ_ASSERT(node->mSize >= leadsize + aSize);
  size_t trailsize = node->mSize - leadsize - aSize;
  void* ret = (void*)((uintptr_t)node->mAddr + leadsize);

  // All recycled chunks are zeroed (because they're purged) before being
  // recycled.
  MOZ_ASSERT(node->mChunkType == ZEROED_CHUNK);

  // Remove node from the tree.
  gChunksBySize.Remove(node);
  gChunksByAddress.Remove(node);
  if (leadsize != 0) {
    // Insert the leading space as a smaller chunk.
    node->mSize = leadsize;
    gChunksBySize.Insert(node);
    gChunksByAddress.Insert(node);
    node = nullptr;
  }
  if (trailsize != 0) {
    // Insert the trailing space as a smaller chunk.
    if (!node) {
      // An additional node is required, but
      // TypedBaseAlloc::alloc() can cause a new base chunk to be
      // allocated. Drop chunks_mtx in order to avoid
      // deadlock, and if node allocation fails, deallocate
      // the result before returning an error.
      chunks_mtx.Unlock();
      node = ExtentAlloc::alloc();
      if (!node) {
        chunk_dealloc(ret, aSize, ZEROED_CHUNK);
        return nullptr;
      }
      chunks_mtx.Lock();
    }
    node->mAddr = (void*)((uintptr_t)(ret) + aSize);
    node->mSize = trailsize;
    node->mChunkType = ZEROED_CHUNK;
    gChunksBySize.Insert(node);
    gChunksByAddress.Insert(node);
    node = nullptr;
  }

  gRecycledSize -= aSize;

  chunks_mtx.Unlock();

  if (node) {
    ExtentAlloc::dealloc(node);
  }
  if (!pages_commit(ret, aSize)) {
    return nullptr;
  }
  return ret;
}
// On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
// awkward to recycle allocations of varying sizes. Therefore we only allow
// recycling when the size equals the chunksize, unless deallocation is entirely
// disabled.
#ifdef XP_WIN
#  define CAN_RECYCLE(size) ((size) == kChunkSize)
#else
#  define CAN_RECYCLE(size) true
#endif
// Allocates `size` bytes of system memory aligned for `alignment`.
// `base` indicates whether the memory will be used for the base allocator
// (e.g. base_alloc).
// `zeroed` is an outvalue that returns whether the allocated memory is
// guaranteed to be full of zeroes. It can be omitted when the caller doesn't
// care about the result.
static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase) {
  void* ret = nullptr;

  MOZ_ASSERT(aSize != 0);
  MOZ_ASSERT((aSize & kChunkSizeMask) == 0);
  MOZ_ASSERT(aAlignment != 0);
  MOZ_ASSERT((aAlignment & kChunkSizeMask) == 0);

  // Base allocations can't be fulfilled by recycling because of
  // possible deadlock or infinite recursion.
  if (CAN_RECYCLE(aSize) && !aBase) {
    ret = chunk_recycle(aSize, aAlignment);
  }
  if (!ret) {
    ret = chunk_alloc_mmap(aSize, aAlignment);
  }
  if (ret && !aBase) {
    if (!gChunkRTree.Set(ret, ret)) {
      chunk_dealloc(ret, aSize, UNKNOWN_CHUNK);
      return nullptr;
    }
  }

  MOZ_ASSERT(GetChunkOffsetForPtr(ret) == 0);
  return ret;
}
static void chunk_assert_zero(void* aPtr, size_t aSize) {
  size_t i;
  size_t* p = (size_t*)(uintptr_t)aPtr;

  for (i = 0; i < aSize / sizeof(size_t); i++) {
    MOZ_ASSERT(p[i] == 0);
  }
}
static void chunk_record(void* aChunk, size_t aSize, ChunkType aType) {
  extent_node_t key;

  if (aType != ZEROED_CHUNK) {
    if (pages_purge(aChunk, aSize, aType == HUGE_CHUNK)) {
      aType = ZEROED_CHUNK;
    }
  }

  // Allocate a node before acquiring chunks_mtx even though it might not
  // be needed, because TypedBaseAlloc::alloc() may cause a new base chunk to
  // be allocated, which could cause deadlock if chunks_mtx were already
  // locked.
  UniqueBaseNode xnode(ExtentAlloc::alloc());
  // Use xprev to implement conditional deferred deallocation of prev.
  UniqueBaseNode xprev;

  {
    // RAII deallocates xnode and xprev defined above after unlocking
    // in order to avoid potential dead-locks.
    MutexAutoLock lock(chunks_mtx);
    key.mAddr = (void*)((uintptr_t)aChunk + aSize);
    extent_node_t* node = gChunksByAddress.SearchOrNext(&key);
    // Try to coalesce forward.
    if (node && node->mAddr == key.mAddr) {
      // Coalesce chunk with the following address range. This does
      // not change the position within gChunksByAddress, so only
      // remove/insert from/into gChunksBySize.
      gChunksBySize.Remove(node);
      node->mAddr = aChunk;
      node->mSize += aSize;
      if (node->mChunkType != aType) {
        node->mChunkType = RECYCLED_CHUNK;
      }
      gChunksBySize.Insert(node);
    } else {
      // Coalescing forward failed, so insert a new node.
      if (!xnode) {
        // TypedBaseAlloc::alloc() failed, which is an exceedingly
        // unlikely failure. Leak chunk; its pages have
        // already been purged, so this is only a virtual
        // memory leak.
        return;
      }
      node = xnode.release();
      node->mAddr = aChunk;
      node->mSize = aSize;
      node->mChunkType = aType;
      gChunksByAddress.Insert(node);
      gChunksBySize.Insert(node);
    }

    // Try to coalesce backward.
    extent_node_t* prev = gChunksByAddress.Prev(node);
    if (prev && (void*)((uintptr_t)prev->mAddr + prev->mSize) == aChunk) {
      // Coalesce chunk with the previous address range. This does
      // not change the position within gChunksByAddress, so only
      // remove/insert node from/into gChunksBySize.
      gChunksBySize.Remove(prev);
      gChunksByAddress.Remove(prev);

      gChunksBySize.Remove(node);
      node->mAddr = prev->mAddr;
      node->mSize += prev->mSize;
      if (node->mChunkType != prev->mChunkType) {
        node->mChunkType = RECYCLED_CHUNK;
      }
      gChunksBySize.Insert(node);

      xprev.reset(prev);
    }

    gRecycledSize += aSize;
  }
}
static void chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType) {
  MOZ_ASSERT(GetChunkOffsetForPtr(aChunk) == 0);
  MOZ_ASSERT(aSize != 0);
  MOZ_ASSERT((aSize & kChunkSizeMask) == 0);

  gChunkRTree.Unset(aChunk);

  if (CAN_RECYCLE(aSize)) {
    size_t recycled_so_far = gRecycledSize;
    // In case some race condition put us above the limit.
    if (recycled_so_far < gRecycleLimit) {
      size_t recycle_remaining = gRecycleLimit - recycled_so_far;
      size_t to_recycle;
      if (aSize > recycle_remaining) {
        to_recycle = recycle_remaining;
        // Drop pages that would overflow the recycle limit.
        pages_trim(aChunk, aSize, 0, to_recycle);
      } else {
        to_recycle = aSize;
      }
      chunk_record(aChunk, to_recycle, aType);
      return;
    }
  }

  pages_unmap(aChunk, aSize);
}
// End chunk management functions.
// ***************************************************************************

static inline arena_t* thread_local_arena(bool enabled) {
  arena_t* arena;

  if (enabled) {
    // The arena will essentially be leaked if this function is
    // called with `false`, but it doesn't matter at the moment,
    // because in practice nothing actually calls this function
    // with `false`, except maybe at shutdown.
    arena =
        gArenas.CreateArena(/* aIsPrivate = */ false, /* aParams = */ nullptr);
  } else {
    arena = gArenas.GetDefault();
  }
  thread_arena.set(arena);
  return arena;
}
inline void MozJemalloc::jemalloc_thread_local_arena(bool aEnabled) {
  if (malloc_init()) {
    thread_local_arena(aEnabled);
  }
}
// Choose an arena based on a per-thread value.
static inline arena_t* choose_arena(size_t size) {
  arena_t* ret = nullptr;

  // We can only use TLS if this is a PIC library, since for the static
  // library version, libc's malloc is used by TLS allocation, which
  // introduces a bootstrapping issue.

  if (size > kMaxQuantumClass) {
    // Force the default arena for larger allocations.
    ret = gArenas.GetDefault();
  } else {
    // Check TLS to see if our thread has requested a pinned arena.
    ret = thread_arena.get();
    // If ret is non-null, it must not be in the first page.
    MOZ_DIAGNOSTIC_ASSERT_IF(ret, (size_t)ret >= gPageSize);
    if (!ret) {
      // Nothing in TLS. Pin this thread to the default arena.
      ret = thread_local_arena(false);
    }
  }

  MOZ_DIAGNOSTIC_ASSERT(ret);
  return ret;
}
inline uint8_t arena_t::FindFreeBitInMask(uint32_t aMask, uint32_t& aRng) {
  if (mPRNG != nullptr) {
    if (aRng == UINT_MAX) {
      aRng = mPRNG->next() % 32;
    }

    uint8_t bitIndex;
    // RotateRight asserts when provided bad input.
    aMask = aRng ? RotateRight(aMask, aRng)
                 : aMask;  // Rotate the mask a random number of slots
    bitIndex = CountTrailingZeroes32(aMask);
    return (bitIndex + aRng) % 32;
  }
  return CountTrailingZeroes32(aMask);
}
inline void* arena_t::ArenaRunRegAlloc(arena_run_t* aRun, arena_bin_t* aBin) {
  void* ret;
  unsigned i, mask, bit, regind;
  uint32_t rndPos = UINT_MAX;

  MOZ_DIAGNOSTIC_ASSERT(aRun->mMagic == ARENA_RUN_MAGIC);
  MOZ_ASSERT(aRun->mRegionsMinElement < aBin->mRunNumRegionsMask);

  // Move the first check outside the loop, so that aRun->mRegionsMinElement can
  // be updated unconditionally, without the possibility of updating it
  // multiple times.
  i = aRun->mRegionsMinElement;
  mask = aRun->mRegionsMask[i];
  if (mask != 0) {
    bit = FindFreeBitInMask(mask, rndPos);

    regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
    MOZ_ASSERT(regind < aBin->mRunNumRegions);
    ret = (void*)(((uintptr_t)aRun) + aBin->mRunFirstRegionOffset +
                  (aBin->mSizeClass * regind));

    // Clear bit.
    mask ^= (1U << bit);
    aRun->mRegionsMask[i] = mask;

    return ret;
  }

  for (i++; i < aBin->mRunNumRegionsMask; i++) {
    mask = aRun->mRegionsMask[i];
    if (mask != 0) {
      bit = FindFreeBitInMask(mask, rndPos);

      regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
      MOZ_ASSERT(regind < aBin->mRunNumRegions);
      ret = (void*)(((uintptr_t)aRun) + aBin->mRunFirstRegionOffset +
                    (aBin->mSizeClass * regind));

      // Clear bit.
      mask ^= (1U << bit);
      aRun->mRegionsMask[i] = mask;

      // Make a note that nothing before this element
      // contains a free region.
      aRun->mRegionsMinElement = i;  // Low payoff: + (mask == 0);

      return ret;
    }
  }
  // Not reached.
  MOZ_DIAGNOSTIC_ASSERT(0);
  return nullptr;
}
static inline void arena_run_reg_dalloc(arena_run_t* run, arena_bin_t* bin,
                                        void* ptr, size_t size) {
  uint32_t diff, regind;
  unsigned elm, bit;

  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);

  // Avoid doing division with a variable divisor if possible. Using
  // actual division here can reduce allocator throughput by over 20%!
  diff =
      (uint32_t)((uintptr_t)ptr - (uintptr_t)run - bin->mRunFirstRegionOffset);
  MOZ_ASSERT(diff <
             (static_cast<unsigned>(bin->mRunSizePages) << gPageSize2Pow));
  regind = diff / bin->mSizeDivisor;

  MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
  MOZ_DIAGNOSTIC_ASSERT(regind < bin->mRunNumRegions);

  elm = regind >> (LOG2(sizeof(int)) + 3);
  if (elm < run->mRegionsMinElement) {
    run->mRegionsMinElement = elm;
  }
  bit = regind - (elm << (LOG2(sizeof(int)) + 3));
  MOZ_RELEASE_ASSERT((run->mRegionsMask[elm] & (1U << bit)) == 0,
                     "Double-free?");
  run->mRegionsMask[elm] |= (1U << bit);
}
bool arena_t::SplitRun(arena_run_t* aRun, size_t aSize, bool aLarge,
                       bool aZero) {
  arena_chunk_t* chunk;
  size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;

  chunk = GetChunkForPtr(aRun);
  old_ndirty = chunk->ndirty;
  run_ind = (unsigned)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
  total_pages = (chunk->map[run_ind].bits & ~gPageSizeMask) >> gPageSize2Pow;
  need_pages = (aSize >> gPageSize2Pow);
  MOZ_ASSERT(need_pages > 0);
  MOZ_ASSERT(need_pages <= total_pages);
  rem_pages = total_pages - need_pages;

  for (i = 0; i < need_pages; i++) {
    // Commit decommitted pages if necessary. If a decommitted
    // page is encountered, commit all needed adjacent decommitted
    // pages in one operation, in order to reduce system call
    // overhead.
    if (chunk->map[run_ind + i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) {
      size_t j;

      // Advance i+j to just past the index of the last page
      // to commit. Clear CHUNK_MAP_DECOMMITTED and
      // CHUNK_MAP_MADVISED along the way.
      for (j = 0; i + j < need_pages && (chunk->map[run_ind + i + j].bits &
                                         CHUNK_MAP_MADVISED_OR_DECOMMITTED);
           j++) {
        // DECOMMITTED and MADVISED are mutually exclusive.
        MOZ_ASSERT(!(chunk->map[run_ind + i + j].bits & CHUNK_MAP_DECOMMITTED &&
                     chunk->map[run_ind + i + j].bits & CHUNK_MAP_MADVISED));

        chunk->map[run_ind + i + j].bits &= ~CHUNK_MAP_MADVISED_OR_DECOMMITTED;
      }

#ifdef MALLOC_DECOMMIT
      bool committed = pages_commit(
          (void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)),
          j << gPageSize2Pow);
      // pages_commit zeroes pages, so mark them as such if it succeeded.
      // That's checked further below to avoid manually zeroing the pages.
      for (size_t k = 0; k < j; k++) {
        chunk->map[run_ind + i + k].bits |=
            committed ? CHUNK_MAP_ZEROED : CHUNK_MAP_DECOMMITTED;
      }
      if (!committed) {
        return false;
      }
#endif

      mStats.committed += j;
    }
  }

  mRunsAvail.Remove(&chunk->map[run_ind]);

  // Keep track of trailing unused pages for later use.
  if (rem_pages > 0) {
    chunk->map[run_ind + need_pages].bits =
        (rem_pages << gPageSize2Pow) |
        (chunk->map[run_ind + need_pages].bits & gPageSizeMask);
    chunk->map[run_ind + total_pages - 1].bits =
        (rem_pages << gPageSize2Pow) |
        (chunk->map[run_ind + total_pages - 1].bits & gPageSizeMask);
    mRunsAvail.Insert(&chunk->map[run_ind + need_pages]);
  }

  for (i = 0; i < need_pages; i++) {
    // Zero if necessary.
    if (aZero) {
      if ((chunk->map[run_ind + i].bits & CHUNK_MAP_ZEROED) == 0) {
        memset((void*)(uintptr_t(chunk) + ((run_ind + i) << gPageSize2Pow)), 0,
               gPageSize);
        // CHUNK_MAP_ZEROED is cleared below.
      }
    }

    // Update dirty page accounting.
    if (chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) {
      chunk->ndirty--;
      mNumDirty--;
      // CHUNK_MAP_DIRTY is cleared below.
    }

    // Initialize the chunk map.
    if (aLarge) {
      chunk->map[run_ind + i].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    } else {
      chunk->map[run_ind + i].bits = size_t(aRun) | CHUNK_MAP_ALLOCATED;
    }
  }

  // Set the run size only in the first element for large runs. This is
  // primarily a debugging aid, since the lack of size info for trailing
  // pages only matters if the application tries to operate on an
  // interior pointer.
  if (aLarge) {
    chunk->map[run_ind].bits |= aSize;
  }

  if (chunk->ndirty == 0 && old_ndirty > 0) {
    mChunksDirty.Remove(chunk);
  }
  return true;
}
void arena_t::InitChunk(arena_chunk_t* aChunk) {
  size_t i;

  // WARNING: The following relies on !aZeroed meaning "used to be an arena
  // chunk".
  // When the chunk we're initializing as an arena chunk is zeroed, we
  // mark all runs as decommitted and zeroed.
  // When it is not, which we can assume means it's a recycled arena chunk,
  // all it can contain is an arena chunk header (which we're overwriting),
  // and zeroed or poisoned memory (because a recycled arena chunk will
  // have been emptied before being recycled). In that case, we can get
  // away with reusing the chunk as-is, marking all runs as madvised.
  size_t flags = CHUNK_MAP_DECOMMITTED | CHUNK_MAP_ZEROED;

  mStats.mapped += kChunkSize;

  aChunk->arena = this;

  // Claim that no pages are in use, since the header is merely overhead.
  aChunk->ndirty = 0;

  // Initialize the map to contain one maximal free untouched run.
  arena_run_t* run = (arena_run_t*)(uintptr_t(aChunk) +
                                    (gChunkHeaderNumPages << gPageSize2Pow));

  // Clear the bits for the real header pages.
  for (i = 0; i < gChunkHeaderNumPages - 1; i++) {
    aChunk->map[i].bits = 0;
  }
  // Mark the leading guard page (last header page) as decommitted.
  aChunk->map[i++].bits = CHUNK_MAP_DECOMMITTED;

  // Mark the area usable for runs as available, note size at start and end.
  aChunk->map[i++].bits = gMaxLargeClass | flags;
  for (; i < gChunkNumPages - 2; i++) {
    aChunk->map[i].bits = flags;
  }
  aChunk->map[gChunkNumPages - 2].bits = gMaxLargeClass | flags;
  // Mark the trailing guard page as decommitted.
  aChunk->map[gChunkNumPages - 1].bits = CHUNK_MAP_DECOMMITTED;

#ifdef MALLOC_DECOMMIT
  // Start out decommitted, in order to force a closer correspondence
  // between dirty pages and committed untouched pages. This includes
  // leading and trailing guard pages.
  pages_decommit((void*)(uintptr_t(run) - gPageSize),
                 gMaxLargeClass + 2 * gPageSize);
#else
  // Decommit the last header page (=leading page) as a guard.
  pages_decommit((void*)(uintptr_t(run) - gPageSize), gPageSize);
  // Decommit the last page as a guard.
  pages_decommit((void*)(uintptr_t(aChunk) + kChunkSize - gPageSize),
                 gPageSize);
#endif

  mStats.committed += gChunkHeaderNumPages - 1;

  // Insert the run into the tree of available runs.
  mRunsAvail.Insert(&aChunk->map[gChunkHeaderNumPages]);

#ifdef MALLOC_DOUBLE_PURGE
  new (&aChunk->chunks_madvised_elem) DoublyLinkedListElement<arena_chunk_t>();
#endif
}
arena_chunk_t* arena_t::DeallocChunk(arena_chunk_t* aChunk) {
  if (mSpare) {
    if (mSpare->ndirty > 0) {
      aChunk->arena->mChunksDirty.Remove(mSpare);
      mNumDirty -= mSpare->ndirty;
      mStats.committed -= mSpare->ndirty;
    }

#ifdef MALLOC_DOUBLE_PURGE
    if (mChunksMAdvised.ElementProbablyInList(mSpare)) {
      mChunksMAdvised.remove(mSpare);
    }
#endif

    mStats.mapped -= kChunkSize;
    mStats.committed -= gChunkHeaderNumPages - 1;
  }

  // Remove run from the tree of available runs, so that the arena does not use
  // it. Dirty page flushing only uses the tree of dirty chunks, so leaving this
  // chunk in the chunks_* trees is sufficient for that purpose.
  mRunsAvail.Remove(&aChunk->map[gChunkHeaderNumPages]);

  arena_chunk_t* chunk_dealloc = mSpare;
  mSpare = aChunk;
  return chunk_dealloc;
}
arena_run_t* arena_t::AllocRun(size_t aSize, bool aLarge, bool aZero) {
  arena_run_t* run;
  arena_chunk_map_t* mapelm;
  arena_chunk_map_t key;

  MOZ_ASSERT(aSize <= gMaxLargeClass);
  MOZ_ASSERT((aSize & gPageSizeMask) == 0);

  // Search the arena's chunks for the lowest best fit.
  key.bits = aSize | CHUNK_MAP_KEY;
  mapelm = mRunsAvail.SearchOrNext(&key);
  if (mapelm) {
    arena_chunk_t* chunk = GetChunkForPtr(mapelm);
    size_t pageind =
        (uintptr_t(mapelm) - uintptr_t(chunk->map)) / sizeof(arena_chunk_map_t);

    run = (arena_run_t*)(uintptr_t(chunk) + (pageind << gPageSize2Pow));
  } else if (mSpare) {
    arena_chunk_t* chunk = mSpare;
    mSpare = nullptr;
    run = (arena_run_t*)(uintptr_t(chunk) +
                         (gChunkHeaderNumPages << gPageSize2Pow));
    // Insert the run into the tree of available runs.
    mRunsAvail.Insert(&chunk->map[gChunkHeaderNumPages]);
  } else {
    // No usable runs. Create a new chunk from which to allocate
    // the run.
    arena_chunk_t* chunk =
        (arena_chunk_t*)chunk_alloc(kChunkSize, kChunkSize, false);
    if (!chunk) {
      return nullptr;
    }
    InitChunk(chunk);
    run = (arena_run_t*)(uintptr_t(chunk) +
                         (gChunkHeaderNumPages << gPageSize2Pow));
  }

  return SplitRun(run, aSize, aLarge, aZero) ? run : nullptr;
}
size_t arena_t::EffectiveMaxDirty() {
  int32_t modifier = gArenas.DefaultMaxDirtyPageModifier();

  int32_t arenaOverride =
      modifier > 0 ? mMaxDirtyIncreaseOverride : mMaxDirtyDecreaseOverride;
  if (arenaOverride) {
    modifier = arenaOverride;
  }

  return modifier >= 0 ? mMaxDirty << modifier : mMaxDirty >> -modifier;
}
void arena_t::Purge(size_t aMaxDirty) {
  arena_chunk_t* chunk;
  size_t i, npages;

  size_t ndirty = 0;
  for (auto chunk : mChunksDirty.iter()) {
    ndirty += chunk->ndirty;
  }
  MOZ_ASSERT(ndirty == mNumDirty);

  MOZ_DIAGNOSTIC_ASSERT(aMaxDirty == 1 || (mNumDirty > aMaxDirty));

  // Iterate downward through chunks until enough dirty memory has been
  // purged. Terminate as soon as possible in order to minimize the
  // number of system calls, even if a chunk has only been partially
  // purged.
  while (mNumDirty > (aMaxDirty >> 1)) {
#ifdef MALLOC_DOUBLE_PURGE
    bool madvised = false;
#endif
    chunk = mChunksDirty.Last();
    MOZ_DIAGNOSTIC_ASSERT(chunk);
    // Last page is DECOMMITTED as a guard page.
    MOZ_ASSERT((chunk->map[gChunkNumPages - 1].bits & CHUNK_MAP_DECOMMITTED) !=
               0);
    for (i = gChunkNumPages - 2; chunk->ndirty > 0; i--) {
      MOZ_DIAGNOSTIC_ASSERT(i >= gChunkHeaderNumPages);

      if (chunk->map[i].bits & CHUNK_MAP_DIRTY) {
#ifdef MALLOC_DECOMMIT
        const size_t free_operation = CHUNK_MAP_DECOMMITTED;
#else
        const size_t free_operation = CHUNK_MAP_MADVISED;
#endif
        MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                   0);
        chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
        // Find adjacent dirty run(s).
        for (npages = 1; i > gChunkHeaderNumPages &&
                         (chunk->map[i - 1].bits & CHUNK_MAP_DIRTY);
             npages++) {
          i--;
          MOZ_ASSERT((chunk->map[i].bits & CHUNK_MAP_MADVISED_OR_DECOMMITTED) ==
                     0);
          chunk->map[i].bits ^= free_operation | CHUNK_MAP_DIRTY;
        }
        chunk->ndirty -= npages;
        mNumDirty -= npages;

#ifdef MALLOC_DECOMMIT
        pages_decommit((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
                       (npages << gPageSize2Pow));
#else
#  ifdef XP_SOLARIS
        posix_madvise((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
                      (npages << gPageSize2Pow), MADV_FREE);
#  else
        madvise((void*)(uintptr_t(chunk) + (i << gPageSize2Pow)),
                (npages << gPageSize2Pow), MADV_FREE);
#  endif
#  ifdef MALLOC_DOUBLE_PURGE
        madvised = true;
#  endif
#endif
        mStats.committed -= npages;

        if (mNumDirty <= (aMaxDirty >> 1)) {
          break;
        }
      }
    }

    if (chunk->ndirty == 0) {
      mChunksDirty.Remove(chunk);
    }
#ifdef MALLOC_DOUBLE_PURGE
    if (madvised) {
      // The chunk might already be in the list, but this
      // makes sure it's at the front.
      if (mChunksMAdvised.ElementProbablyInList(chunk)) {
        mChunksMAdvised.remove(chunk);
      }
      mChunksMAdvised.pushFront(chunk);
    }
#endif
  }
}
arena_chunk_t* arena_t::DallocRun(arena_run_t* aRun, bool aDirty) {
  arena_chunk_t* chunk;
  size_t size, run_ind, run_pages;

  chunk = GetChunkForPtr(aRun);
  run_ind = (size_t)((uintptr_t(aRun) - uintptr_t(chunk)) >> gPageSize2Pow);
  MOZ_DIAGNOSTIC_ASSERT(run_ind >= gChunkHeaderNumPages);
  MOZ_RELEASE_ASSERT(run_ind < gChunkNumPages - 1);
  if ((chunk->map[run_ind].bits & CHUNK_MAP_LARGE) != 0) {
    size = chunk->map[run_ind].bits & ~gPageSizeMask;
    run_pages = (size >> gPageSize2Pow);
  } else {
    run_pages = aRun->mBin->mRunSizePages;
    size = run_pages << gPageSize2Pow;
  }

  // Mark pages as unallocated in the chunk map.
  if (aDirty) {
    size_t i;

    for (i = 0; i < run_pages; i++) {
      MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + i].bits & CHUNK_MAP_DIRTY) ==
                            0);
      chunk->map[run_ind + i].bits = CHUNK_MAP_DIRTY;
    }

    if (chunk->ndirty == 0) {
      mChunksDirty.Insert(chunk);
    }
    chunk->ndirty += run_pages;
    mNumDirty += run_pages;
  } else {
    size_t i;

    for (i = 0; i < run_pages; i++) {
      chunk->map[run_ind + i].bits &= ~(CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
    }
  }
  chunk->map[run_ind].bits = size | (chunk->map[run_ind].bits & gPageSizeMask);
  chunk->map[run_ind + run_pages - 1].bits =
      size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);

  // Try to coalesce forward.
  if (run_ind + run_pages < gChunkNumPages - 1 &&
      (chunk->map[run_ind + run_pages].bits & CHUNK_MAP_ALLOCATED) == 0) {
    size_t nrun_size = chunk->map[run_ind + run_pages].bits & ~gPageSizeMask;

    // Remove successor from tree of available runs; the coalesced run is
    // inserted later.
    mRunsAvail.Remove(&chunk->map[run_ind + run_pages]);

    size += nrun_size;
    run_pages = size >> gPageSize2Pow;

    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind + run_pages - 1].bits &
                           ~gPageSizeMask) == nrun_size);
    chunk->map[run_ind].bits =
        size | (chunk->map[run_ind].bits & gPageSizeMask);
    chunk->map[run_ind + run_pages - 1].bits =
        size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
  }

  // Try to coalesce backward.
  if (run_ind > gChunkHeaderNumPages &&
      (chunk->map[run_ind - 1].bits & CHUNK_MAP_ALLOCATED) == 0) {
    size_t prun_size = chunk->map[run_ind - 1].bits & ~gPageSizeMask;

    run_ind -= prun_size >> gPageSize2Pow;

    // Remove predecessor from tree of available runs; the coalesced run is
    // inserted later.
    mRunsAvail.Remove(&chunk->map[run_ind]);

    size += prun_size;
    run_pages = size >> gPageSize2Pow;

    MOZ_DIAGNOSTIC_ASSERT((chunk->map[run_ind].bits & ~gPageSizeMask) ==
                          prun_size);
    chunk->map[run_ind].bits =
        size | (chunk->map[run_ind].bits & gPageSizeMask);
    chunk->map[run_ind + run_pages - 1].bits =
        size | (chunk->map[run_ind + run_pages - 1].bits & gPageSizeMask);
  }

  // Insert into tree of available runs, now that coalescing is complete.
  mRunsAvail.Insert(&chunk->map[run_ind]);

  // Deallocate chunk if it is now completely unused.
  arena_chunk_t* chunk_dealloc = nullptr;
  if ((chunk->map[gChunkHeaderNumPages].bits &
       (~gPageSizeMask | CHUNK_MAP_ALLOCATED)) == gMaxLargeClass) {
    chunk_dealloc = DeallocChunk(chunk);
  }

  size_t maxDirty = EffectiveMaxDirty();
  if (mNumDirty > maxDirty) {
    Purge(maxDirty);
  }

  return chunk_dealloc;
}
void arena_t::TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun,
                          size_t aOldSize, size_t aNewSize) {
  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t head_npages = (aOldSize - aNewSize) >> gPageSize2Pow;

  MOZ_ASSERT(aOldSize > aNewSize);

  // Update the chunk map so that arena_t::RunDalloc() can treat the
  // leading run as separately allocated.
  aChunk->map[pageind].bits =
      (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
  aChunk->map[pageind + head_npages].bits =
      aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

  arena_chunk_t* no_chunk = DallocRun(aRun, false);
  // This will never release a chunk as there's still at least one allocated
  // run.
  MOZ_ASSERT(!no_chunk);
}
void arena_t::TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun,
                          size_t aOldSize, size_t aNewSize, bool aDirty) {
  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t npages = aNewSize >> gPageSize2Pow;

  MOZ_ASSERT(aOldSize > aNewSize);

  // Update the chunk map so that arena_t::RunDalloc() can treat the
  // trailing run as separately allocated.
  aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
  aChunk->map[pageind + npages].bits =
      (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

  arena_chunk_t* no_chunk =
      DallocRun((arena_run_t*)(uintptr_t(aRun) + aNewSize), aDirty);
  // This will never release a chunk as there's still at least one allocated
  // run.
  MOZ_ASSERT(!no_chunk);
}
arena_run_t* arena_t::GetNonFullBinRun(arena_bin_t* aBin) {
  arena_chunk_map_t* mapelm;
  arena_run_t* run;
  unsigned i, remainder;

  // Look for a usable run.
  mapelm = aBin->mNonFullRuns.First();
  if (mapelm) {
    // run is guaranteed to have available space.
    aBin->mNonFullRuns.Remove(mapelm);
    run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
    return run;
  }
  // No existing runs have any space available.

  // Allocate a new run.
  run = AllocRun(static_cast<size_t>(aBin->mRunSizePages) << gPageSize2Pow,
                 false, false);
  if (!run) {
    return nullptr;
  }
  // Don't initialize if a race in arena_t::RunAlloc() allowed an existing
  // run to become usable.
  if (run == aBin->mCurrentRun) {
    return run;
  }

  // Initialize run internals.
  run->mBin = aBin;

  for (i = 0; i < aBin->mRunNumRegionsMask - 1; i++) {
    run->mRegionsMask[i] = UINT_MAX;
  }
  remainder = aBin->mRunNumRegions & ((1U << (LOG2(sizeof(int)) + 3)) - 1);
  if (remainder == 0) {
    run->mRegionsMask[i] = UINT_MAX;
  } else {
    // The last element has spare bits that need to be unset.
    run->mRegionsMask[i] =
        (UINT_MAX >> ((1U << (LOG2(sizeof(int)) + 3)) - remainder));
  }

  run->mRegionsMinElement = 0;

  run->mNumFree = aBin->mRunNumRegions;
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  run->mMagic = ARENA_RUN_MAGIC;
#endif

  return run;
}
void arena_bin_t::Init(SizeClass aSizeClass) {
  size_t try_run_size;
  unsigned try_nregs, try_mask_nelms, try_reg0_offset;
  // Size of the run header, excluding mRegionsMask.
  static const size_t kFixedHeaderSize = offsetof(arena_run_t, mRegionsMask);

  MOZ_ASSERT(aSizeClass.Size() <= gMaxBinClass);

  try_run_size = gPageSize;

  mCurrentRun = nullptr;
  mNonFullRuns.Init();
  mSizeClass = aSizeClass.Size();

  // Run size expansion loop.
  while (true) {
    try_nregs = ((try_run_size - kFixedHeaderSize) / mSizeClass) +
                1;  // Counter-act try_nregs-- in loop.

    // The do..while loop iteratively reduces the number of regions until
    // the run header and the regions no longer overlap. A closed formula
    // would be quite messy, since there is an interdependency between the
    // header's mask length and the number of regions.
    do {
      try_nregs--;
      try_mask_nelms =
          (try_nregs >> (LOG2(sizeof(int)) + 3)) +
          ((try_nregs & ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
      try_reg0_offset = try_run_size - (try_nregs * mSizeClass);
    } while (kFixedHeaderSize + (sizeof(unsigned) * try_mask_nelms) >
             try_reg0_offset);

    // Try to keep the run overhead below kRunOverhead.
    if (Fraction(try_reg0_offset, try_run_size) <= kRunOverhead) {
      break;
    }

    // If the overhead is larger than the size class, it means the size class
    // is small and doesn't align very well with the header. It's desirable to
    // have smaller run sizes for them, so relax the overhead requirement.
    if (try_reg0_offset > mSizeClass) {
      if (Fraction(try_reg0_offset, try_run_size) <= kRunRelaxedOverhead) {
        break;
      }
    }

    // The run header includes one bit per region of the given size. For sizes
    // small enough, the number of regions is large enough that growing the run
    // size barely moves the needle for the overhead because of all those bits.
    // For example, for a size of 8 bytes, adding 4KiB to the run size adds
    // close to 512 bits to the header, which is 64 bytes.
    // With such overhead, there is no way to get to the wanted overhead above,
    // so we give up if the required size for mRegionsMask more than doubles the
    // size of the run header.
    if (try_mask_nelms * sizeof(unsigned) >= kFixedHeaderSize) {
      break;
    }

    // If next iteration is going to be larger than the largest possible large
    // size class, then we didn't find a setup where the overhead is small
    // enough, and we can't do better than the current settings, so just use
    // them.
    if (try_run_size + gPageSize > gMaxLargeClass) {
      break;
    }

    // Try more aggressive settings.
    try_run_size += gPageSize;
  }

  MOZ_ASSERT(kFixedHeaderSize + (sizeof(unsigned) * try_mask_nelms) <=
             try_reg0_offset);
  MOZ_ASSERT((try_mask_nelms << (LOG2(sizeof(int)) + 3)) >= try_nregs);

  // Copy final settings.
  MOZ_ASSERT((try_run_size >> gPageSize2Pow) <= UINT8_MAX);
  mRunSizePages = static_cast<uint8_t>(try_run_size >> gPageSize2Pow);
  mRunNumRegions = try_nregs;
  mRunNumRegionsMask = try_mask_nelms;
  mRunFirstRegionOffset = try_reg0_offset;
  mSizeDivisor = FastDivisor<uint16_t>(aSizeClass.Size(), try_run_size);
}
void* arena_t::MallocSmall(size_t aSize, bool aZero) {
  void* ret;
  arena_bin_t* bin;
  arena_run_t* run;
  SizeClass sizeClass(aSize);
  aSize = sizeClass.Size();

  switch (sizeClass.Type()) {
    case SizeClass::Tiny:
      bin = &mBins[FloorLog2(aSize / kMinTinyClass)];
      break;
    case SizeClass::Quantum:
      // Although we divide 2 things by kQuantum, the compiler will
      // reduce `kMinQuantumClass / kQuantum` and `kNumTinyClasses` to a
      // constant.
      bin = &mBins[kNumTinyClasses + (aSize / kQuantum) -
                   (kMinQuantumClass / kQuantum)];
      break;
    case SizeClass::QuantumWide:
      bin =
          &mBins[kNumTinyClasses + kNumQuantumClasses + (aSize / kQuantumWide) -
                 (kMinQuantumWideClass / kQuantumWide)];
      break;
    case SizeClass::SubPage:
      bin =
          &mBins[kNumTinyClasses + kNumQuantumClasses + kNumQuantumWideClasses +
                 (FloorLog2(aSize) - LOG2(kMinSubPageClass))];
      break;
    default:
      MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
  }
  MOZ_DIAGNOSTIC_ASSERT(aSize == bin->mSizeClass);

  // Before we lock, we determine if we need to randomize the allocation
  // because if we do, we need to create the PRNG which might require
  // allocating memory (arc4random on OSX for example) and we need to
  // avoid the deadlock.
  if (MOZ_UNLIKELY(mRandomizeSmallAllocations && mPRNG == nullptr)) {
    // This is frustrating. Because the code backing RandomUint64 (arc4random
    // for example) may allocate memory, and because
    // mRandomizeSmallAllocations is true and we haven't yet initialized mPRNG,
    // we would re-enter this same case and cause a deadlock inside e.g.
    // arc4random. So we temporarily disable mRandomizeSmallAllocations to
    // skip this case and then re-enable it.
    mRandomizeSmallAllocations = false;
    mozilla::Maybe<uint64_t> prngState1 = mozilla::RandomUint64();
    mozilla::Maybe<uint64_t> prngState2 = mozilla::RandomUint64();
    void* backing =
        base_alloc(sizeof(mozilla::non_crypto::XorShift128PlusRNG));
    mPRNG = new (backing) mozilla::non_crypto::XorShift128PlusRNG(
        prngState1.valueOr(0), prngState2.valueOr(0));
    mRandomizeSmallAllocations = true;
  }
  MOZ_ASSERT(!mRandomizeSmallAllocations || mPRNG);

  {
    MaybeMutexAutoLock lock(mLock);
    run = bin->mCurrentRun;
    if (MOZ_UNLIKELY(!run || run->mNumFree == 0)) {
      run = bin->mCurrentRun = GetNonFullBinRun(bin);
    }
    if (MOZ_UNLIKELY(!run)) {
      return nullptr;
    }
    MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
    MOZ_DIAGNOSTIC_ASSERT(run->mNumFree > 0);
    ret = ArenaRunRegAlloc(run, bin);
    MOZ_DIAGNOSTIC_ASSERT(ret);

    mStats.allocated_small += aSize;
  }

  if (!aZero) {
    ApplyZeroOrJunk(ret, aSize);
  } else {
    memset(ret, 0, aSize);
  }
  return ret;
}
void* arena_t::MallocLarge(size_t aSize, bool aZero) {
  void* ret;

  // Large allocation.
  aSize = PAGE_CEILING(aSize);

  {
    MaybeMutexAutoLock lock(mLock);
    ret = AllocRun(aSize, true, aZero);
    if (!ret) {
      return nullptr;
    }
    mStats.allocated_large += aSize;
  }

  if (!aZero) {
    ApplyZeroOrJunk(ret, aSize);
  }

  return ret;
}
void* arena_t::Malloc(size_t aSize, bool aZero) {
  MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
  MOZ_ASSERT(aSize != 0);

  if (aSize <= gMaxBinClass) {
    return MallocSmall(aSize, aZero);
  }
  if (aSize <= gMaxLargeClass) {
    return MallocLarge(aSize, aZero);
  }
  return MallocHuge(aSize, aZero);
}
// Only handles large allocations that require more than page alignment.
void* arena_t::PallocLarge(size_t aAlignment, size_t aSize, size_t aAllocSize) {
  void* ret;
  size_t offset;
  arena_chunk_t* chunk;

  MOZ_ASSERT((aSize & gPageSizeMask) == 0);
  MOZ_ASSERT((aAlignment & gPageSizeMask) == 0);

  {
    MaybeMutexAutoLock lock(mLock);
    ret = AllocRun(aAllocSize, true, false);
    if (!ret) {
      return nullptr;
    }

    chunk = GetChunkForPtr(ret);

    offset = uintptr_t(ret) & (aAlignment - 1);
    MOZ_ASSERT((offset & gPageSizeMask) == 0);
    MOZ_ASSERT(offset < aAllocSize);
    if (offset == 0) {
      TrimRunTail(chunk, (arena_run_t*)ret, aAllocSize, aSize, false);
    } else {
      size_t leadsize, trailsize;

      leadsize = aAlignment - offset;
      if (leadsize > 0) {
        TrimRunHead(chunk, (arena_run_t*)ret, aAllocSize,
                    aAllocSize - leadsize);
        ret = (void*)(uintptr_t(ret) + leadsize);
      }

      trailsize = aAllocSize - leadsize - aSize;
      if (trailsize != 0) {
        // Trim trailing space.
        MOZ_ASSERT(trailsize < aAllocSize);
        TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
      }
    }

    mStats.allocated_large += aSize;
  }

  ApplyZeroOrJunk(ret, aSize);
  return ret;
}
void* arena_t::Palloc(size_t aAlignment, size_t aSize) {
  void* ret;
  size_t ceil_size, run_size;

  // Round size up to the nearest multiple of alignment.
  //
  // This done, we can take advantage of the fact that for each small
  // size class, every object is aligned at the smallest power of two
  // that is non-zero in the base two representation of the size. For
  // example:
  //
  //   Size |   Base 2 | Minimum alignment
  //   -----+----------+------------------
  //     96 |  1100000 |                32
  //    144 | 10100000 |                32
  //    192 | 11000000 |                64
  //
  // Depending on runtime settings, it is possible that arena_malloc()
  // will further round up to a power of two, but that never causes
  // correctness issues.
  ceil_size = ALIGNMENT_CEILING(aSize, aAlignment);

  // (ceil_size < aSize) protects against the combination of maximal
  // alignment and size greater than maximal alignment.
  if (ceil_size < aSize) {
    return nullptr;
  }

  if (ceil_size <= gPageSize ||
      (aAlignment <= gPageSize && ceil_size <= gMaxLargeClass)) {
    ret = Malloc(ceil_size, false);
  } else {
    // We can't achieve sub-page alignment, so round up alignment
    // permanently; it makes later calculations simpler.
    aAlignment = PAGE_CEILING(aAlignment);
    ceil_size = PAGE_CEILING(aSize);

    // (ceil_size < aSize) protects against very large sizes within
    // pagesize of SIZE_T_MAX.
    //
    // (ceil_size + aAlignment < ceil_size) protects against the
    // combination of maximal alignment and ceil_size large enough
    // to cause overflow. This is similar to the first overflow
    // check above, but it needs to be repeated due to the new
    // ceil_size value, which may now be *equal* to maximal
    // alignment, whereas before we only detected overflow if the
    // original size was *greater* than maximal alignment.
    if (ceil_size < aSize || ceil_size + aAlignment < ceil_size) {
      return nullptr;
    }

    // Calculate the size of the over-size run that arena_palloc()
    // would need to allocate in order to guarantee the alignment.
    if (ceil_size >= aAlignment) {
      run_size = ceil_size + aAlignment - gPageSize;
    } else {
      // It is possible that (aAlignment << 1) will cause
      // overflow, but it doesn't matter because we also
      // subtract pagesize, which in the case of overflow
      // leaves us with a very large run_size. That causes
      // the first conditional below to fail, which means
      // that the bogus run_size value never gets used for
      // anything important.
      run_size = (aAlignment << 1) - gPageSize;
    }

    if (run_size <= gMaxLargeClass) {
      ret = PallocLarge(aAlignment, ceil_size, run_size);
    } else if (aAlignment <= kChunkSize) {
      ret = MallocHuge(ceil_size, false);
    } else {
      ret = PallocHuge(ceil_size, aAlignment, false);
    }
  }

  MOZ_ASSERT((uintptr_t(ret) & (aAlignment - 1)) == 0);
  return ret;
}
class AllocInfo {
 public:
  template <bool Validate = false>
  static inline AllocInfo Get(const void* aPtr) {
    // If the allocator is not initialized, the pointer can't belong to it.
    if (Validate && !malloc_initialized) {
      return AllocInfo();
    }

    auto chunk = GetChunkForPtr(aPtr);
    if (!chunk || !gChunkRTree.Get(chunk)) {
      return AllocInfo();
    }

    if (chunk != aPtr) {
      MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);
      size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
      return GetInChunk(aPtr, chunk, pageind);
    }

    // Huge allocation.
    extent_node_t key;
    key.mAddr = const_cast<void*>(aPtr);
    MutexAutoLock lock(huge_mtx);
    extent_node_t* node = huge.Search(&key);
    if (Validate && !node) {
      return AllocInfo();
    }
    return AllocInfo(node->mSize, node);
  }

  // Get the allocation information for a pointer we know is within a chunk
  // (Small or large, not huge).
  static inline AllocInfo GetInChunk(const void* aPtr, arena_chunk_t* aChunk,
                                     size_t pageind) {
    size_t mapbits = aChunk->map[pageind].bits;
    MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);

    size_t size;
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
      arena_run_t* run = (arena_run_t*)(mapbits & ~gPageSizeMask);
      MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
      size = run->mBin->mSizeClass;
    } else {
      size = mapbits & ~gPageSizeMask;
      MOZ_DIAGNOSTIC_ASSERT(size != 0);
    }

    return AllocInfo(size, aChunk);
  }

  // Validate ptr before assuming that it points to an allocation. Currently,
  // the following validation is performed:
  //
  // + Check that ptr is not nullptr.
  //
  // + Check that ptr lies within a mapped chunk.
  static inline AllocInfo GetValidated(const void* aPtr) {
    return Get<true>(aPtr);
  }

  AllocInfo() : mSize(0), mChunk(nullptr) {}

  explicit AllocInfo(size_t aSize, arena_chunk_t* aChunk)
      : mSize(aSize), mChunk(aChunk) {
    MOZ_ASSERT(mSize <= gMaxLargeClass);
  }

  explicit AllocInfo(size_t aSize, extent_node_t* aNode)
      : mSize(aSize), mNode(aNode) {
    MOZ_ASSERT(mSize > gMaxLargeClass);
  }

  size_t Size() { return mSize; }

  arena_t* Arena() {
    if (mSize <= gMaxLargeClass) {
      return mChunk->arena;
    }
    // Best effort detection that we're not trying to access an already
    // disposed arena. In the case of a disposed arena, the memory location
    // pointed by mNode->mArena is either free (but still a valid memory
    // region, per TypedBaseAlloc<arena_t>), in which case its id was reset,
    // or has been reallocated for a new region, and its id is very likely
    // different (per randomness). In both cases, the id is unlikely to
    // match what it was for the disposed arena.
    MOZ_RELEASE_ASSERT(mNode->mArenaId == mNode->mArena->mId);
    return mNode->mArena;
  }

  bool IsValid() const { return !!mSize; }

 private:
  size_t mSize;
  union {
    // Pointer to the chunk associated with the allocation for small
    // and large allocations.
    arena_chunk_t* mChunk;

    // Pointer to the extent node for huge allocations.
    extent_node_t* mNode;
  };
};
inline void MozJemalloc::jemalloc_ptr_info(const void* aPtr,
                                           jemalloc_ptr_info_t* aInfo) {
  arena_chunk_t* chunk = GetChunkForPtr(aPtr);

  // Is the pointer null, or within one chunk's size of null?
  // Alternatively, if the allocator is not initialized yet, the pointer
  // cannot belong to it.
  if (!chunk || !malloc_initialized) {
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  // Look for huge allocations before looking for |chunk| in gChunkRTree.
  // This is necessary because |chunk| won't be in gChunkRTree if it's
  // the second or subsequent chunk in a huge allocation.
  extent_node_t* node;
  extent_node_t key;
  {
    MutexAutoLock lock(huge_mtx);
    key.mAddr = const_cast<void*>(aPtr);
    node =
        reinterpret_cast<RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(
            &huge)
            ->Search(&key);
    if (node) {
      *aInfo = {TagLiveAlloc, node->mAddr, node->mSize, node->mArena->mId};
      return;
    }
  }

  // It's not a huge allocation. Check if we have a known chunk.
  if (!gChunkRTree.Get(chunk)) {
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);

  // Get the page number within the chunk.
  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
  if (pageind < gChunkHeaderNumPages) {
    // Within the chunk header.
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  size_t mapbits = chunk->map[pageind].bits;

  if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
    void* pageaddr = (void*)(uintptr_t(aPtr) & ~gPageSizeMask);
    *aInfo = {TagFreedPage, pageaddr, gPageSize, chunk->arena->mId};
    return;
  }

  if (mapbits & CHUNK_MAP_LARGE) {
    // It's a large allocation. Only the first page of a large
    // allocation contains its size, so if the address is not in
    // the first page, scan back to find the allocation size.
    size_t size;
    while (true) {
      size = mapbits & ~gPageSizeMask;
      if (size != 0) {
        break;
      }

      // The following two return paths shouldn't occur in
      // practice unless there is heap corruption.
      pageind--;
      MOZ_DIAGNOSTIC_ASSERT(pageind >= gChunkHeaderNumPages);
      if (pageind < gChunkHeaderNumPages) {
        *aInfo = {TagUnknown, nullptr, 0, 0};
        return;
      }

      mapbits = chunk->map[pageind].bits;
      MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
      if (!(mapbits & CHUNK_MAP_LARGE)) {
        *aInfo = {TagUnknown, nullptr, 0, 0};
        return;
      }
    }

    void* addr = ((char*)chunk) + (pageind << gPageSize2Pow);
    *aInfo = {TagLiveAlloc, addr, size, chunk->arena->mId};
    return;
  }

  // It must be a small allocation.
  auto run = (arena_run_t*)(mapbits & ~gPageSizeMask);
  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);

  // The allocation size is stored in the run metadata.
  size_t size = run->mBin->mSizeClass;

  // Address of the first possible pointer in the run after its headers.
  uintptr_t reg0_addr = (uintptr_t)run + run->mBin->mRunFirstRegionOffset;
  if (aPtr < (void*)reg0_addr) {
    // In the run header.
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  // Position in the run.
  unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;

  // Pointer to the allocation's base address.
  void* addr = (void*)(reg0_addr + regind * size);

  // Check if the allocation has been freed.
  unsigned elm = regind >> (LOG2(sizeof(int)) + 3);
  unsigned bit = regind - (elm << (LOG2(sizeof(int)) + 3));
  auto tag =
      ((run->mRegionsMask[elm] & (1U << bit))) ? TagFreedAlloc : TagLiveAlloc;

  *aInfo = {tag, addr, size, chunk->arena->mId};
}

namespace Debug {
// Helper for debuggers. We don't want it to be inlined and optimized out.
MOZ_NEVER_INLINE jemalloc_ptr_info_t* jemalloc_ptr_info(const void* aPtr) {
  static jemalloc_ptr_info_t info;
  MozJemalloc::jemalloc_ptr_info(aPtr, &info);
  return &info;
}
}  // namespace Debug
arena_chunk_t* arena_t::DallocSmall(arena_chunk_t* aChunk, void* aPtr,
                                    arena_chunk_map_t* aMapElm) {
  arena_run_t* run;
  arena_bin_t* bin;
  size_t size;

  run = (arena_run_t*)(aMapElm->bits & ~gPageSizeMask);
  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
  bin = run->mBin;
  size = bin->mSizeClass;
  MOZ_DIAGNOSTIC_ASSERT(uintptr_t(aPtr) >=
                        uintptr_t(run) + bin->mRunFirstRegionOffset);

  arena_run_reg_dalloc(run, bin, aPtr, size);
  run->mNumFree++;

  arena_chunk_t* dealloc_chunk = nullptr;

  if (run->mNumFree == bin->mRunNumRegions) {
    if (run == bin->mCurrentRun) {
      bin->mCurrentRun = nullptr;
    } else if (bin->mRunNumRegions != 1) {
      size_t run_pageind =
          (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
      arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];

      // This block's conditional is necessary because if the
      // run only contains one region, then it never gets
      // inserted into the non-full runs tree.
      MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == run_mapelm);
      bin->mNonFullRuns.Remove(run_mapelm);
    }
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
    run->mMagic = 0;
#endif
    dealloc_chunk = DallocRun(run, true);
  } else if (run->mNumFree == 1 && run != bin->mCurrentRun) {
    // Make sure that bin->mCurrentRun always refers to the lowest
    // non-full run, if one exists.
    if (!bin->mCurrentRun) {
      bin->mCurrentRun = run;
    } else if (uintptr_t(run) < uintptr_t(bin->mCurrentRun)) {
      // Switch mCurrentRun.
      if (bin->mCurrentRun->mNumFree > 0) {
        arena_chunk_t* runcur_chunk = GetChunkForPtr(bin->mCurrentRun);
        size_t runcur_pageind =
            (uintptr_t(bin->mCurrentRun) - uintptr_t(runcur_chunk)) >>
            gPageSize2Pow;
        arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];

        MOZ_DIAGNOSTIC_ASSERT(!bin->mNonFullRuns.Search(runcur_mapelm));
        bin->mNonFullRuns.Insert(runcur_mapelm);
      }
      bin->mCurrentRun = run;
    } else {
      size_t run_pageind =
          (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
      arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];

      MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == nullptr);
      bin->mNonFullRuns.Insert(run_mapelm);
    }
  }
  mStats.allocated_small -= size;

  return dealloc_chunk;
}
arena_chunk_t* arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr) {
  MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) & gPageSizeMask) == 0);
  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t size = aChunk->map[pageind].bits & ~gPageSizeMask;

  mStats.allocated_large -= size;

  return DallocRun((arena_run_t*)aPtr, true);
}
static inline void arena_dalloc(void* aPtr, size_t aOffset, arena_t* aArena) {
  MOZ_ASSERT(aPtr);
  MOZ_ASSERT(aOffset != 0);
  MOZ_ASSERT(GetChunkOffsetForPtr(aPtr) == aOffset);

  auto chunk = (arena_chunk_t*)((uintptr_t)aPtr - aOffset);
  auto arena = chunk->arena;
  MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
  MOZ_RELEASE_ASSERT(!aArena || arena == aArena);

  size_t pageind = aOffset >> gPageSize2Pow;

  AllocInfo info = AllocInfo::GetInChunk(aPtr, chunk, pageind);
  MOZ_ASSERT(info.IsValid());
  MaybePoison(aPtr, info.Size());

  arena_chunk_t* chunk_dealloc_delay = nullptr;

  {
    MaybeMutexAutoLock lock(arena->mLock);
    arena_chunk_map_t* mapelm = &chunk->map[pageind];
    MOZ_RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_DECOMMITTED) == 0,
                       "Freeing in decommitted page.");
    MOZ_RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0,
                       "Double-free?");
    if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
      // Small allocation.
      chunk_dealloc_delay = arena->DallocSmall(chunk, aPtr, mapelm);
    } else {
      // Large allocation.
      chunk_dealloc_delay = arena->DallocLarge(chunk, aPtr);
    }
  }

  if (chunk_dealloc_delay) {
    chunk_dealloc((void*)chunk_dealloc_delay, kChunkSize, ARENA_CHUNK);
  }
}
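
// Note for readers: arena_dalloc hands a fully-emptied chunk back via
// chunk_dealloc_delay and only calls chunk_dealloc() after the arena lock has
// been released, so unmapping a whole chunk never happens while the arena's
// mutex is held.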
static inline void idalloc(void* ptr, arena_t* aArena) {
  size_t offset;

  MOZ_ASSERT(ptr);

  offset = GetChunkOffsetForPtr(ptr);
  if (offset != 0) {
    arena_dalloc(ptr, offset, aArena);
  } else {
    huge_dalloc(ptr, aArena);
  }
}
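
// Note for readers: GetChunkOffsetForPtr() returns the pointer's offset within
// its chunk. Huge allocations are chunk-aligned, so a zero offset means the
// pointer cannot be an arena allocation (those always sit after the chunk
// header pages) and is routed to huge_dalloc() instead.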
void arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                                size_t aOldSize) {
  MOZ_ASSERT(aSize < aOldSize);

  // Shrink the run, and make trailing pages available for other
  // allocations.
  MaybeMutexAutoLock lock(mLock);
  TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
  mStats.allocated_large -= aOldSize - aSize;
}
// Returns whether reallocation was successful.
bool arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                              size_t aOldSize) {
  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t npages = aOldSize >> gPageSize2Pow;

  MaybeMutexAutoLock lock(mLock);
  MOZ_DIAGNOSTIC_ASSERT(aOldSize ==
                        (aChunk->map[pageind].bits & ~gPageSizeMask));

  // Try to extend the run.
  MOZ_ASSERT(aSize > aOldSize);
  if (pageind + npages < gChunkNumPages - 1 &&
      (aChunk->map[pageind + npages].bits & CHUNK_MAP_ALLOCATED) == 0 &&
      (aChunk->map[pageind + npages].bits & ~gPageSizeMask) >=
          aSize - aOldSize) {
    // The next run is available and sufficiently large. Split the
    // following run, then merge the first part with the existing
    // allocation.
    if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
                                 ((pageind + npages) << gPageSize2Pow)),
                  aSize - aOldSize, true, false)) {
      return false;
    }

    aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    aChunk->map[pageind + npages].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    mStats.allocated_large += aSize - aOldSize;
    return true;
  }

  return false;
}
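
// Worked example (illustrative, assuming 4 KiB pages): growing a large
// allocation from 2 pages to 3 pages succeeds in place only when the map
// entry immediately after it in the same chunk is unallocated and describes a
// free run of at least aSize - aOldSize == 4 KiB, which is exactly the
// condition tested above before SplitRun() is called.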
void* arena_t::RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize) {
  void* ret;
  size_t copysize;
  SizeClass sizeClass(aSize);

  // Try to avoid moving the allocation.
  if (aOldSize <= gMaxLargeClass && sizeClass.Size() == aOldSize) {
    if (aSize < aOldSize) {
      MaybePoison((void*)(uintptr_t(aPtr) + aSize), aOldSize - aSize);
    }
    return aPtr;
  }
  if (sizeClass.Type() == SizeClass::Large && aOldSize > gMaxBinClass &&
      aOldSize <= gMaxLargeClass) {
    arena_chunk_t* chunk = GetChunkForPtr(aPtr);
    if (sizeClass.Size() < aOldSize) {
      // Fill before shrinking in order to avoid a race.
      MaybePoison((void*)((uintptr_t)aPtr + aSize), aOldSize - aSize);
      RallocShrinkLarge(chunk, aPtr, sizeClass.Size(), aOldSize);
      return aPtr;
    }
    if (RallocGrowLarge(chunk, aPtr, sizeClass.Size(), aOldSize)) {
      ApplyZeroOrJunk((void*)((uintptr_t)aPtr + aOldSize), aSize - aOldSize);
      return aPtr;
    }
  }

  // If we get here, then aSize and aOldSize are different enough that we
  // need to move the object. In that case, fall back to allocating new
  // space and copying. Allow non-private arenas to switch arenas.
  ret = (mIsPrivate ? this : choose_arena(aSize))->Malloc(aSize, false);
  if (!ret) {
    return nullptr;
  }

  // Junk/zero-filling were already done by arena_t::Malloc().
  copysize = (aSize < aOldSize) ? aSize : aOldSize;
  if (copysize >= VM_COPY_MIN) {
    pages_copy(ret, aPtr, copysize);
  } else {
    memcpy(ret, aPtr, copysize);
  }
  idalloc(aPtr, this);
  return ret;
}
void* arena_t::Ralloc(void* aPtr, size_t aSize, size_t aOldSize) {
  MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
  MOZ_ASSERT(aPtr);
  MOZ_ASSERT(aSize != 0);

  return (aSize <= gMaxLargeClass) ? RallocSmallOrLarge(aPtr, aSize, aOldSize)
                                   : RallocHuge(aPtr, aSize, aOldSize);
}
void* arena_t::operator new(size_t aCount, const fallible_t&) noexcept {
  MOZ_ASSERT(aCount == sizeof(arena_t));
  return TypedBaseAlloc<arena_t>::alloc();
}

void arena_t::operator delete(void* aPtr) {
  TypedBaseAlloc<arena_t>::dealloc((arena_t*)aPtr);
}
arena_t::arena_t(arena_params_t* aParams, bool aIsPrivate) {
  unsigned i;

  memset(&mLink, 0, sizeof(mLink));
  memset(&mStats, 0, sizeof(arena_stats_t));

  // Initialize chunks.
  mChunksDirty.Init();
#ifdef MALLOC_DOUBLE_PURGE
  new (&mChunksMAdvised) DoublyLinkedList<arena_chunk_t>();
#endif

  mRandomizeSmallAllocations = opt_randomize_small;
  MaybeMutex::DoLock doLock = MaybeMutex::MUST_LOCK;
  if (aParams) {
    uint32_t randFlags = aParams->mFlags & ARENA_FLAG_RANDOMIZE_SMALL_MASK;
    switch (randFlags) {
      case ARENA_FLAG_RANDOMIZE_SMALL_ENABLED:
        mRandomizeSmallAllocations = true;
        break;
      case ARENA_FLAG_RANDOMIZE_SMALL_DISABLED:
        mRandomizeSmallAllocations = false;
        break;
      case ARENA_FLAG_RANDOMIZE_SMALL_DEFAULT:
      default:
        break;
    }

    uint32_t threadFlags = aParams->mFlags & ARENA_FLAG_THREAD_MASK;
    if (threadFlags == ARENA_FLAG_THREAD_MAIN_THREAD_ONLY) {
      // At the moment we require that any ARENA_FLAG_THREAD_MAIN_THREAD_ONLY
      // arenas are created and therefore always accessed by the main thread.
      // This is for two reasons:
      // * it allows jemalloc_stats to read their statistics (we also require
      //   that jemalloc_stats is only used on the main thread).
      // * Only main-thread or threadsafe arenas can be guaranteed to be in a
      //   consistent state after a fork() from the main thread. If fork()
      //   occurs off-thread then the new child process cannot use these arenas
      //   (new children should usually exec() or exit() since other data may
      //   also be inconsistent).
      MOZ_ASSERT(gArenas.IsOnMainThread());
      MOZ_ASSERT(aIsPrivate);
      doLock = MaybeMutex::AVOID_LOCK_UNSAFE;
    }

    mMaxDirtyIncreaseOverride = aParams->mMaxDirtyIncreaseOverride;
    mMaxDirtyDecreaseOverride = aParams->mMaxDirtyDecreaseOverride;
  } else {
    mMaxDirtyIncreaseOverride = 0;
    mMaxDirtyDecreaseOverride = 0;
  }

  MOZ_RELEASE_ASSERT(mLock.Init(doLock));

  mIsPrivate = aIsPrivate;

  // The default maximum amount of dirty pages allowed on arenas is a fraction
  // of opt_dirty_max.
  mMaxDirty = (aParams && aParams->mMaxDirty) ? aParams->mMaxDirty
                                              : (opt_dirty_max / 8);

  // Initialize bins.
  SizeClass sizeClass(1);

  for (i = 0;; i++) {
    arena_bin_t& bin = mBins[i];
    bin.Init(sizeClass);

    // SizeClass doesn't want sizes larger than gMaxBinClass for now.
    if (sizeClass.Size() == gMaxBinClass) {
      break;
    }
    sizeClass = sizeClass.Next();
  }
  MOZ_ASSERT(i == NUM_SMALL_CLASSES - 1);

#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  mMagic = ARENA_MAGIC;
#endif
}
arena_t::~arena_t() {
  size_t i;
  MaybeMutexAutoLock lock(mLock);
  MOZ_RELEASE_ASSERT(!mLink.Left() && !mLink.Right(),
                     "Arena is still registered");
  MOZ_RELEASE_ASSERT(!mStats.allocated_small && !mStats.allocated_large,
                     "Arena is not empty");
  if (mSpare) {
    chunk_dealloc(mSpare, kChunkSize, ARENA_CHUNK);
  }
  for (i = 0; i < NUM_SMALL_CLASSES; i++) {
    MOZ_RELEASE_ASSERT(!mBins[i].mNonFullRuns.First(), "Bin is not empty");
  }
#ifdef MOZ_DEBUG
  {
    MutexAutoLock lock(huge_mtx);
    // This is an expensive check, so we only do it on debug builds.
    for (auto node : huge.iter()) {
      MOZ_RELEASE_ASSERT(node->mArenaId != mId, "Arena has huge allocations");
    }
  }
#endif
}
arena_t* ArenaCollection::CreateArena(bool aIsPrivate,
                                      arena_params_t* aParams) {
  arena_t* ret = new (fallible) arena_t(aParams, aIsPrivate);
  if (!ret) {
    // Only reached if there is an OOM error.

    // OOM here is quite inconvenient to propagate, since dealing with it
    // would require a check for failure in the fast path. Instead, punt
    // by using the first arena.
    // In practice, this is an extremely unlikely failure.
    _malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");

    return mDefaultArena;
  }

  MutexAutoLock lock(mLock);

  // For public arenas, it's fine to just use an incrementing arena id.
  if (!aIsPrivate) {
    ret->mId = mLastPublicArenaId++;
    mArenas.Insert(ret);
    return ret;
  }

  // For private arenas, generate a cryptographically-secure random id for the
  // new arena. If an attacker manages to get control of the process, this
  // should make it more difficult for them to "guess" the ID of a memory
  // arena, stopping them from getting data they may want.
  Tree& tree = (ret->IsMainThreadOnly()) ? mMainThreadArenas : mPrivateArenas;
  arena_id_t arena_id;
  do {
    arena_id = MakeRandArenaId(ret->IsMainThreadOnly());
    // Keep looping until we ensure that the random number we just generated
    // isn't already in use by another active arena.
  } while (GetByIdInternal(tree, arena_id));

  ret->mId = arena_id;
  tree.Insert(ret);
  return ret;
}
arena_id_t ArenaCollection::MakeRandArenaId(bool aIsMainThreadOnly) const {
  uint64_t rand;
  do {
    mozilla::Maybe<uint64_t> maybeRandomId = mozilla::RandomUint64();
    MOZ_RELEASE_ASSERT(maybeRandomId.isSome());

    rand = maybeRandomId.value();

    // Set or clear the least significant bit depending on if this is a
    // main-thread-only arena. We use this in GetById.
    if (aIsMainThreadOnly) {
      rand = rand | MAIN_THREAD_ARENA_BIT;
    } else {
      rand = rand & ~MAIN_THREAD_ARENA_BIT;
    }

    // Avoid 0 as an arena Id. We use 0 for disposed arenas.
  } while (rand == 0);

  return arena_id_t(rand);
}
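
// Note for readers: because the least significant bit of every id produced
// here encodes "main-thread-only", GetById() can tell from the id alone which
// tree to search, and main-thread-only lookups can skip taking mLock (see
// ArenaCollection::GetById below).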
// ***************************************************************************
// Begin general internal functions.

void* arena_t::MallocHuge(size_t aSize, bool aZero) {
  return PallocHuge(aSize, kChunkSize, aZero);
}

void* arena_t::PallocHuge(size_t aSize, size_t aAlignment, bool aZero) {
  void* ret;
  size_t csize;
  size_t psize;
  extent_node_t* node;

  // We're going to configure guard pages in the region between the
  // page-aligned size and the chunk-aligned size, so if those are the same
  // then we need to force that region into existence.
  csize = CHUNK_CEILING(aSize + gPageSize);
  if (csize < aSize) {
    // size is large enough to cause size_t wrap-around.
    return nullptr;
  }

  // Allocate an extent node with which to track the chunk.
  node = ExtentAlloc::alloc();
  if (!node) {
    return nullptr;
  }

  // Allocate one or more contiguous chunks for this request.
  ret = chunk_alloc(csize, aAlignment, false);
  if (!ret) {
    ExtentAlloc::dealloc(node);
    return nullptr;
  }
  psize = PAGE_CEILING(aSize);
  if (aZero) {
    chunk_assert_zero(ret, psize);
  }

  // Insert node into huge.
  node->mAddr = ret;
  node->mSize = psize;
  node->mArena = this;
  node->mArenaId = mId;

  {
    MutexAutoLock lock(huge_mtx);
    huge.Insert(node);

    // Although we allocated space for csize bytes, we indicate that we've
    // allocated only psize bytes.
    //
    // If DECOMMIT is defined, this is a reasonable thing to do, since
    // we'll explicitly decommit the bytes in excess of psize.
    //
    // If DECOMMIT is not defined, then we're relying on the OS to be lazy
    // about how it allocates physical pages to mappings. If we never
    // touch the pages in excess of psize, the OS won't allocate a physical
    // page, and we won't use more than psize bytes of physical memory.
    //
    // A correct program will only touch memory in excess of how much it
    // requested if it first calls malloc_usable_size and finds out how
    // much space it has to play with. But because we set node->mSize =
    // psize above, malloc_usable_size will return psize, not csize, and
    // the program will (hopefully) never touch bytes in excess of psize.
    // Thus those bytes won't take up space in physical memory, and we can
    // reasonably claim we never "allocated" them in the first place.
    huge_allocated += psize;
    huge_mapped += csize;
  }

  pages_decommit((void*)((uintptr_t)ret + psize), csize - psize);

  ApplyZeroOrJunk(ret, psize);

  return ret;
}
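
// Worked example (illustrative, assuming 4 KiB pages and 1 MiB chunks): for
// PallocHuge(20 KiB), psize = PAGE_CEILING(20 KiB) = 20 KiB while
// csize = CHUNK_CEILING(20 KiB + 4 KiB) = 1 MiB; the pages_decommit() call
// above releases the csize - psize tail, and only psize is counted in
// huge_allocated while csize stays in huge_mapped.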
void* arena_t::RallocHuge(void* aPtr, size_t aSize, size_t aOldSize) {
  void* ret;
  size_t copysize;

  // Avoid moving the allocation if the size class would not change.
  if (aOldSize > gMaxLargeClass &&
      CHUNK_CEILING(aSize + gPageSize) == CHUNK_CEILING(aOldSize + gPageSize)) {
    size_t psize = PAGE_CEILING(aSize);
    if (aSize < aOldSize) {
      MaybePoison((void*)((uintptr_t)aPtr + aSize), aOldSize - aSize);
    }
    if (psize < aOldSize) {
      extent_node_t key;

      pages_decommit((void*)((uintptr_t)aPtr + psize), aOldSize - psize);

      // Update recorded size.
      MutexAutoLock lock(huge_mtx);
      key.mAddr = const_cast<void*>(aPtr);
      extent_node_t* node = huge.Search(&key);
      MOZ_ASSERT(node);
      MOZ_ASSERT(node->mSize == aOldSize);
      MOZ_RELEASE_ASSERT(node->mArena == this);
      huge_allocated -= aOldSize - psize;
      // No need to change huge_mapped, because we didn't (un)map anything.
      node->mSize = psize;
    } else if (psize > aOldSize) {
      if (!pages_commit((void*)((uintptr_t)aPtr + aOldSize),
                        psize - aOldSize)) {
        return nullptr;
      }

      // We need to update the recorded size if the size increased,
      // so malloc_usable_size doesn't return a value smaller than
      // what was requested via realloc().
      extent_node_t key;
      MutexAutoLock lock(huge_mtx);
      key.mAddr = const_cast<void*>(aPtr);
      extent_node_t* node = huge.Search(&key);
      MOZ_ASSERT(node);
      MOZ_ASSERT(node->mSize == aOldSize);
      MOZ_RELEASE_ASSERT(node->mArena == this);
      huge_allocated += psize - aOldSize;
      // No need to change huge_mapped, because we didn't
      // (un)map anything.
      node->mSize = psize;
    }

    if (aSize > aOldSize) {
      ApplyZeroOrJunk((void*)((uintptr_t)aPtr + aOldSize), aSize - aOldSize);
    }
    return aPtr;
  }

  // If we get here, then aSize and aOldSize are different enough that we
  // need to use a different size class. In that case, fall back to allocating
  // new space and copying. Allow non-private arenas to switch arenas.
  ret = (mIsPrivate ? this : choose_arena(aSize))->MallocHuge(aSize, false);
  if (!ret) {
    return nullptr;
  }

  copysize = (aSize < aOldSize) ? aSize : aOldSize;
  if (copysize >= VM_COPY_MIN) {
    pages_copy(ret, aPtr, copysize);
  } else {
    memcpy(ret, aPtr, copysize);
  }
  idalloc(aPtr, this);
  return ret;
}
static void huge_dalloc(void* aPtr, arena_t* aArena) {
  extent_node_t* node;
  size_t mapped;
  {
    extent_node_t key;
    MutexAutoLock lock(huge_mtx);

    // Extract from tree of huge allocations.
    key.mAddr = aPtr;
    node = huge.Search(&key);
    MOZ_RELEASE_ASSERT(node, "Double-free?");
    MOZ_ASSERT(node->mAddr == aPtr);
    MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
    // See AllocInfo::Arena.
    MOZ_RELEASE_ASSERT(node->mArenaId == node->mArena->mId);
    huge.Remove(node);

    mapped = CHUNK_CEILING(node->mSize + gPageSize);
    huge_allocated -= node->mSize;
    huge_mapped -= mapped;
  }

  // Unmap chunk.
  chunk_dealloc(node->mAddr, mapped, HUGE_CHUNK);

  ExtentAlloc::dealloc(node);
}
size_t GetKernelPageSize() {
  static size_t kernel_page_size = ([]() {
#ifdef XP_WIN
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    return info.dwPageSize;
#else
    long result = sysconf(_SC_PAGESIZE);
    MOZ_ASSERT(result != -1);
    return result;
#endif
  })();

  return kernel_page_size;
}
// Returns whether the allocator was successfully initialized.
static bool malloc_init_hard() {
  unsigned i;
  const char* opts;

  AutoLock<StaticMutex> lock(gInitLock);

  if (malloc_initialized) {
    // Another thread initialized the allocator before this one
    // acquired gInitLock.
    return true;
  }

  if (!thread_arena.init()) {
    return false;
  }

  // Get page size and number of CPUs
  const size_t page_size = GetKernelPageSize();
  // We assume that the page size is a power of 2.
  MOZ_ASSERT(IsPowerOfTwo(page_size));
#ifdef MALLOC_STATIC_PAGESIZE
  if (gPageSize % page_size) {
    _malloc_message(
        _getprogname(),
        "Compile-time page size does not divide the runtime one.\n");
    return false;
  }
#else
  gRealPageSize = gPageSize = page_size;
#endif

  // Get runtime configuration.
  if ((opts = getenv("MALLOC_OPTIONS"))) {
    for (i = 0; opts[i] != '\0'; i++) {
      // All options are single letters, some take a *prefix* numeric argument.

      // Parse the argument.
      unsigned prefix_arg = 0;
      while (opts[i] >= '0' && opts[i] <= '9') {
        prefix_arg *= 10;
        prefix_arg += opts[i] - '0';
        i++;
      }

      switch (opts[i]) {
        case 'f':
          opt_dirty_max >>= prefix_arg ? prefix_arg : 1;
          break;
        case 'F':
          prefix_arg = prefix_arg ? prefix_arg : 1;
          if (opt_dirty_max == 0) {
            opt_dirty_max = 1;
            prefix_arg--;
          }
          opt_dirty_max <<= prefix_arg;
          if (opt_dirty_max == 0) {
            // If the shift above overflowed all the bits then clamp the result
            // instead. If we started with DIRTY_MAX_DEFAULT then this will
            // always be a power of two so choose the maximum power of two that
            // fits in a size_t.
            opt_dirty_max = size_t(1) << (sizeof(size_t) * CHAR_BIT - 1);
          }
          break;
#ifdef MALLOC_RUNTIME_CONFIG
        case 'Q':
          // The argument selects how much poisoning to do.
          if (opts[i + 1] == 'Q') {
            // Maximum poisoning.
            i++;
            opt_poison = ALL;
          } else {
            opt_poison = SOME;
            opt_poison_size = kCacheLineSize * prefix_arg;
          }
          break;
#  ifndef MALLOC_STATIC_PAGESIZE
        case 'P':
          MOZ_ASSERT(gPageSize >= 4_KiB);
          MOZ_ASSERT(gPageSize <= 64_KiB);
          prefix_arg = prefix_arg ? prefix_arg : 1;
          gPageSize <<= prefix_arg;
          // We know that if the shift causes gPageSize to be zero then it's
          // because it shifted all the bits off. We didn't start with zero.
          // Therefore if gPageSize is out of bounds we set it to 64KiB.
          if (gPageSize < 4_KiB || gPageSize > 64_KiB) {
            gPageSize = 64_KiB;
          }
          break;
#  endif
#endif
        case 'r':
          opt_randomize_small = false;
          break;
        case 'R':
          opt_randomize_small = true;
          break;
        default: {
          char cbuf[2];

          cbuf[0] = opts[i];
          cbuf[1] = '\0';
          _malloc_message(_getprogname(),
                          ": (malloc) Unsupported character "
                          "in malloc options: '",
                          cbuf, "'\n");
        }
      }
    }
  }

#ifndef MALLOC_STATIC_PAGESIZE
  // Set up the remaining page-size-dependent globals.
#endif

  // Initialize chunks data.
  chunks_mtx.Init();
  MOZ_PUSH_IGNORE_THREAD_SAFETY
  gChunksBySize.Init();
  gChunksByAddress.Init();
  MOZ_POP_THREAD_SAFETY

  // Initialize huge allocation data.
  huge_mtx.Init();
  MOZ_PUSH_IGNORE_THREAD_SAFETY
  huge.Init();
  MOZ_POP_THREAD_SAFETY

  // Initialize base allocation data structures.
  base_mtx.Init();
  MOZ_PUSH_IGNORE_THREAD_SAFETY
  base_mapped = 0;
  base_committed = 0;
  MOZ_POP_THREAD_SAFETY

  // Initialize arenas collection here.
  if (!gArenas.Init()) {
    return false;
  }

  // Assign the default arena to the initial thread.
  thread_arena.set(gArenas.GetDefault());

  if (!gChunkRTree.Init()) {
    return false;
  }

  malloc_initialized = true;

  // Dummy call so that the function is not removed by dead-code elimination
  Debug::jemalloc_ptr_info(nullptr);

#if !defined(XP_WIN) && !defined(XP_DARWIN)
  // Prevent potential deadlock on malloc locks after fork.
  pthread_atfork(_malloc_prefork, _malloc_postfork_parent,
                 _malloc_postfork_child);
#endif

  return true;
}
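
// Note on MALLOC_OPTIONS parsing (exposition only): each option is a single
// letter, and an optional decimal prefix is accumulated into prefix_arg
// before the letter is dispatched, so a value such as "3X" applies option
// letter X with a prefix argument of 3.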
// End general internal functions.
// ***************************************************************************
// Begin malloc(3)-compatible functions.

// The BaseAllocator class is a helper class that implements the base allocator
// functions (malloc, calloc, realloc, free, memalign) for a given arena,
// or an appropriately chosen arena (per choose_arena()) when none is given.
struct BaseAllocator {
#define MALLOC_DECL(name, return_type, ...) \
  inline return_type name(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"

  explicit BaseAllocator(arena_t* aArena) : mArena(aArena) {}

 private:
  arena_t* mArena;
};

#define MALLOC_DECL(name, return_type, ...)                  \
  inline return_type MozJemalloc::name(                      \
      ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) {              \
    BaseAllocator allocator(nullptr);                        \
    return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
  }
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
inline void* BaseAllocator::malloc(size_t aSize) {
  void* ret;
  arena_t* arena;

  if (!malloc_init()) {
    return nullptr;
  }

  // If mArena is non-null, it must not be in the first page.
  MOZ_DIAGNOSTIC_ASSERT_IF(mArena, (size_t)mArena >= gPageSize);
  arena = mArena ? mArena : choose_arena(aSize);
  ret = arena->Malloc(aSize, /* aZero = */ false);
  return ret;
}
inline void* BaseAllocator::memalign(size_t aAlignment, size_t aSize) {
  MOZ_ASSERT(((aAlignment - 1) & aAlignment) == 0);

  if (!malloc_init()) {
    return nullptr;
  }

  aAlignment = aAlignment < sizeof(void*) ? sizeof(void*) : aAlignment;
  arena_t* arena = mArena ? mArena : choose_arena(aSize);
  return arena->Palloc(aAlignment, aSize);
}
inline void* BaseAllocator::calloc(size_t aNum, size_t aSize) {
  void* ret;

  if (malloc_init()) {
    CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aSize;
    if (checkedSize.isValid()) {
      size_t allocSize = checkedSize.value();
      if (allocSize == 0) {
        allocSize = 1;
      }
      arena_t* arena = mArena ? mArena : choose_arena(allocSize);
      ret = arena->Malloc(allocSize, /* aZero = */ true);
    } else {
      ret = nullptr;
    }
  } else {
    ret = nullptr;
  }

  return ret;
}
inline void* BaseAllocator::realloc(void* aPtr, size_t aSize) {
  void* ret;

  if (aPtr) {
    MOZ_RELEASE_ASSERT(malloc_initialized);

    auto info = AllocInfo::Get(aPtr);
    auto arena = info.Arena();
    MOZ_RELEASE_ASSERT(!mArena || arena == mArena);
    ret = arena->Ralloc(aPtr, aSize, info.Size());
  } else {
    if (!malloc_init()) {
      return nullptr;
    }
    arena_t* arena = mArena ? mArena : choose_arena(aSize);
    ret = arena->Malloc(aSize, /* aZero = */ false);
  }

  return ret;
}
inline void BaseAllocator::free(void* aPtr) {
  size_t offset;

  // A version of idalloc that checks for nullptr pointer.
  offset = GetChunkOffsetForPtr(aPtr);
  if (offset != 0) {
    MOZ_RELEASE_ASSERT(malloc_initialized);
    arena_dalloc(aPtr, offset, mArena);
  } else if (aPtr) {
    MOZ_RELEASE_ASSERT(malloc_initialized);
    huge_dalloc(aPtr, mArena);
  }
}
inline int MozJemalloc::posix_memalign(void** aMemPtr, size_t aAlignment,
                                       size_t aSize) {
  return AlignedAllocator<memalign>::posix_memalign(aMemPtr, aAlignment, aSize);
}

inline void* MozJemalloc::aligned_alloc(size_t aAlignment, size_t aSize) {
  return AlignedAllocator<memalign>::aligned_alloc(aAlignment, aSize);
}

inline void* MozJemalloc::valloc(size_t aSize) {
  return AlignedAllocator<memalign>::valloc(aSize);
}

// End malloc(3)-compatible functions.
// ***************************************************************************
// Begin non-standard functions.
// This was added by Mozilla for use by SQLite.
inline size_t MozJemalloc::malloc_good_size(size_t aSize) {
  if (aSize <= gMaxLargeClass) {
    aSize = SizeClass(aSize).Size();
  } else {
    // Huge. We use PAGE_CEILING to get psize, instead of using
    // CHUNK_CEILING to get csize. This ensures that
    // malloc_usable_size(malloc(n)) always matches
    // malloc_good_size(n).
    aSize = PAGE_CEILING(aSize);
  }
  return aSize;
}

inline size_t MozJemalloc::malloc_usable_size(usable_ptr_t aPtr) {
  return AllocInfo::GetValidated(aPtr).Size();
}
inline void MozJemalloc::jemalloc_stats_internal(
    jemalloc_stats_t* aStats, jemalloc_bin_stats_t* aBinStats) {
  size_t non_arena_mapped, chunk_header_size;

  if (!malloc_init()) {
    memset(aStats, 0, sizeof(*aStats));
    return;
  }
  if (aBinStats) {
    memset(aBinStats, 0, sizeof(jemalloc_bin_stats_t) * NUM_SMALL_CLASSES);
  }

  // Gather runtime settings.
  aStats->opt_junk = opt_junk;
  aStats->opt_zero = opt_zero;
  aStats->quantum = kQuantum;
  aStats->quantum_max = kMaxQuantumClass;
  aStats->quantum_wide = kQuantumWide;
  aStats->quantum_wide_max = kMaxQuantumWideClass;
  aStats->subpage_max = gMaxSubPageClass;
  aStats->large_max = gMaxLargeClass;
  aStats->chunksize = kChunkSize;
  aStats->page_size = gPageSize;
  aStats->dirty_max = opt_dirty_max;

  // Gather current memory usage statistics.
  aStats->narenas = 0;
  aStats->mapped = 0;
  aStats->allocated = 0;
  aStats->waste = 0;
  aStats->page_cache = 0;
  aStats->bookkeeping = 0;
  aStats->bin_unused = 0;

  non_arena_mapped = 0;

  // Get huge mapped/allocated.
  {
    MutexAutoLock lock(huge_mtx);
    non_arena_mapped += huge_mapped;
    aStats->allocated += huge_allocated;
    MOZ_ASSERT(huge_mapped >= huge_allocated);
  }

  // Get base mapped/allocated.
  {
    MutexAutoLock lock(base_mtx);
    non_arena_mapped += base_mapped;
    aStats->bookkeeping += base_committed;
    MOZ_ASSERT(base_mapped >= base_committed);
  }

  gArenas.mLock.Lock();

  // Stats can only read complete information if it's run on the main thread.
  MOZ_ASSERT(gArenas.IsOnMainThreadWeak());

  // Iterate over arenas.
  for (auto arena : gArenas.iter()) {
    // Cannot safely read stats for this arena and therefore stats would be
    // incomplete.
    MOZ_ASSERT(arena->mLock.SafeOnThisThread());

    size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
        arena_unused, arena_headers;

    arena_headers = 0;
    arena_unused = 0;

    {
      MaybeMutexAutoLock lock(arena->mLock);

      arena_mapped = arena->mStats.mapped;

      // "committed" counts dirty and allocated memory.
      arena_committed = arena->mStats.committed << gPageSize2Pow;

      arena_allocated =
          arena->mStats.allocated_small + arena->mStats.allocated_large;

      arena_dirty = arena->mNumDirty << gPageSize2Pow;

      for (j = 0; j < NUM_SMALL_CLASSES; j++) {
        arena_bin_t* bin = &arena->mBins[j];
        size_t bin_unused = 0;
        size_t num_non_full_runs = 0;

        for (auto mapelm : bin->mNonFullRuns.iter()) {
          arena_run_t* run = (arena_run_t*)(mapelm->bits & ~gPageSizeMask);
          bin_unused += run->mNumFree * bin->mSizeClass;
          num_non_full_runs++;
        }

        if (bin->mCurrentRun) {
          bin_unused += bin->mCurrentRun->mNumFree * bin->mSizeClass;
          num_non_full_runs++;
        }

        arena_unused += bin_unused;
        arena_headers += bin->mNumRuns * bin->mRunFirstRegionOffset;
        if (aBinStats) {
          aBinStats[j].size = bin->mSizeClass;
          aBinStats[j].num_non_full_runs += num_non_full_runs;
          aBinStats[j].num_runs += bin->mNumRuns;
          aBinStats[j].bytes_unused += bin_unused;
          size_t bytes_per_run = static_cast<size_t>(bin->mRunSizePages)
                                 << gPageSize2Pow;
          aBinStats[j].bytes_total +=
              bin->mNumRuns * (bytes_per_run - bin->mRunFirstRegionOffset);
          aBinStats[j].bytes_per_run = bytes_per_run;
        }
      }
    }

    MOZ_ASSERT(arena_mapped >= arena_committed);
    MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);

    aStats->mapped += arena_mapped;
    aStats->allocated += arena_allocated;
    aStats->page_cache += arena_dirty;
    // "waste" is committed memory that is neither dirty nor
    // allocated. If you change this definition please update
    // memory/replace/logalloc/replay/Replay.cpp's jemalloc_stats calculation of
    // waste.
    MOZ_ASSERT(arena_committed >=
               (arena_allocated + arena_dirty + arena_unused + arena_headers));
    aStats->waste += arena_committed - arena_allocated - arena_dirty -
                     arena_unused - arena_headers;
    aStats->bin_unused += arena_unused;
    aStats->bookkeeping += arena_headers;
    aStats->narenas++;
  }
  gArenas.mLock.Unlock();

  // Account for arena chunk headers in bookkeeping rather than waste.
  chunk_header_size =
      ((aStats->mapped / aStats->chunksize) * (gChunkHeaderNumPages - 1))
      << gPageSize2Pow;

  aStats->mapped += non_arena_mapped;
  aStats->bookkeeping += chunk_header_size;
  aStats->waste -= chunk_header_size;

  MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
                                   aStats->page_cache + aStats->bookkeeping);
}
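
// Summary of the accounting above (exposition): for every arena,
//   waste = committed - allocated - dirty - bin_unused - headers,
// and after folding in huge/base mappings and chunk headers the totals keep
// the invariant mapped >= allocated + waste + page_cache + bookkeeping, which
// is what the final MOZ_ASSERT checks.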
inline size_t MozJemalloc::jemalloc_stats_num_bins() {
  return NUM_SMALL_CLASSES;
}

inline void MozJemalloc::jemalloc_set_main_thread() {
  MOZ_ASSERT(malloc_initialized);
  gArenas.SetMainThread();
}
#ifdef MALLOC_DOUBLE_PURGE

// Explicitly remove all of this chunk's MADV_FREE'd pages from memory.
static void hard_purge_chunk(arena_chunk_t* aChunk) {
  // See similar logic in arena_t::Purge().
  for (size_t i = gChunkHeaderNumPages; i < gChunkNumPages; i++) {
    // Find all adjacent pages with CHUNK_MAP_MADVISED set.
    size_t npages;
    for (npages = 0; aChunk->map[i + npages].bits & CHUNK_MAP_MADVISED &&
                     i + npages < gChunkNumPages;
         npages++) {
      // Turn off the chunk's MADV_FREED bit and turn on its
      // DECOMMITTED bit.
      MOZ_DIAGNOSTIC_ASSERT(
          !(aChunk->map[i + npages].bits & CHUNK_MAP_DECOMMITTED));
      aChunk->map[i + npages].bits ^= CHUNK_MAP_MADVISED_OR_DECOMMITTED;
    }

    // We could use mincore to find out which pages are actually
    // present, but it's not clear that's better.
    if (npages > 0) {
      pages_decommit(((char*)aChunk) + (i << gPageSize2Pow),
                     npages << gPageSize2Pow);
      Unused << pages_commit(((char*)aChunk) + (i << gPageSize2Pow),
                             npages << gPageSize2Pow);
    }
    i += npages;
  }
}

// Explicitly remove all of this arena's MADV_FREE'd pages from memory.
void arena_t::HardPurge() {
  MaybeMutexAutoLock lock(mLock);

  while (!mChunksMAdvised.isEmpty()) {
    arena_chunk_t* chunk = mChunksMAdvised.popFront();
    hard_purge_chunk(chunk);
  }
}

inline void MozJemalloc::jemalloc_purge_freed_pages() {
  if (malloc_initialized) {
    MutexAutoLock lock(gArenas.mLock);
    MOZ_ASSERT(gArenas.IsOnMainThreadWeak());
    for (auto arena : gArenas.iter()) {
      arena->HardPurge();
    }
  }
}

#else  // !defined MALLOC_DOUBLE_PURGE

inline void MozJemalloc::jemalloc_purge_freed_pages() {}

#endif  // defined MALLOC_DOUBLE_PURGE
inline void MozJemalloc::jemalloc_free_dirty_pages(void) {
  if (malloc_initialized) {
    MutexAutoLock lock(gArenas.mLock);
    MOZ_ASSERT(gArenas.IsOnMainThreadWeak());
    for (auto arena : gArenas.iter()) {
      MaybeMutexAutoLock arena_lock(arena->mLock);
      arena->Purge(1);
    }
  }
}
inline arena_t* ArenaCollection::GetByIdInternal(Tree& aTree,
                                                 arena_id_t aArenaId) {
  // Use AlignedStorage2 to avoid running the arena_t constructor, while
  // we only need it as a placeholder for mId.
  mozilla::AlignedStorage2<arena_t> key;
  key.addr()->mId = aArenaId;
  return aTree.Search(key.addr());
}
inline arena_t* ArenaCollection::GetById(arena_id_t aArenaId, bool aIsPrivate) {
  if (!malloc_initialized) {
    return nullptr;
  }

  Tree* tree = nullptr;
  if (aIsPrivate) {
    if (ArenaIdIsMainThreadOnly(aArenaId)) {
      // Main thread only arena. Do the lookup here without taking the lock.
      arena_t* result = GetByIdInternal(mMainThreadArenas, aArenaId);
      MOZ_RELEASE_ASSERT(result);
      return result;
    }
    tree = &mPrivateArenas;
  } else {
    tree = &mArenas;
  }

  MutexAutoLock lock(mLock);
  arena_t* result = GetByIdInternal(*tree, aArenaId);
  MOZ_RELEASE_ASSERT(result);
  return result;
}
inline arena_id_t MozJemalloc::moz_create_arena_with_params(
    arena_params_t* aParams) {
  if (malloc_init()) {
    arena_t* arena = gArenas.CreateArena(/* IsPrivate = */ true, aParams);
    return arena->mId;
  }
  return 0;
}

inline void MozJemalloc::moz_dispose_arena(arena_id_t aArenaId) {
  arena_t* arena = gArenas.GetById(aArenaId, /* IsPrivate = */ true);
  MOZ_RELEASE_ASSERT(arena);
  gArenas.DisposeArena(arena);
}

inline void MozJemalloc::moz_set_max_dirty_page_modifier(int32_t aModifier) {
  gArenas.SetDefaultMaxDirtyPageModifier(aModifier);
}
#define MALLOC_DECL(name, return_type, ...)                          \
  inline return_type MozJemalloc::moz_arena_##name(                  \
      arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) { \
    BaseAllocator allocator(                                         \
        gArenas.GetById(aArenaId, /* IsPrivate = */ true));          \
    return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__));         \
  }
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"

// End non-standard functions.
// ***************************************************************************
// Begin library-private functions, used by threading libraries for protection
// of malloc during fork(). These functions are only called if the program is
// running in threaded mode, so there is no need to check whether the program
// is threaded here.
//
// Note that the only way to keep the main-thread-only arenas in a consistent
// state for the child is if fork is called from the main thread only. Or the
// child must not use them, e.g. it should call exec(). We attempt to prevent
// the child from accessing these arenas by refusing to re-initialise them.
static pthread_t gForkingThread;
void _malloc_prefork(void) MOZ_NO_THREAD_SAFETY_ANALYSIS {
  // Acquire all mutexes in a safe order.
  gArenas.mLock.Lock();
  gForkingThread = pthread_self();

  for (auto arena : gArenas.iter()) {
    if (arena->mLock.LockIsEnabled()) {
      arena->mLock.Lock();
    }
  }

  base_mtx.Lock();

  huge_mtx.Lock();
}
void _malloc_postfork_parent(void) MOZ_NO_THREAD_SAFETY_ANALYSIS {
  // Release all mutexes, now that fork() has completed.
  huge_mtx.Unlock();

  base_mtx.Unlock();

  for (auto arena : gArenas.iter()) {
    if (arena->mLock.LockIsEnabled()) {
      arena->mLock.Unlock();
    }
  }
  gArenas.mLock.Unlock();
}
void _malloc_postfork_child(void) {
  // Reinitialize all mutexes, now that fork() has completed.
  huge_mtx.Init();

  base_mtx.Init();

  for (auto arena : gArenas.iter()) {
    arena->mLock.Reinit(gForkingThread);
  }

  gArenas.PostForkFixMainThread();
  gArenas.mLock.Init();
}
// End library-private functions.
// ***************************************************************************
#ifdef MOZ_REPLACE_MALLOC
// Windows doesn't come with weak imports as they are possible with
// LD_PRELOAD or DYLD_INSERT_LIBRARIES on Linux/OSX. On this platform,
// the replacement functions are defined as variable pointers to the
// function resolved with GetProcAddress() instead of weak definitions
// of functions. On Android, the same needs to happen as well, because
// the Android linker doesn't handle weak linking with non LD_PRELOADed
// libraries, but LD_PRELOADing is not very convenient on Android.
#  ifdef XP_DARWIN
#    define MOZ_REPLACE_WEAK __attribute__((weak_import))
#  elif defined(XP_WIN) || defined(ANDROID)
#    define MOZ_DYNAMIC_REPLACE_INIT
#    define replace_init replace_init_decl
#  elif defined(__GNUC__)
#    define MOZ_REPLACE_WEAK __attribute__((weak))
#  endif

#  include "replace_malloc.h"

#  define MALLOC_DECL(name, return_type, ...) CanonicalMalloc::name,

// The default malloc table, i.e. plain allocations. It never changes. It's
// used by init(), and not used after that.
static const malloc_table_t gDefaultMallocTable = {
#  include "malloc_decls.h"
};

// The malloc table installed by init(). It never changes from that point
// onward. It will be the same as gDefaultMallocTable if no replace-malloc tool
// is enabled at startup.
static malloc_table_t gOriginalMallocTable = {
#  include "malloc_decls.h"
};

// The malloc table installed by jemalloc_replace_dynamic(). (Read the
// comments above that function for more details.)
static malloc_table_t gDynamicMallocTable = {
#  include "malloc_decls.h"
};

// This briefly points to gDefaultMallocTable at startup. After that, it points
// to either gOriginalMallocTable or gDynamicMallocTable. It's atomic to avoid
// races when switching between tables.
static Atomic<malloc_table_t const*, mozilla::MemoryOrdering::Relaxed>
    gMallocTablePtr;

#  ifdef MOZ_DYNAMIC_REPLACE_INIT
#    undef replace_init
typedef decltype(replace_init_decl) replace_init_impl_t;
static replace_init_impl_t* replace_init = nullptr;
#    ifdef XP_WIN
typedef HMODULE replace_malloc_handle_t;

static replace_malloc_handle_t replace_malloc_handle() {
  wchar_t replace_malloc_lib[1024];
  if (GetEnvironmentVariableW(L"MOZ_REPLACE_MALLOC_LIB", replace_malloc_lib,
                              ArrayLength(replace_malloc_lib)) > 0) {
    return LoadLibraryW(replace_malloc_lib);
  }
  return nullptr;
}

#      define REPLACE_MALLOC_GET_INIT_FUNC(handle) \
        (replace_init_impl_t*)GetProcAddress(handle, "replace_init")

#    elif defined(ANDROID)
typedef void* replace_malloc_handle_t;

static replace_malloc_handle_t replace_malloc_handle() {
  const char* replace_malloc_lib = getenv("MOZ_REPLACE_MALLOC_LIB");
  if (replace_malloc_lib && *replace_malloc_lib) {
    return dlopen(replace_malloc_lib, RTLD_LAZY);
  }
  return nullptr;
}

#      define REPLACE_MALLOC_GET_INIT_FUNC(handle) \
        (replace_init_impl_t*)dlsym(handle, "replace_init")

#    endif
#  endif
static void replace_malloc_init_funcs(malloc_table_t*);

#  ifdef MOZ_REPLACE_MALLOC_STATIC
extern "C" void logalloc_init(malloc_table_t*, ReplaceMallocBridge**);

extern "C" void dmd_init(malloc_table_t*, ReplaceMallocBridge**);

void phc_init(malloc_table_t*, ReplaceMallocBridge**);
#  endif

bool Equals(const malloc_table_t& aTable1, const malloc_table_t& aTable2) {
  return memcmp(&aTable1, &aTable2, sizeof(malloc_table_t)) == 0;
}
// Below is the malloc implementation overriding jemalloc and calling the
// replacement functions if they exist.
static ReplaceMallocBridge* gReplaceMallocBridge = nullptr;
static void init() {
  malloc_table_t tempTable = gDefaultMallocTable;

#  ifdef MOZ_DYNAMIC_REPLACE_INIT
  replace_malloc_handle_t handle = replace_malloc_handle();
  if (handle) {
    replace_init = REPLACE_MALLOC_GET_INIT_FUNC(handle);
  }
#  endif

  // Set this *before* calling replace_init, otherwise if replace_init calls
  // malloc() we'll get an infinite loop.
  gMallocTablePtr = &gDefaultMallocTable;

  // Pass in the default allocator table so replace functions can copy and use
  // it for their allocations. The replace_init() function should modify the
  // table if it wants to be active, otherwise leave it unmodified.
  if (replace_init) {
    replace_init(&tempTable, &gReplaceMallocBridge);
  }

#  ifdef MOZ_REPLACE_MALLOC_STATIC
  if (Equals(tempTable, gDefaultMallocTable)) {
    logalloc_init(&tempTable, &gReplaceMallocBridge);
  }
  if (Equals(tempTable, gDefaultMallocTable)) {
    dmd_init(&tempTable, &gReplaceMallocBridge);
  }
#  endif

  if (!Equals(tempTable, gDefaultMallocTable)) {
    replace_malloc_init_funcs(&tempTable);
  }
  gOriginalMallocTable = tempTable;
  gMallocTablePtr = &gOriginalMallocTable;
}
// WARNING WARNING WARNING: this function should be used with extreme care. It
// is not as general-purpose as it looks. It is currently used by
// tools/profiler/core/memory_hooks.cpp for counting allocations and probably
// should not be used for any other purpose.
//
// This function allows the original malloc table to be temporarily replaced by
// a different malloc table. Or, if the argument is nullptr, it switches back to
// the original malloc table.
//
// - It is not threadsafe. If multiple threads pass it the same
//   `replace_init_func` at the same time, there will be data races writing to
//   the malloc_table_t within that function.
//
// - Only one replacement can be installed. No nesting is allowed.
//
// - The new malloc table must be able to free allocations made by the original
//   malloc table, and upon removal the original malloc table must be able to
//   free allocations made by the new malloc table. This means the new malloc
//   table can only do simple things like recording extra information, while
//   delegating actual allocation/free operations to the original malloc table.
MOZ_JEMALLOC_API void jemalloc_replace_dynamic(
    jemalloc_init_func replace_init_func) {
  if (replace_init_func) {
    malloc_table_t tempTable = gOriginalMallocTable;
    (*replace_init_func)(&tempTable, &gReplaceMallocBridge);
    if (!Equals(tempTable, gOriginalMallocTable)) {
      replace_malloc_init_funcs(&tempTable);

      // Temporarily switch back to the original malloc table. In the
      // (supported) non-nested case, this is a no-op. But just in case this is
      // an (unsupported) nested call, it makes the overwriting of
      // gDynamicMallocTable less racy, because ongoing calls to malloc() and
      // friends won't go through gDynamicMallocTable.
      gMallocTablePtr = &gOriginalMallocTable;

      gDynamicMallocTable = tempTable;
      gMallocTablePtr = &gDynamicMallocTable;
      // We assume that dynamic replaces don't occur close enough for a
      // thread to still have old copies of the table pointer when the 2nd
      // replace occurs.
    }
  } else {
    // Switch back to the original malloc table.
    gMallocTablePtr = &gOriginalMallocTable;
  }
}
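
// Illustrative use (hypothetical caller, assuming only the documented
// contract above): a tool could install a counting layer roughly like
//   static void MyInit(malloc_table_t* aTable, ReplaceMallocBridge**) {
//     // wrap aTable->malloc / aTable->free with versions that bump counters
//     // and then delegate to the originals copied out of *aTable.
//   }
//   jemalloc_replace_dynamic(MyInit);    // install
//   jemalloc_replace_dynamic(nullptr);   // later: restore the original table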
#  define MALLOC_DECL(name, return_type, ...)                           \
    inline return_type ReplaceMalloc::name(                             \
        ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) {                       \
      if (MOZ_UNLIKELY(!gMallocTablePtr)) {                             \
        init();                                                         \
      }                                                                 \
      return (*gMallocTablePtr).name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
    }
#  include "malloc_decls.h"

MOZ_JEMALLOC_API struct ReplaceMallocBridge* get_bridge(void) {
  if (MOZ_UNLIKELY(!gMallocTablePtr)) {
    init();
  }
  return gReplaceMallocBridge;
}
// posix_memalign, aligned_alloc, memalign and valloc all implement some kind
// of aligned memory allocation. For convenience, a replace-malloc library can
// skip defining replace_posix_memalign, replace_aligned_alloc and
// replace_valloc, and default implementations will be automatically derived
// from replace_memalign.
static void replace_malloc_init_funcs(malloc_table_t* table) {
  if (table->posix_memalign == CanonicalMalloc::posix_memalign &&
      table->memalign != CanonicalMalloc::memalign) {
    table->posix_memalign =
        AlignedAllocator<ReplaceMalloc::memalign>::posix_memalign;
  }
  if (table->aligned_alloc == CanonicalMalloc::aligned_alloc &&
      table->memalign != CanonicalMalloc::memalign) {
    table->aligned_alloc =
        AlignedAllocator<ReplaceMalloc::memalign>::aligned_alloc;
  }
  if (table->valloc == CanonicalMalloc::valloc &&
      table->memalign != CanonicalMalloc::memalign) {
    table->valloc = AlignedAllocator<ReplaceMalloc::memalign>::valloc;
  }
  if (table->moz_create_arena_with_params ==
          CanonicalMalloc::moz_create_arena_with_params &&
      table->malloc != CanonicalMalloc::malloc) {
#  define MALLOC_DECL(name, ...) \
    table->name = DummyArenaAllocator<ReplaceMalloc>::name;
#  define MALLOC_FUNCS MALLOC_FUNCS_ARENA_BASE
#  include "malloc_decls.h"
  }
  if (table->moz_arena_malloc == CanonicalMalloc::moz_arena_malloc &&
      table->malloc != CanonicalMalloc::malloc) {
#  define MALLOC_DECL(name, ...) \
    table->name = DummyArenaAllocator<ReplaceMalloc>::name;
#  define MALLOC_FUNCS MALLOC_FUNCS_ARENA_ALLOC
#  include "malloc_decls.h"
  }
}

#endif  // MOZ_REPLACE_MALLOC
// ***************************************************************************
// Definition of all the _impl functions
// GENERIC_MALLOC_DECL2_MINGW is only used for the MinGW build, and aliases
// the malloc funcs (e.g. malloc) to the je_ versions. It does not generate
// aliases for the other functions (jemalloc and arena functions).
//
// We do need aliases for the other mozglue.def-redirected functions though,
// these are done at the bottom of mozmemory_wrap.cpp
#define GENERIC_MALLOC_DECL2_MINGW(name, name_impl, return_type, ...) \
  return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__))            \
      __attribute__((alias(MOZ_STRINGIFY(name_impl))));

#define GENERIC_MALLOC_DECL2(attributes, name, name_impl, return_type, ...)  \
  return_type name_impl(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) attributes { \
    return DefaultMalloc::name(ARGS_HELPER(ARGS, ##__VA_ARGS__));            \
  }

#ifndef __MINGW32__
#  define GENERIC_MALLOC_DECL(attributes, name, return_type, ...)    \
    GENERIC_MALLOC_DECL2(attributes, name, name##_impl, return_type, \
                         ##__VA_ARGS__)
#else
#  define GENERIC_MALLOC_DECL(attributes, name, return_type, ...)    \
    GENERIC_MALLOC_DECL2(attributes, name, name##_impl, return_type, \
                         ##__VA_ARGS__)                               \
    GENERIC_MALLOC_DECL2_MINGW(name, name##_impl, return_type, ##__VA_ARGS__)
#endif

#define NOTHROW_MALLOC_DECL(...) \
  MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (noexcept(true), __VA_ARGS__))
#define MALLOC_DECL(...) \
  MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (, __VA_ARGS__))
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
#include "malloc_decls.h"

#undef GENERIC_MALLOC_DECL
#define GENERIC_MALLOC_DECL(attributes, name, return_type, ...) \
  GENERIC_MALLOC_DECL2(attributes, name, name, return_type, ##__VA_ARGS__)

#define MALLOC_DECL(...) \
  MOZ_JEMALLOC_API MACRO_CALL(GENERIC_MALLOC_DECL, (, __VA_ARGS__))
#define MALLOC_FUNCS (MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
#include "malloc_decls.h"
// ***************************************************************************
#if defined(__GLIBC__) && !defined(__UCLIBC__)
// glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
// to inconsistently reference libc's malloc(3)-compatible functions.
//
// These definitions interpose hooks in glibc. The functions are actually
// passed an extra argument for the caller return address, which will be
// ignored.
extern "C" {
MOZ_EXPORT void (*__free_hook)(void*) = free_impl;
MOZ_EXPORT void* (*__malloc_hook)(size_t) = malloc_impl;
MOZ_EXPORT void* (*__realloc_hook)(void*, size_t) = realloc_impl;
MOZ_EXPORT void* (*__memalign_hook)(size_t, size_t) = memalign_impl;
}

#elif defined(RTLD_DEEPBIND)
// XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
// implementations permit similar inconsistencies? Should STV_SINGLETON
// visibility be used for interposition where available?
#  error \
      "Interposing malloc is unsafe on this system without libc malloc hooks."
#endif
#ifdef XP_WIN
MOZ_EXPORT void* _recalloc(void* aPtr, size_t aCount, size_t aSize) {
  size_t oldsize = aPtr ? AllocInfo::Get(aPtr).Size() : 0;
  CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aCount) * aSize;

  if (!checkedSize.isValid()) {
    return nullptr;
  }

  size_t newsize = checkedSize.value();

  // In order for all trailing bytes to be zeroed, the caller needs to
  // use calloc(), followed by recalloc(). However, the current calloc()
  // implementation only zeros the bytes requested, so if recalloc() is
  // to work 100% correctly, calloc() will need to change to zero
  // trailing bytes.
  aPtr = DefaultMalloc::realloc(aPtr, newsize);
  if (aPtr && oldsize < newsize) {
    memset((void*)((uintptr_t)aPtr + oldsize), 0, newsize - oldsize);
  }

  return aPtr;
}

// This impl of _expand doesn't ever actually expand or shrink blocks: it
// simply replies that you may continue using a shrunk block.
MOZ_EXPORT void* _expand(void* aPtr, size_t newsize) {
  if (AllocInfo::Get(aPtr).Size() >= newsize) {
    return aPtr;
  }

  return nullptr;
}

MOZ_EXPORT size_t _msize(void* aPtr) {
  return DefaultMalloc::malloc_usable_size(aPtr);
}
#endif

// Compile PHC and mozjemalloc together so that PHC can inline mozjemalloc.