1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 // Portions of this file were originally under the following license:
9 // Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
10 // All rights reserved.
11 // Copyright (C) 2007-2017 Mozilla Foundation.
13 // Redistribution and use in source and binary forms, with or without
14 // modification, are permitted provided that the following conditions
16 // 1. Redistributions of source code must retain the above copyright
17 // notice(s), this list of conditions and the following disclaimer as
18 // the first lines of this file unmodified other than the possible
19 // addition of one or more copyright notices.
20 // 2. Redistributions in binary form must reproduce the above copyright
21 // notice(s), this list of conditions and the following disclaimer in
22 // the documentation and/or other materials provided with the
25 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
29 // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
33 // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
34 // OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
35 // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 // *****************************************************************************
39 // This allocator implementation is designed to provide scalable performance
40 // for multi-threaded programs on multi-processor systems. The following
41 // features are included for this purpose:
43 // + Multiple arenas are used if there are multiple CPUs, which reduces lock
44 // contention and cache sloshing.
// + Cache line sharing between arenas is avoided for internal data
//   structures.
49 // + Memory is managed in chunks and runs (chunks can be split into runs),
50 // rather than as individual pages. This provides a constant-time
51 // mechanism for associating allocations with particular arenas.
53 // Allocation requests are rounded up to the nearest size class, and no record
54 // of the original request size is maintained. Allocations are broken into
55 // categories according to size class. Assuming runtime defaults, the size
56 // classes in each category are as follows (for x86, x86_64 and Apple Silicon):
58 // |=========================================================|
59 // | Category | Subcategory | x86 | x86_64 | Mac ARM |
60 // |---------------------------+---------+---------+---------|
61 // | Word size | 32 bit | 64 bit | 64 bit |
62 // | Page size | 4 Kb | 4 Kb | 16 Kb |
63 // |=========================================================|
64 // | Small | Tiny | 4/-w | -w | - |
65 // | | | 8 | 8/-w | 8 |
66 // | |----------------+---------|---------|---------|
67 // | | Quantum-spaced | 16 | 16 | 16 |
68 // | | | 32 | 32 | 32 |
69 // | | | 48 | 48 | 48 |
70 // | | | ... | ... | ... |
71 // | | | 480 | 480 | 480 |
72 // | | | 496 | 496 | 496 |
73 // | |----------------+---------|---------|---------|
74 // | | Quantum-wide- | 512 | 512 | 512 |
75 // | | spaced | 768 | 768 | 768 |
76 // | | | ... | ... | ... |
77 // | | | 3584 | 3584 | 3584 |
78 // | | | 3840 | 3840 | 3840 |
79 // | |----------------+---------|---------|---------|
80 // | | Sub-page | - | - | 4096 |
81 // | | | - | - | 8 kB |
82 // |=========================================================|
83 // | Large | 4 kB | 4 kB | - |
84 // | | 8 kB | 8 kB | - |
85 // | | 12 kB | 12 kB | - |
86 // | | 16 kB | 16 kB | 16 kB |
87 // | | ... | ... | - |
88 // | | 32 kB | 32 kB | 32 kB |
89 // | | ... | ... | ... |
90 // | | 1008 kB | 1008 kB | 1008 kB |
91 // | | 1012 kB | 1012 kB | - |
92 // | | 1016 kB | 1016 kB | - |
93 // | | 1020 kB | 1020 kB | - |
94 // |=========================================================|
95 // | Huge | 1 MB | 1 MB | 1 MB |
96 // | | 2 MB | 2 MB | 2 MB |
97 // | | 3 MB | 3 MB | 3 MB |
98 // | | ... | ... | ... |
99 // |=========================================================|
102 // n: Size class exists for this platform.
103 // n/-w: This size class doesn't exist on Windows (see kMinTinyClass).
104 // -: This size class doesn't exist for this platform.
105 // ...: Size classes follow a pattern here.
107 // NOTE: Due to Mozilla bug 691003, we cannot reserve less than one word for an
108 // allocation on Linux or Mac. So on 32-bit *nix, the smallest bucket size is
109 // 4 bytes, and on 64-bit, the smallest bucket size is 8 bytes.
111 // A different mechanism is used for each category:
113 // Small : Each size class is segregated into its own set of runs. Each run
114 // maintains a bitmap of which regions are free/allocated.
116 // Large : Each allocation is backed by a dedicated run. Metadata are stored
117 // in the associated arena chunk header maps.
119 // Huge : Each allocation is backed by a dedicated contiguous set of chunks.
120 // Metadata are stored in a separate red-black tree.
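//
// As a worked example under the runtime defaults on x86_64 (4 KiB pages):
// malloc(100) is rounded up to the 112-byte quantum-spaced class and served
// from a small run, malloc(5000) is rounded up to the 8 kB large class and
// gets a dedicated run, and a 2 MiB request is a huge allocation backed by
// two 1 MiB chunks.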
122 // *****************************************************************************
124 #include "mozmemory_wrap.h"
125 #include "mozjemalloc.h"
126 #include "mozjemalloc_types.h"
131 #include <type_traits>
134 # include <windows.h>
136 # include <sys/mman.h>
140 # include <libkern/OSAtomic.h>
141 # include <mach/mach_init.h>
142 # include <mach/vm_map.h>
145 #include "mozilla/Atomics.h"
146 #include "mozilla/Alignment.h"
147 #include "mozilla/ArrayUtils.h"
148 #include "mozilla/Assertions.h"
149 #include "mozilla/CheckedInt.h"
150 #include "mozilla/DoublyLinkedList.h"
151 #include "mozilla/HelperMacros.h"
152 #include "mozilla/Likely.h"
153 #include "mozilla/Literals.h"
154 #include "mozilla/MathAlgorithms.h"
155 #include "mozilla/RandomNum.h"
156 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
157 // instead of the one defined here; use only MozTagAnonymousMemory().
158 #include "mozilla/TaggedAnonymousMemory.h"
159 #include "mozilla/ThreadLocal.h"
160 #include "mozilla/UniquePtr.h"
161 #include "mozilla/Unused.h"
162 #include "mozilla/XorShift128PlusRNG.h"
163 #include "mozilla/fallible.h"
170 # include "mozmemory_utils.h"
173 // For GetGeckoProcessType(), when it's used.
174 #if defined(XP_WIN) && !defined(JS_STANDALONE)
175 # include "mozilla/ProcessType.h"
using namespace mozilla;
180 // On Linux, we use madvise(MADV_DONTNEED) to release memory back to the
181 // operating system. If we release 1MB of live pages with MADV_DONTNEED, our
182 // RSS will decrease by 1MB (almost) immediately.
184 // On Mac, we use madvise(MADV_FREE). Unlike MADV_DONTNEED on Linux, MADV_FREE
185 // on Mac doesn't cause the OS to release the specified pages immediately; the
186 // OS keeps them in our process until the machine comes under memory pressure.
188 // It's therefore difficult to measure the process's RSS on Mac, since, in the
189 // absence of memory pressure, the contribution from the heap to RSS will not
190 // decrease due to our madvise calls.
192 // We therefore define MALLOC_DOUBLE_PURGE on Mac. This causes jemalloc to
193 // track which pages have been MADV_FREE'd. You can then call
194 // jemalloc_purge_freed_pages(), which will force the OS to release those
195 // MADV_FREE'd pages, making the process's RSS reflect its true memory usage.
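//
// For example (illustrative only), a memory reporter on macOS could call
// jemalloc_purge_freed_pages() immediately before sampling RSS, so that
// pages we have already MADV_FREE'd no longer inflate the measurement.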
198 # define MALLOC_DOUBLE_PURGE
202 # define MALLOC_DECOMMIT
// Define MALLOC_RUNTIME_CONFIG depending on MOZ_DEBUG. Overriding this as
// a build option allows us to build mozjemalloc/firefox without runtime
// asserts but with runtime configuration, making some testing easier.
210 # define MALLOC_RUNTIME_CONFIG
213 // When MALLOC_STATIC_PAGESIZE is defined, the page size is fixed at
214 // compile-time for better performance, as opposed to determined at
215 // runtime. Some platforms can have different page sizes at runtime
216 // depending on kernel configuration, so they are opted out by default.
217 // Debug builds are opted out too, for test coverage.
218 #ifndef MALLOC_RUNTIME_CONFIG
219 # if !defined(__ia64__) && !defined(__sparc__) && !defined(__mips__) && \
220 !defined(__aarch64__) && !defined(__powerpc__) && !defined(XP_MACOSX) && \
221 !defined(__loongarch__)
222 # define MALLOC_STATIC_PAGESIZE 1
227 # define STDERR_FILENO 2
229 // Implement getenv without using malloc.
static char mozillaMallocOptionsBuf[64];

# define getenv xgetenv
static char* getenv(const char* name) {
  if (GetEnvironmentVariableA(name, mozillaMallocOptionsBuf,
                              sizeof(mozillaMallocOptionsBuf)) > 0) {
    return mozillaMallocOptionsBuf;
  }

  return nullptr;
}
// Newer Linux systems support MADV_FREE, but we're not supporting
// that properly; see bug 1406304.
246 # if defined(XP_LINUX) && defined(MADV_FREE)
250 # define MADV_FREE MADV_DONTNEED
// Some tools, such as /dev/dsp wrappers, are LD_PRELOAD libraries that
// happen to override mmap() and call dlsym() from their overridden
// mmap(). The problem is that dlsym() calls malloc(), and this ends
// up in a deadlock in jemalloc.
// On these systems, we prefer to use the system call directly.
// We do that for Linux systems and kFreeBSD with a GNU userland.
// Note that sanity checks are not done (alignment of offset, ...) because
// the uses of mmap in jemalloc are pretty limited.
//
// On Alpha, glibc has a bug that prevents syscall() from working for system
// calls with 6 arguments.
265 #if (defined(XP_LINUX) && !defined(__alpha__)) || \
266 (defined(__FreeBSD_kernel__) && defined(__GLIBC__))
267 # include <sys/syscall.h>
268 # if defined(SYS_mmap) || defined(SYS_mmap2)
269 static inline void* _mmap(void* addr
, size_t length
, int prot
, int flags
,
270 int fd
, off_t offset
) {
271 // S390 only passes one argument to the mmap system call, which is a
272 // pointer to a structure containing the arguments.
281 } args
= {addr
, length
, prot
, flags
, fd
, offset
};
282 return (void*)syscall(SYS_mmap
, &args
);
284 # if defined(ANDROID) && defined(__aarch64__) && defined(SYS_mmap2)
285 // Android NDK defines SYS_mmap2 for AArch64 despite it not supporting mmap2.
289 return (void*)syscall(SYS_mmap2
, addr
, length
, prot
, flags
, fd
, offset
>> 12);
291 return (void*)syscall(SYS_mmap
, addr
, length
, prot
, flags
, fd
, offset
);
296 # define munmap(a, l) syscall(SYS_munmap, a, l)
300 // ***************************************************************************
301 // Structures for chunk headers for chunks used for non-huge allocations.
305 // Each element of the chunk map corresponds to one page within the chunk.
306 struct arena_chunk_map_t
{
307 // Linkage for run trees. There are two disjoint uses:
// 1) arena_t's tree of available runs.
310 // 2) arena_run_t conceptually uses this linkage for in-use non-full
311 // runs, rather than directly embedding linkage.
312 RedBlackTreeNode
<arena_chunk_map_t
> link
;
314 // Run address (or size) and various flags are stored together. The bit
315 // layout looks like (assuming 32-bit system):
317 // ???????? ???????? ????---- fmckdzla
// ? : Unallocated: Run address for first/last pages, unset for internal
//                  pages.
321 // Small: Run address.
322 // Large: Run size for first page, unset for trailing pages.
325 // m : MADV_FREE/MADV_DONTNEED'ed?
333 // Following are example bit patterns for the three types of runs.
342 // ssssssss ssssssss ssss---- --c-----
343 // xxxxxxxx xxxxxxxx xxxx---- ----d---
344 // ssssssss ssssssss ssss---- -----z--
347 // rrrrrrrr rrrrrrrr rrrr---- -------a
348 // rrrrrrrr rrrrrrrr rrrr---- -------a
349 // rrrrrrrr rrrrrrrr rrrr---- -------a
352 // ssssssss ssssssss ssss---- ------la
353 // -------- -------- -------- ------la
354 // -------- -------- -------- ------la
357 // A page can be in one of several states.
359 // CHUNK_MAP_ALLOCATED marks allocated pages, the only other bit that can be
360 // combined is CHUNK_MAP_LARGE.
362 // CHUNK_MAP_LARGE may be combined with CHUNK_MAP_ALLOCATED to show that the
363 // allocation is a "large" allocation (see SizeClass), rather than a run of
// small allocations. The interpretation of the gPageSizeMask bits depends on
// this bit; see the description above.
367 // CHUNK_MAP_DIRTY is used to mark pages that were allocated and are now freed.
368 // They may contain their previous contents (or poison). CHUNK_MAP_DIRTY, when
369 // set, must be the only set bit.
371 // CHUNK_MAP_MADVISED marks pages which are madvised (with either MADV_DONTNEED
372 // or MADV_FREE). This is only valid if MALLOC_DECOMMIT is not defined. When
373 // set, it must be the only bit set.
// CHUNK_MAP_DECOMMITTED is used if MALLOC_DECOMMIT is defined. Unused
376 // dirty pages may be decommitted and marked as CHUNK_MAP_DECOMMITTED. They
377 // must be re-committed with pages_commit() before they can be touched.
// CHUNK_MAP_FRESH is set on pages that have never been used before (the chunk
// is newly allocated, or they were decommitted and have now been recommitted).
// CHUNK_MAP_FRESH is also used for "double purged" pages, meaning that they
// were madvised and later were unmapped and remapped to force them out of the
// program's resident set. This is enabled when MALLOC_DOUBLE_PURGE is defined.
386 // CHUNK_MAP_ZEROED is set on pages that are known to contain zeros.
// CHUNK_MAP_DIRTY, _DECOMMITTED, _MADVISED and _FRESH are always mutually
// exclusive.
391 // CHUNK_MAP_KEY is never used on real pages, only on lookup keys.
393 #define CHUNK_MAP_FRESH ((size_t)0x80U)
394 #define CHUNK_MAP_MADVISED ((size_t)0x40U)
395 #define CHUNK_MAP_DECOMMITTED ((size_t)0x20U)
396 #define CHUNK_MAP_MADVISED_OR_DECOMMITTED \
397 (CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
398 #define CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED \
399 (CHUNK_MAP_FRESH | CHUNK_MAP_MADVISED | CHUNK_MAP_DECOMMITTED)
400 #define CHUNK_MAP_KEY ((size_t)0x10U)
401 #define CHUNK_MAP_DIRTY ((size_t)0x08U)
402 #define CHUNK_MAP_ZEROED ((size_t)0x04U)
403 #define CHUNK_MAP_LARGE ((size_t)0x02U)
404 #define CHUNK_MAP_ALLOCATED ((size_t)0x01U)
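
// Illustrative checks (a minimal sketch, not part of the original allocator
// logic): the flags above occupy disjoint bits, so the mutually-exclusive
// page states described earlier can be tested with plain bit operations.
static_assert((CHUNK_MAP_FRESH & CHUNK_MAP_MADVISED) == 0 &&
                  (CHUNK_MAP_MADVISED & CHUNK_MAP_DECOMMITTED) == 0,
              "purge-related flags use distinct bits");
static_assert((CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED &
               (CHUNK_MAP_DIRTY | CHUNK_MAP_ZEROED | CHUNK_MAP_LARGE |
                CHUNK_MAP_ALLOCATED)) == 0,
              "purge-related flags don't overlap the other chunk map flags");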
407 // Arena chunk header.
408 struct arena_chunk_t
{
409 // Arena that owns the chunk.
412 // Linkage for the arena's tree of dirty chunks.
413 RedBlackTreeNode
<arena_chunk_t
> link_dirty
;
415 #ifdef MALLOC_DOUBLE_PURGE
416 // If we're double-purging, we maintain a linked list of chunks which
417 // have pages which have been madvise(MADV_FREE)'d but not explicitly
420 // We're currently lazy and don't remove a chunk from this list when
421 // all its madvised pages are recommitted.
422 DoublyLinkedListElement
<arena_chunk_t
> chunks_madvised_elem
;
425 // Number of dirty pages.
428 // Map of pages within chunk that keeps track of free/large/small.
429 arena_chunk_map_t map
[1]; // Dynamically sized.
432 // ***************************************************************************
433 // Constants defining allocator size classes and behavior.
435 // Our size classes are inclusive ranges of memory sizes. By describing the
// minimums and how memory is allocated in each range, the maximums can be
// determined.
439 // Smallest size class to support. On Windows the smallest allocation size
440 // must be 8 bytes on 32-bit, 16 bytes on 64-bit. On Linux and Mac, even
441 // malloc(1) must reserve a word's worth of memory (see Mozilla bug 691003).
443 static const size_t kMinTinyClass
= sizeof(void*) * 2;
445 static const size_t kMinTinyClass
= sizeof(void*);
448 // Maximum tiny size class.
449 static const size_t kMaxTinyClass
= 8;
451 // Smallest quantum-spaced size classes. It could actually also be labelled a
452 // tiny allocation, and is spaced as such from the largest tiny size class.
// Tiny classes being powers of 2, this is twice as large as the largest of
// them.
static const size_t kMinQuantumClass = kMaxTinyClass * 2;
static const size_t kMinQuantumWideClass = 512;
static const size_t kMinSubPageClass = 4_KiB;
459 // Amount (quantum) separating quantum-spaced size classes.
static const size_t kQuantum = 16;
static const size_t kQuantumMask = kQuantum - 1;
static const size_t kQuantumWide = 256;
static const size_t kQuantumWideMask = kQuantumWide - 1;
static const size_t kMaxQuantumClass = kMinQuantumWideClass - kQuantum;
static const size_t kMaxQuantumWideClass = kMinSubPageClass - kQuantumWide;
468 // We can optimise some divisions to shifts if these are powers of two.
static_assert(mozilla::IsPowerOfTwo(kQuantum),
              "kQuantum is not a power of two");
static_assert(mozilla::IsPowerOfTwo(kQuantumWide),
              "kQuantumWide is not a power of two");
static_assert(kMaxQuantumClass % kQuantum == 0,
              "kMaxQuantumClass is not a multiple of kQuantum");
static_assert(kMaxQuantumWideClass % kQuantumWide == 0,
              "kMaxQuantumWideClass is not a multiple of kQuantumWide");
static_assert(kQuantum < kQuantumWide,
              "kQuantum must be smaller than kQuantumWide");
static_assert(mozilla::IsPowerOfTwo(kMinSubPageClass),
              "kMinSubPageClass is not a power of two");
483 // Number of (2^n)-spaced tiny classes.
static const size_t kNumTinyClasses =
    LOG2(kMaxTinyClass) - LOG2(kMinTinyClass) + 1;
487 // Number of quantum-spaced classes. We add kQuantum(Max) before subtracting to
488 // avoid underflow when a class is empty (Max<Min).
static const size_t kNumQuantumClasses =
    (kMaxQuantumClass + kQuantum - kMinQuantumClass) / kQuantum;
static const size_t kNumQuantumWideClasses =
    (kMaxQuantumWideClass + kQuantumWide - kMinQuantumWideClass) / kQuantumWide;
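
// A quick worked check of the formulas above, assuming the default constants
// in this file (illustrative only): the quantum-spaced classes are
// 16, 32, ..., 496 and the quantum-wide-spaced classes are 512, 768, ..., 3840.
static_assert(kNumQuantumClasses == 31,
              "expected 31 quantum-spaced classes with the defaults above");
static_assert(kNumQuantumWideClasses == 14,
              "expected 14 quantum-wide-spaced classes with the defaults above");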
// Size and alignment of memory chunks that are allocated by the OS's virtual
// memory system.
static const size_t kChunkSize = 1_MiB;
static const size_t kChunkSizeMask = kChunkSize - 1;
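
// Illustrative check in the same style as the static_asserts above: the chunk
// size must be a power of two for kChunkSizeMask to work as a bit mask.
static_assert(mozilla::IsPowerOfTwo(kChunkSize),
              "kChunkSize is not a power of two");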
499 #ifdef MALLOC_STATIC_PAGESIZE
// VM page size. It must divide the runtime CPU page size or the code
// will not work correctly.
502 // Platform specific page size conditions copied from js/public/HeapAPI.h
503 # if defined(__powerpc64__)
504 static const size_t gPageSize
= 64_KiB
;
505 # elif defined(__loongarch64)
506 static const size_t gPageSize
= 16_KiB
;
508 static const size_t gPageSize
= 4_KiB
;
510 static const size_t gRealPageSize
= gPageSize
;
513 // When MALLOC_OPTIONS contains one or several `P`s, the page size used
514 // across the allocator is multiplied by 2 for each `P`, but we also keep
515 // the real page size for code paths that need it. gPageSize is thus a
516 // power of two greater or equal to gRealPageSize.
static size_t gRealPageSize;
static size_t gPageSize;
521 #ifdef MALLOC_STATIC_PAGESIZE
522 # define DECLARE_GLOBAL(type, name)
523 # define DEFINE_GLOBALS
525 # define DEFINE_GLOBAL(type) static const type
526 # define GLOBAL_LOG2 LOG2
527 # define GLOBAL_ASSERT_HELPER1(x) static_assert(x, #x)
528 # define GLOBAL_ASSERT_HELPER2(x, y) static_assert(x, y)
529 # define GLOBAL_ASSERT(...) \
531 MOZ_PASTE_PREFIX_AND_ARG_COUNT(GLOBAL_ASSERT_HELPER, __VA_ARGS__), \
533 # define GLOBAL_CONSTEXPR constexpr
535 # define DECLARE_GLOBAL(type, name) static type name;
536 # define DEFINE_GLOBALS static void DefineGlobals() {
537 # define END_GLOBALS }
538 # define DEFINE_GLOBAL(type)
539 # define GLOBAL_LOG2 FloorLog2
540 # define GLOBAL_ASSERT MOZ_RELEASE_ASSERT
541 # define GLOBAL_CONSTEXPR
DECLARE_GLOBAL(size_t, gMaxSubPageClass)
DECLARE_GLOBAL(uint8_t, gNumSubPageClasses)
DECLARE_GLOBAL(uint8_t, gPageSize2Pow)
DECLARE_GLOBAL(size_t, gPageSizeMask)
DECLARE_GLOBAL(size_t, gChunkNumPages)
DECLARE_GLOBAL(size_t, gChunkHeaderNumPages)
DECLARE_GLOBAL(size_t, gMaxLargeClass)
// Largest sub-page size class, or zero if there are none.
DEFINE_GLOBAL(size_t)
gMaxSubPageClass = gPageSize / 2 >= kMinSubPageClass ? gPageSize / 2 : 0;
558 // Max size class for bins.
559 #define gMaxBinClass \
560 (gMaxSubPageClass ? gMaxSubPageClass : kMaxQuantumWideClass)
562 // Number of sub-page bins.
563 DEFINE_GLOBAL(uint8_t)
564 gNumSubPageClasses
= []() GLOBAL_CONSTEXPR
-> uint8_t {
565 if GLOBAL_CONSTEXPR (gMaxSubPageClass
!= 0) {
566 return FloorLog2(gMaxSubPageClass
) - LOG2(kMinSubPageClass
) + 1;
DEFINE_GLOBAL(uint8_t) gPageSize2Pow = GLOBAL_LOG2(gPageSize);
DEFINE_GLOBAL(size_t) gPageSizeMask = gPageSize - 1;
574 // Number of pages in a chunk.
DEFINE_GLOBAL(size_t) gChunkNumPages = kChunkSize >> gPageSize2Pow;
// Number of pages necessary for a chunk header plus a guard page.
DEFINE_GLOBAL(size_t)
gChunkHeaderNumPages =
    1 + (((sizeof(arena_chunk_t) +
           sizeof(arena_chunk_map_t) * (gChunkNumPages - 1) + gPageSizeMask) &
          ~gPageSizeMask) >>
         gPageSize2Pow);
// One chunk, minus the header, minus a guard page.
DEFINE_GLOBAL(size_t)
gMaxLargeClass =
    kChunkSize - gPageSize - (gChunkHeaderNumPages << gPageSize2Pow);
590 // Various sanity checks that regard configuration.
591 GLOBAL_ASSERT(1ULL << gPageSize2Pow
== gPageSize
,
592 "Page size is not a power of two");
593 GLOBAL_ASSERT(kQuantum
>= sizeof(void*));
594 GLOBAL_ASSERT(kQuantum
<= kQuantumWide
);
595 GLOBAL_ASSERT(!kNumQuantumWideClasses
||
596 kQuantumWide
<= (kMinSubPageClass
- kMaxQuantumClass
));
598 GLOBAL_ASSERT(kQuantumWide
<= kMaxQuantumClass
);
600 GLOBAL_ASSERT(gMaxSubPageClass
>= kMinSubPageClass
|| gMaxSubPageClass
== 0);
601 GLOBAL_ASSERT(gMaxLargeClass
>= gMaxSubPageClass
);
602 GLOBAL_ASSERT(kChunkSize
>= gPageSize
);
603 GLOBAL_ASSERT(kQuantum
* 4 <= kChunkSize
);
607 // Recycle at most 128 MiB of chunks. This means we retain at most
608 // 6.25% of the process address space on a 32-bit OS for later use.
static const size_t gRecycleLimit = 128_MiB;
611 // The current amount of recycled bytes, updated atomically.
static Atomic<size_t, ReleaseAcquire> gRecycledSize;
614 // Maximum number of dirty pages per arena.
615 #define DIRTY_MAX_DEFAULT (1U << 8)
static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
619 // Return the smallest chunk multiple that is >= s.
620 #define CHUNK_CEILING(s) (((s) + kChunkSizeMask) & ~kChunkSizeMask)
622 // Return the smallest cacheline multiple that is >= s.
623 #define CACHELINE_CEILING(s) \
624 (((s) + (kCacheLineSize - 1)) & ~(kCacheLineSize - 1))
626 // Return the smallest quantum multiple that is >= a.
627 #define QUANTUM_CEILING(a) (((a) + (kQuantumMask)) & ~(kQuantumMask))
628 #define QUANTUM_WIDE_CEILING(a) \
629 (((a) + (kQuantumWideMask)) & ~(kQuantumWideMask))
631 // Return the smallest sub page-size that is >= a.
632 #define SUBPAGE_CEILING(a) (RoundUpPow2(a))
634 // Return the smallest pagesize multiple that is >= s.
635 #define PAGE_CEILING(s) (((s) + gPageSizeMask) & ~gPageSizeMask)
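
// A few worked examples for the rounding macros above (illustrative only;
// PAGE_CEILING is omitted because gPageSizeMask may not be a compile-time
// constant).
static_assert(QUANTUM_CEILING(17) == 32, "17 rounds up to the next quantum");
static_assert(QUANTUM_CEILING(32) == 32, "multiples of the quantum are kept");
static_assert(QUANTUM_WIDE_CEILING(600) == 768,
              "600 rounds up to the next wide quantum");
static_assert(CHUNK_CEILING(1) == kChunkSize,
              "any non-zero size rounds up to at least one chunk");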
637 // Number of all the small-allocated classes
638 #define NUM_SMALL_CLASSES \
639 (kNumTinyClasses + kNumQuantumClasses + kNumQuantumWideClasses + \
642 // ***************************************************************************
643 // MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
644 #if defined(MALLOC_DECOMMIT) && defined(MALLOC_DOUBLE_PURGE)
645 # error MALLOC_DECOMMIT and MALLOC_DOUBLE_PURGE are mutually exclusive.
static void* base_alloc(size_t aSize);
650 // Set to true once the allocator has been initialized.
651 #if defined(_MSC_VER) && !defined(__clang__)
652 // MSVC may create a static initializer for an Atomic<bool>, which may actually
// run after `malloc_init` has been called once, which triggers multiple
// initializations.
655 // We work around the problem by not using an Atomic<bool> at all. There is a
656 // theoretical problem with using `malloc_initialized` non-atomically, but
657 // practically, this is only true if `malloc_init` is never called before
658 // threads are created.
static bool malloc_initialized;
#else
static Atomic<bool, MemoryOrdering::ReleaseAcquire> malloc_initialized;
#endif
static StaticMutex gInitLock MOZ_UNANNOTATED = {STATIC_MUTEX_INIT};
666 // ***************************************************************************
667 // Statistics data structures.
669 struct arena_stats_t
{
670 // Number of bytes currently mapped.
// Current number of committed pages (i.e. not madvised or decommitted).
676 // Per-size-category statistics.
677 size_t allocated_small
;
679 size_t allocated_large
;
682 // ***************************************************************************
683 // Extent data structures.
687 ZEROED_CHUNK
, // chunk only contains zeroes.
688 ARENA_CHUNK
, // used to back arena runs created by arena_t::AllocRun.
689 HUGE_CHUNK
, // used to back huge allocations (e.g. arena_t::MallocHuge).
690 RECYCLED_CHUNK
, // chunk has been stored for future use by chunk_recycle.
694 struct extent_node_t
{
696 // Linkage for the size/address-ordered tree for chunk recycling.
697 RedBlackTreeNode
<extent_node_t
> mLinkBySize
;
698 // Arena id for huge allocations. It's meant to match mArena->mId,
699 // which only holds true when the arena hasn't been disposed of.
703 // Linkage for the address-ordered tree.
704 RedBlackTreeNode
<extent_node_t
> mLinkByAddr
;
706 // Pointer to the extent that this tree node is responsible for.
709 // Total region size.
713 // What type of chunk is there; used for chunk recycling.
714 ChunkType mChunkType
;
716 // A pointer to the associated arena, for huge allocations.
721 struct ExtentTreeSzTrait
{
722 static RedBlackTreeNode
<extent_node_t
>& GetTreeNode(extent_node_t
* aThis
) {
723 return aThis
->mLinkBySize
;
726 static inline Order
Compare(extent_node_t
* aNode
, extent_node_t
* aOther
) {
727 Order ret
= CompareInt(aNode
->mSize
, aOther
->mSize
);
728 return (ret
!= Order::eEqual
) ? ret
729 : CompareAddr(aNode
->mAddr
, aOther
->mAddr
);
733 struct ExtentTreeTrait
{
734 static RedBlackTreeNode
<extent_node_t
>& GetTreeNode(extent_node_t
* aThis
) {
735 return aThis
->mLinkByAddr
;
738 static inline Order
Compare(extent_node_t
* aNode
, extent_node_t
* aOther
) {
739 return CompareAddr(aNode
->mAddr
, aOther
->mAddr
);
743 struct ExtentTreeBoundsTrait
: public ExtentTreeTrait
{
744 static inline Order
Compare(extent_node_t
* aKey
, extent_node_t
* aNode
) {
745 uintptr_t key_addr
= reinterpret_cast<uintptr_t>(aKey
->mAddr
);
746 uintptr_t node_addr
= reinterpret_cast<uintptr_t>(aNode
->mAddr
);
747 size_t node_size
= aNode
->mSize
;
749 // Is aKey within aNode?
750 if (node_addr
<= key_addr
&& key_addr
< node_addr
+ node_size
) {
751 return Order::eEqual
;
754 return CompareAddr(aKey
->mAddr
, aNode
->mAddr
);
// Describe the size classes to which allocations are rounded up.
759 // TODO: add large and huge types when the arena allocation code
760 // changes in a way that allows it to be beneficial.
771 explicit inline SizeClass(size_t aSize
) {
772 if (aSize
<= kMaxTinyClass
) {
774 mSize
= std::max(RoundUpPow2(aSize
), kMinTinyClass
);
775 } else if (aSize
<= kMaxQuantumClass
) {
777 mSize
= QUANTUM_CEILING(aSize
);
778 } else if (aSize
<= kMaxQuantumWideClass
) {
780 mSize
= QUANTUM_WIDE_CEILING(aSize
);
781 } else if (aSize
<= gMaxSubPageClass
) {
783 mSize
= SUBPAGE_CEILING(aSize
);
784 } else if (aSize
<= gMaxLargeClass
) {
786 mSize
= PAGE_CEILING(aSize
);
788 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Invalid size");
792 SizeClass
& operator=(const SizeClass
& aOther
) = default;
794 bool operator==(const SizeClass
& aOther
) { return aOther
.mSize
== mSize
; }
796 size_t Size() { return mSize
; }
798 ClassType
Type() { return mType
; }
800 SizeClass
Next() { return SizeClass(mSize
+ 1); }
809 // During deallocation we want to divide by the size class. This class
810 // provides a routine and sets up a constant as follows.
// To divide by a number D that is not a power of two we multiply by (2^17 /
// D) and then right shift by 17 positions.
//
//   X / D  becomes  (X * m) >> p
//
// Where m is calculated during the FastDivisor constructor similarly to:
//
//   m = 2^p / D
825 template <typename T
>
828 // The shift amount (p) is chosen to minimise the size of m while
829 // working for divisors up to 65536 in steps of 16. I arrived at 17
830 // experimentally. I wanted a low number to minimise the range of m
831 // so it can fit in a uint16_t, 16 didn't work but 17 worked perfectly.
833 // We'd need to increase this if we allocated memory on smaller boundaries
835 static const unsigned p
= 17;
837 // We can fit the inverted divisor in 16 bits, but we template it here for
842 // Needed so mBins can be constructed.
843 FastDivisor() : m(0) {}
845 FastDivisor(unsigned div
, unsigned max
) {
846 MOZ_ASSERT(div
<= max
);
848 // divide_inv_shift is large enough.
849 MOZ_ASSERT((1U << p
) >= div
);
851 // The calculation here for m is formula 26 from Section
852 // 10-9 "Unsigned Division by Divisors >= 1" in
853 // Henry S. Warren, Jr.'s Hacker's Delight, 2nd Ed.
854 unsigned m_
= ((1U << p
) + div
- 1 - (((1U << p
) - 1) % div
)) / div
;
856 // Make sure that max * m does not overflow.
857 MOZ_DIAGNOSTIC_ASSERT(max
< UINT_MAX
/ m_
);
859 MOZ_ASSERT(m_
<= std::numeric_limits
<T
>::max());
860 m
= static_cast<T
>(m_
);
862 // Initialisation made m non-zero.
865 // Test that all the divisions in the range we expected would work.
867 for (unsigned num
= 0; num
< max
; num
+= div
) {
868 MOZ_ASSERT(num
/ div
== divide(num
));
873 // Note that this always occurs in uint32_t regardless of m's type. If m is
874 // a uint16_t it will be zero-extended before the multiplication. We also use
875 // uint32_t rather than something that could possibly be larger because it is
876 // most-likely the cheapest multiplication.
877 inline uint32_t divide(uint32_t num
) const {
878 // Check that m was initialised.
880 return (num
* m
) >> p
;
884 template <typename T
>
885 unsigned inline operator/(unsigned num
, FastDivisor
<T
> divisor
) {
886 return divisor
.divide(num
);
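}

// Usage sketch (illustrative only, not taken from the allocator): for a
// 48-byte size class with 4 KiB runs, FastDivisor<uint16_t>(48, 4096)
// computes m == 2731, and a region offset of 480 bytes divides as
// (480 * 2731) >> 17 == 10, matching 480 / 48. The hypothetical helper below
// shows the intended call pattern when mapping a freed pointer back to its
// region index within a run.
static inline unsigned ExampleRegionIndexFromOffset(
    unsigned aOffset, FastDivisor<uint16_t> aSizeClassDivisor) {
  // Same result as dividing by the size class, without a division instruction.
  return aOffset / aSizeClassDivisor;
}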
889 // ***************************************************************************
890 // Radix tree data structures.
892 // The number of bits passed to the template is the number of significant bits
893 // in an address to do a radix lookup with.
895 // An address is looked up by splitting it in kBitsPerLevel bit chunks, except
896 // the most significant bits, where the bit chunk is kBitsAtLevel1 which can be
897 // different if Bits is not a multiple of kBitsPerLevel.
899 // With e.g. sizeof(void*)=4, Bits=16 and kBitsPerLevel=8, an address is split
900 // like the following:
901 // 0x12345678 -> mRoot[0x12][0x34]
902 template <size_t Bits
>
903 class AddressRadixTree
{
904 // Size of each radix tree node (as a power of 2).
905 // This impacts tree depth.
906 #ifdef HAVE_64BIT_BUILD
907 static const size_t kNodeSize
= kCacheLineSize
;
909 static const size_t kNodeSize
= 16_KiB
;
911 static const size_t kBitsPerLevel
= LOG2(kNodeSize
) - LOG2(sizeof(void*));
912 static const size_t kBitsAtLevel1
=
913 (Bits
% kBitsPerLevel
) ? Bits
% kBitsPerLevel
: kBitsPerLevel
;
914 static const size_t kHeight
= (Bits
+ kBitsPerLevel
- 1) / kBitsPerLevel
;
915 static_assert(kBitsAtLevel1
+ (kHeight
- 1) * kBitsPerLevel
== Bits
,
916 "AddressRadixTree parameters don't work out");
918 Mutex mLock MOZ_UNANNOTATED
;
924 inline void* Get(void* aAddr
);
926 // Returns whether the value was properly set.
927 inline bool Set(void* aAddr
, void* aValue
);
929 inline bool Unset(void* aAddr
) { return Set(aAddr
, nullptr); }
932 inline void** GetSlot(void* aAddr
, bool aCreate
= false);
935 // ***************************************************************************
936 // Arena data structures.
940 struct ArenaChunkMapLink
{
941 static RedBlackTreeNode
<arena_chunk_map_t
>& GetTreeNode(
942 arena_chunk_map_t
* aThis
) {
947 struct ArenaRunTreeTrait
: public ArenaChunkMapLink
{
948 static inline Order
Compare(arena_chunk_map_t
* aNode
,
949 arena_chunk_map_t
* aOther
) {
952 return CompareAddr(aNode
, aOther
);
956 struct ArenaAvailTreeTrait
: public ArenaChunkMapLink
{
957 static inline Order
Compare(arena_chunk_map_t
* aNode
,
958 arena_chunk_map_t
* aOther
) {
959 size_t size1
= aNode
->bits
& ~gPageSizeMask
;
960 size_t size2
= aOther
->bits
& ~gPageSizeMask
;
961 Order ret
= CompareInt(size1
, size2
);
962 return (ret
!= Order::eEqual
)
964 : CompareAddr((aNode
->bits
& CHUNK_MAP_KEY
) ? nullptr : aNode
,
969 struct ArenaDirtyChunkTrait
{
970 static RedBlackTreeNode
<arena_chunk_t
>& GetTreeNode(arena_chunk_t
* aThis
) {
971 return aThis
->link_dirty
;
974 static inline Order
Compare(arena_chunk_t
* aNode
, arena_chunk_t
* aOther
) {
977 return CompareAddr(aNode
, aOther
);
981 #ifdef MALLOC_DOUBLE_PURGE
985 struct GetDoublyLinkedListElement
<arena_chunk_t
> {
986 static DoublyLinkedListElement
<arena_chunk_t
>& Get(arena_chunk_t
* aThis
) {
987 return aThis
->chunks_madvised_elem
;
990 } // namespace mozilla
994 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
996 # define ARENA_RUN_MAGIC 0x384adf93
998 // On 64-bit platforms, having the arena_bin_t pointer following
999 // the mMagic field means there's padding between both fields, making
1000 // the run header larger than necessary.
1001 // But when MOZ_DIAGNOSTIC_ASSERT_ENABLED is not set, starting the
1002 // header with this field followed by the arena_bin_t pointer yields
1003 // the same padding. We do want the mMagic field to appear first, so
// depending on whether MOZ_DIAGNOSTIC_ASSERT_ENABLED is set or not, we
// move some fields to avoid padding.
1007 // Number of free regions in run.
1011 // Bin this run is associated with.
1014 // Index of first element that might have a free region.
1015 unsigned mRegionsMinElement
;
1017 #if !defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
1018 // Number of free regions in run.
1022 // Bitmask of in-use regions (0: in use, 1: free).
1023 unsigned mRegionsMask
[1]; // Dynamically sized.
1026 struct arena_bin_t
{
// Current run being used to service allocations of this bin's size class.
1029 arena_run_t
* mCurrentRun
;
1031 // Tree of non-full runs. This tree is used when looking for an
1032 // existing run when mCurrentRun is no longer usable. We choose the
1033 // non-full run that is lowest in memory; this policy tends to keep
1034 // objects packed well, and it can also help reduce the number of
1035 // almost-empty chunks.
1036 RedBlackTree
<arena_chunk_map_t
, ArenaRunTreeTrait
> mNonFullRuns
;
1038 // Bin's size class.
1041 // Total number of regions in a run for this bin's size class.
1042 uint32_t mRunNumRegions
;
1044 // Number of elements in a run's mRegionsMask for this bin's size class.
1045 uint32_t mRunNumRegionsMask
;
1047 // Offset of first region in a run for this bin's size class.
1048 uint32_t mRunFirstRegionOffset
;
1050 // Current number of runs in this bin, full or otherwise.
1053 // A constant for fast division by size class. This value is 16 bits wide so
1054 // it is placed last.
1055 FastDivisor
<uint16_t> mSizeDivisor
;
1057 // Total number of pages in a run for this bin's size class.
1058 uint8_t mRunSizePages
;
1060 // Amount of overhead runs are allowed to have.
1061 static constexpr double kRunOverhead
= 1.6_percent
;
1062 static constexpr double kRunRelaxedOverhead
= 2.4_percent
;
1064 // Initialize a bin for the given size class.
1065 // The generated run sizes, for a page size of 4 KiB, are:
1066 // size|run size|run size|run size|run
1067 // class|size class|size class|size class|size
1068 // 4 4 KiB 8 4 KiB 16 4 KiB 32 4 KiB
1069 // 48 4 KiB 64 4 KiB 80 4 KiB 96 4 KiB
1070 // 112 4 KiB 128 8 KiB 144 4 KiB 160 8 KiB
1071 // 176 4 KiB 192 4 KiB 208 8 KiB 224 4 KiB
1072 // 240 8 KiB 256 16 KiB 272 8 KiB 288 4 KiB
1073 // 304 12 KiB 320 12 KiB 336 4 KiB 352 8 KiB
1074 // 368 4 KiB 384 8 KiB 400 20 KiB 416 16 KiB
1075 // 432 12 KiB 448 4 KiB 464 16 KiB 480 8 KiB
1076 // 496 20 KiB 512 32 KiB 768 16 KiB 1024 64 KiB
1077 // 1280 24 KiB 1536 32 KiB 1792 16 KiB 2048 128 KiB
1078 // 2304 16 KiB 2560 48 KiB 2816 36 KiB 3072 64 KiB
1079 // 3328 36 KiB 3584 32 KiB 3840 64 KiB
1080 inline void Init(SizeClass aSizeClass
);
// We try to keep the above structure aligned with common cache line sizes;
// often that's 64 bytes on x86 and ARM. We don't make assumptions for other
// platforms.
1086 #if defined(__x86_64__) || defined(__aarch64__)
// On 64-bit platforms this structure is often 48 bytes
1088 // long, which means every other array element will be properly aligned.
1089 static_assert(sizeof(arena_bin_t
) == 48);
1090 #elif defined(__x86__) || defined(__arm__)
1091 static_assert(sizeof(arena_bin_t
) == 32);
1095 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
1097 # define ARENA_MAGIC 0x947d3d24
1100 // Linkage for the tree of arenas by id.
1101 RedBlackTreeNode
<arena_t
> mLink
;
// Arena id, which we keep away from the beginning of the struct so that
// free list pointers in TypedBaseAlloc<arena_t> don't overflow into it,
// and so it keeps the value it had after the destructor.
// All operations on this arena require that lock be locked. The MaybeMutex
// class will elide locking if the arena is accessed from a single thread
// only.
MaybeMutex mLock MOZ_UNANNOTATED;
1113 arena_stats_t mStats
;
1116 // Tree of dirty-page-containing chunks this arena manages.
1117 RedBlackTree
<arena_chunk_t
, ArenaDirtyChunkTrait
> mChunksDirty
;
1119 #ifdef MALLOC_DOUBLE_PURGE
1120 // Head of a linked list of MADV_FREE'd-page-containing chunks this
1122 DoublyLinkedList
<arena_chunk_t
> mChunksMAdvised
;
1125 // In order to avoid rapid chunk allocation/deallocation when an arena
1126 // oscillates right on the cusp of needing a new chunk, cache the most
1127 // recently freed chunk. The spare is left in the arena's chunk trees
1128 // until it is deleted.
1130 // There is one spare chunk per arena, rather than one spare total, in
1131 // order to avoid interactions between multiple threads that could make
1132 // a single spare inadequate.
1133 arena_chunk_t
* mSpare
;
1135 // A per-arena opt-in to randomize the offset of small allocations
1136 bool mRandomizeSmallAllocations
;
1138 // Whether this is a private arena. Multiple public arenas are just a
1139 // performance optimization and not a safety feature.
1141 // Since, for example, we don't want thread-local arenas to grow too much, we
1142 // use the default arena for bigger allocations. We use this member to allow
1143 // realloc() to switch out of our arena if needed (which is not allowed for
1144 // private arenas for security).
1147 // A pseudorandom number generator. Initially null, it gets initialized
1148 // on first use to avoid recursive malloc initialization (e.g. on OSX
1149 // arc4random allocates memory).
1150 mozilla::non_crypto::XorShift128PlusRNG
* mPRNG
;
1153 // Current count of pages within unused runs that are potentially
1154 // dirty, and for which madvise(... MADV_FREE) has not been called. By
1155 // tracking this, we can institute a limit on how much dirty unused
1156 // memory is mapped for each arena.
1159 // The current number of pages that are available without a system call (but
1160 // probably a page fault).
1161 size_t mNumMAdvised
;
1164 // Maximum value allowed for mNumDirty.
1167 int32_t mMaxDirtyIncreaseOverride
;
1168 int32_t mMaxDirtyDecreaseOverride
;
1171 // Size/address-ordered tree of this arena's available runs. This tree
1172 // is used for first-best-fit run allocation.
1173 RedBlackTree
<arena_chunk_map_t
, ArenaAvailTreeTrait
> mRunsAvail
;
1176 // mBins is used to store rings of free regions of the following sizes,
1177 // assuming a 16-byte quantum, 4kB pagesize, and default MALLOC_OPTIONS.
1179 // | mBins[i] | size |
1180 // +----------+------+
1184 // +----------+------+
1193 // +----------+------+
1200 // +----------+------+
1201 arena_bin_t mBins
[1]; // Dynamically sized.
1203 explicit arena_t(arena_params_t
* aParams
, bool aIsPrivate
);
1207 void InitChunk(arena_chunk_t
* aChunk
, size_t aMinCommittedPages
);
1209 // This may return a chunk that should be destroyed with chunk_dealloc outside
1210 // of the arena lock. It is not the same chunk as was passed in (since that
1211 // chunk now becomes mSpare).
1212 [[nodiscard
]] arena_chunk_t
* DeallocChunk(arena_chunk_t
* aChunk
);
1214 arena_run_t
* AllocRun(size_t aSize
, bool aLarge
, bool aZero
);
1216 arena_chunk_t
* DallocRun(arena_run_t
* aRun
, bool aDirty
);
1218 [[nodiscard
]] bool SplitRun(arena_run_t
* aRun
, size_t aSize
, bool aLarge
,
1221 void TrimRunHead(arena_chunk_t
* aChunk
, arena_run_t
* aRun
, size_t aOldSize
,
1224 void TrimRunTail(arena_chunk_t
* aChunk
, arena_run_t
* aRun
, size_t aOldSize
,
1225 size_t aNewSize
, bool dirty
);
1227 arena_run_t
* GetNonFullBinRun(arena_bin_t
* aBin
);
1229 inline uint8_t FindFreeBitInMask(uint32_t aMask
, uint32_t& aRng
);
1231 inline void* ArenaRunRegAlloc(arena_run_t
* aRun
, arena_bin_t
* aBin
);
1233 inline void* MallocSmall(size_t aSize
, bool aZero
);
1235 void* MallocLarge(size_t aSize
, bool aZero
);
1237 void* MallocHuge(size_t aSize
, bool aZero
);
1239 void* PallocLarge(size_t aAlignment
, size_t aSize
, size_t aAllocSize
);
1241 void* PallocHuge(size_t aSize
, size_t aAlignment
, bool aZero
);
1243 void RallocShrinkLarge(arena_chunk_t
* aChunk
, void* aPtr
, size_t aSize
,
1246 bool RallocGrowLarge(arena_chunk_t
* aChunk
, void* aPtr
, size_t aSize
,
1249 void* RallocSmallOrLarge(void* aPtr
, size_t aSize
, size_t aOldSize
);
1251 void* RallocHuge(void* aPtr
, size_t aSize
, size_t aOldSize
);
1254 inline void* Malloc(size_t aSize
, bool aZero
);
1256 void* Palloc(size_t aAlignment
, size_t aSize
);
1258 // This may return a chunk that should be destroyed with chunk_dealloc outside
1259 // of the arena lock. It is not the same chunk as was passed in (since that
1260 // chunk now becomes mSpare).
1261 [[nodiscard
]] inline arena_chunk_t
* DallocSmall(arena_chunk_t
* aChunk
,
1263 arena_chunk_map_t
* aMapElm
);
1265 [[nodiscard
]] arena_chunk_t
* DallocLarge(arena_chunk_t
* aChunk
, void* aPtr
);
1267 void* Ralloc(void* aPtr
, size_t aSize
, size_t aOldSize
);
1269 size_t EffectiveMaxDirty();
1271 #ifdef MALLOC_DECOMMIT
1272 // During a commit operation (for aReqPages) we have the opportunity of
// committing at most aRemainingPages additional pages. How many should we
// commit to amortise system calls?
1275 size_t ExtraCommitPages(size_t aReqPages
, size_t aRemainingPages
);
1278 // Passing one means purging all.
1279 void Purge(size_t aMaxDirty
);
1283 bool IsMainThreadOnly() const { return !mLock
.LockIsEnabled(); }
1285 void* operator new(size_t aCount
) = delete;
1287 void* operator new(size_t aCount
, const fallible_t
&) noexcept
;
1289 void operator delete(void*);
1292 struct ArenaTreeTrait
{
1293 static RedBlackTreeNode
<arena_t
>& GetTreeNode(arena_t
* aThis
) {
1294 return aThis
->mLink
;
1297 static inline Order
Compare(arena_t
* aNode
, arena_t
* aOther
) {
1300 return CompareInt(aNode
->mId
, aOther
->mId
);
1304 // Bookkeeping for all the arenas used by the allocator.
1305 // Arenas are separated in two categories:
1306 // - "private" arenas, used through the moz_arena_* API
1307 // - all the other arenas: the default arena, and thread-local arenas,
1308 // used by the standard API.
1309 class ArenaCollection
{
1313 mPrivateArenas
.Init();
1314 mMainThreadArenas
.Init();
1315 arena_params_t params
;
1316 // The main arena allows more dirty pages than the default for other arenas.
1317 params
.mMaxDirty
= opt_dirty_max
;
1319 mLock
.Init() ? CreateArena(/* aIsPrivate = */ false, ¶ms
) : nullptr;
1320 return bool(mDefaultArena
);
1323 inline arena_t
* GetById(arena_id_t aArenaId
, bool aIsPrivate
);
1325 arena_t
* CreateArena(bool aIsPrivate
, arena_params_t
* aParams
);
1327 void DisposeArena(arena_t
* aArena
) {
1328 MutexAutoLock
lock(mLock
);
1330 aArena
->IsMainThreadOnly() ? mMainThreadArenas
: mPrivateArenas
;
1332 MOZ_RELEASE_ASSERT(tree
.Search(aArena
), "Arena not in tree");
1333 tree
.Remove(aArena
);
1337 void SetDefaultMaxDirtyPageModifier(int32_t aModifier
) {
1338 mDefaultMaxDirtyPageModifier
= aModifier
;
1340 int32_t DefaultMaxDirtyPageModifier() { return mDefaultMaxDirtyPageModifier
; }
1342 using Tree
= RedBlackTree
<arena_t
, ArenaTreeTrait
>;
1344 struct Iterator
: Tree::Iterator
{
1345 explicit Iterator(Tree
* aTree
, Tree
* aSecondTree
,
1346 Tree
* aThirdTree
= nullptr)
1347 : Tree::Iterator(aTree
),
1348 mSecondTree(aSecondTree
),
1349 mThirdTree(aThirdTree
) {}
1351 Item
<Iterator
> begin() {
1352 return Item
<Iterator
>(this, *Tree::Iterator::begin());
1355 Item
<Iterator
> end() { return Item
<Iterator
>(this, nullptr); }
1358 arena_t
* result
= Tree::Iterator::Next();
1359 if (!result
&& mSecondTree
) {
1360 new (this) Iterator(mSecondTree
, mThirdTree
);
1361 result
= *Tree::Iterator::begin();
1372 if (IsOnMainThreadWeak()) {
1373 return Iterator(&mArenas
, &mPrivateArenas
, &mMainThreadArenas
);
1375 return Iterator(&mArenas
, &mPrivateArenas
);
1378 inline arena_t
* GetDefault() { return mDefaultArena
; }
1380 Mutex mLock MOZ_UNANNOTATED
;
1382 // We're running on the main thread which is set by a call to SetMainThread().
1383 bool IsOnMainThread() const {
1384 return mMainThreadId
.isSome() &&
1385 ThreadIdEqual(mMainThreadId
.value(), GetThreadId());
1388 // We're running on the main thread or SetMainThread() has never been called.
1389 bool IsOnMainThreadWeak() const {
1390 return mMainThreadId
.isNothing() || IsOnMainThread();
1393 // After a fork set the new thread ID in the child.
1394 void ResetMainThread() {
1395 // The post fork handler in the child can run from a MacOS worker thread,
1396 // so we can't set our main thread to it here. Instead we have to clear it.
1397 mMainThreadId
= Nothing();
1400 void SetMainThread() {
1401 MutexAutoLock
lock(mLock
);
1402 MOZ_ASSERT(mMainThreadId
.isNothing());
1403 mMainThreadId
= Some(GetThreadId());
1407 const static arena_id_t MAIN_THREAD_ARENA_BIT
= 0x1;
1409 inline arena_t
* GetByIdInternal(Tree
& aTree
, arena_id_t aArenaId
);
1411 arena_id_t
MakeRandArenaId(bool aIsMainThreadOnly
) const;
1412 static bool ArenaIdIsMainThreadOnly(arena_id_t aArenaId
) {
1413 return aArenaId
& MAIN_THREAD_ARENA_BIT
;
1416 arena_t
* mDefaultArena
;
1417 arena_id_t mLastPublicArenaId
;
1419 // Accessing mArenas and mPrivateArenas can only be done while holding mLock.
1420 // Since mMainThreadArenas can only be used from the main thread, it can be
// accessed without a lock, which is why it is a separate tree.
1423 Tree mPrivateArenas
;
1424 Tree mMainThreadArenas
;
1425 Atomic
<int32_t, MemoryOrdering::Relaxed
> mDefaultMaxDirtyPageModifier
;
1426 Maybe
<ThreadId
> mMainThreadId
;
1429 static ArenaCollection gArenas
;
static AddressRadixTree<(sizeof(void*) << 3) - LOG2(kChunkSize)> gChunkRTree;
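// (On a 64-bit build with the 1 MiB chunks above, the Bits template argument
// works out to 64 - 20 = 44 significant address bits; on 32-bit builds it is
// 32 - 20 = 12.)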
1435 // Protects chunk-related data structures.
1436 static Mutex chunks_mtx
;
1438 // Trees of chunks that were previously allocated (trees differ only in node
1439 // ordering). These are used when allocating chunks, in an attempt to re-use
1440 // address space. Depending on function, different tree orderings are needed,
1441 // which is why there are two trees with the same contents.
1442 static RedBlackTree
<extent_node_t
, ExtentTreeSzTrait
> gChunksBySize
1443 MOZ_GUARDED_BY(chunks_mtx
);
1444 static RedBlackTree
<extent_node_t
, ExtentTreeTrait
> gChunksByAddress
1445 MOZ_GUARDED_BY(chunks_mtx
);
1447 // Protects huge allocation-related data structures.
1448 static Mutex huge_mtx
;
1450 // Tree of chunks that are stand-alone huge allocations.
1451 static RedBlackTree
<extent_node_t
, ExtentTreeTrait
> huge
1452 MOZ_GUARDED_BY(huge_mtx
);
1454 // Huge allocation statistics.
1455 static size_t huge_allocated
MOZ_GUARDED_BY(huge_mtx
);
1456 static size_t huge_mapped
MOZ_GUARDED_BY(huge_mtx
);
1458 // **************************
1459 // base (internal allocation).
1461 static Mutex base_mtx
;
1463 // Current pages that are being used for internal memory allocations. These
1464 // pages are carved up in cacheline-size quanta, so that there is no chance of
1465 // false cache line sharing.
1466 static void* base_pages
MOZ_GUARDED_BY(base_mtx
);
1467 static void* base_next_addr
MOZ_GUARDED_BY(base_mtx
);
1468 static void* base_next_decommitted
MOZ_GUARDED_BY(base_mtx
);
1469 // Address immediately past base_pages.
1470 static void* base_past_addr
MOZ_GUARDED_BY(base_mtx
);
1471 static size_t base_mapped
MOZ_GUARDED_BY(base_mtx
);
1472 static size_t base_committed
MOZ_GUARDED_BY(base_mtx
);
1477 // The arena associated with the current thread (per
1478 // jemalloc_thread_local_arena) On OSX, __thread/thread_local circles back
1479 // calling malloc to allocate storage on first access on each thread, which
// leads to an infinite loop, but pthread-based TLS somehow doesn't have this
// problem.
1482 #if !defined(XP_DARWIN)
1483 static MOZ_THREAD_LOCAL(arena_t
*) thread_arena
;
1485 static detail::ThreadLocal
<arena_t
*, detail::ThreadLocalKeyStorage
>
1489 // *****************************
1490 // Runtime configuration options.
1492 #ifdef MALLOC_RUNTIME_CONFIG
1493 # define MALLOC_RUNTIME_VAR static
1495 # define MALLOC_RUNTIME_VAR static const
1504 MALLOC_RUNTIME_VAR
bool opt_junk
= false;
1505 MALLOC_RUNTIME_VAR
bool opt_zero
= false;
1507 #ifdef EARLY_BETA_OR_EARLIER
1508 MALLOC_RUNTIME_VAR PoisonType opt_poison
= ALL
;
1510 MALLOC_RUNTIME_VAR PoisonType opt_poison
= SOME
;
// Keep this larger than, and ideally a multiple of, kCacheLineSize.
1514 MALLOC_RUNTIME_VAR
size_t opt_poison_size
= 256;
1515 #ifndef MALLOC_RUNTIME_CONFIG
1516 static_assert(opt_poison_size
>= kCacheLineSize
);
1517 static_assert((opt_poison_size
% kCacheLineSize
) == 0);
1520 static bool opt_randomize_small
= true;
1522 // ***************************************************************************
1523 // Begin forward declarations.
1525 static void* chunk_alloc(size_t aSize
, size_t aAlignment
, bool aBase
);
1526 static void chunk_dealloc(void* aChunk
, size_t aSize
, ChunkType aType
);
1528 static void chunk_assert_zero(void* aPtr
, size_t aSize
);
1530 static void huge_dalloc(void* aPtr
, arena_t
* aArena
);
1531 static bool malloc_init_hard();
1535 # define FORK_HOOK extern "C"
1537 # define FORK_HOOK static
1539 FORK_HOOK
void _malloc_prefork(void);
1540 FORK_HOOK
void _malloc_postfork_parent(void);
1541 FORK_HOOK
void _malloc_postfork_child(void);
1543 FORK_HOOK
void _malloc_postfork(void);
1547 // End forward declarations.
1548 // ***************************************************************************
1550 // FreeBSD's pthreads implementation calls malloc(3), so the malloc
// implementation has to take pains to avoid infinite recursion during
// initialization.
1553 // Returns whether the allocator was successfully initialized.
1554 static inline bool malloc_init() {
1555 if (!malloc_initialized
) {
1556 return malloc_init_hard();
1561 static void _malloc_message(const char* p
) {
1562 #if !defined(XP_WIN)
1563 # define _write write
1565 // Pretend to check _write() errors to suppress gcc warnings about
1566 // warn_unused_result annotations in some versions of glibc headers.
1567 if (_write(STDERR_FILENO
, p
, (unsigned int)strlen(p
)) < 0) {
1572 template <typename
... Args
>
1573 static void _malloc_message(const char* p
, Args
... args
) {
1575 _malloc_message(args
...);
1579 // Android's pthread.h does not declare pthread_atfork() until SDK 21.
1580 extern "C" MOZ_EXPORT
int pthread_atfork(void (*)(void), void (*)(void),
1584 // ***************************************************************************
1585 // Begin Utility functions/macros.
1587 // Return the chunk address for allocation address a.
1588 static inline arena_chunk_t
* GetChunkForPtr(const void* aPtr
) {
1589 return (arena_chunk_t
*)(uintptr_t(aPtr
) & ~kChunkSizeMask
);
1592 // Return the chunk offset of address a.
1593 static inline size_t GetChunkOffsetForPtr(const void* aPtr
) {
1594 return (size_t)(uintptr_t(aPtr
) & kChunkSizeMask
);
1597 static inline const char* _getprogname(void) { return "<jemalloc>"; }
1599 static inline void MaybePoison(void* aPtr
, size_t aSize
) {
1601 switch (opt_poison
) {
1605 size
= std::min(aSize
, opt_poison_size
);
1611 MOZ_ASSERT(size
!= 0 && size
<= aSize
);
1612 memset(aPtr
, kAllocPoison
, size
);
1615 // Fill the given range of memory with zeroes or junk depending on opt_junk and
1617 static inline void ApplyZeroOrJunk(void* aPtr
, size_t aSize
) {
1619 memset(aPtr
, kAllocJunk
, aSize
);
1620 } else if (opt_zero
) {
1621 memset(aPtr
, 0, aSize
);
1625 // On Windows, delay crashing on OOM.
1628 // Implementation of VirtualAlloc wrapper (bug 1716727).
1629 namespace MozAllocRetries
{
1631 // Maximum retry count on OOM.
1632 constexpr size_t kMaxAttempts
= 10;
1633 // Minimum delay time between retries. (The actual delay time may be larger. See
1634 // Microsoft's documentation for ::Sleep() for details.)
1635 constexpr size_t kDelayMs
= 50;
1637 using StallSpecs
= ::mozilla::StallSpecs
;
static constexpr StallSpecs maxStall = {.maxAttempts = kMaxAttempts,
                                        .delayMs = kDelayMs};
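
// With the defaults above, a stalled allocation waits at most
// kMaxAttempts * kDelayMs = 10 * 50 ms = 500 ms in the main process;
// GetStallSpecs() below halves the attempt count for other process types.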
1642 static inline StallSpecs
GetStallSpecs() {
1643 # if defined(JS_STANDALONE)
1644 // GetGeckoProcessType() isn't available in this configuration. (SpiderMonkey
1645 // on Windows mostly skips this in favor of directly calling ::VirtualAlloc(),
1646 // though, so it's probably not going to matter whether we stall here or not.)
1649 switch (GetGeckoProcessType()) {
1650 // For the main process, stall for the maximum permissible time period. (The
1651 // main process is the most important one to keep alive.)
1652 case GeckoProcessType::GeckoProcessType_Default
:
1655 // For all other process types, stall for at most half as long.
1657 return {.maxAttempts
= maxStall
.maxAttempts
/ 2,
1658 .delayMs
= maxStall
.delayMs
};
1663 // Drop-in wrapper around VirtualAlloc. When out of memory, may attempt to stall
1664 // and retry rather than returning immediately, in hopes that the page file is
1665 // about to be expanded by Windows.
1668 // https://docs.microsoft.com/en-us/troubleshoot/windows-client/performance/slow-page-file-growth-memory-allocation-errors
1669 [[nodiscard
]] void* MozVirtualAlloc(LPVOID lpAddress
, SIZE_T dwSize
,
1670 DWORD flAllocationType
, DWORD flProtect
) {
1671 DWORD
const lastError
= ::GetLastError();
1673 constexpr auto IsOOMError
= [] {
1674 switch (::GetLastError()) {
1675 // This is the usual error result from VirtualAlloc for OOM.
1676 case ERROR_COMMITMENT_LIMIT
:
1677 // Although rare, this has also been observed in low-memory situations.
1678 // (Presumably this means Windows can't allocate enough kernel-side space
1679 // for its own internal representation of the process's virtual address
1681 case ERROR_NOT_ENOUGH_MEMORY
:
1688 void* ptr
= ::VirtualAlloc(lpAddress
, dwSize
, flAllocationType
, flProtect
);
1689 if (MOZ_LIKELY(ptr
)) return ptr
;
1691 // We can't do anything for errors other than OOM...
1692 if (!IsOOMError()) return nullptr;
1693 // ... or if this wasn't a request to commit memory in the first place.
1694 // (This function has no strategy for resolving MEM_RESERVE failures.)
1695 if (!(flAllocationType
& MEM_COMMIT
)) return nullptr;
1698 // Retry as many times as desired (possibly zero).
1699 const StallSpecs stallSpecs
= GetStallSpecs();
1702 stallSpecs
.StallAndRetry(&::Sleep
, [&]() -> std::optional
<void*> {
1704 ::VirtualAlloc(lpAddress
, dwSize
, flAllocationType
, flProtect
);
1707 // The OOM status has been handled, and should not be reported to
1710 ::SetLastError(lastError
);
1715 // Failure for some reason other than OOM.
1716 if (!IsOOMError()) {
1720 return std::nullopt
;
1723 return ret
.value_or(nullptr);
1725 } // namespace MozAllocRetries
1727 using MozAllocRetries::MozVirtualAlloc
;
1730 MOZ_JEMALLOC_API StallSpecs
GetAllocatorStallSpecs() {
1731 return ::MozAllocRetries::GetStallSpecs();
1733 } // namespace mozilla
// ***************************************************************************

static inline void pages_decommit(void* aAddr, size_t aSize) {
#ifdef XP_WIN
  // The region starting at addr may have been allocated in multiple calls
  // to VirtualAlloc and recycled, so decommitting the entire region in one
  // go may not be valid. However, since we allocate at least a chunk at a
  // time, we may touch any region in chunksized increments.
  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
  while (aSize > 0) {
    // This will cause Access Violation on read and write and thus act as a
    // guard page or region as well.
    if (!VirtualFree(aAddr, pages_size, MEM_DECOMMIT)) {
      MOZ_CRASH();
    }
    aAddr = (void*)((uintptr_t)aAddr + pages_size);
    aSize -= pages_size;
    pages_size = std::min(aSize, kChunkSize);
  }
#else
  if (mmap(aAddr, aSize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1,
           0) == MAP_FAILED) {
    // We'd like to report the OOM for our tooling, but we can't allocate
    // memory at this point, so avoid the use of printf.
    const char out_of_mappings[] =
        "[unhandlable oom] Failed to mmap, likely no more mappings "
        "available " __FILE__ " : " MOZ_STRINGIFY(__LINE__);
    if (errno == ENOMEM) {
      fputs(out_of_mappings, stderr);
      fflush(stderr);
      MOZ_CRASH_ANNOTATE(out_of_mappings);
    }
    MOZ_REALLY_CRASH(__LINE__);
  }
  MozTagAnonymousMemory(aAddr, aSize, "jemalloc-decommitted");
#endif
}
// Commit pages. Returns whether pages were committed.
[[nodiscard]] static inline bool pages_commit(void* aAddr, size_t aSize) {
#ifdef XP_WIN
  // The region starting at addr may have been allocated in multiple calls
  // to VirtualAlloc and recycled, so committing the entire region in one
  // go may not be valid. However, since we allocate at least a chunk at a
  // time, we may touch any region in chunksized increments.
  size_t pages_size = std::min(aSize, kChunkSize - GetChunkOffsetForPtr(aAddr));
  while (aSize > 0) {
    if (!MozVirtualAlloc(aAddr, pages_size, MEM_COMMIT, PAGE_READWRITE)) {
      return false;
    }
    aAddr = (void*)((uintptr_t)aAddr + pages_size);
    aSize -= pages_size;
    pages_size = std::min(aSize, kChunkSize);
  }
#else
  if (mmap(aAddr, aSize, PROT_READ | PROT_WRITE,
           MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == MAP_FAILED) {
    return false;
  }
  MozTagAnonymousMemory(aAddr, aSize, "jemalloc");
#endif
  return true;
}
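// Illustrative sketch (compiled out): the chunk-increment walk shared by
// pages_commit and pages_decommit above. The first step is limited to the
// space remaining in the chunk that contains aAddr; every later step is at
// most one full chunk. `ApplyToRange` is a hypothetical per-range callback.
#if 0
#  include <algorithm>
#  include <cstddef>
#  include <cstdint>

template <typename F>
static void ExampleChunkedWalk(void* aAddr, size_t aSize, size_t aChunkSize,
                               F&& ApplyToRange) {
  // Offset of aAddr inside its chunk, assuming chunks are aChunkSize-aligned.
  size_t offset = reinterpret_cast<uintptr_t>(aAddr) & (aChunkSize - 1);
  size_t step = std::min(aSize, aChunkSize - offset);
  while (aSize > 0) {
    ApplyToRange(aAddr, step);
    aAddr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(aAddr) + step);
    aSize -= step;
    step = std::min(aSize, aChunkSize);
  }
}
#endif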
static bool base_pages_alloc(size_t minsize) MOZ_REQUIRES(base_mtx) {
  size_t csize;
  size_t pminsize;

  MOZ_ASSERT(minsize != 0);
  csize = CHUNK_CEILING(minsize);
  base_pages = chunk_alloc(csize, kChunkSize, true);
  if (!base_pages) {
    return true;
  }
  base_next_addr = base_pages;
  base_past_addr = (void*)((uintptr_t)base_pages + csize);
  // Leave enough pages for minsize committed, since otherwise they would
  // have to be immediately recommitted.
  pminsize = PAGE_CEILING(minsize);
  base_next_decommitted = (void*)((uintptr_t)base_pages + pminsize);
  if (pminsize < csize) {
    pages_decommit(base_next_decommitted, csize - pminsize);
  }
  base_mapped += csize;
  base_committed += pminsize;

  return false;
}
static void* base_alloc(size_t aSize) {
  void* ret;
  size_t csize;

  // Round size up to nearest multiple of the cacheline size.
  csize = CACHELINE_CEILING(aSize);

  MutexAutoLock lock(base_mtx);
  // Make sure there's enough space for the allocation.
  if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
    if (base_pages_alloc(csize)) {
      return nullptr;
    }
  }
  // Allocate.
  ret = base_next_addr;
  base_next_addr = (void*)((uintptr_t)base_next_addr + csize);
  // Make sure enough pages are committed for the new allocation.
  if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
    void* pbase_next_addr = (void*)(PAGE_CEILING((uintptr_t)base_next_addr));

    if (!pages_commit(
            base_next_decommitted,
            (uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted)) {
      return nullptr;
    }

    base_committed +=
        (uintptr_t)pbase_next_addr - (uintptr_t)base_next_decommitted;
    base_next_decommitted = pbase_next_addr;
  }

  return ret;
}

static void* base_calloc(size_t aNumber, size_t aSize) {
  void* ret = base_alloc(aNumber * aSize);
  if (ret) {
    memset(ret, 0, aNumber * aSize);
  }
  return ret;
}
// A specialization of the base allocator with a free list.
template <typename T>
struct TypedBaseAlloc {
  static T* sFirstFree;

  static size_t size_of() { return sizeof(T); }

  static T* alloc() {
    T* ret;

    base_mtx.Lock();
    if (sFirstFree) {
      ret = sFirstFree;
      sFirstFree = *(T**)ret;
      base_mtx.Unlock();
    } else {
      base_mtx.Unlock();
      ret = (T*)base_alloc(size_of());
    }

    return ret;
  }

  static void dealloc(T* aNode) {
    MutexAutoLock lock(base_mtx);
    *(T**)aNode = sFirstFree;
    sFirstFree = aNode;
  }
};

using ExtentAlloc = TypedBaseAlloc<extent_node_t>;

template <>
extent_node_t* ExtentAlloc::sFirstFree = nullptr;

template <>
arena_t* TypedBaseAlloc<arena_t>::sFirstFree = nullptr;

template <>
size_t TypedBaseAlloc<arena_t>::size_of() {
  // Allocate enough space for trailing bins.
  return sizeof(arena_t) + (sizeof(arena_bin_t) * (NUM_SMALL_CLASSES - 1));
}

template <typename T>
struct BaseAllocFreePolicy {
  void operator()(T* aPtr) { TypedBaseAlloc<T>::dealloc(aPtr); }
};

using UniqueBaseNode =
    UniquePtr<extent_node_t, BaseAllocFreePolicy<extent_node_t>>;
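// Illustrative sketch (compiled out): the free-list-on-top-of-bump-allocation
// idea behind TypedBaseAlloc, with the mutex and base allocator replaced by a
// hypothetical single-threaded pool for clarity. Freed nodes are threaded
// into a singly linked list by reusing their first word; fresh nodes come
// from the backing allocator only when the list is empty.
#if 0
#  include <cstddef>
#  include <new>

template <typename T>
struct ExampleNodePool {
  T* mFirstFree = nullptr;

  T* alloc() {
    if (mFirstFree) {
      // Reuse a previously-freed node: its first word stores the next free
      // node.
      T* ret = mFirstFree;
      mFirstFree = *reinterpret_cast<T**>(ret);
      return ret;
    }
    // Fall back to fresh storage; TypedBaseAlloc uses base_alloc() here, this
    // sketch just uses operator new.
    return static_cast<T*>(::operator new(sizeof(T)));
  }

  void dealloc(T* aNode) {
    // Thread the node onto the free list instead of returning memory.
    *reinterpret_cast<T**>(aNode) = mFirstFree;
    mFirstFree = aNode;
  }
};
#endif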
// End Utility functions/macros.
// ***************************************************************************
// Begin chunk management functions.

#ifdef XP_WIN

static void* pages_map(void* aAddr, size_t aSize) {
  void* ret = nullptr;
  ret = MozVirtualAlloc(aAddr, aSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
  return ret;
}

static void pages_unmap(void* aAddr, size_t aSize) {
  if (VirtualFree(aAddr, 0, MEM_RELEASE) == 0) {
    _malloc_message(_getprogname(), ": (malloc) Error in VirtualFree()\n");
  }
}

#else

static void pages_unmap(void* aAddr, size_t aSize) {
  if (munmap(aAddr, aSize) == -1) {
    char buf[64];

    if (strerror_r(errno, buf, sizeof(buf)) == 0) {
      _malloc_message(_getprogname(), ": (malloc) Error in munmap(): ", buf,
                      "\n");
    }
  }
}
static void* pages_map(void* aAddr, size_t aSize) {
  void* ret;
#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  // The JS engine assumes that all allocated pointers have their high 17 bits
  // clear, which ia64's mmap doesn't support directly. However, we can emulate
  // it by passing mmap an "addr" parameter with those bits clear. The mmap will
  // return that address, or the nearest available memory above that address,
  // providing a near-guarantee that those bits are clear. If they are not, we
  // return nullptr below to indicate out-of-memory.
  //
  // The addr is chosen as 0x0000070000000000, which still allows about 120TB of
  // virtual address space.
  //
  // See Bug 589735 for more information.
  bool check_placement = true;
  if (!aAddr) {
    aAddr = (void*)0x0000070000000000;
    check_placement = false;
  }
#  endif

#  if defined(__sparc__) && defined(__arch64__) && defined(__linux__)
  const uintptr_t start = 0x0000070000000000ULL;
  const uintptr_t end = 0x0000800000000000ULL;

  // Copied from js/src/gc/Memory.cpp and adapted for this source
  uintptr_t hint;
  void* region = MAP_FAILED;
  for (hint = start; region == MAP_FAILED && hint + aSize <= end;
       hint += kChunkSize) {
    region = mmap((void*)hint, aSize, PROT_READ | PROT_WRITE,
                  MAP_PRIVATE | MAP_ANON, -1, 0);
    if (region != MAP_FAILED) {
      if (((size_t)region + (aSize - 1)) & 0xffff800000000000) {
        if (munmap(region, aSize)) {
          MOZ_ASSERT(errno == ENOMEM);
        }
        region = MAP_FAILED;
      }
    }
  }
  ret = region;
#  else
  // We don't use MAP_FIXED here, because it can cause the *replacement*
  // of existing mappings, and we only want to create new mappings.
  ret =
      mmap(aAddr, aSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#  endif
  if (ret == MAP_FAILED) {
    ret = nullptr;
  }
#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  // If the allocated memory doesn't have its upper 17 bits clear, consider it
  // as out of memory.
  else if ((long long)ret & 0xffff800000000000) {
    munmap(ret, aSize);
    ret = nullptr;
  }
  // If the caller requested a specific memory location, verify that's what mmap
  // returned.
  else if (check_placement && ret != aAddr) {
#  else
  else if (aAddr && ret != aAddr) {
#  endif
    // We succeeded in mapping memory, but not in the right place.
    pages_unmap(ret, aSize);
    ret = nullptr;
  }
  if (ret) {
    MozTagAnonymousMemory(ret, aSize, "jemalloc");
  }

#  if defined(__ia64__) || \
      (defined(__sparc__) && defined(__arch64__) && defined(__linux__))
  MOZ_ASSERT(!ret || (!check_placement && ret) ||
             (check_placement && ret == aAddr));
#  else
  MOZ_ASSERT(!ret || (!aAddr && ret != aAddr) || (aAddr && ret == aAddr));
#  endif
  return ret;
}
#endif
#  define VM_COPY_MIN kChunkSize
static inline void pages_copy(void* dest, const void* src, size_t n) {
  MOZ_ASSERT((void*)((uintptr_t)dest & ~gPageSizeMask) == dest);
  MOZ_ASSERT(n >= VM_COPY_MIN);
  MOZ_ASSERT((void*)((uintptr_t)src & ~gPageSizeMask) == src);

  kern_return_t r = vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n,
                            (vm_address_t)dest);
  if (r != KERN_SUCCESS) {
    MOZ_CRASH("vm_copy() failed");
  }
}
template <size_t Bits>
bool AddressRadixTree<Bits>::Init() {
  mLock.Init();
  mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
  return mRoot;
}

template <size_t Bits>
void** AddressRadixTree<Bits>::GetSlot(void* aKey, bool aCreate) {
  uintptr_t key = reinterpret_cast<uintptr_t>(aKey);
  uintptr_t subkey;
  unsigned i, lshift, height, bits;
  void** node;
  void** child;

  for (i = lshift = 0, height = kHeight, node = mRoot; i < height - 1;
       i++, lshift += bits, node = child) {
    bits = i ? kBitsPerLevel : kBitsAtLevel1;
    subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
    child = (void**)node[subkey];
    if (!child && aCreate) {
      child = (void**)base_calloc(1 << kBitsPerLevel, sizeof(void*));
      if (child) {
        node[subkey] = child;
      }
    }
    if (!child) {
      return nullptr;
    }
  }

  // node is a leaf, so it contains values rather than node pointers.
  bits = i ? kBitsPerLevel : kBitsAtLevel1;
  subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
  return &node[subkey];
}

template <size_t Bits>
void* AddressRadixTree<Bits>::Get(void* aKey) {
  void* ret = nullptr;

  void** slot = GetSlot(aKey);

  if (slot) {
    ret = *slot;
  }
#ifdef MOZ_DEBUG
  MutexAutoLock lock(mLock);

  // Suppose that it were possible for a jemalloc-allocated chunk to be
  // munmap()ped, followed by a different allocator in another thread re-using
  // overlapping virtual memory, all without invalidating the cached rtree
  // value. The result would be a false positive (the rtree would claim that
  // jemalloc owns memory that it had actually discarded). I don't think this
  // scenario is possible, but the following assertion is a prudent sanity
  // check.
  if (!slot) {
    // In case a slot has been created in the meantime.
    slot = GetSlot(aKey);
  }
  if (slot) {
    // The MutexAutoLock above should act as a memory barrier, forcing
    // the compiler to emit a new read instruction for *slot.
    MOZ_ASSERT(ret == *slot);
  } else {
    MOZ_ASSERT(ret == nullptr);
  }
#endif
  return ret;
}

template <size_t Bits>
bool AddressRadixTree<Bits>::Set(void* aKey, void* aValue) {
  MutexAutoLock lock(mLock);
  void** slot = GetSlot(aKey, /* aCreate = */ true);
  if (slot) {
    *slot = aValue;
  }
  return slot;
}
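// Worked example (compiled out): how GetSlot() slices an address into
// per-level indices. The level widths below (16/12/12) are hypothetical; the
// point is the shift expression, which extracts the topmost kBitsAtLevel1
// bits first and then successive kBitsPerLevel slices, leaving the low
// chunk-offset bits unused.
#if 0
#  include <cstdint>
#  include <cstdio>

static void ExampleRadixSplit(uintptr_t key) {
  const unsigned kBitsAtLevel1 = 16;
  const unsigned kBitsPerLevel = 12;
  const unsigned kHeight = 3;
  unsigned lshift = 0;
  for (unsigned i = 0; i < kHeight; i++) {
    unsigned bits = i ? kBitsPerLevel : kBitsAtLevel1;
    uintptr_t subkey = (key << lshift) >> ((sizeof(void*) << 3) - bits);
    printf("level %u -> slot %zu\n", i, (size_t)subkey);
    lshift += bits;
  }
}
#endif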
// pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
// from upstream jemalloc 3.4.1 to fix Mozilla bug 956501.

// Return the offset between a and the nearest aligned address at or below a.
#define ALIGNMENT_ADDR2OFFSET(a, alignment) \
  ((size_t)((uintptr_t)(a) & ((alignment) - 1)))

// Return the smallest alignment multiple that is >= s.
#define ALIGNMENT_CEILING(s, alignment) \
  (((s) + ((alignment) - 1)) & (~((alignment) - 1)))
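// Worked example (compiled out): what the two macros above compute for a
// 16-byte alignment. ALIGNMENT_ADDR2OFFSET keeps the low bits (the distance
// past the previous aligned address); ALIGNMENT_CEILING rounds up to the next
// multiple of the alignment.
#if 0
static_assert(ALIGNMENT_ADDR2OFFSET(0x1000, 16) == 0);
static_assert(ALIGNMENT_ADDR2OFFSET(0x1003, 16) == 3);
static_assert(ALIGNMENT_CEILING(0x1003, 16) == 0x1010);
static_assert(ALIGNMENT_CEILING(0x1010, 16) == 0x1010);
#endif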
static void* pages_trim(void* addr, size_t alloc_size, size_t leadsize,
                        size_t size) {
  void* ret = (void*)((uintptr_t)addr + leadsize);

  MOZ_ASSERT(alloc_size >= leadsize + size);
#ifdef XP_WIN
  {
    void* new_addr;

    pages_unmap(addr, alloc_size);
    new_addr = pages_map(ret, size);
    if (new_addr == ret) {
      return ret;
    }
    if (new_addr) {
      pages_unmap(new_addr, size);
    }
    return nullptr;
  }
#else
  {
    size_t trailsize = alloc_size - leadsize - size;

    if (leadsize != 0) {
      pages_unmap(addr, leadsize);
    }
    if (trailsize != 0) {
      pages_unmap((void*)((uintptr_t)ret + size), trailsize);
    }
    return ret;
  }
#endif
}
static void* chunk_alloc_mmap_slow(size_t size, size_t alignment) {
  void *ret, *pages;
  size_t alloc_size, leadsize;

  alloc_size = size + alignment - gRealPageSize;
  // Beware size_t wrap-around.
  if (alloc_size < size) {
    return nullptr;
  }
  do {
    pages = pages_map(nullptr, alloc_size);
    if (!pages) {
      return nullptr;
    }
    leadsize =
        ALIGNMENT_CEILING((uintptr_t)pages, alignment) - (uintptr_t)pages;
    ret = pages_trim(pages, alloc_size, leadsize, size);
  } while (!ret);

  MOZ_ASSERT(ret);
  return ret;
}

static void* chunk_alloc_mmap(size_t size, size_t alignment) {
  void* ret;
  size_t offset;

  // Ideally, there would be a way to specify alignment to mmap() (like
  // NetBSD has), but in the absence of such a feature, we have to work
  // hard to efficiently create aligned mappings. The reliable, but
  // slow method is to create a mapping that is over-sized, then trim the
  // excess.  However, that always results in one or two calls to
  // pages_unmap().
  //
  // Optimistically try mapping precisely the right amount before falling
  // back to the slow method, with the expectation that the optimistic
  // approach works most of the time.
  ret = pages_map(nullptr, size);
  if (!ret) {
    return nullptr;
  }
  offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
  if (offset != 0) {
    pages_unmap(ret, size);
    return chunk_alloc_mmap_slow(size, alignment);
  }

  MOZ_ASSERT(ret);
  return ret;
}
// Purge and release the pages in the chunk of length `length` at `addr` to
// the OS.
// Returns whether the pages are guaranteed to be full of zeroes when the
// function returns.
// The force_zero argument explicitly requests that the memory is guaranteed
// to be full of zeroes when the function returns.
static bool pages_purge(void* addr, size_t length, bool force_zero) {
  pages_decommit(addr, length);
  return true;
}
static void* chunk_recycle(size_t aSize, size_t aAlignment) {
  extent_node_t key;

  size_t alloc_size = aSize + aAlignment - kChunkSize;
  // Beware size_t wrap-around.
  if (alloc_size < aSize) {
    return nullptr;
  }
  key.mAddr = nullptr;
  key.mSize = alloc_size;
  chunks_mtx.Lock();
  extent_node_t* node = gChunksBySize.SearchOrNext(&key);
  if (!node) {
    chunks_mtx.Unlock();
    return nullptr;
  }
  size_t leadsize = ALIGNMENT_CEILING((uintptr_t)node->mAddr, aAlignment) -
                    (uintptr_t)node->mAddr;
  MOZ_ASSERT(node->mSize >= leadsize + aSize);
  size_t trailsize = node->mSize - leadsize - aSize;
  void* ret = (void*)((uintptr_t)node->mAddr + leadsize);

  // All recycled chunks are zeroed (because they're purged) before being
  // recycled.
  MOZ_ASSERT(node->mChunkType == ZEROED_CHUNK);

  // Remove node from the tree.
  gChunksBySize.Remove(node);
  gChunksByAddress.Remove(node);
  if (leadsize != 0) {
    // Insert the leading space as a smaller chunk.
    node->mSize = leadsize;
    gChunksBySize.Insert(node);
    gChunksByAddress.Insert(node);
    node = nullptr;
  }
  if (trailsize != 0) {
    // Insert the trailing space as a smaller chunk.
    if (!node) {
      // An additional node is required, but
      // TypedBaseAlloc::alloc() can cause a new base chunk to be
      // allocated. Drop chunks_mtx in order to avoid
      // deadlock, and if node allocation fails, deallocate
      // the result before returning an error.
      chunks_mtx.Unlock();
      node = ExtentAlloc::alloc();
      if (!node) {
        chunk_dealloc(ret, aSize, ZEROED_CHUNK);
        return nullptr;
      }
      chunks_mtx.Lock();
    }
    node->mAddr = (void*)((uintptr_t)(ret) + aSize);
    node->mSize = trailsize;
    node->mChunkType = ZEROED_CHUNK;
    gChunksBySize.Insert(node);
    gChunksByAddress.Insert(node);
    node = nullptr;
  }

  gRecycledSize -= aSize;

  chunks_mtx.Unlock();

  if (node) {
    ExtentAlloc::dealloc(node);
  }
  if (!pages_commit(ret, aSize)) {
    return nullptr;
  }
  return ret;
}
#ifdef XP_WIN
// On Windows, calls to VirtualAlloc and VirtualFree must be matched, making it
// awkward to recycle allocations of varying sizes. Therefore we only allow
// recycling when the size equals the chunksize, unless deallocation is entirely
// disabled.
#  define CAN_RECYCLE(size) ((size) == kChunkSize)
#else
#  define CAN_RECYCLE(size) true
#endif

// Allocates `size` bytes of system memory aligned for `alignment`.
// `base` indicates whether the memory will be used for the base allocator
// (e.g. base_alloc).
// `zeroed` is an outvalue that returns whether the allocated memory is
// guaranteed to be full of zeroes. It can be omitted when the caller doesn't
// care about the result.
static void* chunk_alloc(size_t aSize, size_t aAlignment, bool aBase) {
  void* ret = nullptr;

  MOZ_ASSERT(aSize != 0);
  MOZ_ASSERT((aSize & kChunkSizeMask) == 0);
  MOZ_ASSERT(aAlignment != 0);
  MOZ_ASSERT((aAlignment & kChunkSizeMask) == 0);

  // Base allocations can't be fulfilled by recycling because of
  // possible deadlock or infinite recursion.
  if (CAN_RECYCLE(aSize) && !aBase) {
    ret = chunk_recycle(aSize, aAlignment);
  }
  if (!ret) {
    ret = chunk_alloc_mmap(aSize, aAlignment);
  }
  if (ret && !aBase) {
    if (!gChunkRTree.Set(ret, ret)) {
      chunk_dealloc(ret, aSize, UNKNOWN_CHUNK);
      return nullptr;
    }
  }

  MOZ_ASSERT(GetChunkOffsetForPtr(ret) == 0);
  return ret;
}
static void chunk_assert_zero(void* aPtr, size_t aSize) {
  size_t i;
  size_t* p = (size_t*)(uintptr_t)aPtr;

  for (i = 0; i < aSize / sizeof(size_t); i++) {
    MOZ_ASSERT(p[i] == 0);
  }
}
static void chunk_record(void* aChunk, size_t aSize, ChunkType aType) {
  extent_node_t key;

  if (aType != ZEROED_CHUNK) {
    if (pages_purge(aChunk, aSize, aType == HUGE_CHUNK)) {
      aType = ZEROED_CHUNK;
    }
  }

  // Allocate a node before acquiring chunks_mtx even though it might not
  // be needed, because TypedBaseAlloc::alloc() may cause a new base chunk to
  // be allocated, which could cause deadlock if chunks_mtx were already
  // held.
  UniqueBaseNode xnode(ExtentAlloc::alloc());
  // Use xprev to implement conditional deferred deallocation of prev.
  UniqueBaseNode xprev;

  {
    // RAII deallocates xnode and xprev defined above after unlocking
    // in order to avoid potential dead-locks
    MutexAutoLock lock(chunks_mtx);
    key.mAddr = (void*)((uintptr_t)aChunk + aSize);
    extent_node_t* node = gChunksByAddress.SearchOrNext(&key);
    // Try to coalesce forward.
    if (node && node->mAddr == key.mAddr) {
      // Coalesce chunk with the following address range. This does
      // not change the position within gChunksByAddress, so only
      // remove/insert from/into gChunksBySize.
      gChunksBySize.Remove(node);
      node->mAddr = aChunk;
      node->mSize += aSize;
      if (node->mChunkType != aType) {
        node->mChunkType = RECYCLED_CHUNK;
      }
      gChunksBySize.Insert(node);
    } else {
      // Coalescing forward failed, so insert a new node.
      if (!xnode) {
        // TypedBaseAlloc::alloc() failed, which is an exceedingly
        // unlikely failure. Leak chunk; its pages have
        // already been purged, so this is only a virtual
        // memory leak.
        return;
      }
      node = xnode.release();
      node->mAddr = aChunk;
      node->mSize = aSize;
      node->mChunkType = aType;
      gChunksByAddress.Insert(node);
      gChunksBySize.Insert(node);
    }

    // Try to coalesce backward.
    extent_node_t* prev = gChunksByAddress.Prev(node);
    if (prev && (void*)((uintptr_t)prev->mAddr + prev->mSize) == aChunk) {
      // Coalesce chunk with the previous address range. This does
      // not change the position within gChunksByAddress, so only
      // remove/insert node from/into gChunksBySize.
      gChunksBySize.Remove(prev);
      gChunksByAddress.Remove(prev);

      gChunksBySize.Remove(node);
      node->mAddr = prev->mAddr;
      node->mSize += prev->mSize;
      if (node->mChunkType != prev->mChunkType) {
        node->mChunkType = RECYCLED_CHUNK;
      }
      gChunksBySize.Insert(node);

      xprev.reset(prev);
    }

    gRecycledSize += aSize;
  }
}
static void chunk_dealloc(void* aChunk, size_t aSize, ChunkType aType) {
  MOZ_ASSERT(GetChunkOffsetForPtr(aChunk) == 0);
  MOZ_ASSERT(aSize != 0);
  MOZ_ASSERT((aSize & kChunkSizeMask) == 0);

  gChunkRTree.Unset(aChunk);

  if (CAN_RECYCLE(aSize)) {
    size_t recycled_so_far = gRecycledSize;
    // In case some race condition put us above the limit.
    if (recycled_so_far < gRecycleLimit) {
      size_t recycle_remaining = gRecycleLimit - recycled_so_far;
      size_t to_recycle;
      if (aSize > recycle_remaining) {
        to_recycle = recycle_remaining;
        // Drop pages that would overflow the recycle limit
        pages_trim(aChunk, aSize, 0, to_recycle);
      } else {
        to_recycle = aSize;
      }
      chunk_record(aChunk, to_recycle, aType);
      return;
    }
  }

  pages_unmap(aChunk, aSize);
}

// End chunk management functions.
// ***************************************************************************
static inline arena_t* thread_local_arena(bool enabled) {
  arena_t* arena;

  if (enabled) {
    // The arena will essentially be leaked if this function is
    // called with `false`, but it doesn't matter at the moment.
    // because in practice nothing actually calls this function
    // with `false`, except maybe at shutdown.
    arena =
        gArenas.CreateArena(/* aIsPrivate = */ false, /* aParams = */ nullptr);
  } else {
    arena = gArenas.GetDefault();
  }
  thread_arena.set(arena);
  return arena;
}

inline void MozJemalloc::jemalloc_thread_local_arena(bool aEnabled) {
  if (malloc_init()) {
    thread_local_arena(aEnabled);
  }
}

// Choose an arena based on a per-thread value.
static inline arena_t* choose_arena(size_t size) {
  arena_t* ret = nullptr;

  // We can only use TLS if this is a PIC library, since for the static
  // library version, libc's malloc is used by TLS allocation, which
  // introduces a bootstrapping issue.

  if (size > kMaxQuantumClass) {
    // Force the default arena for larger allocations.
    ret = gArenas.GetDefault();
  } else {
    // Check TLS to see if our thread has requested a pinned arena.
    ret = thread_arena.get();
    // If ret is non-null, it must not be in the first page.
    MOZ_DIAGNOSTIC_ASSERT_IF(ret, (size_t)ret >= gPageSize);
    if (!ret) {
      // Nothing in TLS. Pin this thread to the default arena.
      ret = thread_local_arena(false);
    }
  }

  MOZ_DIAGNOSTIC_ASSERT(ret);
  return ret;
}
inline uint8_t arena_t::FindFreeBitInMask(uint32_t aMask, uint32_t& aRng) {
  if (mPRNG != nullptr) {
    if (aRng == UINT_MAX) {
      aRng = mPRNG->next() % 32;
    }
    uint8_t bitIndex;
    // RotateRight asserts when provided bad input.
    aMask = aRng ? RotateRight(aMask, aRng)
                 : aMask;  // Rotate the mask a random number of slots
    bitIndex = CountTrailingZeroes32(aMask);
    return (bitIndex + aRng) % 32;
  }
  return CountTrailingZeroes32(aMask);
}
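// Worked example (compiled out): how the rotate-then-count trick above picks a
// pseudo-random set bit without a linear scan. Rotating the mask by aRng turns
// "first set bit at or after position aRng (wrapping mod 32)" into "first set
// bit of the rotated word", which CountTrailingZeroes32 finds directly.
// ExampleRotateRight32 is a hypothetical stand-in for RotateRight.
#if 0
#  include <cassert>
#  include <cstdint>

static uint32_t ExampleRotateRight32(uint32_t v, uint32_t n) {
  return n ? (v >> n) | (v << (32 - n)) : v;
}

static void ExampleRandomBitPick() {
  const uint32_t mask = 0b0000'0100'0000'0000'0000'0000'0001'0000;  // bits 4, 26
  const uint32_t rng = 7;  // pretend mPRNG->next() % 32 returned 7
  uint32_t rotated = ExampleRotateRight32(mask, rng);
  uint32_t bitIndex = 0;
  while (!((rotated >> bitIndex) & 1)) bitIndex++;  // CountTrailingZeroes32
  // Undo the rotation: bit 26 is the first set bit at or after position 7.
  assert((bitIndex + rng) % 32 == 26);
}
#endif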
inline void* arena_t::ArenaRunRegAlloc(arena_run_t* aRun, arena_bin_t* aBin) {
  void* ret;
  unsigned i, mask, bit, regind;
  uint32_t rndPos = UINT_MAX;

  MOZ_DIAGNOSTIC_ASSERT(aRun->mMagic == ARENA_RUN_MAGIC);
  MOZ_ASSERT(aRun->mRegionsMinElement < aBin->mRunNumRegionsMask);

  // Move the first check outside the loop, so that aRun->mRegionsMinElement
  // can be updated unconditionally, without the possibility of updating it
  // multiple times.
  i = aRun->mRegionsMinElement;
  mask = aRun->mRegionsMask[i];
  if (mask != 0) {
    bit = FindFreeBitInMask(mask, rndPos);

    regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
    MOZ_ASSERT(regind < aBin->mRunNumRegions);
    ret = (void*)(((uintptr_t)aRun) + aBin->mRunFirstRegionOffset +
                  (aBin->mSizeClass * regind));

    // Clear bit.
    mask ^= (1U << bit);
    aRun->mRegionsMask[i] = mask;

    return ret;
  }

  for (i++; i < aBin->mRunNumRegionsMask; i++) {
    mask = aRun->mRegionsMask[i];
    if (mask != 0) {
      bit = FindFreeBitInMask(mask, rndPos);

      regind = ((i << (LOG2(sizeof(int)) + 3)) + bit);
      MOZ_ASSERT(regind < aBin->mRunNumRegions);
      ret = (void*)(((uintptr_t)aRun) + aBin->mRunFirstRegionOffset +
                    (aBin->mSizeClass * regind));

      // Clear bit.
      mask ^= (1U << bit);
      aRun->mRegionsMask[i] = mask;

      // Make a note that nothing before this element
      // contains a free region.
      aRun->mRegionsMinElement = i;  // Low payoff: + (mask == 0);

      return ret;
    }
  }
  // Not reached.
  MOZ_DIAGNOSTIC_ASSERT(0);
  return nullptr;
}

static inline void arena_run_reg_dalloc(arena_run_t* run, arena_bin_t* bin,
                                        void* ptr, size_t size) {
  uint32_t diff, regind;
  unsigned elm, bit;

  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);

  // Avoid doing division with a variable divisor if possible. Using
  // actual division here can reduce allocator throughput by over 20%!
  diff =
      (uint32_t)((uintptr_t)ptr - (uintptr_t)run - bin->mRunFirstRegionOffset);
  MOZ_ASSERT(diff <=
             (static_cast<unsigned>(bin->mRunSizePages) << gPageSize2Pow));
  regind = diff / bin->mSizeDivisor;

  MOZ_DIAGNOSTIC_ASSERT(diff == regind * size);
  MOZ_DIAGNOSTIC_ASSERT(regind < bin->mRunNumRegions);

  elm = regind >> (LOG2(sizeof(int)) + 3);
  if (elm < run->mRegionsMinElement) {
    run->mRegionsMinElement = elm;
  }
  bit = regind - (elm << (LOG2(sizeof(int)) + 3));
  MOZ_RELEASE_ASSERT((run->mRegionsMask[elm] & (1U << bit)) == 0,
                     "Double-free?");
  run->mRegionsMask[elm] |= (1U << bit);
}
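// Worked example (compiled out): the offset-to-region-index computation above
// with concrete numbers, using plain division in place of the FastDivisor.
// The 48-byte size class and 64-byte first-region offset are hypothetical.
#if 0
#  include <cassert>
#  include <cstdint>

static void ExampleRegionIndex() {
  const uint32_t kSizeClass = 48;
  const uint32_t kFirstRegionOffset = 64;
  // A pointer 4 regions into the run:
  const uint32_t ptrOffsetInRun = kFirstRegionOffset + 4 * kSizeClass;
  const uint32_t diff = ptrOffsetInRun - kFirstRegionOffset;
  const uint32_t regind = diff / kSizeClass;  // mSizeDivisor avoids the div
  assert(regind == 4);
  assert(diff == regind * kSizeClass);
}
#endif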
2628 bool arena_t::SplitRun(arena_run_t
* aRun
, size_t aSize
, bool aLarge
,
2630 arena_chunk_t
* chunk
= GetChunkForPtr(aRun
);
2631 size_t old_ndirty
= chunk
->ndirty
;
2633 (unsigned)((uintptr_t(aRun
) - uintptr_t(chunk
)) >> gPageSize2Pow
);
2634 size_t total_pages
=
2635 (chunk
->map
[run_ind
].bits
& ~gPageSizeMask
) >> gPageSize2Pow
;
2636 size_t need_pages
= (aSize
>> gPageSize2Pow
);
2637 MOZ_ASSERT(need_pages
> 0);
2638 MOZ_ASSERT(need_pages
<= total_pages
);
2639 size_t rem_pages
= total_pages
- need_pages
;
2641 #ifdef MALLOC_DECOMMIT
2643 while (i
< need_pages
) {
2644 // Commit decommitted pages if necessary. If a decommitted
2645 // page is encountered, commit all needed adjacent decommitted
2646 // pages in one operation, in order to reduce system call
2648 if (chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_DECOMMITTED
) {
2649 // Advance i+j to just past the index of the last page
2650 // to commit. Clear CHUNK_MAP_DECOMMITTED along the way.
2652 for (j
= 0; i
+ j
< need_pages
&&
2653 (chunk
->map
[run_ind
+ i
+ j
].bits
& CHUNK_MAP_DECOMMITTED
);
2655 // DECOMMITTED, MADVISED and FRESH are mutually exclusive.
2656 MOZ_ASSERT((chunk
->map
[run_ind
+ i
+ j
].bits
&
2657 (CHUNK_MAP_FRESH
| CHUNK_MAP_MADVISED
)) == 0);
2660 // Consider committing more pages to amortise calls to VirtualAlloc.
2661 // This only makes sense at the edge of our run hence the if condition
2663 if (i
+ j
== need_pages
) {
2664 size_t extra_commit
= ExtraCommitPages(j
, rem_pages
);
2665 for (; i
+ j
< need_pages
+ extra_commit
&&
2666 (chunk
->map
[run_ind
+ i
+ j
].bits
&
2667 CHUNK_MAP_MADVISED_OR_DECOMMITTED
);
2669 MOZ_ASSERT((chunk
->map
[run_ind
+ i
+ j
].bits
&
2670 (CHUNK_MAP_FRESH
| CHUNK_MAP_MADVISED
)) == 0);
2675 (void*)(uintptr_t(chunk
) + ((run_ind
+ i
) << gPageSize2Pow
)),
2676 j
<< gPageSize2Pow
)) {
2680 // pages_commit zeroes pages, so mark them as such if it succeeded.
2681 // That's checked further below to avoid manually zeroing the pages.
2682 for (size_t k
= 0; k
< j
; k
++) {
2683 chunk
->map
[run_ind
+ i
+ k
].bits
=
2684 (chunk
->map
[run_ind
+ i
+ k
].bits
& ~CHUNK_MAP_DECOMMITTED
) |
2685 CHUNK_MAP_ZEROED
| CHUNK_MAP_FRESH
;
2696 mRunsAvail
.Remove(&chunk
->map
[run_ind
]);
2698 // Keep track of trailing unused pages for later use.
2699 if (rem_pages
> 0) {
2700 chunk
->map
[run_ind
+ need_pages
].bits
=
2701 (rem_pages
<< gPageSize2Pow
) |
2702 (chunk
->map
[run_ind
+ need_pages
].bits
& gPageSizeMask
);
2703 chunk
->map
[run_ind
+ total_pages
- 1].bits
=
2704 (rem_pages
<< gPageSize2Pow
) |
2705 (chunk
->map
[run_ind
+ total_pages
- 1].bits
& gPageSizeMask
);
2706 mRunsAvail
.Insert(&chunk
->map
[run_ind
+ need_pages
]);
2709 for (size_t i
= 0; i
< need_pages
; i
++) {
2710 // Zero if necessary.
2712 if ((chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_ZEROED
) == 0) {
2713 memset((void*)(uintptr_t(chunk
) + ((run_ind
+ i
) << gPageSize2Pow
)), 0,
2715 // CHUNK_MAP_ZEROED is cleared below.
2719 // Update dirty page accounting.
2720 if (chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_DIRTY
) {
2723 // CHUNK_MAP_DIRTY is cleared below.
2724 } else if (chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_MADVISED
) {
2729 if (chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_FRESH
) {
2734 // This bit has already been cleared
2735 MOZ_ASSERT(!(chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_DECOMMITTED
));
2737 // Initialize the chunk map. This clears the dirty, zeroed and madvised
2738 // bits, decommitted is cleared above.
2740 chunk
->map
[run_ind
+ i
].bits
= CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
;
2742 chunk
->map
[run_ind
+ i
].bits
= size_t(aRun
) | CHUNK_MAP_ALLOCATED
;
2746 // Set the run size only in the first element for large runs. This is
2747 // primarily a debugging aid, since the lack of size info for trailing
2748 // pages only matters if the application tries to operate on an
2749 // interior pointer.
2751 chunk
->map
[run_ind
].bits
|= aSize
;
2754 if (chunk
->ndirty
== 0 && old_ndirty
> 0) {
2755 mChunksDirty
.Remove(chunk
);
2760 void arena_t::InitChunk(arena_chunk_t
* aChunk
, size_t aMinCommittedPages
) {
2761 mStats
.mapped
+= kChunkSize
;
2763 aChunk
->arena
= this;
2765 // Claim that no pages are in use, since the header is merely overhead.
2768 // Setup the chunk's pages in two phases. First we mark which pages are
2769 // committed & decommitted and perform the decommit. Then we update the map
2770 // to create the runs.
2772 // Clear the bits for the real header pages.
2774 for (i
= 0; i
< gChunkHeaderNumPages
- 1; i
++) {
2775 aChunk
->map
[i
].bits
= 0;
2777 mStats
.committed
+= gChunkHeaderNumPages
- 1;
2779 // Decommit the last header page (=leading page) as a guard.
2780 pages_decommit((void*)(uintptr_t(aChunk
) + (i
<< gPageSize2Pow
)), gPageSize
);
2781 aChunk
->map
[i
++].bits
= CHUNK_MAP_DECOMMITTED
;
2783 // If MALLOC_DECOMMIT is enabled then commit only the pages we're about to
2784 // use. Otherwise commit all of them.
2785 #ifdef MALLOC_DECOMMIT
2786 size_t n_fresh_pages
=
2787 aMinCommittedPages
+
2790 gChunkNumPages
- gChunkHeaderNumPages
- aMinCommittedPages
- 1);
2792 size_t n_fresh_pages
= gChunkNumPages
- 1 - gChunkHeaderNumPages
;
2795 // The committed pages are marked as Fresh. Our caller, SplitRun will update
2796 // this when it uses them.
2797 for (size_t j
= 0; j
< n_fresh_pages
; j
++) {
2798 aChunk
->map
[i
+ j
].bits
= CHUNK_MAP_ZEROED
| CHUNK_MAP_FRESH
;
2801 mNumFresh
+= n_fresh_pages
;
2803 #ifndef MALLOC_DECOMMIT
2804 // If MALLOC_DECOMMIT isn't defined then all the pages are fresh and setup in
2806 MOZ_ASSERT(i
== gChunkNumPages
- 1);
2809 // If MALLOC_DECOMMIT is defined, then this will decommit the remainder of the
2810 // chunk plus the last page which is a guard page, if it is not defined it
2811 // will only decommit the guard page.
2812 pages_decommit((void*)(uintptr_t(aChunk
) + (i
<< gPageSize2Pow
)),
2813 (gChunkNumPages
- i
) << gPageSize2Pow
);
2814 for (; i
< gChunkNumPages
; i
++) {
2815 aChunk
->map
[i
].bits
= CHUNK_MAP_DECOMMITTED
;
2818 // aMinCommittedPages will create a valid run.
2819 MOZ_ASSERT(aMinCommittedPages
> 0);
2820 MOZ_ASSERT(aMinCommittedPages
<= gChunkNumPages
- gChunkHeaderNumPages
- 1);
2823 aChunk
->map
[gChunkHeaderNumPages
].bits
|= gMaxLargeClass
;
2824 aChunk
->map
[gChunkNumPages
- 2].bits
|= gMaxLargeClass
;
2825 mRunsAvail
.Insert(&aChunk
->map
[gChunkHeaderNumPages
]);
2827 #ifdef MALLOC_DOUBLE_PURGE
2828 new (&aChunk
->chunks_madvised_elem
) DoublyLinkedListElement
<arena_chunk_t
>();
2832 arena_chunk_t
* arena_t::DeallocChunk(arena_chunk_t
* aChunk
) {
2834 if (mSpare
->ndirty
> 0) {
2835 aChunk
->arena
->mChunksDirty
.Remove(mSpare
);
2836 mNumDirty
-= mSpare
->ndirty
;
2837 mStats
.committed
-= mSpare
->ndirty
;
2840 // Count the number of madvised/fresh pages and update the stats.
2841 size_t madvised
= 0;
2843 for (size_t i
= gChunkHeaderNumPages
; i
< gChunkNumPages
- 1; i
++) {
2844 // There must not be any pages that are not fresh, madvised, decommitted
2846 MOZ_ASSERT(mSpare
->map
[i
].bits
&
2847 (CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED
| CHUNK_MAP_DIRTY
));
2849 if (mSpare
->map
[i
].bits
& CHUNK_MAP_MADVISED
) {
2851 } else if (mSpare
->map
[i
].bits
& CHUNK_MAP_FRESH
) {
2856 mNumMAdvised
-= madvised
;
2859 #ifdef MALLOC_DOUBLE_PURGE
2860 if (mChunksMAdvised
.ElementProbablyInList(mSpare
)) {
2861 mChunksMAdvised
.remove(mSpare
);
2865 mStats
.mapped
-= kChunkSize
;
2866 mStats
.committed
-= gChunkHeaderNumPages
- 1;
2869 // Remove run from the tree of available runs, so that the arena does not use
2870 // it. Dirty page flushing only uses the tree of dirty chunks, so leaving this
2871 // chunk in the chunks_* trees is sufficient for that purpose.
2872 mRunsAvail
.Remove(&aChunk
->map
[gChunkHeaderNumPages
]);
2874 arena_chunk_t
* chunk_dealloc
= mSpare
;
2876 return chunk_dealloc
;
2879 arena_run_t
* arena_t::AllocRun(size_t aSize
, bool aLarge
, bool aZero
) {
2881 arena_chunk_map_t
* mapelm
;
2882 arena_chunk_map_t key
;
2884 MOZ_ASSERT(aSize
<= gMaxLargeClass
);
2885 MOZ_ASSERT((aSize
& gPageSizeMask
) == 0);
2887 // Search the arena's chunks for the lowest best fit.
2888 key
.bits
= aSize
| CHUNK_MAP_KEY
;
2889 mapelm
= mRunsAvail
.SearchOrNext(&key
);
2891 arena_chunk_t
* chunk
= GetChunkForPtr(mapelm
);
2893 (uintptr_t(mapelm
) - uintptr_t(chunk
->map
)) / sizeof(arena_chunk_map_t
);
2895 run
= (arena_run_t
*)(uintptr_t(chunk
) + (pageind
<< gPageSize2Pow
));
2896 } else if (mSpare
) {
2898 arena_chunk_t
* chunk
= mSpare
;
2900 run
= (arena_run_t
*)(uintptr_t(chunk
) +
2901 (gChunkHeaderNumPages
<< gPageSize2Pow
));
2902 // Insert the run into the tree of available runs.
2903 mRunsAvail
.Insert(&chunk
->map
[gChunkHeaderNumPages
]);
2905 // No usable runs. Create a new chunk from which to allocate
2907 arena_chunk_t
* chunk
=
2908 (arena_chunk_t
*)chunk_alloc(kChunkSize
, kChunkSize
, false);
2913 InitChunk(chunk
, aSize
>> gPageSize2Pow
);
2914 run
= (arena_run_t
*)(uintptr_t(chunk
) +
2915 (gChunkHeaderNumPages
<< gPageSize2Pow
));
2918 return SplitRun(run
, aSize
, aLarge
, aZero
) ? run
: nullptr;
size_t arena_t::EffectiveMaxDirty() {
  int32_t modifier = gArenas.DefaultMaxDirtyPageModifier();
  if (modifier) {
    int32_t arenaOverride =
        modifier > 0 ? mMaxDirtyIncreaseOverride : mMaxDirtyDecreaseOverride;
    if (arenaOverride) {
      modifier = arenaOverride;
    }
  }

  return modifier >= 0 ? mMaxDirty << modifier : mMaxDirty >> -modifier;
}
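// Worked example (compiled out): the shift-based scaling above. A positive
// modifier doubles the dirty-page budget per step, a negative one halves it.
// The base budget of 1024 pages is hypothetical.
#if 0
#  include <cassert>
#  include <cstddef>
#  include <cstdint>

static void ExampleEffectiveMaxDirty() {
  const size_t kMaxDirty = 1024;
  auto scale = [&](int32_t modifier) {
    return modifier >= 0 ? kMaxDirty << modifier : kMaxDirty >> -modifier;
  };
  assert(scale(0) == 1024);
  assert(scale(2) == 4096);  // 1024 << 2
  assert(scale(-1) == 512);  // 1024 >> 1
}
#endif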
2934 #ifdef MALLOC_DECOMMIT
2936 size_t arena_t::ExtraCommitPages(size_t aReqPages
, size_t aRemainingPages
) {
2937 const int32_t modifier
= gArenas
.DefaultMaxDirtyPageModifier();
2942 // The maximum size of the page cache
2943 const size_t max_page_cache
= EffectiveMaxDirty();
2945 // The current size of the page cache, note that we use mNumFresh +
2946 // mNumMAdvised here but Purge() does not.
2947 const size_t page_cache
= mNumDirty
+ mNumFresh
+ mNumMAdvised
;
2949 if (page_cache
> max_page_cache
) {
2950 // We're already exceeding our dirty page count even though we're trying
2951 // to allocate. This can happen due to fragmentation. Don't commit
2952 // excess memory since we're probably here due to a larger allocation and
2953 // small amounts of memory are certainly available in the page cache.
2957 // If modifier is > 0 then we want to keep all the pages we can, but don't
2958 // exceed the size of the page cache. The subtraction cannot underflow
2959 // because of the condition above.
2960 return std::min(aRemainingPages
, max_page_cache
- page_cache
);
  // The rest is arbitrary and involves some assumptions. I've broken it down
2964 // into simple expressions to document them more clearly.
2966 // Assumption 1: a quarter of EffectiveMaxDirty() is a sensible "minimum
2967 // target" for the dirty page cache. Likewise 3 quarters is a sensible
2968 // "maximum target". Note that for the maximum we avoid using the whole page
  // cache now so that a free that follows this allocation doesn't immediately
2970 // call Purge (churning memory).
2971 const size_t min
= max_page_cache
/ 4;
2972 const size_t max
= 3 * max_page_cache
/ 4;
2974 // Assumption 2: Committing 32 pages at a time is sufficient to amortise
2975 // VirtualAlloc costs.
2976 size_t amortisation_threshold
= 32;
2978 // extra_pages is the number of additional pages needed to meet
2979 // amortisation_threshold.
2980 size_t extra_pages
= aReqPages
< amortisation_threshold
2981 ? amortisation_threshold
- aReqPages
2984 // If committing extra_pages isn't enough to hit the minimum target then
2986 if (page_cache
+ extra_pages
< min
) {
2987 extra_pages
= min
- page_cache
;
2988 } else if (page_cache
+ extra_pages
> max
) {
2989 // If committing extra_pages would exceed our maximum target then it may
2990 // still be useful to allocate extra pages. One of the reasons this can
2991 // happen could be fragmentation of the cache,
2993 // Therefore reduce the amortisation threshold so that we might allocate
2994 // some extra pages but avoid exceeding the dirty page cache.
2995 amortisation_threshold
/= 2;
2996 extra_pages
= std::min(aReqPages
< amortisation_threshold
2997 ? amortisation_threshold
- aReqPages
2999 max_page_cache
- page_cache
);
3002 // Cap extra_pages to aRemainingPages and adjust aRemainingPages. We will
3003 // commit at least this many extra pages.
3004 extra_pages
= std::min(extra_pages
, aRemainingPages
);
  // Finally if committing a small number of additional pages now can prevent
3007 // a small commit later then try to commit a little more now, provided we
3008 // don't exceed max_page_cache.
3009 if ((aRemainingPages
- extra_pages
) < amortisation_threshold
/ 2 &&
3010 (page_cache
+ aRemainingPages
) < max_page_cache
) {
3011 return aRemainingPages
;
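// Illustrative sketch (compiled out): the commit-amortisation heuristic above
// reduced to plain arithmetic. The constants mirror the assumptions documented
// in ExtraCommitPages (quarter/three-quarter targets, 32-page amortisation
// threshold); `page_cache` and `max_page_cache` are hypothetical inputs, and
// the final "avoid a tiny follow-up commit" step is omitted for brevity.
#if 0
#  include <algorithm>
#  include <cstddef>

static size_t ExampleExtraCommit(size_t aReqPages, size_t aRemainingPages,
                                 size_t page_cache, size_t max_page_cache) {
  if (page_cache > max_page_cache) {
    return 0;  // Already over budget: commit nothing extra.
  }
  const size_t min = max_page_cache / 4;
  const size_t max = 3 * max_page_cache / 4;
  size_t amortisation_threshold = 32;
  size_t extra_pages =
      aReqPages < amortisation_threshold ? amortisation_threshold - aReqPages
                                         : 0;
  if (page_cache + extra_pages < min) {
    extra_pages = min - page_cache;  // Top up to the minimum target.
  } else if (page_cache + extra_pages > max) {
    amortisation_threshold /= 2;  // Back off rather than blow the budget.
    extra_pages = std::min(
        aReqPages < amortisation_threshold ? amortisation_threshold - aReqPages
                                           : 0,
        max_page_cache - page_cache);
  }
  return std::min(extra_pages, aRemainingPages);
}
#endif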
3018 void arena_t::Purge(size_t aMaxDirty
) {
3019 arena_chunk_t
* chunk
;
3024 for (auto chunk
: mChunksDirty
.iter()) {
3025 ndirty
+= chunk
->ndirty
;
3027 MOZ_ASSERT(ndirty
== mNumDirty
);
3029 MOZ_DIAGNOSTIC_ASSERT(aMaxDirty
== 1 || (mNumDirty
> aMaxDirty
));
3031 // Iterate downward through chunks until enough dirty memory has been
3032 // purged. Terminate as soon as possible in order to minimize the
3033 // number of system calls, even if a chunk has only been partially
3035 while (mNumDirty
> (aMaxDirty
>> 1)) {
3036 #ifdef MALLOC_DOUBLE_PURGE
3037 bool madvised
= false;
3039 chunk
= mChunksDirty
.Last();
3040 MOZ_DIAGNOSTIC_ASSERT(chunk
);
3041 // Last page is DECOMMITTED as a guard page.
3042 MOZ_ASSERT((chunk
->map
[gChunkNumPages
- 1].bits
& CHUNK_MAP_DECOMMITTED
) !=
3044 for (i
= gChunkNumPages
- 2; chunk
->ndirty
> 0; i
--) {
3045 MOZ_DIAGNOSTIC_ASSERT(i
>= gChunkHeaderNumPages
);
3047 if (chunk
->map
[i
].bits
& CHUNK_MAP_DIRTY
) {
3048 #ifdef MALLOC_DECOMMIT
3049 const size_t free_operation
= CHUNK_MAP_DECOMMITTED
;
3051 const size_t free_operation
= CHUNK_MAP_MADVISED
;
3053 MOZ_ASSERT((chunk
->map
[i
].bits
&
3054 CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED
) == 0);
3055 chunk
->map
[i
].bits
^= free_operation
| CHUNK_MAP_DIRTY
;
3056 // Find adjacent dirty run(s).
3057 for (npages
= 1; i
> gChunkHeaderNumPages
&&
3058 (chunk
->map
[i
- 1].bits
& CHUNK_MAP_DIRTY
);
3061 MOZ_ASSERT((chunk
->map
[i
].bits
&
3062 CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED
) == 0);
3063 chunk
->map
[i
].bits
^= free_operation
| CHUNK_MAP_DIRTY
;
3065 chunk
->ndirty
-= npages
;
3066 mNumDirty
-= npages
;
3068 #ifdef MALLOC_DECOMMIT
3069 pages_decommit((void*)(uintptr_t(chunk
) + (i
<< gPageSize2Pow
)),
3070 (npages
<< gPageSize2Pow
));
3073 posix_madvise((void*)(uintptr_t(chunk
) + (i
<< gPageSize2Pow
)),
3074 (npages
<< gPageSize2Pow
), MADV_FREE
);
3076 madvise((void*)(uintptr_t(chunk
) + (i
<< gPageSize2Pow
)),
3077 (npages
<< gPageSize2Pow
), MADV_FREE
);
3079 mNumMAdvised
+= npages
;
3080 # ifdef MALLOC_DOUBLE_PURGE
3084 mStats
.committed
-= npages
;
3086 if (mNumDirty
<= (aMaxDirty
>> 1)) {
3092 if (chunk
->ndirty
== 0) {
3093 mChunksDirty
.Remove(chunk
);
3095 #ifdef MALLOC_DOUBLE_PURGE
3097 // The chunk might already be in the list, but this
3098 // makes sure it's at the front.
3099 if (mChunksMAdvised
.ElementProbablyInList(chunk
)) {
3100 mChunksMAdvised
.remove(chunk
);
3102 mChunksMAdvised
.pushFront(chunk
);
3108 arena_chunk_t
* arena_t::DallocRun(arena_run_t
* aRun
, bool aDirty
) {
3109 arena_chunk_t
* chunk
;
3110 size_t size
, run_ind
, run_pages
;
3112 chunk
= GetChunkForPtr(aRun
);
3113 run_ind
= (size_t)((uintptr_t(aRun
) - uintptr_t(chunk
)) >> gPageSize2Pow
);
3114 MOZ_DIAGNOSTIC_ASSERT(run_ind
>= gChunkHeaderNumPages
);
3115 MOZ_RELEASE_ASSERT(run_ind
< gChunkNumPages
- 1);
3116 if ((chunk
->map
[run_ind
].bits
& CHUNK_MAP_LARGE
) != 0) {
3117 size
= chunk
->map
[run_ind
].bits
& ~gPageSizeMask
;
3118 run_pages
= (size
>> gPageSize2Pow
);
3120 run_pages
= aRun
->mBin
->mRunSizePages
;
3121 size
= run_pages
<< gPageSize2Pow
;
3124 // Mark pages as unallocated in the chunk map.
3128 for (i
= 0; i
< run_pages
; i
++) {
3129 MOZ_DIAGNOSTIC_ASSERT((chunk
->map
[run_ind
+ i
].bits
& CHUNK_MAP_DIRTY
) ==
3131 chunk
->map
[run_ind
+ i
].bits
= CHUNK_MAP_DIRTY
;
3134 if (chunk
->ndirty
== 0) {
3135 mChunksDirty
.Insert(chunk
);
3137 chunk
->ndirty
+= run_pages
;
3138 mNumDirty
+= run_pages
;
3142 for (i
= 0; i
< run_pages
; i
++) {
3143 chunk
->map
[run_ind
+ i
].bits
&= ~(CHUNK_MAP_LARGE
| CHUNK_MAP_ALLOCATED
);
3146 chunk
->map
[run_ind
].bits
= size
| (chunk
->map
[run_ind
].bits
& gPageSizeMask
);
3147 chunk
->map
[run_ind
+ run_pages
- 1].bits
=
3148 size
| (chunk
->map
[run_ind
+ run_pages
- 1].bits
& gPageSizeMask
);
3150 // Try to coalesce forward.
3151 if (run_ind
+ run_pages
< gChunkNumPages
- 1 &&
3152 (chunk
->map
[run_ind
+ run_pages
].bits
& CHUNK_MAP_ALLOCATED
) == 0) {
3153 size_t nrun_size
= chunk
->map
[run_ind
+ run_pages
].bits
& ~gPageSizeMask
;
3155 // Remove successor from tree of available runs; the coalesced run is
3157 mRunsAvail
.Remove(&chunk
->map
[run_ind
+ run_pages
]);
3160 run_pages
= size
>> gPageSize2Pow
;
3162 MOZ_DIAGNOSTIC_ASSERT((chunk
->map
[run_ind
+ run_pages
- 1].bits
&
3163 ~gPageSizeMask
) == nrun_size
);
3164 chunk
->map
[run_ind
].bits
=
3165 size
| (chunk
->map
[run_ind
].bits
& gPageSizeMask
);
3166 chunk
->map
[run_ind
+ run_pages
- 1].bits
=
3167 size
| (chunk
->map
[run_ind
+ run_pages
- 1].bits
& gPageSizeMask
);
3170 // Try to coalesce backward.
3171 if (run_ind
> gChunkHeaderNumPages
&&
3172 (chunk
->map
[run_ind
- 1].bits
& CHUNK_MAP_ALLOCATED
) == 0) {
3173 size_t prun_size
= chunk
->map
[run_ind
- 1].bits
& ~gPageSizeMask
;
3175 run_ind
-= prun_size
>> gPageSize2Pow
;
3177 // Remove predecessor from tree of available runs; the coalesced run is
3179 mRunsAvail
.Remove(&chunk
->map
[run_ind
]);
3182 run_pages
= size
>> gPageSize2Pow
;
3184 MOZ_DIAGNOSTIC_ASSERT((chunk
->map
[run_ind
].bits
& ~gPageSizeMask
) ==
3186 chunk
->map
[run_ind
].bits
=
3187 size
| (chunk
->map
[run_ind
].bits
& gPageSizeMask
);
3188 chunk
->map
[run_ind
+ run_pages
- 1].bits
=
3189 size
| (chunk
->map
[run_ind
+ run_pages
- 1].bits
& gPageSizeMask
);
3192 // Insert into tree of available runs, now that coalescing is complete.
3193 mRunsAvail
.Insert(&chunk
->map
[run_ind
]);
3195 // Deallocate chunk if it is now completely unused.
3196 arena_chunk_t
* chunk_dealloc
= nullptr;
3197 if ((chunk
->map
[gChunkHeaderNumPages
].bits
&
3198 (~gPageSizeMask
| CHUNK_MAP_ALLOCATED
)) == gMaxLargeClass
) {
3199 chunk_dealloc
= DeallocChunk(chunk
);
3202 size_t maxDirty
= EffectiveMaxDirty();
3203 if (mNumDirty
> maxDirty
) {
3207 return chunk_dealloc
;
void arena_t::TrimRunHead(arena_chunk_t* aChunk, arena_run_t* aRun,
                          size_t aOldSize, size_t aNewSize) {
  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t head_npages = (aOldSize - aNewSize) >> gPageSize2Pow;

  MOZ_ASSERT(aOldSize > aNewSize);

  // Update the chunk map so that arena_t::RunDalloc() can treat the
  // leading run as separately allocated.
  aChunk->map[pageind].bits =
      (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
  aChunk->map[pageind + head_npages].bits =
      aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

  arena_chunk_t* no_chunk = DallocRun(aRun, false);
  // This will never release a chunk as there's still at least one allocated
  // run.
  MOZ_ASSERT(!no_chunk);
}

void arena_t::TrimRunTail(arena_chunk_t* aChunk, arena_run_t* aRun,
                          size_t aOldSize, size_t aNewSize, bool aDirty) {
  size_t pageind = (uintptr_t(aRun) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t npages = aNewSize >> gPageSize2Pow;

  MOZ_ASSERT(aOldSize > aNewSize);

  // Update the chunk map so that arena_t::RunDalloc() can treat the
  // trailing run as separately allocated.
  aChunk->map[pageind].bits = aNewSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
  aChunk->map[pageind + npages].bits =
      (aOldSize - aNewSize) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

  arena_chunk_t* no_chunk =
      DallocRun((arena_run_t*)(uintptr_t(aRun) + aNewSize), aDirty);

  // This will never release a chunk as there's still at least one allocated
  // run.
  MOZ_ASSERT(!no_chunk);
}
3256 arena_run_t
* arena_t::GetNonFullBinRun(arena_bin_t
* aBin
) {
3257 arena_chunk_map_t
* mapelm
;
3259 unsigned i
, remainder
;
3261 // Look for a usable run.
3262 mapelm
= aBin
->mNonFullRuns
.First();
3264 // run is guaranteed to have available space.
3265 aBin
->mNonFullRuns
.Remove(mapelm
);
3266 run
= (arena_run_t
*)(mapelm
->bits
& ~gPageSizeMask
);
3269 // No existing runs have any space available.
3271 // Allocate a new run.
3272 run
= AllocRun(static_cast<size_t>(aBin
->mRunSizePages
) << gPageSize2Pow
,
3277 // Don't initialize if a race in arena_t::RunAlloc() allowed an existing
3278 // run to become usable.
3279 if (run
== aBin
->mCurrentRun
) {
3283 // Initialize run internals.
3286 for (i
= 0; i
< aBin
->mRunNumRegionsMask
- 1; i
++) {
3287 run
->mRegionsMask
[i
] = UINT_MAX
;
3289 remainder
= aBin
->mRunNumRegions
& ((1U << (LOG2(sizeof(int)) + 3)) - 1);
3290 if (remainder
== 0) {
3291 run
->mRegionsMask
[i
] = UINT_MAX
;
3293 // The last element has spare bits that need to be unset.
3294 run
->mRegionsMask
[i
] =
3295 (UINT_MAX
>> ((1U << (LOG2(sizeof(int)) + 3)) - remainder
));
3298 run
->mRegionsMinElement
= 0;
3300 run
->mNumFree
= aBin
->mRunNumRegions
;
3301 #if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
3302 run
->mMagic
= ARENA_RUN_MAGIC
;
3309 void arena_bin_t::Init(SizeClass aSizeClass
) {
3310 size_t try_run_size
;
3311 unsigned try_nregs
, try_mask_nelms
, try_reg0_offset
;
3312 // Size of the run header, excluding mRegionsMask.
3313 static const size_t kFixedHeaderSize
= offsetof(arena_run_t
, mRegionsMask
);
3315 MOZ_ASSERT(aSizeClass
.Size() <= gMaxBinClass
);
3317 try_run_size
= gPageSize
;
3319 mCurrentRun
= nullptr;
3320 mNonFullRuns
.Init();
3321 mSizeClass
= aSizeClass
.Size();
3324 // Run size expansion loop.
3326 try_nregs
= ((try_run_size
- kFixedHeaderSize
) / mSizeClass
) +
3327 1; // Counter-act try_nregs-- in loop.
3329 // The do..while loop iteratively reduces the number of regions until
3330 // the run header and the regions no longer overlap. A closed formula
3331 // would be quite messy, since there is an interdependency between the
3332 // header's mask length and the number of regions.
3336 (try_nregs
>> (LOG2(sizeof(int)) + 3)) +
3337 ((try_nregs
& ((1U << (LOG2(sizeof(int)) + 3)) - 1)) ? 1 : 0);
3338 try_reg0_offset
= try_run_size
- (try_nregs
* mSizeClass
);
3339 } while (kFixedHeaderSize
+ (sizeof(unsigned) * try_mask_nelms
) >
3342 // Try to keep the run overhead below kRunOverhead.
3343 if (Fraction(try_reg0_offset
, try_run_size
) <= kRunOverhead
) {
3347 // If the overhead is larger than the size class, it means the size class
3348 // is small and doesn't align very well with the header. It's desirable to
3349 // have smaller run sizes for them, so relax the overhead requirement.
3350 if (try_reg0_offset
> mSizeClass
) {
3351 if (Fraction(try_reg0_offset
, try_run_size
) <= kRunRelaxedOverhead
) {
3356 // The run header includes one bit per region of the given size. For sizes
3357 // small enough, the number of regions is large enough that growing the run
3358 // size barely moves the needle for the overhead because of all those bits.
3359 // For example, for a size of 8 bytes, adding 4KiB to the run size adds
3360 // close to 512 bits to the header, which is 64 bytes.
3361 // With such overhead, there is no way to get to the wanted overhead above,
3362 // so we give up if the required size for mRegionsMask more than doubles the
3363 // size of the run header.
3364 if (try_mask_nelms
* sizeof(unsigned) >= kFixedHeaderSize
) {
3368 // If next iteration is going to be larger than the largest possible large
3369 // size class, then we didn't find a setup where the overhead is small
3370 // enough, and we can't do better than the current settings, so just use
3372 if (try_run_size
+ gPageSize
> gMaxLargeClass
) {
3376 // Try more aggressive settings.
3377 try_run_size
+= gPageSize
;
3380 MOZ_ASSERT(kFixedHeaderSize
+ (sizeof(unsigned) * try_mask_nelms
) <=
3382 MOZ_ASSERT((try_mask_nelms
<< (LOG2(sizeof(int)) + 3)) >= try_nregs
);
3384 // Copy final settings.
3385 MOZ_ASSERT((try_run_size
>> gPageSize2Pow
) <= UINT8_MAX
);
3386 mRunSizePages
= static_cast<uint8_t>(try_run_size
>> gPageSize2Pow
);
3387 mRunNumRegions
= try_nregs
;
3388 mRunNumRegionsMask
= try_mask_nelms
;
3389 mRunFirstRegionOffset
= try_reg0_offset
;
3390 mSizeDivisor
= FastDivisor
<uint16_t>(aSizeClass
.Size(), try_run_size
);
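// Worked example (compiled out): the overhead check driving the run-size
// search above, for a hypothetical 4 KiB run holding 48-byte regions with a
// 48-byte fixed header (the bitmask is ignored for simplicity). The unusable
// bytes are the header plus whatever is left after the last region; the loop
// grows the run until that fraction is small enough.
#if 0
#  include <cassert>
#  include <cstddef>

static void ExampleRunOverhead() {
  const size_t run_size = 4096;
  const size_t size_class = 48;
  const size_t fixed_header = 48;
  const size_t nregs = (run_size - fixed_header) / size_class;
  const size_t reg0_offset = run_size - nregs * size_class;
  // 84 regions fit; 64 bytes (about 1.6% of the run) are header + padding,
  // comfortably below a few-percent overhead target.
  assert(nregs == 84 && reg0_offset == 64);
}
#endif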
3393 void* arena_t::MallocSmall(size_t aSize
, bool aZero
) {
3397 SizeClass
sizeClass(aSize
);
3398 aSize
= sizeClass
.Size();
3400 switch (sizeClass
.Type()) {
3401 case SizeClass::Tiny
:
3402 bin
= &mBins
[FloorLog2(aSize
/ kMinTinyClass
)];
3404 case SizeClass::Quantum
:
3405 // Although we divide 2 things by kQuantum, the compiler will
3406 // reduce `kMinQuantumClass / kQuantum` and `kNumTinyClasses` to a
3408 bin
= &mBins
[kNumTinyClasses
+ (aSize
/ kQuantum
) -
3409 (kMinQuantumClass
/ kQuantum
)];
3411 case SizeClass::QuantumWide
:
3413 &mBins
[kNumTinyClasses
+ kNumQuantumClasses
+ (aSize
/ kQuantumWide
) -
3414 (kMinQuantumWideClass
/ kQuantumWide
)];
3416 case SizeClass::SubPage
:
3418 &mBins
[kNumTinyClasses
+ kNumQuantumClasses
+ kNumQuantumWideClasses
+
3419 (FloorLog2(aSize
) - LOG2(kMinSubPageClass
))];
3422 MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Unexpected size class type");
3424 MOZ_DIAGNOSTIC_ASSERT(aSize
== bin
->mSizeClass
);
3427 // Before we lock, we determine if we need to randomize the allocation
3428 // because if we do, we need to create the PRNG which might require
3429 // allocating memory (arc4random on OSX for example) and we need to
3430 // avoid the deadlock
3431 if (MOZ_UNLIKELY(mRandomizeSmallAllocations
&& mPRNG
== nullptr)) {
3432 // This is frustrating. Because the code backing RandomUint64 (arc4random
3433 // for example) may allocate memory, and because
    // mRandomizeSmallAllocations is true and we haven't yet initialized mPRNG,
3435 // we would re-enter this same case and cause a deadlock inside e.g.
3436 // arc4random. So we temporarily disable mRandomizeSmallAllocations to
3437 // skip this case and then re-enable it
3438 mRandomizeSmallAllocations
= false;
3439 mozilla::Maybe
<uint64_t> prngState1
= mozilla::RandomUint64();
3440 mozilla::Maybe
<uint64_t> prngState2
= mozilla::RandomUint64();
3442 base_alloc(sizeof(mozilla::non_crypto::XorShift128PlusRNG
));
3443 mPRNG
= new (backing
) mozilla::non_crypto::XorShift128PlusRNG(
3444 prngState1
.valueOr(0), prngState2
.valueOr(0));
3445 mRandomizeSmallAllocations
= true;
3447 MOZ_ASSERT(!mRandomizeSmallAllocations
|| mPRNG
);
3449 MaybeMutexAutoLock
lock(mLock
);
3450 run
= bin
->mCurrentRun
;
3451 if (MOZ_UNLIKELY(!run
|| run
->mNumFree
== 0)) {
3452 run
= bin
->mCurrentRun
= GetNonFullBinRun(bin
);
3454 if (MOZ_UNLIKELY(!run
)) {
3457 MOZ_DIAGNOSTIC_ASSERT(run
->mMagic
== ARENA_RUN_MAGIC
);
3458 MOZ_DIAGNOSTIC_ASSERT(run
->mNumFree
> 0);
3459 ret
= ArenaRunRegAlloc(run
, bin
);
3460 MOZ_DIAGNOSTIC_ASSERT(ret
);
3466 mStats
.allocated_small
+= aSize
;
3470 ApplyZeroOrJunk(ret
, aSize
);
3472 memset(ret
, 0, aSize
);
void* arena_t::MallocLarge(size_t aSize, bool aZero) {
  void* ret;

  // Large allocation.
  aSize = PAGE_CEILING(aSize);

  {
    MaybeMutexAutoLock lock(mLock);
    ret = AllocRun(aSize, true, aZero);
    if (!ret) {
      return nullptr;
    }
    mStats.allocated_large += aSize;
  }

  if (!aZero) {
    ApplyZeroOrJunk(ret, aSize);
  }

  return ret;
}
void* arena_t::Malloc(size_t aSize, bool aZero) {
  MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
  MOZ_ASSERT(aSize != 0);

  if (aSize <= gMaxBinClass) {
    return MallocSmall(aSize, aZero);
  }
  if (aSize <= gMaxLargeClass) {
    return MallocLarge(aSize, aZero);
  }
  return MallocHuge(aSize, aZero);
}
3513 // Only handles large allocations that require more than page alignment.
3514 void* arena_t::PallocLarge(size_t aAlignment
, size_t aSize
, size_t aAllocSize
) {
3517 arena_chunk_t
* chunk
;
3519 MOZ_ASSERT((aSize
& gPageSizeMask
) == 0);
3520 MOZ_ASSERT((aAlignment
& gPageSizeMask
) == 0);
3523 MaybeMutexAutoLock
lock(mLock
);
3524 ret
= AllocRun(aAllocSize
, true, false);
3529 chunk
= GetChunkForPtr(ret
);
3531 offset
= uintptr_t(ret
) & (aAlignment
- 1);
3532 MOZ_ASSERT((offset
& gPageSizeMask
) == 0);
3533 MOZ_ASSERT(offset
< aAllocSize
);
3535 TrimRunTail(chunk
, (arena_run_t
*)ret
, aAllocSize
, aSize
, false);
3537 size_t leadsize
, trailsize
;
3539 leadsize
= aAlignment
- offset
;
3541 TrimRunHead(chunk
, (arena_run_t
*)ret
, aAllocSize
,
3542 aAllocSize
- leadsize
);
3543 ret
= (void*)(uintptr_t(ret
) + leadsize
);
3546 trailsize
= aAllocSize
- leadsize
- aSize
;
3547 if (trailsize
!= 0) {
3548 // Trim trailing space.
3549 MOZ_ASSERT(trailsize
< aAllocSize
);
3550 TrimRunTail(chunk
, (arena_run_t
*)ret
, aSize
+ trailsize
, aSize
, false);
3554 mStats
.allocated_large
+= aSize
;
3557 ApplyZeroOrJunk(ret
, aSize
);
3561 void* arena_t::Palloc(size_t aAlignment
, size_t aSize
) {
3565 // Round size up to the nearest multiple of alignment.
3567 // This done, we can take advantage of the fact that for each small
3568 // size class, every object is aligned at the smallest power of two
3569 // that is non-zero in the base two representation of the size. For
3572 // Size | Base 2 | Minimum alignment
3573 // -----+----------+------------------
3574 // 96 | 1100000 | 32
3575 // 144 | 10100000 | 32
3576 // 192 | 11000000 | 64
3578 // Depending on runtime settings, it is possible that arena_malloc()
3579 // will further round up to a power of two, but that never causes
3580 // correctness issues.
3581 ceil_size
= ALIGNMENT_CEILING(aSize
, aAlignment
);
3583 // (ceil_size < aSize) protects against the combination of maximal
3584 // alignment and size greater than maximal alignment.
3585 if (ceil_size
< aSize
) {
3590 if (ceil_size
<= gPageSize
||
3591 (aAlignment
<= gPageSize
&& ceil_size
<= gMaxLargeClass
)) {
3592 ret
= Malloc(ceil_size
, false);
3596 // We can't achieve sub-page alignment, so round up alignment
3597 // permanently; it makes later calculations simpler.
3598 aAlignment
= PAGE_CEILING(aAlignment
);
3599 ceil_size
= PAGE_CEILING(aSize
);
3601 // (ceil_size < aSize) protects against very large sizes within
3602 // pagesize of SIZE_T_MAX.
3604 // (ceil_size + aAlignment < ceil_size) protects against the
3605 // combination of maximal alignment and ceil_size large enough
3606 // to cause overflow. This is similar to the first overflow
3607 // check above, but it needs to be repeated due to the new
3608 // ceil_size value, which may now be *equal* to maximal
3609 // alignment, whereas before we only detected overflow if the
3610 // original size was *greater* than maximal alignment.
3611 if (ceil_size
< aSize
|| ceil_size
+ aAlignment
< ceil_size
) {
3616 // Calculate the size of the over-size run that arena_palloc()
3617 // would need to allocate in order to guarantee the alignment.
3618 if (ceil_size
>= aAlignment
) {
3619 run_size
= ceil_size
+ aAlignment
- gPageSize
;
3621 // It is possible that (aAlignment << 1) will cause
3622 // overflow, but it doesn't matter because we also
3623 // subtract pagesize, which in the case of overflow
3624 // leaves us with a very large run_size. That causes
3625 // the first conditional below to fail, which means
3626 // that the bogus run_size value never gets used for
3627 // anything important.
3628 run_size
= (aAlignment
<< 1) - gPageSize
;
3631 if (run_size
<= gMaxLargeClass
) {
3632 ret
= PallocLarge(aAlignment
, ceil_size
, run_size
);
3633 } else if (aAlignment
<= kChunkSize
) {
3634 ret
= MallocHuge(ceil_size
, false);
3636 ret
= PallocHuge(ceil_size
, aAlignment
, false);
3640 MOZ_ASSERT((uintptr_t(ret
) & (aAlignment
- 1)) == 0);
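// Worked example (compiled out): the "round the size up to a multiple of the
// alignment" trick used by Palloc for sub-page alignments, matching the size/
// minimum-alignment table above. Rounding a 100-byte request up to a 32-byte
// multiple guarantees that every region of the resulting size class starts on
// a 32-byte boundary when regions are packed from an aligned base.
#if 0
static_assert(ALIGNMENT_CEILING(100, 32) == 128);
static_assert(ALIGNMENT_CEILING(100, 64) == 128);
static_assert(ALIGNMENT_CEILING(130, 64) == 192);
#endif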
3646 template <bool Validate
= false>
3647 static inline AllocInfo
Get(const void* aPtr
) {
3648 // If the allocator is not initialized, the pointer can't belong to it.
3649 if (Validate
&& !malloc_initialized
) {
3653 auto chunk
= GetChunkForPtr(aPtr
);
3655 if (!chunk
|| !gChunkRTree
.Get(chunk
)) {
3660 if (chunk
!= aPtr
) {
3661 MOZ_DIAGNOSTIC_ASSERT(chunk
->arena
->mMagic
== ARENA_MAGIC
);
3662 size_t pageind
= (((uintptr_t)aPtr
- (uintptr_t)chunk
) >> gPageSize2Pow
);
3663 return GetInChunk(aPtr
, chunk
, pageind
);
3670 MutexAutoLock
lock(huge_mtx
);
3671 extent_node_t
* node
= huge
.Search(&key
);
3672 if (Validate
&& !node
) {
3675 return AllocInfo(node
->mSize
, node
);
  // Get the allocation information for a pointer we know is within a chunk
  // (Small or large, not huge).
  static inline AllocInfo GetInChunk(const void* aPtr, arena_chunk_t* aChunk,
                                     size_t pageind) {
    size_t mapbits = aChunk->map[pageind].bits;
    MOZ_DIAGNOSTIC_ASSERT((mapbits & CHUNK_MAP_ALLOCATED) != 0);

    size_t size;
    if ((mapbits & CHUNK_MAP_LARGE) == 0) {
      arena_run_t* run = (arena_run_t*)(mapbits & ~gPageSizeMask);
      MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
      size = run->mBin->mSizeClass;
    } else {
      size = mapbits & ~gPageSizeMask;
      MOZ_DIAGNOSTIC_ASSERT(size != 0);
    }

    return AllocInfo(size, aChunk);
  }
  // Validate ptr before assuming that it points to an allocation. Currently,
  // the following validation is performed:
  //
  // + Check that ptr is not nullptr.
  //
  // + Check that ptr lies within a mapped chunk.
  static inline AllocInfo GetValidated(const void* aPtr) {
    return Get<true>(aPtr);
  }
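#if 0
  // Illustrative usage sketch only (not compiled): UsableSizeOf is a
  // hypothetical helper showing how size queries such as
  // malloc_usable_size() further down in this file use this class.
  static size_t UsableSizeOf(const void* aPtr) {
    AllocInfo info = AllocInfo::GetValidated(aPtr);
    return info.IsValid() ? info.Size() : 0;
  }
#endif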
  AllocInfo() : mSize(0), mChunk(nullptr) {}

  explicit AllocInfo(size_t aSize, arena_chunk_t* aChunk)
      : mSize(aSize), mChunk(aChunk) {
    MOZ_ASSERT(mSize <= gMaxLargeClass);
  }

  explicit AllocInfo(size_t aSize, extent_node_t* aNode)
      : mSize(aSize), mNode(aNode) {
    MOZ_ASSERT(mSize > gMaxLargeClass);
  }
  size_t Size() { return mSize; }

  arena_t* Arena() {
    if (mSize <= gMaxLargeClass) {
      return mChunk->arena;
    }
    // Best effort detection that we're not trying to access an already
    // disposed arena. In the case of a disposed arena, the memory location
    // pointed by mNode->mArena is either free (but still a valid memory
    // region, per TypedBaseAlloc<arena_t>), in which case its id was reset,
    // or has been reallocated for a new region, and its id is very likely
    // different (per randomness). In both cases, the id is unlikely to
    // match what it was for the disposed arena.
    MOZ_RELEASE_ASSERT(mNode->mArenaId == mNode->mArena->mId);
    return mNode->mArena;
  }

  bool IsValid() const { return !!mSize; }
  // Pointer to the chunk associated with the allocation for small
  // and large allocations.
  arena_chunk_t* mChunk;

  // Pointer to the extent node for huge allocations.
  extent_node_t* mNode;
inline void MozJemalloc::jemalloc_ptr_info(const void* aPtr,
                                           jemalloc_ptr_info_t* aInfo) {
  arena_chunk_t* chunk = GetChunkForPtr(aPtr);

  // Is the pointer null, or within one chunk's size of null?
  // Alternatively, if the allocator is not initialized yet, the pointer
  // can't belong to it.
  if (!chunk || !malloc_initialized) {
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }
  // Look for huge allocations before looking for |chunk| in gChunkRTree.
  // This is necessary because |chunk| won't be in gChunkRTree if it's
  // the second or subsequent chunk in a huge allocation.
  extent_node_t* node;
  extent_node_t key;
  {
    MutexAutoLock lock(huge_mtx);
    key.mAddr = const_cast<void*>(aPtr);
    // Search by bounds so that interior pointers are also found.
    node =
        reinterpret_cast<RedBlackTree<extent_node_t, ExtentTreeBoundsTrait>*>(
            &huge)
            ->Search(&key);
    if (node) {
      *aInfo = {TagLiveAlloc, node->mAddr, node->mSize, node->mArena->mId};
      return;
    }
  }
  // It's not a huge allocation. Check if we have a known chunk.
  if (!gChunkRTree.Get(chunk)) {
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  MOZ_DIAGNOSTIC_ASSERT(chunk->arena->mMagic == ARENA_MAGIC);

  // Get the page number within the chunk.
  size_t pageind = (((uintptr_t)aPtr - (uintptr_t)chunk) >> gPageSize2Pow);
  if (pageind < gChunkHeaderNumPages) {
    // Within the chunk header.
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }
  size_t mapbits = chunk->map[pageind].bits;

  if (!(mapbits & CHUNK_MAP_ALLOCATED)) {
    void* pageaddr = (void*)(uintptr_t(aPtr) & ~gPageSizeMask);
    *aInfo = {TagFreedPage, pageaddr, gPageSize, chunk->arena->mId};
    return;
  }
  if (mapbits & CHUNK_MAP_LARGE) {
    // It's a large allocation. Only the first page of a large
    // allocation contains its size, so if the address is not in
    // the first page, scan back to find the allocation size.
    size_t size;
    while (true) {
      size = mapbits & ~gPageSizeMask;
      if (size != 0) {
        break;
      }

      // The following two return paths shouldn't occur in
      // practice unless there is heap corruption.
      pageind--;
      MOZ_DIAGNOSTIC_ASSERT(pageind >= gChunkHeaderNumPages);
      if (pageind < gChunkHeaderNumPages) {
        *aInfo = {TagUnknown, nullptr, 0, 0};
        return;
      }

      mapbits = chunk->map[pageind].bits;
      MOZ_DIAGNOSTIC_ASSERT(mapbits & CHUNK_MAP_LARGE);
      if (!(mapbits & CHUNK_MAP_LARGE)) {
        *aInfo = {TagUnknown, nullptr, 0, 0};
        return;
      }
    }

    void* addr = ((char*)chunk) + (pageind << gPageSize2Pow);
    *aInfo = {TagLiveAlloc, addr, size, chunk->arena->mId};
    return;
  }
  // It must be a small allocation.
  auto run = (arena_run_t*)(mapbits & ~gPageSizeMask);
  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);

  // The allocation size is stored in the run metadata.
  size_t size = run->mBin->mSizeClass;

  // Address of the first possible pointer in the run after its headers.
  uintptr_t reg0_addr = (uintptr_t)run + run->mBin->mRunFirstRegionOffset;
  if (aPtr < (void*)reg0_addr) {
    // In the run header.
    *aInfo = {TagUnknown, nullptr, 0, 0};
    return;
  }

  // Position in the run.
  unsigned regind = ((uintptr_t)aPtr - reg0_addr) / size;

  // Pointer to the allocation's base address.
  void* addr = (void*)(reg0_addr + regind * size);

  // Check if the allocation has been freed.
  unsigned elm = regind >> (LOG2(sizeof(int)) + 3);
  unsigned bit = regind - (elm << (LOG2(sizeof(int)) + 3));
  PtrInfoTag tag =
      ((run->mRegionsMask[elm] & (1U << bit))) ? TagFreedAlloc : TagLiveAlloc;

  *aInfo = {tag, addr, size, chunk->arena->mId};
}
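// Worked example for the bitmask arithmetic above (illustrative only): with
// 32-bit ints, LOG2(sizeof(int)) + 3 == 5, so region index 70 maps to
// elm = 70 >> 5 = 2 and bit = 70 - (2 << 5) = 6; the region is reported as
// freed iff bit 6 of run->mRegionsMask[2] is set.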
// Helper for debuggers. We don't want it to be inlined and optimized out.
MOZ_NEVER_INLINE jemalloc_ptr_info_t* jemalloc_ptr_info(const void* aPtr) {
  static jemalloc_ptr_info_t info;
  MozJemalloc::jemalloc_ptr_info(aPtr, &info);
  return &info;
}

}  // namespace Debug
arena_chunk_t* arena_t::DallocSmall(arena_chunk_t* aChunk, void* aPtr,
                                    arena_chunk_map_t* aMapElm) {
  arena_run_t* run;
  arena_bin_t* bin;
  size_t size;

  run = (arena_run_t*)(aMapElm->bits & ~gPageSizeMask);
  MOZ_DIAGNOSTIC_ASSERT(run->mMagic == ARENA_RUN_MAGIC);
  bin = run->mBin;
  size = bin->mSizeClass;
  MOZ_DIAGNOSTIC_ASSERT(uintptr_t(aPtr) >=
                        uintptr_t(run) + bin->mRunFirstRegionOffset);

  arena_run_reg_dalloc(run, bin, aPtr, size);

  arena_chunk_t* dealloc_chunk = nullptr;
  if (run->mNumFree == bin->mRunNumRegions) {
    // Deallocate the run.
    if (run == bin->mCurrentRun) {
      bin->mCurrentRun = nullptr;
    } else if (bin->mRunNumRegions != 1) {
      size_t run_pageind =
          (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
      arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];

      // This block's conditional is necessary because if the
      // run only contains one region, then it never gets
      // inserted into the non-full runs tree.
      MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == run_mapelm);
      bin->mNonFullRuns.Remove(run_mapelm);
    }
#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
    run->mMagic = 0;
#endif
    dealloc_chunk = DallocRun(run, true);
  } else if (run->mNumFree == 1 && run != bin->mCurrentRun) {
    // Make sure that bin->mCurrentRun always refers to the lowest
    // non-full run, if one exists.
    if (!bin->mCurrentRun) {
      bin->mCurrentRun = run;
    } else if (uintptr_t(run) < uintptr_t(bin->mCurrentRun)) {
      // Switch mCurrentRun.
      if (bin->mCurrentRun->mNumFree > 0) {
        arena_chunk_t* runcur_chunk = GetChunkForPtr(bin->mCurrentRun);
        size_t runcur_pageind =
            (uintptr_t(bin->mCurrentRun) - uintptr_t(runcur_chunk)) >>
            gPageSize2Pow;
        arena_chunk_map_t* runcur_mapelm = &runcur_chunk->map[runcur_pageind];

        MOZ_DIAGNOSTIC_ASSERT(!bin->mNonFullRuns.Search(runcur_mapelm));
        bin->mNonFullRuns.Insert(runcur_mapelm);
      }
      bin->mCurrentRun = run;
    } else {
      size_t run_pageind =
          (uintptr_t(run) - uintptr_t(aChunk)) >> gPageSize2Pow;
      arena_chunk_map_t* run_mapelm = &aChunk->map[run_pageind];

      MOZ_DIAGNOSTIC_ASSERT(bin->mNonFullRuns.Search(run_mapelm) == nullptr);
      bin->mNonFullRuns.Insert(run_mapelm);
    }
  }
  mStats.allocated_small -= size;

  return dealloc_chunk;
}
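// Illustrative note: the chunk pointer returned above is a deferred
// deallocation. The caller (arena_dalloc() below) drops the arena lock first
// and only then releases the chunk, roughly:
//
//   arena_chunk_t* delayed = arena->DallocSmall(chunk, ptr, mapelm);
//   // ... arena lock released ...
//   if (delayed) {
//     chunk_dealloc((void*)delayed, kChunkSize, ARENA_CHUNK);
//   }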
arena_chunk_t* arena_t::DallocLarge(arena_chunk_t* aChunk, void* aPtr) {
  MOZ_DIAGNOSTIC_ASSERT((uintptr_t(aPtr) & gPageSizeMask) == 0);
  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t size = aChunk->map[pageind].bits & ~gPageSizeMask;

  mStats.allocated_large -= size;

  return DallocRun((arena_run_t*)aPtr, true);
}
static inline void arena_dalloc(void* aPtr, size_t aOffset, arena_t* aArena) {
  MOZ_ASSERT(aOffset != 0);
  MOZ_ASSERT(GetChunkOffsetForPtr(aPtr) == aOffset);

  auto chunk = (arena_chunk_t*)((uintptr_t)aPtr - aOffset);
  auto arena = chunk->arena;
  MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
  MOZ_RELEASE_ASSERT(!aArena || arena == aArena);

  size_t pageind = aOffset >> gPageSize2Pow;

  AllocInfo info = AllocInfo::GetInChunk(aPtr, chunk, pageind);
  MOZ_ASSERT(info.IsValid());
  MaybePoison(aPtr, info.Size());

  arena_chunk_t* chunk_dealloc_delay = nullptr;
  {
    MaybeMutexAutoLock lock(arena->mLock);
    arena_chunk_map_t* mapelm = &chunk->map[pageind];
    MOZ_RELEASE_ASSERT(
        (mapelm->bits &
         (CHUNK_MAP_FRESH_MADVISED_OR_DECOMMITTED | CHUNK_MAP_ZEROED)) == 0,
        "Freeing in a page with bad bits.");
    MOZ_RELEASE_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0,
                       "Double-free?");
    if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
      // Small allocation.
      chunk_dealloc_delay = arena->DallocSmall(chunk, aPtr, mapelm);
    } else {
      // Large allocation.
      chunk_dealloc_delay = arena->DallocLarge(chunk, aPtr);
    }
  }

  if (chunk_dealloc_delay) {
    chunk_dealloc((void*)chunk_dealloc_delay, kChunkSize, ARENA_CHUNK);
  }
}
static inline void idalloc(void* ptr, arena_t* aArena) {
  size_t offset;

  MOZ_ASSERT(ptr);

  offset = GetChunkOffsetForPtr(ptr);
  if (offset != 0) {
    arena_dalloc(ptr, offset, aArena);
  } else {
    huge_dalloc(ptr, aArena);
  }
}
void arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                                size_t aOldSize) {
  MOZ_ASSERT(aSize < aOldSize);

  // Shrink the run, and make trailing pages available for other
  // allocations.
  MaybeMutexAutoLock lock(mLock);
  TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
  mStats.allocated_large -= aOldSize - aSize;
}
// Returns whether reallocation was successful.
bool arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                              size_t aOldSize) {
  size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> gPageSize2Pow;
  size_t npages = aOldSize >> gPageSize2Pow;

  MaybeMutexAutoLock lock(mLock);
  MOZ_DIAGNOSTIC_ASSERT(aOldSize ==
                        (aChunk->map[pageind].bits & ~gPageSizeMask));

  // Try to extend the run.
  MOZ_ASSERT(aSize > aOldSize);
  if (pageind + npages < gChunkNumPages - 1 &&
      (aChunk->map[pageind + npages].bits & CHUNK_MAP_ALLOCATED) == 0 &&
      (aChunk->map[pageind + npages].bits & ~gPageSizeMask) >=
          aSize - aOldSize) {
    // The next run is available and sufficiently large. Split the
    // following run, then merge the first part with the existing
    // allocation.
    if (!SplitRun((arena_run_t*)(uintptr_t(aChunk) +
                                 ((pageind + npages) << gPageSize2Pow)),
                  aSize - aOldSize, true, false)) {
      return false;
    }

    aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
    aChunk->map[pageind + npages].bits = CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

    mStats.allocated_large += aSize - aOldSize;
    return true;
  }

  return false;
}
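// Illustrative example (assuming 4 KiB pages): growing a 16 KiB large
// allocation to 24 KiB in place succeeds only when the two pages immediately
// after its run are unallocated in the same chunk and SplitRun() can claim
// them; otherwise the caller falls back to the malloc-copy-free path in
// RallocSmallOrLarge() below.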
void* arena_t::RallocSmallOrLarge(void* aPtr, size_t aSize, size_t aOldSize) {
  void* ret;
  size_t copysize;
  SizeClass sizeClass(aSize);

  // Try to avoid moving the allocation.
  if (aOldSize <= gMaxLargeClass && sizeClass.Size() == aOldSize) {
    if (aSize < aOldSize) {
      MaybePoison((void*)(uintptr_t(aPtr) + aSize), aOldSize - aSize);
    }
    return aPtr;
  }
  if (sizeClass.Type() == SizeClass::Large && aOldSize > gMaxBinClass &&
      aOldSize <= gMaxLargeClass) {
    arena_chunk_t* chunk = GetChunkForPtr(aPtr);
    if (sizeClass.Size() < aOldSize) {
      // Fill before shrinking in order to avoid a race.
      MaybePoison((void*)((uintptr_t)aPtr + aSize), aOldSize - aSize);
      RallocShrinkLarge(chunk, aPtr, sizeClass.Size(), aOldSize);
      return aPtr;
    }
    if (RallocGrowLarge(chunk, aPtr, sizeClass.Size(), aOldSize)) {
      ApplyZeroOrJunk((void*)((uintptr_t)aPtr + aOldSize), aSize - aOldSize);
      return aPtr;
    }
  }
  // If we get here, then aSize and aOldSize are different enough that we
  // need to move the object. In that case, fall back to allocating new
  // space and copying. Allow non-private arenas to switch arenas.
  ret = (mIsPrivate ? this : choose_arena(aSize))->Malloc(aSize, false);
  if (!ret) {
    return nullptr;
  }

  // Junk/zero-filling were already done by arena_t::Malloc().
  copysize = (aSize < aOldSize) ? aSize : aOldSize;
  if (copysize >= VM_COPY_MIN) {
    pages_copy(ret, aPtr, copysize);
  } else {
    memcpy(ret, aPtr, copysize);
  }
  idalloc(aPtr, this);
  return ret;
}
void* arena_t::Ralloc(void* aPtr, size_t aSize, size_t aOldSize) {
  MOZ_DIAGNOSTIC_ASSERT(mMagic == ARENA_MAGIC);
  MOZ_ASSERT(aPtr);
  MOZ_ASSERT(aSize != 0);

  return (aSize <= gMaxLargeClass) ? RallocSmallOrLarge(aPtr, aSize, aOldSize)
                                   : RallocHuge(aPtr, aSize, aOldSize);
}
void* arena_t::operator new(size_t aCount, const fallible_t&) noexcept {
  MOZ_ASSERT(aCount == sizeof(arena_t));
  return TypedBaseAlloc<arena_t>::alloc();
}

void arena_t::operator delete(void* aPtr) {
  TypedBaseAlloc<arena_t>::dealloc((arena_t*)aPtr);
}
arena_t::arena_t(arena_params_t* aParams, bool aIsPrivate) {
  unsigned i;

  memset(&mLink, 0, sizeof(mLink));
  memset(&mStats, 0, sizeof(arena_stats_t));

  // Initialize chunks.
  mChunksDirty.Init();
#ifdef MALLOC_DOUBLE_PURGE
  new (&mChunksMAdvised) DoublyLinkedList<arena_chunk_t>();
#endif

  mRandomizeSmallAllocations = opt_randomize_small;
  MaybeMutex::DoLock doLock = MaybeMutex::MUST_LOCK;
  if (aParams) {
    uint32_t randFlags = aParams->mFlags & ARENA_FLAG_RANDOMIZE_SMALL_MASK;
    switch (randFlags) {
      case ARENA_FLAG_RANDOMIZE_SMALL_ENABLED:
        mRandomizeSmallAllocations = true;
        break;
      case ARENA_FLAG_RANDOMIZE_SMALL_DISABLED:
        mRandomizeSmallAllocations = false;
        break;
      case ARENA_FLAG_RANDOMIZE_SMALL_DEFAULT:
      default:
        break;
    }
    uint32_t threadFlags = aParams->mFlags & ARENA_FLAG_THREAD_MASK;
    if (threadFlags == ARENA_FLAG_THREAD_MAIN_THREAD_ONLY) {
      // At the moment we require that any ARENA_FLAG_THREAD_MAIN_THREAD_ONLY
      // arenas are created and therefore always accessed by the main thread.
      // This is for two reasons:
      // * it allows jemalloc_stats to read their statistics (we also require
      //   that jemalloc_stats is only used on the main thread).
      // * Only main-thread or threadsafe arenas can be guaranteed to be in a
      //   consistent state after a fork() from the main thread. If fork()
      //   occurs off-thread then the new child process cannot use these arenas
      //   (new children should usually exec() or exit() since other data may
      //   also be inconsistent).
      MOZ_ASSERT(gArenas.IsOnMainThread());
      MOZ_ASSERT(aIsPrivate);
      doLock = MaybeMutex::AVOID_LOCK_UNSAFE;
    }
    mMaxDirtyIncreaseOverride = aParams->mMaxDirtyIncreaseOverride;
    mMaxDirtyDecreaseOverride = aParams->mMaxDirtyDecreaseOverride;
  } else {
    mMaxDirtyIncreaseOverride = 0;
    mMaxDirtyDecreaseOverride = 0;
  }

  MOZ_RELEASE_ASSERT(mLock.Init(doLock));
  mIsPrivate = aIsPrivate;

  // The default maximum amount of dirty pages allowed on arenas is a fraction
  // of opt_dirty_max.
  mMaxDirty = (aParams && aParams->mMaxDirty) ? aParams->mMaxDirty
                                              : (opt_dirty_max / 8);
  // Initialize bins.
  SizeClass sizeClass(1);

  for (i = 0;; i++) {
    arena_bin_t& bin = mBins[i];
    bin.Init(sizeClass);

    // SizeClass doesn't want sizes larger than gMaxBinClass for now.
    if (sizeClass.Size() == gMaxBinClass) {
      break;
    }
    sizeClass = sizeClass.Next();
  }
  MOZ_ASSERT(i == NUM_SMALL_CLASSES - 1);

#if defined(MOZ_DIAGNOSTIC_ASSERT_ENABLED)
  mMagic = ARENA_MAGIC;
#endif
}
arena_t::~arena_t() {
  size_t i;
  {
    MaybeMutexAutoLock lock(mLock);
    MOZ_RELEASE_ASSERT(!mLink.Left() && !mLink.Right(),
                       "Arena is still registered");
    MOZ_RELEASE_ASSERT(!mStats.allocated_small && !mStats.allocated_large,
                       "Arena is not empty");
    if (mSpare) {
      chunk_dealloc(mSpare, kChunkSize, ARENA_CHUNK);
    }
    for (i = 0; i < NUM_SMALL_CLASSES; i++) {
      MOZ_RELEASE_ASSERT(!mBins[i].mNonFullRuns.First(), "Bin is not empty");
    }
  }
  {
    MutexAutoLock lock(huge_mtx);
    // This is an expensive check, so we only do it on debug builds.
    for (auto node : huge.iter()) {
      MOZ_RELEASE_ASSERT(node->mArenaId != mId, "Arena has huge allocations");
    }
  }
}
arena_t* ArenaCollection::CreateArena(bool aIsPrivate,
                                      arena_params_t* aParams) {
  arena_t* ret = new (fallible) arena_t(aParams, aIsPrivate);
  if (!ret) {
    // Only reached if there is an OOM error.

    // OOM here is quite inconvenient to propagate, since dealing with it
    // would require a check for failure in the fast path. Instead, punt
    // by using the first arena.
    // In practice, this is an extremely unlikely failure.
    _malloc_message(_getprogname(), ": (malloc) Error initializing arena\n");

    return mDefaultArena;
  }

  MutexAutoLock lock(mLock);

  // For public arenas, it's fine to just use an incrementing arena id.
  if (!aIsPrivate) {
    ret->mId = mLastPublicArenaId++;
    mArenas.Insert(ret);
    return ret;
  }

  // For private arenas, generate a cryptographically-secure random id for the
  // new arena. If an attacker manages to get control of the process, this
  // should make it more difficult for them to "guess" the ID of a memory
  // arena, stopping them from getting data they may want.
  Tree& tree = (ret->IsMainThreadOnly()) ? mMainThreadArenas : mPrivateArenas;
  arena_id_t arena_id;
  do {
    arena_id = MakeRandArenaId(ret->IsMainThreadOnly());
    // Keep looping until we ensure that the random number we just generated
    // isn't already in use by another active arena.
  } while (GetByIdInternal(tree, arena_id));

  ret->mId = arena_id;
arena_id_t ArenaCollection::MakeRandArenaId(bool aIsMainThreadOnly) const {
  uint64_t rand;
  do {
    mozilla::Maybe<uint64_t> maybeRandomId = mozilla::RandomUint64();
    MOZ_RELEASE_ASSERT(maybeRandomId.isSome());

    rand = maybeRandomId.value();

    // Set or clear the least significant bit depending on if this is a
    // main-thread-only arena. We use this in GetById.
    if (aIsMainThreadOnly) {
      rand = rand | MAIN_THREAD_ARENA_BIT;
    } else {
      rand = rand & ~MAIN_THREAD_ARENA_BIT;
    }

    // Avoid 0 as an arena Id. We use 0 for disposed arenas.
  } while (rand == 0);

  return arena_id_t(rand);
}
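// Illustrative note: because the low bit doubles as a tag, a later lookup can
// tell which tree to search without taking a lock, roughly:
//
//   bool isMainThreadOnly = (arenaId & MAIN_THREAD_ARENA_BIT) != 0;
//
// which is what ArenaIdIsMainThreadOnly()/GetById() rely on further down.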
// ***************************************************************************
// Begin general internal functions.

void* arena_t::MallocHuge(size_t aSize, bool aZero) {
  return PallocHuge(aSize, kChunkSize, aZero);
}
void* arena_t::PallocHuge(size_t aSize, size_t aAlignment, bool aZero) {
  void* ret;
  size_t csize;
  size_t psize;
  extent_node_t* node;

  // We're going to configure guard pages in the region between the
  // page-aligned size and the chunk-aligned size, so if those are the same
  // then we need to force that region into existence.
  csize = CHUNK_CEILING(aSize + gPageSize);
  if (csize < aSize) {
    // size is large enough to cause size_t wrap-around.
    return nullptr;
  }

  // Allocate an extent node with which to track the chunk.
  node = ExtentAlloc::alloc();
  if (!node) {
    return nullptr;
  }

  // Allocate one or more contiguous chunks for this request.
  ret = chunk_alloc(csize, aAlignment, false);
  if (!ret) {
    ExtentAlloc::dealloc(node);
    return nullptr;
  }
  psize = PAGE_CEILING(aSize);
  if (aZero) {
    chunk_assert_zero(ret, psize);
  }

  // Insert node into huge.
  node->mAddr = ret;
  node->mSize = psize;
  node->mArena = this;
  node->mArenaId = mId;

  {
    MutexAutoLock lock(huge_mtx);
    huge.Insert(node);

    // Although we allocated space for csize bytes, we indicate that we've
    // allocated only psize bytes.
    //
    // If DECOMMIT is defined, this is a reasonable thing to do, since
    // we'll explicitly decommit the bytes in excess of psize.
    //
    // If DECOMMIT is not defined, then we're relying on the OS to be lazy
    // about how it allocates physical pages to mappings. If we never
    // touch the pages in excess of psize, the OS won't allocate a physical
    // page, and we won't use more than psize bytes of physical memory.
    //
    // A correct program will only touch memory in excess of how much it
    // requested if it first calls malloc_usable_size and finds out how
    // much space it has to play with. But because we set node->mSize =
    // psize above, malloc_usable_size will return psize, not csize, and
    // the program will (hopefully) never touch bytes in excess of psize.
    // Thus those bytes won't take up space in physical memory, and we can
    // reasonably claim we never "allocated" them in the first place.
    huge_allocated += psize;
    huge_mapped += csize;
  }

  pages_decommit((void*)((uintptr_t)ret + psize), csize - psize);

  ApplyZeroOrJunk(ret, psize);

  return ret;
}
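// Illustrative example (assuming 4 KiB pages and 1 MiB chunks): a request of
// 1 MiB + 20 KiB gets psize = 1 MiB + 20 KiB and csize =
// CHUNK_CEILING(psize + one page) = 2 MiB; the [psize, csize) tail is
// decommitted above, so writes past the usable size fault instead of
// silently consuming memory.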
4387 void* arena_t::RallocHuge(void* aPtr
, size_t aSize
, size_t aOldSize
) {
4391 // Avoid moving the allocation if the size class would not change.
4392 if (aOldSize
> gMaxLargeClass
&&
4393 CHUNK_CEILING(aSize
+ gPageSize
) == CHUNK_CEILING(aOldSize
+ gPageSize
)) {
4394 size_t psize
= PAGE_CEILING(aSize
);
4395 if (aSize
< aOldSize
) {
4396 MaybePoison((void*)((uintptr_t)aPtr
+ aSize
), aOldSize
- aSize
);
4398 if (psize
< aOldSize
) {
4401 pages_decommit((void*)((uintptr_t)aPtr
+ psize
), aOldSize
- psize
);
4403 // Update recorded size.
4404 MutexAutoLock
lock(huge_mtx
);
4405 key
.mAddr
= const_cast<void*>(aPtr
);
4406 extent_node_t
* node
= huge
.Search(&key
);
4408 MOZ_ASSERT(node
->mSize
== aOldSize
);
4409 MOZ_RELEASE_ASSERT(node
->mArena
== this);
4410 huge_allocated
-= aOldSize
- psize
;
4411 // No need to change huge_mapped, because we didn't (un)map anything.
4412 node
->mSize
= psize
;
4413 } else if (psize
> aOldSize
) {
4414 if (!pages_commit((void*)((uintptr_t)aPtr
+ aOldSize
),
4415 psize
- aOldSize
)) {
4419 // We need to update the recorded size if the size increased,
4420 // so malloc_usable_size doesn't return a value smaller than
4421 // what was requested via realloc().
4423 MutexAutoLock
lock(huge_mtx
);
4424 key
.mAddr
= const_cast<void*>(aPtr
);
4425 extent_node_t
* node
= huge
.Search(&key
);
4427 MOZ_ASSERT(node
->mSize
== aOldSize
);
4428 MOZ_RELEASE_ASSERT(node
->mArena
== this);
4429 huge_allocated
+= psize
- aOldSize
;
4430 // No need to change huge_mapped, because we didn't
4431 // (un)map anything.
4432 node
->mSize
= psize
;
4435 if (aSize
> aOldSize
) {
4436 ApplyZeroOrJunk((void*)((uintptr_t)aPtr
+ aOldSize
), aSize
- aOldSize
);
  // If we get here, then aSize and aOldSize are different enough that we
  // need to use a different size class. In that case, fall back to allocating
  // new space and copying. Allow non-private arenas to switch arenas.
  ret = (mIsPrivate ? this : choose_arena(aSize))->MallocHuge(aSize, false);
  if (!ret) {
    return nullptr;
  }

  copysize = (aSize < aOldSize) ? aSize : aOldSize;
  if (copysize >= VM_COPY_MIN) {
    pages_copy(ret, aPtr, copysize);
  } else {
    memcpy(ret, aPtr, copysize);
  }
  idalloc(aPtr, this);
  return ret;
}
static void huge_dalloc(void* aPtr, arena_t* aArena) {
  extent_node_t* node;
  size_t mapped = 0;
  {
    extent_node_t key;
    MutexAutoLock lock(huge_mtx);

    // Extract from tree of huge allocations.
    key.mAddr = aPtr;
    node = huge.Search(&key);
    MOZ_RELEASE_ASSERT(node, "Double-free?");
    MOZ_ASSERT(node->mAddr == aPtr);
    MOZ_RELEASE_ASSERT(!aArena || node->mArena == aArena);
    // See AllocInfo::Arena.
    MOZ_RELEASE_ASSERT(node->mArenaId == node->mArena->mId);
    huge.Remove(node);

    mapped = CHUNK_CEILING(node->mSize + gPageSize);
    huge_allocated -= node->mSize;
    huge_mapped -= mapped;
  }

  // Unmap chunk.
  chunk_dealloc(node->mAddr, mapped, HUGE_CHUNK);

  ExtentAlloc::dealloc(node);
}
size_t GetKernelPageSize() {
  static size_t kernel_page_size = ([]() {
#ifdef XP_WIN
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    return info.dwPageSize;
#else
    long result = sysconf(_SC_PAGESIZE);
    MOZ_ASSERT(result != -1);
    return result;
#endif
  })();
  return kernel_page_size;
}
// Returns whether the allocator was successfully initialized.
static bool malloc_init_hard() {
  unsigned i;
  const char* opts;

  AutoLock<StaticMutex> lock(gInitLock);

  if (malloc_initialized) {
    // Another thread initialized the allocator before this one
    // acquired gInitLock.
    return true;
  }

  if (!thread_arena.init()) {
    return false;
  }

  // Get page size and number of CPUs
  const size_t page_size = GetKernelPageSize();
  // We assume that the page size is a power of 2.
  MOZ_ASSERT(IsPowerOfTwo(page_size));
#ifdef MALLOC_STATIC_PAGESIZE
  if (gPageSize % page_size) {
    _malloc_message(
        _getprogname(),
        "Compile-time page size does not divide the runtime one.\n");
    return false;
  }
#else
  gRealPageSize = gPageSize = page_size;
#endif
4537 // Get runtime configuration.
4538 if ((opts
= getenv("MALLOC_OPTIONS"))) {
4539 for (i
= 0; opts
[i
] != '\0'; i
++) {
4540 // All options are single letters, some take a *prefix* numeric argument.
4542 // Parse the argument.
4543 unsigned prefix_arg
= 0;
4544 while (opts
[i
] >= '0' && opts
[i
] <= '9') {
4546 prefix_arg
+= opts
[i
] - '0';
4552 opt_dirty_max
>>= prefix_arg
? prefix_arg
: 1;
4555 prefix_arg
= prefix_arg
? prefix_arg
: 1;
4556 if (opt_dirty_max
== 0) {
4560 opt_dirty_max
<<= prefix_arg
;
4561 if (opt_dirty_max
== 0) {
4562 // If the shift above overflowed all the bits then clamp the result
4563 // instead. If we started with DIRTY_MAX_DEFAULT then this will
4564 // always be a power of two so choose the maximum power of two that
4565 // fits in a size_t.
4566 opt_dirty_max
= size_t(1) << (sizeof(size_t) * CHAR_BIT
- 1);
4569 #ifdef MALLOC_RUNTIME_CONFIG
4577 // The argument selects how much poisoning to do.
4581 if (opts
[i
+ 1] == 'Q') {
4582 // Maximum poisoning.
4587 opt_poison_size
= kCacheLineSize
* prefix_arg
;
4596 # ifndef MALLOC_STATIC_PAGESIZE
4598 MOZ_ASSERT(gPageSize
>= 4_KiB
);
4599 MOZ_ASSERT(gPageSize
<= 64_KiB
);
4600 prefix_arg
= prefix_arg
? prefix_arg
: 1;
4601 gPageSize
<<= prefix_arg
;
4602 // We know that if the shift causes gPageSize to be zero then it's
4603 // because it shifted all the bits off. We didn't start with zero.
4604 // Therefore if gPageSize is out of bounds we set it to 64KiB.
4605 if (gPageSize
< 4_KiB
|| gPageSize
> 64_KiB
) {
4612 opt_randomize_small
= false;
4615 opt_randomize_small
= true;
4622 _malloc_message(_getprogname(),
4623 ": (malloc) Unsupported character "
4624 "in malloc options: '",
4631 #ifndef MALLOC_STATIC_PAGESIZE
4636 // Initialize chunks data.
4638 MOZ_PUSH_IGNORE_THREAD_SAFETY
4639 gChunksBySize
.Init();
4640 gChunksByAddress
.Init();
4641 MOZ_POP_THREAD_SAFETY
4643 // Initialize huge allocation data.
4645 MOZ_PUSH_IGNORE_THREAD_SAFETY
4649 MOZ_POP_THREAD_SAFETY
4651 // Initialize base allocation data structures.
4653 MOZ_PUSH_IGNORE_THREAD_SAFETY
4656 MOZ_POP_THREAD_SAFETY
  // Initialize arenas collection here.
  if (!gArenas.Init()) {
    return false;
  }

  // Assign the default arena to the initial thread.
  thread_arena.set(gArenas.GetDefault());

  if (!gChunkRTree.Init()) {
    return false;
  }

  malloc_initialized = true;

  // Dummy call so that the function is not removed by dead-code elimination
  Debug::jemalloc_ptr_info(nullptr);
#if !defined(XP_WIN) && !defined(XP_DARWIN)
  // Prevent potential deadlock on malloc locks after fork.
  pthread_atfork(_malloc_prefork, _malloc_postfork_parent,
                 _malloc_postfork_child);
#endif

  return true;
}
// End general internal functions.
// ***************************************************************************
// Begin malloc(3)-compatible functions.

// The BaseAllocator class is a helper class that implements the base allocator
// functions (malloc, calloc, realloc, free, memalign) for a given arena,
// or an appropriately chosen arena (per choose_arena()) when none is given.
struct BaseAllocator {
#define MALLOC_DECL(name, return_type, ...) \
  inline return_type name(__VA_ARGS__);
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"

  explicit BaseAllocator(arena_t* aArena) : mArena(aArena) {}

 private:
  arena_t* mArena;
};

#define MALLOC_DECL(name, return_type, ...)                    \
  inline return_type MozJemalloc::name(                        \
      ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) {                \
    BaseAllocator allocator(nullptr);                          \
    return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__));   \
  }
#define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
#include "malloc_decls.h"
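// Illustrative expansion (assuming malloc_decls.h declares an entry such as
// MALLOC_DECL(malloc, void*, size_t)): the block above generates roughly
//
//   inline void* MozJemalloc::malloc(size_t arg1) {
//     BaseAllocator allocator(nullptr);
//     return allocator.malloc(arg1);
//   }
//
// i.e. the public entry points simply forward to BaseAllocator with no
// explicit arena, letting choose_arena() pick one.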
inline void* BaseAllocator::malloc(size_t aSize) {
  void* ret;
  arena_t* arena;

  if (!malloc_init()) {
    return nullptr;
  }

  // If mArena is non-null, it must not be in the first page.
  MOZ_DIAGNOSTIC_ASSERT_IF(mArena, (size_t)mArena >= gPageSize);
  arena = mArena ? mArena : choose_arena(aSize);
  ret = arena->Malloc(aSize, /* aZero = */ false);
  return ret;
}
inline void* BaseAllocator::memalign(size_t aAlignment, size_t aSize) {
  MOZ_ASSERT(((aAlignment - 1) & aAlignment) == 0);

  if (!malloc_init()) {
    return nullptr;
  }

  aAlignment = aAlignment < sizeof(void*) ? sizeof(void*) : aAlignment;
  arena_t* arena = mArena ? mArena : choose_arena(aSize);
  return arena->Palloc(aAlignment, aSize);
}
inline void* BaseAllocator::calloc(size_t aNum, size_t aSize) {
  void* ret = nullptr;

  if (malloc_init()) {
    CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aNum) * aSize;
    if (checkedSize.isValid()) {
      size_t allocSize = checkedSize.value();
      if (allocSize == 0) {
        allocSize = 1;
      }
      arena_t* arena = mArena ? mArena : choose_arena(allocSize);
      ret = arena->Malloc(allocSize, /* aZero = */ true);
    }
  }

  return ret;
}
inline void* BaseAllocator::realloc(void* aPtr, size_t aSize) {
  void* ret;

  if (aPtr) {
    MOZ_RELEASE_ASSERT(malloc_initialized);

    auto info = AllocInfo::Get(aPtr);
    auto arena = info.Arena();
    MOZ_RELEASE_ASSERT(!mArena || arena == mArena);
    ret = arena->Ralloc(aPtr, aSize, info.Size());
  } else {
    if (!malloc_init()) {
      return nullptr;
    }
    arena_t* arena = mArena ? mArena : choose_arena(aSize);
    ret = arena->Malloc(aSize, /* aZero = */ false);
  }

  return ret;
}
inline void BaseAllocator::free(void* aPtr) {
  size_t offset;

  // A version of idalloc that checks for nullptr pointer.
  offset = GetChunkOffsetForPtr(aPtr);
  if (offset != 0) {
    MOZ_RELEASE_ASSERT(malloc_initialized);
    arena_dalloc(aPtr, offset, mArena);
  } else if (aPtr) {
    MOZ_RELEASE_ASSERT(malloc_initialized);
    huge_dalloc(aPtr, mArena);
  }
}
inline int MozJemalloc::posix_memalign(void** aMemPtr, size_t aAlignment,
                                       size_t aSize) {
  return AlignedAllocator<memalign>::posix_memalign(aMemPtr, aAlignment, aSize);
}

inline void* MozJemalloc::aligned_alloc(size_t aAlignment, size_t aSize) {
  return AlignedAllocator<memalign>::aligned_alloc(aAlignment, aSize);
}

inline void* MozJemalloc::valloc(size_t aSize) {
  return AlignedAllocator<memalign>::valloc(aSize);
}
// End malloc(3)-compatible functions.
// ***************************************************************************
// Begin non-standard functions.

// This was added by Mozilla for use by SQLite.
inline size_t MozJemalloc::malloc_good_size(size_t aSize) {
  if (aSize <= gMaxLargeClass) {
    // Small or large.
    aSize = SizeClass(aSize).Size();
  } else {
    // Huge. We use PAGE_CEILING to get psize, instead of using
    // CHUNK_CEILING to get csize. This ensures that
    // malloc_usable_size(malloc(n)) always matches
    // malloc_good_size(n).
    aSize = PAGE_CEILING(aSize);
  }
  return aSize;
}

inline size_t MozJemalloc::malloc_usable_size(usable_ptr_t aPtr) {
  return AllocInfo::GetValidated(aPtr).Size();
}
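// Illustrative examples: a small request rounds up to its size class, e.g.
// malloc_good_size(17) == 32 with the quantum-spaced classes shown in the
// table at the top of this file, while a huge request rounds up to a page
// boundary, so malloc_usable_size(malloc(n)) and malloc_good_size(n) agree in
// both cases.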
inline void MozJemalloc::jemalloc_stats_internal(
    jemalloc_stats_t* aStats, jemalloc_bin_stats_t* aBinStats) {
  size_t non_arena_mapped, chunk_header_size;

  if (!malloc_init()) {
    memset(aStats, 0, sizeof(*aStats));
    return;
  }
  if (aBinStats) {
    memset(aBinStats, 0, sizeof(jemalloc_bin_stats_t) * NUM_SMALL_CLASSES);
  }
  // Gather runtime settings.
  aStats->opt_junk = opt_junk;
  aStats->opt_zero = opt_zero;
  aStats->quantum = kQuantum;
  aStats->quantum_max = kMaxQuantumClass;
  aStats->quantum_wide = kQuantumWide;
  aStats->quantum_wide_max = kMaxQuantumWideClass;
  aStats->subpage_max = gMaxSubPageClass;
  aStats->large_max = gMaxLargeClass;
  aStats->chunksize = kChunkSize;
  aStats->page_size = gPageSize;
  aStats->dirty_max = opt_dirty_max;

  // Gather current memory usage statistics.
  aStats->narenas = 0;
  aStats->mapped = 0;
  aStats->allocated = 0;
  aStats->waste = 0;
  aStats->pages_dirty = 0;
  aStats->pages_fresh = 0;
  aStats->pages_madvised = 0;
  aStats->bookkeeping = 0;
  aStats->bin_unused = 0;

  non_arena_mapped = 0;
  // Get huge mapped/allocated.
  {
    MutexAutoLock lock(huge_mtx);
    non_arena_mapped += huge_mapped;
    aStats->allocated += huge_allocated;
    MOZ_ASSERT(huge_mapped >= huge_allocated);
  }

  // Get base mapped/allocated.
  {
    MutexAutoLock lock(base_mtx);
    non_arena_mapped += base_mapped;
    aStats->bookkeeping += base_committed;
    MOZ_ASSERT(base_mapped >= base_committed);
  }

  gArenas.mLock.Lock();
4918 // Stats can only read complete information if its run on the main thread.
4919 MOZ_ASSERT(gArenas
.IsOnMainThreadWeak());
4921 // Iterate over arenas.
4922 for (auto arena
: gArenas
.iter()) {
4923 // Cannot safely read stats for this arena and therefore stats would be
4925 MOZ_ASSERT(arena
->mLock
.SafeOnThisThread());
4927 size_t arena_mapped
, arena_allocated
, arena_committed
, arena_dirty
,
4928 arena_fresh
, arena_madvised
, j
, arena_unused
, arena_headers
;
4934 MaybeMutexAutoLock
lock(arena
->mLock
);
4936 arena_mapped
= arena
->mStats
.mapped
;
4938 // "committed" counts dirty and allocated memory.
4939 arena_committed
= arena
->mStats
.committed
<< gPageSize2Pow
;
4942 arena
->mStats
.allocated_small
+ arena
->mStats
.allocated_large
;
4944 arena_dirty
= arena
->mNumDirty
<< gPageSize2Pow
;
4945 arena_fresh
= arena
->mNumFresh
<< gPageSize2Pow
;
4946 arena_madvised
= arena
->mNumMAdvised
<< gPageSize2Pow
;
4948 for (j
= 0; j
< NUM_SMALL_CLASSES
; j
++) {
4949 arena_bin_t
* bin
= &arena
->mBins
[j
];
4950 size_t bin_unused
= 0;
4951 size_t num_non_full_runs
= 0;
4953 for (auto mapelm
: bin
->mNonFullRuns
.iter()) {
4954 arena_run_t
* run
= (arena_run_t
*)(mapelm
->bits
& ~gPageSizeMask
);
4955 bin_unused
+= run
->mNumFree
* bin
->mSizeClass
;
4956 num_non_full_runs
++;
4959 if (bin
->mCurrentRun
) {
4960 bin_unused
+= bin
->mCurrentRun
->mNumFree
* bin
->mSizeClass
;
4961 num_non_full_runs
++;
4964 arena_unused
+= bin_unused
;
4965 arena_headers
+= bin
->mNumRuns
* bin
->mRunFirstRegionOffset
;
4967 aBinStats
[j
].size
= bin
->mSizeClass
;
4968 aBinStats
[j
].num_non_full_runs
+= num_non_full_runs
;
4969 aBinStats
[j
].num_runs
+= bin
->mNumRuns
;
4970 aBinStats
[j
].bytes_unused
+= bin_unused
;
4971 size_t bytes_per_run
= static_cast<size_t>(bin
->mRunSizePages
)
4973 aBinStats
[j
].bytes_total
+=
4974 bin
->mNumRuns
* (bytes_per_run
- bin
->mRunFirstRegionOffset
);
4975 aBinStats
[j
].bytes_per_run
= bytes_per_run
;
    MOZ_ASSERT(arena_mapped >= arena_committed);
    MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);

    aStats->mapped += arena_mapped;
    aStats->allocated += arena_allocated;
    aStats->pages_dirty += arena_dirty;
    aStats->pages_fresh += arena_fresh;
    aStats->pages_madvised += arena_madvised;
    // "waste" is committed memory that is neither dirty nor
    // allocated. If you change this definition please update
    // memory/replace/logalloc/replay/Replay.cpp's jemalloc_stats calculation
    // of waste.
    MOZ_ASSERT(arena_committed >=
               (arena_allocated + arena_dirty + arena_unused + arena_headers));
    aStats->waste += arena_committed - arena_allocated - arena_dirty -
                     arena_unused - arena_headers;
    aStats->bin_unused += arena_unused;
    aStats->bookkeeping += arena_headers;
    aStats->narenas++;
  }
  gArenas.mLock.Unlock();
  // Account for arena chunk headers in bookkeeping rather than waste.
  chunk_header_size =
      ((aStats->mapped / aStats->chunksize) * (gChunkHeaderNumPages - 1))
      << gPageSize2Pow;

  aStats->mapped += non_arena_mapped;
  aStats->bookkeeping += chunk_header_size;
  aStats->waste -= chunk_header_size;

  MOZ_ASSERT(aStats->mapped >= aStats->allocated + aStats->waste +
                                   aStats->pages_dirty + aStats->bookkeeping);
}
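// Illustrative reading of the invariant above: every mapped byte is counted
// at most once, so the mapped total is at least the sum of allocated, waste,
// dirty pages and bookkeeping, with arena chunk header pages counted under
// bookkeeping rather than waste.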
inline size_t MozJemalloc::jemalloc_stats_num_bins() {
  return NUM_SMALL_CLASSES;
}

inline void MozJemalloc::jemalloc_set_main_thread() {
  MOZ_ASSERT(malloc_initialized);
  gArenas.SetMainThread();
}
#ifdef MALLOC_DOUBLE_PURGE

// Explicitly remove all of this chunk's MADV_FREE'd pages from memory.
static size_t hard_purge_chunk(arena_chunk_t* aChunk) {
  size_t total_npages = 0;
  // See similar logic in arena_t::Purge().
  for (size_t i = gChunkHeaderNumPages; i < gChunkNumPages; i++) {
    // Find all adjacent pages with CHUNK_MAP_MADVISED set.
    size_t npages;
    for (npages = 0; aChunk->map[i + npages].bits & CHUNK_MAP_MADVISED &&
                     i + npages < gChunkNumPages;
         npages++) {
      // Turn off the page's CHUNK_MAP_MADVISED bit and turn on its
      // CHUNK_MAP_FRESH bit.
      MOZ_DIAGNOSTIC_ASSERT(!(aChunk->map[i + npages].bits &
                              (CHUNK_MAP_FRESH | CHUNK_MAP_DECOMMITTED)));
      aChunk->map[i + npages].bits ^= (CHUNK_MAP_MADVISED | CHUNK_MAP_FRESH);
    }

    // We could use mincore to find out which pages are actually
    // present, but it's not clear that's better.
    if (npages > 0) {
      pages_decommit(((char*)aChunk) + (i << gPageSize2Pow),
                     npages << gPageSize2Pow);
      Unused << pages_commit(((char*)aChunk) + (i << gPageSize2Pow),
                             npages << gPageSize2Pow);
    }
    total_npages += npages;
    i += npages;
  }

  return total_npages;
}
// Explicitly remove all of this arena's MADV_FREE'd pages from memory.
void arena_t::HardPurge() {
  MaybeMutexAutoLock lock(mLock);

  while (!mChunksMAdvised.isEmpty()) {
    arena_chunk_t* chunk = mChunksMAdvised.popFront();
    size_t npages = hard_purge_chunk(chunk);
    mNumMAdvised -= npages;
    mNumFresh += npages;
  }
}
inline void MozJemalloc::jemalloc_purge_freed_pages() {
  if (malloc_initialized) {
    MutexAutoLock lock(gArenas.mLock);
    MOZ_ASSERT(gArenas.IsOnMainThreadWeak());
    for (auto arena : gArenas.iter()) {
      arena->HardPurge();
    }
  }
}

#else  // !defined MALLOC_DOUBLE_PURGE

inline void MozJemalloc::jemalloc_purge_freed_pages() {}

#endif  // defined MALLOC_DOUBLE_PURGE
inline void MozJemalloc::jemalloc_free_dirty_pages(void) {
  if (malloc_initialized) {
    MutexAutoLock lock(gArenas.mLock);
    MOZ_ASSERT(gArenas.IsOnMainThreadWeak());
    for (auto arena : gArenas.iter()) {
      MaybeMutexAutoLock arena_lock(arena->mLock);
inline arena_t* ArenaCollection::GetByIdInternal(Tree& aTree,
                                                 arena_id_t aArenaId) {
  // Use AlignedStorage2 to avoid running the arena_t constructor, while
  // we only need it as a placeholder for mId.
  mozilla::AlignedStorage2<arena_t> key;
  key.addr()->mId = aArenaId;
  return aTree.Search(key.addr());
}
5108 inline arena_t
* ArenaCollection::GetById(arena_id_t aArenaId
, bool aIsPrivate
) {
5109 if (!malloc_initialized
) {
5113 Tree
* tree
= nullptr;
5115 if (ArenaIdIsMainThreadOnly(aArenaId
)) {
5116 // Main thread only arena. Do the lookup here without taking the lock.
5117 arena_t
* result
= GetByIdInternal(mMainThreadArenas
, aArenaId
);
5118 MOZ_RELEASE_ASSERT(result
);
5121 tree
= &mPrivateArenas
;
5126 MutexAutoLock
lock(mLock
);
5127 arena_t
* result
= GetByIdInternal(*tree
, aArenaId
);
5128 MOZ_RELEASE_ASSERT(result
);
inline arena_id_t MozJemalloc::moz_create_arena_with_params(
    arena_params_t* aParams) {
  if (malloc_init()) {
    arena_t* arena = gArenas.CreateArena(/* IsPrivate = */ true, aParams);
    return arena->mId;
  }
  return 0;
}

inline void MozJemalloc::moz_dispose_arena(arena_id_t aArenaId) {
  arena_t* arena = gArenas.GetById(aArenaId, /* IsPrivate = */ true);
  MOZ_RELEASE_ASSERT(arena);
  gArenas.DisposeArena(arena);
}

inline void MozJemalloc::moz_set_max_dirty_page_modifier(int32_t aModifier) {
  gArenas.SetDefaultMaxDirtyPageModifier(aModifier);
}
5151 #define MALLOC_DECL(name, return_type, ...) \
5152 inline return_type MozJemalloc::moz_arena_##name( \
5153 arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) { \
5154 BaseAllocator allocator( \
5155 gArenas.GetById(aArenaId, /* IsPrivate = */ true)); \
5156 return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
5158 #define MALLOC_FUNCS MALLOC_FUNCS_MALLOC_BASE
5159 #include "malloc_decls.h"
5161 // End non-standard functions.
5162 // ***************************************************************************
// Begin library-private functions, used by threading libraries for protection
// of malloc during fork(). These functions are only called if the program is
// running in threaded mode, so there is no need to check whether the program
// is threaded here.
//
// Note that the only way to keep the main-thread-only arenas in a consistent
// state for the child is if fork is called from the main thread only. Or the
// child must not use them, eg it should call exec(). We attempt to prevent the
// child from accessing these arenas by refusing to re-initialise them.

// This is only accessed in the fork handlers while gArenas.mLock is held.
static pthread_t gForkingThread;

// This is only accessed in the fork handlers while gArenas.mLock is held.
static pid_t gForkingProcess;
5183 void _malloc_prefork(void) MOZ_NO_THREAD_SAFETY_ANALYSIS
{
5184 // Acquire all mutexes in a safe order.
5185 gArenas
.mLock
.Lock();
5186 gForkingThread
= pthread_self();
5188 gForkingProcess
= getpid();
5191 for (auto arena
: gArenas
.iter()) {
5192 if (arena
->mLock
.LockIsEnabled()) {
5193 arena
->mLock
.Lock();
5203 void _malloc_postfork_parent(void) MOZ_NO_THREAD_SAFETY_ANALYSIS
{
5204 // Release all mutexes, now that fork() has completed.
5209 for (auto arena
: gArenas
.iter()) {
5210 if (arena
->mLock
.LockIsEnabled()) {
5211 arena
->mLock
.Unlock();
5215 gArenas
.mLock
.Unlock();
5219 void _malloc_postfork_child(void) {
5220 // Do this before iterating over the arenas.
5221 gArenas
.ResetMainThread();
5223 // Reinitialize all mutexes, now that fork() has completed.
5228 for (auto arena
: gArenas
.iter()) {
5229 arena
->mLock
.Reinit(gForkingThread
);
5232 gArenas
.mLock
.Init();
5237 void _malloc_postfork(void) {
5238 // On MacOS we need to check if this is running in the parent or child
5240 bool is_in_parent
= getpid() == gForkingProcess
;
5241 gForkingProcess
= 0;
5243 _malloc_postfork_parent();
5245 _malloc_postfork_child();
5248 # endif // XP_DARWIN
5251 // End library-private functions.
5252 // ***************************************************************************
5253 #ifdef MOZ_REPLACE_MALLOC
5254 // Windows doesn't come with weak imports as they are possible with
5255 // LD_PRELOAD or DYLD_INSERT_LIBRARIES on Linux/OSX. On this platform,
5256 // the replacement functions are defined as variable pointers to the
5257 // function resolved with GetProcAddress() instead of weak definitions
5258 // of functions. On Android, the same needs to happen as well, because
5259 // the Android linker doesn't handle weak linking with non LD_PRELOADed
5260 // libraries, but LD_PRELOADing is not very convenient on Android, with
5263 # define MOZ_REPLACE_WEAK __attribute__((weak_import))
5264 # elif defined(XP_WIN) || defined(ANDROID)
5265 # define MOZ_DYNAMIC_REPLACE_INIT
5266 # define replace_init replace_init_decl
5267 # elif defined(__GNUC__)
5268 # define MOZ_REPLACE_WEAK __attribute__((weak))
5271 # include "replace_malloc.h"
5273 # define MALLOC_DECL(name, return_type, ...) CanonicalMalloc::name,
5275 // The default malloc table, i.e. plain allocations. It never changes. It's
5276 // used by init(), and not used after that.
5277 static const malloc_table_t gDefaultMallocTable
= {
5278 # include "malloc_decls.h"
5281 // The malloc table installed by init(). It never changes from that point
5282 // onward. It will be the same as gDefaultMallocTable if no replace-malloc tool
5283 // is enabled at startup.
5284 static malloc_table_t gOriginalMallocTable
= {
5285 # include "malloc_decls.h"
5288 // The malloc table installed by jemalloc_replace_dynamic(). (Read the
5289 // comments above that function for more details.)
5290 static malloc_table_t gDynamicMallocTable
= {
5291 # include "malloc_decls.h"
5294 // This briefly points to gDefaultMallocTable at startup. After that, it points
5295 // to either gOriginalMallocTable or gDynamicMallocTable. It's atomic to avoid
5296 // races when switching between tables.
5297 static Atomic
<malloc_table_t
const*, mozilla::MemoryOrdering::Relaxed
>
5300 # ifdef MOZ_DYNAMIC_REPLACE_INIT
5301 # undef replace_init
5302 typedef decltype(replace_init_decl
) replace_init_impl_t
;
5303 static replace_init_impl_t
* replace_init
= nullptr;
5307 typedef HMODULE replace_malloc_handle_t
;
5309 static replace_malloc_handle_t
replace_malloc_handle() {
5310 wchar_t replace_malloc_lib
[1024];
5311 if (GetEnvironmentVariableW(L
"MOZ_REPLACE_MALLOC_LIB", replace_malloc_lib
,
5312 ArrayLength(replace_malloc_lib
)) > 0) {
5313 return LoadLibraryW(replace_malloc_lib
);
5318 # define REPLACE_MALLOC_GET_INIT_FUNC(handle) \
5319 (replace_init_impl_t*)GetProcAddress(handle, "replace_init")
5321 # elif defined(ANDROID)
5324 typedef void* replace_malloc_handle_t
;
5326 static replace_malloc_handle_t
replace_malloc_handle() {
5327 const char* replace_malloc_lib
= getenv("MOZ_REPLACE_MALLOC_LIB");
5328 if (replace_malloc_lib
&& *replace_malloc_lib
) {
5329 return dlopen(replace_malloc_lib
, RTLD_LAZY
);
5334 # define REPLACE_MALLOC_GET_INIT_FUNC(handle) \
5335 (replace_init_impl_t*)dlsym(handle, "replace_init")
5339 static void replace_malloc_init_funcs(malloc_table_t
*);
5341 # ifdef MOZ_REPLACE_MALLOC_STATIC
5342 extern "C" void logalloc_init(malloc_table_t
*, ReplaceMallocBridge
**);
5344 extern "C" void dmd_init(malloc_table_t
*, ReplaceMallocBridge
**);
5347 void phc_init(malloc_table_t
*, ReplaceMallocBridge
**);
5349 bool Equals(const malloc_table_t
& aTable1
, const malloc_table_t
& aTable2
) {
5350 return memcmp(&aTable1
, &aTable2
, sizeof(malloc_table_t
)) == 0;
5353 // Below is the malloc implementation overriding jemalloc and calling the
5354 // replacement functions if they exist.
5355 static ReplaceMallocBridge
* gReplaceMallocBridge
= nullptr;
5356 static void init() {
5357 malloc_table_t tempTable
= gDefaultMallocTable
;
5359 # ifdef MOZ_DYNAMIC_REPLACE_INIT
5360 replace_malloc_handle_t handle
= replace_malloc_handle();
5362 replace_init
= REPLACE_MALLOC_GET_INIT_FUNC(handle
);
5366 // Set this *before* calling replace_init, otherwise if replace_init calls
5367 // malloc() we'll get an infinite loop.
5368 gMallocTablePtr
= &gDefaultMallocTable
;
5370 // Pass in the default allocator table so replace functions can copy and use
5371 // it for their allocations. The replace_init() function should modify the
5372 // table if it wants to be active, otherwise leave it unmodified.
5374 replace_init(&tempTable
, &gReplaceMallocBridge
);
5376 # ifdef MOZ_REPLACE_MALLOC_STATIC
5377 if (Equals(tempTable
, gDefaultMallocTable
)) {
5378 logalloc_init(&tempTable
, &gReplaceMallocBridge
);
5381 if (Equals(tempTable
, gDefaultMallocTable
)) {
5382 dmd_init(&tempTable
, &gReplaceMallocBridge
);
5386 if (!Equals(tempTable
, gDefaultMallocTable
)) {
5387 replace_malloc_init_funcs(&tempTable
);
5389 gOriginalMallocTable
= tempTable
;
5390 gMallocTablePtr
= &gOriginalMallocTable
;
// WARNING WARNING WARNING: this function should be used with extreme care. It
// is not as general-purpose as it looks. It is currently used by
// tools/profiler/core/memory_hooks.cpp for counting allocations and probably
// should not be used for any other purpose.
//
// This function allows the original malloc table to be temporarily replaced by
// a different malloc table. Or, if the argument is nullptr, it switches back to
// the original malloc table.
//
// Limitations:
//
// - It is not threadsafe. If multiple threads pass it the same
//   `replace_init_func` at the same time, there will be data races writing to
//   the malloc_table_t within that function.
//
// - Only one replacement can be installed. No nesting is allowed.
//
// - The new malloc table must be able to free allocations made by the original
//   malloc table, and upon removal the original malloc table must be able to
//   free allocations made by the new malloc table. This means the new malloc
//   table can only do simple things like recording extra information, while
//   delegating actual allocation/free operations to the original malloc table.
MOZ_JEMALLOC_API void jemalloc_replace_dynamic(
    jemalloc_init_func replace_init_func) {
  if (replace_init_func) {
    malloc_table_t tempTable = gOriginalMallocTable;
    (*replace_init_func)(&tempTable, &gReplaceMallocBridge);
    if (!Equals(tempTable, gOriginalMallocTable)) {
      replace_malloc_init_funcs(&tempTable);

      // Temporarily switch back to the original malloc table. In the
      // (supported) non-nested case, this is a no-op. But just in case this is
      // an (unsupported) nested call, it makes the overwriting of
      // gDynamicMallocTable less racy, because ongoing calls to malloc() and
      // friends won't go through gDynamicMallocTable.
      gMallocTablePtr = &gOriginalMallocTable;

      gDynamicMallocTable = tempTable;
      gMallocTablePtr = &gDynamicMallocTable;
      // We assume that dynamic replaces don't occur close enough for a
      // thread to still have old copies of the table pointer when the 2nd
      // replace occurs.
    }
  } else {
    // Switch back to the original malloc table.
    gMallocTablePtr = &gOriginalMallocTable;
  }
}
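#  if 0
// Illustrative usage sketch only (not compiled): my_counting_init is a
// hypothetical hook matching jemalloc_init_func; per the limitations above it
// must delegate the real allocation work to the functions already in *aTable.
static void my_counting_init(malloc_table_t* aTable, ReplaceMallocBridge**) {
  // Wrap aTable->malloc / aTable->free here, recording counts and forwarding
  // to the original entries.
}
// jemalloc_replace_dynamic(my_counting_init);  // install the hook
// jemalloc_replace_dynamic(nullptr);           // restore the original table
#  endif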
# define MALLOC_DECL(name, return_type, ...)                            \
  inline return_type ReplaceMalloc::name(                               \
      ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) {                         \
    if (MOZ_UNLIKELY(!gMallocTablePtr)) {                               \
      init();                                                           \
    }                                                                   \
    return (*gMallocTablePtr).name(ARGS_HELPER(ARGS, ##__VA_ARGS__));   \
  }
# include "malloc_decls.h"

MOZ_JEMALLOC_API struct ReplaceMallocBridge* get_bridge(void) {
  if (MOZ_UNLIKELY(!gMallocTablePtr)) {
    init();
  }
  return gReplaceMallocBridge;
}
5460 // posix_memalign, aligned_alloc, memalign and valloc all implement some kind
5461 // of aligned memory allocation. For convenience, a replace-malloc library can
5462 // skip defining replace_posix_memalign, replace_aligned_alloc and
5463 // replace_valloc, and default implementations will be automatically derived
5464 // from replace_memalign.
5465 static void replace_malloc_init_funcs(malloc_table_t
* table
) {
5466 if (table
->posix_memalign
== CanonicalMalloc::posix_memalign
&&
5467 table
->memalign
!= CanonicalMalloc::memalign
) {
5468 table
->posix_memalign
=
5469 AlignedAllocator
<ReplaceMalloc::memalign
>::posix_memalign
;
5471 if (table
->aligned_alloc
== CanonicalMalloc::aligned_alloc
&&
5472 table
->memalign
!= CanonicalMalloc::memalign
) {
5473 table
->aligned_alloc
=
5474 AlignedAllocator
<ReplaceMalloc::memalign
>::aligned_alloc
;
5476 if (table
->valloc
== CanonicalMalloc::valloc
&&
5477 table
->memalign
!= CanonicalMalloc::memalign
) {
5478 table
->valloc
= AlignedAllocator
<ReplaceMalloc::memalign
>::valloc
;
5480 if (table
->moz_create_arena_with_params
==
5481 CanonicalMalloc::moz_create_arena_with_params
&&
5482 table
->malloc
!= CanonicalMalloc::malloc
) {
5483 # define MALLOC_DECL(name, ...) \
5484 table->name = DummyArenaAllocator<ReplaceMalloc>::name;
5485 # define MALLOC_FUNCS MALLOC_FUNCS_ARENA_BASE
5486 # include "malloc_decls.h"
5488 if (table
->moz_arena_malloc
== CanonicalMalloc::moz_arena_malloc
&&
5489 table
->malloc
!= CanonicalMalloc::malloc
) {
5490 # define MALLOC_DECL(name, ...) \
5491 table->name = DummyArenaAllocator<ReplaceMalloc>::name;
5492 # define MALLOC_FUNCS MALLOC_FUNCS_ARENA_ALLOC
5493 # include "malloc_decls.h"
5497 #endif // MOZ_REPLACE_MALLOC
5498 // ***************************************************************************
5499 // Definition of all the _impl functions
5500 // GENERIC_MALLOC_DECL2_MINGW is only used for the MinGW build, and aliases
5501 // the malloc funcs (e.g. malloc) to the je_ versions. It does not generate
5502 // aliases for the other functions (jemalloc and arena functions).
5504 // We do need aliases for the other mozglue.def-redirected functions though,
5505 // these are done at the bottom of mozmemory_wrap.cpp
5506 #define GENERIC_MALLOC_DECL2_MINGW(name, name_impl, return_type, ...) \
5507 return_type name(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
5508 __attribute__((alias(MOZ_STRINGIFY(name_impl))));
5510 #define GENERIC_MALLOC_DECL2(attributes, name, name_impl, return_type, ...) \
5511 return_type name_impl(ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) attributes { \
5512 return DefaultMalloc::name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
5516 # define GENERIC_MALLOC_DECL(attributes, name, return_type, ...) \
5517 GENERIC_MALLOC_DECL2(attributes, name, name##_impl, return_type, \
5520 # define GENERIC_MALLOC_DECL(attributes, name, return_type, ...) \
5521 GENERIC_MALLOC_DECL2(attributes, name, name##_impl, return_type, \
5523 GENERIC_MALLOC_DECL2_MINGW(name, name##_impl, return_type, ##__VA_ARGS__)
5526 #define NOTHROW_MALLOC_DECL(...) \
5527 MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (noexcept(true), __VA_ARGS__))
5528 #define MALLOC_DECL(...) \
5529 MOZ_MEMORY_API MACRO_CALL(GENERIC_MALLOC_DECL, (, __VA_ARGS__))
5530 #define MALLOC_FUNCS MALLOC_FUNCS_MALLOC
5531 #include "malloc_decls.h"
5533 #undef GENERIC_MALLOC_DECL
5534 #define GENERIC_MALLOC_DECL(attributes, name, return_type, ...) \
5535 GENERIC_MALLOC_DECL2(attributes, name, name, return_type, ##__VA_ARGS__)
5537 #define MALLOC_DECL(...) \
5538 MOZ_JEMALLOC_API MACRO_CALL(GENERIC_MALLOC_DECL, (, __VA_ARGS__))
5539 #define MALLOC_FUNCS (MALLOC_FUNCS_JEMALLOC | MALLOC_FUNCS_ARENA)
5540 #include "malloc_decls.h"
5541 // ***************************************************************************
5547 #if defined(__GLIBC__) && !defined(__UCLIBC__)
5548 // glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
5549 // to inconsistently reference libc's malloc(3)-compatible functions
5552 // These definitions interpose hooks in glibc. The functions are actually
5553 // passed an extra argument for the caller return address, which will be
5557 MOZ_EXPORT
void (*__free_hook
)(void*) = free_impl
;
5558 MOZ_EXPORT
void* (*__malloc_hook
)(size_t) = malloc_impl
;
5559 MOZ_EXPORT
void* (*__realloc_hook
)(void*, size_t) = realloc_impl
;
5560 MOZ_EXPORT
void* (*__memalign_hook
)(size_t, size_t) = memalign_impl
;
5563 #elif defined(RTLD_DEEPBIND)
5564 // XXX On systems that support RTLD_GROUP or DF_1_GROUP, do their
5565 // implementations permit similar inconsistencies? Should STV_SINGLETON
5566 // visibility be used for interposition where available?
5568 "Interposing malloc is unsafe on this system without libc malloc hooks."
MOZ_EXPORT void* _recalloc(void* aPtr, size_t aCount, size_t aSize) {
  size_t oldsize = aPtr ? AllocInfo::Get(aPtr).Size() : 0;
  CheckedInt<size_t> checkedSize = CheckedInt<size_t>(aCount) * aSize;

  if (!checkedSize.isValid()) {
    return nullptr;
  }

  size_t newsize = checkedSize.value();

  // In order for all trailing bytes to be zeroed, the caller needs to
  // use calloc(), followed by recalloc(). However, the current calloc()
  // implementation only zeros the bytes requested, so if recalloc() is
  // to work 100% correctly, calloc() will need to change to zero
  // trailing bytes.
  aPtr = DefaultMalloc::realloc(aPtr, newsize);
  if (aPtr && oldsize < newsize) {
    memset((void*)((uintptr_t)aPtr + oldsize), 0, newsize - oldsize);
  }

  return aPtr;
}
// This impl of _expand doesn't ever actually expand or shrink blocks: it
// simply replies that you may continue using a shrunk block.
MOZ_EXPORT void* _expand(void* aPtr, size_t newsize) {
  if (AllocInfo::Get(aPtr).Size() >= newsize) {
    return aPtr;
  }

  return nullptr;
}

MOZ_EXPORT size_t _msize(void* aPtr) {
  return DefaultMalloc::malloc_usable_size(aPtr);
}

// Compile PHC and mozjemalloc together so that PHC can inline mozjemalloc.