/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/mozalloc.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
#include "mozilla/Vector.h"
#include "mozmemory.h"

#include "nsICrashReporter.h"
#include "nsServiceManagerUtils.h"

#include "gtest/gtest.h"

#ifdef MOZ_PHC
# include "replace_malloc_bridge.h"
#endif

#if defined(DEBUG) && !defined(XP_WIN) && !defined(ANDROID)
# define HAS_GDB_SLEEP_DURATION 1
extern unsigned int _gdb_sleep_duration;
#endif

// Death tests are too slow on OSX because of the system crash reporter.
#ifndef XP_DARWIN
static void DisableCrashReporter() {
  nsCOMPtr<nsICrashReporter> crashreporter =
      do_GetService("@mozilla.org/toolkit/crash-reporter;1");
  if (crashreporter) {
    crashreporter->SetEnabled(false);
  }
}

// Wrap ASSERT_DEATH_IF_SUPPORTED to disable the crash reporter
// when entering the subprocess, so that the expected crashes don't
// create a minidump that the gtest harness will interpret as an error.
# define ASSERT_DEATH_WRAP(a, b) \
  ASSERT_DEATH_IF_SUPPORTED(     \
      {                          \
        DisableCrashReporter();  \
        a;                       \
      },                         \
      b)
#else
# define ASSERT_DEATH_WRAP(a, b)
#endif
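
// ASSERT_DEATH_WRAP takes the statement expected to crash and a regex for the
// expected death message, e.g. (as used in the tests below):
//   ASSERT_DEATH_WRAP(moz_arena_malloc(0, 80), "");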

using namespace mozilla;

class AutoDisablePHCOnCurrentThread {
 public:
  AutoDisablePHCOnCurrentThread() {
#ifdef MOZ_PHC
    ReplaceMalloc::DisablePHCOnCurrentThread();
#endif
  }

  ~AutoDisablePHCOnCurrentThread() {
#ifdef MOZ_PHC
    ReplaceMalloc::ReenablePHCOnCurrentThread();
#endif
  }
};
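
// RAII helper: declaring an AutoDisablePHCOnCurrentThread at the top of a test
// body keeps PHC from intercepting that test's allocations, and re-enables PHC
// when the object goes out of scope.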

static inline void TestOne(size_t size) {
  size_t req = size;
  size_t adv = malloc_good_size(req);
  char* p = (char*)malloc(req);
  size_t usable = moz_malloc_usable_size(p);
  // NB: Using EXPECT here so that we still free the memory on failure.
  EXPECT_EQ(adv, usable) << "malloc_good_size(" << req << ") --> " << adv
                         << "; malloc_usable_size(" << req << ") --> "
                         << usable;
  free(p);
}

static inline void TestThree(size_t size) {
  ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
  ASSERT_NO_FATAL_FAILURE(TestOne(size));
  ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
}

TEST(Jemalloc, UsableSizeInAdvance)
{
  /*
   * Test every size up to a certain point, then (N-1, N, N+1) triplets for
   * various sizes beyond that.
   */

  for (size_t n = 0; n < 16_KiB; n++) ASSERT_NO_FATAL_FAILURE(TestOne(n));

  for (size_t n = 16_KiB; n < 1_MiB; n += 4_KiB)
    ASSERT_NO_FATAL_FAILURE(TestThree(n));

  for (size_t n = 1_MiB; n < 8_MiB; n += 128_KiB)
    ASSERT_NO_FATAL_FAILURE(TestThree(n));
}

static int gStaticVar;
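
// Helpers for the PtrInfo test below: they compare the result of
// jemalloc_ptr_info() against an expected tag/address/size; the arena id
// check is only compiled in debug builds.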
bool InfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
            size_t aSize, arena_id_t arenaId) {
  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize
#ifdef MOZ_DEBUG
         && aInfo.arenaId == arenaId
#endif
      ;
}

bool InfoEqFreedPage(jemalloc_ptr_info_t& aInfo, void* aAddr, size_t aPageSize,
                     arena_id_t arenaId) {
  size_t pageSizeMask = aPageSize - 1;

  return jemalloc_ptr_is_freed_page(&aInfo) &&
         aInfo.addr == (void*)(uintptr_t(aAddr) & ~pageSizeMask) &&
         aInfo.size == aPageSize
#ifdef MOZ_DEBUG
         && aInfo.arenaId == arenaId
#endif
      ;
}

TEST(Jemalloc, PtrInfo)
{
  arena_id_t arenaId = moz_create_arena();
  ASSERT_TRUE(arenaId != 0);

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  jemalloc_ptr_info_t info;
  Vector<char*> small, large, huge;

  // For small (<= 2KiB) allocations, test every position within many possible
  // sizes.
  size_t small_max = stats.page_size / 2;
  for (size_t n = 0; n <= small_max; n += 8) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(small.append(p));
    for (size_t j = 0; j < usable; j++) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // Similar for large (2KiB + 1 KiB .. 1MiB - 8KiB) allocations.
  for (size_t n = small_max + 1_KiB; n <= stats.large_max; n += 1_KiB) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(large.append(p));
    for (size_t j = 0; j < usable; j += 347) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // Similar for huge (> 1MiB - 8KiB) allocations.
  for (size_t n = stats.chunksize; n <= 10_MiB; n += 512_KiB) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(huge.append(p));
    for (size_t j = 0; j < usable; j += 567) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // The following loops check freed allocations. We step through the vectors
  // using prime-sized steps, which gives full coverage of the arrays while
  // avoiding deallocating in the same order we allocated.
  size_t len;

  // Free the small allocations and recheck them.
  int isFreedAlloc = 0, isFreedPage = 0;
  len = small.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 19) % len) {
    char* p = small[j];
    size_t usable = moz_malloc_size_of(p);
    moz_arena_free(arenaId, p);
    for (size_t k = 0; k < usable; k++) {
      jemalloc_ptr_info(&p[k], &info);
      // There are two valid outcomes here.
      if (InfoEq(info, TagFreedAlloc, p, usable, arenaId)) {
        isFreedAlloc++;
      } else if (InfoEqFreedPage(info, &p[k], stats.page_size, arenaId)) {
        isFreedPage++;
      }
    }
  }
  // There should be both FreedAlloc and FreedPage results, but a lot more of
  // the former.
  ASSERT_TRUE(isFreedAlloc != 0);
  ASSERT_TRUE(isFreedPage != 0);
  ASSERT_TRUE(isFreedAlloc / isFreedPage > 10);

  // Free the large allocations and recheck them.
  len = large.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 31) % len) {
    char* p = large[j];
    size_t usable = moz_malloc_size_of(p);
    moz_arena_free(arenaId, p);
    for (size_t k = 0; k < usable; k += 357) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size, arenaId));
    }
  }

  // Free the huge allocations and recheck them.
  len = huge.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 7) % len) {
    char* p = huge[j];
    size_t usable = moz_malloc_size_of(p);
    moz_arena_free(arenaId, p);
    for (size_t k = 0; k < usable; k += 587) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
    }
  }

  // Null pointer.
  jemalloc_ptr_info(nullptr, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Near-null pointer.
  jemalloc_ptr_info((void*)0x123, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Maximal address.
  jemalloc_ptr_info((void*)uintptr_t(-1), &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Stack memory.
  int stackVar;
  jemalloc_ptr_info(&stackVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Code memory.
  jemalloc_ptr_info((const void*)&jemalloc_ptr_info, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Static memory.
  jemalloc_ptr_info(&gStaticVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Chunk header.
  UniquePtr<int> p = MakeUnique<int>();
  size_t chunksizeMask = stats.chunksize - 1;
  char* chunk = (char*)(uintptr_t(p.get()) & ~chunksizeMask);
  size_t chunkHeaderSize = stats.chunksize - stats.large_max - stats.page_size;
  for (size_t i = 0; i < chunkHeaderSize; i += 64) {
    jemalloc_ptr_info(&chunk[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  }

  // Run header.
  size_t page_sizeMask = stats.page_size - 1;
  char* run = (char*)(uintptr_t(p.get()) & ~page_sizeMask);
  for (size_t i = 0; i < 4 * sizeof(void*); i++) {
    jemalloc_ptr_info(&run[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  }

  // Entire chunk. It's impossible to check what is put into |info| for all of
  // these addresses; this is more about checking that we don't crash.
  for (size_t i = 0; i < stats.chunksize; i += 256) {
    jemalloc_ptr_info(&chunk[i], &info);
  }

  moz_dispose_arena(arenaId);
}
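
// sSizes mixes small, large, and huge request sizes; the Arenas, InPlace and
// JunkPoison tests below iterate over it so that reallocations cross
// size-class boundaries.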
size_t sSizes[] = {1,      42,      79,      918,     1.5_KiB,
                   73_KiB, 129_KiB, 1.1_MiB, 2.6_MiB, 5.1_MiB};

TEST(Jemalloc, Arenas)
{
  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);
  void* ptr = moz_arena_malloc(arena, 42);
  ASSERT_TRUE(ptr != nullptr);
  ptr = moz_arena_realloc(arena, ptr, 64);
  ASSERT_TRUE(ptr != nullptr);
  moz_arena_free(arena, ptr);
  ptr = moz_arena_calloc(arena, 24, 2);
  // For convenience, free can be used to free arena pointers.
  free(ptr);
  moz_dispose_arena(arena);

#ifdef HAS_GDB_SLEEP_DURATION
  // Avoid death tests adding some unnecessary (long) delays.
  unsigned int old_gdb_sleep_duration = _gdb_sleep_duration;
  _gdb_sleep_duration = 0;
#endif

  // Can't use an arena after it's disposed.
  // ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 80), "");

  // Arena id 0 can't be used to somehow get to the main arena.
  ASSERT_DEATH_WRAP(moz_arena_malloc(0, 80), "");

  arena = moz_create_arena();
  arena_id_t arena2 = moz_create_arena();
  // Ensure arena2 is used, to prevent OSX errors.
  Unused << moz_arena_malloc(arena2, 42);

  // For convenience, realloc can also be used to reallocate arena pointers.
  // The result should be in the same arena. Test various size class
  // transitions.
  for (size_t from_size : sSizes) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      ptr = moz_arena_malloc(arena, from_size);
      ptr = realloc(ptr, to_size);
      // Freeing with the wrong arena should crash.
      ASSERT_DEATH_WRAP(moz_arena_free(arena2, ptr), "");
      // Likewise for moz_arena_realloc.
      ASSERT_DEATH_WRAP(moz_arena_realloc(arena2, ptr, from_size), "");
      // The following will crash if it's not in the right arena.
      moz_arena_free(arena, ptr);
    }
  }

  moz_dispose_arena(arena2);
  moz_dispose_arena(arena);

#ifdef HAS_GDB_SLEEP_DURATION
  _gdb_sleep_duration = old_gdb_sleep_duration;
#endif
}

// Check that a buffer aPtr is entirely filled with a given character from
// aOffset to aSize. For faster comparison, the caller is required to fill a
// reference buffer with the wanted character, and give the size of that
// reference buffer.
static void bulk_compare(char* aPtr, size_t aOffset, size_t aSize,
                         char* aReference, size_t aReferenceSize) {
  for (size_t i = aOffset; i < aSize; i += aReferenceSize) {
    size_t length = std::min(aSize - i, aReferenceSize);
    if (memcmp(aPtr + i, aReference, length)) {
      // We got a mismatch; we now want to report more precisely where.
      for (size_t j = i; j < i + length; j++) {
        ASSERT_EQ(aPtr[j], *aReference);
      }
    }
  }
}
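
// Typical use of bulk_compare (as in the JunkPoison test below): fill a
// page-sized reference buffer with the expected byte, then compare a whole
// allocation against it, e.g.
//   bulk_compare(buf, 0, allocated, poison_buf, stats.page_size);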

// A range iterator for size classes between two given values.
class SizeClassesBetween {
 public:
  SizeClassesBetween(size_t aStart, size_t aEnd) : mStart(aStart), mEnd(aEnd) {}

  class Iterator {
   public:
    explicit Iterator(size_t aValue) : mValue(malloc_good_size(aValue)) {}

    operator size_t() const { return mValue; }
    size_t operator*() const { return mValue; }
    Iterator& operator++() {
      mValue = malloc_good_size(mValue + 1);
      return *this;
    }

   private:
    size_t mValue;
  };

  Iterator begin() { return Iterator(mStart); }
  Iterator end() { return Iterator(mEnd); }

 private:
  size_t mStart, mEnd;
};
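
// Usage sketch: "for (size_t s : SizeClassesBetween(1, 16_KiB))" visits each
// distinct malloc size class once, starting at malloc_good_size(1) and
// stopping at malloc_good_size(16_KiB), since operator++ jumps to the next
// value with a different malloc_good_size().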

#define ALIGNMENT_CEILING(s, alignment) \
  (((s) + (alignment - 1)) & (~(alignment - 1)))
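// For a power-of-two alignment, ALIGNMENT_CEILING rounds s up to the next
// multiple of the alignment, e.g. ALIGNMENT_CEILING(5, 4) == 8 and
// ALIGNMENT_CEILING(8, 4) == 8.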

static bool IsSameRoundedHugeClass(size_t aSize1, size_t aSize2,
                                   jemalloc_stats_t& aStats) {
  return (aSize1 > aStats.large_max && aSize2 > aStats.large_max &&
          ALIGNMENT_CEILING(aSize1 + aStats.page_size, aStats.chunksize) ==
              ALIGNMENT_CEILING(aSize2 + aStats.page_size, aStats.chunksize));
}

static bool CanReallocInPlace(size_t aFromSize, size_t aToSize,
                              jemalloc_stats_t& aStats) {
  // PHC allocations must be disabled because PHC reallocs differently to
  // mozjemalloc.
#ifdef MOZ_PHC
  MOZ_RELEASE_ASSERT(!ReplaceMalloc::IsPHCEnabledOnCurrentThread());
#endif

  if (aFromSize == malloc_good_size(aToSize)) {
    // Same size class: in-place.
    return true;
  }
  if (aFromSize >= aStats.page_size && aFromSize <= aStats.large_max &&
      aToSize >= aStats.page_size && aToSize <= aStats.large_max) {
    // Any large class to any large class: in-place when there is space to.
    return true;
  }
  if (IsSameRoundedHugeClass(aFromSize, aToSize, aStats)) {
    // Huge sizes that round up to the same multiple of the chunk size:
    // in-place.
    return true;
  }
  return false;
}

TEST(Jemalloc, InPlace)
{
  // Disable PHC allocations for this test, because CanReallocInPlace() isn't
  // valid for PHC allocations.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Using a separate arena, which is always emptied after an iteration,
  // ensures that in-place reallocation happens in all cases it can happen.
  // This test is intended for developers to notice they may have to adapt
  // other tests if they change the conditions for in-place reallocation.
  arena_id_t arena = moz_create_arena();

  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      char* ptr = (char*)moz_arena_malloc(arena, from_size);
      char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
      if (CanReallocInPlace(from_size, to_size, stats)) {
        EXPECT_EQ(ptr, ptr2);
      } else {
        EXPECT_NE(ptr, ptr2);
      }
      moz_arena_free(arena, ptr2);
    }
  }

  moz_dispose_arena(arena);
}

// Bug 1474254: disable this test for Windows ccov builds because it leads to
// timeouts.
#if !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)
TEST(Jemalloc, JunkPoison)
{
  // Disable PHC allocations for this test, because CanReallocInPlace() isn't
  // valid for PHC allocations, and the testing UAFs aren't valid.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

# ifdef HAS_GDB_SLEEP_DURATION
  // Avoid death tests adding some unnecessary (long) delays.
  unsigned int old_gdb_sleep_duration = _gdb_sleep_duration;
  _gdb_sleep_duration = 0;
# endif

  // Create buffers in a separate arena, for faster comparisons with
  // bulk_compare.
  arena_id_t buf_arena = moz_create_arena();
  char* junk_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  // Depending on its configuration, the allocator will either fill the
  // requested allocation with the junk byte (0xe4) or with zeroes, or do
  // nothing, in which case, since we're allocating in a fresh arena,
  // we'll be getting zeroes.
  char junk = stats.opt_junk ? '\xe4' : '\0';
  for (size_t i = 0; i < stats.page_size; i++) {
    ASSERT_EQ(junk_buf[i], junk);
  }

  char* poison_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  memset(poison_buf, 0xe5, stats.page_size);

  static const char fill = 0x42;
  char* fill_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  memset(fill_buf, fill, stats.page_size);

  arena_params_t params;
  // Allow as many dirty pages in the arena as possible, so that purge never
  // happens in it. Purge breaks some of the tests below randomly depending on
  // what other things happen on other threads.
  params.mMaxDirty = size_t(-1);
  arena_id_t arena = moz_create_arena_with_params(&params);

  // Allocating should junk the buffer, and freeing should poison the buffer.
  for (size_t size : sSizes) {
    if (size <= stats.large_max) {
      SCOPED_TRACE(testing::Message() << "size = " << size);
      char* buf = (char*)moz_arena_malloc(arena, size);
      size_t allocated = moz_malloc_usable_size(buf);
      if (stats.opt_junk || stats.opt_zero) {
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(buf, 0, allocated, junk_buf, stats.page_size));
      }
      moz_arena_free(arena, buf);
      // We purposefully do a use-after-free here, to check that the data was
      // poisoned.
      ASSERT_NO_FATAL_FAILURE(
          bulk_compare(buf, 0, allocated, poison_buf, stats.page_size));
    }
  }

  // Shrinking in the same size class should be in place and poison between the
  // new allocation size and the old one.
  size_t prev = 0;
  for (size_t size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "size = " << size);
    SCOPED_TRACE(testing::Message() << "prev = " << prev);
    char* ptr = (char*)moz_arena_malloc(arena, size);
    memset(ptr, fill, moz_malloc_usable_size(ptr));
    char* ptr2 = (char*)moz_arena_realloc(arena, ptr, prev + 1);
    ASSERT_EQ(ptr, ptr2);
    ASSERT_NO_FATAL_FAILURE(
        bulk_compare(ptr, 0, prev + 1, fill_buf, stats.page_size));
    ASSERT_NO_FATAL_FAILURE(
        bulk_compare(ptr, prev + 1, size, poison_buf, stats.page_size));
    moz_arena_free(arena, ptr);
    prev = size;
  }

  // In-place realloc should junk the new bytes when growing and poison the old
  // bytes when shrinking.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      if (CanReallocInPlace(from_size, to_size, stats)) {
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, moz_malloc_usable_size(ptr));
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_EQ(ptr, ptr2);
        // Shrinking allocation
        if (from_size >= to_size) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, to_size, fill_buf, stats.page_size));
          // Huge allocations have guards and will crash when accessing
          // beyond the valid range.
          if (to_size > stats.large_max) {
            size_t page_limit = ALIGNMENT_CEILING(to_size, stats.page_size);
            ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, to_size, page_limit,
                                                 poison_buf, stats.page_size));
            ASSERT_DEATH_WRAP(ptr[page_limit] = 0, "");
          } else {
            ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, to_size, from_size,
                                                 poison_buf, stats.page_size));
          }
        } else {
          // Enlarging allocation
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, from_size, fill_buf, stats.page_size));
          if (stats.opt_junk || stats.opt_zero) {
            ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, from_size, to_size,
                                                 junk_buf, stats.page_size));
          }
          // Huge allocation, so should have a guard page following
          if (to_size > stats.large_max) {
            ASSERT_DEATH_WRAP(
                ptr[ALIGNMENT_CEILING(to_size, stats.page_size)] = 0, "");
          }
        }
        moz_arena_free(arena, ptr2);
      }
    }
  }

  // Growing to a different size class should poison the old allocation,
  // preserve the original bytes, and junk the new bytes in the new allocation.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      if (from_size < to_size && malloc_good_size(to_size) != from_size &&
          !IsSameRoundedHugeClass(from_size, to_size, stats)) {
        SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, moz_malloc_usable_size(ptr));
        // Avoid in-place realloc by allocating a buffer, expecting it to be
        // right after the buffer we just received. Buffers smaller than the
        // page size and exactly or larger than the size of the largest large
        // size class can't be reallocated in-place.
        char* avoid_inplace = nullptr;
        if (from_size >= stats.page_size && from_size < stats.large_max) {
          avoid_inplace = (char*)moz_arena_malloc(arena, stats.page_size);
          ASSERT_EQ(ptr + from_size, avoid_inplace);
        }
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_NE(ptr, ptr2);
        if (from_size <= stats.large_max) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, from_size, poison_buf, stats.page_size));
        }
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(ptr2, 0, from_size, fill_buf, stats.page_size));
        if (stats.opt_junk || stats.opt_zero) {
          size_t rounded_to_size = malloc_good_size(to_size);
          ASSERT_NE(to_size, rounded_to_size);
          ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
                                               junk_buf, stats.page_size));
        }
        moz_arena_free(arena, ptr2);
        moz_arena_free(arena, avoid_inplace);
      }
    }
  }

  // Shrinking to a different size class should poison the old allocation,
  // preserve the original bytes, and junk the extra bytes in the new
  // allocation.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      if (from_size > to_size &&
          !CanReallocInPlace(from_size, to_size, stats)) {
        SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, from_size);
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_NE(ptr, ptr2);
        if (from_size <= stats.large_max) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, from_size, poison_buf, stats.page_size));
        }
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(ptr2, 0, to_size, fill_buf, stats.page_size));
        if (stats.opt_junk || stats.opt_zero) {
          size_t rounded_to_size = malloc_good_size(to_size);
          ASSERT_NE(to_size, rounded_to_size);
          ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, to_size, rounded_to_size,
                                               junk_buf, stats.page_size));
        }
        moz_arena_free(arena, ptr2);
      }
    }
  }

  moz_dispose_arena(arena);

  moz_arena_free(buf_arena, poison_buf);
  moz_arena_free(buf_arena, junk_buf);
  moz_arena_free(buf_arena, fill_buf);
  moz_dispose_arena(buf_arena);

# ifdef HAS_GDB_SLEEP_DURATION
  _gdb_sleep_duration = old_gdb_sleep_duration;
# endif
}
#endif  // !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)

TEST(Jemalloc, GuardRegion)
{
  // Disable PHC allocations for this test, because even a single PHC
  // allocation occurring can throw it off.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

#ifdef HAS_GDB_SLEEP_DURATION
  // Avoid death tests adding some unnecessary (long) delays.
  unsigned int old_gdb_sleep_duration = _gdb_sleep_duration;
  _gdb_sleep_duration = 0;
#endif

  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);

  // Do enough large allocations to fill a chunk, and then one additional one,
  // and check that the guard page is still present after the one-but-last
  // allocation, i.e. that we didn't allocate the guard.
  Vector<void*> ptr_list;
  for (size_t cnt = 0; cnt < stats.large_max / stats.page_size; cnt++) {
    void* ptr = moz_arena_malloc(arena, stats.page_size);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_TRUE(ptr_list.append(ptr));
  }

  void* last_ptr_in_chunk = ptr_list[ptr_list.length() - 1];
  void* extra_ptr = moz_arena_malloc(arena, stats.page_size);
  void* guard_page = (void*)ALIGNMENT_CEILING(
      (uintptr_t)last_ptr_in_chunk + stats.page_size, stats.page_size);
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(guard_page, &info);
  ASSERT_TRUE(jemalloc_ptr_is_freed_page(&info));
  ASSERT_TRUE(info.tag == TagFreedPage);

  ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");

  for (void* ptr : ptr_list) {
    moz_arena_free(arena, ptr);
  }
  moz_arena_free(arena, extra_ptr);

  moz_dispose_arena(arena);

#ifdef HAS_GDB_SLEEP_DURATION
  _gdb_sleep_duration = old_gdb_sleep_duration;
#endif
}

TEST(Jemalloc, DisposeArena)
{
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

#ifdef HAS_GDB_SLEEP_DURATION
  // Avoid death tests adding some unnecessary (long) delays.
  unsigned int old_gdb_sleep_duration = _gdb_sleep_duration;
  _gdb_sleep_duration = 0;
#endif

  arena_id_t arena = moz_create_arena();
  void* ptr = moz_arena_malloc(arena, 42);
  // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  arena = moz_create_arena();
  ptr = moz_arena_malloc(arena, stats.page_size * 2);
  // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  arena = moz_create_arena();
  ptr = moz_arena_malloc(arena, stats.chunksize * 2);
#ifdef MOZ_DEBUG
  // On debug builds, we do the expensive check that arenas are empty.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);
#else
  // Currently, the allocator can't trivially check whether the arena is empty
  // of huge allocations, so disposing of it works.
  moz_dispose_arena(arena);
  // But trying to free a pointer that belongs to it will MOZ_CRASH.
  ASSERT_DEATH_WRAP(free(ptr), "");
  // Likewise for realloc.
  ASSERT_DEATH_WRAP(ptr = realloc(ptr, stats.chunksize * 3), "");
#endif

  // Using the arena after it's been disposed of is MOZ_CRASH-worthy.
  ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 42), "");

#ifdef HAS_GDB_SLEEP_DURATION
  _gdb_sleep_duration = old_gdb_sleep_duration;
#endif
}