/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/mozalloc.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
#include "mozilla/Vector.h"
#include "mozilla/gtest/MozHelpers.h"
#include "mozmemory.h"
// Utils.h provides the _KiB/_MiB user-defined literals used below.
#include "Utils.h"

#include "gtest/gtest.h"

#ifdef MOZ_PHC
#  include "replace_malloc_bridge.h"
#endif

using namespace mozilla;
class AutoDisablePHCOnCurrentThread {
 public:
  AutoDisablePHCOnCurrentThread() {
#ifdef MOZ_PHC
    ReplaceMalloc::DisablePHCOnCurrentThread();
#endif
  }

  ~AutoDisablePHCOnCurrentThread() {
#ifdef MOZ_PHC
    ReplaceMalloc::ReenablePHCOnCurrentThread();
#endif
  }
};
static inline void TestOne(size_t size) {
  size_t req = size;
  size_t adv = malloc_good_size(req);
  char* p = (char*)malloc(req);
  size_t usable = moz_malloc_usable_size(p);
  // NB: Using EXPECT here so that we still free the memory on failure.
  EXPECT_EQ(adv, usable) << "malloc_good_size(" << req << ") --> " << adv
                         << "; malloc_usable_size(" << req << ") --> "
                         << usable;
  free(p);
}

static inline void TestThree(size_t size) {
  ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
  ASSERT_NO_FATAL_FAILURE(TestOne(size));
  ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
}
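
// As a concrete illustration (an assumption about the default configuration,
// not something the tests rely on): with mozjemalloc's default 16-byte
// quantum, malloc_good_size(100) returns the 112-byte size class, and
// moz_malloc_usable_size() of a 100-byte allocation is expected to report the
// same 112 bytes, which is exactly what TestOne(100) asserts.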
TEST(Jemalloc, UsableSizeInAdvance) {
  /*
   * Test every size up to a certain point, then (N-1, N, N+1) triplets for
   * various sizes beyond that.
   */

  for (size_t n = 0; n < 16_KiB; n++) ASSERT_NO_FATAL_FAILURE(TestOne(n));

  for (size_t n = 16_KiB; n < 1_MiB; n += 4_KiB)
    ASSERT_NO_FATAL_FAILURE(TestThree(n));

  for (size_t n = 1_MiB; n < 8_MiB; n += 128_KiB)
    ASSERT_NO_FATAL_FAILURE(TestThree(n));
}
static int gStaticVar;

bool InfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
            size_t aSize, arena_id_t arenaId) {
  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize
#ifdef MOZ_DEBUG
         && aInfo.arenaId == arenaId
#endif
      ;
}
bool InfoEqFreedPage(jemalloc_ptr_info_t& aInfo, void* aAddr, size_t aPageSize,
                     arena_id_t arenaId) {
  size_t pageSizeMask = aPageSize - 1;

  return jemalloc_ptr_is_freed_page(&aInfo) &&
         aInfo.addr == (void*)(uintptr_t(aAddr) & ~pageSizeMask) &&
         aInfo.size == aPageSize
#ifdef MOZ_DEBUG
         && aInfo.arenaId == arenaId
#endif
      ;
}
TEST(Jemalloc, PtrInfo) {
  arena_id_t arenaId = moz_create_arena();
  ASSERT_TRUE(arenaId != 0);

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  jemalloc_ptr_info_t info;
  Vector<char*> small, large, huge;
  // For small (less than half the page size) allocations, test every position
  // within many possible sizes.
  size_t small_max =
      stats.subpage_max ? stats.subpage_max : stats.quantum_wide_max;
  for (size_t n = 0; n <= small_max; n += 8) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(small.append(p));
    for (size_t j = 0; j < usable; j++) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }
  // Similar for large (small_max + 1 KiB .. 1MiB - 8KiB) allocations.
  for (size_t n = small_max + 1_KiB; n <= stats.large_max; n += 1_KiB) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(large.append(p));
    for (size_t j = 0; j < usable; j += 347) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }
  // Similar for huge (> 1MiB - 8KiB) allocations.
  for (size_t n = stats.chunksize; n <= 10_MiB; n += 512_KiB) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(huge.append(p));
    for (size_t j = 0; j < usable; j += 567) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }
  // The following loops check freed allocations. We step through the vectors
  // using prime-sized steps, which gives full coverage of the arrays while
  // avoiding deallocating in the same order we allocated.
  size_t len;

  // Free the small allocations and recheck them.
  int isFreedAlloc = 0, isFreedPage = 0;
  len = small.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 19) % len) {
    char* p = small[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k++) {
      jemalloc_ptr_info(&p[k], &info);
      // There are two valid outcomes here.
      if (InfoEq(info, TagFreedAlloc, p, usable, arenaId)) {
        isFreedAlloc++;
      } else if (InfoEqFreedPage(info, &p[k], stats.page_size, arenaId)) {
        isFreedPage++;
      } else {
        ASSERT_TRUE(false);
      }
    }
  }
  // There should be both FreedAlloc and FreedPage results, but a lot more of
  // the former.
  ASSERT_TRUE(isFreedAlloc != 0);
  ASSERT_TRUE(isFreedPage != 0);
  ASSERT_TRUE(isFreedAlloc / isFreedPage > 8);
  // Free the large allocations and recheck them.
  len = large.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 31) % len) {
    char* p = large[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k += 357) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size, arenaId));
    }
  }
  // Free the huge allocations and recheck them.
  len = huge.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 7) % len) {
    char* p = huge[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k += 587) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
    }
  }
  // Null pointer.
  jemalloc_ptr_info(nullptr, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Near-null pointer.
  jemalloc_ptr_info((void*)0x123, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Maximal pointer value.
  jemalloc_ptr_info((void*)uintptr_t(-1), &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Stack memory.
  int stackVar;
  jemalloc_ptr_info(&stackVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Code memory.
  jemalloc_ptr_info((const void*)&jemalloc_ptr_info, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Static memory.
  jemalloc_ptr_info(&gStaticVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  // Chunk header.
  UniquePtr<int> p = MakeUnique<int>();
  size_t chunksizeMask = stats.chunksize - 1;
  char* chunk = (char*)(uintptr_t(p.get()) & ~chunksizeMask);
  size_t chunkHeaderSize = stats.chunksize - stats.large_max - stats.page_size;
  for (size_t i = 0; i < chunkHeaderSize; i += 64) {
    jemalloc_ptr_info(&chunk[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  }
  // Run header.
  size_t page_sizeMask = stats.page_size - 1;
  char* run = (char*)(uintptr_t(p.get()) & ~page_sizeMask);
  for (size_t i = 0; i < 4 * sizeof(void*); i++) {
    jemalloc_ptr_info(&run[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  }

  // Entire chunk. It's impossible to check what is put into |info| for all of
  // these addresses; this is more about checking that we don't crash.
  for (size_t i = 0; i < stats.chunksize; i += 256) {
    jemalloc_ptr_info(&chunk[i], &info);
  }

  moz_dispose_arena(arenaId);
}
size_t sSizes[] = {1,      42,      79,      918,     1.4_KiB,
                   73_KiB, 129_KiB, 1.1_MiB, 2.6_MiB, 5.1_MiB};
TEST(Jemalloc, Arenas) {
  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);
  void* ptr = moz_arena_malloc(arena, 42);
  ASSERT_TRUE(ptr != nullptr);
  ptr = moz_arena_realloc(arena, ptr, 64);
  ASSERT_TRUE(ptr != nullptr);
  moz_arena_free(arena, ptr);
  ptr = moz_arena_calloc(arena, 24, 2);
  // For convenience, free can be used to free arena pointers.
  free(ptr);
  moz_dispose_arena(arena);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  // Can't use an arena after it's disposed.
  // ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 80), "");

  // Arena id 0 can't be used to somehow get to the main arena.
  ASSERT_DEATH_WRAP(moz_arena_malloc(0, 80), "");
  arena = moz_create_arena();
  arena_id_t arena2 = moz_create_arena();
  // Ensure arena2 is used to prevent OSX errors:
  Unused << arena2;

  // For convenience, realloc can also be used to reallocate arena pointers.
  // The result should be in the same arena. Test various size class
  // transitions.
  for (size_t from_size : sSizes) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      ptr = moz_arena_malloc(arena, from_size);
      ptr = realloc(ptr, to_size);
      // Freeing with the wrong arena should crash.
      ASSERT_DEATH_WRAP(moz_arena_free(arena2, ptr), "");
      // Likewise for moz_arena_realloc.
      ASSERT_DEATH_WRAP(moz_arena_realloc(arena2, ptr, from_size), "");
      // The following will crash if it's not in the right arena.
      moz_arena_free(arena, ptr);
    }
  }

  moz_dispose_arena(arena2);
  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
// Check that a buffer aPtr is entirely filled with a given character from
// aOffset to aSize. For faster comparison, the caller is required to fill a
// reference buffer with the wanted character, and give the size of that
// reference buffer.
static void bulk_compare(char* aPtr, size_t aOffset, size_t aSize,
                         char* aReference, size_t aReferenceSize) {
  for (size_t i = aOffset; i < aSize; i += aReferenceSize) {
    size_t length = std::min(aSize - i, aReferenceSize);
    if (memcmp(aPtr + i, aReference, length)) {
      // We got a mismatch; we now want to report more precisely where.
      for (size_t j = i; j < i + length; j++) {
        ASSERT_EQ(aPtr[j], *aReference);
      }
    }
  }
}
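
// A worked example of the helper above (illustrative only; the buffer names
// are hypothetical): with a 4 KiB reference buffer filled with the poison
// byte, bulk_compare(buf, 0, 8_KiB, poison_buf, 4_KiB) compares buf against
// the reference pattern in two 4 KiB slices and reports the first mismatching
// byte, if any.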
// A range iterator for size classes between two given values.
class SizeClassesBetween {
 public:
  SizeClassesBetween(size_t aStart, size_t aEnd) : mStart(aStart), mEnd(aEnd) {}

  class Iterator {
   public:
    explicit Iterator(size_t aValue) : mValue(malloc_good_size(aValue)) {}

    operator size_t() const { return mValue; }
    size_t operator*() const { return mValue; }
    Iterator& operator++() {
      mValue = malloc_good_size(mValue + 1);
      return *this;
    }

   private:
    size_t mValue;
  };

  Iterator begin() { return Iterator(mStart); }
  Iterator end() { return Iterator(mEnd); }

 private:
  size_t mStart, mEnd;
};
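
// A minimal usage sketch of the iterator above (a hypothetical helper added
// for illustration; it is not called by any test). The ranged-for visits each
// distinct size class that malloc_good_size() can return in the given range,
// since operator++ jumps straight to the next class.
[[maybe_unused]] static void SizeClassesBetweenUsageSketch() {
  for (size_t sizeClass : SizeClassesBetween(1, 16_KiB)) {
    Unused << sizeClass;
  }
}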
#define ALIGNMENT_CEILING(s, alignment) \
  (((s) + ((alignment)-1)) & (~((alignment)-1)))

#define ALIGNMENT_FLOOR(s, alignment) ((s) & (~((alignment)-1)))
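
// Compile-time sanity checks for the two macros above (added for
// illustration; both macros assume the alignment is a power of two).
static_assert(ALIGNMENT_CEILING(13, 8) == 16,
              "ALIGNMENT_CEILING rounds up to the next multiple");
static_assert(ALIGNMENT_CEILING(16, 8) == 16,
              "ALIGNMENT_CEILING keeps already-aligned values");
static_assert(ALIGNMENT_FLOOR(13, 8) == 8,
              "ALIGNMENT_FLOOR rounds down to the previous multiple");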
static bool IsSameRoundedHugeClass(size_t aSize1, size_t aSize2,
                                   jemalloc_stats_t& aStats) {
  return (aSize1 > aStats.large_max && aSize2 > aStats.large_max &&
          ALIGNMENT_CEILING(aSize1 + aStats.page_size, aStats.chunksize) ==
              ALIGNMENT_CEILING(aSize2 + aStats.page_size, aStats.chunksize));
}
static bool CanReallocInPlace(size_t aFromSize, size_t aToSize,
                              jemalloc_stats_t& aStats) {
  // PHC allocations must be disabled because PHC reallocs differently to
  // mozjemalloc.
#ifdef MOZ_PHC
  MOZ_RELEASE_ASSERT(!ReplaceMalloc::IsPHCEnabledOnCurrentThread());
#endif

  if (aFromSize == malloc_good_size(aToSize)) {
    // Same size class: in-place.
    return true;
  }
  if (aFromSize >= aStats.page_size && aFromSize <= aStats.large_max &&
      aToSize >= aStats.page_size && aToSize <= aStats.large_max) {
    // Any large class to any large class: in-place when there is space to.
    return true;
  }
  if (IsSameRoundedHugeClass(aFromSize, aToSize, aStats)) {
    // Huge sizes that round up to the same multiple of the chunk size:
    // in-place.
    return true;
  }
  return false;
}
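
// A worked example of the rules above (assuming mozjemalloc's common defaults
// of 4 KiB pages and a 1 MiB chunk size; the exact numbers vary by platform):
// reallocating an 8 KiB allocation to 12 KiB stays within the large classes,
// so it can happen in place when the following pages are free, while
// reallocating 100 bytes to 300 bytes moves between small size classes and
// cannot.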
TEST(Jemalloc, InPlace) {
  // Disable PHC allocations for this test, because CanReallocInPlace() isn't
  // valid for PHC allocations.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Using a separate arena, which is always emptied after an iteration,
  // ensures that in-place reallocation happens in all cases it can happen.
  // This test is intended for developers to notice they may have to adapt
  // other tests if they change the conditions for in-place reallocation.
  arena_id_t arena = moz_create_arena();

  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      char* ptr = (char*)moz_arena_malloc(arena, from_size);
      char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
      if (CanReallocInPlace(from_size, to_size, stats)) {
        EXPECT_EQ(ptr, ptr2);
      } else {
        EXPECT_NE(ptr, ptr2);
      }
      moz_arena_free(arena, ptr2);
    }
  }

  moz_dispose_arena(arena);
}
// Bug 1474254: disable this test for windows ccov builds because it leads to
// timeout.
#if !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)
TEST(Jemalloc, JunkPoison) {
  // Disable PHC allocations for this test, because CanReallocInPlace() isn't
  // valid for PHC allocations, and the testing UAFs aren't valid.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  // Create buffers in a separate arena, for faster comparisons with
  // bulk_compare.
  arena_id_t buf_arena = moz_create_arena();
  char* junk_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  // Depending on its configuration, the allocator will either fill the
  // requested allocation with the junk byte (0xe4) or with zeroes, or do
  // nothing, in which case, since we're allocating in a fresh arena,
  // we'll be getting zeroes.
  char junk = stats.opt_junk ? '\xe4' : '\0';
  for (size_t i = 0; i < stats.page_size; i++) {
    ASSERT_EQ(junk_buf[i], junk);
  }

  char* poison_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  memset(poison_buf, 0xe5, stats.page_size);

  static const char fill = 0x42;
  char* fill_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  memset(fill_buf, fill, stats.page_size);

  arena_params_t params;
  // Allow as many dirty pages in the arena as possible, so that purge never
  // happens in it. Purge breaks some of the tests below randomly depending on
  // what other things happen on other threads.
  params.mMaxDirty = size_t(-1);
  arena_id_t arena = moz_create_arena_with_params(&params);
  // Allocating should junk the buffer, and freeing should poison the buffer.
  for (size_t size : sSizes) {
    if (size <= stats.large_max) {
      SCOPED_TRACE(testing::Message() << "size = " << size);
      char* buf = (char*)moz_arena_malloc(arena, size);
      size_t allocated = moz_malloc_usable_size(buf);
      if (stats.opt_junk || stats.opt_zero) {
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(buf, 0, allocated, junk_buf, stats.page_size));
      }
      moz_arena_free(arena, buf);
      // We purposefully do a use-after-free here, to check that the data was
      // poisoned.
      ASSERT_NO_FATAL_FAILURE(
          bulk_compare(buf, 0, allocated, poison_buf, stats.page_size));
    }
  }
  // Shrinking in the same size class should be in place and poison between the
  // new allocation size and the old one.
  size_t prev = 0;
  for (size_t size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "size = " << size);
    SCOPED_TRACE(testing::Message() << "prev = " << prev);
    char* ptr = (char*)moz_arena_malloc(arena, size);
    memset(ptr, fill, moz_malloc_usable_size(ptr));
    char* ptr2 = (char*)moz_arena_realloc(arena, ptr, prev + 1);
    ASSERT_EQ(ptr, ptr2);
    ASSERT_NO_FATAL_FAILURE(
        bulk_compare(ptr, 0, prev + 1, fill_buf, stats.page_size));
    ASSERT_NO_FATAL_FAILURE(
        bulk_compare(ptr, prev + 1, size, poison_buf, stats.page_size));
    moz_arena_free(arena, ptr);
    prev = size;
  }
  // In-place realloc should junk the new bytes when growing and poison the old
  // bytes when shrinking.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      if (CanReallocInPlace(from_size, to_size, stats)) {
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, moz_malloc_usable_size(ptr));
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_EQ(ptr, ptr2);
        // Shrinking allocation
        if (from_size >= to_size) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, to_size, fill_buf, stats.page_size));
          // Huge allocations have guards and will crash when accessing
          // beyond the valid range.
          if (to_size > stats.large_max) {
            size_t page_limit = ALIGNMENT_CEILING(to_size, stats.page_size);
            ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, to_size, page_limit,
                                                 poison_buf, stats.page_size));
            ASSERT_DEATH_WRAP(ptr[page_limit] = 0, "");
          } else {
            ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, to_size, from_size,
                                                 poison_buf, stats.page_size));
          }
        } else {
          // Enlarging allocation
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, from_size, fill_buf, stats.page_size));
          if (stats.opt_junk || stats.opt_zero) {
            ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, from_size, to_size,
                                                 junk_buf, stats.page_size));
          }
          // Huge allocation, so should have a guard page following
          if (to_size > stats.large_max) {
            ASSERT_DEATH_WRAP(
                ptr[ALIGNMENT_CEILING(to_size, stats.page_size)] = 0, "");
          }
        }
        moz_arena_free(arena, ptr2);
      }
    }
  }
  // Growing to a different size class should poison the old allocation,
  // preserve the original bytes, and junk the new bytes in the new allocation.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      if (from_size < to_size && malloc_good_size(to_size) != from_size &&
          !IsSameRoundedHugeClass(from_size, to_size, stats)) {
        SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, moz_malloc_usable_size(ptr));
        // Avoid in-place realloc by allocating a buffer, expecting it to be
        // right after the buffer we just received. Buffers smaller than the
        // page size and exactly or larger than the size of the largest large
        // size class can't be reallocated in-place.
        char* avoid_inplace = nullptr;
        if (from_size >= stats.page_size && from_size < stats.large_max) {
          avoid_inplace = (char*)moz_arena_malloc(arena, stats.page_size);
          ASSERT_EQ(ptr + from_size, avoid_inplace);
        }
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_NE(ptr, ptr2);
        if (from_size <= stats.large_max) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, from_size, poison_buf, stats.page_size));
        }
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(ptr2, 0, from_size, fill_buf, stats.page_size));
        if (stats.opt_junk || stats.opt_zero) {
          size_t rounded_to_size = malloc_good_size(to_size);
          ASSERT_NE(to_size, rounded_to_size);
          ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
                                               junk_buf, stats.page_size));
        }
        moz_arena_free(arena, ptr2);
        moz_arena_free(arena, avoid_inplace);
      }
    }
  }
  // Shrinking to a different size class should poison the old allocation,
  // preserve the original bytes, and junk the extra bytes in the new
  // allocation.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      if (from_size > to_size &&
          !CanReallocInPlace(from_size, to_size, stats)) {
        SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, from_size);
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_NE(ptr, ptr2);
        if (from_size <= stats.large_max) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, from_size, poison_buf, stats.page_size));
        }
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(ptr2, 0, to_size, fill_buf, stats.page_size));
        if (stats.opt_junk || stats.opt_zero) {
          size_t rounded_to_size = malloc_good_size(to_size);
          ASSERT_NE(to_size, rounded_to_size);
          ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
                                               junk_buf, stats.page_size));
        }
        moz_arena_free(arena, ptr2);
      }
    }
  }

  moz_dispose_arena(arena);

  moz_arena_free(buf_arena, poison_buf);
  moz_arena_free(buf_arena, junk_buf);
  moz_arena_free(buf_arena, fill_buf);
  moz_dispose_arena(buf_arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
#endif  // !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)
TEST(Jemalloc, TrailingGuard) {
  // Disable PHC allocations for this test, because even a single PHC
  // allocation occurring can throw it off.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);

  // Do enough large allocations to fill a chunk, and then one additional one,
  // and check that the guard page is still present after the one-but-last
  // allocation, i.e. that we didn't allocate the guard.
  Vector<void*> ptr_list;
  for (size_t cnt = 0; cnt < stats.large_max / stats.page_size; cnt++) {
    void* ptr = moz_arena_malloc(arena, stats.page_size);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_TRUE(ptr_list.append(ptr));
  }

  void* last_ptr_in_chunk = ptr_list[ptr_list.length() - 1];
  void* extra_ptr = moz_arena_malloc(arena, stats.page_size);
  void* guard_page = (void*)ALIGNMENT_CEILING(
      (uintptr_t)last_ptr_in_chunk + stats.page_size, stats.page_size);
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(guard_page, &info);
  ASSERT_TRUE(jemalloc_ptr_is_freed_page(&info));

  ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");

  for (void* ptr : ptr_list) {
    moz_arena_free(arena, ptr);
  }
  moz_arena_free(arena, extra_ptr);

  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
TEST(Jemalloc, LeadingGuard) {
  // Disable PHC allocations for this test, because even a single PHC
  // allocation occurring can throw it off.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);

  // Do a simple normal allocation, but force all the allocation space
  // in the chunk to be used up. This allows us to check that we get
  // the safe area right in the logic that follows (all memory will be
  // committed and initialized), and it forces this pointer to the start
  // of the zone to sit at the very start of the usable chunk area.
  void* ptr = moz_arena_malloc(arena, stats.large_max);
  ASSERT_TRUE(ptr != nullptr);
  // If ptr is chunk-aligned, the above allocation went wrong.
  void* chunk_start = (void*)ALIGNMENT_FLOOR((uintptr_t)ptr, stats.chunksize);
  ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start);
  // If ptr is 1 page after the chunk start (so right after the header),
  // we must have missed adding the guard page.
  ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start + stats.page_size);
  // The actual start depends on the amount of metadata versus the page
  // size, so we can't check equality without pulling in too many
  // implementation details.

  // The guard page should be right before the data area.
  void* guard_page = (void*)(((uintptr_t)ptr) - sizeof(void*));
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(guard_page, &info);
  ASSERT_TRUE(info.tag == TagUnknown);
  ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");

  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
TEST(Jemalloc, DisposeArena) {
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  arena_id_t arena = moz_create_arena();
  void* ptr = moz_arena_malloc(arena, 42);
  // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  arena = moz_create_arena();
  ptr = moz_arena_malloc(arena, stats.page_size * 2);
  // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  arena = moz_create_arena();
  ptr = moz_arena_malloc(arena, stats.chunksize * 2);
#ifdef MOZ_DEBUG
  // On debug builds, we do the expensive check that arenas are empty.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);
#else
  // Currently, the allocator can't trivially check whether the arena is empty
  // of huge allocations, so disposing of it works.
  moz_dispose_arena(arena);
  // But trying to free a pointer that belongs to it will MOZ_CRASH.
  ASSERT_DEATH_WRAP(free(ptr), "");
  // Likewise for realloc.
  ASSERT_DEATH_WRAP(ptr = realloc(ptr, stats.chunksize * 3), "");
#endif

  // Using the arena after it's been disposed of is MOZ_CRASH-worthy.
  ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 42), "");

  RESTORE_GDB_SLEEP_LOCAL();
}