/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/Literals.h"
#include "mozilla/mozalloc.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/Unused.h"
#include "mozilla/Vector.h"
#include "mozilla/gtest/MozHelpers.h"
#include "mozmemory.h"

#include "gtest/gtest.h"

using namespace mozilla;

class AutoDisablePHCOnCurrentThread {
 public:
  AutoDisablePHCOnCurrentThread() {
#ifdef MOZ_PHC
    mozilla::phc::DisablePHCOnCurrentThread();
#endif
  }

  ~AutoDisablePHCOnCurrentThread() {
#ifdef MOZ_PHC
    mozilla::phc::ReenablePHCOnCurrentThread();
#endif
  }
};
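
// The class above is a small RAII guard: while an instance is alive, PHC (the
// probabilistic heap checker) is asked not to intercept allocations made on
// the current thread, and interception is re-enabled on destruction. A
// minimal, purely illustrative usage sketch:
//
//   {
//     AutoDisablePHCOnCurrentThread disable;
//     void* p = malloc(64);  // served by mozjemalloc without PHC sampling
//     free(p);
//   }  // PHC is re-enabled here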

static inline void TestOne(size_t size) {
  size_t req = size;
  size_t adv = malloc_good_size(req);
  char* p = (char*)malloc(req);
  size_t usable = moz_malloc_usable_size(p);
  // NB: Using EXPECT here so that we still free the memory on failure.
  EXPECT_EQ(adv, usable) << "malloc_good_size(" << req << ") --> " << adv
                         << "; malloc_usable_size(" << req << ") --> "
                         << usable;
  free(p);
}

static inline void TestThree(size_t size) {
  ASSERT_NO_FATAL_FAILURE(TestOne(size - 1));
  ASSERT_NO_FATAL_FAILURE(TestOne(size));
  ASSERT_NO_FATAL_FAILURE(TestOne(size + 1));
}
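
// For context: malloc_good_size(n) reports the size class that a request of n
// bytes would be rounded up to, so TestOne() asserts that this advance answer
// matches moz_malloc_usable_size() on an actual allocation. As a rough,
// illustrative example (the exact size classes depend on how the allocator is
// configured), a 100-byte request is typically rounded up to a slightly
// larger quantum-spaced class rather than being served exactly.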

TEST(Jemalloc, UsableSizeInAdvance)
{
  /*
   * Test every size up to a certain point, then (N-1, N, N+1) triplets for
   * various sizes beyond that.
   */

  for (size_t n = 0; n < 16_KiB; n++) ASSERT_NO_FATAL_FAILURE(TestOne(n));

  for (size_t n = 16_KiB; n < 1_MiB; n += 4_KiB)
    ASSERT_NO_FATAL_FAILURE(TestThree(n));

  for (size_t n = 1_MiB; n < 8_MiB; n += 128_KiB)
    ASSERT_NO_FATAL_FAILURE(TestThree(n));
}

static int gStaticVar;

bool InfoEq(jemalloc_ptr_info_t& aInfo, PtrInfoTag aTag, void* aAddr,
            size_t aSize, arena_id_t arenaId) {
  return aInfo.tag == aTag && aInfo.addr == aAddr && aInfo.size == aSize &&
         aInfo.arenaId == arenaId;
}

bool InfoEqFreedPage(jemalloc_ptr_info_t& aInfo, void* aAddr, size_t aPageSize,
                     arena_id_t arenaId) {
  size_t pageSizeMask = aPageSize - 1;

  return jemalloc_ptr_is_freed_page(&aInfo) &&
         aInfo.addr == (void*)(uintptr_t(aAddr) & ~pageSizeMask) &&
         aInfo.size == aPageSize && aInfo.arenaId == arenaId;
}

TEST(Jemalloc, PtrInfo)
{
  arena_id_t arenaId = moz_create_arena();
  ASSERT_TRUE(arenaId != 0);

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  jemalloc_ptr_info_t info;
  Vector<char*> small, large, huge;

  // For small (less than half the page size) allocations, test every position
  // within many possible sizes.
  size_t small_max =
      stats.subpage_max ? stats.subpage_max : stats.quantum_wide_max;
  for (size_t n = 0; n <= small_max; n += 8) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(small.append(p));
    for (size_t j = 0; j < usable; j++) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // Similar for large (small_max + 1 KiB .. 1MiB - 8KiB) allocations.
  for (size_t n = small_max + 1_KiB; n <= stats.large_max; n += 1_KiB) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(large.append(p));
    for (size_t j = 0; j < usable; j += 347) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // Similar for huge (> 1MiB - 8KiB) allocations.
  for (size_t n = stats.chunksize; n <= 10_MiB; n += 512_KiB) {
    auto p = (char*)moz_arena_malloc(arenaId, n);
    size_t usable = moz_malloc_size_of(p);
    ASSERT_TRUE(huge.append(p));
    for (size_t j = 0; j < usable; j += 567) {
      jemalloc_ptr_info(&p[j], &info);
      ASSERT_TRUE(InfoEq(info, TagLiveAlloc, p, usable, arenaId));
    }
  }

  // The following loops check freed allocations. We step through the vectors
  // using prime-sized steps, which gives full coverage of the arrays while
  // avoiding deallocating in the same order we allocated.
  size_t len;

  // Free the small allocations and recheck them.
  int isFreedAlloc = 0, isFreedPage = 0;
  len = small.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 19) % len) {
    char* p = small[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k++) {
      jemalloc_ptr_info(&p[k], &info);
      // There are two valid outcomes here.
      if (InfoEq(info, TagFreedAlloc, p, usable, arenaId)) {
        isFreedAlloc++;
      } else if (InfoEqFreedPage(info, &p[k], stats.page_size, arenaId)) {
        isFreedPage++;
      } else {
        ASSERT_TRUE(false);
      }
    }
  }
  // There should be both FreedAlloc and FreedPage results, but a lot more of
  // the former.
  ASSERT_TRUE(isFreedAlloc != 0);
  ASSERT_TRUE(isFreedPage != 0);
  ASSERT_TRUE(isFreedAlloc / isFreedPage > 8);

  // Free the large allocations and recheck them.
  len = large.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 31) % len) {
    char* p = large[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k += 357) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEqFreedPage(info, &p[k], stats.page_size, arenaId));
    }
  }

  // Free the huge allocations and recheck them.
  len = huge.length();
  for (size_t i = 0, j = 0; i < len; i++, j = (j + 7) % len) {
    char* p = huge[j];
    size_t usable = moz_malloc_size_of(p);
    free(p);
    for (size_t k = 0; k < usable; k += 587) {
      jemalloc_ptr_info(&p[k], &info);
      ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
    }
  }

  // Null ptr.
  jemalloc_ptr_info(nullptr, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Near-null ptr.
  jemalloc_ptr_info((void*)0x123, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Maximal address.
  jemalloc_ptr_info((void*)uintptr_t(-1), &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Stack memory.
  int stackVar;
  jemalloc_ptr_info(&stackVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Code memory.
  jemalloc_ptr_info((const void*)&jemalloc_ptr_info, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Static memory.
  jemalloc_ptr_info(&gStaticVar, &info);
  ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));

  // Chunk header.
  UniquePtr<int> p = MakeUnique<int>();
  size_t chunksizeMask = stats.chunksize - 1;
  char* chunk = (char*)(uintptr_t(p.get()) & ~chunksizeMask);
  size_t chunkHeaderSize = stats.chunksize - stats.large_max - stats.page_size;
  for (size_t i = 0; i < chunkHeaderSize; i += 64) {
    jemalloc_ptr_info(&chunk[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  }

  // Run header.
  size_t page_sizeMask = stats.page_size - 1;
  char* run = (char*)(uintptr_t(p.get()) & ~page_sizeMask);
  for (size_t i = 0; i < 4 * sizeof(void*); i++) {
    jemalloc_ptr_info(&run[i], &info);
    ASSERT_TRUE(InfoEq(info, TagUnknown, nullptr, 0U, 0U));
  }

  // Entire chunk. It's impossible to check what is put into |info| for all of
  // these addresses; this is more about checking that we don't crash.
  for (size_t i = 0; i < stats.chunksize; i += 256) {
    jemalloc_ptr_info(&chunk[i], &info);
  }

  moz_dispose_arena(arenaId);
}

size_t sSizes[] = {1,      42,      79,      918,     1.4_KiB,
                   73_KiB, 129_KiB, 1.1_MiB, 2.6_MiB, 5.1_MiB};

TEST(Jemalloc, Arenas)
{
  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);
  void* ptr = moz_arena_malloc(arena, 42);
  ASSERT_TRUE(ptr != nullptr);
  ptr = moz_arena_realloc(arena, ptr, 64);
  ASSERT_TRUE(ptr != nullptr);
  moz_arena_free(arena, ptr);
  ptr = moz_arena_calloc(arena, 24, 2);
  // For convenience, free can be used to free arena pointers.
  free(ptr);
  moz_dispose_arena(arena);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  // Can't use an arena after it's disposed.
  // ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 80), "");

  // Arena id 0 can't be used to somehow get to the main arena.
  ASSERT_DEATH_WRAP(moz_arena_malloc(0, 80), "");

  arena = moz_create_arena();
  arena_id_t arena2 = moz_create_arena();
  // Ensure arena2 is used to prevent OSX errors:
  Unused << moz_arena_malloc(arena2, 42);

  // For convenience, realloc can also be used to reallocate arena pointers.
  // The result should be in the same arena. Test various size class
  // transitions.
  for (size_t from_size : sSizes) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      ptr = moz_arena_malloc(arena, from_size);
      ptr = realloc(ptr, to_size);
      // Freeing with the wrong arena should crash.
      ASSERT_DEATH_WRAP(moz_arena_free(arena2, ptr), "");
      // Likewise for moz_arena_realloc.
      ASSERT_DEATH_WRAP(moz_arena_realloc(arena2, ptr, from_size), "");
      // The following will crash if it's not in the right arena.
      moz_arena_free(arena, ptr);
    }
  }

  moz_dispose_arena(arena2);
  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}

// Check that a buffer aPtr is entirely filled with a given character from
// aOffset to aSize. For faster comparison, the caller is required to fill a
// reference buffer with the wanted character, and give the size of that
// reference buffer.
static void bulk_compare(char* aPtr, size_t aOffset, size_t aSize,
                         char* aReference, size_t aReferenceSize) {
  for (size_t i = aOffset; i < aSize; i += aReferenceSize) {
    size_t length = std::min(aSize - i, aReferenceSize);
    if (memcmp(aPtr + i, aReference, length)) {
      // We got a mismatch, we now want to report more precisely where.
      for (size_t j = i; j < i + length; j++) {
        ASSERT_EQ(aPtr[j], *aReference);
      }
    }
  }
}
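
// Illustrative usage, mirroring the calls in the tests below: to check that
// the first `len` bytes of `buf` hold the poison byte, compare against a
// page-sized reference buffer that was memset with that byte, e.g.
//   bulk_compare(buf, 0, len, poison_buf, stats.page_size);
// The reference buffer only needs to cover one comparison window
// (aReferenceSize bytes), not the whole range being checked.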

// A range iterator for size classes between two given values.
class SizeClassesBetween {
 public:
  SizeClassesBetween(size_t aStart, size_t aEnd) : mStart(aStart), mEnd(aEnd) {}

  class Iterator {
   public:
    explicit Iterator(size_t aValue) : mValue(malloc_good_size(aValue)) {}

    operator size_t() const { return mValue; }
    size_t operator*() const { return mValue; }
    Iterator& operator++() {
      mValue = malloc_good_size(mValue + 1);
      return *this;
    }

   private:
    size_t mValue;
  };

  Iterator begin() { return Iterator(mStart); }
  Iterator end() { return Iterator(mEnd); }

 private:
  size_t mStart, mEnd;
};
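
// Illustrative usage: a range-for over SizeClassesBetween(1, stats.page_size)
// visits each distinct value malloc_good_size() can return in that range,
// because operator++ jumps straight to the next size class instead of the
// next integer. This keeps the loops below proportional to the number of
// size classes rather than to the number of byte sizes.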

#define ALIGNMENT_CEILING(s, alignment) \
  (((s) + ((alignment)-1)) & (~((alignment)-1)))

#define ALIGNMENT_FLOOR(s, alignment) ((s) & (~((alignment)-1)))
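
// Both macros assume `alignment` is a power of two. A worked example with
// illustrative values (not tied to the real page or chunk size): rounding
// 5000 up to a 4096-byte boundary gives 8192, and rounding it down gives
// 4096. The static_asserts below merely document that arithmetic and compile
// away entirely.
static_assert(ALIGNMENT_CEILING(5000, 4096) == 8192,
              "5000 rounded up to a 4096 boundary is 8192");
static_assert(ALIGNMENT_FLOOR(5000, 4096) == 4096,
              "5000 rounded down to a 4096 boundary is 4096");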

static bool IsSameRoundedHugeClass(size_t aSize1, size_t aSize2,
                                   jemalloc_stats_t& aStats) {
  return (aSize1 > aStats.large_max && aSize2 > aStats.large_max &&
          ALIGNMENT_CEILING(aSize1 + aStats.page_size, aStats.chunksize) ==
              ALIGNMENT_CEILING(aSize2 + aStats.page_size, aStats.chunksize));
}
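
// Put differently: two huge requests fall in the same rounded class when,
// after accounting for the guard page and rounding up to whole chunks, they
// need the same number of chunks. As a hypothetical example with 1 MiB chunks
// and 4 KiB pages, requests of 1.2 MiB and 1.5 MiB both round up to 2 MiB of
// chunk space, so a realloc between them can stay in place.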

static bool CanReallocInPlace(size_t aFromSize, size_t aToSize,
                              jemalloc_stats_t& aStats) {
  // PHC allocations must be disabled because PHC reallocs differently to
  // mozjemalloc.
#ifdef MOZ_PHC
  MOZ_RELEASE_ASSERT(!mozilla::phc::IsPHCEnabledOnCurrentThread());
#endif

  if (aFromSize == malloc_good_size(aToSize)) {
    // Same size class: in-place.
    return true;
  }
  if (aFromSize >= aStats.page_size && aFromSize <= aStats.large_max &&
      aToSize >= aStats.page_size && aToSize <= aStats.large_max) {
    // Any large class to any large class: in-place when there is space to.
    return true;
  }
  if (IsSameRoundedHugeClass(aFromSize, aToSize, aStats)) {
    // Huge sizes that round up to the same multiple of the chunk size:
    // in-place.
    return true;
  }
  return false;
}

TEST(Jemalloc, InPlace)
{
  // Disable PHC allocations for this test, because CanReallocInPlace() isn't
  // valid for PHC allocations.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Using a separate arena, which is always emptied after an iteration,
  // ensures that in-place reallocation happens in all cases it can happen.
  // This test is intended for developers to notice they may have to adapt
  // other tests if they change the conditions for in-place reallocation.
  arena_id_t arena = moz_create_arena();

  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      char* ptr = (char*)moz_arena_malloc(arena, from_size);
      char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
      if (CanReallocInPlace(from_size, to_size, stats)) {
        EXPECT_EQ(ptr, ptr2);
      } else {
        EXPECT_NE(ptr, ptr2);
      }
      moz_arena_free(arena, ptr2);
    }
  }

  moz_dispose_arena(arena);
}

// Bug 1474254: disable this test for windows ccov builds because it leads to
// timeout.
#if !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)
TEST(Jemalloc, JunkPoison)
{
  // Disable PHC allocations for this test, because CanReallocInPlace() isn't
  // valid for PHC allocations, and the testing UAFs aren't valid.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  // Create buffers in a separate arena, for faster comparisons with
  // bulk_compare().
  arena_id_t buf_arena = moz_create_arena();
  char* junk_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  // Depending on its configuration, the allocator will either fill the
  // requested allocation with the junk byte (0xe4) or with zeroes, or do
  // nothing, in which case, since we're allocating in a fresh arena,
  // we'll be getting zeroes.
  char junk = stats.opt_junk ? '\xe4' : '\0';
  for (size_t i = 0; i < stats.page_size; i++) {
    ASSERT_EQ(junk_buf[i], junk);
  }

  char* poison_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  memset(poison_buf, 0xe5, stats.page_size);

  static const char fill = 0x42;
  char* fill_buf = (char*)moz_arena_malloc(buf_arena, stats.page_size);
  memset(fill_buf, fill, stats.page_size);

  arena_params_t params;
  // Allow as many dirty pages in the arena as possible, so that purge never
  // happens in it. Purge breaks some of the tests below randomly depending on
  // what other things happen on other threads.
  params.mMaxDirty = size_t(-1);
  arena_id_t arena = moz_create_arena_with_params(&params);

  // Mozjemalloc is configured to only poison the first four cache lines.
  const size_t poison_check_len = 256;

  // Allocating should junk the buffer, and freeing should poison the buffer.
  for (size_t size : sSizes) {
    if (size <= stats.large_max) {
      SCOPED_TRACE(testing::Message() << "size = " << size);
      char* buf = (char*)moz_arena_malloc(arena, size);
      size_t allocated = moz_malloc_usable_size(buf);
      if (stats.opt_junk || stats.opt_zero) {
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(buf, 0, allocated, junk_buf, stats.page_size));
      }
      moz_arena_free(arena, buf);
      // We purposefully do a use-after-free here, to check that the data was
      // poisoned.
      ASSERT_NO_FATAL_FAILURE(
          bulk_compare(buf, 0, std::min(allocated, poison_check_len),
                       poison_buf, stats.page_size));
    }
  }

  // Shrinking in the same size class should be in place and poison between the
  // new allocation size and the old one.
  size_t prev = 0;
  for (size_t size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "size = " << size);
    SCOPED_TRACE(testing::Message() << "prev = " << prev);
    char* ptr = (char*)moz_arena_malloc(arena, size);
    memset(ptr, fill, moz_malloc_usable_size(ptr));
    char* ptr2 = (char*)moz_arena_realloc(arena, ptr, prev + 1);
    ASSERT_EQ(ptr, ptr2);
    ASSERT_NO_FATAL_FAILURE(
        bulk_compare(ptr, 0, prev + 1, fill_buf, stats.page_size));
    ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, prev + 1,
                                         std::min(size, poison_check_len),
                                         poison_buf, stats.page_size));
    moz_arena_free(arena, ptr);
    prev = size;
  }

  // In-place realloc should junk the new bytes when growing and poison the old
  // bytes when shrinking.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
      if (CanReallocInPlace(from_size, to_size, stats)) {
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, moz_malloc_usable_size(ptr));
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_EQ(ptr, ptr2);
        // Shrinking allocation.
        if (from_size >= to_size) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, to_size, fill_buf, stats.page_size));
          // Huge allocations have guards and will crash when accessing
          // beyond the valid range.
          if (to_size > stats.large_max) {
            size_t page_limit = ALIGNMENT_CEILING(to_size, stats.page_size);
            ASSERT_NO_FATAL_FAILURE(bulk_compare(
                ptr, to_size, std::min(page_limit, poison_check_len),
                poison_buf, stats.page_size));
            ASSERT_DEATH_WRAP(ptr[page_limit] = 0, "");
          } else {
            ASSERT_NO_FATAL_FAILURE(bulk_compare(
                ptr, to_size, std::min(from_size, poison_check_len),
                poison_buf, stats.page_size));
          }
        } else {
          // Enlarging allocation.
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, from_size, fill_buf, stats.page_size));
          if (stats.opt_junk || stats.opt_zero) {
            ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr, from_size, to_size,
                                                 junk_buf, stats.page_size));
          }
          // Huge allocation, so should have a guard page following.
          if (to_size > stats.large_max) {
            ASSERT_DEATH_WRAP(
                ptr[ALIGNMENT_CEILING(to_size, stats.page_size)] = 0, "");
          }
        }
        moz_arena_free(arena, ptr2);
      }
    }
  }

  // Growing to a different size class should poison the old allocation,
  // preserve the original bytes, and junk the new bytes in the new allocation.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      if (from_size < to_size && malloc_good_size(to_size) != from_size &&
          !IsSameRoundedHugeClass(from_size, to_size, stats)) {
        SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, moz_malloc_usable_size(ptr));
        // Avoid in-place realloc by allocating a buffer, expecting it to be
        // right after the buffer we just received. Buffers smaller than the
        // page size and exactly or larger than the size of the largest large
        // size class can't be reallocated in-place.
        char* avoid_inplace = nullptr;
        if (from_size >= stats.page_size && from_size < stats.large_max) {
          avoid_inplace = (char*)moz_arena_malloc(arena, stats.page_size);
          ASSERT_EQ(ptr + from_size, avoid_inplace);
        }
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_NE(ptr, ptr2);
        if (from_size <= stats.large_max) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, std::min(from_size, poison_check_len),
                           poison_buf, stats.page_size));
        }
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(ptr2, 0, from_size, fill_buf, stats.page_size));
        if (stats.opt_junk || stats.opt_zero) {
          size_t rounded_to_size = malloc_good_size(to_size);
          ASSERT_NE(to_size, rounded_to_size);
          ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
                                               junk_buf, stats.page_size));
        }
        moz_arena_free(arena, ptr2);
        moz_arena_free(arena, avoid_inplace);
      }
    }
  }

  // Shrinking to a different size class should poison the old allocation,
  // preserve the original bytes, and junk the extra bytes in the new
  // allocation.
  for (size_t from_size : SizeClassesBetween(1, 2 * stats.chunksize)) {
    SCOPED_TRACE(testing::Message() << "from_size = " << from_size);
    for (size_t to_size : sSizes) {
      if (from_size > to_size &&
          !CanReallocInPlace(from_size, to_size, stats)) {
        SCOPED_TRACE(testing::Message() << "to_size = " << to_size);
        char* ptr = (char*)moz_arena_malloc(arena, from_size);
        memset(ptr, fill, from_size);
        char* ptr2 = (char*)moz_arena_realloc(arena, ptr, to_size);
        ASSERT_NE(ptr, ptr2);
        if (from_size <= stats.large_max) {
          ASSERT_NO_FATAL_FAILURE(
              bulk_compare(ptr, 0, std::min(from_size, poison_check_len),
                           poison_buf, stats.page_size));
        }
        ASSERT_NO_FATAL_FAILURE(
            bulk_compare(ptr2, 0, to_size, fill_buf, stats.page_size));
        if (stats.opt_junk || stats.opt_zero) {
          size_t rounded_to_size = malloc_good_size(to_size);
          ASSERT_NE(to_size, rounded_to_size);
          ASSERT_NO_FATAL_FAILURE(bulk_compare(ptr2, from_size, rounded_to_size,
                                               junk_buf, stats.page_size));
        }
        moz_arena_free(arena, ptr2);
      }
    }
  }

  moz_dispose_arena(arena);

  moz_arena_free(buf_arena, poison_buf);
  moz_arena_free(buf_arena, junk_buf);
  moz_arena_free(buf_arena, fill_buf);
  moz_dispose_arena(buf_arena);

  RESTORE_GDB_SLEEP_LOCAL();
}
#endif  // !defined(XP_WIN) || !defined(MOZ_CODE_COVERAGE)

TEST(Jemalloc, TrailingGuard)
{
  // Disable PHC allocations for this test, because even a single PHC
  // allocation occurring can throw it off.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);

  // Do enough large allocations to fill a chunk, and then one additional one,
  // and check that the guard page is still present after the one-but-last
  // allocation, i.e. that we didn't allocate the guard.
  Vector<void*> ptr_list;
  for (size_t cnt = 0; cnt < stats.large_max / stats.page_size; cnt++) {
    void* ptr = moz_arena_malloc(arena, stats.page_size);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_TRUE(ptr_list.append(ptr));
  }

  void* last_ptr_in_chunk = ptr_list[ptr_list.length() - 1];
  void* extra_ptr = moz_arena_malloc(arena, stats.page_size);
  void* guard_page = (void*)ALIGNMENT_CEILING(
      (uintptr_t)last_ptr_in_chunk + stats.page_size, stats.page_size);
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(guard_page, &info);
  ASSERT_TRUE(jemalloc_ptr_is_freed_page(&info));

  ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");

  for (void* ptr : ptr_list) {
    moz_arena_free(arena, ptr);
  }
  moz_arena_free(arena, extra_ptr);

  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}

TEST(Jemalloc, LeadingGuard)
{
  // Disable PHC allocations for this test, because even a single PHC
  // allocation occurring can throw it off.
  AutoDisablePHCOnCurrentThread disable;

  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  arena_id_t arena = moz_create_arena();
  ASSERT_TRUE(arena != 0);

  // Do a simple normal allocation, but force all the allocation space
  // in the chunk to be used up. This allows us to check that we get
  // the safe area right in the logic that follows (all memory will be
  // committed and initialized), and it forces this pointer to the start
  // of the zone to sit at the very start of the usable chunk area.
  void* ptr = moz_arena_malloc(arena, stats.large_max);
  ASSERT_TRUE(ptr != nullptr);
  // If ptr is chunk-aligned, the above allocation went wrong.
  void* chunk_start = (void*)ALIGNMENT_FLOOR((uintptr_t)ptr, stats.chunksize);
  ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start);
  // If ptr is 1 page after the chunk start (so right after the header),
  // we must have missed adding the guard page.
  ASSERT_NE((uintptr_t)ptr, (uintptr_t)chunk_start + stats.page_size);
  // The actual start depends on the amount of metadata versus the page
  // size, so we can't check equality without pulling in too many
  // implementation details.

  // Guard page should be right before data area.
  void* guard_page = (void*)(((uintptr_t)ptr) - sizeof(void*));
  jemalloc_ptr_info_t info;
  jemalloc_ptr_info(guard_page, &info);
  ASSERT_TRUE(info.tag == TagUnknown);
  ASSERT_DEATH_WRAP(*(char*)guard_page = 0, "");

  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  RESTORE_GDB_SLEEP_LOCAL();
}

TEST(Jemalloc, DisposeArena)
{
  jemalloc_stats_t stats;
  jemalloc_stats(&stats);

  // Avoid death tests adding some unnecessary (long) delays.
  SAVE_GDB_SLEEP_LOCAL();

  arena_id_t arena = moz_create_arena();
  void* ptr = moz_arena_malloc(arena, 42);
  // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  arena = moz_create_arena();
  ptr = moz_arena_malloc(arena, stats.page_size * 2);
  // Disposing of the arena when it's not empty is a MOZ_CRASH-worthy error.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);

  arena = moz_create_arena();
  ptr = moz_arena_malloc(arena, stats.chunksize * 2);
#ifdef MOZ_DEBUG
  // On debug builds, we do the expensive check that arenas are empty.
  ASSERT_DEATH_WRAP(moz_dispose_arena(arena), "");
  moz_arena_free(arena, ptr);
  moz_dispose_arena(arena);
#else
  // Currently, the allocator can't trivially check whether the arena is empty
  // of huge allocations, so disposing of it works.
  moz_dispose_arena(arena);
  // But trying to free a pointer that belongs to it will MOZ_CRASH.
  ASSERT_DEATH_WRAP(free(ptr), "");
  // Likewise for realloc.
  ASSERT_DEATH_WRAP(ptr = realloc(ptr, stats.chunksize * 3), "");
#endif

  // Using the arena after it's been disposed of is MOZ_CRASH-worthy.
  ASSERT_DEATH_WRAP(moz_arena_malloc(arena, 42), "");

  RESTORE_GDB_SLEEP_LOCAL();
}