//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <string.h>
#include <pthread.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;
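// Allocator32Compact covers the whole kAddressSpaceSize range, keeping one
// FlatByteMap byte per 2^kRegionSizeLog-byte region.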

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

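// Generic smoke test for a SizeClassAllocator: allocate chunks of many sizes,
// check GetBlockBegin()/PointerIsMine()/metadata round-trips, and verify that
// total memory usage stays stable across repeated allocate/free cycles.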
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all chunks and check their metadata.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

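// Allocates a batch of chunks up front, then queries GetMetaData() kNumAllocs^2
// times and checks that the returned metadata pointers never change.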
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64

TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

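// Repeatedly allocates the largest size class to check that GetBlockBegin()
// is computed without overflow.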
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64

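// Counts OnMap/OnUnmap callbacks so the tests below can observe exactly when
// the allocators map and release memory.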
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

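// Keeps allocating batches until the allocator exhausts its address range;
// used below with EXPECT_DEATH to check the out-of-memory report.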
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all.
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

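// Exercises CombinedAllocator (primary + secondary + per-thread cache): first
// the allocator_may_return_null behavior for oversized requests, then a
// randomized allocate/free stress loop with metadata checks.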
template <class PrimaryAllocator, class SecondaryAllocator,
          class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  bool allocator_may_return_null = common_flags()->allocator_may_return_null;
  common_flags()->allocator_may_return_null = true;
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  common_flags()->allocator_may_return_null = false;
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");
  // Restore the original value.
  common_flags()->allocator_may_return_null = allocator_may_return_null;

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)  // Mix in some larger power-of-two sizes.
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

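// Checks that repeatedly allocating and deallocating through a local cache
// does not keep growing the allocator's total memory use.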
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

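// Worker for AllocatorLeakTest below: allocates from the static cache on a
// fresh thread and drains the cache back to the allocator before exiting.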
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(
      params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif  // SANITIZER_WORDSIZE == 64

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, InternalAllocFailure) {
  EXPECT_DEATH(Ident(InternalAlloc(10 << 20)),
               "Unexpected mmap in InternalAllocator!");
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

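// Allocates a mix of chunk sizes and then verifies that ForEachChunk() reports
// every chunk that was allocated.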
template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

#if SANITIZER_WORDSIZE == 64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
      SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");

  a->TestOnlyUnmap();
  delete a;
}
#endif

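// TwoLevelByteMap tests: basic set/get coverage first, then a multi-threaded
// writer test that also checks the map/unmap callback counts.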
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}

typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    EXPECT_EQ(0, pthread_create(&t[i], 0, TwoLevelByteMapUserThread, &p[i]));
  }
  for (int i = 0; i < kNumThreads; i++) {
    EXPECT_EQ(0, pthread_join(t[i], 0));
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if TSAN_DEBUG == 0