// Copyright (c) 2012-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "util.h"

#include "support/allocators/secure.h"
#include "test/test_bitcoin.h"

#include <boost/test/unit_test.hpp>

BOOST_FIXTURE_TEST_SUITE(allocator_tests, BasicTestingSetup)

BOOST_AUTO_TEST_CASE(arena_tests)
{
    // Fake memory base address for testing
    // without actually using memory.
    void *synth_base = reinterpret_cast<void*>(0x08000000);
    const size_t synth_size = 1024*1024;
    Arena b(synth_base, synth_size, 16);
    void *chunk = b.alloc(1000);
    BOOST_CHECK(chunk != nullptr);
    BOOST_CHECK(b.stats().used == 1008); // Aligned to 16
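    // (1000 requested bytes round up to the next multiple of the 16-byte
    // alignment passed to the Arena constructor, hence 1008 bytes used.)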
    BOOST_CHECK(b.stats().total == synth_size); // Nothing has disappeared?
    b.free(chunk);
    BOOST_CHECK(b.stats().used == 0);
    BOOST_CHECK(b.stats().free == synth_size);
    try { // Test exception on double-free
        b.free(chunk);
        BOOST_CHECK(0);
    } catch(std::runtime_error &)
    {
    }

    void *a0 = b.alloc(128);
    void *a1 = b.alloc(256);
    void *a2 = b.alloc(512);
    BOOST_CHECK(b.stats().used == 896);
    BOOST_CHECK(b.stats().total == synth_size);
    b.free(a0);
    BOOST_CHECK(b.stats().used == 768);
    b.free(a1);
    BOOST_CHECK(b.stats().used == 512);
    void *a3 = b.alloc(128);
    BOOST_CHECK(b.stats().used == 640);
    b.free(a2);
    BOOST_CHECK(b.stats().used == 128);
    b.free(a3);
    BOOST_CHECK(b.stats().used == 0);
    BOOST_CHECK_EQUAL(b.stats().chunks_used, 0);
    BOOST_CHECK(b.stats().total == synth_size);
    BOOST_CHECK(b.stats().free == synth_size);
    BOOST_CHECK_EQUAL(b.stats().chunks_free, 1);
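    // With every allocation freed, the arena is expected to have coalesced all
    // memory back into a single free chunk covering the whole region.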

    std::vector<void*> addr;
    BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr
    // Sweeping allocate all memory
    for (int x=0; x<1024; ++x)
        addr.push_back(b.alloc(1024));
    BOOST_CHECK(b.stats().free == 0);
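    // (1024 allocations of 1024 bytes each add up to exactly synth_size, so the
    // arena is now completely full.)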
    BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr
    BOOST_CHECK(b.alloc(0) == nullptr);
    for (int x=0; x<1024; ++x)
        b.free(addr[x]);
    addr.clear();
    BOOST_CHECK(b.stats().total == synth_size);
    BOOST_CHECK(b.stats().free == synth_size);

    // Now in the other direction...
    for (int x=0; x<1024; ++x)
        addr.push_back(b.alloc(1024));
    for (int x=0; x<1024; ++x)
        b.free(addr[1023-x]);
    addr.clear();

    // Now allocate in smaller unequal chunks, then deallocate haphazardly.
    // Not all of the allocations will succeed, but freeing nullptr is allowed,
    // so that is no problem.
    for (int x=0; x<2048; ++x)
        addr.push_back(b.alloc(x+1));
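    // (23 is coprime with 2048 and XOR with a constant is a bijection, so the
    // index expression below visits every slot exactly once, just out of order.)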
    for (int x=0; x<2048; ++x)
        b.free(addr[((x*23)%2048)^242]);
    addr.clear();

    // Go entirely wild: free and alloc interleaved,
    // generate targets and sizes using pseudo-randomness.
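    // addr.size() will be 2048, a power of two, so masking the state with
    // (addr.size()-1) below always yields an in-range index.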
    for (int x=0; x<2048; ++x)
        addr.push_back(0);
    uint32_t s = 0x12345678;
    for (int x=0; x<5000; ++x) {
        int idx = s & (addr.size()-1);
        if (s & 0x80000000) {
            b.free(addr[idx]);
            addr[idx] = 0;
        } else if(!addr[idx]) {
            addr[idx] = b.alloc((s >> 16) & 2047);
        }
        bool lsb = s & 1;
        s >>= 1;
        if (lsb)
            s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
    }
    for (void *ptr: addr)
        b.free(ptr);
    addr.clear();

    BOOST_CHECK(b.stats().total == synth_size);
    BOOST_CHECK(b.stats().free == synth_size);
}

/** Mock LockedPageAllocator for testing */
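// The first constructor argument limits how many fake arenas AllocateLocked
// hands out before returning nullptr; the second limits how many of those
// report locking success through *lockingSuccess.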
class TestLockedPageAllocator: public LockedPageAllocator
{
public:
    TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {}
    void* AllocateLocked(size_t len, bool *lockingSuccess) override
    {
        *lockingSuccess = false;
        if (count > 0) {
            --count;
            if (lockedcount > 0) {
                --lockedcount;
                *lockingSuccess = true;
            }
            return reinterpret_cast<void*>(0x08000000 + (count<<24)); // Fake address, do not actually use this memory
        }
        return nullptr;
    }
    void FreeLocked(void* addr, size_t len) override
    {
    }
    size_t GetLimit() override
    {
        return std::numeric_limits<size_t>::max();
    }
private:
    int count;
    int lockedcount;
};

BOOST_AUTO_TEST_CASE(lockedpool_tests_mock)
{
    // Test over three virtual arenas, of which one will succeed being locked
    std::unique_ptr<LockedPageAllocator> x(new TestLockedPageAllocator(3, 1));
    LockedPool pool(std::move(x));
    BOOST_CHECK(pool.stats().total == 0);
    BOOST_CHECK(pool.stats().locked == 0);

    // Ensure unreasonable requests are refused without allocating anything
    void *invalid_toosmall = pool.alloc(0);
    BOOST_CHECK(invalid_toosmall == nullptr);
    BOOST_CHECK(pool.stats().used == 0);
    BOOST_CHECK(pool.stats().free == 0);
    void *invalid_toobig = pool.alloc(LockedPool::ARENA_SIZE+1);
    BOOST_CHECK(invalid_toobig == nullptr);
    BOOST_CHECK(pool.stats().used == 0);
    BOOST_CHECK(pool.stats().free == 0);
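
    // Each arena is LockedPool::ARENA_SIZE bytes, so two half-arena allocations
    // fill one arena and the six allocations below exhaust all three mock arenas.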
    void *a0 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a0);
    BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
    void *a1 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a1);
    void *a2 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a2);
    void *a3 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a3);
    void *a4 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a4);
    void *a5 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a5);
    // We've passed a count of three arenas, so this allocation should fail
    void *a6 = pool.alloc(16);
    BOOST_CHECK(!a6);
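
    // Only one of the three mock arenas reports locking success, so the locked
    // byte count should remain at a single ARENA_SIZE after the frees below.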
    pool.free(a0);
    pool.free(a2);
    pool.free(a4);
    pool.free(a1);
    pool.free(a3);
    pool.free(a5);
    BOOST_CHECK(pool.stats().total == 3*LockedPool::ARENA_SIZE);
    BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
    BOOST_CHECK(pool.stats().used == 0);
}

// These tests use the live LockedPoolManager object. It is also used by other
// tests, so the conditions are somewhat less controllable and thus the tests
// are somewhat more error-prone.
BOOST_AUTO_TEST_CASE(lockedpool_tests_live)
{
    LockedPoolManager &pool = LockedPoolManager::Instance();
    LockedPool::Stats initial = pool.stats();
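
    // Unlike the synthetic Arena above, the live pool hands out real memory,
    // so the allocation below can actually be read and written.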
    void *a0 = pool.alloc(16);
    BOOST_CHECK(a0);
    // Test reading and writing the allocated memory
    *((uint32_t*)a0) = 0x1234;
    BOOST_CHECK(*((uint32_t*)a0) == 0x1234);

    pool.free(a0);
    try { // Test exception on double-free
        pool.free(a0);
        BOOST_CHECK(0);
    } catch(std::runtime_error &)
    {
    }
    // If more than one new arena was allocated for the above tests, something is wrong
    BOOST_CHECK(pool.stats().total <= (initial.total + LockedPool::ARENA_SIZE));
    // Usage must be back to where it started
    BOOST_CHECK(pool.stats().used == initial.used);
}

BOOST_AUTO_TEST_SUITE_END()