// Copyright (c) 2012-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "util.h"

#include "support/allocators/secure.h"
#include "test/test_bitcoin.h"

#include <boost/test/unit_test.hpp>

BOOST_FIXTURE_TEST_SUITE(allocator_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(arena_tests)
{
    // Fake memory base address for testing
    // without actually using memory.
    void *synth_base = reinterpret_cast<void*>(0x08000000);
    const size_t synth_size = 1024*1024;
    Arena b(synth_base, synth_size, 16);
    void *chunk = b.alloc(1000);
#ifdef ARENA_DEBUG
    b.walk();
#endif
    BOOST_CHECK(chunk != nullptr);
    BOOST_CHECK(b.stats().used == 1008); // Aligned to 16
    BOOST_CHECK(b.stats().total == synth_size); // Nothing has disappeared?
    b.free(chunk);
#ifdef ARENA_DEBUG
    b.walk();
#endif
    BOOST_CHECK(b.stats().used == 0);
    BOOST_CHECK(b.stats().free == synth_size);
    try { // Test exception on double-free
        b.free(chunk);
        BOOST_CHECK(0);
    } catch(std::runtime_error &)
    {
    }
    void *a0 = b.alloc(128);
    void *a1 = b.alloc(256);
    void *a2 = b.alloc(512);
    BOOST_CHECK(b.stats().used == 896);
    BOOST_CHECK(b.stats().total == synth_size);
#ifdef ARENA_DEBUG
    b.walk();
#endif
    b.free(a0);
#ifdef ARENA_DEBUG
    b.walk();
#endif
    BOOST_CHECK(b.stats().used == 768);
    b.free(a1);
    BOOST_CHECK(b.stats().used == 512);
    void *a3 = b.alloc(128);
#ifdef ARENA_DEBUG
    b.walk();
#endif
    BOOST_CHECK(b.stats().used == 640);
    b.free(a2);
    BOOST_CHECK(b.stats().used == 128);
    b.free(a3);
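    // Everything has been freed again; the checks below expect the freed
    // chunks to have coalesced back into a single free chunk covering the
    // whole arena.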
    BOOST_CHECK(b.stats().used == 0);
    BOOST_CHECK_EQUAL(b.stats().chunks_used, 0);
    BOOST_CHECK(b.stats().total == synth_size);
    BOOST_CHECK(b.stats().free == synth_size);
    BOOST_CHECK_EQUAL(b.stats().chunks_free, 1);
    std::vector<void*> addr;
    BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr
#ifdef ARENA_DEBUG
    b.walk();
#endif
    // Sweeping allocation: fill all of memory
    for (int x=0; x<1024; ++x)
        addr.push_back(b.alloc(1024));
    BOOST_CHECK(b.stats().free == 0);
    BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr
    BOOST_CHECK(b.alloc(0) == nullptr);
    for (int x=0; x<1024; ++x)
        b.free(addr[x]);
    addr.clear();
    BOOST_CHECK(b.stats().total == synth_size);
    BOOST_CHECK(b.stats().free == synth_size);

    // Now in the other direction...
    for (int x=0; x<1024; ++x)
        addr.push_back(b.alloc(1024));
    for (int x=0; x<1024; ++x)
        b.free(addr[1023-x]);
    addr.clear();

    // Now allocate in smaller, unequal chunks, then deallocate haphazardly.
    // Not all of the allocations will succeed, but freeing nullptr is
    // allowed, so that is no problem.
    for (int x=0; x<2048; ++x)
        addr.push_back(b.alloc(x+1));
    for (int x=0; x<2048; ++x)
        b.free(addr[((x*23)%2048)^242]);
    addr.clear();
    // Go entirely wild: free and alloc interleaved,
    // generate targets and sizes using pseudo-randomness.
    for (int x=0; x<2048; ++x)
        addr.push_back(0);
    uint32_t s = 0x12345678;
    for (int x=0; x<5000; ++x) {
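        // addr.size() is 2048, a power of two, so masking with (addr.size()-1)
        // always yields a valid index; the shift/XOR at the bottom of the loop
        // advances s as a simple LFSR to keep the choices pseudo-random.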
        int idx = s & (addr.size()-1);
        if (s & 0x80000000) {
            b.free(addr[idx]);
            addr[idx] = 0;
        } else if(!addr[idx]) {
            addr[idx] = b.alloc((s >> 16) & 2047);
        }
        bool lsb = s & 1;
        s >>= 1;
        if (lsb)
            s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
    }
    for (void *ptr: addr)
        b.free(ptr);
    addr.clear();

    BOOST_CHECK(b.stats().total == synth_size);
    BOOST_CHECK(b.stats().free == synth_size);
}
/** Mock LockedPageAllocator for testing */
class TestLockedPageAllocator: public LockedPageAllocator
{
public:
    TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {}
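    // Hands out a distinct fake arena base address for the first count_in
    // requests, reporting locking success for the first lockedcount_in of
    // them; once count reaches zero, further allocations fail.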
    void* AllocateLocked(size_t len, bool *lockingSuccess) override
    {
        *lockingSuccess = false;
        if (count > 0) {
            --count;

            if (lockedcount > 0) {
                --lockedcount;
                *lockingSuccess = true;
            }

            return reinterpret_cast<void*>(0x08000000 + (count<<24)); // Fake address, do not actually use this memory
        }
        return 0;
    }
    void FreeLocked(void* addr, size_t len) override
    {
    }
    size_t GetLimit() override
    {
        return std::numeric_limits<size_t>::max();
    }
private:
    int count;
    int lockedcount;
};
BOOST_AUTO_TEST_CASE(lockedpool_tests_mock)
{
    // Test over three virtual arenas, of which one will succeed being locked
    std::unique_ptr<LockedPageAllocator> x(new TestLockedPageAllocator(3, 1));
    LockedPool pool(std::move(x));
    BOOST_CHECK(pool.stats().total == 0);
    BOOST_CHECK(pool.stats().locked == 0);

    // Ensure unreasonable requests are refused without allocating anything
    void *invalid_toosmall = pool.alloc(0);
    BOOST_CHECK(invalid_toosmall == nullptr);
    BOOST_CHECK(pool.stats().used == 0);
    BOOST_CHECK(pool.stats().free == 0);
    void *invalid_toobig = pool.alloc(LockedPool::ARENA_SIZE+1);
    BOOST_CHECK(invalid_toobig == nullptr);
    BOOST_CHECK(pool.stats().used == 0);
    BOOST_CHECK(pool.stats().free == 0);

    void *a0 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a0);
    BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
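    // Only one arena can be locked (the mock allocator was constructed with
    // lockedcount 1), so the locked byte count stays at ARENA_SIZE even as
    // more arenas are added below.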
    void *a1 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a1);
    void *a2 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a2);
    void *a3 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a3);
    void *a4 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a4);
    void *a5 = pool.alloc(LockedPool::ARENA_SIZE / 2);
    BOOST_CHECK(a5);
    // We've passed a count of three arenas, so this allocation should fail
    void *a6 = pool.alloc(16);
    BOOST_CHECK(!a6);

    pool.free(a0);
    pool.free(a2);
    pool.free(a4);
    pool.free(a1);
    pool.free(a3);
    pool.free(a5);
    BOOST_CHECK(pool.stats().total == 3*LockedPool::ARENA_SIZE);
    BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE);
    BOOST_CHECK(pool.stats().used == 0);
}
// These tests use the live LockedPoolManager object; it is also used by other
// tests, so the conditions are somewhat less controllable and thus the tests
// are somewhat more error-prone.
BOOST_AUTO_TEST_CASE(lockedpool_tests_live)
{
    LockedPoolManager &pool = LockedPoolManager::Instance();
    LockedPool::Stats initial = pool.stats();
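    // Snapshot the stats up front; since the live pool is shared with other
    // tests, the checks below compare against this baseline rather than
    // assuming an empty pool.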
    void *a0 = pool.alloc(16);
    BOOST_CHECK(a0);
    // Test reading and writing the allocated memory
    *((uint32_t*)a0) = 0x1234;
    BOOST_CHECK(*((uint32_t*)a0) == 0x1234);

    pool.free(a0);
    try { // Test exception on double-free
        pool.free(a0);
        BOOST_CHECK(0);
    } catch(std::runtime_error &)
    {
    }
    // If more than one new arena was allocated for the above tests, something is wrong
    BOOST_CHECK(pool.stats().total <= (initial.total + LockedPool::ARENA_SIZE));
    // Usage must be back to where it started
    BOOST_CHECK(pool.stats().used == initial.used);
}
BOOST_AUTO_TEST_SUITE_END()