// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "support/lockedpool.h"
#include "support/cleanse.h"

#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif

#ifdef WIN32
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#else
#include <sys/mman.h>     // for mmap
#include <sys/resource.h> // for getrlimit
#include <limits.h>       // for PAGESIZE
#include <unistd.h>       // for sysconf
#endif

#include <algorithm>

LockedPoolManager* LockedPoolManager::_instance = nullptr;
std::once_flag LockedPoolManager::init_flag;

/*******************************************************************************/
// Utilities

/** Align x up to the next multiple of `align`, which must be a power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}

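// Illustrative note (added commentary, not from the original source): because
// `align` is a power of two, adding `align - 1` and masking with ~(align - 1)
// rounds up without a division or branch. For example:
//   align_up(13, 16) == 16    align_up(32, 16) == 32    align_up(0, 16) == 0
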
/*******************************************************************************/
// Implementation: Arena

Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
    // Start with one free chunk that covers the entire arena
    chunks_free.emplace(base, size_in);
}

Arena::~Arena()
{
}

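// Illustrative note (added commentary, not from the original source): both
// chunks_free and chunks_used map a chunk's start address to its size, so
// neighbouring chunks sit next to each other in std::map order. alloc() moves
// bytes from a chunks_free entry into a new chunks_used entry, and free()
// moves them back, merging with neighbours where possible.
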
void* Arena::alloc(size_t size)
{
    // Round to next multiple of alignment
    size = align_up(size, alignment);

    // Don't handle zero-sized chunks
    if (size == 0)
        return nullptr;

    // Pick a large enough free-chunk
    auto it = std::find_if(chunks_free.begin(), chunks_free.end(),
        [=](const std::map<char*, size_t>::value_type& chunk){ return chunk.second >= size; });
    if (it == chunks_free.end())
        return nullptr;

    // Create the used-chunk, taking its space from the end of the free-chunk
    auto alloced = chunks_used.emplace(it->first + it->second - size, size).first;
    if (!(it->second -= size))
        chunks_free.erase(it);
    return reinterpret_cast<void*>(alloced->first);
}

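// Worked example (added commentary, not from the original source): with a
// 1024-byte arena at `base` and alignment 16, a request for 24 bytes is rounded
// up to 32; the free chunk {base, 1024} shrinks to {base, 992} and a used chunk
// {base + 992, 32} is recorded. A request larger than every remaining free
// chunk returns nullptr and the caller has to fall back to another arena.
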
/* extend the Iterator if other begins at its end */
template <class Iterator, class Pair> bool extend(Iterator it, const Pair& other) {
    if (it->first + it->second == other.first) {
        it->second += other.second;
        return true;
    }
    return false;
}

void Arena::free(void *ptr)
{
    // Freeing the nullptr pointer is OK.
    if (ptr == nullptr) {
        return;
    }

    // Remove chunk from used map
    auto i = chunks_used.find(static_cast<char*>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    auto freed = *i;
    chunks_used.erase(i);

    // Add space to free map, coalescing contiguous chunks
    auto next = chunks_free.upper_bound(freed.first);
    auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next);
    if (prev == chunks_free.end() || !extend(prev, freed))
        prev = chunks_free.emplace_hint(next, freed);
    if (next != chunks_free.end() && extend(prev, *next))
        chunks_free.erase(next);
}

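// Illustrative note (added commentary, not from the original source): the two
// extend() calls coalesce in both directions. Freeing {base+32, 32} while
// {base, 32} and {base+64, 64} are already free first grows the predecessor to
// {base, 64}, then absorbs the successor, leaving a single {base, 128} entry.
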
Arena::Stats Arena::stats() const
{
    Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second;
    r.total = r.used + r.free;
    return r;
}

#ifdef ARENA_DEBUG
#include <iostream> // for std::cout in the debug dump below
#include <iomanip>  // for std::setw/std::setfill
void printchunk(char* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second, false);
    std::cout << std::endl;
}
#endif

/*******************************************************************************/
// Implementation: Win32LockedPageAllocator

#ifdef WIN32
/** LockedPageAllocator specialized for Windows.
 */
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};

Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}

void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is used to attempt to keep keying material out of swap. Note
        // that it does not provide this as a guarantee, but, in practice, memory
        // that has been VirtualLock'd almost never gets written to the pagefile
        // except in rare circumstances where memory is extremely low.
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}

void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
    VirtualFree(addr, 0, MEM_RELEASE); // release the reservation made in AllocateLocked
}

size_t Win32LockedPageAllocator::GetLimit()
{
    // TODO: Is there a locked-memory limit on Windows, and how can we query it?
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: PosixLockedPageAllocator

#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be
 * special snowflakes.
 */
class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};

PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine system page size in bytes
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}

// Some systems (at least OS X) do not define MAP_ANONYMOUS yet and define
// MAP_ANON which is deprecated
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) { // mmap reports failure with MAP_FAILED rather than nullptr
        return nullptr;
    }
    *lockingSuccess = mlock(addr, len) == 0;
    return addr;
}

void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munmap(addr, len);
}

size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: LockedPool

LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
{
}

LockedPool::~LockedPool()
{
}

void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);

    // Don't handle impossible sizes
    if (size == 0 || size > ARENA_SIZE)
        return nullptr;

    // Try allocating from each current arena
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new one
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}

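// Illustrative note (added commentary, not from the original source): requests
// are served only from whole arenas, so anything larger than ARENA_SIZE
// (256 KiB in the accompanying lockedpool.h, if unchanged) is rejected up
// front. A small request walks the existing arenas first and only asks
// new_arena() to map and lock a fresh block when all of them are full.
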
void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // TODO we can do better than this linear search by keeping a map of arena
    // extents to arena, and looking up the address.
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}

LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
    for (const auto &arena: arenas) {
        Arena::Stats i = arena.stats();
        r.used += i.used;
        r.free += i.free;
        r.total += i.total;
        r.chunks_used += i.chunks_used;
        r.chunks_free += i.chunks_free;
    }
    return r;
}

bool LockedPool::new_arena(size_t size, size_t align)
{
    bool locked;
    // If this is the first arena, handle this specially: cap the upper size
    // by the process limit. This makes sure that the first arena will at least
    // be locked. An exception to this is if the process limit is 0:
    // in that case no memory can be locked at all, so we skip this logic.
    if (arenas.empty()) {
        size_t limit = allocator->GetLimit();
        if (limit > 0) {
            size = std::min(size, limit);
        }
    }
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr) {
        return false;
    }
    if (locked) {
        cumulative_bytes_locked += size;
    } else if (lf_cb) { // Call the locking-failed callback if locking failed
        if (!lf_cb()) { // If the callback returns false, free the memory and fail,
                        // otherwise consider the user warned and proceed.
            allocator->FreeLocked(addr, size);
            return false;
        }
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}

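// Illustrative note (added commentary, not from the original source): on many
// Linux systems the default RLIMIT_MEMLOCK soft limit is 64 KiB ("ulimit -l"),
// so the first arena is commonly capped well below ARENA_SIZE; later arenas are
// requested at full size and may simply fail to lock, which triggers the
// locking-failed callback above.
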
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}

LockedPool::LockedPageArena::~LockedPageArena()
{
    allocator->FreeLocked(base, size);
}

/*******************************************************************************/
// Implementation: LockedPoolManager

LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator_in):
    LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed)
{
}

bool LockedPoolManager::LockingFailed()
{
    // TODO: log something, but how, without including util.h?
    return true;
}

void LockedPoolManager::CreateInstance()
{
    // Using a local static instance guarantees that the object is initialized
    // when it's first needed and also deinitialized after all objects that use
    // it are done with it. I can think of one unlikely scenario where we may
    // have a static deinitialization order problem, but the check in
    // LockedPoolManagerBase's destructor helps us detect if that ever happens.
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
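
// Usage sketch (added for illustration, not part of the original file): callers
// are expected to go through LockedPoolManager::Instance() from lockedpool.h,
// which uses init_flag with std::call_once so CreateInstance() runs exactly once:
//
//   void* p = LockedPoolManager::Instance().alloc(32); // locked, page-backed
//   // ...store key material in p...
//   LockedPoolManager::Instance().free(p);             // arena memory is cleansed on teardown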