// Copyright (c) 2016-2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H

#include <stdint.h>
#include <list>
#include <map>
#include <mutex>
#include <memory>

/**
 * OS-dependent allocation and deallocation of locked/pinned memory pages.
 * Abstract base class.
 */
class LockedPageAllocator
{
public:
    virtual ~LockedPageAllocator() {}
    /** Allocate and lock memory pages.
     * If len is not a multiple of the system page size, it is rounded up.
     * Returns 0 in case of allocation failure.
     *
     * If locking the memory pages could not be accomplished, the memory is
     * still returned, but the lockingSuccess flag will be false.
     * lockingSuccess is undefined if the allocation fails.
     */
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;

    /** Unlock and free memory pages.
     * Clear the memory before unlocking.
     */
    virtual void FreeLocked(void* addr, size_t len) = 0;

    /** Get the total limit on the amount of memory that may be locked by this
     * process, in bytes. Return size_t max if there is no limit or the limit
     * is unknown. Return 0 if no memory can be locked at all.
     */
    virtual size_t GetLimit() = 0;
};
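
/* Illustrative sketch (not part of this header): a concrete LockedPageAllocator
 * for a POSIX-like system could pair mmap()/munmap() with mlock()/munlock(),
 * roughly as below. The class name and error handling are hypothetical;
 * <sys/mman.h> and <cstring> would be needed.
 *
 *   void* ExamplePosixAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
 *   {
 *       void *addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
 *                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *       if (addr == MAP_FAILED)
 *           return nullptr;                      // allocation failure -> return 0
 *       *lockingSuccess = mlock(addr, len) == 0; // pinning may fail; report via the flag
 *       return addr;
 *   }
 *
 *   void ExamplePosixAllocator::FreeLocked(void* addr, size_t len)
 *   {
 *       memset(addr, 0, len);                    // clear before unlocking, per the contract above
 *       munlock(addr, len);
 *       munmap(addr, len);
 *   }
 */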

/* An arena manages a contiguous region of memory by dividing it into
 * chunks.
 */
class Arena
{
public:
    Arena(void *base, size_t size, size_t alignment);
    virtual ~Arena();

    Arena(const Arena& other) = delete; // non construction-copyable
    Arena& operator=(const Arena&) = delete; // non copyable

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Allocate size bytes from this arena.
     * Returns pointer on success, or 0 if memory is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get arena usage statistics */
    Stats stats() const;

#ifdef ARENA_DEBUG
    void walk() const;
#endif

    /** Return whether a pointer points inside this arena.
     * This returns base <= ptr < (base+size) so only use it for (inclusive)
     * chunk starting addresses.
     */
    bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
private:
    /** Map of chunk address to chunk size. This class makes use of the
     * sorted order to merge previous and next chunks during deallocation.
     */
    std::map<char*, size_t> chunks_free;
    std::map<char*, size_t> chunks_used;
    /** Base address of arena */
    char* base;
    /** End address of arena */
    char* end;
    /** Minimum chunk alignment */
    size_t alignment;
};
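
/* Illustrative sketch (the buffer and sizes are hypothetical): an Arena carves
 * chunks out of memory the caller already owns; it does not allocate the
 * region itself. Usage could look roughly like:
 *
 *   static char buf[4096];
 *   Arena arena(buf, sizeof(buf), 16);   // manage buf with 16-byte chunk alignment
 *   void* p = arena.alloc(100);          // carve out a chunk (size rounded up to alignment)
 *   Arena::Stats s = arena.stats();      // s.used and s.free reflect the allocation
 *   arena.free(p);                       // return the chunk; adjacent free chunks are merged
 */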

/** Pool for locked memory chunks.
 *
 * To prevent sensitive key data from being swapped to disk, the memory in this pool
 * is locked/pinned.
 *
 * An arena manages a contiguous region of memory. The pool starts out with one arena
 * but can grow to multiple arenas if the need arises.
 *
 * Unlike a normal C heap, the administrative structures are separate from the managed
 * memory. This is done because the sizes and bases of objects are not in themselves
 * sensitive information, and keeping them outside the pool conserves precious locked
 * memory. In some operating systems the amount of memory that can be locked is small.
 */
class LockedPool
{
public:
    /** Size of one arena of locked memory. This is a compromise.
     * Do not set this too low, as managing many arenas will increase
     * allocation and deallocation overhead. Setting it too high allocates
     * more locked memory from the OS than strictly necessary.
     */
    static const size_t ARENA_SIZE = 256*1024;
    /** Chunk alignment. Another compromise. Setting this too high will waste
     * memory; setting it too low will facilitate fragmentation.
     */
    static const size_t ARENA_ALIGN = 16;

    /** Callback when allocation succeeds but locking fails.
     */
    typedef bool (*LockingFailed_Callback)();

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t locked;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Create a new LockedPool. This takes ownership of the LockedPageAllocator,
     * so it can only be instantiated with a std::move()'d unique_ptr.
     * The second argument is an optional callback that is invoked when locking a
     * newly allocated arena fails. If this callback is provided and returns false,
     * the allocation fails (hard fail); if it returns true, the allocation proceeds,
     * but a warning could be issued.
     */
    explicit LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = nullptr);
    ~LockedPool();

    LockedPool(const LockedPool& other) = delete; // non construction-copyable
    LockedPool& operator=(const LockedPool&) = delete; // non copyable

    /** Allocate size bytes from this pool.
     * Returns pointer on success, or 0 if memory is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get pool usage statistics */
    Stats stats() const;
private:
    std::unique_ptr<LockedPageAllocator> allocator;

    /** Create an arena from locked pages */
    class LockedPageArena: public Arena
    {
    public:
        LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
        ~LockedPageArena();
    private:
        void *base;
        size_t size;
        LockedPageAllocator *allocator;
    };

    bool new_arena(size_t size, size_t align);

    std::list<LockedPageArena> arenas;
    LockingFailed_Callback lf_cb;
    size_t cumulative_bytes_locked;
    /** Mutex protects access to this pool's data structures, including arenas.
     */
    mutable std::mutex mutex;
};
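
/* Illustrative sketch (ExampleOSAllocator is hypothetical, not declared here):
 * a LockedPool owns its page allocator, and an optional callback decides what
 * happens when a newly allocated arena cannot be locked:
 *
 *   static bool WarnOnLockFailure() { return true; } // proceed with unlocked memory, but warn
 *
 *   LockedPool pool(std::unique_ptr<LockedPageAllocator>(new ExampleOSAllocator()),
 *                   &WarnOnLockFailure);
 *   void* key = pool.alloc(32);          // carved from a locked arena when possible
 *   LockedPool::Stats st = pool.stats(); // st.locked tracks successfully pinned bytes
 *   pool.free(key);
 */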

/**
 * Singleton class to keep track of locked (i.e., non-swappable) memory, for use in
 * std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (e.g., see
 * MSVC's vector<T> implementation, which allocates 1 byte of memory in the allocator).
 * Due to the unpredictable order of static initializers, we have to make sure the
 * LockedPoolManager instance exists before any other STL-based objects that use
 * secure_allocator are created. So instead of having LockedPoolManager also be
 * static-initialized, it is created on demand.
 */
class LockedPoolManager : public LockedPool
{
public:
    /** Return the current instance, or create it once */
    static LockedPoolManager& Instance()
    {
        std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
        return *LockedPoolManager::_instance;
    }

private:
    explicit LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);

    /** Create a new LockedPoolManager specialized to the OS */
    static void CreateInstance();
    /** Called when locking fails, warn the user here */
    static bool LockingFailed();

    static LockedPoolManager* _instance;
    static std::once_flag init_flag;
};
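
/* Illustrative sketch: callers that need locked memory (for example, the
 * secure_allocator mentioned above) would typically go through the singleton
 * rather than construct a pool themselves. Roughly:
 *
 *   void* p = LockedPoolManager::Instance().alloc(32); // request 32 bytes of locked memory
 *   // ... store secret data in p ...
 *   LockedPoolManager::Instance().free(p);
 */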

#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H