1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2012 The Bitcoin developers
3 // Distributed under the MIT/X11 software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 #ifndef BITCOIN_ALLOCATORS_H
6 #define BITCOIN_ALLOCATORS_H
#include <map>
#include <mutex>
#include <string.h>
#include <string>

#include <boost/thread/mutex.hpp>
17 #define _WIN32_WINNT 0x0501
18 #define WIN32_LEAN_AND_MEAN 1
23 // This is used to attempt to keep keying material out of swap
24 // Note that VirtualLock does not provide this as a guarantee on Windows,
25 // but, in practice, memory that has been VirtualLock'd almost never gets written to
26 // the pagefile except in rare circumstances where memory is extremely low.
29 #include <limits.h> // for PAGESIZE
30 #include <unistd.h> // for sysconf
/**
 * Thread-safe class to keep track of locked (ie, non-swappable) memory pages.
 *
 * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
 * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
 * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
 *
 * @note By using a map from each page base address to lock count, this class is optimized for
 * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
 * something like an interval tree would be the preferred data structure.
 *
 * Locker is a policy class providing Lock(addr, len)/Unlock(addr, len); it is a
 * template parameter so tests can substitute a stub for the OS page locker.
 */
template <class Locker>
class LockedPageManagerBase
{
public:
    /** @param page_size system page size in bytes; must be a power of two. */
    explicit LockedPageManagerBase(size_t page_size):
        page_size(page_size)
    {
        // Determine bitmask for extracting page from address
        assert(!(page_size & (page_size - 1))); // size must be power of two
        page_mask = ~(page_size - 1);
    }

    // For all pages in affected range, increase lock count
    void LockRange(void *p, size_t size)
    {
        std::lock_guard<std::mutex> lock(mutex);
        if (!size)
            return; // empty range: avoid (base_addr + size - 1) underflow below
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            if (it == histogram.end()) // Newly locked page
            {
                locker.Lock(reinterpret_cast<void*>(page), page_size);
                histogram.insert(std::make_pair(page, 1));
            }
            else // Page was already locked; increase counter
            {
                it->second += 1;
            }
        }
    }

    // For all pages in affected range, decrease lock count
    void UnlockRange(void *p, size_t size)
    {
        std::lock_guard<std::mutex> lock(mutex);
        if (!size)
            return; // empty range: nothing was locked for it
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            assert(it != histogram.end()); // Cannot unlock an area that was not locked
            // Decrease counter for page, when it is zero, the page will be unlocked
            it->second -= 1;
            if (it->second == 0) // Nothing on the page anymore that keeps it locked
            {
                // Unlock page and remove the count from histogram
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }

    // Get number of locked pages for diagnostics
    int GetLockedPageCount()
    {
        std::lock_guard<std::mutex> lock(mutex);
        return static_cast<int>(histogram.size());
    }

private:
    Locker locker;
    std::mutex mutex;               // guards histogram; all public methods lock it
    size_t page_size, page_mask;
    // map of page base address to lock count
    typedef std::map<size_t, int> Histogram;
    Histogram histogram;
};
/** Determine system page size in bytes.
 *  Uses GetSystemInfo on Windows, the PAGESIZE constant from <limits.h>
 *  where available, and otherwise falls back to POSIX sysconf.
 */
static inline size_t GetSystemPageSize()
{
    size_t page_size;
#if defined(WIN32)
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
#elif defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
    return page_size;
}
/**
 * OS-dependent memory page locking/unlocking.
 * Defined as policy class to make stubbing for test possible.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages.
     * addr and len must be a multiple of the system page size.
     * @return true on success.
     */
    bool Lock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualLock(const_cast<void*>(addr), len);
#else
        return mlock(addr, len) == 0;
#endif
    }
    /** Unlock memory pages.
     * addr and len must be a multiple of the system page size.
     * @return true on success.
     */
    bool Unlock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualUnlock(const_cast<void*>(addr), len);
#else
        return munlock(addr, len) == 0;
#endif
    }
};
165 * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
166 * std::allocator templates.
168 class LockedPageManager
: public LockedPageManagerBase
<MemoryPageLocker
>
171 static LockedPageManager instance
; // instantiated in util.cpp
174 LockedPageManagerBase
<MemoryPageLocker
>(GetSystemPageSize())
179 // Allocator that locks its contents from being paged
180 // out of memory and clears its contents before deletion.
183 struct secure_allocator
: public std::allocator
<T
>
185 // MSVC8 default copy constructor is broken
186 typedef std::allocator
<T
> base
;
187 typedef typename
base::size_type size_type
;
188 typedef typename
base::difference_type difference_type
;
189 typedef typename
base::pointer pointer
;
190 typedef typename
base::const_pointer const_pointer
;
191 typedef typename
base::reference reference
;
192 typedef typename
base::const_reference const_reference
;
193 typedef typename
base::value_type value_type
;
194 secure_allocator() throw() {}
195 secure_allocator(const secure_allocator
& a
) throw() : base(a
) {}
196 template <typename U
>
197 secure_allocator(const secure_allocator
<U
>& a
) throw() : base(a
) {}
198 ~secure_allocator() throw() {}
199 template<typename _Other
> struct rebind
200 { typedef secure_allocator
<_Other
> other
; };
202 T
* allocate(std::size_t n
, const void *hint
= 0)
205 p
= std::allocator
<T
>::allocate(n
, hint
);
207 LockedPageManager::instance
.LockRange(p
, sizeof(T
) * n
);
211 void deallocate(T
* p
, std::size_t n
)
215 memset(p
, 0, sizeof(T
) * n
);
216 LockedPageManager::instance
.UnlockRange(p
, sizeof(T
) * n
);
218 std::allocator
<T
>::deallocate(p
, n
);
//
// Allocator that clears its contents before deletion.
//
template<typename T>
struct zero_after_free_allocator : public std::allocator<T>
{
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;
    zero_after_free_allocator() throw() {}
    zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
    template <typename U>
    zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
    ~zero_after_free_allocator() throw() {}
    template<typename _Other> struct rebind
    { typedef zero_after_free_allocator<_Other> other; };

    // Wipe the memory for n objects at p, then release it.
    void deallocate(T* p, std::size_t n)
    {
        if (p != NULL)
        {
            // Zero through a volatile pointer so the compiler cannot elide
            // the stores as dead writes before the memory is freed (CWE-14).
            volatile unsigned char* vp = reinterpret_cast<volatile unsigned char*>(p);
            for (std::size_t i = 0; i < sizeof(T) * n; i++)
                vp[i] = 0;
        }
        std::allocator<T>::deallocate(p, n);
    }
};
// This is exactly like std::string, but with a custom allocator.
// (secure_allocator, declared above, keeps the string's pages out of swap
// and zeroes them on free — intended for passphrases and key material.)
typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;