// Source: bitcoinplatinum.git / src / allocators.h
// blob 99afa10c255829a2769dcbf7233c5943549aac3c
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_ALLOCATORS_H
#define BITCOIN_ALLOCATORS_H
#include <assert.h> // for assert() used by LockedPageManagerBase
#include <string.h>

#include <map>
#include <string>

#include <boost/thread/mutex.hpp>
#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
// Require at least Windows XP (0x0501) so VirtualLock/VirtualUnlock are available.
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
// This is used to attempt to keep keying material out of swap
// Note that VirtualLock does not provide this as a guarantee on Windows,
// but, in practice, memory that has been VirtualLock'd almost never gets written to
// the pagefile except in rare circumstances where memory is extremely low.
#else
#include <sys/mman.h>
#include <limits.h> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif
33 /**
34 * Thread-safe class to keep track of locked (ie, non-swappable) memory pages.
36 * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
37 * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
38 * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
40 * @note By using a map from each page base address to lock count, this class is optimized for
41 * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
42 * something like an interval tree would be the preferred data structure.
44 template <class Locker> class LockedPageManagerBase
46 public:
47 LockedPageManagerBase(size_t page_size):
48 page_size(page_size)
50 // Determine bitmask for extracting page from address
51 assert(!(page_size & (page_size-1))); // size must be power of two
52 page_mask = ~(page_size - 1);
55 // For all pages in affected range, increase lock count
56 void LockRange(void *p, size_t size)
58 boost::mutex::scoped_lock lock(mutex);
59 if(!size) return;
60 const size_t base_addr = reinterpret_cast<size_t>(p);
61 const size_t start_page = base_addr & page_mask;
62 const size_t end_page = (base_addr + size - 1) & page_mask;
63 for(size_t page = start_page; page <= end_page; page += page_size)
65 Histogram::iterator it = histogram.find(page);
66 if(it == histogram.end()) // Newly locked page
68 locker.Lock(reinterpret_cast<void*>(page), page_size);
69 histogram.insert(std::make_pair(page, 1));
71 else // Page was already locked; increase counter
73 it->second += 1;
78 // For all pages in affected range, decrease lock count
79 void UnlockRange(void *p, size_t size)
81 boost::mutex::scoped_lock lock(mutex);
82 if(!size) return;
83 const size_t base_addr = reinterpret_cast<size_t>(p);
84 const size_t start_page = base_addr & page_mask;
85 const size_t end_page = (base_addr + size - 1) & page_mask;
86 for(size_t page = start_page; page <= end_page; page += page_size)
88 Histogram::iterator it = histogram.find(page);
89 assert(it != histogram.end()); // Cannot unlock an area that was not locked
90 // Decrease counter for page, when it is zero, the page will be unlocked
91 it->second -= 1;
92 if(it->second == 0) // Nothing on the page anymore that keeps it locked
94 // Unlock page and remove the count from histogram
95 locker.Unlock(reinterpret_cast<void*>(page), page_size);
96 histogram.erase(it);
101 // Get number of locked pages for diagnostics
102 int GetLockedPageCount()
104 boost::mutex::scoped_lock lock(mutex);
105 return histogram.size();
108 private:
109 Locker locker;
110 boost::mutex mutex;
111 size_t page_size, page_mask;
112 // map of page base address to lock count
113 typedef std::map<size_t,int> Histogram;
114 Histogram histogram;
/** Determine system page size in bytes.
 *  Uses GetSystemInfo on Windows, the PAGESIZE constant from limits.h when the
 *  platform provides one, and sysconf(_SC_PAGESIZE) otherwise.
 */
static inline size_t GetSystemPageSize()
{
    size_t page_size;
#if defined(WIN32)
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
#elif defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
    return page_size;
}
/**
 * OS-dependent memory page locking/unlocking.
 * Defined as policy class to make stubbing for test possible.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages so they stay out of swap.
     * addr and len must be a multiple of the system page size.
     * @return true on success
     */
    bool Lock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualLock(const_cast<void*>(addr), len);
#else
        return mlock(addr, len) == 0;
#endif
    }

    /** Unlock memory pages, allowing them to be swapped again.
     * addr and len must be a multiple of the system page size.
     * @return true on success
     */
    bool Unlock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualUnlock(const_cast<void*>(addr), len);
#else
        return munlock(addr, len) == 0;
#endif
    }
};
165 * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
166 * std::allocator templates.
168 class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
170 public:
171 static LockedPageManager instance; // instantiated in util.cpp
172 private:
173 LockedPageManager():
174 LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize())
179 // Allocator that locks its contents from being paged
180 // out of memory and clears its contents before deletion.
182 template<typename T>
183 struct secure_allocator : public std::allocator<T>
185 // MSVC8 default copy constructor is broken
186 typedef std::allocator<T> base;
187 typedef typename base::size_type size_type;
188 typedef typename base::difference_type difference_type;
189 typedef typename base::pointer pointer;
190 typedef typename base::const_pointer const_pointer;
191 typedef typename base::reference reference;
192 typedef typename base::const_reference const_reference;
193 typedef typename base::value_type value_type;
194 secure_allocator() throw() {}
195 secure_allocator(const secure_allocator& a) throw() : base(a) {}
196 template <typename U>
197 secure_allocator(const secure_allocator<U>& a) throw() : base(a) {}
198 ~secure_allocator() throw() {}
199 template<typename _Other> struct rebind
200 { typedef secure_allocator<_Other> other; };
202 T* allocate(std::size_t n, const void *hint = 0)
204 T *p;
205 p = std::allocator<T>::allocate(n, hint);
206 if (p != NULL)
207 LockedPageManager::instance.LockRange(p, sizeof(T) * n);
208 return p;
211 void deallocate(T* p, std::size_t n)
213 if (p != NULL)
215 memset(p, 0, sizeof(T) * n);
216 LockedPageManager::instance.UnlockRange(p, sizeof(T) * n);
218 std::allocator<T>::deallocate(p, n);
//
// Allocator that clears its contents before deletion.
//
template<typename T>
struct zero_after_free_allocator : public std::allocator<T>
{
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type  difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;
    zero_after_free_allocator() throw() {}
    zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
    template <typename U>
    zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
    ~zero_after_free_allocator() throw() {}
    template<typename _Other> struct rebind
    { typedef zero_after_free_allocator<_Other> other; };

    // Wipe the buffer before handing it back to the underlying allocator so
    // no sensitive bytes linger on the free store.
    // NOTE(review): memset before free may be elided by an optimizing
    // compiler; a guaranteed wipe (e.g. OPENSSL_cleanse) would be more robust.
    void deallocate(T* p, std::size_t n)
    {
        if (p != NULL)
            memset(p, 0, sizeof(T) * n);
        std::allocator<T>::deallocate(p, n);
    }
};
254 // This is exactly like std::string, but with a custom allocator.
255 typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
257 #endif