/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,     |
   | that is bundled with this package in the file LICENSE, and is       |
   | available through the world-wide-web at the following url:          |
   | http://www.php.net/license/3_01.txt                                 |
   | If you did not receive a copy of the PHP license and are unable to  |
   | obtain it through the world-wide-web, please send a note to         |
   | license@php.net so we can mail you a copy immediately.              |
   +----------------------------------------------------------------------+
*/

#include "hphp/util/bump-mapper.h"

#include "hphp/util/assertions.h"
#include "hphp/util/hugetlb.h"
#include "hphp/util/numa.h"

#include <algorithm>
#include <atomic>
#include <mutex>

#include <folly/portability/SysMman.h>

#ifdef HAVE_NUMA
#include <numaif.h>
#endif
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#if USE_JEMALLOC_EXTENT_HOOKS

namespace HPHP {

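// When set, the 2M mapper falls back to transparent huge pages (a plain
// anonymous mapping plus MADV_HUGEPAGE) if explicit hugetlb pages cannot be
// obtained.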
bool g_useTHPUponHugeTLBFailure = false;

namespace alloc {

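// Try to add a single 1G hugetlb page at the low frontier of the range.  With
// NUMA enabled, the nodes allowed by m_interleaveMask are tried round-robin,
// starting from m_nextNode, each at most once.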
bool Bump1GMapper::addMappingImpl() {
  if (m_currHugePages >= m_maxHugePages) return false;
  if (get_huge1g_info().free_hugepages <= 0) return false;

  std::lock_guard<RangeState> _(m_state);
  auto const currFrontier = m_state.low_map.load(std::memory_order_relaxed);
  if (currFrontier % size1g != 0) return false;
  auto const newFrontier = currFrontier + size1g;
  if (newFrontier > m_state.high_map.load(std::memory_order_relaxed)) {
    return false;
  }
#ifdef HAVE_NUMA
  if (numa_num_nodes > 1) {
    if (const int numAllowedNodes = __builtin_popcount(m_interleaveMask)) {
      assertx((m_interleaveMask & ~numa_node_set) == 0);
      int failCount = 0;
      // Try to map huge pages in round-robin fashion starting from m_nextNode.
      // We try on each allowed NUMA node at most once.
      do {
        auto const currNode = m_nextNode;
        m_nextNode = (currNode + 1) & numa_node_mask;
        if (!((1u << currNode) & m_interleaveMask)) {
          // Node not allowed, try next one.
          continue;
        }
        if (mmap_1g((void*)currFrontier, currNode, /* MAP_FIXED */ true)) {
          ++m_currHugePages;
          m_state.low_map.store(newFrontier, std::memory_order_release);
          return true;
        }
        if (++failCount >= numAllowedNodes) return false;
      } while (true);
    }
  }
#endif
  if (mmap_1g((void*)currFrontier, -1, /* MAP_FIXED */ true)) {
    ++m_currHugePages;
    m_state.low_map.store(newFrontier, std::memory_order_release);
    return true;
  }
  return false;
}
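
// Upper bound on how many bytes the 2M and normal mappers add to a range in a
// single call (four 2M huge pages).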
constexpr size_t kChunkSize = 4 * size2m;
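
// Add up to kChunkSize bytes of 2M pages at the low frontier of the range.
// Prefers explicit hugetlb pages; if that mmap() fails and
// g_useTHPUponHugeTLBFailure is set, retries with a normal anonymous mapping
// advised with MADV_HUGEPAGE.  Each new 2M page is touched via mlock() so the
// frontier is only published for memory that is actually backed.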
bool Bump2MMapper::addMappingImpl() {
  if (m_currHugePages >= m_maxHugePages) return false;
  const uint32_t freePages =
    g_useTHPUponHugeTLBFailure ? (kChunkSize / size2m)
                               : get_huge2m_info().free_hugepages;
  if (freePages <= 0) return false;

  std::lock_guard<RangeState> _(m_state);
  // Recheck the mapping frontiers after grabbing the lock
  auto const currFrontier = m_state.low_map.load(std::memory_order_relaxed);
  if (currFrontier % size2m != 0) return false;
  auto nPages = std::min(m_maxHugePages - m_currHugePages, freePages);
  if (nPages <= 0) return false;
  auto const hugeSize = std::min(kChunkSize, size2m * nPages);
  auto const newFrontier = currFrontier + hugeSize;
  if (newFrontier > m_state.high_map.load(std::memory_order_relaxed)) {
    return false;
  }
  void* newPages = mmap((void*)currFrontier, hugeSize,
                        PROT_READ | PROT_WRITE,
                        MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED | MAP_HUGETLB,
                        -1, 0);
  if (newPages == MAP_FAILED) {
    if (!g_useTHPUponHugeTLBFailure) return false;
    // Use transparent hugepages instead.
    newPages = mmap((void*)currFrontier, hugeSize,
                    PROT_READ | PROT_WRITE,
                    MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED,
                    -1, 0);
    if (newPages == MAP_FAILED) return false;
    assertx(newPages == (void*)currFrontier);
    madvise(newPages, hugeSize, MADV_HUGEPAGE);
  }
  assertx(newPages == (void*)currFrontier); // MAP_FIXED should work
#ifdef HAVE_NUMA
  if (numa_num_nodes > 1 && m_interleaveMask) {
    unsigned long mask = m_interleaveMask;
    mbind(newPages, hugeSize, MPOL_INTERLEAVE,
          &mask, 32 /* max node */, 0 /* flag */);
  }
#endif
  // Make sure pages are faulted in.
  for (auto addr = currFrontier; addr < newFrontier; addr += size2m) {
    if (mlock(reinterpret_cast<void*>(addr), 1)) {
      // Forget it. We don't really have enough pages reserved. At this moment,
      // we haven't committed to RangeState yet, so it is safe to bail out.
      munmap((void*)currFrontier, hugeSize);
      return false;
    }
  }
  m_currHugePages += hugeSize / size2m;
  m_state.low_map.store(newFrontier, std::memory_order_release);
  return true;
}
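
// Add up to kChunkSize bytes of ordinary pages, growing the mapped region from
// the low end or the high end of the range depending on Direction D.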
template<Direction D>
bool BumpNormalMapper<D>::addMappingImpl() {
  std::lock_guard<RangeState> _(m_state);
  auto const high = m_state.high_map.load(std::memory_order_relaxed);
  auto const low = m_state.low_map.load(std::memory_order_relaxed);
  auto const maxSize = static_cast<size_t>(high - low);
  if (maxSize == 0) return false; // fully mapped
  auto const size = std::min(kChunkSize, maxSize);

  auto const newPageStart = (D == Direction::LowToHigh) ? low : high - size;
  assertx(newPageStart % size4k == 0);

  void* newPages = mmap((void*)newPageStart, size,
                        PROT_READ | PROT_WRITE,
                        MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED,
                        -1, 0);
  if (newPages == MAP_FAILED) return false;
  if (newPages != (void*)newPageStart) {
    assertx(false); // MAP_FIXED should've worked.
    munmap(newPages, size);
    return false;
  }

#ifdef HAVE_NUMA
  if (numa_num_nodes > 1 && m_interleaveMask) {
    unsigned long mask = m_interleaveMask;
    mbind(newPages, size, MPOL_INTERLEAVE,
          &mask, 32 /* max node */, 0 /* flag */);
  }
#endif
  if (D == Direction::LowToHigh) {
    m_state.low_map.store(newPageStart + size, std::memory_order_release);
  } else {
    m_state.high_map.store(newPageStart, std::memory_order_release);
  }
  return true;
}
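
// Record the directory in which the backing file will be created.  Fails if
// the path does not fit in m_dirName.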
bool BumpFileMapper::setDirectory(const char* dir) {
  auto const len = strlen(dir);
  if (len >= sizeof(m_dirName)) {
    return false;
  }
  memcpy(m_dirName, dir, len + 1);
  return true;
}
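
// On the first request, create an unlinked temporary file (O_TMPFILE) under
// m_dirName, grow it to the range's capacity, and map the entire range from it
// in one shot.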
bool BumpFileMapper::addMappingImpl() {
  std::lock_guard<RangeState> _(m_state);
  if (m_fd) return false; // already initialized
  if (!m_dirName[0]) return false; // setDirectory() not done successfully
  // Create a temporary file and map it in upon the first request.
  m_fd = open(m_dirName,
              O_TMPFILE | O_DIRECTORY | O_RDWR | O_CLOEXEC,
              S_IRUSR | S_IWUSR);
  if (m_fd == -1) {
    return false;
  }
  if (ftruncate(m_fd, m_state.capacity())) {
    return false;
  }
  auto const addr = mmap(reinterpret_cast<void*>(m_state.low()),
                         m_state.capacity(),
                         PROT_READ | PROT_WRITE,
                         MAP_FIXED | MAP_SHARED,
                         m_fd, 0);
  if (addr == (void*)-1) {
    return false;
  }
  m_state.low_map.store(m_state.high(), std::memory_order_release);
  return true;
}
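
// Last-resort mapper: make the entire remaining range read/write with
// mprotect(), publish it all as mapped, and invoke the m_exit callback if one
// was provided.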
bool BumpEmergencyMapper::addMappingImpl() {
  std::lock_guard<RangeState> _(m_state);
  auto low = m_state.low();
  auto const high = m_state.high();
  if (low == high) return false; // another thread added this range already.
  mprotect(reinterpret_cast<void*>(low), high - low,
           PROT_READ | PROT_WRITE);
  m_state.low_map.store(high, std::memory_order_release);
  if (m_exit) m_exit();
  return true;
}

template bool BumpNormalMapper<Direction::LowToHigh>::addMappingImpl();
template bool BumpNormalMapper<Direction::HighToLow>::addMappingImpl();

} // namespace alloc
} // namespace HPHP

#endif