2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #include "hphp/util/bump-mapper.h"
19 #include "hphp/util/assertions.h"
20 #include "hphp/util/hugetlb.h"
21 #include "hphp/util/numa.h"
27 #include <folly/portability/SysMman.h>
35 #include <sys/types.h>
38 #if USE_JEMALLOC_EXTENT_HOOKS
// When true, Bump2MMapper falls back to transparent huge pages (THP via
// madvise(MADV_HUGEPAGE)) if reserving explicit hugetlb pages fails; see the
// MAP_FAILED branch in Bump2MMapper::addMappingImpl below.  Defaults to off.
42 bool g_useTHPUponHugeTLBFailure
= false;
// Try to back one more 1G hugetlb page at the low frontier of the managed
// range, preferring round-robin placement across the allowed NUMA nodes, and
// publish the advanced frontier on success.
// NOTE(review): this extract is missing several original lines (the do/while
// loop construct, `continue;`, `return`s, closing braces, and presumably an
// #ifdef HAVE_NUMA guard) -- verify against the complete source file.
46 bool Bump1GMapper::addMappingImpl() {
// Respect the configured cap on 1G pages for this mapper.
47 if (m_currHugePages
>= m_maxHugePages
) return false;
// Nothing to do if the kernel reports no free 1G hugetlb pages.
48 if (get_huge1g_info().free_hugepages
<= 0) return false;
// `_` holds the range lock for the rest of the function (presumably an RAII
// guard returned by m_state.lock() -- confirm).
50 auto _
= m_state
.lock();
51 auto const currFrontier
= m_state
.low_map
.load(std::memory_order_relaxed
);
// A 1G page can only be placed on a 1G-aligned frontier.
52 if (currFrontier
% size1g
!= 0) return false;
53 auto const newFrontier
= currFrontier
+ size1g
;
// Refuse to grow past the high-side frontier (branch body not visible in
// this extract; presumably returns false).
54 if (newFrontier
> m_state
.high_map
.load(std::memory_order_relaxed
)) {
// Multi-node NUMA systems: interleave 1G pages across the allowed nodes.
58 if (numa_num_nodes
> 1) {
59 if (const int numAllowedNodes
= __builtin_popcount(m_interleaveMask
)) {
// The interleave mask must be a subset of the nodes present on the system.
60 assertx((m_interleaveMask
& ~numa_node_set
) == 0);
62 // Try to map huge pages in round-robin fashion starting from m_nextNode.
63 // We try on each allowed NUMA node at most once.
65 auto const currNode
= m_nextNode
;
// Advance the round-robin cursor (wraps via numa_node_mask).
66 m_nextNode
= (currNode
+ 1) & numa_node_mask
;
67 if (!((1u << currNode
) & m_interleaveMask
)) {
68 // Node not allowed, try next one.
71 if (mmap_1g((void*)currFrontier
, currNode
, /* MAP_FIXED */ true)) {
// Success: publish the new low frontier for concurrent readers.
73 m_state
.low_map
.store(newFrontier
, std::memory_order_release
);
// NOTE(review): `failCount` is declared on a line missing from this extract.
// Give up after every allowed node has been tried once.
76 if (++failCount
>= numAllowedNodes
) return false;
// Non-NUMA fallback: node -1 presumably lets the kernel choose the node --
// confirm against mmap_1g's documentation/definition.
81 if (mmap_1g((void*)currFrontier
, -1, /* MAP_FIXED */ true)) {
83 m_state
.low_map
.store(newFrontier
, std::memory_order_release
);
// Granularity for growing 2M-page and normal-page mappings: four 2M huge
// pages (8M) per addMappingImpl() call.
89 constexpr size_t kChunkSize
= 4 * size2m
;
// Extend the mapping at the low frontier with up to kChunkSize of 2M huge
// pages (explicit hugetlb pages, or THP when g_useTHPUponHugeTLBFailure is
// set and the hugetlb mmap fails), fault them in, then publish the frontier.
// NOTE(review): this extract is missing original lines, including the mmap
// trailing arguments (fd/offset), `return`s, and closing braces -- verify
// against the complete source file.
91 bool Bump2MMapper::addMappingImpl() {
// Respect the configured cap on 2M pages for this mapper.
92 if (m_currHugePages
>= m_maxHugePages
) return false;
// With the THP fallback enabled we can always "find" a chunk's worth of
// pages; otherwise ask the kernel how many 2M hugetlb pages are free.
93 const uint32_t freePages
=
94 g_useTHPUponHugeTLBFailure
? (kChunkSize
/ size2m
)
95 : get_huge2m_info().free_hugepages
;
96 if (freePages
<= 0) return false;
// `_` holds the range lock for the rest of the function.
98 auto _
= m_state
.lock();
99 // Recheck the mapping frontiers after grabbing the lock
100 auto const currFrontier
= m_state
.low_map
.load(std::memory_order_relaxed
);
// 2M pages can only be placed on a 2M-aligned frontier.
101 if (currFrontier
% size2m
!= 0) return false;
// Map no more pages than both the remaining quota and the free-page count.
102 auto nPages
= std::min(m_maxHugePages
- m_currHugePages
, freePages
);
103 if (nPages
<= 0) return false;
// Grow at most one chunk (kChunkSize) at a time.
104 auto const hugeSize
= std::min(kChunkSize
, size2m
* nPages
);
105 auto const newFrontier
= currFrontier
+ hugeSize
;
// Refuse to grow past the high-side frontier (branch body not visible in
// this extract; presumably returns false).
106 if (newFrontier
> m_state
.high_map
.load(std::memory_order_relaxed
)) {
// First attempt: explicit hugetlb pages at a fixed address.
109 void* newPages
= mmap((void*)currFrontier
, hugeSize
,
110 PROT_READ
| PROT_WRITE
,
111 MAP_ANONYMOUS
| MAP_PRIVATE
| MAP_FIXED
| MAP_HUGETLB
,
113 if (newPages
== MAP_FAILED
) {
114 if (!g_useTHPUponHugeTLBFailure
) return false;
115 // Use transparent hugepages instead.
116 newPages
= mmap((void*)currFrontier
, hugeSize
,
117 PROT_READ
| PROT_WRITE
,
118 MAP_ANONYMOUS
| MAP_PRIVATE
| MAP_FIXED
,
120 if (newPages
== MAP_FAILED
) return false;
121 assertx(newPages
== (void*)currFrontier
);
// Ask the kernel to promote this range to transparent huge pages.
122 madvise(newPages
, hugeSize
, MADV_HUGEPAGE
);
124 assertx(newPages
== (void*)currFrontier
); // MAP_FIXED should work
// Spread the new pages across the allowed NUMA nodes.
126 if (numa_num_nodes
> 1 && m_interleaveMask
) {
127 unsigned long mask
= m_interleaveMask
;
128 mbind(newPages
, hugeSize
, MPOL_INTERLEAVE
,
129 &mask
, 32 /* max node */, 0 /* flag */);
132 // Make sure pages are faulted in.
133 for (auto addr
= currFrontier
; addr
< newFrontier
; addr
+= size2m
) {
// mlock of a single byte per 2M page forces the page to be populated; a
// failure means the reservation wasn't really there.
134 if (mlock(reinterpret_cast<void*>(addr
), 1)) {
135 // Forget it. We don't really have enough page reserved. At this moment,
136 // we haven't committed to RangeState yet, so it is safe to bail out.
137 munmap((void*)currFrontier
, hugeSize
);
// Success: account for the new pages and publish the new low frontier.
141 m_currHugePages
+= hugeSize
/ size2m
;
142 m_state
.low_map
.store(newFrontier
, std::memory_order_release
);
// Extend the mapping with up to kChunkSize of normal (4K) pages, growing
// from the low end or the high end of the range depending on template
// parameter D, then publish the corresponding frontier.
// NOTE(review): this extract is missing original lines, including the mmap
// trailing arguments (fd/offset), `return`s, the else keyword for the
// high-to-low branch, and closing braces -- verify against the full source.
146 template<Direction D
>
147 bool BumpNormalMapper
<D
>::addMappingImpl() {
// `_` holds the range lock for the rest of the function.
148 auto _
= m_state
.lock();
149 auto const high
= m_state
.high_map
.load(std::memory_order_relaxed
);
150 auto const low
= m_state
.low_map
.load(std::memory_order_relaxed
);
// Unmapped space remaining between the two frontiers.
151 auto const maxSize
= static_cast<size_t>(high
- low
);
152 if (maxSize
== 0) return false; // fully mapped
// Grow at most one chunk (kChunkSize) at a time.
153 auto const size
= std::min(kChunkSize
, maxSize
);
// The new pages sit just above `low`, or just below `high`, per direction D.
155 auto const newPageStart
= (D
== Direction::LowToHigh
) ? low
: high
- size
;
156 assertx(newPageStart
% size4k
== 0);
158 void* newPages
= mmap((void*)newPageStart
, size
,
159 PROT_READ
| PROT_WRITE
,
160 MAP_ANONYMOUS
| MAP_PRIVATE
| MAP_FIXED
,
162 if (newPages
== MAP_FAILED
) return false;
163 if (newPages
!= (void*)newPageStart
) {
164 assertx(false); // MAP_FIXED should've worked.
// Undo the stray mapping before bailing (return not visible in extract).
165 munmap(newPages
, size
);
// Spread the new pages across the allowed NUMA nodes.
170 if (numa_num_nodes
> 1 && m_interleaveMask
) {
171 unsigned long mask
= m_interleaveMask
;
172 mbind(newPages
, size
, MPOL_INTERLEAVE
,
173 &mask
, 32 /* max node */, 0 /* flag */);
// Publish whichever frontier this direction moves.
176 if (D
== Direction::LowToHigh
) {
177 m_state
.low_map
.store(newPageStart
+ size
, std::memory_order_release
);
179 m_state
.high_map
.store(newPageStart
, std::memory_order_release
);
// Record the directory in which addMappingImpl() will later create its
// backing temp file.  Fails when the path does not fit in m_dirName.
// NOTE(review): the bodies of the too-long branch and the final `return
// true;` are on lines missing from this extract.
184 bool BumpFileMapper::setDirectory(const char* dir
) {
185 auto const len
= strlen(dir
);
// Reject paths that don't fit (including the terminating NUL) in the
// fixed-size m_dirName buffer.
186 if (len
>= sizeof(m_dirName
)) {
// Copy the path plus its terminating NUL.
189 memcpy(m_dirName
, dir
, len
+ 1);
// On first use, create an unnamed temp file (O_TMPFILE) in the configured
// directory, size it to the range's capacity, map the whole range from it
// (MAP_SHARED), and mark the range fully mapped.
// NOTE(review): this extract is missing original lines, including open()'s
// mode argument, error-handling branches, the mmap length/fd/offset
// arguments, and the final return -- verify against the full source.
193 bool BumpFileMapper::addMappingImpl() {
// `_` holds the range lock for the rest of the function.
194 auto _
= m_state
.lock();
195 if (m_fd
) return false; // already initialized
196 if (!m_dirName
[0]) return false; // setDirectory() not done successfully
197 // Create a temporary file and map it in upon the first request.
198 m_fd
= open(m_dirName
,
199 O_TMPFILE
| O_DIRECTORY
| O_RDWR
| O_CLOEXEC
,
// Grow the file to cover the entire managed range.
204 if (ftruncate(m_fd
, m_state
.capacity())) {
// Map the file over the range at a fixed address, shared so the file backs
// the memory.
207 auto const addr
= mmap(reinterpret_cast<void*>(m_state
.low()),
209 PROT_READ
| PROT_WRITE
,
210 MAP_FIXED
| MAP_SHARED
,
212 if (addr
== (void*)-1) {
// The whole range is now mapped in one shot: move low_map all the way up.
215 m_state
.low_map
.store(m_state
.high(), std::memory_order_release
);
// Process-wide flag recording that the emergency mapping has been activated
// (set in BumpEmergencyMapper::addMappingImpl below).
219 std::atomic_flag
BumpEmergencyMapper::s_emergencyFlag
= ATOMIC_FLAG_INIT
;
// Last-resort mapper: make the entire reserved range writable in one shot
// (mprotect), mark the range fully mapped, raise the process-wide emergency
// flag, and invoke the optional m_exit callback.
// NOTE(review): the final `return true;` and closing brace are on lines
// missing from this extract.
221 bool BumpEmergencyMapper::addMappingImpl() {
// `_` holds the range lock for the rest of the function.
222 auto _
= m_state
.lock();
223 auto low
= m_state
.low();
224 auto const high
= m_state
.high();
225 if (low
== high
) return false; // another thread added this range already.
// Flip the whole [low, high) reservation from inaccessible to read/write.
226 mprotect(reinterpret_cast<void*>(low
), high
- low
,
227 PROT_READ
| PROT_WRITE
);
// Mark the entire range as mapped.
228 m_state
.low_map
.store(high
, std::memory_order_release
);
229 s_emergencyFlag
.test_and_set();
// Optional hook, presumably to begin process shutdown -- confirm with the
// class definition.
230 if (m_exit
) m_exit();
// Explicit instantiations of the only two directions BumpNormalMapper is
// used with, so the template definition can stay in this .cpp file.
234 template bool BumpNormalMapper
<Direction::LowToHigh
>::addMappingImpl();
235 template bool BumpNormalMapper
<Direction::HighToLow
>::addMappingImpl();