Add logging for comparison behaviors
[hiphop-php.git] / hphp/util/bump-mapper.cpp
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/util/bump-mapper.h"

#include "hphp/util/assertions.h"
#include "hphp/util/hugetlb.h"
#include "hphp/util/numa.h"

#include <algorithm>
#include <atomic>
#include <mutex>

#include <folly/portability/SysMman.h>

#ifdef HAVE_NUMA
#include <numaif.h>
#endif

#if USE_JEMALLOC_EXTENT_HOOKS

namespace HPHP { namespace alloc {
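
// Bump1GMapper: extend the low frontier of the range by one 1G huge page per
// call.  With NUMA enabled, the allowed nodes in m_interleaveMask are tried
// round-robin (starting at m_nextNode) so that consecutive pages spread
// across nodes; without NUMA, or if no node is allowed, the page may come
// from any node.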
bool Bump1GMapper::addMappingImpl() {
  if (m_currHugePages >= m_maxHugePages) return false;
  if (get_huge1g_info().free_hugepages <= 0) return false;

  std::lock_guard<RangeState> _(m_state);
  // Recheck the mapping frontier after grabbing the lock.
  auto const currFrontier = m_state.low_map.load(std::memory_order_relaxed);
  if (currFrontier % size1g != 0) return false;
  auto const newFrontier = currFrontier + size1g;
  if (newFrontier > m_state.high_map.load(std::memory_order_relaxed)) {
    return false;
  }
#ifdef HAVE_NUMA
  if (numa_num_nodes > 1) {
    if (const int numAllowedNodes = __builtin_popcount(m_interleaveMask)) {
      assertx((m_interleaveMask & ~numa_node_set) == 0);
      int failCount = 0;
      // Try to map huge pages in round-robin fashion starting from m_nextNode.
      // We try on each allowed NUMA node at most once.
      do {
        auto const currNode = m_nextNode;
        m_nextNode = (currNode + 1) & numa_node_mask;
        if (!((1u << currNode) & m_interleaveMask)) {
          // Node not allowed, try next one.
          continue;
        }
        if (mmap_1g((void*)currFrontier, currNode, /* MAP_FIXED */ true)) {
          ++m_currHugePages;
          m_state.low_map.store(newFrontier, std::memory_order_release);
          return true;
        }
        if (++failCount >= numAllowedNodes) return false;
      } while (true);
    }
  }
#endif
  if (mmap_1g((void*)currFrontier, -1, /* MAP_FIXED */ true)) {
    ++m_currHugePages;
    m_state.low_map.store(newFrontier, std::memory_order_release);
    return true;
  }
  return false;
}

// Upper bound on the amount of address space mapped in a single call by the
// mappers below: 4 * 2M = 8M.
constexpr size_t kChunkSize = 4 * size2m;
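
// Bump2MMapper: extend the low frontier with up to kChunkSize of 2M huge
// pages, limited by m_maxHugePages and by the free pages in the kernel's 2M
// hugetlb pool.  Each new page is mlock()ed so it is faulted in before the
// frontier is published; if that fails, the mapping is undone.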
bool Bump2MMapper::addMappingImpl() {
  if (m_currHugePages >= m_maxHugePages) return false;
  auto const freePages = get_huge2m_info().free_hugepages;
  if (freePages <= 0) return false;

  std::lock_guard<RangeState> _(m_state);
  // Recheck the mapping frontiers after grabbing the lock.
  auto const currFrontier = m_state.low_map.load(std::memory_order_relaxed);
  if (currFrontier % size2m != 0) return false;
  auto nPages = std::min(m_maxHugePages - m_currHugePages, freePages);
  if (nPages <= 0) return false;
  auto const hugeSize = std::min(kChunkSize, size2m * nPages);
  auto const newFrontier = currFrontier + hugeSize;
  if (newFrontier > m_state.high_map.load(std::memory_order_relaxed)) {
    return false;
  }
  void* newPages = mmap((void*)currFrontier, hugeSize,
                        PROT_READ | PROT_WRITE,
                        MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED | MAP_HUGETLB,
                        -1, 0);
  if (newPages == MAP_FAILED) return false;
  assertx(newPages == (void*)currFrontier);  // MAP_FIXED should work
#ifdef HAVE_NUMA
  if (numa_num_nodes > 1 && m_interleaveMask) {
    // Interleave the new pages across the allowed NUMA nodes.
    unsigned long mask = m_interleaveMask;
    mbind(newPages, hugeSize, MPOL_INTERLEAVE,
          &mask, 32 /* max node */, 0 /* flag */);
  }
#endif
  // Make sure pages are faulted in.
  for (auto addr = currFrontier; addr < newFrontier; addr += size2m) {
    if (mlock(reinterpret_cast<void*>(addr), 1)) {
      // Forget it. We don't really have enough pages reserved. At this moment,
      // we haven't committed to RangeState yet, so it is safe to bail out.
      munmap((void*)currFrontier, hugeSize);
      return false;
    }
  }
  m_currHugePages += hugeSize / size2m;
  m_state.low_map.store(newFrontier, std::memory_order_release);
  return true;
}
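
// BumpNormalMapper: extend the range with normal (4K) pages, at most
// kChunkSize at a time.  The LowToHigh specialization grows the low frontier
// upward; HighToLow grows the high frontier downward.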
template<Direction D>
bool BumpNormalMapper<D>::addMappingImpl() {
  std::lock_guard<RangeState> _(m_state);
  auto const high = m_state.high_map.load(std::memory_order_relaxed);
  auto const low = m_state.low_map.load(std::memory_order_relaxed);
  auto const maxSize = static_cast<size_t>(high - low);
  if (maxSize == 0) return false;       // fully mapped
  auto const size = std::min(kChunkSize, maxSize);

  auto const newPageStart = (D == Direction::LowToHigh) ? low : high - size;
  assertx(newPageStart % size4k == 0);

  void* newPages = mmap((void*)newPageStart, size,
                        PROT_READ | PROT_WRITE,
                        MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED,
                        -1, 0);
  if (newPages == MAP_FAILED) return false;
  if (newPages != (void*)newPageStart) {
    assertx(false);                     // MAP_FIXED should've worked.
    munmap(newPages, size);
    return false;
  }

#ifdef HAVE_NUMA
  if (numa_num_nodes > 1 && m_interleaveMask) {
    // Interleave the new pages across the allowed NUMA nodes.
    unsigned long mask = m_interleaveMask;
    mbind(newPages, size, MPOL_INTERLEAVE,
          &mask, 32 /* max node */, 0 /* flag */);
  }
#endif

  if (D == Direction::LowToHigh) {
    m_state.low_map.store(newPageStart + size, std::memory_order_release);
  } else {
    m_state.high_map.store(newPageStart, std::memory_order_release);
  }
  return true;
}
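
// Explicit instantiations for the two growth directions.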
template bool BumpNormalMapper<Direction::LowToHigh>::addMappingImpl();
template bool BumpNormalMapper<Direction::HighToLow>::addMappingImpl();

} // namespace alloc
} // namespace HPHP

#endif