hphp/util/extent-hooks.cpp
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#include "hphp/util/extent-hooks.h"

#include "hphp/util/assertions.h"
#include "hphp/util/managed-arena.h"
#include <folly/portability/SysMman.h>

#if USE_JEMALLOC_EXTENT_HOOKS

namespace HPHP { namespace alloc {
// Trivial jemalloc extent hooks. If a hook always returns true (indicating
// failure), setting it to NULL can be more efficient.

static bool
extent_commit(extent_hooks_t* /*extent_hooks*/, void* /*addr*/, size_t /*size*/,
              size_t /*offset*/, size_t /*length*/, unsigned /*arena_ind*/) {
  return false;
}

static bool
extent_purge(extent_hooks_t* /*extent_hooks*/, void* addr, size_t size,
             size_t offset, size_t length, unsigned /*arena_ind*/) {
  // This function should return false upon success, which is the case when
  // madvise returns 0.
  return madvise((char*)addr + offset, length, MADV_DONTNEED);
}

static bool
extent_purge_lazy(extent_hooks_t* /*extent_hooks*/, void* addr, size_t size,
                  size_t offset, size_t length, unsigned /*arena_ind*/) {
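  // MADV_FREE (Linux 4.5+) merely marks the pages reclaimable and lets the
  // kernel take them back lazily, which is all a "lazy" purge requires;
  // MADV_DONTNEED drops the pages eagerly and is the portable fallback.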
#ifdef MADV_FREE
  return madvise((char*)addr + offset, length, MADV_FREE);
#else
  return madvise((char*)addr + offset, length, MADV_DONTNEED);
#endif
}

static bool extent_split(extent_hooks_t* /*extent_hooks*/, void* /*addr*/,
                         size_t /*size*/, size_t /*sizea*/, size_t /*sizeb*/,
                         bool /*committed*/, unsigned /*arena_ind*/) {
  return false;
}

static bool extent_merge(extent_hooks_t* /*extent_hooks*/, void* /*addra*/,
                         size_t /*sizea*/, void* /*addrb*/, size_t /*sizeb*/,
                         bool /*committed*/, unsigned /*arena_ind*/) {
  return false;
}

extent_hooks_t MultiRangeExtentAllocator::s_hooks {
  MultiRangeExtentAllocator::extent_alloc,
  nullptr,              // dalloc
  nullptr,              // destroy
  extent_commit,
  nullptr,              // decommit
  extent_purge_lazy,    // purge_lazy
  extent_purge,         // purge_forced
  extent_split,
  extent_merge
};
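// A minimal sketch (illustrative, not part of this file's logic) of how a
// hook table like s_hooks gets attached to a jemalloc arena. The
// "arenas.create" mallctl name is standard jemalloc; the helper itself is
// hypothetical and assumes <jemalloc/jemalloc.h> and <stdexcept>.
#if 0
static unsigned createArenaWith(extent_hooks_t* hooks) {
  unsigned arena_id = 0;
  size_t sz = sizeof(arena_id);
  // jemalloc accepts the hook table directly at arena-creation time.
  if (mallctl("arenas.create", &arena_id, &sz, &hooks, sizeof(hooks)) != 0) {
    throw std::runtime_error{"arenas.create failed"};
  }
  return arena_id;
}
#endif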
void MultiRangeExtentAllocator::appendMapper(RangeMapper* m) {
  for (auto& p : m_mappers) {
    RangeMapper* expected = nullptr;
    if (p.compare_exchange_strong(expected, m, std::memory_order_acq_rel)) {
      return;
    }
  }
  throw std::runtime_error{"too many mappers (check kMaxMapperCount)"};
}
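// Self-contained sketch of the lock-free "claim the first empty slot"
// pattern appendMapper() relies on: compare_exchange_strong() succeeds only
// for the thread that still observes nullptr, so each slot goes to exactly
// one caller. Illustrative only; assumes <array>, <atomic>, <stdexcept>.
#if 0
template <typename T, size_t N>
void claimFirstEmptySlot(std::array<std::atomic<T*>, N>& slots, T* item) {
  for (auto& slot : slots) {
    T* expected = nullptr;
    if (slot.compare_exchange_strong(expected, item,
                                     std::memory_order_acq_rel)) {
      return;               // this thread owns the slot
    }
    // On failure, `expected` holds the occupant; move on to the next slot.
  }
  throw std::runtime_error{"no free slot"};
}
#endif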
size_t MultiRangeExtentAllocator::maxCapacity() const {
  size_t result = 0;
  for (auto& mapper : m_mappers) {
    auto p = mapper.load(std::memory_order_acquire);
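    // appendMapper() fills the slots in order, so the first null entry
    // marks the end of the registered mappers.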
    if (p == nullptr) {
      break;
    }
    result += p->getRangeState().capacity();
  }
  return result;
}
void* MultiRangeExtentAllocator::
extent_alloc(extent_hooks_t* extent_hooks, void* addr,
             size_t size, size_t alignment, bool* zero,
             bool* commit, unsigned arena_ind) {
  assertx(extent_hooks == &MultiRangeExtentAllocator::s_hooks);
  if (addr != nullptr) {
    assertx(false);
    return nullptr;
  }
  assert(folly::isPowTwo(alignment));
  assertx(alignment <= (2u << 20));

  auto extAlloc = GetByArenaId<MultiRangeExtentAllocator>(arena_ind);
  for (auto& mapper : extAlloc->m_mappers) {
    auto rangeMapper = mapper.load(std::memory_order_acquire);
    if (!rangeMapper) return nullptr;
    // RangeMapper::addMappingImpl() holds the lock on RangeState when adding
    // new mappings, so no additional locking is needed here.
    if (auto addr = rangeMapper->alloc(size, alignment)) {
      extAlloc->m_allocatedSize.fetch_add(size, std::memory_order_relaxed);
      return addr;
    }
  }
  not_reached();
  return nullptr;
}

extent_hooks_t RangeFallbackExtentAllocator::s_hooks {
  RangeFallbackExtentAllocator::extent_alloc,
  nullptr,              // dalloc, always fail with opt_retain
  RangeFallbackExtentAllocator::extent_destroy,
  RangeFallbackExtentAllocator::extent_commit,
  nullptr,              // decommit, no need with vm_overcommit
  RangeFallbackExtentAllocator::extent_purge_lazy,
  RangeFallbackExtentAllocator::extent_purge,
  extent_split,         // always split
  extent_merge          // always merge
};
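// Hedged sketch of one way the fallback table could be captured: a single
// read-write mallctl on "arena.<i>.extent_hooks" yields the arena's
// previous hooks while installing the new ones. The mallctl name is
// standard jemalloc; the helper and its error handling are hypothetical,
// and <cstdio> plus <jemalloc/jemalloc.h> are assumed.
#if 0
static extent_hooks_t* swapInHooks(unsigned arena_id,
                                   extent_hooks_t* new_hooks) {
  char cmd[64];
  snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_id);
  extent_hooks_t* old_hooks = nullptr;
  size_t sz = sizeof(old_hooks);
  // Reads the current table into old_hooks while writing new_hooks.
  if (mallctl(cmd, &old_hooks, &sz, &new_hooks, sizeof(new_hooks)) != 0) {
    return nullptr;
  }
  return old_hooks;     // candidate for m_fallback_hooks
}
#endif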
void* RangeFallbackExtentAllocator::
extent_alloc(extent_hooks_t* extent_hooks, void* addr,
             size_t size, size_t alignment, bool* zero,
             bool* commit, unsigned arena_ind) {
  assertx(extent_hooks == &RangeFallbackExtentAllocator::s_hooks);
  auto extAlloc = GetByArenaId<RangeFallbackExtentAllocator>(arena_ind);
  auto fallback_hooks = extAlloc->m_fallback_hooks;
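  // 2MiB: the size of an x86-64 huge page, and the same upper bound on
  // alignment asserted in MultiRangeExtentAllocator::extent_alloc above.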
  constexpr size_t kAlign = 2u << 20;
  if (addr != nullptr || alignment > kAlign) {
    // Let the default hook handle weird cases.
    return fallback_hooks->alloc(extent_hooks, addr, size, alignment,
                                 zero, commit, arena_ind);
  }
  if (auto addr = extAlloc->getLowMapper()->alloc(size, alignment)) return addr;
  return fallback_hooks->alloc(extent_hooks, addr, size, alignment,
                               zero, commit, arena_ind);
}

void RangeFallbackExtentAllocator::
extent_destroy(extent_hooks_t* extent_hooks, void* addr, size_t size,
               bool committed, unsigned arena_ind) {
  auto extAlloc = GetByArenaId<RangeFallbackExtentAllocator>(arena_ind);
  if (extAlloc->inRange(addr)) return;
  auto fallback_hooks = extAlloc->m_fallback_hooks;
  return fallback_hooks->destroy(extent_hooks, addr, size,
                                 committed, arena_ind);
}

bool RangeFallbackExtentAllocator::
extent_commit(extent_hooks_t* extent_hooks, void* addr, size_t size,
              size_t offset, size_t length, unsigned arena_ind) {
  auto extAlloc = GetByArenaId<RangeFallbackExtentAllocator>(arena_ind);
  if (extAlloc->inRange(addr)) return false;
  auto fallback_hooks = extAlloc->m_fallback_hooks;
  return fallback_hooks->commit(extent_hooks, addr, size,
                                offset, length, arena_ind);
}

bool RangeFallbackExtentAllocator::
extent_purge_lazy(extent_hooks_t* extent_hooks, void* addr, size_t size,
                  size_t offset, size_t length, unsigned arena_ind) {
  auto extAlloc = GetByArenaId<RangeFallbackExtentAllocator>(arena_ind);
  if (extAlloc->inRange(addr)) return true;          // never purge
  auto fallback_hooks = extAlloc->m_fallback_hooks;
  auto fallback_purge = fallback_hooks->purge_lazy;
  if (!fallback_purge) return true;
  return fallback_purge(extent_hooks, addr, size, offset, length, arena_ind);
}
bool RangeFallbackExtentAllocator::
extent_purge(extent_hooks_t* extent_hooks, void* addr, size_t size,
             size_t offset, size_t length, unsigned arena_ind) {
  auto extAlloc = GetByArenaId<RangeFallbackExtentAllocator>(arena_ind);
  if (extAlloc->inRange(addr)) return true;          // never purge
  auto fallback_hooks = extAlloc->m_fallback_hooks;
  auto fallback_purge = fallback_hooks->purge_forced;
  if (!fallback_purge) return true;
  return fallback_purge(extent_hooks, addr, size, offset, length, arena_ind);
}

}}

#endif