/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 *
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "jit/ExecutableAllocator.h"

#include <limits>  // std::numeric_limits, used by roundUpAllocationSize()

#include "js/MemoryMetrics.h"
#include "util/Poison.h"

using namespace js::jit;

ExecutablePool::~ExecutablePool() {
#ifdef DEBUG
  for (size_t bytes : m_codeBytes) {
    MOZ_ASSERT(bytes == 0);
  }
#endif

  MOZ_ASSERT(!isMarked());

  m_allocator->releasePoolPages(this);
}

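// Drop one reference to this pool; the pool deletes itself when the last
// reference goes away. |willDestroy| only feeds the assertion that the
// caller expects this call to be the final one.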
void ExecutablePool::release(bool willDestroy) {
  MOZ_ASSERT(m_refCount != 0);
  MOZ_ASSERT_IF(willDestroy, m_refCount == 1);
  if (--m_refCount == 0) {
    js_delete(this);
  }
}

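// Give back |n| bytes of |kind| code from this pool's accounting, then drop
// the caller's reference to the pool.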
void ExecutablePool::release(size_t n, CodeKind kind) {
  m_codeBytes[kind] -= n;
  MOZ_ASSERT(m_codeBytes[kind] < m_allocation.size);  // Shouldn't underflow.

  release();
}

void ExecutablePool::addRef() {
  // It should be impossible for us to roll over, because only small
  // pools have multiple holders, and they have one holder per chunk
  // of generated code, and they only hold 16KB or so of code.
  MOZ_ASSERT(m_refCount);
  ++m_refCount;
  MOZ_ASSERT(m_refCount, "refcount overflow");
}

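// Bump-pointer allocation: carve |n| bytes off the front of the pool's free
// space. Callers must have already checked available(), so this cannot fail.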
void* ExecutablePool::alloc(size_t n, CodeKind kind) {
  MOZ_ASSERT(n <= available());
  void* result = m_freePtr;
  m_freePtr += n;

  m_codeBytes[kind] += n;

  MOZ_MAKE_MEM_UNDEFINED(result, n);
  return result;
}

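// Bytes of free space remaining in this pool.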
size_t ExecutablePool::available() const {
  MOZ_ASSERT(m_end >= m_freePtr);
  return m_end - m_freePtr;
}

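// Drop the allocator's own reference to each cached small pool. All client
// code must already have been released; these should be the last references.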
ExecutableAllocator::~ExecutableAllocator() {
  for (size_t i = 0; i < m_smallPools.length(); i++) {
    m_smallPools[i]->release(/* willDestroy = */ true);
  }

  // If this asserts, we have a pool leak.
  MOZ_ASSERT(m_pools.empty());
}

ExecutablePool* ExecutableAllocator::poolForSize(size_t n) {
  // Try to fit in an existing small allocator. Use the pool with the
  // least available space that is big enough (best-fit). This is the
  // best strategy because (a) it maximizes the chance of the next
  // allocation fitting in a small pool, and (b) it minimizes the
  // potential waste when a small pool is next abandoned.
  ExecutablePool* minPool = nullptr;
  for (size_t i = 0; i < m_smallPools.length(); i++) {
    ExecutablePool* pool = m_smallPools[i];
    if (n <= pool->available() &&
        (!minPool || pool->available() < minPool->available())) {
      minPool = pool;
    }
  }
  if (minPool) {
    minPool->addRef();
    return minPool;
  }

  // If the request is large, we just provide an unshared allocator.
  if (n > ExecutableCodePageSize) {
    return createPool(n);
  }

  // Create a new allocator.
  ExecutablePool* pool = createPool(ExecutableCodePageSize);
  if (!pool) {
    return nullptr;
  }
  // At this point, local |pool| is the owner.

  if (m_smallPools.length() < maxSmallPools) {
    // We haven't hit the maximum number of live pools; add the new pool.
    // If append() OOMs, we just return an unshared allocator.
    if (m_smallPools.append(pool)) {
      pool->addRef();
    }
  } else {
    // Find the pool with the least space.
    int iMin = 0;
    for (size_t i = 1; i < m_smallPools.length(); i++) {
      if (m_smallPools[i]->available() < m_smallPools[iMin]->available()) {
        iMin = i;
      }
    }

    // If the new allocator will result in more free space than the small
    // pool with the least space, then we will use it instead.
    ExecutablePool* minPool = m_smallPools[iMin];
    if ((pool->available() - n) > minPool->available()) {
      minPool->release();
      m_smallPools[iMin] = pool;
      pool->addRef();
    }
  }

  // Pass ownership to the caller.
  return pool;
}

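// Round |request| up to the next multiple of |granularity|, which must be a
// power of two for the mask arithmetic below. Returns OVERSIZE_ALLOCATION if
// the rounding would overflow size_t.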
/* static */
size_t ExecutableAllocator::roundUpAllocationSize(size_t request,
                                                  size_t granularity) {
  if ((std::numeric_limits<size_t>::max() - granularity) <= request) {
    return OVERSIZE_ALLOCATION;
  }

  // Round up to next page boundary.
  size_t size = request + (granularity - 1);
  size = size & ~(granularity - 1);
  MOZ_ASSERT(size >= request);
  return size;
}

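// Map a fresh chunk of executable memory big enough for |n| bytes, wrap it
// in an ExecutablePool, and register that pool in m_pools. Returns nullptr
// on failure at any step.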
ExecutablePool* ExecutableAllocator::createPool(size_t n) {
  size_t allocSize = roundUpAllocationSize(n, ExecutableCodePageSize);
  if (allocSize == OVERSIZE_ALLOCATION) {
    return nullptr;
  }

  ExecutablePool::Allocation a = systemAlloc(allocSize);
  if (!a.pages) {
    return nullptr;
  }

  ExecutablePool* pool = js_new<ExecutablePool>(this, a);
  if (!pool) {
    systemRelease(a);
    return nullptr;
  }

  if (!m_pools.put(pool)) {
    // Note: this will call |systemRelease(a)|.
    js_delete(pool);
    return nullptr;
  }

  return pool;
}

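// Typical call sequence, as a sketch (everything except alloc() and
// release() below is illustrative, not defined in this file):
//
//   ExecutablePool* pool = nullptr;
//   void* code = execAlloc->alloc(cx, numBytes, &pool, CodeKind::Baseline);
//   if (!code) { /* handle OOM */ }
//   ... emit machine code into |code| ...
//   pool->release(numBytes, CodeKind::Baseline);  // when the code dies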
void* ExecutableAllocator::alloc(JSContext* cx, size_t n,
                                 ExecutablePool** poolp, CodeKind type) {
  // Caller must ensure 'n' is word-size aligned. If all allocations are
  // of word-sized quantities, then all subsequent allocations will be
  // aligned.
  MOZ_ASSERT(roundUpAllocationSize(n, sizeof(void*)) == n);

  if (n == OVERSIZE_ALLOCATION) {
    *poolp = nullptr;
    return nullptr;
  }

  *poolp = poolForSize(n);
  if (!*poolp) {
    return nullptr;
  }

  // This alloc is infallible because poolForSize() just obtained
  // (found, or created if necessary) a pool that had enough space.
  void* result = (*poolp)->alloc(n, type);
  MOZ_ASSERT(result);

  return result;
}

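// Unmap a pool's pages and forget the pool. Called from ~ExecutablePool once
// the last reference has been dropped.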
void ExecutableAllocator::releasePoolPages(ExecutablePool* pool) {
  MOZ_ASSERT(pool->m_allocation.pages);
  systemRelease(pool->m_allocation);

  // Pool may not be present in m_pools if we hit OOM during creation.
  if (auto ptr = m_pools.lookup(pool)) {
    m_pools.remove(ptr);
  }
}

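// Discard cached small pools that no longer host live code. A pool whose
// only remaining reference is the allocator's own (m_refCount == 1) cannot
// be serving any outstanding allocation, so releasing it frees its pages.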
void ExecutableAllocator::purge() {
  for (size_t i = 0; i < m_smallPools.length();) {
    ExecutablePool* pool = m_smallPools[i];
    if (pool->m_refCount > 1) {
      // Releasing this pool is not going to deallocate it, so we might as
      // well hold on to it and reuse it for future allocations.
      i++;
      continue;
    }

    MOZ_ASSERT(pool->m_refCount == 1);
    pool->release();
    m_smallPools.erase(&m_smallPools[i]);
  }
}

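// Accumulate per-kind code sizes (plus unused space) across all live pools,
// for memory reporting.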
void ExecutableAllocator::addSizeOfCode(JS::CodeSizes* sizes) const {
  for (ExecPoolHashSet::Range r = m_pools.all(); !r.empty(); r.popFront()) {
    ExecutablePool* pool = r.front();
    sizes->ion += pool->m_codeBytes[CodeKind::Ion];
    sizes->baseline += pool->m_codeBytes[CodeKind::Baseline];
    sizes->regexp += pool->m_codeBytes[CodeKind::RegExp];
    sizes->other += pool->m_codeBytes[CodeKind::Other];
    sizes->unused += pool->m_allocation.size - pool->usedCodeBytes();
  }
}

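// Switch the protection on the used portion of |pool| (from the start of the
// allocation up to m_freePtr). A failed reprotection is unrecoverable, so we
// crash rather than continue with mismatched permissions.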
/* static */
void ExecutableAllocator::reprotectPool(JSRuntime* rt, ExecutablePool* pool,
                                        ProtectionSetting protection,
                                        MustFlushICache flushICache) {
  char* start = pool->m_allocation.pages;
  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!ReprotectRegion(start, pool->m_freePtr - start, protection,
                       flushICache)) {
    oomUnsafe.crash("ExecutableAllocator::reprotectPool");
  }
}

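// Fill swept code ranges with JS_SWEPT_CODE_PATTERN so that a stale jump
// into freed JIT code fails in a recognizable way instead of executing
// leftover instructions.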
/* static */
void ExecutableAllocator::poisonCode(JSRuntime* rt,
                                     JitPoisonRangeVector& ranges) {
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));

#ifdef DEBUG
  // Make sure no pools have the mark bit set.
  for (size_t i = 0; i < ranges.length(); i++) {
    MOZ_ASSERT(!ranges[i].pool->isMarked());
  }
#endif

  AutoMarkJitCodeWritableForThread writable;

  for (size_t i = 0; i < ranges.length(); i++) {
    ExecutablePool* pool = ranges[i].pool;
    if (pool->m_refCount == 1) {
      // This is the last reference so the release() call below will
      // unmap the memory. Don't bother poisoning it.
      continue;
    }

    MOZ_ASSERT(pool->m_refCount > 1);

    // Use the pool's mark bit to indicate we made the pool writable.
    // This avoids reprotecting a pool multiple times.
    if (!pool->isMarked()) {
      reprotectPool(rt, pool, ProtectionSetting::Writable,
                    MustFlushICache::No);
      pool->mark();
    }

    // Note: we use memset instead of js::Poison because we want to poison
    // JIT code in release builds too. Furthermore, we don't want the
    // invalid-ObjectValue poisoning js::Poison does in debug builds.
    memset(ranges[i].start, JS_SWEPT_CODE_PATTERN, ranges[i].size);
    MOZ_MAKE_MEM_NOACCESS(ranges[i].start, ranges[i].size);
  }

  // Make the pools executable again and drop references. We don't flush
  // the ICache here, to avoid adding extra overhead.
  for (size_t i = 0; i < ranges.length(); i++) {
    ExecutablePool* pool = ranges[i].pool;
    if (pool->isMarked()) {
      reprotectPool(rt, pool, ProtectionSetting::Executable,
                    MustFlushICache::No);
      pool->unmark();
    }
    pool->release();
  }
}

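// Reserve |n| bytes of executable memory from the OS. On failure the
// returned Allocation has null |pages|; callers must check for that.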
ExecutablePool::Allocation ExecutableAllocator::systemAlloc(size_t n) {
  void* allocation = AllocateExecutableMemory(n, ProtectionSetting::Executable,
                                              MemCheckKind::MakeNoAccess);
  ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocation), n};
  return alloc;
}

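// Return pages obtained from systemAlloc() to the OS.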
void ExecutableAllocator::systemRelease(
    const ExecutablePool::Allocation& alloc) {
  DeallocateExecutableMemory(alloc.pages, alloc.size);
}