hphp/util/read-only-arena.h
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#pragma once

#include "hphp/util/address-range.h"
#include "hphp/util/slab-manager.h"

#include <atomic>
#include <cstddef>   // offsetof
#include <cstdint>   // uintptr_t
#include <mutex>
#include <folly/Range.h>

namespace HPHP {

//////////////////////////////////////////////////////////////////////

/*
 * ReadOnlyChunk is a header for a piece of allocated memory. It can work as a
 * bump allocator, and can form a list, either inside a ReadOnlyArena, or in a
 * pool shared by multiple ReadOnlyArenas, using the AtomicTaggedSlabPtr
 * facility.
 */
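// A rough layout sketch (illustrative only): the chunk header sits at the
// start of the underlying mapping, and allocations are bumped upward from
// the low end of the remaining space toward the high end:
//
//   [ ReadOnlyChunk header | padding | allocations --> ........ unused ]
//     ^ start of mapping     ^ low bound                   high bound ^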
struct ReadOnlyChunk {
  alloc::RangeState state;
  AtomicTaggedSlabPtr next{};
  ReadOnlyChunk(uintptr_t low, size_t high)
    : state(low, high, alloc::Mapped{}) {}

  auto retained() const {
    return state.retained();
  }
  auto tryAlloc(size_t size, size_t align) {
    return state.tryAllocLow(size, align);
  }
  auto tryFree(void* ptr, size_t size) {
    return state.tryFreeLow(ptr, size);
  }
  // Convert a tagged pointer to `next` to a pointer to the ReadOnlyChunk.
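  // (The list links point at the embedded `next` field, so subtracting
  // offsetof(ReadOnlyChunk, next) recovers the enclosing chunk header.)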
  static ReadOnlyChunk* fromTPtr(const TaggedSlabPtr& tagged) {
    auto constexpr kOffset = offsetof(ReadOnlyChunk, next);
    auto const chunk = reinterpret_cast<char*>(tagged.ptr()) - kOffset;
    return reinterpret_cast<ReadOnlyChunk*>(chunk);
  }
};

/*
 * ReadOnlyArena is a bump allocator for process-lifetime constant data. It is
 * backed by a list of ReadOnlyChunks. Deallocation is not supported (beyond
 * immediately undoing the most recent allocation), but an instance is able to
 * return partially used ReadOnlyChunks to a global pool (a TaggedSlabList),
 * if one is specified during construction.
 *
 * If `Local` is true, no concurrent allocation is supported for an instance
 * (but allocating new chunks, or returning chunks to the global pool, can
 * still happen concurrently among multiple instances).
 */
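
// A minimal usage sketch (not part of this header; the allocator type, sizes,
// and Local mode are picked purely for illustration -- any allocator exposing
// the old-style rebind<char>::other member works, e.g. std::allocator<char>
// before C++20):
//
//   TaggedSlabList pool;  // optional pool shared between arenas
//   ReadOnlyArena<std::allocator<char>, /*Local=*/true> arena(256 << 10,
//                                                             &pool);
//   void* p = arena.allocate(64);  // bump-allocate 64 bytes
//   arena.deallocate(p, 64);       // undoes only the most recent allocation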
template<typename Alloc, bool Local = false, size_t Alignment = 16>
struct ReadOnlyArena : TaggedSlabList {
  static_assert((Alignment & (Alignment - 1)) == 0, "");

  static constexpr size_t kMinChunkSize = 128u << 10;
  static constexpr size_t kChunkSizeMask = kMinChunkSize - 1;
  static_assert((kMinChunkSize & kChunkSizeMask) == 0, "");
  static constexpr unsigned kFragFactor = 8u;

  using Allocator = typename Alloc::template rebind<char>::other;

  /*
   * Create a ReadOnlyArena that uses at least `minChunkSize' bytes for each
   * call to the allocator, and returns partially used chunks to `pool` when
   * destructed.
   */
  explicit ReadOnlyArena(size_t minChunkSize, TaggedSlabList* pool = nullptr)
    : m_pool(pool)
    , m_minChunkSize((minChunkSize + kChunkSizeMask) & ~kChunkSizeMask) {}
  ReadOnlyArena(const ReadOnlyArena&) = delete;
  ReadOnlyArena& operator=(const ReadOnlyArena&) = delete;

  /*
   * Destroying a ReadOnlyArena will not release the chunks it allocated.
   * Instead, partial chunks may be returned to a global pool.
   */
  ~ReadOnlyArena() {
    if (!m_pool) return;
    // No one should be allocating from here at this moment, so no need to hold
    // the lock.
    while (auto const head = try_shared_pop()) {
      auto const chunk = ReadOnlyChunk::fromTPtr(head);
      // Return the partially used chunk to the global pool.
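      // (Only if at least kMinChunkSize / kFragFactor bytes are still
      // unused; smaller remainders are not worth pooling and are leaked.)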
      if (chunk->retained() * kFragFactor >= kMinChunkSize) {
        m_pool->push_front(head.ptr(), head.tag());
      } // otherwise leak it
    }
  }

  size_t capacity() const {
    return m_cap;
  }

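  // Try to allocate from the chunk at the head of the list without taking
  // the lock; returns nullptr if the request doesn't fit there.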
  void* tryAlloc(size_t size) {
    if (auto head = unsafe_peek<Local>()) {
      auto chunk = ReadOnlyChunk::fromTPtr(head);
      // Even if it isn't the most up-to-date head now, this still works.
      auto ret = chunk->tryAlloc(size, Alignment);
      if (Local && ret) recordLast(ret, size);
      return ret;
    }
    return nullptr;
  }

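  // Allocate `size` bytes; takes the lock and adds a new chunk when the
  // current head chunk cannot satisfy the request.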
  void* allocate(size_t size) {
    if (auto p = tryAlloc(size)) return p;
    guard g(m_mutex);
    // Maybe someone else added a chunk already.
    if (auto p = tryAlloc(size)) return p;
    return addChunk(size);
  }

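  // Best-effort deallocation: only the most recent allocation can be undone,
  // and only when `Local` is true; otherwise this is a no-op.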
  void deallocate(void* ptr, size_t size) {
    if (!Local) return;
    if (ptr != m_lastAlloc) return;
    // In theory we shouldn't need to get the lock in Local mode, but let's try
    // to be slightly safer in case a bug is introduced.
    guard g(m_mutex);
    if (ptr != m_lastAlloc) return;
    if (auto head = unsafe_peek<Local>()) {
      auto chunk = ReadOnlyChunk::fromTPtr(head);
      DEBUG_ONLY auto const succ = chunk->tryFree(ptr, size);
      assertx(succ);
    }
  }

private:
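  // Round `n` up to a multiple of `align` (which must be a power of two);
  // e.g. ru<16>(5) == 16 and ru<16>(32) == 32.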
  template<size_t align, typename T>
  static T ru(T n) {
    static_assert((align & (align - 1)) == 0, "");
    return (T)(((uintptr_t)n + align - 1) & ~(align - 1));
  }

  // Need to hold the lock before calling this.
  void* addChunk(size_t size) {
    ReadOnlyChunk* chunk = nullptr;
    if (m_pool) {
      if (auto const tPtr = m_pool->try_shared_pop()) {
        chunk = ReadOnlyChunk::fromTPtr(tPtr);
        if (auto ret = chunk->tryAlloc(size, Alignment)) {
          // Add the partial chunk to the local list.
          push_front(tPtr.ptr(), tPtr.tag());
          m_lastAlloc = ret;
          return ret;
        }
        // The remaining space in the chunk is too small; return it to the
        // pool.
        m_pool->push_front(tPtr.ptr(), tPtr.tag());
        chunk = nullptr;
      }
    }
    assertx(chunk == nullptr);
    auto allocSize =
      ru<kMinChunkSize>(size + ru<Alignment>(sizeof(ReadOnlyChunk)));
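    // For large requests, round the mapping size up to a multiple of
    // kHugeThreshold (4 MiB), presumably so it can be backed by huge pages.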
    constexpr size_t kHugeThreshold = 4096 * 1024;
    if (kMinChunkSize < kHugeThreshold && allocSize > kHugeThreshold) {
      allocSize = ru<kHugeThreshold>(allocSize);
    }
    auto const mem = static_cast<char*>(m_alloc.allocate(allocSize));
    auto const low = ru<Alignment>(mem + sizeof(ReadOnlyChunk));
    auto const high = reinterpret_cast<uintptr_t>(mem + allocSize);
    chunk = new (mem) ReadOnlyChunk((uintptr_t)low, (uintptr_t)high);
    m_cap += allocSize;
    auto ret = chunk->tryAlloc(size, Alignment);
    assertx(ret);
    if (Local) recordLast(ret, size);
    push_front(&(chunk->next), 0); // do this after we get the memory.
    return ret;
  }

  void recordLast(void* ptr, size_t size) {
    if (!Local) return;
    m_lastAlloc = ptr;
    m_lastSize = size;
  }

private:
  TaggedSlabList* m_pool{nullptr};
  // Result of last allocation, used to support immediate deallocation after
  // allocation.
  void* m_lastAlloc{nullptr};
  size_t m_lastSize{0};
  Allocator m_alloc;
  size_t const m_minChunkSize;
  size_t m_cap{0};
  mutable std::mutex m_mutex;
  using guard = std::lock_guard<std::mutex>;
};

//////////////////////////////////////////////////////////////////////

}