hphp/util/slab-manager.h
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#pragma once

#include "hphp/util/assertions.h"
#include "hphp/util/portability.h"

#include <atomic>
#include <utility>

#include <folly/portability/SysMman.h>

namespace HPHP {
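
// Slab geometry: with kLgSlabSize == 21 each slab is 2 MiB, and kSlabAlign
// means slabs are expected to start on a 2 MiB boundary.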
constexpr unsigned kLgSlabSize = 21;
constexpr size_t kSlabSize = 1ull << kLgSlabSize;
constexpr size_t kSlabAlign = kSlabSize;

// To mitigate the ABA problem (i.e., a slab is allocated and returned to the
// list without another thread noticing), we tag the pointers in the upper 16
// bits. This should be sufficient for our purpose of slab management, so we
// don't consider also using other bits for now.
struct TaggedSlabPtr {
  static constexpr size_t TagShift = 48;
  static constexpr uintptr_t TagMask = ((1ull << 16) - 1) << TagShift;
  TaggedSlabPtr() noexcept : rep(0) {}
  /* implicit */ TaggedSlabPtr(std::nullptr_t) noexcept : rep(0) {}
  TaggedSlabPtr(void* p, uint16_t tag = 0) noexcept
    : rep(reinterpret_cast<uintptr_t>(p) |
          (static_cast<uintptr_t>(tag) << TagShift)) {
    assertx(ptr() == p);
  }
  void* ptr() const {
    return reinterpret_cast<void*>(rep & ~TagMask);
  }
  uint16_t tag() const {
    return static_cast<uint16_t>(rep >> TagShift);
  }
  explicit operator bool() const {
    return !!rep;
  }
  bool operator==(const TaggedSlabPtr o) const {
    return rep == o.rep;
  }
 private:
  uintptr_t rep;
};
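
// For illustration: how the tag makes a recycled slab distinguishable from a
// stale snapshot of it. `p` stands for some address whose upper 16 bits are
// clear (which the constructor asserts).
//
//   TaggedSlabPtr a{p, 7};
//   assertx(a.ptr() == p && a.tag() == 7);
//   TaggedSlabPtr b{p, 8};   // same slab pushed again with a bumped tag
//   assertx(!(a == b));      // a CAS expecting `a` will fail against `b`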

using AtomicTaggedSlabPtr = std::atomic<TaggedSlabPtr>;

/*
 * Intrusive singly linked list of slabs using TaggedSlabPtr at the beginning
 * of each slab.
 */
struct TaggedSlabList {
  bool empty() const {
    return !m_head.load(std::memory_order_relaxed);
  }
  TaggedSlabPtr head() {
    return m_head.load(std::memory_order_relaxed);
  }
  /*
   * Return the number of bytes held in the tagged slab list.
   *
   * The value is calculated using atomic adds and subs, and may become
   * negative as the operations are executed with a relaxed ordering.
   */
  ssize_t bytes() const {
    return m_bytes.load(std::memory_order_relaxed);
  }
  /*
   * Add a slab to the list. If `local`, assume the list is only accessed in a
   * single thread.
   */
  template<bool local = false> void push_front(void* p, uint16_t tag) {
    m_bytes.fetch_add(kSlabSize, std::memory_order_relaxed);
    ++tag;
    TaggedSlabPtr tagged{p, tag};
    auto ptr = reinterpret_cast<AtomicTaggedSlabPtr*>(p);
    if (local) {
      auto currHead = m_head.load(std::memory_order_relaxed);
      ptr->store(currHead, std::memory_order_relaxed);
      m_head.store(tagged, std::memory_order_relaxed);
      return;
    }
    auto currHead = m_head.load(std::memory_order_acquire);
    while (true) {
      ptr->store(currHead, std::memory_order_release);
      if (m_head.compare_exchange_weak(currHead, tagged,
                                       std::memory_order_release)) {
        return;
      } // otherwise currHead is updated with latest value of m_head.
    }
  }
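
  // Note on ordering: the release store that links the new slab to the old
  // head, followed by the release CAS on m_head, pairs with the acquire load
  // of m_head in try_shared_pop(), so a consumer that observes the new head
  // also observes the next pointer stored inside it.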

  /*
   * Get the head. Note that you cannot assume that the head hasn't changed
   * unless access happens only in a single thread.
   */
  template<bool local = false> TaggedSlabPtr unsafe_peek() {
    return m_head.load(local ? std::memory_order_relaxed
                             : std::memory_order_acquire);
  }

  /*
   * Try to return a slab from the list, assuming the list is only accessed
   * in a single thread. Return a nullptr when the list is empty.
   */
  TaggedSlabPtr try_local_pop() {
    if (auto const currHead = m_head.load(std::memory_order_relaxed)) {
      auto const ptr = reinterpret_cast<AtomicTaggedSlabPtr*>(currHead.ptr());
      auto next = ptr->load(std::memory_order_relaxed);
      assertx(m_head.load(std::memory_order_acquire) == currHead);
      m_head.store(next, std::memory_order_relaxed);
      m_bytes.fetch_sub(kSlabSize, std::memory_order_relaxed);
      return currHead;
    }
    return nullptr;
  }
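
  // The assertx in try_local_pop() re-reads m_head purely as a debug check of
  // the single-thread assumption: nothing may push or pop between the two
  // loads. Concurrent callers must use try_shared_pop() instead.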

  /*
   * Try to return a slab from the list, which can happen concurrently from
   * multiple threads. Return a nullptr when the list is empty.
   */
  TaggedSlabPtr try_shared_pop() {
    auto currHead = m_head.load(std::memory_order_acquire);
    while (currHead) {
      auto const ptr = reinterpret_cast<AtomicTaggedSlabPtr*>(currHead.ptr());
      auto next = ptr->load(std::memory_order_acquire);
      if (m_head.compare_exchange_weak(currHead, next,
                                       std::memory_order_release)) {
        m_bytes.fetch_sub(kSlabSize, std::memory_order_relaxed);
        return currHead;
      } // otherwise currHead is updated with latest value of m_head.
    }
    return nullptr;
  }
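
  // This is where the tags earn their keep: if the slab at the head is popped
  // and later pushed back by other threads while we hold a stale `next`, the
  // re-pushed head carries a bumped tag, so our compare_exchange_weak fails
  // and we retry instead of installing a next pointer that is no longer in
  // the list.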

  // Divide a preallocated piece of memory into slabs and add to the list.
  template<bool local = false>
  void addRange(void* ptr, std::size_t size) {
    if (!ptr) return;
    while (size >= kSlabSize) {
      push_front<local>(ptr, 0);
      size -= kSlabSize;
      ptr = reinterpret_cast<char*>(ptr) + kSlabSize;
    }
  }
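
  // A sketch of how a caller might seed the list (the names `list`, `arena`,
  // and `n` are placeholders, not taken from this codebase):
  //
  //   // assuming `arena` points to n * kSlabSize bytes aligned to kSlabAlign
  //   list.addRange<true>(arena, n * kSlabSize);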

 protected:
  AtomicTaggedSlabPtr m_head;
  std::atomic<ssize_t> m_bytes{0};
};

struct SlabManager : TaggedSlabList {
  TaggedSlabPtr tryAlloc() {
    return try_shared_pop();
  }

  // Push everything in a local TaggedSlabList `other` that ends with
  // `otherTail` to this global list. The linking on the local list should be
  // performed before this call. This is intended for returning multiple local
  // slabs to the global list in one batch at the end of each request.
  void merge(TaggedSlabList&& other, void* otherTail) {
    auto const newHead = other.head();
    assertx(newHead);
    m_bytes.fetch_add(other.bytes(), std::memory_order_relaxed);
    // No need to bump the tag here, as it is already bumped when forming the
    // local list.
    auto last = reinterpret_cast<AtomicTaggedSlabPtr*>(otherTail);
    auto currHead = m_head.load(std::memory_order_acquire);
    while (true) {
      last->store(currHead, std::memory_order_release);
      if (m_head.compare_exchange_weak(currHead, newHead,
                                       std::memory_order_release)) {
        return;
      } // otherwise currHead is updated with latest value of m_head.
    }
  }
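
  // The tags on the merged slabs were already bumped by push_front<true>()
  // when the local list was built, so splicing the whole chain in with a
  // single CAS remains ABA-safe for concurrent poppers.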

  // Try to unmap the slabs when the server is about to shut down.
  void shutdown() {
    // List of slabs that are still in memory after efforts to unmap them.
    TaggedSlabList failed;
    void* failedTail = nullptr;
    while (auto p = tryAlloc()) {
      auto ptr = p.ptr();
      auto tag = p.tag();
      if (!UnmapSlab(ptr)) {
        failed.push_front<true>(ptr, tag);
        if (!failedTail) failedTail = ptr;
      }
    }
    if (failedTail) merge(std::move(failed), failedTail);
  }

  // Return whether unmapping was successful.
  static bool UnmapSlab(void* ptr) {
    // The Linux man page for munmap mentions "All pages containing a part of
    // the indicated range are unmapped", which could mean that the entire 1GB
    // page could get unmapped when we only intend to unmap a single slab in
    // it. Thus, we try to create a new mapping (not in RSS) to replace this
    // slab.
    return mmap(ptr, kSlabSize, PROT_NONE,
                MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                -1, 0) == ptr;
  }
};

}
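
// Hypothetical usage sketch, for illustration only. The names g_slabManager,
// arena, arenaSize, localList, and localTail are assumptions, not part of
// this header; the flow follows the comments above (per-request local lists
// merged back in one batch, shutdown at process exit).
//
//   SlabManager g_slabManager;                 // shared, process-wide pool
//
//   // At startup, seed the pool from a slab-aligned arena.
//   g_slabManager.addRange(arena, arenaSize);
//
//   // During a request, grab a slab and remember its tag.
//   auto tagged = g_slabManager.tryAlloc();
//   if (tagged) {
//     // carve allocations out of tagged.ptr() ...
//   }
//
//   // At the end of the request, return slabs via a request-local list, then
//   // splice that list into the shared pool with a single merge().
//   TaggedSlabList localList;
//   localList.push_front<true>(tagged.ptr(), tagged.tag());
//   void* localTail = tagged.ptr();            // first slab pushed == tail
//   if (!localList.empty()) {
//     g_slabManager.merge(std::move(localList), localTail);
//   }
//
//   // At server shutdown, release whatever can be unmapped.
//   g_slabManager.shutdown();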