2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #ifndef incl_HPHP_SLAB_MANAGER_H_
18 #define incl_HPHP_SLAB_MANAGER_H_
20 #include "hphp/util/portability.h"
// Slabs are fixed-size chunks of request-heap memory: 2^21 = 2 MiB each.
// Every slab is aligned to its own size, so the low bits of any slab
// address are guaranteed zero (TaggedSlabPtr relies on this to pack a tag
// into the low 16 bits of a slab pointer).
constexpr unsigned kLgSlabSize = 21;
constexpr size_t kSlabSize = 1ull << kLgSlabSize;
constexpr size_t kSlabAlign = kSlabSize;
33 // To mitigate the ABA problem (i.e., a slab is allocated and returned to the
34 // list without another thread noticing), we tag the pointers on the lower 16
35 // bits. This should be sufficient for our purpose of slab management, so we
36 // don't consider also using other bits for now.
// A pointer to a slab, tagged in its low 16 bits with a counter used to
// mitigate the ABA problem on the lock-free slab lists below.  Packing the
// tag into the pointer is valid because slabs are kSlabAlign-aligned, so a
// real slab address always has (at least) its low 16 bits clear — enforced
// by the static_assert inside the struct.
// NOTE(review): this chunk view is missing interior lines (e.g. the ptr()
// signature, the `rep` member declaration, and several closing braces); the
// comments below annotate only the fragments that are visible here.
37 struct TaggedSlabPtr
{
// Mask covering the 16 tag bits of the packed representation.
38 static constexpr uintptr_t TagMask
= (1ul << 16) - 1;
// Slab alignment must exceed the mask so address bits and tag bits are
// disjoint.
39 static_assert(kSlabAlign
> TagMask
, "");
// Default- and nullptr-construction both yield the null pointer (rep == 0).
40 TaggedSlabPtr() noexcept
: rep(0) {}
41 /* implicit */ TaggedSlabPtr(std::nullptr_t
) noexcept
: rep(0) {}
// Pack a slab address together with an optional tag (tag defaults to 0).
42 TaggedSlabPtr(void* p
, uint16_t tag
= 0) noexcept
43 : rep(reinterpret_cast<uintptr_t>(p
) | tag
) {
// Fragment of ptr(): mask off the tag bits to recover the raw slab address.
47 return reinterpret_cast<void*>(rep
& ~TagMask
);
// The truncating cast keeps exactly the low 16 tag bits.
49 uint16_t tag() const {
50 return static_cast<uint16_t>(rep
);
// Conversion to bool — body not visible in this chunk.
52 explicit operator bool() const {
59 using AtomicTaggedSlabPtr
= std::atomic
<TaggedSlabPtr
>;
62 * Intrusive singly linked list of slabs using TaggedSlabPtr at the beginning
// Intrusive singly linked list of slabs: each slab on the list stores the
// (tagged) pointer to the next slab in its own first word, reinterpreted as
// an AtomicTaggedSlabPtr.  m_head is the list head.
// NOTE(review): this chunk view is missing interior lines (method
// signatures such as empty(), the CAS retry tail of push_front, closing
// braces); the comments below annotate only the visible fragments.
65 struct TaggedSlabList
{
// Fragment of empty(): relaxed load — under concurrency emptiness is only a
// heuristic, so no ordering is required.
67 return !m_head
.load(std::memory_order_relaxed
);
// Peek at the head of the list without removing it.
69 TaggedSlabPtr
head() {
70 return m_head
.load(std::memory_order_relaxed
);
73 * Add a slab to the list. If `local`, assume the list is only accessed in a
// Push slab `p` (with ABA tag `tag`) onto the front of the list.
76 template<bool local
= false> void push_front(void* p
, uint16_t tag
) {
78 TaggedSlabPtr tagged
{p
, tag
};
// Reuse the slab's own first word as the embedded next-pointer.
79 auto ptr
= reinterpret_cast<AtomicTaggedSlabPtr
*>(p
);
// `local` path: list assumed single-threaded, so relaxed stores suffice.
81 auto currHead
= m_head
.load(std::memory_order_relaxed
);
82 ptr
->store(currHead
, std::memory_order_relaxed
);
83 m_head
.store(tagged
, std::memory_order_relaxed
);
// Shared path: link the new slab to the current head, then publish it with
// a release CAS so other threads observe a fully linked node.
// (The retry branch of the CAS loop is not visible in this chunk.)
87 auto currHead
= m_head
.load(std::memory_order_acquire
);
88 ptr
->store(currHead
, std::memory_order_release
);
89 if (m_head
.compare_exchange_weak(currHead
, tagged
,
90 std::memory_order_release
)) {
96 // Divide a preallocated piece of memory into slabs and add to the list.
// Defined out of line; NEVER_INLINE keeps this cold setup code out of
// callers.
97 NEVER_INLINE
void addRange(void* ptr
, std::size_t size
);
// Head of the list (null tagged pointer when empty).
100 AtomicTaggedSlabPtr m_head
;
// Global slab manager: one instance per NUMA node (see s_slabManagers
// below), each owning a lock-free list of free slabs via the inherited
// TaggedSlabList machinery.
// NOTE(review): this chunk view is missing interior lines (CAS retry loops,
// tag bumping on successful pop, closing braces, and the end of the
// struct); the comments below annotate only the visible fragments.
103 struct SlabManager
: TaggedSlabList
{
104 // Create one SlabManager for each NUMA node, and add some slabs there.
105 // Currently they are backed by huge pages, see EvalNum1GPagesForSlabs and
106 // EvalNum2MPagesForSlabs.
// Fetch the manager for `node`.  A negative node means "don't care" and
// maps to node 0; out-of-range nodes yield nullptr.
109 static SlabManager
* get(int node
= -1) {
110 if (node
< 0) node
= 0;
111 if (node
>= s_slabManagers
.size()) return nullptr;
112 return s_slabManagers
[node
];
// Try to pop a slab off the free list; presumably returns a null
// TaggedSlabPtr when the list is empty (loop exit not visible here).
115 TaggedSlabPtr
tryAlloc() {
116 while (auto currHead
= m_head
.load(std::memory_order_acquire
)) {
// The next-pointer lives in the slab's own first word.
117 auto const ptr
=reinterpret_cast<AtomicTaggedSlabPtr
*>(currHead
.ptr());
118 auto next
= ptr
->load(std::memory_order_acquire
);
// Detach the head with a CAS; the tag carried in currHead is what defends
// against ABA if the slab was popped and re-pushed concurrently.
119 if (m_head
.compare_exchange_weak(currHead
, next
,
120 std::memory_order_release
)) {
127 // Push everything in a local TaggedSlabList starting with `newHead` and
128 // ending with `localTail` to this global list. The linking on the local list
129 // should be performed before this call. This is intended for returning
130 // multiple local slabs to the global list in one batch at the end of each
132 void merge(TaggedSlabPtr newHead
, void* localTail
) {
134 // No need to bump the tag here, as it is already bumped when forming the
// Splice the whole local batch in front of the global list: point the
// batch's tail at the current global head, then publish newHead with a
// release CAS.  (The CAS retry branch is not visible in this chunk.)
136 auto last
= reinterpret_cast<AtomicTaggedSlabPtr
*>(localTail
);
138 auto currHead
= m_head
.load(std::memory_order_acquire
);
139 last
->store(currHead
, std::memory_order_release
);
140 if (m_head
.compare_exchange_weak(currHead
, newHead
,
141 std::memory_order_release
)) {
// Per-NUMA-node managers, indexed by node id.
147 static std::vector
<SlabManager
*> s_slabManagers
; // one for each NUMA node