2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
16 #ifndef incl_HPHP_VM_TREAD_HASH_MAP_H_
17 #define incl_HPHP_VM_TREAD_HASH_MAP_H_
20 #include <type_traits>
22 #include <boost/iterator/iterator_facade.hpp>
24 #include <folly/Bits.h>
26 #include "hphp/runtime/vm/treadmill.h"
27 #include "hphp/util/assertions.h"
// Backing storage for the shared "empty table" sentinel (defined in the
// corresponding .cpp). staticEmptyTable() below reinterprets its address as a
// Table*, so its size must match sizeof(Table) (checked by a static_assert).
31 extern uint64_t g_emptyTable
;
34 * A hashtable safe for multiple concurrent readers, even while writes are
35 * happening, but with at most one concurrent writer. Reads and writes both
36 * are wait-free, but that there is only at most one writer will generally
37 * require synchronization outside of this class. (E.g. the translator write
40 * Key must be an atomically loadable/storable type. The Value must be a
41 * trivially copyable and assignable type, and it must be legal to copy from it
42 * without synchronization (this table may do this during a rehash). Also,
43 * assumes Key == 0 is invalid (i.e. the empty key).
45 * Insertions must be unique. It is an error to insert the same key more than once.
48 * Uses the treadmill to collect garbage.
// TreadHashMap: open-addressing hash table with wait-free concurrent reads and
// a single (externally synchronized) writer. Key 0 is the reserved empty
// sentinel. HashFunc is inherited privately (empty-base optimization); the
// allocator is rebound to char so tables are allocated as raw bytes.
51 template<class Key
, class Val
,
52 class HashFunc
= std::hash
<Key
>,
53 class Alloc
= std::allocator
<char>>
54 struct TreadHashMap
: private HashFunc
,
55 private Alloc::template rebind
<char>::other
{
// Raw-byte allocator used for Table storage (same type as the private base,
// so allocate/deallocate can be called via Allocator::).
56 using Allocator
= typename
Alloc::template rebind
<char>::other
;
// An entry pairs an atomic key (for lock-free publication via release store /
// acquire load) with its value. Val is copied without synchronization during
// rehash, hence the trivially-copyable requirement documented above.
57 using value_type
= std::pair
<std::atomic
<Key
>,Val
>;
// NOTE(review): these lines are the condition/message of a static_assert whose
// opening line is not visible in this extraction. freeTable() drops Table
// storage without running entry destructors, which is only sound because both
// Key and Val are required here to be trivially destructible.
59 std::is_trivially_destructible
<Key
>::value
&&
60 std::is_trivially_destructible
<Val
>::value
,
61 "TreadHashMap only supports trivially destructible keys and values"
// Flexible-array-style tail of the Table header struct (whose other fields —
// presumably capac and size, per uses below — are not visible in this
// extraction): entries are allocated in-line directly after the header.
68 value_type entries
[0];
71 Table
* staticEmptyTable() {
72 static_assert(sizeof(Table
) == sizeof(g_emptyTable
), "");
73 return reinterpret_cast<Table
*>(&g_emptyTable
);
77 explicit TreadHashMap(uint32_t initialCapacity
)
78 : m_table(staticEmptyTable())
79 , m_initialSize(folly::nextPowTwo(initialCapacity
))
// Destructor fragment: loads the current table and, unless it is the shared
// static empty sentinel (which must never be freed), releases it. The freeing
// statements inside the if are not visible in this extraction — presumably
// freeTable(t); confirm against the full source.
83 auto t
= m_table
.load(std::memory_order_acquire
);
84 if (t
!= staticEmptyTable()) {
89 TreadHashMap(const TreadHashMap
&) = delete;
90 TreadHashMap
& operator=(const TreadHashMap
&) = delete;
// Forward iterator over occupied slots, built on boost::iterator_facade
// (which synthesizes operator++/==/* from equal/dereference/increment).
// IterVal is value_type or const value_type, giving iterator/const_iterator.
// NOTE(review): the struct's opening line, constructor bodies, and the
// increment() member are not visible in this extraction.
92 template<class IterVal
>
94 : boost::iterator_facade
<thm_iterator
<IterVal
>,IterVal
,
95 boost::forward_traversal_tag
>
// Default-constructed iterator: null table, compares unequal to any live one.
97 explicit thm_iterator() : m_table(nullptr) {}
99 // Conversion constructor for interoperability between iterator
100 // and const_iterator. The enable_if<> magic prevents the
101 // constructor from existing if we're going in the const_iterator
102 // to iterator direction.
103 template<class OtherVal
>
104 thm_iterator(const thm_iterator
<OtherVal
>& o
,
105 typename
std::enable_if
<
106 std::is_convertible
<OtherVal
*,IterVal
*>::value
109 , m_offset(o
.m_offset
)
// Positions the iterator at a given slot; the (not visible) body presumably
// calls advancePastEmpty() so begin() lands on the first occupied slot.
112 explicit thm_iterator(Table
* tab
, size_t offset
)
// TreadHashMap constructs iterators; iterator_core_access lets the facade
// call the private equal/dereference/increment members.
120 friend struct TreadHashMap
;
121 friend class boost::iterator_core_access
;
// Two iterators are equal iff they reference the same table and slot.
128 bool equal(const thm_iterator
& o
) const {
129 return m_table
== o
.m_table
&& m_offset
== o
.m_offset
;
132 IterVal
& dereference() const {
133 return m_table
->entries
[m_offset
];
// Skips slots whose key is the empty sentinel (0); an acquire load on each
// key pairs with the writer's release store publishing an entry.
137 void advancePastEmpty() {
138 while (m_offset
< m_table
->capac
&&
139 m_table
->entries
[m_offset
].first
.load(std::memory_order_acquire
) == 0) {
149 typedef thm_iterator
<value_type
> iterator
;
150 typedef thm_iterator
<const value_type
> const_iterator
;
152 size_t size() const {
153 return m_table
.load(std::memory_order_acquire
)->size
;
// Fragments of begin() and end(); their signature lines are not visible in
// this extraction. begin() starts at slot 0 (the iterator presumably advances
// past empty slots itself); end() is one past the last slot, at capac.
157 return iterator(m_table
.load(std::memory_order_acquire
), 0);
161 auto tab
= m_table
.load(std::memory_order_acquire
);
162 return iterator(tab
, tab
->capac
);
165 const_iterator
begin() const {
166 return const_cast<TreadHashMap
*>(this)->begin();
168 const_iterator
end() const {
169 return const_cast<TreadHashMap
*>(this)->end();
// Writer-only insert of a unique, nonzero key; returns a pointer to the
// stored value. Grows/rehashes first if the table is at 50% occupancy.
// NOTE(review): one statement (original line 173, likely an assertion on the
// key) is missing from this extraction — confirm against the full source.
172 Val
* insert(Key key
, Val val
) {
174 return insertImpl(acquireAndGrowIfNeeded(), key
, val
);
// Wait-free lookup by linear probing from the key's home slot. Returns a
// pointer to the value, or nullptr if absent. Termination relies on
// capac > size guaranteeing at least one empty (key == 0) slot.
// NOTE(review): the probe loop's opening line (original 184) and the closing
// braces are not visible in this extraction.
177 Val
* find(Key key
) const {
// Snapshot the table once; a concurrent rehash publishes a new table with a
// release store, so this acquire load sees a consistent table.
180 auto tab
= m_table
.load(std::memory_order_acquire
);
181 if (tab
->size
== 0) return nullptr; // empty
182 assertx(tab
->capac
> tab
->size
);
183 auto idx
= project(tab
, key
);
185 auto& entry
= tab
->entries
[idx
];
// Acquire load pairs with insertImpl's release store of the key, making the
// previously-written value visible when the key matches.
186 Key currentProbe
= entry
.first
.load(std::memory_order_acquire
);
187 if (currentProbe
== key
) return &entry
.second
;
// Empty slot terminates the probe chain: the key is not present.
188 if (currentProbe
== 0) return nullptr;
// Advance with wrap-around to slot 0 at the end of the table.
189 if (++idx
== tab
->capac
) idx
= 0;
// Writer-only bulk removal: rebuilds the table keeping only entries whose key
// makes fn(key) return false. The old table is retired through the treadmill
// so concurrent readers still traversing it remain safe, then the new table
// is published with a release store.
// NOTE(review): the loop/function closing braces (original lines 202, 205-207)
// are not visible in this extraction.
193 template<typename F
> void filter_keys(F fn
) {
194 auto const old
= m_table
.load(std::memory_order_acquire
);
// Nothing to filter in the shared static empty table (and it must never be
// freed or replaced by this path).
195 if (UNLIKELY(old
== staticEmptyTable())) return;
// Same capacity as before: filtering only removes entries, never adds.
197 Table
* newTable
= allocTable(old
->capac
);
198 for (auto i
= uint32_t{}; i
< old
->capac
; ++i
) {
199 auto const ent
= &old
->entries
[i
];
200 auto const key
= ent
->first
.load(std::memory_order_acquire
);
// Occupied slot (key != 0) that survives the predicate is re-inserted; the
// Val copy without synchronization is allowed by the class contract.
201 if (key
&& !fn(key
)) insertImpl(newTable
, key
, ent
->second
);
// Defer freeing until all reader threads have passed a treadmill generation.
203 Treadmill::enqueue([this, old
] { freeTable(old
); });
204 m_table
.store(newTable
, std::memory_order_release
);
// Writer-only core insert into a specific table: linear-probes from the key's
// home slot to the first empty slot, stores the value, then publishes the key
// with a release store so readers that see the key also see the value.
// NOTE(review): the statements around the size bookkeeping (original lines
// 211, 216, 218-219, 222-223, 225, 227-229 — presumably the loop closer and a
// size increment) are not visible in this extraction.
208 Val
* insertImpl(Table
* const tab
, Key newKey
, Val newValue
) {
209 auto probe
= &tab
->entries
[project(tab
, newKey
)];
210 assertx(size_t(probe
- tab
->entries
) < tab
->capac
);
// Probe while the slot is occupied (nonzero key).
212 while (Key currentProbe
= probe
->first
.load(std::memory_order_acquire
)) {
213 assertx(currentProbe
!= newKey
); // insertions must be unique
214 assertx(probe
<= (tab
->entries
+ tab
->capac
));
215 // acquireAndGrowIfNeeded ensures there's at least one empty slot.
// Wrap to the first slot when the probe walks off the end of the table.
217 if (++probe
== (tab
->entries
+ tab
->capac
)) probe
= tab
->entries
;
220 // Copy over the value before we publish.
221 probe
->second
= newValue
;
// Release store publishes the entry: readers that acquire-load this key are
// guaranteed to observe the value written above.
224 probe
->first
.store(newKey
, std::memory_order_release
);
226 // size is only written to by the writer thread, relaxed memory
230 return &probe
->second
;
// Writer-only: returns the table an insert should go into. Fast path returns
// the current table while occupancy is below 50%; otherwise allocates either
// the first real table (m_initialSize) or a doubled one, rehashes every
// occupied entry, retires the old table via the treadmill, and publishes the
// new table with a release store.
// NOTE(review): several lines (original 235, 238-241, 244, 250, 252, 255-257
// — the fast-path return, newTable declaration, else, and final return/braces)
// are not visible in this extraction.
233 Table
* acquireAndGrowIfNeeded() {
234 auto old
= m_table
.load(std::memory_order_acquire
);
236 // 50% occupancy, avoiding the FPU.
237 if (LIKELY((old
->size
) < (old
->capac
/ 2))) {
// First real allocation: replace the shared static empty sentinel.
242 if (UNLIKELY(old
== staticEmptyTable())) {
243 newTable
= allocTable(m_initialSize
);
// Otherwise double capacity and rehash all live entries into it.
245 newTable
= allocTable(old
->capac
* 2);
246 for (uint32_t i
= 0; i
< old
->capac
; ++i
) {
247 auto const ent
= old
->entries
+ i
;
248 auto const key
= ent
->first
.load(std::memory_order_acquire
);
249 if (key
) insertImpl(newTable
, key
, ent
->second
);
// Old table stays readable until every reader passes a treadmill generation.
251 Treadmill::enqueue([this, old
] { freeTable(old
); });
253 assertx(newTable
->size
== old
->size
); // only one writer thread
254 m_table
.store(newTable
, std::memory_order_release
);
258 size_t project(Table
* tab
, Key key
) const {
259 assertx(folly::isPowTwo(tab
->capac
));
260 return HashFunc::operator()(key
) & (tab
->capac
- 1);
263 constexpr size_t allocSize(uint32_t cap
) {
264 return cap
* sizeof(value_type
) + sizeof(Table
);
// Allocates and zero-initializes a table of the given capacity. The memset
// makes every entry's key 0 (the empty sentinel) and zeroes the size field,
// so a fresh table reads as fully empty. Relies on value_type being
// trivially constructible-by-zeroing for the atomic keys in practice.
// NOTE(review): the function's tail (original lines 272-275, presumably
// "return ret;" and the closing brace) is not visible in this extraction.
267 Table
* allocTable(uint32_t capacity
) {
268 auto size
= allocSize(capacity
);
// Raw-byte allocation through the rebound char allocator base.
269 auto ret
= reinterpret_cast<Table
*>(Allocator::allocate(size
));
270 memset(ret
, 0, size
);
271 ret
->capac
= capacity
;
276 void freeTable(Table
* t
) {
277 Allocator::deallocate(reinterpret_cast<char*>(t
), allocSize(t
->capac
));
// The only shared mutable state: readers acquire-load the current table,
// and the single writer release-stores replacements (insert growth and
// filter_keys both swap in a new table this way).
281 std::atomic
<Table
*> m_table
;
// Capacity of the first real table, precomputed as a power of two in the
// constructor from the caller's initialCapacity hint.
282 uint32_t m_initialSize
;
285 //////////////////////////////////////////////////////////////////////