hphp/runtime/vm/name-value-table.cpp

/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#include "hphp/runtime/vm/name-value-table.h"

#include <limits>
#include <folly/Bits.h>

#include "hphp/runtime/base/tv-refcount.h"
#include "hphp/runtime/vm/bytecode.h"
#include "hphp/runtime/vm/runtime.h"

#include "hphp/runtime/vm/jit/prof-data-serialize.h"
#include "hphp/runtime/vm/jit/type.h"

#include <tbb/concurrent_hash_map.h>

namespace HPHP {

//////////////////////////////////////////////////////////////////////

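/*
 * Overview (explanatory note, not from the original sources): this table
 * backs PHP global variables. It is an open-addressed hash table keyed by
 * interned StringData pointers, with a power-of-two capacity and linear
 * probing. Each element stores its value either inline (m_tv) or, when the
 * global was pre-allocated from profiling data, in a normal-mode RDS slot
 * (m_link).
 */
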
NameValueTable::NameValueTable() {
  allocate(folly::nextPowTwo(RuntimeOption::EvalVMInitialGlobalTableSize));
}

NameValueTable::~NameValueTable() {
  for (Elm* elm = &m_table[m_tabMask]; elm != &m_table[-1]; --elm) {
    if (elm->m_name) {
      decRefStr(const_cast<StringData*>(elm->m_name));
      if (elm->m_link.bound()) {
        if (elm->m_link.isInitNoProfile()) {
          tvDecRefGen(elm->m_link.getNoProfile());
          elm->m_link.markUninit();
        }
      } else {
        tvDecRefGen(elm->m_tv);
      }
    }
  }
  req::free(m_table);
}

tv_lval NameValueTable::set(const StringData* name, tv_rval val) {
  auto const target = findTypedValue(name);
  tvSet(*val, target);
  return target;
}

void NameValueTable::unset(const StringData* name) {
  Elm* elm = findElm(name);
  if (!elm) return;
  if (elm->m_link.bound()) {
    if (!elm->m_link.isInitNoProfile()) return;
    tvDecRefGen(elm->m_link.getNoProfile());
    elm->m_link.markUninit();
    return;
  }
  tvUnset(&elm->m_tv);
}

tv_lval NameValueTable::lookup(const StringData* name) {
  Elm* elm = findElm(name);
  if (!elm) return tv_lval{};
  if (elm->m_link.bound()) {
    if (!elm->m_link.isInitNoProfile()) return tv_lval{};
    return elm->m_link.getNoProfile();
  }
  auto const lval = &elm->m_tv;
  return type(lval) == KindOfUninit ? tv_lval{} : lval;
}

tv_lval NameValueTable::lookupAdd(const StringData* name) {
  auto const val = findTypedValue(name);
  if (type(val) == KindOfUninit) tvWriteNull(val);
  return val;
}

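/*
 * Minimal usage sketch (illustrative only; the names below are hypothetical
 * and this snippet is not part of the runtime):
 *
 *   NameValueTable table;
 *   auto const name = makeStaticString("myGlobal");  // hypothetical global
 *   auto const tv = make_tv<KindOfInt64>(42);
 *   table.set(name, &tv);                    // bind or overwrite the value
 *   if (auto const v = table.lookup(name)) { // null lval when unset
 *     assertx(type(v) == KindOfInt64);
 *   }
 *   table.unset(name);                       // value goes back to Uninit
 */
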
void NameValueTable::reserve(size_t desiredSize) {
  /*
   * Reserve space for size * 4 / 3 because we limit our max load
   * factor to .75.
   *
   * Add one because the way we compute whether there is enough
   * space on lookupAdd will look at m_elms + 1.
   */
  const size_t reqCapac = desiredSize * 4 / 3 + 1;
  const size_t curCapac = m_tabMask + 1;

  if (reqCapac > curCapac) {
    allocate(folly::nextPowTwo(reqCapac));
  }
}

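/*
 * Worked example (explanatory note): reserve(12) computes
 * reqCapac = 12 * 4 / 3 + 1 = 17. Assuming a current capacity of 16 slots
 * (curCapac = m_tabMask + 1 = 16), 17 > 16, so the table grows to
 * nextPowTwo(17) = 32 slots, keeping the load factor at or below .75.
 */
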
void NameValueTable::allocate(const size_t newCapac) {
  assertx(folly::isPowTwo(newCapac));
  assertx(newCapac - 1 <= std::numeric_limits<uint32_t>::max());

  Elm* oldTab = m_table;
  const size_t oldMask = m_tabMask;

  m_table = req::calloc_raw_array<Elm>(newCapac);
  m_tabMask = uint32_t(newCapac - 1);

  if (oldTab) {
    rehash(oldTab, oldMask);
    req::free(oldTab);
  }
}

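/*
 * Note (explanatory): findTypedValue below is the common write path. A
 * value lives in one of two places: a pre-allocated RDS slot (m_link) when
 * profiling marked the global as hot, or the inline m_tv otherwise. Both
 * branches null-initialize a freshly inserted entry, so callers always
 * receive a valid lval.
 */
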
tv_lval NameValueTable::findTypedValue(const StringData* name) {
  Elm* elm = insert(name);
  if (elm->m_link.bound()) {
    if (!elm->m_link.isInitNoProfile()) {
      elm->m_link.initWith(make_tv<KindOfNull>());
    }
    return elm->m_link.getNoProfile();
  }
  if (elm->m_tv.m_type == kInvalidDataType) {
    tvWriteNull(elm->m_tv);
  }
  return &elm->m_tv;
}

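/*
 * Note (explanatory): insertImpl uses linear probing. The probe starts at
 * hash(name) & m_tabMask, which is always in range because the capacity is
 * a power of two, and walks forward one slot at a time, wrapping at the end
 * of the table. Since the load factor never exceeds .75, a free slot is
 * always reachable; e.g. a name hashing to slot 6 of an 8-slot table probes
 * 6, 7, 0, 1, ... until it finds the name or an empty slot.
 */
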
NameValueTable::Elm* NameValueTable::insertImpl(const StringData* name) {
  Elm* elm = &m_table[name->hash() & m_tabMask];
  UNUSED size_t numProbes = 0;
  for (;;) {
    assertx(numProbes++ < m_tabMask + 1);
    if (nullptr == elm->m_name) {
      elm->m_name = name;
      elm->m_link = decltype(elm->m_link){};
      elm->m_tv.m_type = kInvalidDataType;
      return elm;
    }
    if (name->same(elm->m_name)) {
      return elm;
    }
    if (UNLIKELY(++elm == &m_table[m_tabMask + 1])) {
      elm = m_table;
    }
  }
}

NameValueTable::Elm* NameValueTable::insert(const StringData* name) {
  reserve(m_elms + 1);
  Elm* elm = insertImpl(name);
  if (elm->m_tv.m_type == kInvalidDataType) {
    // Attempt to hook this entry up to an existing RDS slot.
    elm->m_link = linkForGlobal(name);
    ++m_elms;
    name->incRefCount();
  }
  return elm;
}

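/*
 * Note (explanatory): rehash re-inserts every element into the freshly
 * allocated table. Names, values, and RDS links are copied bitwise, so no
 * refcounts change and values never move out of their RDS slots; only the
 * slot positions within m_table are recomputed.
 */
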
void NameValueTable::rehash(Elm* const oldTab, const size_t oldMask) {
  for (Elm* srcElm = &oldTab[oldMask]; srcElm != &oldTab[-1]; --srcElm) {
    if (srcElm->m_name) {
      if (debug) {
        if (srcElm->m_link.bound()) {
          if (srcElm->m_link.isInitNoProfile()) {
            always_assert(tvIsPlausible(*srcElm->m_link.getNoProfile()));
          }
        } else {
          always_assert(tvIsPlausible(srcElm->m_tv));
        }
      }
      Elm* dstElm = insertImpl(srcElm->m_name);
      dstElm->m_name = srcElm->m_name;
      dstElm->m_tv = srcElm->m_tv;
      dstElm->m_link = srcElm->m_link;
    }
  }
}

NameValueTable::Elm* NameValueTable::findElm(const StringData* name) const {
  Elm* elm = &m_table[name->hash() & m_tabMask];
  UNUSED size_t numProbes = 0;
  for (;;) {
    assertx(numProbes++ < m_tabMask + 1);
    if (UNLIKELY(nullptr == elm->m_name)) {
      return nullptr;
    }
    if (name->same(elm->m_name)) return elm;
    if (UNLIKELY(++elm == &m_table[m_tabMask + 1])) {
      elm = m_table;
    }
  }
}

//////////////////////////////////////////////////////////////////////

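/*
 * Note (explanatory): the iterator walks raw table slots, skipping slots
 * that are empty or hold an unset value (atEmpty). Its integer form uses
 * m_tabMask + 1, one past the last slot, as the "invalid" sentinel, which
 * is also the position getEnd returns.
 */
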
NameValueTable::Iterator::Iterator(const NameValueTable* tab)
  : m_tab(tab)
  , m_idx(0)
{
  if (!valid()) next();
}

NameValueTable::Iterator
NameValueTable::Iterator::getLast(const NameValueTable* tab) {
  Iterator it;
  it.m_tab = tab;
  it.m_idx = tab->m_tabMask + 1;
  it.prev();
  return it;
}

NameValueTable::Iterator
NameValueTable::Iterator::getEnd(const NameValueTable* tab) {
  Iterator it;
  it.m_tab = tab;
  it.m_idx = tab->m_tabMask + 1;
  return it;
}

ssize_t NameValueTable::Iterator::toInteger() const {
  const ssize_t invalid = m_tab->m_tabMask + 1;
  return valid() ? m_idx : invalid;
}

bool NameValueTable::Iterator::valid() const {
  return m_idx >= 0 && size_t(m_idx) < m_tab->m_tabMask + 1 && !atEmpty();
}

const StringData* NameValueTable::Iterator::curKey() const {
  assertx(valid());
  return m_tab->m_table[m_idx].m_name;
}

tv_rval NameValueTable::Iterator::curVal() const {
  assertx(valid());
  auto const& elem = m_tab->m_table[m_idx];
  if (elem.m_link.bound()) {
    assertx(elem.m_link.isInitNoProfile());
    return elem.m_link.getNoProfile();
  }
  return &elem.m_tv;
}

void NameValueTable::Iterator::next() {
  size_t const sz = m_tab->m_tabMask + 1;
  if (m_idx + 1 >= sz) {
    m_idx = sz;
    return;
  }
  ++m_idx;
  if (LIKELY(!atEmpty())) {
    return;
  }
  do {
    ++m_idx;
  } while (size_t(m_idx) < sz && atEmpty());
}

void NameValueTable::Iterator::prev() {
  do {
    --m_idx;
  } while (m_idx >= 0 && atEmpty());
}

bool NameValueTable::Iterator::atEmpty() const {
  auto const& elem = m_tab->m_table[m_idx];
  if (!elem.m_name) return true;
  if (elem.m_link.bound()) return !elem.m_link.isInitNoProfile();
  return type(elem.m_tv) == KindOfUninit;
}

//////////////////////////////////////////////////////////////////////

namespace {

struct ProfileMeta {
  uint64_t count{0};
  uint64_t present{0};
  jit::Type type{jit::TBottom};
};

using ProfileMap = tbb::concurrent_hash_map<
  const StringData*,
  ProfileMeta,
  pointer_hash<StringData>
>;
ProfileMap s_profiling;

struct PreAllocElem {
  rds::Link<TypedValue, rds::Mode::Normal> link;
  jit::Type type;
  bool mainlyPresent;
};

hphp_fast_map<
  const StringData*,
  PreAllocElem,
  hphp_string_hash,
  hphp_string_same
> s_prealloc;

}

bool shouldProfileGlobals() {
  return isJitSerializing() && RO::EvalProfileGlobalsLimit > 0;
}

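/*
 * Note (explanatory): s_profiling is a tbb::concurrent_hash_map, so
 * profileGlobal may run concurrently on multiple request threads. The
 * accessor taken below holds a per-entry write lock until it goes out of
 * scope, making the count/present/type updates atomic per global name.
 */
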
void profileGlobal(const StringData* name) {
  // Look up the global. Record whether it's present or not. If it is
  // present, record its type.
  assertx(shouldProfileGlobals());
  if (!name->isStatic()) name = makeStaticString(name);
  auto const tv = g_context->m_globalNVTable->lookup(name);
  ProfileMap::accessor acc;
  s_profiling.insert(acc, name);
  ++acc->second.count;
  if (tv) {
    ++acc->second.present;
    acc->second.type |= jit::typeFromTV(tv, nullptr);
  }
}

rds::Link<TypedValue, rds::Mode::Normal> linkForGlobal(const StringData* name) {
  auto const it = s_prealloc.find(name);
  if (it == s_prealloc.end()) return rds::Link<TypedValue, rds::Mode::Normal>{};
  return it->second.link;
}

bool globalMainlyPresent(const StringData* name) {
  auto const it = s_prealloc.find(name);
  if (it == s_prealloc.end()) return false;
  return it->second.mainlyPresent;
}

jit::Type predictedTypeForGlobal(const StringData* name) {
  auto const it = s_prealloc.find(name);
  if (it == s_prealloc.end()) return jit::TCell;
  return it->second.type;
}

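/*
 * Note (explanatory): writeGlobalProfiles and readGlobalProfiles agree on a
 * simple format: a uint32_t entry count, then for each entry a string (the
 * global's name), a bool (whether it was present often enough to side-exit
 * on the missing case), and a serialized guardable jit::Type.
 */
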
void writeGlobalProfiles(jit::ProfDataSerializer& ser) {
  if (!shouldProfileGlobals()) {
    // Always write an entry, even if we're not profiling. This means
    // we can safely load if the options are different.
    jit::write_raw(ser, uint32_t(0));
    return;
  }

  // Sort the profiled data by the total count:
  struct SortedElem {
    const StringData* name;
    ProfileMeta meta;
  };
  std::vector<SortedElem> sorted;
  sorted.reserve(s_profiling.size());
  for (auto const& s : s_profiling) {
    sorted.emplace_back(SortedElem{s.first, s.second});
  }
  std::sort(
    sorted.begin(),
    sorted.end(),
    [&] (const SortedElem& v1, const SortedElem& v2) {
      if (v1.meta.count != v2.meta.count) {
        return v1.meta.count > v2.meta.count;
      }
      return strcmp(v1.name->data(), v2.name->data()) < 0;
    }
  );

  // Only write out the first EvalProfileGlobalsLimit entries. These
  // will be the hottest.
  auto const size =
    std::min<uint32_t>(RO::EvalProfileGlobalsLimit, sorted.size());
  jit::write_raw(ser, uint32_t{size});
  for (uint32_t i = 0; i < size; ++i) {
    jit::write_string(ser, sorted[i].name);

    // Record whether this global was present enough to side-exit for
    // the missing case.
    auto const success = sorted[i].meta.present / double(sorted[i].meta.count);
    jit::write_raw(ser, success >= RO::EvalProfileGlobalsSlowExitThreshold);

    auto type = sorted[i].meta.type;
    // TBottom means it was never present. Nothing to guard on in this
    // case.
    if (type == jit::TBottom) type = jit::TCell;
    type = jit::relaxToGuardable(type);
    type.serialize(ser);
  }
}

void readGlobalProfiles(jit::ProfDataDeserializer& ser) {
  auto const size = jit::read_raw<uint32_t>(ser);
  for (uint32_t i = 0; i < size; ++i) {
    auto const name = jit::read_string(ser);
    auto const mainlyPresent = jit::read_raw<bool>(ser);
    auto const type = jit::Type::deserialize(ser);
    if (RO::EvalProfileGlobalsLimit <= 0) continue;
    auto const link = rds::alloc<TypedValue, rds::Mode::Normal>();
    s_prealloc.emplace(name, PreAllocElem{link, type, mainlyPresent});
  }
}

//////////////////////////////////////////////////////////////////////

}