Fix frame-state processing of CallBuiltin for non-inlined NativeImpls
[hiphop-php.git] / hphp / runtime / vm / jit / irgen-builtin.cpp
blob80807b9540b081d6e6e2581e2fb3dc19c1e55492
1 /*
2 +----------------------------------------------------------------------+
3 | HipHop for PHP |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
16 #include "hphp/runtime/vm/jit/irgen-builtin.h"
18 #include "hphp/runtime/base/array-init.h"
19 #include "hphp/runtime/base/collections.h"
20 #include "hphp/runtime/base/enum-cache.h"
21 #include "hphp/runtime/base/file-util.h"
22 #include "hphp/runtime/base/tv-refcount.h"
23 #include "hphp/runtime/base/type-variant.h"
24 #include "hphp/runtime/vm/func.h"
25 #include "hphp/runtime/vm/repo.h"
26 #include "hphp/runtime/vm/repo-global-data.h"
27 #include "hphp/runtime/vm/vm-regs.h"
29 #include "hphp/runtime/vm/jit/analysis.h"
30 #include "hphp/runtime/vm/jit/extra-data.h"
31 #include "hphp/runtime/vm/jit/guard-constraint.h"
32 #include "hphp/runtime/vm/jit/type.h"
33 #include "hphp/runtime/vm/jit/vm-protect.h"
35 #include "hphp/runtime/vm/jit/irgen-call.h"
36 #include "hphp/runtime/vm/jit/irgen-control.h"
37 #include "hphp/runtime/vm/jit/irgen-exit.h"
38 #include "hphp/runtime/vm/jit/irgen-inlining.h"
39 #include "hphp/runtime/vm/jit/irgen-internal.h"
40 #include "hphp/runtime/vm/jit/irgen-interpone.h"
41 #include "hphp/runtime/vm/jit/irgen-minstr.h"
42 #include "hphp/runtime/vm/jit/irgen-ret.h"
43 #include "hphp/runtime/vm/jit/irgen-types.h"
45 #include "hphp/runtime/ext/collections/ext_collections-map.h"
46 #include "hphp/runtime/ext/collections/ext_collections-set.h"
47 #include "hphp/runtime/ext/collections/ext_collections-vector.h"
48 #include "hphp/runtime/ext/hh/ext_hh.h"
50 #include "hphp/util/text-util.h"
52 namespace HPHP { namespace jit { namespace irgen {
54 namespace {
56 //////////////////////////////////////////////////////////////////////
58 struct ParamPrep {
59 explicit ParamPrep(size_t count, const Func* callee) : info{count} {}
61 void decRefParams(IRGS& env) const {
62 if (forNativeImpl) return;
63 if (ctx && ctx->type() <= TObj) {
64 decRef(env, ctx);
66 for (auto i = size(); i--;) {
67 decRef(env, info[i].value);
71 struct Info {
72 SSATmp* value{nullptr};
73 bool passByAddr{false};
74 bool needsConversion{false};
75 bool isInOut{false};
78 const Info& operator[](size_t idx) const { return info[idx]; }
79 Info& operator[](size_t idx) { return info[idx]; }
80 size_t size() const { return info.size(); }
82 // For free/class/instance methods, ctx is null/Class*/Object* respectively.
83 SSATmp* ctx{nullptr};
84 jit::vector<Info> info;
85 uint32_t numByAddr{0};
87 bool forNativeImpl{false};
90 //////////////////////////////////////////////////////////////////////
92 // Will turn into either an int or a double in zend_convert_scalar_to_number.
93 bool type_converts_to_number(Type ty) {
94 return ty.subtypeOfAny(
95 TDbl,
96 TInt,
97 TNull,
98 TObj,
99 TRes,
100 TStr,
101 TBool
105 //////////////////////////////////////////////////////////////////////
107 Block* make_opt_catch(IRGS& env, const ParamPrep& params) {
108 // The params have been popped and if we're inlining the ActRec is gone
109 env.irb->setCurMarker(makeMarker(env, nextBcOff(env)));
110 env.irb->exceptionStackBoundary();
112 assertx(!env.irb->fs().stublogue());
113 auto const exit = defBlock(env, Block::Hint::Unlikely);
114 BlockPusher bp(*env.irb, makeMarker(env, nextBcOff(env)), exit);
115 gen(env, BeginCatch);
116 params.decRefParams(env);
117 auto const data = EndCatchData {
118 spOffBCFromIRSP(env),
119 EndCatchData::CatchMode::UnwindOnly,
120 EndCatchData::FrameMode::Phplogue,
121 EndCatchData::Teardown::Full
123 gen(env, EndCatch, data, fp(env), sp(env));
124 return exit;
127 SSATmp* is_a_impl(IRGS& env, const ParamPrep& params, bool subclassOnly) {
128 if (params.size() != 3) return nullptr;
130 auto const allowString = params[2].value;
131 auto const classname = params[1].value;
132 auto const obj = params[0].value;
134 if (!obj->isA(TObj) ||
135 !classname->hasConstVal(TStr) ||
136 !allowString->isA(TBool)) {
137 return nullptr;
140 auto const objCls = gen(env, LdObjClass, obj);
142 auto const cls = lookupUniqueClass(env, classname->strVal());
143 if (!cls) return nullptr;
145 auto const testCls = cns(env, cls);
147 // is_a() finishes here.
148 if (!subclassOnly) return gen(env, InstanceOf, objCls, testCls);
150 // is_subclass_of() needs to check that the LHS doesn't have the same class as
151 // as the RHS.
152 return cond(
153 env,
154 [&] (Block* taken) {
155 auto const eq = gen(env, EqCls, objCls, testCls);
156 gen(env, JmpNZero, taken, eq);
158 [&] {
159 return gen(env, InstanceOf, objCls, testCls);
161 [&] {
162 return cns(env, false);
167 SSATmp* opt_is_a(IRGS& env, const ParamPrep& params) {
168 return is_a_impl(env, params, false /* subclassOnly */);
171 SSATmp* opt_is_subclass_of(IRGS& env, const ParamPrep& params) {
172 return is_a_impl(env, params, true /* subclassOnly */);
175 SSATmp* opt_method_exists(IRGS& env, const ParamPrep& params) {
176 if (params.size() != 2) return nullptr;
178 auto const meth = params[1].value;
179 auto const obj = params[0].value;
181 if (!obj->isA(TObj) || !meth->isA(TStr)) return nullptr;
183 auto const cls = gen(env, LdObjClass, obj);
184 return gen(env, MethodExists, cls, meth);
187 const StaticString
188 s_conv_clsmeth_to_varray("Implicit clsmeth to varray conversion"),
189 s_conv_clsmeth_to_vec("Implicit clsmeth to vec conversion");
191 void raiseClsMethToVecWarningHelper(IRGS& env, const ParamPrep& params) {
192 if (RuntimeOption::EvalRaiseClsMethConversionWarning) {
193 gen(
194 env,
195 RaiseNotice,
196 make_opt_catch(env, params),
197 cns(env, RuntimeOption::EvalHackArrDVArrs ?
198 s_conv_clsmeth_to_vec.get() : s_conv_clsmeth_to_varray.get())
203 SSATmp* opt_count(IRGS& env, const ParamPrep& params) {
204 if (params.size() != 2) return nullptr;
206 auto const mode = params[1].value;
207 auto const val = params[0].value;
209 if (val->isA(TClsMeth)) {
210 raiseClsMethToVecWarningHelper(env, params);
211 return cns(env, 2);
214 // Bail if we're trying to do a recursive count()
215 if (!mode->hasConstVal(0)) return nullptr;
217 // Count may throw
218 return gen(env, Count, make_opt_catch(env, params), val);
221 SSATmp* opt_sizeof(IRGS& env, const ParamPrep& params) {
222 return opt_count(env, params);
225 SSATmp* opt_ord(IRGS& env, const ParamPrep& params) {
226 if (params.size() != 1) return nullptr;
228 auto const arg = params[0].value;
229 auto const arg_type = arg->type();
230 if (arg_type <= TStr) {
231 return gen(env, OrdStr, arg);
234 return nullptr;
237 SSATmp* opt_chr(IRGS& env, const ParamPrep& params) {
238 if (params.size() != 1) return nullptr;
240 auto const arg = params[0].value;
241 auto const arg_type = arg->type();
242 if (arg_type <= TInt) {
243 return gen(env, ChrInt, arg);
246 return nullptr;
249 SSATmp* opt_ini_get(IRGS& env, const ParamPrep& params) {
250 if (params.size() != 1) return nullptr;
252 // Only generate the optimized version if the argument passed in is a
253 // static string with a constant literal value so we can get the string value
254 // at JIT time.
255 auto const argType = params[0].value->type();
256 if (!(argType.hasConstVal(TStaticStr))) {
257 return nullptr;
260 // We can only optimize settings that are system wide since user level
261 // settings can be overridden during the execution of a request.
263 // TODO: the above is true for settings whose value we burn directly into the
264 // TC, but for non-system settings, we can optimize them as a load from the
265 // known static address or thread-local address of where the setting lives.
266 // This might be worth doing specifically for the zend.assertions setting,
267 // for which the emitter emits an ini_get around every call to assertx().
268 auto const settingName = params[0].value->strVal()->toCppString();
269 IniSetting::Mode mode = IniSetting::PHP_INI_NONE;
270 if (!IniSetting::GetMode(settingName, mode)) {
271 return nullptr;
273 if (mode & ~IniSetting::PHP_INI_SYSTEM) {
274 return nullptr;
276 if (mode == IniSetting::PHP_INI_ALL) { /* PHP_INI_ALL has a weird encoding */
277 return nullptr;
280 Variant value;
281 IniSetting::Get(settingName, value);
282 // All scalar values are cast to a string before being returned.
283 if (value.isString()) {
284 return cns(env, makeStaticString(value.toString()));
286 if (value.isInteger()) {
287 return cns(env, makeStaticString(folly::to<std::string>(value.toInt64())));
289 if (value.isBoolean()) {
290 static auto const s_one = makeStaticString("1");
291 return cns(env, value.toBoolean() ? s_one : staticEmptyString());
293 // ini_get() is now enhanced to return more than strings.
294 // Get out of here if we are something else like an array.
295 return nullptr;
299 * Transforms in_array with a static haystack argument into an AKExistsKeyset.
301 SSATmp* opt_in_array(IRGS& env, const ParamPrep& params) {
302 if (params.size() != 3 && params.size() != 2) return nullptr;
304 // We will restrict this optimization to needles that are strings, and
305 // haystacks that have only non-numeric string keys. This avoids a bunch of
306 // complication around numeric-string array-index semantics.
307 auto const needle = params[0].value;
308 if (!(needle->type() <= TStr)) {
309 return nullptr;
312 auto const haystackType = params[1].value->type();
313 if (!haystackType.hasConstVal(TStaticArr)) {
314 // Haystack isn't statically known
315 return nullptr;
318 auto const haystack = haystackType.arrVal();
319 if (haystack->size() == 0) {
320 return cns(env, false);
323 KeysetInit flipped{haystack->size()};
324 bool failed{false};
325 IterateVNoInc(
326 haystack,
327 [&](TypedValue key) {
329 if (!isStringType(type(key)) || val(key).pstr->isNumeric()) {
330 // Numeric strings will complicate matters because the loose comparisons
331 // done with array keys are not quite the same as loose comparisons done
332 // by in_array. For example: in_array('0', array('0000')) is true, but
333 // doing array('0000' => true)['0'] will say "undefined index".
334 // This seems unlikely to affect real-world usage.
335 failed = true;
336 return true;
339 flipped.add(val(key).pstr);
340 return false;
343 if (failed) {
344 return nullptr;
347 return gen(
348 env,
349 AKExistsKeyset,
350 cns(env, ArrayData::GetScalarArray(flipped.toArray())),
351 needle
355 SSATmp* opt_get_class(IRGS& env, const ParamPrep& params) {
356 auto const curCls = !params.forNativeImpl ? curClass(env) : nullptr;
357 auto const curName = [&] {
358 return curCls != nullptr ? cns(env, curCls->name()) : nullptr;
360 if (params.size() == 0 && RuntimeOption::EvalGetClassBadArgument == 0) {
361 return curName();
363 if (params.size() != 1) return nullptr;
365 auto const val = params[0].value;
366 auto const ty = val->type();
367 if (ty <= TNull && RuntimeOption::EvalGetClassBadArgument == 0) {
368 return curName();
370 if (ty <= TObj) {
371 auto const cls = gen(env, LdObjClass, val);
372 return gen(env, LdClsName, cls);
375 return nullptr;
378 SSATmp* opt_sqrt(IRGS& env, const ParamPrep& params) {
379 if (params.size() != 1) return nullptr;
381 auto const val = params[0].value;
382 auto const ty = val->type();
383 if (ty <= TDbl) return gen(env, Sqrt, val);
384 return nullptr;
387 SSATmp* opt_strlen(IRGS& env, const ParamPrep& params) {
388 if (params.size() != 1) return nullptr;
390 auto const val = params[0].value;
391 auto const ty = val->type();
393 if (ty <= TStr) {
394 return gen(env, LdStrLen, val);
397 return nullptr;
400 SSATmp* opt_clock_gettime_ns(IRGS& env, const ParamPrep& params) {
401 if (params.size() != 1) return nullptr;
403 auto const val = params[0].value;
405 // CLOCK_THREAD_CPUTIME_ID needs special handling
406 if (val->hasConstVal(TInt) && val->intVal() != CLOCK_THREAD_CPUTIME_ID) {
407 return gen(env, GetTimeNs, val);
410 return nullptr;
413 SSATmp* opt_microtime(IRGS& env, const ParamPrep& params) {
414 if (params.size() != 1) return nullptr;
416 auto const val = params[0].value;
418 if (val->hasConstVal(true)) {
419 return gen(env, GetTime);
422 return nullptr;
425 SSATmp* minmax(IRGS& env, const ParamPrep& params, const bool is_max) {
426 auto const val1 = params[1].value;
427 auto const ty1 = val1->type();
428 auto const val2 = params[0].value;
429 auto const ty2 = val2->type();
431 // this optimization is only for 2 ints/doubles
432 if (!(ty1 <= TInt || ty1 <= TDbl) ||
433 !(ty2 <= TInt || ty2 <= TDbl)) return nullptr;
435 auto const cmp = [&]{
436 if (ty1 <= TInt && ty2 <= TInt) {
437 return gen(env, is_max ? GtInt : LtInt, val1, val2);
438 } else {
439 auto conv1 = (ty1 <= TDbl) ? val1 : gen(env, ConvIntToDbl, val1);
440 auto conv2 = (ty2 <= TDbl) ? val2 : gen(env, ConvIntToDbl, val2);
441 return gen(env, is_max ? GtDbl : LtDbl, conv1, conv2);
443 }();
444 return gen(env, Select, cmp, val1, val2);
447 SSATmp* opt_max2(IRGS& env, const ParamPrep& params) {
448 // max2 is only called for 2 operands
449 return params.size() == 2 ? minmax(env, params, true) : nullptr;
452 SSATmp* opt_min2(IRGS& env, const ParamPrep& params) {
453 // min2 is only called for 2 operands
454 return params.size() == 2 ? minmax(env, params, false) : nullptr;
457 SSATmp* opt_ceil(IRGS& env, const ParamPrep& params) {
458 if (params.size() != 1) return nullptr;
459 if (!folly::CpuId().sse41()) return nullptr;
460 auto const val = params[0].value;
461 if (!type_converts_to_number(val->type())) return nullptr;
462 // May throw
463 auto const dbl = gen(env, ConvTVToDbl, make_opt_catch(env, params), val);
464 return gen(env, Ceil, dbl);
467 SSATmp* opt_floor(IRGS& env, const ParamPrep& params) {
468 if (params.size() != 1) return nullptr;
469 if (!folly::CpuId().sse41()) return nullptr;
470 auto const val = params[0].value;
471 if (!type_converts_to_number(val->type())) return nullptr;
472 // May throw
473 auto const dbl = gen(env, ConvTVToDbl, make_opt_catch(env, params), val);
474 return gen(env, Floor, dbl);
477 SSATmp* opt_abs(IRGS& env, const ParamPrep& params) {
478 if (params.size() != 1) return nullptr;
480 auto const value = params[0].value;
481 if (value->type() <= TInt) {
482 // compute integer absolute value ((src>>63) ^ src) - (src>>63)
483 auto const t1 = gen(env, Shr, value, cns(env, 63));
484 auto const t2 = gen(env, XorInt, t1, value);
485 return gen(env, SubInt, t2, t1);
488 if (value->type() <= TDbl) return gen(env, AbsDbl, value);
489 if (value->type() <= TArrLike) return cns(env, false);
491 return nullptr;
494 SSATmp* opt_array_key_cast(IRGS& env, const ParamPrep& params) {
495 if (params.size() != 1) return nullptr;
496 auto const value = params[0].value;
498 if (value->isA(TInt)) return value;
499 if (value->isA(TNull)) return cns(env, staticEmptyString());
500 if (value->isA(TBool)) return gen(env, ConvBoolToInt, value);
501 if (value->isA(TDbl)) return gen(env, ConvDblToInt, value);
502 if (value->isA(TRes)) return gen(env, ConvResToInt, value);
503 if (value->isA(TStr)) return gen(env, StrictlyIntegerConv, value);
505 return nullptr;
508 SSATmp* impl_opt_type_structure(IRGS& env, const ParamPrep& params,
509 bool getName) {
510 if (params.size() != 2) return nullptr;
511 auto const clsNameTmp = params[0].value;
512 auto const cnsNameTmp = params[1].value;
514 if (!clsNameTmp->isA(TStr)) return nullptr;
515 if (!cnsNameTmp->hasConstVal(TStaticStr)) return nullptr;
516 auto const cnsName = cnsNameTmp->strVal();
518 auto const clsTmp = [&] () -> SSATmp* {
519 if (clsNameTmp->inst()->is(LdClsName)) {
520 return clsNameTmp->inst()->src(0);
522 return ldCls(env, clsNameTmp, make_opt_catch(env, params));
523 }();
525 if (!clsTmp->type().clsSpec()) return nullptr;
526 auto const cls = clsTmp->type().clsSpec().cls();
528 auto const cnsSlot = cls->clsCnsSlot(cnsName, true, true);
529 if (cnsSlot == kInvalidSlot) return nullptr;
531 auto const data = LdSubClsCnsData { cnsName, cnsSlot };
532 if (!getName) {
533 auto const ptr = gen(env, LdSubClsCns, data, clsTmp);
534 return cond(
535 env,
536 [&] (Block* taken) {
537 gen(env, CheckTypeMem, TUncountedInit, taken, ptr);
538 return gen(env, LdTypeCns, taken, gen(env, LdMem, TUncountedInit, ptr));
540 [&] (SSATmp* cns) { return cns; },
541 [&] /* taken */ {
542 return gen(
543 env, LdClsTypeCns, make_opt_catch(env, params), clsTmp, cnsNameTmp
548 return cond(
549 env,
550 [&] (Block* taken) {
551 auto const clsNameFromTS = gen(env, LdSubClsCnsClsName, data, clsTmp);
552 return gen(env, CheckNonNull, taken, clsNameFromTS);
554 [&] (SSATmp* s) { return s; },
555 [&] {
556 return gen(
557 env,
558 LdClsTypeCnsClsName,
559 make_opt_catch(env, params),
560 clsTmp,
561 cnsNameTmp
567 SSATmp* opt_type_structure(IRGS& env, const ParamPrep& params) {
568 return impl_opt_type_structure(env, params, false);
570 SSATmp* opt_type_structure_classname(IRGS& env, const ParamPrep& params) {
571 return impl_opt_type_structure(env, params, true);
574 SSATmp* opt_is_list_like(IRGS& env, const ParamPrep& params) {
575 if (params.size() != 1) return nullptr;
576 auto const type = params[0].value->type();
577 // Type might be a Ptr here, so the maybe() below will go wrong if we don't
578 // bail out here.
579 if (!(type <= TInitCell)) return nullptr;
580 if (type <= TClsMeth) {
581 raiseClsMethToVecWarningHelper(env, params);
582 return cns(env, true);
584 if (!type.maybe(TArrLike)) return cns(env, false);
585 if (type.subtypeOfAny(TVec, TPackedArr)) return cns(env, true);
586 return nullptr;
589 SSATmp* opt_foldable(IRGS& env,
590 const Func* func,
591 const ParamPrep& params,
592 uint32_t numNonDefaultArgs) {
593 ARRPROV_USE_RUNTIME_LOCATION();
594 if (!func->isFoldable()) return nullptr;
596 const Class* cls = nullptr;
597 if (func->isMethod()) {
598 if (!params.ctx || !func->isStatic()) return nullptr;
599 cls = params.ctx->type().clsSpec().exactCls();
600 if (!cls) return nullptr;
603 ArrayData* variadicArgs = nullptr;
604 uint32_t numVariadicArgs = 0;
605 if (numNonDefaultArgs > func->numNonVariadicParams()) {
606 assertx(params.size() == func->numParams());
607 auto const variadic = params.info.back().value;
608 auto const ty = RuntimeOption::EvalHackArrDVArrs ? TVec : TArr;
609 if (!variadic->type().hasConstVal(ty)) return nullptr;
611 variadicArgs = variadic->variantVal().asCArrRef().get();
612 numVariadicArgs = variadicArgs->size();
614 if (numVariadicArgs && !variadicArgs->isVecOrVArray()) return nullptr;
616 assertx(variadicArgs->isStatic());
617 numNonDefaultArgs = func->numNonVariadicParams();
620 // Don't pop the args yet---if the builtin throws at compile time (because
621 // it would raise a warning or something at runtime) we're going to leave
622 // the call alone.
623 VArrayInit args(numNonDefaultArgs + numVariadicArgs);
624 for (auto i = 0; i < numNonDefaultArgs; ++i) {
625 auto const t = params[i].value->type();
626 if (!t.hasConstVal() && !t.subtypeOfAny(TUninit, TInitNull, TNullptr)) {
627 return nullptr;
628 } else {
629 args.append(params[i].value->variantVal());
632 if (variadicArgs) {
633 for (auto i = 0; i < numVariadicArgs; i++) {
634 args.append(variadicArgs->get(i));
638 try {
639 // We don't know if notices would be enabled or not when this function
640 // would normally get called, so be safe and don't optimize any calls that
641 // COULD generate notices.
642 ThrowAllErrorsSetter taes;
644 VMProtect::Pause deprot;
645 always_assert(tl_regState == VMRegState::CLEAN);
647 // Even though tl_regState is marked clean, vmpc() has not necessarily been
648 // set to anything valid, so we need to do so here (for assertions and
649 // backtraces in the invocation, among other things).
650 auto const savedPC = vmpc();
651 vmpc() = vmfp() ? vmfp()->m_func->getEntry() : nullptr;
652 SCOPE_EXIT{ vmpc() = savedPC; };
654 assertx(!RID().getJitFolding());
655 RID().setJitFolding(true);
656 SCOPE_EXIT{ RID().setJitFolding(false); };
658 auto retVal = g_context->invokeFunc(func, args.toArray(),
659 nullptr, const_cast<Class*>(cls),
660 nullptr, false);
661 SCOPE_EXIT { tvDecRefGen(retVal); };
662 assertx(tvIsPlausible(retVal));
664 auto scalar_array = [&] {
665 return ArrayData::GetScalarArray(std::move(tvAsVariant(&retVal)));
668 switch (retVal.m_type) {
669 case KindOfNull:
670 case KindOfBoolean:
671 case KindOfInt64:
672 case KindOfDouble:
673 return cns(env, retVal);
674 case KindOfPersistentString:
675 case KindOfString:
676 return cns(env, makeStaticString(retVal.m_data.pstr));
677 case KindOfPersistentVec:
678 case KindOfVec:
679 return cns(
680 env,
681 make_tv<KindOfPersistentVec>(scalar_array())
683 case KindOfPersistentDict:
684 case KindOfDict:
685 return cns(
686 env,
687 make_tv<KindOfPersistentDict>(scalar_array())
689 case KindOfPersistentKeyset:
690 case KindOfKeyset:
691 return cns(
692 env,
693 make_tv<KindOfPersistentKeyset>(scalar_array())
695 case KindOfPersistentDArray:
696 case KindOfDArray:
697 case KindOfPersistentVArray:
698 case KindOfVArray:
699 case KindOfPersistentArray:
700 case KindOfArray:
701 return cns(
702 env,
703 make_persistent_array_like_tv(scalar_array())
705 case KindOfUninit:
706 case KindOfObject:
707 case KindOfResource:
708 // TODO (T29639296)
709 case KindOfFunc:
710 case KindOfClass:
711 case KindOfClsMeth:
712 case KindOfRecord: // TODO(arnabde)
713 return nullptr;
715 } catch (...) {
716 // If an exception or notice occurred, don't optimize
718 return nullptr;
722 * Container intrinsic for HH\traversable
724 SSATmp* opt_container_first(IRGS& env, const ParamPrep& params) {
725 if (params.size() != 1) {
726 return nullptr;
728 auto const value = params[0].value;
729 auto const type = value->type();
730 if (type.subtypeOfAny(TVec, TPackedArr)) {
731 auto const r = gen(env, VecFirst, value);
732 gen(env, IncRef, r);
733 return r;
734 } else if (type.subtypeOfAny(TDict, TMixedArr)) {
735 auto const r = gen(env, DictFirst, value);
736 gen(env, IncRef, r);
737 return r;
738 } else if (type <= TKeyset) {
739 auto const r = gen(env, KeysetFirst, value);
740 gen(env, IncRef, r);
741 return r;
743 return nullptr;
746 SSATmp* opt_container_last(IRGS& env, const ParamPrep& params) {
747 if (params.size() != 1) {
748 return nullptr;
750 auto const value = params[0].value;
751 auto const type = value->type();
752 if (type.subtypeOfAny(TVec, TPackedArr)) {
753 auto const r = gen(env, VecLast, value);
754 gen(env, IncRef, r);
755 return r;
756 } else if (type.subtypeOfAny(TDict, TMixedArr)) {
757 auto const r = gen(env, DictLast, value);
758 gen(env, IncRef, r);
759 return r;
760 } else if (type <= TKeyset) {
761 auto const r = gen(env, KeysetLast, value);
762 gen(env, IncRef, r);
763 return r;
765 return nullptr;
768 SSATmp* opt_container_first_key(IRGS& env, const ParamPrep& params) {
769 if (params.size() != 1) {
770 return nullptr;
772 auto const value = params[0].value;
773 auto const type = value->type();
775 if (type.subtypeOfAny(TVec, TPackedArr)) {
776 return cond(
777 env,
778 [&](Block* taken) {
779 auto const length = type <= TVec ?
780 gen(env, CountVec, value) : gen(env, CountArray, value);
781 gen(env, JmpZero, taken, length);
783 [&] { return cns(env, 0); },
784 [&] { return cns(env, TInitNull); }
786 } else if (type.subtypeOfAny(TDict, TMixedArr)) {
787 auto const r = gen(env, DictFirstKey, value);
788 gen(env, IncRef, r);
789 return r;
790 } else if (type <= TKeyset) {
791 auto const r = gen(env, KeysetFirst, value);
792 gen(env, IncRef, r);
793 return r;
795 return nullptr;
798 SSATmp* opt_container_last_key(IRGS& env, const ParamPrep& params) {
799 if (params.size() != 1) {
800 return nullptr;
802 auto const value = params[0].value;
803 auto const type = value->type();
805 if (type.subtypeOfAny(TVec, TPackedArr)) {
806 return cond(
807 env,
808 [&](Block* taken) {
809 auto const length = type <= TVec ?
810 gen(env, CountVec, value) : gen(env, CountArray, value);
811 gen(env, JmpZero, taken, length);
812 return length;
814 [&] (SSATmp* next) { return gen(env, SubInt, next, cns(env, 1)); },
815 [&] { return cns(env, TInitNull); }
817 } else if (type.subtypeOfAny(TDict, TMixedArr)) {
818 auto const r = gen(env, DictLastKey, value);
819 gen(env, IncRef, r);
820 return r;
821 } else if (type <= TKeyset) {
822 auto const r = gen(env, KeysetLast, value);
823 gen(env, IncRef, r);
824 return r;
826 return nullptr;
829 namespace {
830 const StaticString
831 s_MCHELPER_ON_GET_CLS("MethCallerHelper is used on meth_caller_get_class()"),
832 s_MCHELPER_ON_GET_METH(
833 "MethCallerHelper is used on meth_caller_get_method()"),
834 s_BAD_ARG_ON_MC_GET_CLS(
835 "Argument 1 passed to meth_caller_get_class() must be a MethCaller"),
836 s_BAD_ARG_ON_MC_GET_METH(
837 "Argument 1 passed to meth_caller_get_method() must be a MethCaller"),
838 s_meth_caller_cls("__SystemLib\\MethCallerHelper"),
839 s_cls_prop("class"),
840 s_meth_prop("method");
841 const Slot s_cls_idx{0};
842 const Slot s_meth_idx{1};
844 SSATmp* opt_fun_get_function(IRGS& env, const ParamPrep& params) {
845 if (params.size() != 1) return nullptr;
846 auto const value = params[0].value;
847 auto const type = value->type();
848 if (type <= TFunc) {
849 return gen(env, LdFuncName, value);
851 return nullptr;
854 DEBUG_ONLY bool meth_caller_has_expected_prop(const Class *mcCls) {
855 return mcCls->lookupDeclProp(s_cls_prop.get()) == s_cls_idx &&
856 mcCls->lookupDeclProp(s_meth_prop.get()) == s_meth_idx &&
857 mcCls->declPropTypeConstraint(s_cls_idx).isString() &&
858 mcCls->declPropTypeConstraint(s_meth_idx).isString();
861 template<bool isCls>
862 SSATmp* meth_caller_get_name(IRGS& env, SSATmp *value) {
863 if (value->isA(TFunc)) {
864 return cond(
865 env,
866 [&] (Block* taken) {
867 auto const attr = AttrData {static_cast<int32_t>(AttrIsMethCaller)};
868 auto isMC = gen(env, FuncHasAttr, attr, value);
869 gen(env, JmpZero, taken, isMC);
871 [&] {
872 return gen(env, LdMethCallerName, MethCallerData{isCls}, value);
874 [&] { // Taken: src is not a meth_caller
875 hint(env, Block::Hint::Unlikely);
876 updateMarker(env);
877 env.irb->exceptionStackBoundary();
878 gen(env, RaiseError, cns(env, isCls ?
879 s_BAD_ARG_ON_MC_GET_CLS.get() : s_BAD_ARG_ON_MC_GET_METH.get()));
880 // Dead-code, but needed to satisfy cond().
881 return cns(env, staticEmptyString());
885 if (value->isA(TObj)) {
886 auto loadProp = [&] (Class* cls, bool isGetCls, SSATmp* obj) {
887 auto const slot = isGetCls ? s_cls_idx : s_meth_idx;
888 auto const idx = cls->propSlotToIndex(slot);
889 auto const prop = gen(
890 env, LdPropAddr, IndexData{idx}, TStr.lval(Ptr::Prop), obj);
891 auto const ret = gen(env, LdMem, TStr, prop);
892 gen(env, IncRef, ret);
893 return ret;
896 auto const mcCls = Unit::lookupClass(s_meth_caller_cls.get());
897 assertx(mcCls && meth_caller_has_expected_prop(mcCls));
898 return cond(
899 env,
900 [&] (Block* taken) {
901 auto isMC = gen(
902 env, EqCls, cns(env, mcCls), gen(env, LdObjClass, value));
903 gen(env, JmpZero, taken, isMC);
905 [&] {
906 if (RuntimeOption::EvalEmitMethCallerFuncPointers &&
907 RuntimeOption::EvalNoticeOnMethCallerHelperUse) {
908 updateMarker(env);
909 env.irb->exceptionStackBoundary();
910 auto const msg = cns(env, isCls ?
911 s_MCHELPER_ON_GET_CLS.get() : s_MCHELPER_ON_GET_METH.get());
912 gen(env, RaiseNotice, msg);
914 return loadProp(mcCls, isCls, value);
916 [&] { // Taken: src is not a meth_caller
917 hint(env, Block::Hint::Unlikely);
918 updateMarker(env);
919 env.irb->exceptionStackBoundary();
920 gen(env, RaiseError, cns(env, isCls ?
921 s_BAD_ARG_ON_MC_GET_CLS.get() : s_BAD_ARG_ON_MC_GET_METH.get()));
922 // Dead-code, but needed to satisfy cond().
923 return cns(env, staticEmptyString());
927 return nullptr;
931 SSATmp* opt_class_meth_get_class(IRGS& env, const ParamPrep& params) {
932 if (params.size() != 1) return nullptr;
933 auto const value = params[0].value;
934 if (value->type() <= TClsMeth) {
935 return gen(env, LdClsName, gen(env, LdClsFromClsMeth, value));
937 return nullptr;
940 SSATmp* opt_class_meth_get_method(IRGS& env, const ParamPrep& params) {
941 if (params.size() != 1) return nullptr;
942 auto const value = params[0].value;
943 if (value->type() <= TClsMeth) {
944 return gen(env, LdFuncName, gen(env, LdFuncFromClsMeth, value));
946 return nullptr;
949 SSATmp* opt_shapes_idx(IRGS& env, const ParamPrep& params) {
950 // We first check the number and types of each argument. If any check fails,
951 // we'll fall back to the native code which will raise an appropriate error.
952 auto const nparams = params.size();
953 if (nparams != 2 && nparams != 3) return nullptr;
955 // params[0] is a darray, which may be a Dict or an Arr based on options.
956 // If the Hack typehint check flag is on, then we fall back to the native
957 // implementation of this method, which checks the DVArray bit in ArrayData.
958 bool is_dict;
959 auto const arrType = params[0].value->type();
960 if (RuntimeOption::EvalHackArrDVArrs && arrType <= TDict) {
961 is_dict = true;
962 } else if (!RuntimeOption::EvalHackArrDVArrs && arrType <= TArr) {
963 is_dict = false;
964 } else {
965 return nullptr;
968 // params[1] is an arraykey. We only optimize if it's narrowed to int or str.
969 auto const keyType = params[1].value->type();
970 if (!(keyType <= TInt || keyType <= TStr)) return nullptr;
972 // params[2] is an optional argument. If it's uninit, we convert it to null.
973 // We only optimize if we can distinguish between uninit and other types.
974 auto const defType = nparams == 3 ? params[2].value->type() : TUninit;
975 if (!(defType <= TUninit) && defType.maybe(TUninit)) return nullptr;
976 auto const def = defType <= TUninit ? cns(env, TInitNull) : params[2].value;
978 // params[0] is the array, for which we may need to do a dvarray check.
979 auto const arr = [&]() -> SSATmp* {
980 auto const val = params[0].value;
981 if (!(RO::EvalHackArrCompatTypeHintNotices && val->isA(TArr))) return val;
983 // Rather than just emitting a notice here, we interp and side-exit for
984 // non-darrays so that we can use layout information in the main trace.
986 // To interp, we must restore the stack offset from the start of the HHBC.
987 // It's easy to undo the stack offset for FCallBuiltin, but it's too hard
988 // to do the same for inlined NativeImpls (since they end inlined traces).
989 if (curSrcKey(env).op() != Op::FCallBuiltin) return nullptr;
990 env.irb->fs().incBCSPDepth(nparams);
991 auto const result = gen(env, CheckDArray, makeExitSlow(env), val);
992 env.irb->fs().decBCSPDepth(nparams);
993 return result;
994 }();
995 if (!arr) return nullptr;
997 // Do the array access, using array offset profiling to optimize it.
998 auto const key = params[1].value;
999 auto const elm = profiledArrayAccess(env, arr, key,
1000 [&] (SSATmp* arr, SSATmp* key, uint32_t pos) {
1001 auto const op = is_dict ? DictGetK : MixedArrayGetK;
1002 return gen(env, op, IndexData { pos }, arr, key);
1004 [&] (SSATmp* key, SizeHintData data) {
1005 auto const op = is_dict ? DictIdx : ArrayIdx;
1006 return gen(env, op, data, arr, key, def);
1010 auto const finish = [&](SSATmp* val){
1011 gen(env, IncRef, val);
1012 return val;
1014 return finish(profiledType(env, elm, [&] {
1015 auto const cell = finish(elm);
1016 params.decRefParams(env);
1017 push(env, cell);
1018 }));
/*
 * If params.ctx is a compile-time-constant enum class whose RDS binding is
 * persistent, return its cached EnumValues (static names/values arrays).
 * Returns nullptr when the enum can't be resolved statically.
 */
const EnumValues* getEnumValues(IRGS& env, const ParamPrep& params) {
  // With array provenance enabled we can't hand out the shared static
  // arrays directly, so decline to optimize.
  if (RO::EvalArrayProvenance) return nullptr;
  // The context class must be a known constant to fold at JIT time.
  if (!(params.ctx && params.ctx->hasConstVal(TCls))) return nullptr;
  auto const cls = params.ctx->clsVal();
  if (!(isEnum(cls) && classHasPersistentRDS(cls))) return nullptr;
  return EnumCache::getValuesStatic(cls);
}
1029 SSATmp* opt_enum_names(IRGS& env, const ParamPrep& params) {
1030 if (params.size() != 0) return nullptr;
1031 auto const enum_values = getEnumValues(env, params);
1032 return enum_values ? cns(env, enum_values->names.get()) : nullptr;
1035 SSATmp* opt_enum_values(IRGS& env, const ParamPrep& params) {
1036 if (params.size() != 0) return nullptr;
1037 auto const enum_values = getEnumValues(env, params);
1038 return enum_values ? cns(env, enum_values->values.get()) : nullptr;
/*
 * Optimized HH\BuiltinEnum::isValid: when the enum's values are statically
 * known, emit a key-exists check against the static names array (keyed by
 * enum value). Returns nullptr when we can't optimize.
 */
SSATmp* opt_enum_is_valid(IRGS& env, const ParamPrep& params) {
  if (params.size() != 1) return nullptr;
  auto const value = params[0].value;
  // We need a known DataType to pick int- vs string-key behavior below.
  if (!value->type().isKnownDataType()) return nullptr;
  auto const enum_values = getEnumValues(env, params);
  if (!enum_values) return nullptr;
  auto const ad = MixedArray::asMixed(enum_values->names.get());
  auto const op = ad->isDictType() ? AKExistsDict : AKExistsArr;
  if (value->isA(TInt)) {
    // All-string keys means an int value can never be present.
    if (ad->keyTypes().mustBeStrs()) return cns(env, false);
    return gen(env, op, cns(env, ad->asArrayData()), value);
  } else if (value->isA(TStr)) {
    // We're not doing intish-casts here, so we bail if ad has any int keys.
    if (!ad->keyTypes().mustBeStrs()) return nullptr;
    return gen(env, op, cns(env, ad->asArrayData()), value);
  }
  // Neither int nor string: never a valid enum value.
  return cns(env, false);
}
/*
 * Optimized HH\BuiltinEnum::coerce: piggybacks on opt_enum_is_valid, then
 * returns the (possibly int-to-string-converted) input on success or
 * InitNull on failure. Returns nullptr when isValid couldn't be optimized.
 */
SSATmp* opt_enum_coerce(IRGS& env, const ParamPrep& params) {
  auto const valid = opt_enum_is_valid(env, params);
  if (!valid) return nullptr;
  return cond(env,
    [&](Block* taken) { gen(env, JmpZero, taken, valid); },
    [&]{
      // We never need to coerce strs to ints here, but we may need to coerce
      // ints to strs if the enum is a string type with intish values.
      auto const value = params[0].value;
      auto const isstr = isStringType(params.ctx->clsVal()->enumBaseTy());
      if (value->isA(TInt) && isstr) return gen(env, ConvIntToStr, value);
      gen(env, IncRef, value);
      return value;
    },
    [&]{ return cns(env, TInitNull); }
  );
}
/*
 * Optimized HH\tag_provenance_here: when provenance logging is disabled the
 * builtin is an identity function, so just IncRef and return the argument.
 */
SSATmp* opt_tag_provenance_here(IRGS& env, const ParamPrep& params) {
  if (params.size() != 1) return nullptr;
  if (RO::EvalLogArrayProvenance) return nullptr;
  auto const result = params[0].value;
  gen(env, IncRef, result);
  return result;
}
// Warning messages raised when marking vecs/dicts legacy pre-HADVAs.
StaticString s_ARRAY_MARK_LEGACY_VEC(Strings::ARRAY_MARK_LEGACY_VEC);
StaticString s_ARRAY_MARK_LEGACY_DICT(Strings::ARRAY_MARK_LEGACY_DICT);

/*
 * Optimized HH\array_mark_legacy: emit SetLegacyVec/SetLegacyDict directly,
 * raising the appropriate migration warning first when HackArrDVArrs is off.
 * Returns nullptr (no optimization) for non-vec/dict inputs.
 */
SSATmp* opt_array_mark_legacy(IRGS& env, const ParamPrep& params) {
  if (params.size() != 1) return nullptr;
  auto const value = params[0].value;
  if (!RO::EvalHackArrDVArrs) {
    if (value->isA(TVec)) {
      gen(env,
          RaiseWarning,
          make_opt_catch(env, params),
          cns(env, s_ARRAY_MARK_LEGACY_VEC.get()));
    } else if (value->isA(TDict)) {
      gen(env,
          RaiseWarning,
          make_opt_catch(env, params),
          cns(env, s_ARRAY_MARK_LEGACY_DICT.get()));
    }
  }
  if (value->isA(TVec)) {
    return gen(env, SetLegacyVec, value);
  } else if (value->isA(TDict)) {
    return gen(env, SetLegacyDict, value);
  }
  return nullptr;
}
/*
 * Optimized HH\is_meth_caller: for a TFunc check the AttrIsMethCaller bit;
 * for a TObj compare the object's class against the meth-caller class.
 * Returns nullptr for other input types (no optimization).
 */
SSATmp* opt_is_meth_caller(IRGS& env, const ParamPrep& params) {
  if (params.size() != 1) return nullptr;
  auto const value = params[0].value;
  if (value->isA(TFunc)) {
    return gen(
      env,
      FuncHasAttr,
      AttrData {static_cast<int32_t>(AttrIsMethCaller)},
      value);
  }
  if (value->isA(TObj)) {
    // The meth-caller class is a SystemLib class, so lookup must succeed.
    auto const mcCls = Unit::lookupClass(s_meth_caller_cls.get());
    assertx(mcCls);
    return gen(env, EqCls, cns(env, mcCls), gen(env, LdObjClass, value));
  }
  return nullptr;
}
// Optimized HH\meth_caller_get_class: delegate to the shared helper
// (template flag selects class-name extraction).
SSATmp* opt_meth_caller_get_class(IRGS& env, const ParamPrep& params) {
  if (params.size() != 1) return nullptr;
  return meth_caller_get_name<true>(env, params[0].value);
}
// Optimized HH\meth_caller_get_method: delegate to the shared helper
// (template flag selects method-name extraction).
SSATmp* opt_meth_caller_get_method(IRGS& env, const ParamPrep& params) {
  if (params.size() != 1) return nullptr;
  return meth_caller_get_name<false>(env, params[0].value);
}
1141 //////////////////////////////////////////////////////////////////////
1143 // Whitelists of builtins that we have optimized HHIR emitters for.
1144 // The first whitelist here simply lets us look up the functions above.
// Signature shared by all of the opt_* emitters above: given the prepared
// params, return the optimized result SSATmp, or nullptr to decline.
using OptEmitFn = SSATmp* (*)(IRGS& env, const ParamPrep& params);

// Case-insensitive map from builtin full-name to its optimized emitter.
const hphp_fast_string_imap<OptEmitFn> s_opt_emit_fns{
  {"is_a", opt_is_a},
  {"is_subclass_of", opt_is_subclass_of},
  {"method_exists", opt_method_exists},
  {"count", opt_count},
  {"sizeof", opt_sizeof},
  {"ini_get", opt_ini_get},
  {"in_array", opt_in_array},
  {"get_class", opt_get_class},
  {"sqrt", opt_sqrt},
  {"strlen", opt_strlen},
  {"clock_gettime_ns", opt_clock_gettime_ns},
  {"microtime", opt_microtime},
  {"__SystemLib\\max2", opt_max2},
  {"__SystemLib\\min2", opt_min2},
  {"ceil", opt_ceil},
  {"floor", opt_floor},
  {"abs", opt_abs},
  {"ord", opt_ord},
  {"chr", opt_chr},
  {"hh\\array_key_cast", opt_array_key_cast},
  {"hh\\type_structure", opt_type_structure},
  {"hh\\type_structure_classname", opt_type_structure_classname},
  {"hh\\is_list_like", opt_is_list_like},
  {"HH\\Lib\\_Private\\Native\\first", opt_container_first},
  {"HH\\Lib\\_Private\\Native\\last", opt_container_last},
  {"HH\\Lib\\_Private\\Native\\first_key", opt_container_first_key},
  {"HH\\Lib\\_Private\\Native\\last_key", opt_container_last_key},
  {"HH\\fun_get_function", opt_fun_get_function},
  {"HH\\class_meth_get_class", opt_class_meth_get_class},
  {"HH\\class_meth_get_method", opt_class_meth_get_method},
  {"HH\\Shapes::idx", opt_shapes_idx},
  {"HH\\BuiltinEnum::getNames", opt_enum_names},
  {"HH\\BuiltinEnum::getValues", opt_enum_values},
  {"HH\\BuiltinEnum::coerce", opt_enum_coerce},
  {"HH\\BuiltinEnum::isValid", opt_enum_is_valid},
  {"HH\\is_meth_caller", opt_is_meth_caller},
  {"HH\\tag_provenance_here", opt_tag_provenance_here},
  {"HH\\array_mark_legacy", opt_array_mark_legacy},
  {"HH\\meth_caller_get_class", opt_meth_caller_get_class},
  {"HH\\meth_caller_get_method", opt_meth_caller_get_method},
};
1191 // This second whitelist, a subset of the first, records which parameter
1192 // (if any) we need a vanilla input for to generate optimized HHIR.
// Value is the 0-based parameter index that must have a vanilla (unbespoke)
// array-like layout for the optimized emitter to fire.
const hphp_fast_string_imap<int> s_vanilla_params{
  {"count", 0},
  {"sizeof", 0},
  {"HH\\Shapes::idx", 0},
  {"HH\\Lib\\_Private\\Native\\first", 0},
  {"HH\\Lib\\_Private\\Native\\last", 0},
  {"HH\\Lib\\_Private\\Native\\first_key", 0},
  {"HH\\Lib\\_Private\\Native\\last_key", 0},
};
1204 //////////////////////////////////////////////////////////////////////
/*
 * Try to emit optimized HHIR for a call to `func`: first attempt full
 * constant folding (opt_foldable), then the per-builtin emitters in
 * s_opt_emit_fns. Returns nullptr when no optimization applies; on success
 * the result value is returned and the params have been decref'd.
 */
SSATmp* optimizedFCallBuiltin(IRGS& env,
                              const Func* func,
                              const ParamPrep& params,
                              uint32_t numNonDefault) {
  auto const result = [&]() -> SSATmp* {
    auto const fname = func->fullName();

    if (auto const retVal = opt_foldable(env, func, params, numNonDefault)) {
      // Check if any of the parameters are in-out. If not, we don't
      // need any special handling.
      auto const numInOut = std::count_if(
        params.info.begin(), params.info.end(),
        [] (const ParamPrep::Info& i) { return i.isInOut; }
      );
      if (!numInOut) return retVal;

      // Otherwise, the return value is actually a tuple containing
      // all of the results. We need to unpack the tuple and write the
      // contents to their proper place on the stack.
      auto const ad = retVal->arrLikeVal();
      assertx(ad->isStatic());
      assertx(ad->hasVanillaPackedLayout());
      assertx(ad->size() == numInOut + 1);

      size_t inOutIndex = 0;
      for (auto const& param : params.info) {
        if (!param.isInOut) continue;
        // NB: The parameters to the builtin have already been popped
        // at this point, so we don't need to account for them when
        // calculating the stack offset.
        auto const val = cns(env, ad->nvGetVal(inOutIndex + 1));
        auto const offset = offsetFromIRSP(
          env,
          BCSPRelOffset{safe_cast<int32_t>(inOutIndex)}
        );
        gen(env, StStk, IRSPRelOffsetData{offset}, sp(env), val);
        ++inOutIndex;
      }

      // The first element of the tuple is always the actual function
      // return.
      return cns(env, ad->nvGetVal(0));
    }

    auto const it = s_opt_emit_fns.find(fname->data());
    if (it != s_opt_emit_fns.end()) return it->second(env, params);
    return nullptr;
  }();

  if (result == nullptr) return nullptr;

  // We don't constrain types when loading parameters whose typehints don't
  // imply any checks. However, optimized codegen for a builtin generally
  // requires specific input types (and uses them to produce specific outputs).
  //
  // As a result, if we're returning optimized builtin codegen, we also need
  // to constrain our input parameters as well.
  //
  // To play well with assumptions in tracelet region selection, our optimized
  // codegen must obey the following restriction:
  //   - IF we relax the inputs for an optimized builtin to DataTypeSpecific
  //   - THEN the output must have its current type relaxed to DataTypeSpecific
  //
  // Here's a breaking example: a builtin that returns an int for one string
  // input and null for a different string input. If any builtin behaves like
  // this, it should place additional constraints on its inputs. (No current
  // builtins need to do so - DataTypeSpecific is a good default.)
  for (auto const& param : params.info) {
    env.irb->constrainValue(param.value, DataTypeSpecific);
  }
  params.decRefParams(env);
  return result;
}
1281 //////////////////////////////////////////////////////////////////////
1284 * Return the target type of a parameter to a builtin function.
1286 * If the builtin parameter has no type hints to cause coercion, this function
1287 * returns TBottom.
Type param_target_type(const Func* callee, uint32_t paramIdx) {
  auto const& pi = callee->params()[paramIdx];
  auto const& tc = pi.typeConstraint;
  if (tc.isNullable()) {
    // Nullable hint: accept null alongside the underlying type; no
    // underlying DataType means no coercion target at all.
    auto const dt = tc.underlyingDataType();
    if (!dt) return TBottom;
    return TNull | Type(*dt);
  }
  // No builtin type: only d/varray hints impose an array target.
  if (!pi.builtinType) return tc.isVArrayOrDArray() ? TArr : TBottom;
  // Object parameter with a null default accepts null as well.
  if (pi.builtinType == KindOfObject &&
      pi.defaultValue.m_type == KindOfNull) {
    return TNullableObj;
  }
  return Type(*pi.builtinType);
}
1305 //////////////////////////////////////////////////////////////////////
1308 * Collect parameters for a call to a builtin. Also determine which ones will
1309 * need to be passed through the eval stack, and which ones will need
1310 * conversions.
template <class LoadParam>
ParamPrep
prepare_params(IRGS& /*env*/, const Func* callee, SSATmp* ctx,
               uint32_t numArgs, uint32_t numNonDefault, bool forNativeImpl,
               LoadParam loadParam) {
  auto ret = ParamPrep{numArgs, callee};
  ret.ctx = ctx;
  ret.forNativeImpl = forNativeImpl;

  // Fill in in reverse order, since they may come from popC's (depending on
  // what loadParam wants to do).
  for (auto offset = uint32_t{numArgs}; offset-- > 0;) {
    auto const ty = param_target_type(callee, offset);
    auto& cur = ret[offset];
    auto& pi = callee->params()[offset];

    cur.value = loadParam(offset, ty);
    // If ty > TBottom, it had some kind of type hint.
    cur.needsConversion = (offset < numNonDefault && ty > TBottom);
    cur.isInOut = callee->isInOut(offset);
    // We do actually mean exact type equality here. We're only capable of
    // passing the following primitives through registers; everything else goes
    // by address unless its flagged "NativeArg".
    if (!pi.isTakenAsVariant() &&
        (ty == TBool || ty == TInt || ty == TDbl ||
         pi.isNativeArg() || pi.isTakenAsTypedValue())) {
      continue;
    }
    // In-out params are passed via their out location, not the eval stack.
    if (cur.isInOut) continue;

    ++ret.numByAddr;
    cur.passByAddr = true;
  }

  return ret;
}
1349 //////////////////////////////////////////////////////////////////////
1352 * CatchMaker makes catch blocks for calling builtins. There's a fair bit of
1353 * complexity here right now, for these reasons:
1355 * o Sometimes we're 'logically' inlining a php-level call to a function
1356 * that contains a NativeImpl opcode.
1358 * But we implement this by generating all the relevant NativeImpl code
1359 * after the InlineReturn for the callee, to make it easier for DCE to
1360 * eliminate the code that constructs the callee's activation record.
1361 * This means the unwinder is going to see our PC as equal to the FCall*
1362 * for the call to the function. We produce consistent state for unwinder
1363 * by decrefing and popping the arguments.
1365 * o HNI-style param coerce modes can force the entire function to return
1366 * false or null if the coersions fail. This is implemented via a
1367 * TVCoercionException, which is not a user-visible exception. So our
1368 * catch blocks are sometimes handling a PHP exception, and sometimes a
1369 * failure to coerce.
1371 * o Both of these things may be relevant to the same catch block.
1373 * Also, note that the CatchMaker keeps a pointer to the builtin call's
1374 * ParamPrep, which will have its values mutated by realize_params as it's
1375 * making coersions, so that we can see what's changed so far (and what to
1376 * clean up on the offramps). Some values that were refcounted may become
1377 * non-refcounted after conversions, and we can't DecRef things twice.
struct CatchMaker {
  enum class Kind { NotInlining, Inlining };

  // Keeps a reference to `params`; the caller must keep the ParamPrep alive
  // (and realize_params mutates it as coercions happen).
  explicit CatchMaker(IRGS& env, Kind kind, const ParamPrep* params)
    : env(env)
    , m_kind(kind)
    , m_params(*params)
  {
    // Native instance method calls are allowed from NativeImpl or in inlining.
    // Native static method calls are *additionally* allowed from FCallBuiltin.
    if (m_params.ctx == nullptr) return;
    DEBUG_ONLY auto const this_type = m_params.ctx->type();
    assertx(this_type <= TCls || this_type <= TObj);
    assertx(this_type <= TCls || m_params.forNativeImpl || inlining());
  }

  CatchMaker(const CatchMaker&) = delete;
  CatchMaker(CatchMaker&&) = default;

  bool inlining() const {
    switch (m_kind) {
      case Kind::NotInlining: return false;
      case Kind::Inlining:    return true;
    }
    not_reached();
  }

  // Build the catch block used for exceptions thrown by the builtin call:
  // decref the not-yet-consumed params, sync VM regs, then unwind.
  Block* makeUnusualCatch() const {
    assertx(!env.irb->fs().stublogue());
    auto const exit = defBlock(env, Block::Hint::Unlikely);
    BlockPusher bp(*env.irb, makeMarker(env, bcOff(env)), exit);
    gen(env, BeginCatch);
    decRefParams();
    prepareForCatch();
    gen(env,
        EndCatch,
        EndCatchData {
          spOffBCFromIRSP(env),
          EndCatchData::CatchMode::UnwindOnly,
          EndCatchData::FrameMode::Phplogue,
          EndCatchData::Teardown::Full
        },
        fp(env), sp(env));
    return exit;
  }

  /*
   * DecRef the params in preparation for an exception or side
   * exit. Parameters that are not being passed through the stack
   * still may need to be decref'd, because they may have been a
   * reference counted type that was going to be converted to a
   * non-reference counted type that we'd pass in a register. As we
   * do the coersions, params.value gets updated so whenever we call
   * these catch block creation functions it will only decref things
   * that weren't yet converted.
   */
  void decRefParams() const {
    // For NativeImpl the params live in frame locals, which the normal
    // unwinder teardown handles.
    if (m_params.forNativeImpl) return;
    for (auto i = m_params.size(); i--; ) {
      auto const &pi = m_params[i];
      if (pi.passByAddr) {
        popDecRef(env);
      } else {
        decRef(env, pi.value);
      }
    }
  }

private:
  void prepareForCatch() const {
    if (inlining() && m_params.ctx) {
      decRef(env, m_params.ctx);
    }
    /*
     * We're potentially spilling to a different depth than the unwinder
     * would've expected, so we need an eager sync. Even if we aren't inlining
     * this can happen, because before doing the CallBuiltin we set the marker
     * stack offset to only include the passed-through-stack args.
     *
     * So before we leave, update the marker to placate EndCatch assertions,
     * which is trying to detect failure to do this properly.
     */
    auto const spOff = IRSPRelOffsetData { spOffBCFromIRSP(env) };
    gen(env, EagerSyncVMRegs, spOff, fp(env), sp(env));
    updateMarker(env); // Mark the EndCatch safe, since we're eager syncing.
  }

private:
  IRGS& env;
  Kind const m_kind;
  const ParamPrep& m_params;
};
1472 //////////////////////////////////////////////////////////////////////
1475 * Take the value in param, apply any needed conversions
1476 * and return the value to be passed to CallBuiltin.
1478 * checkType(ty, fail):
1479 * verify that the param is of type ty, and branch to fail
1480 * if not. If it results in a new SSATmp*, (as eg CheckType
1481 * would), then that should be returned; otherwise it should
1482 * return nullptr;
1483 * convertParam(ty):
1484 * convert the param to ty; failure should be handled by
1485 * CatchMaker::makeParamCoerceCatch, and it should return
1486 * a new SSATmp* (if appropriate) or nullptr.
1487 * realize():
1488 * return the SSATmp* needed by CallBuiltin for this parameter.
1489 * if checkType and convertParam returned non-null values,
1490 * param.value will have been updated with a phi of their results.
template<class V, class C, class R>
SSATmp* realize_param(IRGS& env,
                      ParamPrep::Info& param,
                      const Func* callee,
                      Type targetTy,
                      V checkType,
                      C convertParam,
                      R realize) {
  if (param.needsConversion) {
    // Strip the nullable part: we check/convert against the base type, and
    // separately accept InitNull when the target was nullable.
    auto const baseTy = targetTy - TNull;
    assertx(baseTy.isKnownDataType());
    auto const convertTy = baseTy;

    if (auto const value = cond(
          env,
          [&] (Block* convert) -> SSATmp* {
            if (targetTy == baseTy) {
              return checkType(baseTy, convert);
            }
            // Nullable target: try the base type first, then InitNull;
            // only if both fail do we branch to the conversion path.
            return cond(
              env,
              [&] (Block* fail) { return checkType(baseTy, fail); },
              [&] (SSATmp* v) { return v; },
              [&] {
                return checkType(TInitNull, convert);
              });
          },
          [&] (SSATmp* v) { return v; },
          [&] () -> SSATmp* {
            return convertParam(convertTy);
          }
        )) {
      // Heads up on non-local state here: we have to update
      // the values inside ParamPrep so that the CatchMaker
      // functions know about new potentially refcounted types
      // to decref, or values that were already decref'd and
      // replaced with things like ints.
      param.value = value;
    }
  }
  return realize();
}
/*
 * Coerce `val` toward `target` where the runtime allows implicit builtin
 * coercions: Func/Cls -> string (with optional notice), and ClsMeth ->
 * vec/varray (with optional notice). `update` receives the coerced value;
 * `fail` is invoked (and TBottom returned) when no coercion applies.
 */
template<class U, class F>
SSATmp* maybeCoerceValue(
  IRGS& env,
  SSATmp* val,
  Type target,
  uint32_t id,
  const Func* func,
  U update,
  F fail
) {
  auto bail = [&] { fail(); return cns(env, TBottom); };
  if (target <= TStr) {
    if (!val->type().maybe(TFunc|TCls)) return bail();

    // Cast a func/class to its name, raising the implicit-conversion
    // notice when enabled.
    auto castW = [&] (SSATmp* val, bool isCls){
      if (RuntimeOption::EvalStringHintNotices) {
        gen(
          env,
          RaiseNotice,
          cns(
            env,
            makeStaticString(
              isCls ? Strings::CLASS_TO_STRING_IMPLICIT
                    : Strings::FUNC_TO_STRING_IMPLICIT
            )
          )
        );
      }
      return update(val);
    };

    return cond(
      env,
      [&] (Block* f) { return gen(env, CheckType, TFunc, f, val); },
      [&] (SSATmp* fval) { return castW(gen(env, LdFuncName, fval), false); },
      [&] {
        hint(env, Block::Hint::Unlikely);
        return cond(
          env,
          [&] (Block* f) { return gen(env, CheckType, TCls, f, val); },
          [&] (SSATmp* cval) { return castW(gen(env, LdClsName, cval), true); },
          [&] {
            hint(env, Block::Hint::Unlikely);
            return bail();
          }
        );
      }
    );
  }

  if (target <= (RuntimeOption::EvalHackArrDVArrs ? TVec : TArr)) {
    if (!val->type().maybe(TClsMeth)) return bail();
    return cond(
      env,
      [&] (Block* f) { return gen(env, CheckType, TClsMeth, f, val); },
      [&] (SSATmp* methVal) {
        if (RuntimeOption::EvalVecHintNotices) {
          raiseClsmethCompatTypeHint(
            env, id, func, func->params()[id].typeConstraint);
        }
        auto const ret = update(convertClsMethToVec(env, methVal));
        decRef(env, methVal);
        return ret;
      },
      [&] {
        hint(env, Block::Hint::Unlikely);
        return bail();
      }
    );
  }

  return bail();
}
1611 * Prepare the actual arguments to the CallBuiltin instruction, by converting a
1612 * ParamPrep into a vector of SSATmps to pass to CallBuiltin. If any of the
1613 * parameters needed type conversions, we need to do that here too.
jit::vector<SSATmp*> realize_params(IRGS& env,
                                    const Func* callee,
                                    ParamPrep& params,
                                    const CatchMaker& maker) {
  // Layout of the CallBuiltin srcs: fp, sp, [ctx], then one per param.
  auto const cbNumArgs = 2 + params.size() + (params.ctx ? 1 : 0);
  auto ret = jit::vector<SSATmp*>(cbNumArgs);
  auto argIdx = uint32_t{0};
  ret[argIdx++] = fp(env);
  ret[argIdx++] = sp(env);
  if (params.ctx) ret[argIdx++] = params.ctx;

  // Whether a darray/varray compat notice check is needed for this param.
  auto const needDVCheck = [&](uint32_t param, const Type& ty) {
    if (!RuntimeOption::EvalHackArrCompatTypeHintNotices) return false;
    if (!callee->params()[param].typeConstraint.isArray()) return false;
    return ty <= TArr;
  };

  auto const dvCheck = [&](uint32_t param, SSATmp* val) {
    assertx(needDVCheck(param, val->type()));
    auto const& tc = callee->params()[param].typeConstraint;
    gen(
      env,
      RaiseHackArrParamNotice,
      RaiseHackArrParamNoticeData { tc, int32_t(param), false },
      maker.makeUnusualCatch(),
      val,
      cns(env, callee)
    );
  };

  DEBUG_ONLY auto seenBottom = false;
  DEBUG_ONLY auto usedStack = false;
  auto stackIdx = uint32_t{0};
  for (auto paramIdx = uint32_t{0}; paramIdx < params.size(); ++paramIdx) {
    auto& param = params[paramIdx];
    auto const targetTy = param_target_type(callee, paramIdx);

    seenBottom |= (param.value->type() == TBottom);

    // Case 1: the param is a pointer into the frame (NativeImpl path).
    if (param.value->type() <= TMemToCell) {
      ret[argIdx++] = realize_param(
        env, param, callee, targetTy,
        [&] (const Type& ty, Block* fail) -> SSATmp* {
          gen(env, CheckTypeMem, ty, fail, param.value);
          if (needDVCheck(paramIdx, ty)) {
            dvCheck(paramIdx, gen(env, LdMem, ty, param.value));
          }
          return nullptr;
        },
        [&] (const Type& ty) -> SSATmp* {
          hint(env, Block::Hint::Unlikely);
          auto val = gen(env, LdMem, TCell, param.value);
          assertx(ty.isKnownDataType());
          maybeCoerceValue(
            env,
            val,
            ty,
            paramIdx,
            callee,
            [&] (SSATmp* val) {
              // Write the coerced value back to the local.
              gen(env, StLoc, LocalId{paramIdx}, fp(env), val);
              return val;
            },
            [&] {
              gen(env, ThrowParameterWrongType,
                  FuncArgTypeData { callee, paramIdx + 1, ty.toDataType() },
                  maker.makeUnusualCatch(), val);
            }
          );
          return nullptr;
        },
        [&] {
          if (!param.passByAddr) {
            assertx(!callee->params()[paramIdx].isTakenAsVariant());
            assertx(targetTy == TBool ||
                    targetTy == TInt ||
                    targetTy == TDbl ||
                    callee->params()[paramIdx].isNativeArg() ||
                    callee->params()[paramIdx].isTakenAsTypedValue() ||
                    callee->isInOut(paramIdx));
            return gen(env, LdMem,
                       targetTy == TBottom ? TCell : targetTy,
                       param.value);
          }
          return param.value;
        });
      continue;
    }

    // Case 2: a value passed in a register (not spilled to the stack).
    if (!param.passByAddr) {
      auto const oldVal = params[paramIdx].value;
      ret[argIdx++] = realize_param(
        env, param, callee, targetTy,
        [&] (const Type& ty, Block* fail) {
          auto ret = gen(env, CheckType, ty, fail, param.value);
          env.irb->constrainValue(ret, DataTypeSpecific);
          if (needDVCheck(paramIdx, ty)) dvCheck(paramIdx, ret);
          return ret;
        },
        [&] (const Type& ty) -> SSATmp* {
          hint(env, Block::Hint::Unlikely);
          assert(ty.isKnownDataType());
          return maybeCoerceValue(
            env,
            param.value,
            ty,
            paramIdx,
            callee,
            [&] (SSATmp* val) { return val; },
            [&] {
              gen(env, ThrowParameterWrongType,
                  FuncArgTypeData { callee, paramIdx + 1, ty.toDataType() },
                  maker.makeUnusualCatch(), oldVal);
            }
          );
        },
        [&] {
          /*
           * This gets tricky:
           *  - if we had a ref-counted type, and it was converted
           *    to a Bool, Int or Dbl above, we explicitly DecReffed it
           *    (in coerce_value).
           *  - if we did a CoerceMem which implicitly DecReffed the old value
           * In either case, the old value is taken care of, and any future
           * DecRefs (from exceptions, or after the call on the normal flow
           * of execution) should DecRef param.value (ie the post-coercion
           * value).
           */
          return param.value;
        });
      continue;
    }

    // Case 3: the param was spilled to the eval stack; realize its address.
    usedStack = true;
    auto const offset = BCSPRelOffset{safe_cast<int32_t>(
      params.numByAddr - stackIdx - 1)};

    ret[argIdx++] = realize_param(
      env, param, callee, targetTy,
      [&] (const Type& ty, Block* fail) -> SSATmp* {
        auto irSPRel = offsetFromIRSP(env, offset);
        gen(env, CheckStk, IRSPRelOffsetData { irSPRel }, ty, fail, sp(env));
        env.irb->constrainStack(irSPRel, DataTypeSpecific);
        if (needDVCheck(paramIdx, ty)) {
          dvCheck(
            paramIdx,
            gen(env, LdStk, ty, IRSPRelOffsetData { irSPRel }, sp(env))
          );
        }
        return nullptr;
      },
      [&] (const Type& ty) -> SSATmp* {
        always_assert(ty.isKnownDataType());
        hint(env, Block::Hint::Unlikely);
        auto const off = IRSPRelOffsetData{ offsetFromIRSP(env, offset) };
        auto const tv = gen(env, LdStk, TCell, off, sp(env));

        maybeCoerceValue(
          env,
          tv,
          ty,
          paramIdx,
          callee,
          [&] (SSATmp* val) {
            // Write the coerced value back to its stack slot.
            gen(env, StStk, off, sp(env), val);
            env.irb->exceptionStackBoundary();
            return val;
          },
          [&] {
            gen(env, ThrowParameterWrongType,
                FuncArgTypeData { callee, paramIdx + 1, ty.toDataType() },
                maker.makeUnusualCatch(), tv);
          }
        );
        return nullptr;
      },
      [&] {
        return ldStkAddr(env, offset);
      });
    ++stackIdx;
  }

  assertx(seenBottom || !usedStack || stackIdx == params.numByAddr);
  assertx(argIdx == cbNumArgs);

  return ret;
}
1804 //////////////////////////////////////////////////////////////////////
1806 SSATmp* builtinInValue(IRGS& env, const Func* builtin, uint32_t i) {
1807 auto const tv = Native::builtinInValue(builtin, i);
1808 if (!tv) return nullptr;
1809 return cns(env, *tv);
/*
 * Emit the CallBuiltin for `callee`. Handles three situations: FCallBuiltin
 * (params on the eval stack), inlined NativeImpl, and non-inlined NativeImpl
 * (params.forNativeImpl, params are frame-local pointers). Returns the
 * builtin's return value.
 */
SSATmp* builtinCall(IRGS& env,
                    const Func* callee,
                    ParamPrep& params,
                    int32_t numNonDefault,
                    const CatchMaker& catchMaker) {
  assertx(callee->nativeFuncPtr());

  if (!params.forNativeImpl) {
    // For FCallBuiltin, params are TypedValues, while for NativeImpl, they're
    // pointers to these values on the frame. We only optimize native calls
    // when we have the values.
    auto const opt = optimizedFCallBuiltin(env, callee, params, numNonDefault);
    if (opt) return opt;

    /*
     * Everything that needs to be on the stack gets spilled now.
     *
     * If we're not inlining, the reason we do this even when numByAddr is
     * zero is to make it so that in either case the stack depth when we enter
     * our catch blocks is always the same as the numByAddr value, in all
     * situations. If we didn't do this, then when we aren't inlining, and
     * numByAddr is zero, we'd have the stack depth be the total num
     * params (the depth before the FCallBuiltin), which would add more cases
     * to handle in the catch blocks.
     */
    if (params.numByAddr != 0 || !catchMaker.inlining()) {
      for (auto i = uint32_t{0}; i < params.size(); ++i) {
        if (params[i].passByAddr) {
          push(env, params[i].value);
        }
      }
      /*
       * This marker update is to make sure rbx points to the bottom of our
       * stack if we enter a catch trace. It's also necessary because we might
       * run destructors as part of parameter coersions, which we don't want to
       * clobber our spilled stack.
       */
      updateMarker(env);
    }
  }

  // If we are inlining, we've done various DefInlineFP-type stuff that can
  // affect stack depth.
  env.irb->exceptionStackBoundary();

  // Collect the realized parameters.
  auto realized = realize_params(env, callee, params, catchMaker);

  // Store the inout parameters into their out locations.
  if (callee->takesInOutParams()) {
    int32_t idx = 0;
    // Index of the first param in `realized` (after fp, sp and optional ctx).
    uint32_t aoff = params.ctx ? 3 : 2;
    for (auto i = uint32_t{0}; i < params.size(); ++i) {
      if (!params[i].isInOut) continue;
      auto ty = [&] () -> folly::Optional<Type> {
        auto const r = builtinOutType(callee, i);
        if (r.isKnownDataType()) return r;
        return {};
      }();
      if (auto const iv = builtinInValue(env, callee, i)) {
        decRef(env, realized[i + aoff]);
        realized[i + aoff] = iv;
        ty = iv->type();
        // Constant values may be static; widen to cover counted variants.
        if (ty->maybe(TPersistentArr)) *ty |= TArr;
        if (ty->maybe(TPersistentVec)) *ty |= TVec;
        if (ty->maybe(TPersistentDict)) *ty |= TDict;
        if (ty->maybe(TPersistentKeyset)) *ty |= TKeyset;
        if (ty->maybe(TPersistentStr)) *ty |= TStr;
      }
      if (params.forNativeImpl) {
        // Move the value to the caller stack to avoid an extra ref-count
        gen(env, StLoc, LocalId{i}, fp(env), cns(env, TInitNull));
        auto const addr = gen(env, LdOutAddr, IndexData(idx++), fp(env));
        gen(env, StMem, ty, addr, realized[i + aoff]);
        realized[i + aoff] = addr;
        continue;
      }
      auto const offset =
        BCSPRelOffset{safe_cast<int32_t>(params.numByAddr + idx++)};
      auto const out = offsetFromIRSP(env, offset);
      gen(env, StStk, IRSPRelOffsetData{out}, ty, sp(env), realized[i + aoff]);
      params[i].value = cns(env, TInitNull);
      realized[i + aoff] = gen(env, LdStkAddr, IRSPRelOffsetData{out}, sp(env));
    }
    env.irb->exceptionStackBoundary();
  }

  // Only record the return stack offset if we're inlining or if we're
  // processing a FCallBuiltin. Otherwise we're processing a
  // non-inlined NativeImpl. In that case, there shouldn't be anything
  // on the stack and any out parameters point to the caller's stack,
  // so there's nothing for FrameState to do.
  auto const retOff = [&] () -> folly::Optional<IRSPRelOffset> {
    if (params.forNativeImpl && !catchMaker.inlining()) {
      assertx(env.irb->fs().bcSPOff() == env.context.initSpOffset);
      return folly::none;
    }
    return offsetFromIRSP(
      env,
      BCSPRelOffset{safe_cast<int32_t>(params.numByAddr)}
    );
  }();

  // Make the actual call.
  SSATmp** const decayedPtr = &realized[0];
  auto const ret = gen(
    env,
    CallBuiltin,
    CallBuiltinData {
      spOffBCFromIRSP(env),
      retOff,
      callee,
      numNonDefault
    },
    catchMaker.makeUnusualCatch(),
    std::make_pair(realized.size(), decayedPtr)
  );

  if (!params.forNativeImpl) {
    if (params.ctx && params.ctx->type() <= TObj) {
      decRef(env, params.ctx);
    }
    catchMaker.decRefParams();
  }

  return ret;
}
1941 * When we're inlining a NativeImpl opcode, we know this is the only opcode in
1942 * the callee method body aside from AssertRATs (bytecode invariant). So in
1943 * order to make sure we can eliminate the DefInlineFP, we do the CallBuiltin
1944 * instruction after we've left the inlined frame.
1946 * We may need to pass some arguments to the builtin through the stack (e.g. if
1947 * it takes const Variant&'s)---these are spilled to the stack after leaving
1948 * the callee.
1950 * To make this work, we need to do some weird things in the catch trace. ;)
void nativeImplInlined(IRGS& env) {
  auto const callee = curFunc(env);
  assertx(callee->nativeFuncPtr());

  auto const numArgs = callee->numParams();
  // Methods have a ctx (this/class); free functions don't.
  auto const paramThis = callee->isMethod() ? ldCtx(env) : nullptr;

  // The inlined frame records how many args were actually passed.
  auto numNonDefault = fp(env)->inst()->extra<DefInlineFP>()->numArgs;
  auto params = prepare_params(
    env,
    callee,
    paramThis,
    numArgs,
    numNonDefault,
    false,
    [&] (uint32_t i, const Type) {
      return ldLoc(env, i, nullptr, DataTypeSpecific);
    }
  );

  // Leave the inlined frame first (see the comment above this function);
  // the CallBuiltin itself is emitted in the caller's frame.
  implInlineReturn(env);

  auto const catcher = CatchMaker {
    env,
    CatchMaker::Kind::Inlining,
    &params
  };

  push(env, builtinCall(env, callee, params, numNonDefault, catcher));
}
1983 //////////////////////////////////////////////////////////////////////
1987 //////////////////////////////////////////////////////////////////////
1989 int getBuiltinVanillaParam(const char* name) {
1990 auto const it = s_vanilla_params.find(name);
1991 return it != s_vanilla_params.end() ? it->second : -1;
// Optimized lowering of is_object(): true for any object except instances of
// __PHP_Incomplete_Class.
SSATmp* optimizedCallIsObject(IRGS& env, SSATmp* src) {
  if (src->isA(TObj) && src->type().clsSpec()) {
    auto const cls = src->type().clsSpec().cls();
    if (!env.irb->constrainValue(src, GuardConstraint(cls).setWeak())) {
      // If we know the class without having to specialize a guard
      // any further, use it.
      return cns(env, cls != SystemLib::s___PHP_Incomplete_ClassClass);
    }
  }

  // No object in the type at all: statically false.
  if (!src->type().maybe(TObj)) {
    return cns(env, false);
  }

  // Runtime check: obj's class != __PHP_Incomplete_Class.
  auto checkClass = [&] (SSATmp* obj) {
    auto cls = gen(env, LdObjClass, obj);
    auto testCls = SystemLib::s___PHP_Incomplete_ClassClass;
    auto eq = gen(env, EqCls, cls, cns(env, testCls));
    return gen(env, XorBool, eq, cns(env, true));
  };

  return cond(
    env,
    [&] (Block* taken) {
      auto isObj = gen(env, IsType, TObj, src);
      gen(env, JmpZero, taken, isObj);
    },
    [&] { // Next: src is an object
      auto obj = gen(env, AssertType, TObj, src);
      return checkClass(obj);
    },
    [&] { // Taken: src is not an object
      return cns(env, false);
    }
  );
}
// FCallBuiltin: call a known builtin with arguments already on the stack.
void emitFCallBuiltin(IRGS& env,
                      uint32_t numArgs,
                      uint32_t numNonDefault,
                      uint32_t numOut,
                      const StringData* funcName) {
  auto const callee = Unit::lookupBuiltin(funcName);
  if (!callee) PUNT(Missing-builtin);
  // Bytecode's inout count must match the callee's declaration.
  if (callee->numInOutParams() != numOut) PUNT(bad-inout);

  emitCallerRxChecksKnown(env, callee);
  // FCallBuiltin is only emitted for free functions and static methods.
  assertx(!callee->isMethod() || (callee->isStatic() && callee->cls()));
  auto const ctx = callee->isStatic() ? cns(env, callee->cls()) : nullptr;

  auto params = prepare_params(
    env, callee, ctx,
    numArgs, numNonDefault, false, [&](uint32_t /*i*/, const Type ty) {
      // TBottom means the arg slot is unreachable/unused; don't constrain it.
      auto specificity =
        ty == TBottom ? DataTypeGeneric : DataTypeSpecific;
      return pop(env, specificity);
    });

  auto const catcher = CatchMaker {
    env,
    CatchMaker::Kind::NotInlining,
    &params
  };

  push(env, builtinCall(env, callee, params, numNonDefault, catcher));
}
// NativeImpl: the body of an HNI function. Either lower to a direct
// CallBuiltin (when a native function pointer is available) or fall back to
// the generic NativeImpl IR op.
void emitNativeImpl(IRGS& env) {
  // Inlined NativeImpls take a separate path; see nativeImplInlined().
  if (isInlining(env)) return nativeImplInlined(env);

  auto const callee = curFunc(env);

  // Fallback: call through the generic native-impl trampoline and return.
  auto genericNativeImpl = [&]() {
    gen(env, NativeImpl, fp(env), sp(env));
    auto const retVal = gen(env, LdRetVal, callReturnType(callee), fp(env));
    auto const spAdjust = offsetToReturnSlot(env);
    auto const data = RetCtrlData { spAdjust, false, AuxUnion{0} };
    gen(env, RetCtrl, data, sp(env), fp(env), retVal);
  };

  if (!callee->nativeFuncPtr()) {
    genericNativeImpl();
    return;
  }

  auto ctx = callee->isMethod() ? ldCtx(env) : nullptr;
  auto params = prepare_params(
    env,
    callee,
    ctx,
    callee->numParams(),
    callee->numParams(),
    true,                         // forNativeImpl: args are frame locals
    [&] (uint32_t i, const Type) {
      // Pass the address of the local; the builtin reads it in place.
      return gen(env, LdLocAddr, LocalId(i), fp(env));
    }
  );
  auto const catcher = CatchMaker {
    env,
    CatchMaker::Kind::NotInlining,
    &params
  };

  push(env, builtinCall(env, callee, params, callee->numParams(), catcher));
  emitRetC(env);
}
2105 //////////////////////////////////////////////////////////////////////
namespace {

// Collection method names, matched case-insensitively (via isame) in
// collectionMethodReturnsThis() below.
const StaticString s_add("add");
const StaticString s_addall("addall");
const StaticString s_append("append");
const StaticString s_clear("clear");
const StaticString s_remove("remove");
const StaticString s_removeall("removeall");
const StaticString s_removekey("removekey");
const StaticString s_set("set");
const StaticString s_setall("setall");
2119 // Whitelist of known collection methods that always return $this (ignoring
2120 // parameter coercion failure issues).
2121 bool collectionMethodReturnsThis(const Func* callee) {
2122 auto const cls = callee->implCls();
2124 if (cls == c_Vector::classof()) {
2125 return
2126 callee->name()->isame(s_add.get()) ||
2127 callee->name()->isame(s_addall.get()) ||
2128 callee->name()->isame(s_append.get()) ||
2129 callee->name()->isame(s_clear.get()) ||
2130 callee->name()->isame(s_removekey.get()) ||
2131 callee->name()->isame(s_set.get()) ||
2132 callee->name()->isame(s_setall.get());
2135 if (cls == c_Map::classof()) {
2136 return
2137 callee->name()->isame(s_add.get()) ||
2138 callee->name()->isame(s_addall.get()) ||
2139 callee->name()->isame(s_clear.get()) ||
2140 callee->name()->isame(s_remove.get()) ||
2141 callee->name()->isame(s_set.get()) ||
2142 callee->name()->isame(s_setall.get());
2145 if (cls == c_Set::classof()) {
2146 return
2147 callee->name()->isame(s_add.get()) ||
2148 callee->name()->isame(s_addall.get()) ||
2149 callee->name()->isame(s_clear.get()) ||
2150 callee->name()->isame(s_remove.get()) ||
2151 callee->name()->isame(s_removeall.get());
2154 return false;
/*
 * Type of the inout parameter `i' of `builtin' as written back after the
 * call. Prefers the native out-type when declared; otherwise derives a type
 * from the parameter's type-constraint.
 */
Type builtinOutType(const Func* builtin, uint32_t i) {
  assertx(builtin->isCPPBuiltin());
  assertx(builtin->isInOut(i));

  // An explicit HNI out-type wins.
  if (auto const dt = Native::builtinOutType(builtin, i)) return Type{*dt};

  auto const& tc = builtin->params()[i].typeConstraint;
  // Soft or mixed constraints give us no useful bound.
  if (tc.isSoft() || tc.isMixed()) return TInitCell;

  auto ty = [&] () -> Type {
    switch (tc.metaType()) {
      case AnnotMetaType::Precise:
        return Type{*tc.underlyingDataType()};
      case AnnotMetaType::Mixed:
        return TInitCell;
      case AnnotMetaType::Self:
        return TObj;
      case AnnotMetaType::Parent:
        return TObj;
      case AnnotMetaType::Callable:
        return TInitCell;
      case AnnotMetaType::Number:
        return TInt | TDbl;
      case AnnotMetaType::ArrayKey:
        return TInt | TStr;
      case AnnotMetaType::This:
        return TObj;
      // d/varrays are represented as vec/dict when HackArrDVArrs is on.
      case AnnotMetaType::VArray:
        return RuntimeOption::EvalHackArrDVArrs ? TVec : TArr;
      case AnnotMetaType::DArray:
        return RuntimeOption::EvalHackArrDVArrs ? TDict : TArr;
      case AnnotMetaType::VArrOrDArr:
        return RuntimeOption::EvalHackArrDVArrs ? TVec | TDict : TArr;
      case AnnotMetaType::VecOrDict:
        return TVec | TDict;
      case AnnotMetaType::ArrayLike:
        return TArrLike;
      case AnnotMetaType::Nonnull:
      case AnnotMetaType::NoReturn:
      case AnnotMetaType::Nothing:
        return TInitCell;
    }
    not_reached();
  }();

  // A nullable constraint additionally admits null.
  return tc.isNullable() ? ty | TInitNull : ty;
}
// Compute the JIT type of `builtin's return value, used to control code-gen
// when lowering CallBuiltin.
Type builtinReturnType(const Func* builtin) {
  // Why do we recalculate the type here than just using HHBBC's inferred type?
  // Unlike for regular PHP functions, we have access to all the same
  // information that HHBBC does, and the JIT type-system is slightly more
  // expressive. So, by doing it ourself, we can derive a slightly more precise
  // type.
  assertx(builtin->isCPPBuiltin());

  // NB: It is *not* safe to be pessimistic here and return TCell (or any other
  // approximation). The builtin's return type inferred here is used to control
  // code-gen when lowering the builtin call to vasm and must be no more general
  // than the HNI declaration (if present).
  auto type = [&]{
    // If this is a collection method which returns $this, use that fact to
    // infer the exact returning type. Otherwise try to use HNI declaration.
    if (collectionMethodReturnsThis(builtin)) {
      assertx(builtin->hniReturnType() == KindOfObject);
      return Type::ExactObj(builtin->implCls());
    }
    if (auto const hniType = builtin->hniReturnType()) {
      // Refine a declared array return to varray/darray using the
      // return-type constraint when possible.
      if (isArrayType(*hniType)) {
        auto const& constraint = builtin->returnTypeConstraint();
        if (constraint.isVArray()) return TVArr;
        if (constraint.isDArray()) return TDArr;
      }
      return Type{*hniType};
    }
    return TInitCell;
  }();

  // We're not sure what kind of array-likes builtins will produce.
  if (RO::EvalAllowBespokeArrayLikes) type = type.widenToBespoke();

  // "Reference" types (types represented by a pointer) can always be null.
  if (type.isReferenceType()) {
    type |= TInitNull;
  } else {
    assertx(type == TInitCell || type.isSimpleType());
  }

  return type & TInitCell;
}
2251 /////////////////////////////////////////////////////////////////////
2253 namespace {
// Lower Idx/array_idx with a PHP-array base. Stack layout (top down):
// default, key, base.
void implArrayIdx(IRGS& env) {
  // These types are just used to decide what to do; once we know what we're
  // actually doing we constrain the values with the popC()s later on in this
  // function.
  auto const keyType = topC(env, BCSPRelOffset{1}, DataTypeGeneric)->type();

  if (keyType <= TNull) {
    auto const def = popC(env, DataTypeGeneric);
    auto const key = popC(env);
    auto const base = popC(env);

    // if the key is null it will not be found so just return the default
    push(env, def);
    decRef(env, base);
    decRef(env, key);
    return;
  }
  // Non int/string keys: punt to the interpreter (it raises the error).
  if (!(keyType <= TInt || keyType <= TStr)) {
    interpOne(env, TCell, 3);
    return;
  }

  auto const def = popC(env, DataTypeGeneric); // a helper will decref it but
                                               // the translated code doesn't
                                               // care about the type
  auto const key = popC(env);
  auto const base = popC(env);

  // Use access profiling: fast path loads at a profiled position, generic
  // path falls back to ArrayIdx.
  auto const elem = profiledArrayAccess(env, base, key,
    [&] (SSATmp* arr, SSATmp* key, uint32_t pos) {
      return gen(env, MixedArrayGetK, IndexData { pos }, arr, key);
    },
    [&] (SSATmp* key, SizeHintData data) {
      return gen(env, ArrayIdx, data, base, key, def);
    }
  );

  auto finish = [&](SSATmp* tmp) {
    pushIncRef(env, tmp);
    decRef(env, base);
    decRef(env, key);
    decRef(env, def);
  };

  auto const pelem = profiledType(env, elem, [&] { finish(elem); });
  finish(pelem);
}
// Lower Idx with a vec base. If the base is a Vector collection, its
// underlying vec is passed as `loaded_collection_vec' (the collection itself
// still sits on the stack and must be decref'd).
void implVecIdx(IRGS& env, SSATmp* loaded_collection_vec) {
  auto const def = popC(env);
  auto const key = popC(env);
  auto const stack_base = popC(env);

  auto const finish = [&](SSATmp* elem) {
    pushIncRef(env, elem);
    decRef(env, def);
    decRef(env, key);
    decRef(env, stack_base);
  };

  // Null/string keys never hit in a vec: return the default.
  if (key->isA(TNull | TStr)) return finish(def);

  if (!key->isA(TInt)) {
    // TODO(T11019533): Fix the underlying issue with unreachable code rather
    // than papering over it by pushing an unused value here.
    finish(def);
    updateMarker(env);
    env.irb->exceptionStackBoundary();
    gen(env, ThrowInvalidArrayKey, stack_base, key);
    return;
  }

  auto const use_base = loaded_collection_vec
    ? loaded_collection_vec
    : stack_base;
  assertx(use_base->isA(TVec));

  // Bounds-check; in range loads the element, out of range yields `def'.
  auto const elem = cond(
    env,
    [&] (Block* taken) {
      gen(env, CheckPackedArrayDataBounds, taken, use_base, key);
    },
    [&] { return gen(env, LdVecElem, use_base, key); },
    [&] { return def; }
  );

  auto const pelem = profiledType(env, elem, [&] { finish(elem); } );
  finish(pelem);
}
// Lower Idx with a dict or keyset base (`is_dict' selects which). If the base
// is a Map collection, its underlying dict is passed as
// `loaded_collection_dict'.
void implDictKeysetIdx(IRGS& env,
                       bool is_dict,
                       SSATmp* loaded_collection_dict) {
  auto const def = popC(env);
  auto const key = popC(env);
  auto const stack_base = popC(env);

  auto const finish = [&](SSATmp* elem) {
    pushIncRef(env, elem);
    decRef(env, def);
    decRef(env, key);
    decRef(env, stack_base);
  };

  // A null key never hits: return the default.
  if (key->isA(TNull)) return finish(def);

  if (!key->isA(TInt) && !key->isA(TStr)) {
    // TODO(T11019533): Fix the underlying issue with unreachable code rather
    // than papering over it by pushing an unused value here.
    finish(def);
    updateMarker(env);
    env.irb->exceptionStackBoundary();
    gen(env, ThrowInvalidArrayKey, stack_base, key);
    return;
  }

  // Collections only come in via the dict path.
  assertx(is_dict || !loaded_collection_dict);
  auto const use_base = loaded_collection_dict
    ? loaded_collection_dict
    : stack_base;
  assertx(use_base->isA(is_dict ? TDict : TKeyset));

  // Profiled fast path at a known position; generic DictIdx/KeysetIdx
  // otherwise.
  auto const elem = profiledArrayAccess(env, use_base, key,
    [&] (SSATmp* base, SSATmp* key, uint32_t pos) {
      return gen(env, is_dict ? DictGetK : KeysetGetK, IndexData { pos },
                 base, key);
    },
    [&] (SSATmp* key, SizeHintData data) {
      return is_dict ? gen(env, DictIdx, data, use_base, key, def)
                     : gen(env, KeysetIdx, use_base, key, def);
    }
  );

  auto const pelem = profiledType(env, elem, [&] { finish(elem); });
  finish(pelem);
}
2393 * Return the GuardConstraint that should be used to constrain baseType for an
2394 * Idx bytecode.
2396 GuardConstraint idxBaseConstraint(Type baseType, Type keyType,
2397 bool& useVec, bool& useDict) {
2398 if (baseType < TObj && baseType.clsSpec()) {
2399 auto const cls = baseType.clsSpec().cls();
2401 // Vector is only usable with int keys, so we can only optimize for
2402 // Vector if the key is an Int
2403 useVec = (collections::isType(cls, CollectionType::Vector) ||
2404 collections::isType(cls, CollectionType::ImmVector)) &&
2405 keyType <= TInt;
2407 useDict = collections::isType(cls, CollectionType::Map) ||
2408 collections::isType(cls, CollectionType::ImmMap) ||
2409 collections::isType(cls, CollectionType::Set) ||
2410 collections::isType(cls, CollectionType::ImmSet);
2412 if (useVec || useDict) return GuardConstraint(cls);
2415 useVec = useDict = false;
2416 return DataTypeSpecific;
2419 //////////////////////////////////////////////////////////////////////
2423 void emitArrayIdx(IRGS& env) {
2424 auto const arrType = topC(env, BCSPRelOffset{2}, DataTypeGeneric)->type();
2425 if (arrType <= TVec) return implVecIdx(env, nullptr);
2426 if (arrType <= TDict) return implDictKeysetIdx(env, true, nullptr);
2427 if (arrType <= TKeyset) return implDictKeysetIdx(env, false, nullptr);
2428 if (arrType <= TClsMeth) PUNT(ArrayIdx_clsmeth);
2430 if (!(arrType <= TArr)) {
2431 // raise fatal
2432 interpOne(env, TCell, 3);
2433 return;
2436 implArrayIdx(env);
// Idx: like ArrayIdx but the base may also be an object or string. Stack
// layout (top down): default, key, base.
void emitIdx(IRGS& env) {
  auto const key = topC(env, BCSPRelOffset{1}, DataTypeGeneric);
  auto const base = topC(env, BCSPRelOffset{2}, DataTypeGeneric);
  auto const keyType = key->type();
  auto const baseType = base->type();

  if (baseType <= TVec) return implVecIdx(env, nullptr);
  if (baseType <= TDict) return implDictKeysetIdx(env, true, nullptr);
  if (baseType <= TKeyset) return implDictKeysetIdx(env, false, nullptr);

  // Null key, or a base that can't possibly be indexed: the default wins.
  if (keyType <= TNull || !baseType.maybe(TArr | TObj | TStr)) {
    auto const def = popC(env, DataTypeGeneric);
    // The pop constraints differ depending on which test sent us here: the
    // value that decided the outcome is constrained Specific, the other
    // stays Generic.
    popC(env, keyType <= TNull ? DataTypeSpecific : DataTypeGeneric);
    popC(env, keyType <= TNull ? DataTypeGeneric : DataTypeSpecific);
    push(env, def);
    decRef(env, base);
    decRef(env, key);
    return;
  }

  if (!(keyType <= TInt || keyType <= TStr)) {
    interpOne(env, TCell, 3);
    return;
  }

  if (baseType <= TArr) {
    implArrayIdx(env);
    return;
  }

  // Collection bases: specialize on the concrete collection class.
  bool useVec, useDict;
  auto const tc = idxBaseConstraint(baseType, keyType, useVec, useDict);
  if (useVec || useDict) {
    env.irb->constrainValue(base, tc);
    env.irb->constrainValue(key, DataTypeSpecific);

    if (useVec) {
      auto const vec = gen(env, LdColVec, base);
      implVecIdx(env, vec);
    } else {
      auto const dict = gen(env, LdColDict, base);
      implDictKeysetIdx(env, true, dict);
    }
    return;
  }

  // Anything else (strings, other objects): interpret.
  interpOne(env, TCell, 3);
}
// AKExists: array_key_exists() over arrays, hack arrays, and objects.
void emitAKExists(IRGS& env) {
  auto const arr = popC(env);
  auto key = popC(env);
  if (key->isA(TFunc) || key->isA(TCls)) PUNT(AKExists_func_cls_key);

  auto throwBadKey = [&] {
    // TODO(T11019533): Fix the underlying issue with unreachable code rather
    // than papering over it by pushing an unused value here.
    push(env, cns(env, false));
    decRef(env, arr);
    decRef(env, key);
    updateMarker(env);
    env.irb->exceptionStackBoundary();
    gen(env, ThrowInvalidArrayKey, arr, key);
  };

  // Packed arrays/vecs with an int key reduce to a bounds check. Note the key
  // is statically known (TInt) here, so it's never decref'd on this path.
  auto const check_packed = [&] {
    assertx(key->isA(TInt));

    auto const result = cond(
      env,
      [&](Block* taken) {
        gen(env, CheckPackedArrayDataBounds, taken, arr, key);
      },
      [&] { return cns(env, true); },
      [&] { return cns(env, false); }
    );
    push(env, result);
    decRef(env, arr);
  };

  if (arr->isA(TVec)) {
    // String keys never exist in a vec.
    if (key->isA(TStr)) {
      push(env, cns(env, false));
      decRef(env, arr);
      decRef(env, key);
      return;
    }
    if (key->isA(TInt)) {
      return check_packed();
    }
    return throwBadKey();
  }

  if (arr->isA(TDict) || arr->isA(TKeyset)) {
    if (!key->isA(TInt) && !key->isA(TStr)) {
      return throwBadKey();
    }
    auto const val = gen(
      env,
      arr->isA(TDict) ? AKExistsDict : AKExistsKeyset,
      arr,
      key
    );
    push(env, val);
    decRef(env, arr);
    decRef(env, key);
    return;
  }

  if (!arr->isA(TArr) && !arr->isA(TObj)) PUNT(AKExists_badArray);

  // PHP arrays coerce a null key to "" (with an optional HAC notice).
  if (key->isA(TInitNull) && arr->isA(TArr)) {
    if (checkHACArrayKeyCast()) {
      gen(
        env,
        RaiseHackArrCompatNotice,
        cns(
          env,
          makeStaticString(
            makeHackArrCompatImplicitArrayKeyMsg(uninit_variant.asTypedValue())
          )
        )
      );
    }

    key = cns(env, staticEmptyString());
  }

  if (!key->isA(TStr) && !key->isA(TInt)) PUNT(AKExists_badKey);

  // Vector collections with an int key: a range check on the count.
  if (arr->isA(TObj) && key->isA(TInt) &&
      collections::isType(arr->type().clsSpec().cls(), CollectionType::Vector,
                          CollectionType::ImmVector)) {
    auto const val =
      gen(env, CheckRange, key, gen(env, CountCollection, arr));
    push(env, val);
    decRef(env, arr);
    return;
  }

  if (arr->isA(TArr) && key->isA(TInt) &&
      arr->type().arrSpec().kind() == ArrayData::kPackedKind) {
    return check_packed();
  }

  auto const val =
    gen(env, arr->isA(TArr) ? AKExistsArr : AKExistsObj, arr, key);
  push(env, val);
  decRef(env, arr);
  decRef(env, key);
}
2590 //////////////////////////////////////////////////////////////////////
2592 void emitGetMemoKeyL(IRGS& env, NamedLocal loc) {
2593 DEBUG_ONLY auto const func = curFunc(env);
2594 assertx(func->isMemoizeWrapper());
2596 auto const value = ldLocWarn(
2597 env,
2598 loc,
2599 nullptr,
2600 DataTypeSpecific
2603 // Use the generic scheme, which is implemented by GetMemoKey. The simplifier
2604 // will catch any additional special cases.
2605 push(env, gen(env, GetMemoKey, value));
2608 namespace {
// Shared lowering for MemoGet/MemoGetEager. Reads the memoized value for the
// current wrapper (instance / LSB / static slot or cache), jumping to
// `notfoundOff' when absent. For async wrappers with `suspendedOff' set, the
// TV aux bit distinguishes an eagerly-available result from a suspended one.
void memoGetImpl(IRGS& env,
                 Offset notfoundOff,
                 Offset suspendedOff,
                 LocalRange keys) {
  assertx(curFunc(env)->isMemoizeWrapper());
  assertx(keys.first + keys.count <= curFunc(env)->numLocals());
  assertx(suspendedOff == kInvalidOffset || curFunc(env)->isAsyncFunction());

  // Record whether each key local is a string (true) or int (false).
  CompactVector<bool> types;
  for (size_t i = 0; i < keys.count; ++i) {
    auto const type = env.irb->local(i + keys.first, DataTypeSpecific).type;
    if (type <= TStr) {
      types.emplace_back(true);
    } else if (type <= TInt) {
      types.emplace_back(false);
    } else {
      // Let it fatal from the interpreter
      PUNT(MemoGet);
    }
  }

  auto const notFound = getBlock(env, bcOff(env) + notfoundOff);
  assertx(notFound != nullptr);

  auto const func = curFunc(env);

  auto const loadAux = suspendedOff != kInvalidOffset;

  auto const val = [&]{
    // Any value we get from memoization must be the same type we return from
    // this function. If we need to load the aux field, force the type to be
    // InitCell so that we actually load the type. We'll assert the proper type
    // once we've checked aux.
    auto const retTy = loadAux
      ? TInitCell
      : typeFromRAT(func->repoReturnType(), curClass(env)) & TInitCell;

    if (func->isMethod() && !func->isStatic()) {
      // Instance memoization: slots hang off $this.
      auto const cls = func->cls();
      assertx(cls != nullptr);
      assertx(cls->hasMemoSlots());

      auto const this_ = checkAndLoadThis(env);
      if (!this_->isA(Type::SubObj(cls))) PUNT(MemoGet);

      auto const memoInfo = cls->memoSlotForFunc(func->getFuncId());

      // No keys and an unshared slot: the slot holds the value directly.
      if (keys.count == 0 && !memoInfo.second) {
        return gen(
          env,
          MemoGetInstanceValue,
          MemoValueInstanceData { memoInfo.first, func, folly::none, loadAux },
          notFound,
          retTy,
          this_
        );
      }

      return gen(
        env,
        MemoGetInstanceCache,
        MemoCacheInstanceData {
          memoInfo.first,
          keys,
          types.data(),
          func,
          memoInfo.second,
          folly::none,
          loadAux
        },
        notFound,
        retTy,
        fp(env),
        this_
      );
    }

    if (func->isMemoizeWrapperLSB()) {
      /* For LSB memoization, we need the LSB class */
      auto const lsbCls = ldCtxCls(env);
      if (keys.count > 0) {
        return gen(
          env,
          MemoGetLSBCache,
          MemoCacheStaticData {
            func,
            keys,
            types.data(),
            folly::none,
            loadAux
          },
          notFound,
          retTy,
          fp(env),
          lsbCls
        );
      }
      return gen(
        env,
        MemoGetLSBValue,
        MemoValueStaticData { func, folly::none, loadAux },
        notFound,
        retTy,
        lsbCls
      );
    }

    /* Static (non-LSB) Memoization */
    if (keys.count > 0) {
      return gen(
        env,
        MemoGetStaticCache,
        MemoCacheStaticData { func, keys, types.data(), folly::none, loadAux },
        notFound,
        retTy,
        fp(env)
      );
    }
    return gen(
      env,
      MemoGetStaticValue,
      MemoValueStaticData { func, folly::none, loadAux },
      notFound,
      retTy
    );
  }();

  if (!loadAux) {
    pushIncRef(env, val);
    return;
  }

  // Check the aux bit (bit 31): set means the value is an eager result and we
  // can jump to `suspendedOff'... cleared means we fall through with the
  // awaited type.
  ifThenElse(
    env,
    [&] (Block* taken) {
      auto const aux = gen(env, LdTVAux, LdTVAuxData {}, val);
      auto const tst = gen(env, AndInt, aux, cns(env, 1u << 31));
      gen(env, JmpZero, taken, tst);
    },
    [&] {
      // Aux bit set: value has the awaited return type.
      pushIncRef(
        env,
        gen(
          env,
          AssertType,
          typeFromRAT(func->repoAwaitedReturnType(), curClass(env)) & TInitCell,
          val
        )
      );
    },
    [&] {
      hint(env, Block::Hint::Unlikely);
      pushIncRef(
        env,
        gen(
          env,
          AssertType,
          typeFromRAT(func->repoReturnType(), curClass(env)) & TInitCell,
          val
        )
      );
      jmpImpl(env, bcOff(env) + suspendedOff);
    }
  );
}
2778 void emitMemoGet(IRGS& env, Offset notfoundOff, LocalRange keys) {
2779 memoGetImpl(env, notfoundOff, kInvalidOffset, keys);
2782 void emitMemoGetEager(IRGS& env,
2783 Offset notfoundOff,
2784 Offset suspendedOff,
2785 LocalRange keys) {
2786 assertx(curFunc(env)->isAsyncFunction());
2787 assertx(resumeMode(env) == ResumeMode::None);
2788 memoGetImpl(env, notfoundOff, suspendedOff, keys);
2791 namespace {
// Shared lowering for MemoSet/MemoSetEager: store the value on top of the
// stack into the appropriate memo slot/cache for the current wrapper. The
// value is left on the stack.
void memoSetImpl(IRGS& env, LocalRange keys, bool eager) {
  assertx(curFunc(env)->isMemoizeWrapper());
  assertx(keys.first + keys.count <= curFunc(env)->numLocals());
  assertx(!eager || curFunc(env)->isAsyncFunction());

  // Record whether each key local is a string (true) or int (false).
  CompactVector<bool> types;
  for (size_t i = 0; i < keys.count; ++i) {
    auto const type = env.irb->local(i + keys.first, DataTypeSpecific).type;
    if (type <= TStr) {
      types.emplace_back(true);
    } else if (type <= TInt) {
      types.emplace_back(false);
    } else {
      // Let it fatal from the interpreter
      PUNT(MemoSet);
    }
  }

  // Peek (not pop) the value to store; it stays on the stack.
  auto const ldVal = [&] (DataTypeCategory tc) {
    return gen(
      env,
      AssertType,
      TInitCell,
      topC(env, BCSPRelOffset{ 0 }, tc)
    );
  };

  auto const func = curFunc(env);

  // For async wrappers, record whether this is an eager-result store.
  auto const asyncEager = [&] () -> folly::Optional<bool> {
    if (!func->isAsyncFunction()) return folly::none;
    return eager;
  }();

  if (func->isMethod() && !func->isStatic()) {
    // Instance memoization: slots hang off $this.
    auto const cls = func->cls();
    assertx(cls != nullptr);
    assertx(cls->hasMemoSlots());

    auto const this_ = checkAndLoadThis(env);
    if (!this_->isA(Type::SubObj(cls))) PUNT(MemoSet);

    auto const memoInfo = cls->memoSlotForFunc(func->getFuncId());

    if (keys.count == 0 && !memoInfo.second) {
      gen(
        env,
        MemoSetInstanceValue,
        MemoValueInstanceData { memoInfo.first, func, asyncEager, false },
        this_,
        ldVal(DataTypeCountness)
      );
      return;
    }

    gen(
      env,
      MemoSetInstanceCache,
      MemoCacheInstanceData {
        memoInfo.first,
        keys,
        types.data(),
        func,
        memoInfo.second,
        asyncEager,
        false
      },
      fp(env),
      this_,
      ldVal(DataTypeGeneric)
    );
    return;
  }

  if (func->isMemoizeWrapperLSB()) {
    /* For LSB memoization, we need the LSB class */
    auto const lsbCls = ldCtxCls(env);
    if (keys.count > 0) {
      gen(
        env,
        MemoSetLSBCache,
        MemoCacheStaticData { func, keys, types.data(), asyncEager, false },
        fp(env),
        lsbCls,
        ldVal(DataTypeGeneric)
      );
      return;
    }

    gen(
      env,
      MemoSetLSBValue,
      MemoValueStaticData { func, asyncEager, false },
      ldVal(DataTypeCountness),
      lsbCls
    );
    return;
  }

  /* Static (non-LSB) memoization */
  if (keys.count > 0) {
    gen(
      env,
      MemoSetStaticCache,
      MemoCacheStaticData { func, keys, types.data(), asyncEager, false },
      fp(env),
      ldVal(DataTypeGeneric)
    );
    return;
  }

  gen(
    env,
    MemoSetStaticValue,
    MemoValueStaticData { func, asyncEager, false },
    ldVal(DataTypeCountness)
  );
}
2913 void emitMemoSet(IRGS& env, LocalRange keys) {
2914 memoSetImpl(env, keys, false);
2917 void emitMemoSetEager(IRGS& env, LocalRange keys) {
2918 assertx(curFunc(env)->isAsyncFunction());
2919 assertx(resumeMode(env) == ResumeMode::None);
2920 memoSetImpl(env, keys, true);
2923 //////////////////////////////////////////////////////////////////////
// Silence: implements the error-suppression operator's bracketing ops. Start
// saves the current error level into the local (after zeroing it); End
// restores the level saved in the local.
void emitSilence(IRGS& env, Id localId, SilenceOp subop) {
  // We can't generate direct StLoc and LdLocs in pseudomains (violates an IR
  // invariant).
  if (curFunc(env)->isPseudoMain()) PUNT(PseudoMain-Silence);

  switch (subop) {
  case SilenceOp::Start:
    // We assume that whatever is in the local is dead and doesn't need to be
    // refcounted before being overwritten.
    gen(env, AssertLoc, TUncounted, LocalId(localId), fp(env));
    gen(env, StLoc, LocalId(localId), fp(env), gen(env, ZeroErrorLevel));
    break;
  case SilenceOp::End:
    {
      // The local must hold the error level stored by the matching Start.
      gen(env, AssertLoc, TInt, LocalId(localId), fp(env));
      auto const level = ldLoc(env, localId, makeExit(env), DataTypeGeneric);
      gen(env, RestoreErrorLevel, level);
    }
    break;
  }
}
2947 //////////////////////////////////////////////////////////////////////