1 /*
2 +----------------------------------------------------------------------+
3 | HipHop for PHP |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
16 #include "hphp/runtime/vm/jit/translator.h"
18 // Translator front-end: parse instruction stream into basic blocks, decode
19 // and normalize instructions. Propagate run-time type info to instructions
20 // to annotate their inputs and outputs with types.
21 #include <cinttypes>
22 #include <assert.h>
23 #include <stdint.h>
24 #include <stdarg.h>
26 #include <vector>
27 #include <string>
29 #include "folly/Conv.h"
31 #include "hphp/util/trace.h"
32 #include "hphp/util/biased-coin.h"
33 #include "hphp/util/map-walker.h"
34 #include "hphp/runtime/base/file-repository.h"
35 #include "hphp/runtime/base/runtime-option.h"
36 #include "hphp/runtime/base/stats.h"
37 #include "hphp/runtime/base/types.h"
38 #include "hphp/runtime/ext/ext_continuation.h"
39 #include "hphp/runtime/ext/ext_collections.h"
40 #include "hphp/runtime/vm/hhbc.h"
41 #include "hphp/runtime/vm/bytecode.h"
42 #include "hphp/runtime/vm/jit/annotation.h"
43 #include "hphp/runtime/vm/jit/hhbc-translator.h"
44 #include "hphp/runtime/vm/jit/ir-unit.h"
45 #include "hphp/runtime/vm/jit/ir-translator.h"
46 #include "hphp/runtime/vm/jit/normalized-instruction.h"
47 #include "hphp/runtime/vm/jit/region-selection.h"
48 #include "hphp/runtime/base/rds.h"
49 #include "hphp/runtime/vm/jit/tracelet.h"
50 #include "hphp/runtime/vm/jit/translator-inline.h"
51 #include "hphp/runtime/vm/jit/translator-x64.h"
52 #include "hphp/runtime/vm/jit/type.h"
53 #include "hphp/runtime/vm/pendq.h"
54 #include "hphp/runtime/vm/treadmill.h"
55 #include "hphp/runtime/vm/type-profile.h"
56 #include "hphp/runtime/vm/runtime.h"
58 #define KindOfUnknown DontUseKindOfUnknownInThisFile
59 #define KindOfInvalid DontUseKindOfInvalidInThisFile
61 namespace HPHP {
62 namespace Transl {
64 using namespace HPHP;
65 using HPHP::JIT::Type;
66 using HPHP::JIT::HhbcTranslator;
68 TRACE_SET_MOD(trans)
70 static __thread BiasedCoin *dbgTranslateCoin;
71 Translator* g_translator;
72 Lease Translator::s_writeLease;
74 struct TraceletContext {
75 TraceletContext() = delete;
77 TraceletContext(Tracelet* t, const TypeMap& initialTypes)
78 : m_t(t)
79 , m_numJmps(0)
80 , m_aliasTaint(false)
81 , m_varEnvTaint(false)
83 for (auto& kv : initialTypes) {
84 TRACE(1, "%s\n",
85 Trace::prettyNode("InitialType", kv.first, kv.second).c_str());
86 m_currentMap[kv.first] = t->newDynLocation(kv.first, kv.second);
90 Tracelet* m_t;
91 ChangeMap m_currentMap;
92 DepMap m_dependencies;
93 DepMap m_resolvedDeps; // dependencies resolved by static analysis
94 LocationSet m_changeSet;
95 LocationSet m_deletedSet;
96 int m_numJmps;
97 bool m_aliasTaint;
98 bool m_varEnvTaint;
100 RuntimeType currentType(const Location& l) const;
101 DynLocation* recordRead(const InputInfo& l, bool useHHIR,
102 DataType staticType = KindOfAny);
103 void recordWrite(DynLocation* dl);
104 void recordDelete(const Location& l);
105 void recordJmp();
106 void aliasTaint();
107 void varEnvTaint();
110 void InstrStream::append(NormalizedInstruction* ni) {
111 if (last) {
112 assert(first);
113 last->next = ni;
114 ni->prev = last;
115 ni->next = nullptr;
116 last = ni;
117 return;
119 assert(!first);
120 first = ni;
121 last = ni;
122 ni->prev = nullptr;
123 ni->next = nullptr;
126 void InstrStream::remove(NormalizedInstruction* ni) {
127 if (ni->prev) {
128 ni->prev->next = ni->next;
129 } else {
130 first = ni->next;
132 if (ni->next) {
133 ni->next->prev = ni->prev;
134 } else {
135 last = ni->prev;
137 ni->prev = nullptr;
138 ni->next = nullptr;
142 * locPhysicalOffset --
144 * Return offset, in cells, of this location from its base
145 * pointer. It needs a function descriptor to see how many locals
146 * to skip for iterators; if the current frame pointer is not the context
147 * you're looking for, be sure to pass in a non-default f.
149 int locPhysicalOffset(Location l, const Func* f) {
150 f = f ? f : liveFunc();
151 assert_not_implemented(l.space == Location::Stack ||
152 l.space == Location::Local ||
153 l.space == Location::Iter);
154 int localsToSkip = l.space == Location::Iter ? f->numLocals() : 0;
155 int iterInflator = l.space == Location::Iter ? kNumIterCells : 1;
156 return -((l.offset + 1) * iterInflator + localsToSkip);
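// For illustration: a Local or Stack location at offset 2 is -(2 + 1) = -3
// cells from its base pointer, while an Iter at offset 1 in a function with
// four locals is -((1 + 1) * kNumIterCells + 4) cells from the frame pointer.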
159 RuntimeType Translator::liveType(Location l,
160 const Unit& u,
161 bool specialize) {
162 Cell *outer;
163 switch (l.space) {
164 case Location::Stack:
165 // Stack accesses must be to addresses pushed before
166 // translation time; if they are to addresses pushed after,
167 // they should be hitting in the changemap.
168 assert(locPhysicalOffset(l) >= 0);
169 // fallthru
170 case Location::Local: {
171 Cell *base;
172 int offset = locPhysicalOffset(l);
173 base = l.space == Location::Stack ? vmsp() : vmfp();
174 outer = &base[offset];
175 } break;
176 case Location::Iter: {
177 const Iter *it = frame_iter(liveFrame(), l.offset);
178 TRACE(1, "Iter input: fp %p, iter %p, offset %" PRId64 "\n", vmfp(),
179 it, l.offset);
180 return RuntimeType(it);
181 } break;
182 case Location::Litstr: {
183 return RuntimeType(u.lookupLitstrId(l.offset));
184 } break;
185 case Location::Litint: {
186 return RuntimeType(l.offset);
187 } break;
188 case Location::This: {
189 return outThisObjectType();
190 } break;
191 default: {
192 not_reached();
195 assert(IS_REAL_TYPE(outer->m_type));
196 return liveType(outer, l, specialize);
199 RuntimeType
200 Translator::liveType(const Cell* outer, const Location& l, bool specialize) {
201 always_assert(analysisDepth() == 0);
203 if (!outer) {
204 // An undefined global; starts out as a variant null
205 return RuntimeType(KindOfRef, KindOfNull);
207 DataType outerType = (DataType)outer->m_type;
208 assert(IS_REAL_TYPE(outerType));
209 DataType valueType = outerType;
210 DataType innerType = KindOfNone;
211 const Cell* valCell = outer;
212 if (outerType == KindOfRef) {
213 // Variant. Pick up the inner type, too.
214 valCell = outer->m_data.pref->tv();
215 innerType = valCell->m_type;
216 assert(IS_REAL_TYPE(innerType));
217 valueType = innerType;
218 assert(innerType != KindOfRef);
219 FTRACE(2, "liveType {}: Var -> {}\n", l.pretty(), tname(innerType));
220 } else {
221 FTRACE(2, "liveType {}: {}\n", l.pretty(), tname(outerType));
223 RuntimeType retval = RuntimeType(outerType, innerType);
224 const Class *klass = nullptr;
225 if (specialize) {
226 // Only infer the class/array kind if specialization requested
227 if (valueType == KindOfObject) {
228 klass = valCell->m_data.pobj->getVMClass();
229 if (klass != nullptr) {
230 retval = retval.setKnownClass(klass);
232 } else if (valueType == KindOfArray) {
233 ArrayData::ArrayKind arrayKind = valCell->m_data.parr->kind();
234 retval = retval.setArrayKind(arrayKind);
237 return retval;
240 RuntimeType Translator::outThisObjectType() {
242 * Use the current method's context class (ctx) as a constraint.
243 * For instance methods, if $this is non-null, we are guaranteed
244 * that $this is an instance of ctx or a class derived from
245 * ctx. Zend allows this assumption to be violated but we have
246 * deliberately chosen to diverge from them here.
248 * Note that if analysisDepth() != 0 we'll have !hasThis() here,
249 * because our fake ActRec has no $this, but we'll still return the
250 * correct object type because arGetContextClass() looks at
251 * ar->m_func's class for methods.
253 const Class *ctx = liveFunc()->isMethod() ?
254 arGetContextClass(liveFrame()) : nullptr;
255 if (ctx) {
256 assert(!liveFrame()->hasThis() ||
257 liveFrame()->getThis()->getVMClass()->classof(ctx));
258 TRACE(2, "OutThisObject: derived from Class \"%s\"\n",
259 ctx->name()->data());
260 return RuntimeType(KindOfObject, KindOfNone, ctx);
262 return RuntimeType(KindOfObject, KindOfNone);
265 bool Translator::liveFrameIsPseudoMain() {
266 ActRec* ar = (ActRec*)vmfp();
267 return ar->hasVarEnv() && ar->getVarEnv()->isGlobalScope();
270 static int64_t typeToMask(DataType t) {
271 return (t == KindOfAny) ? 1 : (1 << (1 + getDataTypeIndex(t)));
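// KindOfAny gets bit 0; every concrete DataType gets its own bit at position
// (1 + getDataTypeIndex(t)). This lets an InferenceRule's mask be the OR of
// the masks of all input types it matches.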
274 struct InferenceRule {
275 int64_t mask;
276 DataType result;
279 static DataType inferType(const InferenceRule* rules,
280 const vector<DynLocation*>& inputs) {
281 int inputMask = 0;
282 // We generate the inputMask by ORing together the mask for each input's
283 // type.
284 for (unsigned int i = 0; i < inputs.size(); ++i) {
285 DataType inType = inputs[i]->rtt.valueType();
286 inputMask |= typeToMask(inType);
288 // This loop checks each rule in order, looking for the first rule that
289 // applies. Note that we assume there's a "catch-all" at the end.
290 for (unsigned int i = 0; ; ++i) {
291 if (rules[i].mask == 0 || (rules[i].mask & inputMask) != 0) {
292 return rules[i].result;
295 // We return KindOfAny by default if none of the rules applied.
296 return KindOfAny;
300 * Inference rules used for OutArith. These are applied in order
301 * row-by-row.
304 #define TYPE_MASK(name) \
305 static const int64_t name ## Mask = typeToMask(KindOf ## name);
306 TYPE_MASK(Any);
307 TYPE_MASK(Uninit);
308 TYPE_MASK(Null);
309 TYPE_MASK(Boolean);
310 static const int64_t IntMask = typeToMask(KindOfInt64);
311 TYPE_MASK(Double);
312 static const int64_t StringMask = typeToMask(KindOfString) |
313 typeToMask(KindOfStaticString);
314 TYPE_MASK(Array);
315 TYPE_MASK(Object);
317 static const InferenceRule ArithRules[] = {
318 { DoubleMask, KindOfDouble },
319 { ArrayMask, KindOfArray },
320 // If one of the inputs is known to be a String or if one of the input
321 // types is unknown, the output type is Unknown
322 { StringMask | AnyMask, KindOfAny },
323 // Default to Int64
324 { 0, KindOfInt64 },
327 static const int NumArithRules = sizeof(ArithRules) / sizeof(InferenceRule);
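// For illustration, the ArithRules above are applied in order: Int + Double
// matches the Double rule and yields KindOfDouble; Array + Array yields
// KindOfArray; a String or unknown input (absent a Double or Array) hits the
// StringMask | AnyMask rule and yields KindOfAny; everything else (e.g.
// Int + Int, Bool + Null) falls through to the KindOfInt64 catch-all.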
330 * Returns the type of the output of a bitwise operator on the two
331 * DynLocs. The only case that doesn't result in KindOfInt64 is String
332 * op String.
334 static const InferenceRule BitOpRules[] = {
335 { UninitMask | NullMask | BooleanMask |
336 IntMask | DoubleMask | ArrayMask | ObjectMask,
337 KindOfInt64 },
338 { StringMask, KindOfString },
339 { 0, KindOfAny },
342 static RuntimeType bitOpType(DynLocation* a, DynLocation* b) {
343 vector<DynLocation*> ins;
344 ins.push_back(a);
345 if (b) ins.push_back(b);
346 return RuntimeType(inferType(BitOpRules, ins));
349 static uint32_t m_w = 1; /* must not be zero */
350 static uint32_t m_z = 1; /* must not be zero */
352 static uint32_t get_random()
354 m_z = 36969 * (m_z & 65535) + (m_z >> 16);
355 m_w = 18000 * (m_w & 65535) + (m_w >> 16);
356 return (m_z << 16) + m_w; /* 32-bit result */
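// A simple multiply-with-carry style generator; it is only consulted below to
// randomize predicted output types when
// RuntimeOption::EvalJitStressTypePredPercent is enabled.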
359 static const int kTooPolyPred = 2;
360 static const int kTooPolyRet = 6;
362 bool
363 isNormalPropertyAccess(const NormalizedInstruction& i,
364 int propInput,
365 int objInput) {
366 const LocationCode lcode = i.immVec.locationCode();
367 return
368 i.immVecM.size() == 1 &&
369 (lcode == LC || lcode == LL || lcode == LR || lcode == LH) &&
370 mcodeMaybePropName(i.immVecM[0]) &&
371 i.inputs[propInput]->isString() &&
372 i.inputs[objInput]->valueType() == KindOfObject;
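// Returns true if any member of the instruction's M-vector is not a property
// access whose slot can be resolved to a fixed offset at translation time
// (via getPropertyOffset below).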
375 bool
376 mInstrHasUnknownOffsets(const NormalizedInstruction& ni, Class* context) {
377 const MInstrInfo& mii = getMInstrInfo(ni.mInstrOp());
378 unsigned mi = 0;
379 unsigned ii = mii.valCount() + 1;
380 for (; mi < ni.immVecM.size(); ++mi) {
381 MemberCode mc = ni.immVecM[mi];
382 if (mcodeMaybePropName(mc)) {
383 const Class* cls = nullptr;
384 if (getPropertyOffset(ni, context, cls, mii, mi, ii).offset == -1) {
385 return true;
387 ++ii;
388 } else {
389 return true;
393 return false;
396 PropInfo getPropertyOffset(const NormalizedInstruction& ni,
397 Class* ctx,
398 const Class*& baseClass,
399 const MInstrInfo& mii,
400 unsigned mInd, unsigned iInd) {
401 if (mInd == 0) {
402 auto const baseIndex = mii.valCount();
403 baseClass = ni.inputs[baseIndex]->rtt.isObject()
404 ? ni.inputs[baseIndex]->rtt.valueClass()
405 : nullptr;
406 } else {
407 baseClass = ni.immVecClasses[mInd - 1];
409 if (!baseClass) return PropInfo();
411 if (!ni.inputs[iInd]->rtt.isString()) {
412 return PropInfo();
414 auto* const name = ni.inputs[iInd]->rtt.valueString();
415 if (!name) return PropInfo();
417 bool accessible;
418 // If we are not in repo-authoritative mode, we need to check that
419 // baseClass cannot change in between requests
420 if (!RuntimeOption::RepoAuthoritative ||
421 !(baseClass->preClass()->attrs() & AttrUnique)) {
422 if (!ctx) return PropInfo();
423 if (!ctx->classof(baseClass)) {
424 if (baseClass->classof(ctx)) {
425 // baseClass can change on us in between requests, but since
426 // ctx is an ancestor of baseClass we can make the weaker
427 // assumption that the object is an instance of ctx
428 baseClass = ctx;
429 } else {
430 // baseClass can change on us in between requests and it is
431 // not related to ctx, so bail out
432 return PropInfo();
436 // Look up the index of the property based on ctx and baseClass
437 Slot idx = baseClass->getDeclPropIndex(ctx, name, accessible);
438 // If we couldn't find a property that is accessible in the current
439 // context, bail out
440 if (idx == kInvalidSlot || !accessible) {
441 return PropInfo();
443 // If it's a declared property we're good to go: even if a subclass
444 // redefines an accessible property with the same name it's guaranteed
445 // to be at the same offset
446 return PropInfo(
447 baseClass->declPropOffset(idx),
448 baseClass->declPropHphpcType(idx)
452 PropInfo getFinalPropertyOffset(const NormalizedInstruction& ni,
453 Class* context,
454 const MInstrInfo& mii) {
455 unsigned mInd = ni.immVecM.size() - 1;
456 unsigned iInd = mii.valCount() + 1 + mInd;
458 const Class* cls = nullptr;
459 return getPropertyOffset(ni, context, cls, mii, mInd, iInd);
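// predictMVec: guess the result type of a CGetM. If hphpc recorded a static
// type for the final property we trust it completely (weight 1.0); otherwise
// fall back to the runtime type profiler, keyed on the name of the last
// element or property in the vector.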
462 static std::pair<DataType,double>
463 predictMVec(const NormalizedInstruction* ni) {
464 auto info = getFinalPropertyOffset(*ni,
465 ni->func()->cls(),
466 getMInstrInfo(ni->mInstrOp()));
467 if (info.offset != -1 && info.hphpcType != KindOfNone) {
468 FTRACE(1, "prediction for CGetM prop: {}, hphpc\n",
469 int(info.hphpcType));
470 return std::make_pair(info.hphpcType, 1.0);
473 auto& immVec = ni->immVec;
474 StringData* name;
475 MemberCode mc;
476 if (immVec.decodeLastMember(ni->m_unit, name, mc)) {
477 auto pred = predictType(TypeProfileKey(mc, name));
478 TRACE(1, "prediction for CGetM %s named %s: %d, %f\n",
479 mc == MET ? "elt" : "prop",
480 name->data(),
481 pred.first,
482 pred.second);
483 return pred;
486 return std::make_pair(KindOfAny, 0.0);
490 * predictOutputs --
492 * Provide a best guess for the output type of this instruction.
494 static DataType
495 predictOutputs(SrcKey startSk,
496 const NormalizedInstruction* ni) {
497 if (!RuntimeOption::EvalJitTypePrediction) return KindOfAny;
499 if (RuntimeOption::EvalJitStressTypePredPercent &&
500 RuntimeOption::EvalJitStressTypePredPercent > int(get_random() % 100)) {
501 int dt;
502 while (true) {
503 dt = get_random() % (KindOfRef + 1);
504 switch (dt) {
505 case KindOfNull:
506 case KindOfBoolean:
507 case KindOfInt64:
508 case KindOfDouble:
509 case KindOfString:
510 case KindOfArray:
511 case KindOfObject:
512 case KindOfResource:
513 break;
514 // KindOfRef and KindOfUninit can't happen for lots of predicted
515 // types.
516 case KindOfRef:
517 case KindOfUninit:
518 default:
519 continue;
521 break;
523 return DataType(dt);
526 if (ni->op() == OpCns ||
527 ni->op() == OpCnsE ||
528 ni->op() == OpCnsU) {
529 StringData* sd = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
530 TypedValue* tv = Unit::lookupCns(sd);
531 if (tv) {
532 return tv->m_type;
536 if (ni->op() == OpMod) {
537 // x % 0 returns boolean false, so we don't know for certain, but it's
538 // probably an int.
539 return KindOfInt64;
542 if (ni->op() == OpSqrt) {
543 // sqrt returns a double, unless you pass something nasty to it.
544 return KindOfDouble;
547 if (ni->op() == OpDiv) {
548 // Integers can produce integers if there's no residue, but $i / $j in
549 // general produces a double. $i / 0 produces boolean false, so we have to
550 // actually check the result.
551 auto lhs = ni->inputs[0];
552 auto rhs = ni->inputs[1];
554 if (lhs->valueType() == KindOfDouble || rhs->valueType() == KindOfDouble) {
555 return KindOfDouble;
558 if (rhs->isLiteral()) {
559 if (ni->imm[1].u_I64A == 0) return KindOfBoolean;
560 if (ni->imm[1].u_I64A == 1) return lhs->valueType();
562 if (rhs->isLiteral()) {
563 return ni->imm[0].u_I64A % ni->imm[1].u_I64A ? KindOfDouble
564 : KindOfInt64;
568 return KindOfDouble;
571 if (ni->op() == OpAbs) {
572 if (ni->inputs[0]->valueType() == KindOfDouble) {
573 return KindOfDouble;
576 // some types can't be converted to integers and will return false here
577 if (ni->inputs[0]->valueType() == KindOfArray) {
578 return KindOfBoolean;
581 // If the type is not numeric we need to convert it to a numeric type;
582 // a string can be converted to an Int64 or a Double, but most other types
583 // will end up being integral.
584 return KindOfInt64;
587 if (ni->op() == OpClsCnsD) {
588 const NamedEntityPair& cne =
589 ni->unit()->lookupNamedEntityPairId(ni->imm[1].u_SA);
590 StringData* cnsName = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
591 Class* cls = cne.second->getCachedClass();
592 if (cls) {
593 DataType dt = cls->clsCnsType(cnsName);
594 if (dt != KindOfUninit) {
595 TRACE(1, "clscnsd: %s:%s prediction type %d\n",
596 cne.first->data(), cnsName->data(), dt);
597 return dt;
602 if (ni->op() == OpSetM) {
604 * SetM pushes null for certain rare combinations of input types, a string
605 * if the base was a string, or (most commonly) its first stack input. We
606 * mark the output as predicted here and do a very rough approximation of
607 * what really happens; most of the time the prediction will be a noop
608 * since MInstrTranslator side exits in all uncommon cases.
611 auto const inDt = ni->inputs[0]->rtt.valueType();
612 // If the base is a string, the output is probably a string, unless the
613 // member code is MW, in which case we're either going to fatal or promote
614 // the string to an array.
615 Type baseType;
616 switch (ni->immVec.locationCode()) {
617 case LGL: case LGC:
618 case LNL: case LNC:
619 case LSL: case LSC:
620 baseType = Type::Gen;
621 break;
623 default:
624 baseType = Type(ni->inputs[1]->rtt);
626 if (baseType.isString() && ni->immVecM.size() == 1) {
627 return ni->immVecM[0] == MW ? inDt : KindOfString;
630 // Otherwise, it's probably the input type.
631 return inDt;
634 auto const op = ni->op();
635 static const double kAccept = 1.0;
636 std::pair<DataType, double> pred = std::make_pair(KindOfAny, 0.0);
637 // Type predictions grow tracelets, and can have a side effect of making
638 // them combinatorially explode if they bring in preconditions that vary a
639 // lot. Get more conservative as evidence mounts that this is a
640 // polymorphic tracelet.
641 if (tx64->numTranslations(startSk) >= kTooPolyPred) return KindOfAny;
642 if (op == OpCGetS) {
643 const StringData* propName = ni->inputs[1]->rtt.valueStringOrNull();
644 if (propName) {
645 pred = predictType(TypeProfileKey(TypeProfileKey::StaticPropName,
646 propName));
647 TRACE(1, "prediction for static fields named %s: %d, %f\n",
648 propName->data(),
649 pred.first,
650 pred.second);
652 } else if (op == OpCGetM) {
653 pred = predictMVec(ni);
655 if (pred.second < kAccept) {
656 if (const StringData* invName = fcallToFuncName(ni)) {
657 pred = predictType(TypeProfileKey(TypeProfileKey::MethodName, invName));
658 TRACE(1, "prediction for methods named %s: %d, %f\n",
659 invName->data(),
660 pred.first,
661 pred.second);
664 if (pred.second >= kAccept) {
665 TRACE(1, "accepting prediction of type %d\n", pred.first);
666 assert(pred.first != KindOfUninit);
667 return pred.first;
669 return KindOfAny;
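// Note that kAccept is 1.0, so profiler-based predictions are effectively
// only accepted at full confidence; anything weaker falls through to
// KindOfAny and the output is left unpredicted.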
673 * Returns the type of the value a SetOpL will store into the local.
675 static RuntimeType setOpOutputType(NormalizedInstruction* ni,
676 const vector<DynLocation*>& inputs) {
677 assert(inputs.size() == 2);
678 const int kValIdx = 0;
679 const int kLocIdx = 1;
680 unsigned char op = ni->imm[1].u_OA;
681 DynLocation locLocation(inputs[kLocIdx]->location,
682 inputs[kLocIdx]->rtt.unbox());
683 assert(inputs[kLocIdx]->location.isLocal());
684 switch (op) {
685 case SetOpPlusEqual:
686 case SetOpMinusEqual:
687 case SetOpMulEqual: {
688 // Same as OutArith, except we have to fiddle with inputs a bit.
689 vector<DynLocation*> arithInputs;
690 arithInputs.push_back(&locLocation);
691 arithInputs.push_back(inputs[kValIdx]);
692 return RuntimeType(inferType(ArithRules, arithInputs));
694 case SetOpConcatEqual: return RuntimeType(KindOfString);
695 case SetOpDivEqual:
696 case SetOpModEqual: return RuntimeType(KindOfAny);
697 case SetOpAndEqual:
698 case SetOpOrEqual:
699 case SetOpXorEqual: return bitOpType(&locLocation, inputs[kValIdx]);
700 case SetOpSlEqual:
701 case SetOpSrEqual: return RuntimeType(KindOfInt64);
702 default:
703 not_reached();
707 static RuntimeType
708 getDynLocType(const SrcKey startSk,
709 NormalizedInstruction* ni,
710 InstrFlags::OutTypeConstraints constraint,
711 TransKind mode) {
712 using namespace InstrFlags;
713 auto const& inputs = ni->inputs;
714 assert(constraint != OutFInputL);
716 switch (constraint) {
717 #define CS(OutXLike, KindOfX) \
718 case OutXLike: \
719 return RuntimeType(KindOfX);
720 CS(OutInt64, KindOfInt64);
721 CS(OutBoolean, KindOfBoolean);
722 CS(OutDouble, KindOfDouble);
723 CS(OutString, KindOfString);
724 CS(OutNull, KindOfNull);
725 CS(OutUnknown, KindOfAny); // Subtle interaction with BB-breaking.
726 CS(OutFDesc, KindOfAny); // Unclear if OutFDesc has a purpose.
727 CS(OutArray, KindOfArray);
728 CS(OutObject, KindOfObject);
729 CS(OutResource, KindOfResource);
730 #undef CS
732 case OutCns: {
733 // If it's a system constant, burn in its type. Otherwise we have
734 // to accept prediction; use the translation-time value, or fall back
735 // to the targetcache if none exists.
736 StringData* sd = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
737 assert(sd);
738 const TypedValue* tv = Unit::lookupPersistentCns(sd);
739 if (tv) {
740 return RuntimeType(tv->m_type);
742 } // Fall through
743 case OutPred: {
744 // In TransProfile mode, disable type prediction to avoid side exits.
745 auto dt = mode == TransProfile ? KindOfAny : predictOutputs(startSk, ni);
746 if (dt != KindOfAny) ni->outputPredicted = true;
747 return RuntimeType(dt, dt == KindOfRef ? KindOfAny : KindOfNone);
750 case OutClassRef: {
751 Op op = Op(ni->op());
752 if ((op == OpAGetC && inputs[0]->isString())) {
753 const StringData* sd = inputs[0]->rtt.valueString();
754 if (sd) {
755 Class *klass = Unit::lookupUniqueClass(sd);
756 TRACE(3, "KindOfClass: derived class \"%s\" from string literal\n",
757 klass ? klass->preClass()->name()->data() : "NULL");
758 return RuntimeType(klass);
760 } else if (op == OpSelf) {
761 return RuntimeType(liveClass());
762 } else if (op == OpParent) {
763 Class* clss = liveClass();
764 if (clss != nullptr)
765 return RuntimeType(clss->parent());
767 return RuntimeType(KindOfClass);
770 case OutNullUninit: {
771 assert(ni->op() == OpNullUninit);
772 return RuntimeType(KindOfUninit);
775 case OutStringImm: {
776 assert(ni->op() == OpString);
777 StringData* sd = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
778 assert(sd);
779 return RuntimeType(sd);
782 case OutArrayImm: {
783 assert(ni->op() == OpArray);
784 ArrayData *ad = ni->m_unit->lookupArrayId(ni->imm[0].u_AA);
785 assert(ad);
786 return RuntimeType(ad);
789 case OutBooleanImm: {
790 assert(ni->op() == OpTrue || ni->op() == OpFalse);
791 return RuntimeType(ni->op() == OpTrue);
794 case OutThisObject: {
795 return Translator::outThisObjectType();
798 case OutVUnknown: {
799 return RuntimeType(KindOfRef, KindOfAny);
802 case OutArith: {
803 return RuntimeType(inferType(ArithRules, inputs));
806 case OutSameAsInput: {
808 * Relies closely on the order that inputs are pushed in
809 * getInputs(). (Pushing top of stack first for multi-stack
810 * consumers, stack elements before M-vectors and locals, etc.)
812 assert(inputs.size() >= 1);
813 auto op = ni->op();
814 ASSERT_NOT_IMPLEMENTED(
815 // Sets and binds that take multiple arguments have the rhs
816 // pushed first. In the case of the M-vector versions, the
817 // rhs comes before the M-vector elements.
818 op == OpSetL || op == OpSetN || op == OpSetG || op == OpSetS ||
819 op == OpBindL || op == OpBindG || op == OpBindS || op == OpBindN ||
820 op == OpBindM ||
821 // Dup takes a single element.
822 op == OpDup
825 const int idx = 0; // all currently supported cases.
827 if (debug) {
828 if (!inputs[idx]->rtt.isVagueValue()) {
829 if (op == OpBindG || op == OpBindN || op == OpBindS ||
830 op == OpBindM || op == OpBindL) {
831 assert(inputs[idx]->rtt.isRef() && !inputs[idx]->isLocal());
832 } else {
833 assert(inputs[idx]->rtt.valueType() ==
834 inputs[idx]->rtt.outerType());
838 return inputs[idx]->rtt;
841 case OutCInputL: {
842 assert(inputs.size() >= 1);
843 const DynLocation* in = inputs[inputs.size() - 1];
844 RuntimeType retval;
845 if (in->rtt.outerType() == KindOfUninit) {
846 // Locals can be KindOfUninit, so we need to convert
847 // this to KindOfNull
848 retval = RuntimeType(KindOfNull);
849 } else {
850 retval = in->rtt.unbox();
852 TRACE(2, "Input (%d, %d) -> (%d, %d)\n",
853 in->rtt.outerType(), in->rtt.innerType(),
854 retval.outerType(), retval.innerType());
855 return retval;
858 case OutIncDec: {
859 const RuntimeType &inRtt = ni->inputs[0]->rtt;
860 // TODO: instead of KindOfAny this should track the actual
861 // type we will get from interping a non-int IncDec.
862 return RuntimeType(IS_INT_TYPE(inRtt.valueType()) ?
863 KindOfInt64 : KindOfAny);
866 case OutStrlen: {
867 auto const& rtt = ni->inputs[0]->rtt;
868 return RuntimeType(rtt.isString() ? KindOfInt64 : KindOfAny);
871 case OutCInput: {
872 assert(inputs.size() >= 1);
873 const DynLocation* in = inputs[inputs.size() - 1];
874 if (in->rtt.outerType() == KindOfRef) {
875 return in->rtt.unbox();
877 return in->rtt;
880 case OutBitOp: {
881 assert(inputs.size() == 2 ||
882 (inputs.size() == 1 && ni->op() == OpBitNot));
883 if (inputs.size() == 2) {
884 return bitOpType(inputs[0], inputs[1]);
885 } else {
886 return bitOpType(inputs[0], nullptr);
890 case OutSetOp: {
891 return setOpOutputType(ni, inputs);
894 case OutVInput:
895 case OutVInputL:
896 case OutFInputL:
897 case OutFInputR:
898 case OutAsyncAwait:
899 return RuntimeType(KindOfAny);
901 case OutFPushCufSafe:
902 not_reached();
904 case OutNone: not_reached();
906 always_assert(false && "Invalid output type constraint");
910 * NB: this opcode structure is sparse; it cannot just be indexed by
911 * opcode.
913 using namespace InstrFlags;
914 static const struct {
915 Op op;
916 InstrInfo info;
917 } instrInfoSparse [] = {
919 // Op Inputs Outputs OutputTypes Stack delta
920 // -- ------ ------- ----------- -----------
922 /*** 1. Basic instructions ***/
924 { OpNop, {None, None, OutNone, 0 }},
925 { OpPopA, {Stack1, None, OutNone, -1 }},
926 { OpPopC, {Stack1|
927 DontGuardStack1, None, OutNone, -1 }},
928 { OpPopV, {Stack1|
929 DontGuardStack1|
930 IgnoreInnerType, None, OutNone, -1 }},
931 { OpPopR, {Stack1|
932 DontGuardStack1|
933 IgnoreInnerType, None, OutNone, -1 }},
934 { OpDup, {Stack1, StackTop2, OutSameAsInput, 1 }},
935 { OpBox, {Stack1, Stack1, OutVInput, 0 }},
936 { OpUnbox, {Stack1, Stack1, OutCInput, 0 }},
937 { OpBoxR, {Stack1, Stack1, OutVInput, 0 }},
938 { OpBoxRNop, {None, None, OutNone, 0 }},
939 { OpUnboxR, {Stack1, Stack1, OutCInput, 0 }},
940 { OpUnboxRNop, {None, None, OutNone, 0 }},
942 /*** 2. Literal and constant instructions ***/
944 { OpNull, {None, Stack1, OutNull, 1 }},
945 { OpNullUninit, {None, Stack1, OutNullUninit, 1 }},
946 { OpTrue, {None, Stack1, OutBooleanImm, 1 }},
947 { OpFalse, {None, Stack1, OutBooleanImm, 1 }},
948 { OpInt, {None, Stack1, OutInt64, 1 }},
949 { OpDouble, {None, Stack1, OutDouble, 1 }},
950 { OpString, {None, Stack1, OutStringImm, 1 }},
951 { OpArray, {None, Stack1, OutArrayImm, 1 }},
952 { OpNewArray, {None, Stack1, OutArray, 1 }},
953 { OpNewArrayReserve, {None, Stack1, OutArray, 1 }},
954 { OpNewPackedArray, {StackN, Stack1, OutArray, 0 }},
955 { OpNewStructArray, {StackN, Stack1, OutArray, 0 }},
956 { OpAddElemC, {StackTop3, Stack1, OutArray, -2 }},
957 { OpAddElemV, {StackTop3, Stack1, OutArray, -2 }},
958 { OpAddNewElemC, {StackTop2, Stack1, OutArray, -1 }},
959 { OpAddNewElemV, {StackTop2, Stack1, OutArray, -1 }},
960 { OpNewCol, {None, Stack1, OutObject, 1 }},
961 { OpColAddElemC, {StackTop3, Stack1, OutObject, -2 }},
962 { OpColAddNewElemC, {StackTop2, Stack1, OutObject, -1 }},
963 { OpCns, {None, Stack1, OutCns, 1 }},
964 { OpCnsE, {None, Stack1, OutCns, 1 }},
965 { OpCnsU, {None, Stack1, OutCns, 1 }},
966 { OpClsCns, {Stack1, Stack1, OutUnknown, 0 }},
967 { OpClsCnsD, {None, Stack1, OutPred, 1 }},
968 { OpFile, {None, Stack1, OutString, 1 }},
969 { OpDir, {None, Stack1, OutString, 1 }},
971 /*** 3. Operator instructions ***/
973 /* Binary string */
974 { OpConcat, {StackTop2, Stack1, OutString, -1 }},
975 /* Arithmetic ops */
976 { OpAbs, {Stack1, Stack1, OutPred, 0 }},
977 { OpAdd, {StackTop2, Stack1, OutArith, -1 }},
978 { OpSub, {StackTop2, Stack1, OutArith, -1 }},
979 { OpMul, {StackTop2, Stack1, OutArith, -1 }},
980 /* Div and mod might return boolean false. Sigh. */
981 { OpDiv, {StackTop2, Stack1, OutPred, -1 }},
982 { OpMod, {StackTop2, Stack1, OutPred, -1 }},
983 { OpSqrt, {Stack1, Stack1, OutPred, 0 }},
984 /* Logical ops */
985 { OpXor, {StackTop2, Stack1, OutBoolean, -1 }},
986 { OpNot, {Stack1, Stack1, OutBoolean, 0 }},
987 { OpSame, {StackTop2, Stack1, OutBoolean, -1 }},
988 { OpNSame, {StackTop2, Stack1, OutBoolean, -1 }},
989 { OpEq, {StackTop2, Stack1, OutBoolean, -1 }},
990 { OpNeq, {StackTop2, Stack1, OutBoolean, -1 }},
991 { OpLt, {StackTop2, Stack1, OutBoolean, -1 }},
992 { OpLte, {StackTop2, Stack1, OutBoolean, -1 }},
993 { OpGt, {StackTop2, Stack1, OutBoolean, -1 }},
994 { OpGte, {StackTop2, Stack1, OutBoolean, -1 }},
995 /* Bitwise ops */
996 { OpBitAnd, {StackTop2, Stack1, OutBitOp, -1 }},
997 { OpBitOr, {StackTop2, Stack1, OutBitOp, -1 }},
998 { OpBitXor, {StackTop2, Stack1, OutBitOp, -1 }},
999 { OpBitNot, {Stack1, Stack1, OutBitOp, 0 }},
1000 { OpShl, {StackTop2, Stack1, OutInt64, -1 }},
1001 { OpShr, {StackTop2, Stack1, OutInt64, -1 }},
1002 /* Cast instructions */
1003 { OpCastBool, {Stack1, Stack1, OutBoolean, 0 }},
1004 { OpCastInt, {Stack1, Stack1, OutInt64, 0 }},
1005 { OpCastDouble, {Stack1, Stack1, OutDouble, 0 }},
1006 { OpCastString, {Stack1, Stack1, OutString, 0 }},
1007 { OpCastArray, {Stack1, Stack1, OutArray, 0 }},
1008 { OpCastObject, {Stack1, Stack1, OutObject, 0 }},
1009 { OpInstanceOf, {StackTop2, Stack1, OutBoolean, -1 }},
1010 { OpInstanceOfD, {Stack1, Stack1, OutBoolean, 0 }},
1011 { OpPrint, {Stack1, Stack1, OutInt64, 0 }},
1012 { OpClone, {Stack1, Stack1, OutObject, 0 }},
1013 { OpExit, {Stack1, None, OutNone, -1 }},
1014 { OpFatal, {Stack1, None, OutNone, -1 }},
1016 /*** 4. Control flow instructions ***/
1018 { OpJmp, {None, None, OutNone, 0 }},
1019 { OpJmpZ, {Stack1, None, OutNone, -1 }},
1020 { OpJmpNZ, {Stack1, None, OutNone, -1 }},
1021 { OpSwitch, {Stack1, None, OutNone, -1 }},
1022 { OpSSwitch, {Stack1, None, OutNone, -1 }},
1024 * RetC and RetV are special. Their manipulation of the runtime stack is
1025 * outside the boundaries of the tracelet abstraction; since they always end
1026 * a basic block, they behave more like "glue" between BBs than the
1027 * instructions in the body of a BB.
1029 * RetC and RetV consume a value from the stack, and this value's type needs
1030 * to be known at compile-time.
1032 { OpRetC, {AllLocals, None, OutNone, 0 }},
1033 { OpRetV, {AllLocals, None, OutNone, 0 }},
1034 { OpThrow, {Stack1, None, OutNone, -1 }},
1035 { OpUnwind, {None, None, OutNone, 0 }},
1037 /*** 5. Get instructions ***/
1039 { OpCGetL, {Local, Stack1, OutCInputL, 1 }},
1040 { OpCGetL2, {Stack1|Local, StackIns1, OutCInputL, 1 }},
1041 { OpCGetL3, {StackTop2|Local, StackIns2, OutCInputL, 1 }},
1042 { OpPushL, {Local, Stack1|Local, OutCInputL, 1 }},
1043 { OpCGetN, {Stack1, Stack1, OutUnknown, 0 }},
1044 { OpCGetG, {Stack1, Stack1, OutUnknown, 0 }},
1045 { OpCGetS, {StackTop2, Stack1, OutPred, -1 }},
1046 { OpCGetM, {MVector, Stack1, OutPred, 1 }},
1047 { OpVGetL, {Local, Stack1|Local, OutVInputL, 1 }},
1048 { OpVGetN, {Stack1, Stack1|Local, OutVUnknown, 0 }},
1049 // TODO: In pseudo-main, the VGetG instruction invalidates what we know
1050 // about the types of the locals because it could cause any one of the
1051 // local variables to become "boxed". We need to add logic to tracelet
1052 // analysis to deal with this properly.
1053 { OpVGetG, {Stack1, Stack1, OutVUnknown, 0 }},
1054 { OpVGetS, {StackTop2, Stack1, OutVUnknown, -1 }},
1055 { OpVGetM, {MVector, Stack1|Local, OutVUnknown, 1 }},
1056 { OpAGetC, {Stack1, Stack1, OutClassRef, 0 }},
1057 { OpAGetL, {Local, Stack1, OutClassRef, 1 }},
1059 /*** 6. Isset, Empty, and type querying instructions ***/
1061 { OpAKExists, {StackTop2, Stack1, OutBoolean, -1 }},
1062 { OpIssetL, {Local, Stack1, OutBoolean, 1 }},
1063 { OpIssetN, {Stack1, Stack1, OutBoolean, 0 }},
1064 { OpIssetG, {Stack1, Stack1, OutBoolean, 0 }},
1065 { OpIssetS, {StackTop2, Stack1, OutBoolean, -1 }},
1066 { OpIssetM, {MVector, Stack1, OutBoolean, 1 }},
1067 { OpEmptyL, {Local, Stack1, OutBoolean, 1 }},
1068 { OpEmptyN, {Stack1, Stack1, OutBoolean, 0 }},
1069 { OpEmptyG, {Stack1, Stack1, OutBoolean, 0 }},
1070 { OpEmptyS, {StackTop2, Stack1, OutBoolean, -1 }},
1071 { OpEmptyM, {MVector, Stack1, OutBoolean, 1 }},
1072 { OpIsTypeC, {Stack1, Stack1, OutBoolean, 0 }},
1073 { OpIsTypeL, {Local, Stack1, OutBoolean, 1 }},
1075 /*** 7. Mutator instructions ***/
1077 { OpSetL, {Stack1|Local, Stack1|Local, OutSameAsInput, 0 }},
1078 { OpSetN, {StackTop2, Stack1|Local, OutSameAsInput, -1 }},
1079 { OpSetG, {StackTop2, Stack1, OutSameAsInput, -1 }},
1080 { OpSetS, {StackTop3, Stack1, OutSameAsInput, -2 }},
1081 { OpSetM, {MVector|Stack1, Stack1|Local, OutPred, 0 }},
1082 { OpSetWithRefLM,{MVector|Local , Local, OutNone, 0 }},
1083 { OpSetWithRefRM,{MVector|Stack1, Local, OutNone, -1 }},
1084 { OpSetOpL, {Stack1|Local, Stack1|Local, OutSetOp, 0 }},
1085 { OpSetOpN, {StackTop2, Stack1|Local, OutUnknown, -1 }},
1086 { OpSetOpG, {StackTop2, Stack1, OutUnknown, -1 }},
1087 { OpSetOpS, {StackTop3, Stack1, OutUnknown, -2 }},
1088 { OpSetOpM, {MVector|Stack1, Stack1|Local, OutUnknown, 0 }},
1089 { OpIncDecL, {Local, Stack1|Local, OutIncDec, 1 }},
1090 { OpIncDecN, {Stack1, Stack1|Local, OutUnknown, 0 }},
1091 { OpIncDecG, {Stack1, Stack1, OutUnknown, 0 }},
1092 { OpIncDecS, {StackTop2, Stack1, OutUnknown, -1 }},
1093 { OpIncDecM, {MVector, Stack1, OutUnknown, 1 }},
1094 { OpBindL, {Stack1|Local|
1095 IgnoreInnerType, Stack1|Local, OutSameAsInput, 0 }},
1096 { OpBindN, {StackTop2, Stack1|Local, OutSameAsInput, -1 }},
1097 { OpBindG, {StackTop2, Stack1, OutSameAsInput, -1 }},
1098 { OpBindS, {StackTop3, Stack1, OutSameAsInput, -2 }},
1099 { OpBindM, {MVector|Stack1, Stack1|Local, OutSameAsInput, 0 }},
1100 { OpUnsetL, {Local, Local, OutNone, 0 }},
1101 { OpUnsetN, {Stack1, Local, OutNone, -1 }},
1102 { OpUnsetG, {Stack1, None, OutNone, -1 }},
1103 { OpUnsetM, {MVector, Local, OutNone, 0 }},
1105 /*** 8. Call instructions ***/
1107 { OpFPushFunc, {Stack1, FStack, OutFDesc,
1108 kNumActRecCells - 1 }},
1109 { OpFPushFuncD, {None, FStack, OutFDesc,
1110 kNumActRecCells }},
1111 { OpFPushFuncU, {None, FStack, OutFDesc,
1112 kNumActRecCells }},
1113 { OpFPushObjMethod,
1114 {StackTop2, FStack, OutFDesc,
1115 kNumActRecCells - 2 }},
1116 { OpFPushObjMethodD,
1117 {Stack1, FStack, OutFDesc,
1118 kNumActRecCells - 1 }},
1119 { OpFPushClsMethod,
1120 {StackTop2, FStack, OutFDesc,
1121 kNumActRecCells - 2 }},
1122 { OpFPushClsMethodF,
1123 {StackTop2, FStack, OutFDesc,
1124 kNumActRecCells - 2 }},
1125 { OpFPushClsMethodD,
1126 {None, FStack, OutFDesc,
1127 kNumActRecCells }},
1128 { OpFPushCtor, {Stack1, Stack1|FStack,OutObject,
1129 kNumActRecCells }},
1130 { OpFPushCtorD, {None, Stack1|FStack,OutObject,
1131 kNumActRecCells + 1 }},
1132 { OpFPushCufIter,{None, FStack, OutFDesc,
1133 kNumActRecCells }},
1134 { OpFPushCuf, {Stack1, FStack, OutFDesc,
1135 kNumActRecCells - 1 }},
1136 { OpFPushCufF, {Stack1, FStack, OutFDesc,
1137 kNumActRecCells - 1 }},
1138 { OpFPushCufSafe,{StackTop2|DontGuardAny,
1139 StackTop2|FStack, OutFPushCufSafe,
1140 kNumActRecCells }},
1141 { OpFPassC, {FuncdRef, None, OutSameAsInput, 0 }},
1142 { OpFPassCW, {FuncdRef, None, OutSameAsInput, 0 }},
1143 { OpFPassCE, {FuncdRef, None, OutSameAsInput, 0 }},
1144 { OpFPassVNop, {None, None, OutNone, 0 }},
1145 { OpFPassV, {Stack1|FuncdRef, Stack1, OutUnknown, 0 }},
1146 { OpFPassR, {Stack1|FuncdRef, Stack1, OutFInputR, 0 }},
1147 { OpFPassL, {Local|FuncdRef, Stack1, OutFInputL, 1 }},
1148 { OpFPassN, {Stack1|FuncdRef, Stack1, OutUnknown, 0 }},
1149 { OpFPassG, {Stack1|FuncdRef, Stack1, OutFInputR, 0 }},
1150 { OpFPassS, {StackTop2|FuncdRef,
1151 Stack1, OutUnknown, -1 }},
1152 { OpFPassM, {MVector|FuncdRef, Stack1|Local, OutUnknown, 1 }},
1154 * FCall is special. Like the Ret* instructions, its manipulation of the
1155 * runtime stack is outside the boundaries of the tracelet abstraction.
1157 { OpFCall, {FStack, Stack1, OutPred, 0 }},
1158 { OpFCallArray, {FStack, Stack1, OutPred,
1159 -(int)kNumActRecCells }},
1160 // TODO: output type is known
1161 { OpFCallBuiltin,{BStackN, Stack1, OutPred, 0 }},
1162 { OpCufSafeArray,{StackTop3|DontGuardAny,
1163 Stack1, OutArray, -2 }},
1164 { OpCufSafeReturn,{StackTop3|DontGuardAny,
1165 Stack1, OutUnknown, -2 }},
1166 { OpDecodeCufIter,{Stack1, None, OutNone, -1 }},
1168 /*** 11. Iterator instructions ***/
1170 { OpIterInit, {Stack1, Local, OutUnknown, -1 }},
1171 { OpMIterInit, {Stack1, Local, OutUnknown, -1 }},
1172 { OpWIterInit, {Stack1, Local, OutUnknown, -1 }},
1173 { OpIterInitK, {Stack1, Local, OutUnknown, -1 }},
1174 { OpMIterInitK, {Stack1, Local, OutUnknown, -1 }},
1175 { OpWIterInitK, {Stack1, Local, OutUnknown, -1 }},
1176 { OpIterNext, {None, Local, OutUnknown, 0 }},
1177 { OpMIterNext, {None, Local, OutUnknown, 0 }},
1178 { OpWIterNext, {None, Local, OutUnknown, 0 }},
1179 { OpIterNextK, {None, Local, OutUnknown, 0 }},
1180 { OpMIterNextK, {None, Local, OutUnknown, 0 }},
1181 { OpWIterNextK, {None, Local, OutUnknown, 0 }},
1182 { OpIterFree, {None, None, OutNone, 0 }},
1183 { OpMIterFree, {None, None, OutNone, 0 }},
1184 { OpCIterFree, {None, None, OutNone, 0 }},
1185 { OpIterBreak, {None, None, OutNone, 0 }},
1187 /*** 12. Include, eval, and define instructions ***/
1189 { OpIncl, {Stack1, Stack1, OutUnknown, 0 }},
1190 { OpInclOnce, {Stack1, Stack1, OutUnknown, 0 }},
1191 { OpReq, {Stack1, Stack1, OutUnknown, 0 }},
1192 { OpReqOnce, {Stack1, Stack1, OutUnknown, 0 }},
1193 { OpReqDoc, {Stack1, Stack1, OutUnknown, 0 }},
1194 { OpEval, {Stack1, Stack1, OutUnknown, 0 }},
1195 { OpDefFunc, {None, None, OutNone, 0 }},
1196 { OpDefTypeAlias,{None, None, OutNone, 0 }},
1197 { OpDefCls, {None, None, OutNone, 0 }},
1198 { OpNopDefCls, {None, None, OutNone, 0 }},
1199 { OpDefCns, {Stack1, Stack1, OutBoolean, 0 }},
1201 /*** 13. Miscellaneous instructions ***/
1203 { OpThis, {None, Stack1, OutThisObject, 1 }},
1204 { OpBareThis, {None, Stack1, OutUnknown, 1 }},
1205 { OpCheckThis, {This, None, OutNone, 0 }},
1206 { OpInitThisLoc,
1207 {None, Local, OutUnknown, 0 }},
1208 { OpStaticLoc,
1209 {None, Stack1, OutBoolean, 1 }},
1210 { OpStaticLocInit,
1211 {Stack1, Local, OutVUnknown, -1 }},
1212 { OpCatch, {None, Stack1, OutObject, 1 }},
1213 { OpVerifyParamType,
1214 {Local, None, OutNone, 0 }},
1215 { OpClassExists, {StackTop2, Stack1, OutBoolean, -1 }},
1216 { OpInterfaceExists,
1217 {StackTop2, Stack1, OutBoolean, -1 }},
1218 { OpTraitExists, {StackTop2, Stack1, OutBoolean, -1 }},
1219 { OpSelf, {None, Stack1, OutClassRef, 1 }},
1220 { OpParent, {None, Stack1, OutClassRef, 1 }},
1221 { OpLateBoundCls,{None, Stack1, OutClassRef, 1 }},
1222 { OpNativeImpl, {None, None, OutNone, 0 }},
1223 { OpCreateCl, {BStackN, Stack1, OutObject, 1 }},
1224 { OpStrlen, {Stack1, Stack1, OutStrlen, 0 }},
1225 { OpIncStat, {None, None, OutNone, 0 }},
1226 { OpIdx, {StackTop3, Stack1, OutUnknown, -2 }},
1227 { OpArrayIdx, {StackTop3, Stack1, OutUnknown, -2 }},
1228 { OpFloor, {Stack1, Stack1, OutDouble, 0 }},
1229 { OpCeil, {Stack1, Stack1, OutDouble, 0 }},
1230 { OpAssertTL, {None, None, OutNone, 0 }},
1231 { OpAssertTStk, {None, None, OutNone, 0 }},
1232 { OpAssertObjL, {None, None, OutNone, 0 }},
1233 { OpAssertObjStk,{None, None, OutNone, 0 }},
1234 { OpPredictTL, {None, None, OutNone, 0 }},
1235 { OpPredictTStk, {None, None, OutNone, 0 }},
1236 { OpBreakTraceHint,{None, None, OutNone, 0 }},
1238 /*** 14. Continuation instructions ***/
1240 { OpCreateCont, {None, Stack1|Local, OutObject, 1 }},
1241 { OpContEnter, {Stack1, None, OutNone, -1 }},
1242 { OpUnpackCont, {None, StackTop2, OutInt64, 2 }},
1243 { OpContSuspend, {Stack1, None, OutNone, -1 }},
1244 { OpContSuspendK,{StackTop2, None, OutNone, -2 }},
1245 { OpContRetC, {Stack1, None, OutNone, -1 }},
1246 { OpContCheck, {None, None, OutNone, 0 }},
1247 { OpContRaise, {None, None, OutNone, 0 }},
1248 { OpContValid, {None, Stack1, OutBoolean, 1 }},
1249 { OpContKey, {None, Stack1, OutUnknown, 1 }},
1250 { OpContCurrent, {None, Stack1, OutUnknown, 1 }},
1251 { OpContStopped, {None, None, OutNone, 0 }},
1252 { OpContHandle, {Stack1, None, OutNone, -1 }},
1254 /*** 15. Async functions instructions ***/
1256 { OpAsyncAwait, {Stack1, StackTop2, OutAsyncAwait, 1 }},
1257 { OpAsyncESuspend,
1258 {Stack1, Stack1|Local, OutObject, 0 }},
1259 { OpAsyncWrapResult,
1260 {Stack1, Stack1, OutObject, 0 }},
1261 { OpAsyncWrapException,
1262 {Stack1, Stack1, OutObject, 0 }},
1265 static hphp_hash_map<Op, InstrInfo> instrInfo;
1266 static bool instrInfoInited;
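// initInstrInfo flattens the sparse table above into the instrInfo hash map
// the first time it runs; getInstrInfo asserts that this has already happened
// before doing a lookup.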
1267 static void initInstrInfo() {
1268 if (!instrInfoInited) {
1269 for (size_t i = 0; i < sizeof(instrInfoSparse) / sizeof(instrInfoSparse[0]);
1270 i++) {
1271 instrInfo[instrInfoSparse[i].op] = instrInfoSparse[i].info;
1274 instrInfoInited = true;
1278 const InstrInfo& getInstrInfo(Op op) {
1279 assert(instrInfoInited);
1280 return instrInfo[op];
1283 static int numHiddenStackInputs(const NormalizedInstruction& ni) {
1284 assert(ni.immVec.isValid());
1285 return ni.immVec.numStackValues();
1288 namespace {
1289 int64_t countOperands(uint64_t mask) {
1290 const uint64_t ignore = FuncdRef | Local | Iter | AllLocals |
1291 DontGuardStack1 | IgnoreInnerType | DontGuardAny | This;
1292 mask &= ~ignore;
1294 static const uint64_t counts[][2] = {
1295 {Stack3, 1},
1296 {Stack2, 1},
1297 {Stack1, 1},
1298 {StackIns1, 2},
1299 {StackIns2, 3},
1300 {FStack, kNumActRecCells},
1303 int64_t count = 0;
1304 for (auto const& pair : counts) {
1305 if (mask & pair[0]) {
1306 count += pair[1];
1307 mask &= ~pair[0];
1310 assert(mask == 0);
1311 return count;
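// getStackPopped computes the raw number of cells an instruction consumes:
// opcodes whose input count depends on an immediate (FCall, FCallArray,
// FCallBuiltin, NewPackedArray, CreateCl, NewStructArray) are special-cased,
// and everything else is derived from the instruction's input mask.
// getStackPushed derives the cells produced from the output mask alone.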
1315 int64_t getStackPopped(PC pc) {
1316 auto op = toOp(*pc);
1317 switch (op) {
1318 case OpFCall: return getImm((Op*)pc, 0).u_IVA + kNumActRecCells;
1319 case OpFCallArray: return kNumActRecCells + 1;
1321 case OpFCallBuiltin:
1322 case OpNewPackedArray:
1323 case OpCreateCl: return getImm((Op*)pc, 0).u_IVA;
1325 case OpNewStructArray: return getImmVector((Op*)pc).size();
1327 default: break;
1330 uint64_t mask = getInstrInfo(op).in;
1331 int64_t count = 0;
1333 // All instructions with these properties are handled above
1334 assert((mask & (StackN | BStackN)) == 0);
1336 if (mask & MVector) {
1337 count += getImmVector((Op*)pc).numStackValues();
1338 mask &= ~MVector;
1341 return count + countOperands(mask);
1344 int64_t getStackPushed(PC pc) {
1345 return countOperands(getInstrInfo(toOp(*pc)).out);
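// getStackDelta returns the net change in stack depth for an instruction,
// again special-casing the immediate-dependent opcodes. For example, FCall
// with 3 arguments yields 1 - 3 - kNumActRecCells: it pushes one return value
// after the arguments and the ActRec have been consumed.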
1348 int getStackDelta(const NormalizedInstruction& ni) {
1349 int hiddenStackInputs = 0;
1350 initInstrInfo();
1351 auto op = ni.op();
1352 switch (op) {
1353 case OpFCall: {
1354 int numArgs = ni.imm[0].u_IVA;
1355 return 1 - numArgs - kNumActRecCells;
1358 case OpFCallBuiltin:
1359 case OpNewPackedArray:
1360 case OpCreateCl:
1361 return 1 - ni.imm[0].u_IVA;
1363 case OpNewStructArray:
1364 return 1 - ni.immVec.numStackValues();
1366 default:
1367 break;
1369 const InstrInfo& info = instrInfo[op];
1370 if (info.in & MVector) {
1371 hiddenStackInputs = numHiddenStackInputs(ni);
1372 SKTRACE(2, ni.source, "Has %d hidden stack inputs\n", hiddenStackInputs);
1374 int delta = instrInfo[op].numPushed - hiddenStackInputs;
1375 return delta;
1378 static NormalizedInstruction* findInputSrc(NormalizedInstruction* ni,
1379 DynLocation* dl) {
1380 while (ni != nullptr) {
1381 if (ni->outStack == dl ||
1382 ni->outLocal == dl ||
1383 ni->outLocal2 == dl ||
1384 ni->outStack2 == dl ||
1385 ni->outStack3 == dl) {
1386 break;
1388 ni = ni->prev;
1390 return ni;
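// findInputSrc walks backwards through the instruction stream to find the
// most recent instruction that produced dl as one of its outputs (stack or
// local), i.e. the in-tracelet producer of that value, or nullptr if none.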
1393 bool outputIsPredicted(SrcKey startSk,
1394 NormalizedInstruction& inst) {
1395 auto const& iInfo = getInstrInfo(inst.op());
1396 auto doPrediction =
1397 (iInfo.type == OutPred || iInfo.type == OutCns) && !inst.breaksTracelet;
1398 if (doPrediction) {
1399 // All OutPred ops except for SetM have a single stack output for now.
1400 assert(iInfo.out == Stack1 || inst.op() == OpSetM);
1401 auto dt = predictOutputs(startSk, &inst);
1402 if (dt != KindOfAny) {
1403 inst.outPred = Type(dt, dt == KindOfRef ? KindOfAny : KindOfNone);
1404 } else {
1405 doPrediction = false;
1409 return doPrediction;
1413 * For MetaData information that affects whether we want to even put a
1414 * value in the ni->inputs, we need to look at it before we call
1415 * getInputs(), so this is separate from applyInputMetaData.
1417 * We also check GuardedThis here, since RetC is short-circuited in
1418 * applyInputMetaData.
1420 void preInputApplyMetaData(Unit::MetaHandle metaHand,
1421 NormalizedInstruction* ni) {
1422 if (!metaHand.findMeta(ni->unit(), ni->offset())) return;
1424 Unit::MetaInfo info;
1425 while (metaHand.nextArg(info)) {
1426 switch (info.m_kind) {
1427 case Unit::MetaInfo::Kind::NonRefCounted:
1428 ni->nonRefCountedLocals.resize(ni->func()->numLocals());
1429 ni->nonRefCountedLocals[info.m_data] = 1;
1430 break;
1431 case Unit::MetaInfo::Kind::GuardedThis:
1432 ni->guardedThis = true;
1433 break;
1434 default:
1435 break;
1440 static bool isTypeAssert(Op op) {
1441 return op == Op::AssertTL || op == Op::AssertTStk ||
1442 op == Op::AssertObjL || op == Op::AssertObjStk;
1445 static bool isTypePredict(Op op) {
1446 return op == Op::PredictTL || op == Op::PredictTStk;
1449 static bool isAlwaysNop(Op op) {
1450 if (isTypeAssert(op) || isTypePredict(op)) return true;
1451 switch (op) {
1452 case Op::UnboxRNop:
1453 case Op::BoxRNop:
1454 case Op::FPassVNop:
1455 case Op::FPassC:
1456 return true;
1457 default:
1458 return false;
1462 void Translator::handleAssertionEffects(Tracelet& t,
1463 const NormalizedInstruction& ni,
1464 TraceletContext& tas,
1465 int currentStackOffset) {
1466 assert(isTypeAssert(ni.op()) || isTypePredict(ni.op()));
1468 auto const loc = [&] {
1469 switch (ni.op()) {
1470 case Op::AssertTL:
1471 case Op::AssertObjL:
1472 case Op::PredictTL:
1473 return Location(Location::Local, ni.imm[0].u_LA);
1474 case Op::AssertTStk:
1475 case Op::AssertObjStk:
1476 case Op::PredictTStk:
1477 return Location(Location::Stack,
1478 currentStackOffset - 1 - ni.imm[0].u_IVA);
1479 default:
1480 not_reached();
1482 }();
1483 if (loc.isInvalid()) return;
1485 auto const rt = [&]() -> folly::Optional<RuntimeType> {
1486 if (ni.op() == Op::AssertObjStk || ni.op() == Op::AssertObjL) {
1488 * Even though the class must be defined at the point of the
1489 * AssertObj, we might not have defined it yet in this tracelet,
1490 * or it might not be unique. For now just restrict this to
1491 * unique classes (we could also check parent of current
1492 * context).
1494 * There's nothing we can do with the 'exact' bit right now.
1496 auto const cls = Unit::lookupUniqueClass(
1497 ni.m_unit->lookupLitstrId(ni.imm[2].u_SA)
1499 if (cls && (cls->attrs() & AttrUnique)) {
1500 return RuntimeType{KindOfObject, KindOfNone, cls};
1502 return folly::none;
1505 switch (static_cast<AssertTOp>(ni.imm[1].u_OA)) {
1506 case AssertTOp::Uninit: return RuntimeType{KindOfUninit};
1507 case AssertTOp::InitNull: return RuntimeType{KindOfNull};
1508 case AssertTOp::Int: return RuntimeType{KindOfInt64};
1509 case AssertTOp::Dbl: return RuntimeType{KindOfDouble};
1510 case AssertTOp::Res: return RuntimeType{KindOfResource};
1511 case AssertTOp::Null: return folly::none;
1512 case AssertTOp::Bool: return RuntimeType{KindOfBoolean};
1513 case AssertTOp::SStr: return RuntimeType{KindOfString};
1514 case AssertTOp::Str: return RuntimeType{KindOfString};
1515 case AssertTOp::SArr: return RuntimeType{KindOfArray};
1516 case AssertTOp::Arr: return RuntimeType{KindOfArray};
1517 case AssertTOp::Obj: return RuntimeType{KindOfObject};
1519 // We can turn these into information in hhbc-translator but can't
1520 // really remove guards, since it can be more than one DataType,
1521 // so don't do anything here.
1522 case AssertTOp::OptInt:
1523 case AssertTOp::OptDbl:
1524 case AssertTOp::OptRes:
1525 case AssertTOp::OptBool:
1526 case AssertTOp::OptSStr:
1527 case AssertTOp::OptStr:
1528 case AssertTOp::OptSArr:
1529 case AssertTOp::OptArr:
1530 case AssertTOp::OptObj:
1531 return folly::none;
1533 case AssertTOp::Ref:
1534 // We should be able to use this to avoid the outer-type guards
1535 // on KindOfRefs, but for now we don't because of complications
1536 // with communicating the predicted inner type to
1537 // hhbc-translator.
1538 return folly::none;
1540 // There's really not much we can do with a Cell assertion at
1541 // translation time, right now.
1542 case AssertTOp::Cell:
1543 return folly::none;
1545 // Since these don't correspond to data types, there's not much we
1546 // can do in the current situation.
1547 case AssertTOp::InitUnc:
1548 case AssertTOp::Unc:
1549 case AssertTOp::InitCell:
1550 // These could also remove guards, but it's a little too hard to
1551 // get this information to hhbc-translator with this legacy
1552 // tracelet stuff since they don't map directly to a DataType.
1553 return folly::none;
1555 not_reached();
1556 }();
1557 if (!rt) return;
1559 auto const dl = t.newDynLocation(loc, *rt);
1561 // No need for m_resolvedDeps---because we're in the bytecode stream
1562 // we don't need to tell hhbc-translator about it out of band.
1563 auto& curVal = tas.m_currentMap[dl->location];
1564 if (curVal && !curVal->rtt.isVagueValue()) {
1565 if (curVal->rtt.outerType() != dl->rtt.outerType()) {
1567 * The tracked type disagrees with ahead of time analysis. A
1568 * similar case occurs in applyInputMetaData.
1570 * Either static analysis is wrong, this was a mispredicted type
1571 * from warmup profiling, or the code is unreachable because we're
1572 * about to fatal (e.g. a VerifyParamType is about to throw).
1574 * Punt this opcode to end the trace.
1576 FTRACE(1, "punting for {}\n", loc.pretty());
1577 punt();
1580 auto const isSpecializedObj =
1581 rt->outerType() == KindOfObject && rt->valueClass();
1582 if (!isSpecializedObj || curVal->rtt.valueClass()) {
1583 // Otherwise, we may have more information in the curVal
1584 // RuntimeType than would come from the AssertT if we were
1585 // tracking a literal value or something.
1586 FTRACE(1, "assertion leaving curVal alone {}\n", curVal->pretty());
1587 return;
1590 FTRACE(1, "assertion effects {} -> {}\n",
1591 curVal ? curVal->pretty() : std::string{},
1592 dl->pretty());
1593 curVal = dl;
1596 bool Translator::applyInputMetaData(Unit::MetaHandle& metaHand,
1597 NormalizedInstruction* ni,
1598 TraceletContext& tas,
1599 InputInfos &inputInfos) {
1600 if (isAlwaysNop(ni->op())) {
1601 ni->noOp = true;
1602 return true;
1605 if (!metaHand.findMeta(ni->unit(), ni->offset())) return false;
1607 Unit::MetaInfo info;
1608 if (!metaHand.nextArg(info)) return false;
1611 * We need to adjust the indexes in MetaInfo::m_arg if this
1612 * instruction takes stack arguments other than those related to the
1613 * MVector. (For example, the rhs of an assignment.)
1615 const InstrInfo& iInfo = instrInfo[ni->op()];
1616 if (iInfo.in & AllLocals) {
1618 * RetC/RetV don't care about their stack input, but it may have
1619 * been annotated. Skip it (because RetC/RetV pretend they don't
1620 * have a stack input).
1622 return false;
1624 if (iInfo.in == FuncdRef) {
1626 * FPassC* pretend to have no inputs
1628 return false;
1630 const int base = !(iInfo.in & MVector) ? 0 :
1631 !(iInfo.in & Stack1) ? 0 :
1632 !(iInfo.in & Stack2) ? 1 :
1633 !(iInfo.in & Stack3) ? 2 : 3;
1635 do {
1636 SKTRACE(3, ni->source, "considering MetaInfo of kind %d\n", info.m_kind);
1638 int arg = info.m_arg & Unit::MetaInfo::VectorArg ?
1639 base + (info.m_arg & ~Unit::MetaInfo::VectorArg) : info.m_arg;
1641 switch (info.m_kind) {
1642 case Unit::MetaInfo::Kind::NoSurprise:
1643 ni->noSurprise = true;
1644 break;
1645 case Unit::MetaInfo::Kind::GuardedCls:
1646 ni->guardedCls = true;
1647 break;
1648 case Unit::MetaInfo::Kind::DataTypePredicted: {
1649 // In TransProfile mode, disable type predictions to avoid side exits.
1650 if (m_mode == TransProfile) break;
1652 // If the original type was invalid or predicted, then use the
1653 // prediction in the meta-data.
1654 assert((unsigned) arg < inputInfos.size());
1656 SKTRACE(1, ni->source, "MetaInfo DataTypePredicted for input %d; "
1657 "newType = %d\n", arg, DataType(info.m_data));
1658 InputInfo& ii = inputInfos[arg];
1659 DynLocation* dl = tas.recordRead(ii, false, KindOfAny);
1660 NormalizedInstruction* src = findInputSrc(tas.m_t->m_instrStream.last,
1661 dl);
1662 if (src) {
1663 // Update the rtt and mark src's output as predicted if either:
1664 // a) we don't have type information yet (ie, it's KindOfAny), or
1665 // b) src's output was predicted. This is assuming that the
1666 // front-end's prediction is more accurate.
1667 if (dl->rtt.outerType() == KindOfAny || src->outputPredicted) {
1668 SKTRACE(1, ni->source, "MetaInfo DataTypePredicted for input %d; "
1669 "replacing oldType = %d with newType = %d\n", arg,
1670 dl->rtt.outerType(), DataType(info.m_data));
1671 dl->rtt = RuntimeType((DataType)info.m_data);
1672 src->outputPredicted = true;
1673 src->outputPredictionStatic = true;
1676 break;
1678 case Unit::MetaInfo::Kind::DataTypeInferred: {
1679 assert((unsigned)arg < inputInfos.size());
1680 SKTRACE(1, ni->source, "MetaInfo DataTypeInferred for input %d; "
1681 "newType = %d\n", arg, DataType(info.m_data));
1682 InputInfo& ii = inputInfos[arg];
1683 ii.dontGuard = true;
1684 DynLocation* dl = tas.recordRead(ii, true, (DataType)info.m_data);
1685 if (dl->rtt.outerType() != info.m_data &&
1686 (!dl->isString() || info.m_data != KindOfString)) {
1687 if (dl->rtt.outerType() != KindOfAny) {
1688 // Either static analysis is wrong, or
1689 // this was mis-predicted by the type
1690 // profiler, or this code is unreachable,
1691 // and there's an earlier bytecode in the tracelet
1692 // that's going to fatal
1693 NormalizedInstruction *src = nullptr;
1694 if (mapContains(tas.m_changeSet, dl->location)) {
1695 src = findInputSrc(tas.m_t->m_instrStream.last, dl);
1696 if (src && src->outputPredicted) {
1697 src->outputPredicted = false;
1698 } else {
1699 src = nullptr;
1702 if (!src) {
1703 // Not a type-profiler mis-predict
1704 if (tas.m_t->m_instrStream.first) {
1705 // We're not the first instruction, so punt
1706 // If this bytecode /is/ reachable, we'll
1707 // get here again, and that time, we will
1708 // be the first instruction
1709 punt();
1711 not_reached();
1714 dl->rtt = RuntimeType((DataType)info.m_data);
1715 ni->markInputInferred(arg);
1716 } else {
1718 * Static inference confirmed the expected type,
1719 * but if the expected type was provided by the type
1720 * profiler we want to clear outputPredicted to
1721 * avoid unneeded guards.
1723 if (mapContains(tas.m_changeSet, dl->location)) {
1724 NormalizedInstruction *src =
1725 findInputSrc(tas.m_t->m_instrStream.last, dl);
1726 if (src->outputPredicted) {
1727 src->outputPredicted = false;
1728 ni->markInputInferred(arg);
1732 break;
1735 case Unit::MetaInfo::Kind::String: {
1736 const StringData* sd = ni->unit()->lookupLitstrId(info.m_data);
1737 assert((unsigned)arg < inputInfos.size());
1738 InputInfo& ii = inputInfos[arg];
1739 ii.dontGuard = true;
1740 DynLocation* dl = tas.recordRead(ii, true, KindOfString);
1741 assert(!dl->rtt.isString() || !dl->rtt.valueString() ||
1742 dl->rtt.valueString() == sd);
1743 SKTRACE(1, ni->source, "MetaInfo on input %d; old type = %s\n",
1744 arg, dl->pretty().c_str());
1745 dl->rtt = RuntimeType(sd);
1746 break;
1749 case Unit::MetaInfo::Kind::Class: {
1750 assert((unsigned)arg < inputInfos.size());
1751 InputInfo& ii = inputInfos[arg];
1752 DynLocation* dl = tas.recordRead(ii, true);
1753 if (dl->rtt.valueType() != KindOfObject) {
1754 continue;
1757 const StringData* metaName = ni->unit()->lookupLitstrId(info.m_data);
1758 const StringData* rttName =
1759 dl->rtt.valueClass() ? dl->rtt.valueClass()->name() : nullptr;
1760 // The two classes might not be exactly the same, which is ok
1761 // as long as metaCls is more derived than rttCls.
1762 Class* metaCls = Unit::lookupUniqueClass(metaName);
1763 Class* rttCls = rttName ? Unit::lookupUniqueClass(rttName) : nullptr;
1764 if (metaCls && rttCls && metaCls != rttCls &&
1765 !metaCls->classof(rttCls)) {
1766 // Runtime type is more derived
1767 metaCls = 0;
1769 if (metaCls && metaCls != rttCls) {
1770 SKTRACE(1, ni->source, "replacing input %d with a MetaInfo-supplied "
1771 "class of %s; old type = %s\n",
1772 arg, metaName->data(), dl->pretty().c_str());
1773 if (dl->rtt.isRef()) {
1774 dl->rtt = RuntimeType(KindOfRef, KindOfObject, metaCls);
1775 } else {
1776 dl->rtt = RuntimeType(KindOfObject, KindOfNone, metaCls);
1779 break;
1782 case Unit::MetaInfo::Kind::MVecPropClass: {
1783 const StringData* metaName = ni->unit()->lookupLitstrId(info.m_data);
1784 Class* metaCls = Unit::lookupUniqueClass(metaName);
1785 if (metaCls) {
1786 ni->immVecClasses[arg] = metaCls;
1788 break;
1791 case Unit::MetaInfo::Kind::GuardedThis:
1792 case Unit::MetaInfo::Kind::NonRefCounted:
1793 // fallthrough; these are handled in preInputApplyMetaData.
1794 case Unit::MetaInfo::Kind::None:
1795 break;
1797 } while (metaHand.nextArg(info));
1799 return false;
1802 static void addMVectorInputs(NormalizedInstruction& ni,
1803 int& currentStackOffset,
1804 std::vector<InputInfo>& inputs) {
1805 assert(ni.immVec.isValid());
1806 ni.immVecM.reserve(ni.immVec.size());
1808 int UNUSED stackCount = 0;
1809 int UNUSED localCount = 0;
1811 currentStackOffset -= ni.immVec.numStackValues();
1812 int localStackOffset = currentStackOffset;
1814 auto push_stack = [&] {
1815 ++stackCount;
1816 inputs.emplace_back(Location(Location::Stack, localStackOffset++));
1818 auto push_local = [&] (int imm) {
1819 ++localCount;
1820 inputs.emplace_back(Location(Location::Local, imm));
1824 * Note that we have to push as we go so that the arguments come in
1825 * the order expected for the M-vector.
1827 * Indexes into these argument lists must also be in the same order
1828 * as the information in Unit::MetaInfo, because the analysis phase
1829 * may replace some of them with literals.
1833 * Also note: if we eventually have immediates that are not local
1834 * ids (i.e. string ids), this analysis step is going to have to be
1835 * a bit wiser.
1837 auto opPtr = (const Op*)ni.source.pc();
1838 auto const location = getMLocation(opPtr);
1839 auto const lcode = location.lcode;
1841 const bool trailingClassRef = lcode == LSL || lcode == LSC;
1843 switch (numLocationCodeStackVals(lcode)) {
1844 case 0: {
1845 if (lcode == LH) {
1846 inputs.emplace_back(Location(Location::This));
1847 } else {
1848 assert(lcode == LL || lcode == LGL || lcode == LNL);
1849 if (location.hasImm()) {
1850 push_local(location.imm);
1853 } break;
1854 case 1:
1855 if (lcode == LSL) {
1856 // We'll get the trailing stack value after pushing all the
1857 // member vector elements.
1858 assert(location.hasImm());
1859 push_local(location.imm);
1860 } else {
1861 push_stack();
1863 break;
1864 case 2:
1865 push_stack();
1866 if (!trailingClassRef) {
1867 // This one is actually at the back.
1868 push_stack();
1870 break;
1871 default: not_reached();
1874 // Now push all the members in the correct order.
1875 for (auto const& member : getMVector(opPtr)) {
1876 auto const mcode = member.mcode;
1877 ni.immVecM.push_back(mcode);
1879 if (mcode == MW) {
1880 // No stack and no locals.
1881 continue;
1882 } else if (member.hasImm()) {
1883 int64_t imm = member.imm;
1884 if (memberCodeImmIsLoc(mcode)) {
1885 push_local(imm);
1886 } else if (memberCodeImmIsString(mcode)) {
1887 inputs.emplace_back(Location(Location::Litstr, imm));
1888 } else {
1889 assert(memberCodeImmIsInt(mcode));
1890 inputs.emplace_back(Location(Location::Litint, imm));
1892 } else {
1893 push_stack();
1895 inputs.back().dontGuardInner = true;
1898 if (trailingClassRef) {
1899 push_stack();
1902 ni.immVecClasses.resize(ni.immVecM.size());
1904 assert(stackCount == ni.immVec.numStackValues());
1906 SKTRACE(2, ni.source, "M-vector using %d hidden stack "
1907 "inputs, %d locals\n", stackCount, localCount);
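// Illustrative example (added for clarity; not part of the original
// source, and the member-code mnemonics are assumptions based on the
// memberCodeImmIsLoc/Int checks above): for a CGetM whose immediate
// vector is <L:0 EI:3> -- base is local 0, single member is the int
// literal 3 -- the code above emits a Location::Local input for the
// base and a Location::Litint input for the index, in that order; no
// hidden stack slots are consumed, so stackCount stays 0.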
1910 void getInputs(SrcKey startSk, NormalizedInstruction& inst, InputInfos& infos,
1911 const Func* func, const LocalTypeFn& localType) {
1912 // TranslatorX64 expected top of stack to be index -1, with indexes growing
1913 // down from there. hhir defines top of stack to be index 0, with indexes
1914 // growing up from there. To compensate we start with a stack offset of 1 and
1915 // negate the index of any stack input after the call to getInputs.
1916 int stackOff = 1;
1917 getInputsImpl(startSk, &inst, stackOff, infos, func, localType);
1918 for (auto& info : infos) {
1919 if (info.loc.isStack()) info.loc.offset = -info.loc.offset;
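// Worked example (added for clarity, not part of the original source):
// for an instruction with Stack2|Stack1 inputs, getInputsImpl starts at
// offset 1 and produces Stack locations 0 and -1 (TranslatorX64-style,
// growing downward); the negation above maps them to 0 and 1, i.e. the
// hhir convention where the top of the stack is index 0 and indices
// grow upward.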
1924 * getInputsImpl --
1925 * Returns locations for this instruction's inputs.
1927 * Throws:
1928 * TranslationFailedExc:
1929 * Unimplemented functionality, probably an opcode.
1931 * UnknownInputExc:
1932 * Consumed a datum whose type or value could not be constrained at
1933 * translation time, because the tracelet has already modified it.
1934 * Truncate the tracelet at the preceding instruction, which must
1935  * exist because *something* modified something in it.
1937 void getInputsImpl(SrcKey startSk,
1938 NormalizedInstruction* ni,
1939 int& currentStackOffset,
1940 InputInfos& inputs,
1941 const Func* func,
1942 const LocalTypeFn& localType) {
1943 #ifdef USE_TRACE
1944 const SrcKey& sk = ni->source;
1945 #endif
1946 assert(inputs.empty());
1947 if (debug && !mapContains(instrInfo, ni->op())) {
1948 fprintf(stderr, "Translator does not understand "
1949 "instruction %s\n", opcodeToName(ni->op()));
1950 assert(false);
1952 const InstrInfo& info = instrInfo[ni->op()];
1953 Operands input = info.in;
1954 if (input & FuncdRef) {
1955 inputs.needsRefCheck = true;
1957 if (input & Iter) {
1958 inputs.emplace_back(Location(Location::Iter, ni->imm[0].u_IVA));
1960 if (input & FStack) {
1961 currentStackOffset -= ni->imm[0].u_IVA; // arguments consumed
1962 currentStackOffset -= kNumActRecCells; // ActRec is torn down as well
1964 if (input & IgnoreInnerType) ni->ignoreInnerType = true;
1965 if (input & Stack1) {
1966 SKTRACE(1, sk, "getInputs: stack1 %d\n", currentStackOffset - 1);
1967 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1968 if (input & DontGuardStack1) inputs.back().dontGuard = true;
1969 if (input & Stack2) {
1970 SKTRACE(1, sk, "getInputs: stack2 %d\n", currentStackOffset - 1);
1971 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1972 if (input & Stack3) {
1973 SKTRACE(1, sk, "getInputs: stack3 %d\n", currentStackOffset - 1);
1974 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1978 if (input & StackN) {
1979 int numArgs = ni->op() == OpNewPackedArray ? ni->imm[0].u_IVA :
1980 ni->immVec.numStackValues();
1981 SKTRACE(1, sk, "getInputs: stackN %d %d\n", currentStackOffset - 1,
1982 numArgs);
1983 for (int i = 0; i < numArgs; i++) {
1984 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1985 inputs.back().dontGuard = true;
1986 inputs.back().dontBreak = true;
1989 if (input & BStackN) {
1990 int numArgs = ni->imm[0].u_IVA;
1991 SKTRACE(1, sk, "getInputs: BStackN %d %d\n", currentStackOffset - 1,
1992 numArgs);
1993 for (int i = 0; i < numArgs; i++) {
1994 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1997 if (input & MVector) {
1998 addMVectorInputs(*ni, currentStackOffset, inputs);
2000 if (input & Local) {
2001 // (Almost) all instructions that take a Local encode its index
2002 // in their first immediate.
2003 int loc;
2004 auto insertAt = inputs.end();
2005 switch (ni->op()) {
2006 case OpSetWithRefLM:
2007 insertAt = inputs.begin();
2008 // fallthrough
2009 case OpFPassL:
2010 loc = ni->imm[1].u_IVA;
2011 break;
2013 default:
2014 loc = ni->imm[0].u_IVA;
2015 break;
2017 SKTRACE(1, sk, "getInputs: local %d\n", loc);
2018 inputs.emplace(insertAt, Location(Location::Local, loc));
2021 auto wantInlineReturn = [&] {
2022 const int localCount = ni->func()->numLocals();
2023 // Inline return causes us to guard this tracelet more precisely. If
2024 // we're already chaining to get here, just do a generic return in the
2025 // hopes of avoiding further specialization. The localCount constraint
2026 // is an unfortunate consequence of the current generic machinery not
2027 // working for 0 locals.
2028 if (tx64->numTranslations(startSk) >= kTooPolyRet && localCount > 0) {
2029 return false;
2031 ni->nonRefCountedLocals.resize(localCount);
2032 int numRefCounted = 0;
2033 for (int i = 0; i < localCount; ++i) {
2034 auto curType = localType(i);
2035 if (ni->nonRefCountedLocals[i]) {
2036 assert(curType.notCounted() && "Static analysis was wrong");
2038 if (curType.maybeCounted()) {
2039 numRefCounted++;
2042 return numRefCounted <= RuntimeOption::EvalHHIRInliningMaxReturnDecRefs;
2045 if ((input & AllLocals) && wantInlineReturn()) {
2046 ni->inlineReturn = true;
2047 ni->ignoreInnerType = true;
2048 int n = ni->func()->numLocals();
2049 for (int i = 0; i < n; ++i) {
2050 if (!ni->nonRefCountedLocals[i]) {
2051 inputs.emplace_back(Location(Location::Local, i));
2056 SKTRACE(1, sk, "stack args: virtual sfo now %d\n", currentStackOffset);
2057 TRACE(1, "%s\n", Trace::prettyNode("Inputs", inputs).c_str());
2059 if (inputs.size() &&
2060 ((input & DontGuardAny) || dontGuardAnyInputs(ni->op()))) {
2061 for (int i = inputs.size(); i--; ) {
2062 inputs[i].dontGuard = true;
2065 if (input & This) {
2066 inputs.emplace_back(Location(Location::This));
2070 bool dontGuardAnyInputs(Op op) {
2071 switch (op) {
2072 #define CASE(iNm) case Op ## iNm:
2073 #define NOOP(...)
2074 INSTRS
2075 PSEUDOINSTR_DISPATCH(NOOP)
2076 return false;
2078 default:
2079 return true;
2081 #undef NOOP
2082 #undef CASE
2085 bool outputDependsOnInput(const Op instr) {
2086 switch (instrInfo[instr].type) {
2087 case OutNull:
2088 case OutNullUninit:
2089 case OutString:
2090 case OutStringImm:
2091 case OutDouble:
2092 case OutBoolean:
2093 case OutBooleanImm:
2094 case OutInt64:
2095 case OutArray:
2096 case OutArrayImm:
2097 case OutObject:
2098 case OutResource:
2099 case OutThisObject:
2100 case OutUnknown:
2101 case OutVUnknown:
2102 case OutClassRef:
2103 case OutPred:
2104 case OutCns:
2105 case OutStrlen:
2106 case OutNone:
2107 return false;
2109 case OutAsyncAwait:
2110 case OutFDesc:
2111 case OutSameAsInput:
2112 case OutCInput:
2113 case OutVInput:
2114 case OutCInputL:
2115 case OutVInputL:
2116 case OutFInputL:
2117 case OutFInputR:
2118 case OutArith:
2119 case OutBitOp:
2120 case OutSetOp:
2121 case OutIncDec:
2122 case OutFPushCufSafe:
2123 return true;
2125 not_reached();
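// Illustrative note (added for clarity, not part of the original
// source): an instruction typed OutSameAsInput simply forwards its
// stack input, so an analysis such as constrainDep() below must keep
// following the value through it, whereas one typed OutBoolean produces
// a fresh boolean regardless of its inputs and the chase can stop there.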
2129 * getOutputs --
2130 * Builds a vector describing this instruction's outputs. Also
2131 * records any write to a value that *might* alias a local.
2133 * Throws:
2134 * TranslationFailedExc:
2135 * Unimplemented functionality, probably an opcode.
2137 void Translator::getOutputs(/*inout*/ Tracelet& t,
2138 /*inout*/ NormalizedInstruction* ni,
2139 /*inout*/ int& currentStackOffset,
2140 /*out*/ bool& varEnvTaint) {
2141 varEnvTaint = false;
2143 const vector<DynLocation*>& inputs = ni->inputs;
2144 const Op op = ni->op();
2146 initInstrInfo();
2147 assert_not_implemented(instrInfo.find(op) != instrInfo.end());
2148 const Operands outLocs = instrInfo[op].out;
2149 const OutTypeConstraints typeInfo = instrInfo[op].type;
2151 SKTRACE(1, ni->source, "output flavor %d\n", typeInfo);
2152 if (typeInfo == OutFInputL || typeInfo == OutFInputR ||
2153 typeInfo == OutVInputL) {
2154 // Variable number of outputs. If we box the loc we're reading,
2155 // we need to write out its boxed-ness.
2156 assert(inputs.size() >= 1);
2157 const DynLocation* in = inputs[inputs.size() - 1];
2158 DynLocation* outDynLoc = t.newDynLocation(in->location, in->rtt);
2159 outDynLoc->location = Location(Location::Stack, currentStackOffset++);
2160 bool isRef;
2161 if (typeInfo == OutVInputL) {
2162 isRef = true;
2163 } else {
2164 assert(typeInfo == OutFInputL || typeInfo == OutFInputR);
2165 isRef = ni->preppedByRef;
2167 if (isRef) {
2168 // Locals can be KindOfUninit, so we need to convert
2169 // this to KindOfNull
2170 if (in->rtt.outerType() == KindOfUninit) {
2171 outDynLoc->rtt = RuntimeType(KindOfRef, KindOfNull);
2172 } else {
2173 outDynLoc->rtt = in->rtt.box();
2175 SKTRACE(1, ni->source, "boxed type: %d -> %d\n",
2176 outDynLoc->rtt.outerType(), outDynLoc->rtt.innerType());
2177 } else {
2178 if (outDynLoc->rtt.outerType() == KindOfUninit) {
2179 outDynLoc->rtt = RuntimeType(KindOfNull);
2180 } else {
2181 outDynLoc->rtt = outDynLoc->rtt.unbox();
2183 SKTRACE(1, ni->source, "unboxed type: %d\n",
2184 outDynLoc->rtt.outerType());
2186 assert(outDynLoc->location.isStack());
2187 ni->outStack = outDynLoc;
2189 if (isRef && in->rtt.outerType() != KindOfRef &&
2190 typeInfo != OutFInputR &&
2191 in->location.isLocal()) {
2192 // VGetL or FPassL boxing a local
2193 DynLocation* smashedLocal =
2194 t.newDynLocation(in->location, outDynLoc->rtt);
2195 assert(smashedLocal->location.isLocal());
2196 ni->outLocal = smashedLocal;
2198 // Other things that might be getting boxed here include globals
2199 // and array values; since we don't attempt to track these things'
2200 // types in symbolic execution anyway, we can ignore them.
2201 return;
2204 int opnd = None;
2205 for (int outLocsCopy = (int)outLocs;
2206 outLocsCopy != (int)None;
2207 outLocsCopy &= ~opnd) {
2208 opnd = 1 << (ffs(outLocsCopy) - 1);
2209 assert(opnd != None && opnd != Stack3); // no instr produces 3 values
2210 assert(opnd != FuncdRef); // reffiness is immutable
2211 Location loc;
2212 switch (opnd) {
2213 // Pseudo-outputs that affect translator state
2214 case FStack: {
2215 currentStackOffset += kNumActRecCells;
2216 t.m_arState.pushFunc(*ni);
2217 } continue; // no instr-associated output
2219 case Local: {
2220 if (op == OpSetN || op == OpSetOpN || op == OpIncDecN ||
2221 op == OpBindN || op == OpUnsetN || op == OpVGetN) {
2222 varEnvTaint = true;
2223 continue;
2225 if (op == OpCreateCont || op == OpAsyncESuspend) {
2226 // CreateCont stores Uninit to all locals but NormalizedInstruction
2227 // doesn't have enough output fields, so we special case it in
2228 // analyze().
2229 continue;
2232 ASSERT_NOT_IMPLEMENTED(op == OpSetOpL ||
2233 op == OpSetM || op == OpSetOpM ||
2234 op == OpBindM ||
2235 op == OpSetWithRefLM || op == OpSetWithRefRM ||
2236 op == OpUnsetM ||
2237 op == OpIncDecL ||
2238 op == OpVGetM || op == OpFPassM ||
2239 op == OpStaticLocInit || op == OpInitThisLoc ||
2240 op == OpSetL || op == OpBindL || op == OpVGetL ||
2241 op == OpPushL || op == OpUnsetL ||
2242 op == OpIterInit || op == OpIterInitK ||
2243 op == OpMIterInit || op == OpMIterInitK ||
2244 op == OpWIterInit || op == OpWIterInitK ||
2245 op == OpIterNext || op == OpIterNextK ||
2246 op == OpMIterNext || op == OpMIterNextK ||
2247 op == OpWIterNext || op == OpWIterNextK);
2248 if (op == OpFPassM && !ni->preppedByRef) {
2249 // Equivalent to CGetM. Won't mutate the base.
2250 continue;
2252 if (op == OpIncDecL) {
2253 assert(ni->inputs.size() == 1);
2254 const RuntimeType &inRtt = ni->inputs[0]->rtt;
2255 RuntimeType rtt =
2256 IS_INT_TYPE(inRtt.valueType()) ? inRtt : RuntimeType(KindOfAny);
2257 DynLocation* incDecLoc =
2258 t.newDynLocation(ni->inputs[0]->location, rtt);
2259 assert(incDecLoc->location.isLocal());
2260 ni->outLocal = incDecLoc;
2261 continue; // Doesn't mutate a loc's types for int. Carry on.
2263 if (op == OpUnsetL || op == OpPushL) {
2264 assert(ni->inputs.size() == 1);
2265 DynLocation* inLoc = ni->inputs[0];
2266 assert(inLoc->location.isLocal());
2267 RuntimeType newLhsRtt = RuntimeType(KindOfUninit);
2268 Location locLocation = inLoc->location;
2269 SKTRACE(2, ni->source, "(%s, %" PRId64 ") <- type %d\n",
2270 locLocation.spaceName(), locLocation.offset,
2271 newLhsRtt.valueType());
2272 DynLocation* unsetLoc = t.newDynLocation(locLocation, newLhsRtt);
2273 assert(unsetLoc->location.isLocal());
2274 ni->outLocal = unsetLoc;
2275 continue;
2277 if (op == OpStaticLocInit || op == OpInitThisLoc) {
2278 ni->outLocal = t.newDynLocation(Location(Location::Local,
2279 ni->imm[0].u_OA),
2280 KindOfAny);
2281 continue;
2283 if (op == OpSetM || op == OpSetOpM ||
2284 op == OpVGetM || op == OpBindM ||
2285 op == OpSetWithRefLM || op == OpSetWithRefRM ||
2286 op == OpUnsetM || op == OpFPassM) {
2287 switch (ni->immVec.locationCode()) {
2288 case LL: {
2289 const int kVecStart = (op == OpSetM ||
2290 op == OpSetOpM ||
2291 op == OpBindM ||
2292 op == OpSetWithRefLM ||
2293 op == OpSetWithRefRM) ?
2294 1 : 0; // 0 is rhs for SetM/SetOpM
2295 DynLocation* inLoc = ni->inputs[kVecStart];
2296 assert(inLoc->location.isLocal());
2297 Location locLoc = inLoc->location;
2298 if (op == OpUnsetM) {
2299 // UnsetM can change the value of its base local when it's an
2300 // array. Output a new DynLocation with the same type to
2301 // reflect the new value.
2302 ni->outLocal = t.newDynLocation(locLoc, inLoc->rtt);
2303 } else if (inLoc->rtt.isString() ||
2304 inLoc->rtt.valueType() == KindOfBoolean) {
2305 // Strings and bools produce value-dependent results; "" and
2306 // false upgrade to an array successfully, while other values
2307 // fail and leave the lhs unmodified.
2308 DynLocation* baseLoc = t.newDynLocation(locLoc, KindOfAny);
2309 assert(baseLoc->isLocal());
2310 ni->outLocal = baseLoc;
2311 } else if (inLoc->rtt.valueType() == KindOfUninit ||
2312 inLoc->rtt.valueType() == KindOfNull) {
2313 RuntimeType newLhsRtt = inLoc->rtt.setValueType(
2314 mcodeMaybePropName(ni->immVecM[0]) ?
2315 KindOfObject : KindOfArray);
2316 SKTRACE(2, ni->source, "(%s, %" PRId64 ") <- type %d\n",
2317 locLoc.spaceName(), locLoc.offset,
2318 newLhsRtt.valueType());
2319 DynLocation* baseLoc = t.newDynLocation(locLoc, newLhsRtt);
2320 assert(baseLoc->location.isLocal());
2321 ni->outLocal = baseLoc;
2323 // Note (if we start translating pseudo-mains):
2325 // A SetM in pseudo-main might alias a local whose type we're
2326 // remembering:
2328 // $GLOBALS['a'] = 123; // $a :: Int
2330 // and more deviously:
2332 // $loc['b'][17] = $GLOBALS; $x = 'b'; $y = 17;
2333 // $loc[$x][$y]['a'] = 123; // $a :: Int
2334 break;
2336 case LNL:
2337 case LNC:
2338 varEnvTaint = true;
2339 break;
2340 case LGL:
2341 case LGC:
2342 break;
2343 default:
2344 break;
2346 continue;
2348 if (op == OpSetOpL) {
2349 const int kLocIdx = 1;
2350 DynLocation* inLoc = ni->inputs[kLocIdx];
2351 assert(inLoc->location.isLocal());
2352 DynLocation* dl = t.newDynLocation();
2353 dl->location = inLoc->location;
2354 dl->rtt = setOpOutputType(ni, ni->inputs);
2355 if (inLoc->isRef()) {
2356 dl->rtt = dl->rtt.box();
2358 SKTRACE(2, ni->source, "(%s, %" PRId64 ") <- type %d\n",
2359 inLoc->location.spaceName(), inLoc->location.offset,
2360 dl->rtt.valueType());
2361 assert(dl->location.isLocal());
2362 ni->outLocal = dl;
2363 continue;
2365 if (op >= OpIterInit && op <= OpWIterNextK) {
2366 assert(op == OpIterInit || op == OpIterInitK ||
2367 op == OpMIterInit || op == OpMIterInitK ||
2368 op == OpWIterInit || op == OpWIterInitK ||
2369 op == OpIterNext || op == OpIterNextK ||
2370 op == OpMIterNext || op == OpMIterNextK ||
2371 op == OpWIterNext || op == OpWIterNextK);
2372 const int kValImmIdx = 2;
2373 const int kKeyImmIdx = 3;
2374 DynLocation* outVal = t.newDynLocation();
2375 int off = ni->imm[kValImmIdx].u_IVA;
2376 outVal->location = Location(Location::Local, off);
2377 if (op == OpMIterInit || op == OpMIterInitK ||
2378 op == OpMIterNext || op == OpMIterNextK) {
2379 outVal->rtt = RuntimeType(KindOfRef, KindOfAny);
2380 } else {
2381 outVal->rtt = RuntimeType(KindOfAny);
2383 ni->outLocal = outVal;
2384 if (op == OpIterInitK || op == OpIterNextK ||
2385 op == OpWIterInitK || op == OpWIterNextK ||
2386 op == OpMIterInitK || op == OpMIterNextK) {
2387 DynLocation* outKey = t.newDynLocation();
2388 int keyOff = getImm((Op*)ni->pc(), kKeyImmIdx).u_IVA;
2389 outKey->location = Location(Location::Local, keyOff);
2390 outKey->rtt = RuntimeType(KindOfAny);
2391 ni->outLocal2 = outKey;
2393 continue;
2395 assert(ni->inputs.size() == 2);
2396 const int kValIdx = 0;
2397 const int kLocIdx = 1;
2398 DynLocation* inLoc = ni->inputs[kLocIdx];
2399 DynLocation* inVal = ni->inputs[kValIdx];
2400 Location locLocation = inLoc->location;
2401 // Variant RHS possible only when binding.
2402 assert(inVal->rtt.isVagueValue() ||
2403 (op == OpBindL) ==
2404 (inVal->rtt.outerType() == KindOfRef));
2405 assert(!inVal->location.isLocal());
2406 assert(inLoc->location.isLocal());
2407 RuntimeType newLhsRtt = inVal->rtt.isVagueValue() || op == OpBindL ?
2408 inVal->rtt :
2409 inLoc->rtt.setValueType(inVal->rtt.outerType());
2410 if (inLoc->rtt.outerType() == KindOfRef) {
2411 assert(newLhsRtt.outerType() == KindOfRef);
2412 } else {
2413 assert(op == OpBindL ||
2414 newLhsRtt.outerType() != KindOfRef);
2416 SKTRACE(2, ni->source, "(%s, %" PRId64 ") <- type %d\n",
2417 locLocation.spaceName(), locLocation.offset,
2418 inVal->rtt.valueType());
2419 DynLocation* outLhsLoc = t.newDynLocation(locLocation, newLhsRtt);
2420 assert(outLhsLoc->location.isLocal());
2421 ni->outLocal = outLhsLoc;
2422 } continue; // already pushed an output for the local
2424 case Stack1:
2425 case Stack2: {
2426 loc = Location(Location::Stack, currentStackOffset++);
2427 if (ni->op() == OpFPushCufSafe) {
2428 // FPushCufSafe pushes its first stack input, then a bool.
2429 if (opnd == Stack2) {
2430 assert(ni->outStack == nullptr);
2431 auto* dl = t.newDynLocation(loc, ni->inputs[0]->rtt);
2432 ni->outStack = dl;
2433 } else {
2434 assert(ni->outStack2 == nullptr);
2435 auto* dl = t.newDynLocation(loc, KindOfBoolean);
2436 ni->outStack2 = dl;
2438 continue;
2440 if (ni->op() == OpAsyncAwait) {
2441 // The second output of OpAsyncAwait is a bool.
2442 if (opnd == Stack2) {
2443 assert(ni->outStack == nullptr);
2444 // let getDynLocType do it.
2445 } else {
2446 assert(ni->outStack2 == nullptr);
2447 ni->outStack2 = t.newDynLocation(loc, KindOfBoolean);
2448 continue;
2451 } break;
2452 case StackIns1: {
2453 // First stack output is where the inserted element will go.
2454 // The output code for the instruction will affect what we
2455 // think about this location.
2456 loc = Location(Location::Stack, currentStackOffset++);
2458 // The existing top is just being moved up a notch. This one
2459 // always functions as if it were OutSameAsInput.
2460 assert(ni->inputs.size() >= 1);
2461 ni->outStack2 = t.newDynLocation(
2462 Location(Location::Stack, currentStackOffset++),
2463 ni->inputs[0]->rtt
2465 } break;
2466 case StackIns2: {
2467 // Similar to StackIns1.
2468 loc = Location(Location::Stack, currentStackOffset++);
2470 // Move the top two locations up a slot.
2471 assert(ni->inputs.size() >= 2);
2472 ni->outStack2 = t.newDynLocation(
2473 Location(Location::Stack, currentStackOffset++),
2474 ni->inputs[1]->rtt
2476 ni->outStack3 = t.newDynLocation(
2477 Location(Location::Stack, currentStackOffset++),
2478 ni->inputs[0]->rtt
2480 } break;
2481 default:
2482 not_reached();
2484 DynLocation* dl = t.newDynLocation();
2485 dl->location = loc;
2486 dl->rtt = getDynLocType(t.m_sk, ni, typeInfo, m_mode);
2487 SKTRACE(2, ni->source, "recording output t(%d->%d) #(%s, %" PRId64 ")\n",
2488 dl->rtt.outerType(), dl->rtt.innerType(),
2489 dl->location.spaceName(), dl->location.offset);
2490 assert(dl->location.isStack());
2491 ni->outStack = dl;
2495 void
2496 Translator::requestResetHighLevelTranslator() {
2497 if (dbgTranslateCoin) {
2498 dbgTranslateCoin->reset();
2502 bool DynLocation::canBeAliased() const {
2503 return isValue() &&
2504 ((Translator::liveFrameIsPseudoMain() && isLocal()) || isRef());
2507 // Test the type of a location without recording it as a read yet.
2508 RuntimeType TraceletContext::currentType(const Location& l) const {
2509 DynLocation* dl;
2510 if (!mapGet(m_currentMap, l, &dl)) {
2511 assert(!mapContains(m_deletedSet, l));
2512 assert(!mapContains(m_changeSet, l));
2513 return tx64->liveType(l, *liveUnit());
2515 return dl->rtt;
2518 DynLocation* TraceletContext::recordRead(const InputInfo& ii,
2519 bool useHHIR,
2520 DataType staticType) {
2521 if (staticType == KindOfNone) staticType = KindOfAny;
2523 DynLocation* dl;
2524 const Location& l = ii.loc;
2525 if (!mapGet(m_currentMap, l, &dl)) {
2526 // We should never try to read a location that has been deleted
2527 assert(!mapContains(m_deletedSet, l));
2528 // If the given location was not in m_currentMap, then it shouldn't
2529 // be in m_changeSet either
2530 assert(!mapContains(m_changeSet, l));
2531 if (ii.dontGuard && !l.isLiteral()) {
2532 assert(!useHHIR || staticType != KindOfRef);
2533 dl = m_t->newDynLocation(l, RuntimeType(staticType));
2534 if (useHHIR && staticType != KindOfAny) {
2535 m_resolvedDeps[l] = dl;
2537 } else {
2538 // TODO: Once the region translator supports guard relaxation
2539 // (task #2598894), we can enable specialization for all modes.
2540 const bool specialize = tx64->mode() == TransLive;
2541 RuntimeType rtt = tx64->liveType(l, *liveUnit(), specialize);
2542 assert(rtt.isIter() || !rtt.isVagueValue());
2543 // Allocate a new DynLocation to represent this and store it in the
2544 // current map.
2545 dl = m_t->newDynLocation(l, rtt);
2547 if (!l.isLiteral()) {
2548 if (m_varEnvTaint && dl->isValue() && dl->isLocal()) {
2549 dl->rtt = RuntimeType(KindOfAny);
2550 } else if ((m_aliasTaint && dl->canBeAliased()) ||
2551 (rtt.isValue() && rtt.isRef() && ii.dontGuardInner)) {
2552 dl->rtt = rtt.setValueType(KindOfAny);
2554 // Record that we depend on the live type of the specified location
2555 // as well (and remember what the live type was)
2556 m_dependencies[l] = dl;
2559 m_currentMap[l] = dl;
2561 TRACE(2, "recordRead: %s : %s\n", l.pretty().c_str(),
2562 dl->rtt.pretty().c_str());
2563 return dl;
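// Illustrative note (added for clarity, not part of the original
// source): the first time a tracelet reads, say, local 0, the branch
// above consults the live frame via tx64->liveType(); if that reports
// KindOfInt64, a DynLocation for (Local, 0) :: Int is stored in both
// m_currentMap and m_dependencies, so the translation will type-check
// Int on entry. Later reads of the same location hit m_currentMap and
// simply return the cached DynLocation.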
2566 void TraceletContext::recordWrite(DynLocation* dl) {
2567 TRACE(2, "recordWrite: %s : %s\n", dl->location.pretty().c_str(),
2568 dl->rtt.pretty().c_str());
2569 m_currentMap[dl->location] = dl;
2570 m_changeSet.insert(dl->location);
2571 m_deletedSet.erase(dl->location);
2574 void TraceletContext::recordDelete(const Location& l) {
2575 // We should not be trying to delete the rtt of a location that is
2576 // not in m_currentMap.
2577 TRACE(2, "recordDelete: %s\n", l.pretty().c_str());
2578 m_currentMap.erase(l);
2579 m_changeSet.erase(l);
2580 m_deletedSet.insert(l);
2583 void TraceletContext::aliasTaint() {
2584 m_aliasTaint = true;
2585 for (ChangeMap::iterator it = m_currentMap.begin();
2586 it != m_currentMap.end(); ++it) {
2587 DynLocation* dl = it->second;
2588 if (dl->canBeAliased()) {
2589 TRACE(1, "(%s, %" PRId64 ") <- inner type invalidated\n",
2590 it->first.spaceName(), it->first.offset);
2591 RuntimeType newRtt = dl->rtt.setValueType(KindOfAny);
2592 it->second = m_t->newDynLocation(dl->location, newRtt);
2597 void TraceletContext::varEnvTaint() {
2598 m_varEnvTaint = true;
2599 for (ChangeMap::iterator it = m_currentMap.begin();
2600 it != m_currentMap.end(); ++it) {
2601 DynLocation* dl = it->second;
2602 if (dl->isValue() && dl->isLocal()) {
2603 TRACE(1, "(%s, %" PRId64 ") <- type invalidated\n",
2604 it->first.spaceName(), it->first.offset);
2605 it->second = m_t->newDynLocation(dl->location,
2606 RuntimeType(KindOfAny));
2611 void TraceletContext::recordJmp() {
2612 m_numJmps++;
2615 void Translator::postAnalyze(NormalizedInstruction* ni, SrcKey& sk,
2616 Tracelet& t, TraceletContext& tas) {
2617 if (ni->op() == OpBareThis &&
2618 ni->outStack->rtt.isVagueValue()) {
2619 SrcKey src = sk;
2620 const Unit* unit = ni->m_unit;
2621 src.advance(unit);
2622 Op next = toOp(*unit->at(src.offset()));
2623 if (next == OpInstanceOfD
2624 || (next == OpIsTypeC &&
2625 ni->imm[0].u_OA == static_cast<uint8_t>(IsTypeOp::Null))) {
2626 ni->outStack->rtt = RuntimeType(KindOfObject);
2628 return;
2632 static bool isPop(const NormalizedInstruction* instr) {
2633 auto opc = instr->op();
2634 return (opc == OpPopC ||
2635 opc == OpPopV ||
2636 opc == OpPopR);
2639 GuardType::GuardType(DataType outer, DataType inner)
2640 : outerType(outer), innerType(inner), klass(nullptr) {
2643 GuardType::GuardType(const RuntimeType& rtt) {
2644 assert(rtt.isValue());
2645 outerType = rtt.outerType();
2646 innerType = rtt.innerType();
2647 if (rtt.hasKnownClass()) {
2648 klass = rtt.knownClass();
2649 } else if (rtt.hasArrayKind()) {
2650 arrayKindValid = true;
2651 arrayKind = rtt.arrayKind();
2652 } else {
2653 klass = nullptr;
2657 GuardType::GuardType(const GuardType& other) {
2658 *this = other;
2661 const DataType GuardType::getOuterType() const {
2662 return outerType;
2665 const DataType GuardType::getInnerType() const {
2666 return innerType;
2669 const Class* GuardType::getSpecializedClass() const {
2670 return klass;
2673 bool GuardType::isSpecific() const {
2674 return outerType > KindOfNone;
2677 bool GuardType::isSpecialized() const {
2678 return (outerType == KindOfObject && klass != nullptr) ||
2679 (outerType == KindOfArray && arrayKindValid);
2682 bool GuardType::isRelaxed() const {
2683 switch (outerType) {
2684 case KindOfAny:
2685 case KindOfUncounted:
2686 case KindOfUncountedInit:
2687 return true;
2688 default:
2689 return false;
2693 bool GuardType::isGeneric() const {
2694 return outerType == KindOfAny;
2697 bool GuardType::isCounted() const {
2698 switch (outerType) {
2699 case KindOfAny:
2700 case KindOfStaticString:
2701 case KindOfString:
2702 case KindOfArray:
2703 case KindOfObject:
2704 case KindOfResource:
2705 case KindOfRef:
2706 return true;
2707 default:
2708 return false;
2712 bool GuardType::isMoreRefinedThan(const GuardType& other) const {
2713 return getCategory() > other.getCategory();
2716 DataTypeCategory GuardType::getCategory() const {
2717 switch (outerType) {
2718 case KindOfAny: return DataTypeGeneric;
2719 case KindOfUncounted: return DataTypeCountness;
2720 case KindOfUncountedInit: return DataTypeCountnessInit;
2721 default: return (klass != nullptr || arrayKindValid) ?
2722 DataTypeSpecialized :
2723 DataTypeSpecific;
2727 bool GuardType::mayBeUninit() const {
2728 switch (outerType) {
2729 case KindOfAny:
2730 case KindOfUncounted:
2731 case KindOfUninit:
2732 return true;
2733 default:
2734 return false;
2738 GuardType GuardType::getCountness() const {
2739 // Note that translations need to be able to handle KindOfString and
2740 // KindOfStaticString interchangeably. This implies that KindOfStaticString
2741 // needs to be treated as KindOfString, i.e. as possibly counted.
2742 assert(isSpecific());
2743 switch (outerType) {
2744 case KindOfUninit:
2745 case KindOfNull:
2746 case KindOfBoolean:
2747 case KindOfInt64:
2748 case KindOfDouble: return GuardType(KindOfUncounted);
2749 default: return GuardType(outerType, innerType);
2753 GuardType GuardType::dropSpecialization() const {
2754 return GuardType(outerType, innerType);
2757 RuntimeType GuardType::getRuntimeType() const {
2758 if (outerType == KindOfObject && klass != nullptr) {
2759 return RuntimeType(outerType, innerType).setKnownClass(klass);
2761 if (outerType == KindOfArray && arrayKindValid) {
2762 return RuntimeType(outerType, innerType).setArrayKind(arrayKind);
2764 return RuntimeType(outerType, innerType);
2767 bool GuardType::isEqual(GuardType other) const {
2768 return outerType == other.outerType &&
2769 innerType == other.innerType &&
2770 klass == other.klass;
2773 GuardType GuardType::getCountnessInit() const {
2774 assert(isSpecific());
2775 switch (outerType) {
2776 case KindOfNull:
2777 case KindOfBoolean:
2778 case KindOfInt64:
2779 case KindOfDouble: return GuardType(KindOfUncountedInit);
2780 default: return GuardType(outerType, innerType);
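// Illustrative example (added for clarity, not part of the original
// source), tying the relaxation helpers together:
//
//   GuardType g(KindOfInt64);
//   g.getCountness().getOuterType();     // KindOfUncounted
//   g.getCountnessInit().getOuterType(); // KindOfUncountedInit
//   GuardType(KindOfAny).getCategory();  // DataTypeGeneric
//
// i.e. an Int64 guard relaxed by countness only asserts "uncounted",
// and relaxed by countness-init it additionally asserts "not Uninit".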
2784 bool GuardType::hasArrayKind() const {
2785 return arrayKindValid;
2788 ArrayData::ArrayKind GuardType::getArrayKind() const {
2789 return arrayKind;
2793 * Returns true iff loc is consumed by a Pop* instruction in the sequence
2794 * starting at instr.
2796 bool isPopped(DynLocation* loc, NormalizedInstruction* instr) {
2797 for (; instr ; instr = instr->next) {
2798 for (size_t i = 0; i < instr->inputs.size(); i++) {
2799 if (instr->inputs[i] == loc) {
2800 return isPop(instr);
2804 return false;
2807 DataTypeCategory
2808 Translator::getOperandConstraintCategory(NormalizedInstruction* instr,
2809 size_t opndIdx,
2810 const GuardType& specType) {
2811 auto opc = instr->op();
2813 switch (opc) {
2814 case OpSetS:
2815 case OpSetG:
2816 case OpSetL: {
2817 if (opndIdx == 0) { // stack value
2818 // If the output on the stack is simply popped, then we don't
2819 // even care whether the type is ref-counted or not because
2820 // the ref-count is transferred to the target location.
2821 if (!instr->outStack || isPopped(instr->outStack, instr->next)) {
2822 return DataTypeGeneric;
2824 return DataTypeCountness;
2826 if (opc == OpSetL) {
2827 // old local value is dec-refed
2828 assert(opndIdx == 1);
2829 return DataTypeCountness;
2831 return DataTypeSpecific;
2834 case OpCGetL:
2835 return DataTypeCountnessInit;
2837 case OpPushL:
2838 case OpContEnter:
2839 return DataTypeGeneric;
2841 case OpRetC:
2842 case OpRetV:
2843 return DataTypeCountness;
2845 case OpFCall:
2846 // Note: instead of pessimizing calls that may be inlined with
2847 // DataTypeSpecific, we could apply the operand constraints of
2848 // the callee in constrainDep.
2849 return (instr->calleeTrace && !instr->calleeTrace->m_inliningFailed)
2850 ? DataTypeSpecific
2851 : DataTypeGeneric;
2853 case OpFCallArray:
2854 return DataTypeGeneric;
2856 case OpPopC:
2857 case OpPopV:
2858 case OpPopR:
2859 return DataTypeCountness;
2861 case OpContSuspend:
2862 case OpContSuspendK:
2863 case OpContRetC:
2864 // The stack input is teleported to the continuation's m_value field
2865 return DataTypeGeneric;
2867 case OpContHandle:
2868 // This always calls the interpreter
2869 return DataTypeGeneric;
2871 case OpAddElemC:
2872 // The stack input is teleported to the array
2873 return opndIdx == 0 ? DataTypeGeneric : DataTypeSpecific;
2875 case OpIdx:
2876 case OpArrayIdx:
2877 // The default value (w/ opndIdx 0) is simply passed to a helper,
2878 // which takes care of dec-refing it if needed
2879 return opndIdx == 0 ? DataTypeGeneric : DataTypeSpecific;
2882 // Collections and Iterator related specializations
2884 case OpCGetM:
2885 case OpIssetM:
2886 case OpFPassM:
2887 if (specType.getOuterType() == KindOfArray) {
2888 if (instr->inputs.size() == 2 && opndIdx == 0) {
2889 if (specType.hasArrayKind() &&
2890 specType.getArrayKind() == ArrayData::ArrayKind::kPackedKind &&
2891 instr->inputs[1]->isInt()) {
2892 return DataTypeSpecialized;
2895 } else if (specType.getOuterType() == KindOfObject) {
2896 if (instr->inputs.size() == 2 && opndIdx == 0) {
2897 const Class* klass = specType.getSpecializedClass();
2898 if (klass != nullptr && isOptimizableCollectionClass(klass)) {
2899 return DataTypeSpecialized;
2903 return DataTypeSpecific;
2904 case OpSetM:
2905 if (specType.getOuterType() == KindOfObject) {
2906 if (instr->inputs.size() == 3 && opndIdx == 1) {
2907 const Class* klass = specType.getSpecializedClass();
2908 if (klass != nullptr && isOptimizableCollectionClass(klass)) {
2909 return DataTypeSpecialized;
2913 return DataTypeSpecific;
2915 default:
2916 return DataTypeSpecific;
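// Illustrative example (added for clarity, not part of the original
// source): in the common "SetL; PopC" pattern the SetL's stack output
// is popped immediately, so operand 0 (the assigned value) relaxes all
// the way to DataTypeGeneric -- no type guard is needed on it -- while
// operand 1 (the old local value) still needs DataTypeCountness so the
// dec-ref of the overwritten value can be generated correctly.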
2920 GuardType
2921 Translator::getOperandConstraintType(NormalizedInstruction* instr,
2922 size_t opndIdx,
2923 const GuardType& specType) {
2924 DataTypeCategory dtCategory = getOperandConstraintCategory(instr,
2925 opndIdx,
2926 specType);
2927 FTRACE(4, "got constraint {} for {}\n", dtCategory, *instr);
2928 switch (dtCategory) {
2929 case DataTypeGeneric: return GuardType(KindOfAny);
2930 case DataTypeCountness: return specType.getCountness();
2931 case DataTypeCountnessInit: return specType.getCountnessInit();
2932 case DataTypeSpecific: return specType.dropSpecialization();
2933 case DataTypeSpecialized:
2934 return specType;
2936 return specType;
2939 void Translator::constrainOperandType(GuardType& relxType,
2940 NormalizedInstruction* instr,
2941 size_t opndIdx,
2942 const GuardType& specType) {
2943 if (relxType.isEqual(specType)) return; // Can't constrain any further
2945 GuardType consType = getOperandConstraintType(instr, opndIdx, specType);
2946 if (consType.isMoreRefinedThan(relxType)) {
2947 FTRACE(3, "constraining from {}({}) to {}({})\n",
2948 relxType.getOuterType(), relxType.getInnerType(),
2949 consType.getOuterType(), consType.getInnerType());
2950 relxType = consType;
2955 * This method looks at every use of loc in the stream of instructions
2956 * starting at firstInstr and constrains the relxType towards specType
2957 * according to each use. Note that this method not only looks at
2958 * direct uses of loc, but it also recursively looks at any other
2959 * DynLocs whose type depends on loc's type.
2961 void Translator::constrainDep(const DynLocation* loc,
2962 NormalizedInstruction* firstInstr,
2963 GuardType specType,
2964 GuardType& relxType) {
2965 if (relxType.isEqual(specType)) return; // can't constrain it any further
2967 FTRACE(3, "\nconstraining dep {}\n", loc->pretty());
2968 for (NormalizedInstruction* instr = firstInstr; instr; instr = instr->next) {
2969 if (instr->noOp) continue;
2970 auto opc = instr->op();
2971 size_t nInputs = instr->inputs.size();
2972 for (size_t i = 0; i < nInputs; i++) {
2973 DynLocation* usedLoc = instr->inputs[i];
2974 if (usedLoc == loc) {
2975 constrainOperandType(relxType, instr, i, specType);
2977 // If the instruction's input doesn't propagate to its output,
2978 // then we're done. Otherwise, we need to constrain relxType
2979 // based on the uses of the output.
2980 if (!outputDependsOnInput(opc)) {
2981 FTRACE(4, "output doesn't propagate to input; stopping\n");
2982 continue;
2985 bool outputIsStackInput = false;
2986 const DynLocation* outStack = instr->outStack;
2987 const DynLocation* outLocal = instr->outLocal;
2989 switch (instrInfo[opc].type) {
2990 case OutSameAsInput:
2991 outputIsStackInput = true;
2992 break;
2994 case OutCInput:
2995 outputIsStackInput = true;
2996 // fall-through
2997 case OutCInputL:
2998 if (specType.getOuterType() == KindOfRef &&
2999 instr->isAnyOutputUsed()) {
3000 // Value gets unboxed along the way. Pessimize it for now.
3001 if (!relxType.isSpecialized()) {
3002 relxType = specType.dropSpecialization();
3004 return;
3006 break;
3008 default:
3009 if (!relxType.isSpecialized()) {
3010 relxType = specType.dropSpecialization();
3012 return;
3015 // The instruction input's type propagates to the outputs.
3016 // So constrain the dependence further based on uses of outputs.
3017 if ((i == 0 && outputIsStackInput) || // stack input @ [0]
3018 (i == nInputs - 1 && !outputIsStackInput)) { // local input is last
3019 if (outStack && !outStack->rtt.isVagueValue()) {
3020 // For SetL, getOperandConstraintCategory() generates
3021 // DataTypeGeneric if the stack output is popped. In this
3022 // case, don't further constrain the stack output,
3023 // otherwise the Pop* would make it a DataTypeCountness.
3024 if (opc != OpSetL || !relxType.isGeneric()) {
3025 FTRACE(3, "constraining outStack dep {}\n", outStack->pretty());
3026 constrainDep(outStack, instr->next, specType, relxType);
3030 // PushL has a local output that doesn't depend on the input
3031 // type but its stack output does, so we special case it here.
3032 if (outLocal && !outLocal->rtt.isVagueValue() &&
3033 opc != OpPushL) {
3034 FTRACE(3, "constraining outLocal dep {}\n", outLocal->pretty());
3035 constrainDep(outLocal, instr->next, specType, relxType);
3044 * This method looks at all the uses of the tracelet dependencies in the
3045 * instruction stream and tries to relax the type associated with each location.
3047 void Translator::relaxDeps(Tracelet& tclet, TraceletContext& tctxt) {
3048 DynLocTypeMap locRelxTypeMap;
3050 // Initialize type maps. Relaxed types start off very relaxed, and then
3051 // they may get more specific depending on how the instructions use them.
3052 FTRACE(3, "starting relaxDeps\n");
3053 DepMap& deps = tctxt.m_dependencies;
3054 for (auto depIt = deps.begin(); depIt != deps.end(); depIt++) {
3055 DynLocation* loc = depIt->second;
3056 const RuntimeType& rtt = depIt->second->rtt;
3057 if (rtt.isValue() && !rtt.isVagueValue() && !rtt.isClass() &&
3058 !loc->location.isThis()) {
3059 GuardType relxType = GuardType(KindOfAny);
3060 GuardType specType = GuardType(rtt);
3061 constrainDep(loc, tclet.m_instrStream.first, specType, relxType);
3062 if (!specType.isEqual(relxType)) {
3063 locRelxTypeMap[loc] = relxType;
3068 // For each dependency, if we found a more relaxed type for it, use
3069 // that type.
3070 FTRACE(3, "applying relaxed deps\n");
3071 for (auto& kv : locRelxTypeMap) {
3072 DynLocation* loc = kv.first;
3073 const GuardType& relxType = kv.second;
3074 TRACE(1, "relaxDeps: Loc: %s oldType: %s => newType: %s\n",
3075 loc->location.pretty().c_str(),
3076 deps[loc->location]->rtt.pretty().c_str(),
3077 RuntimeType(relxType.getOuterType(),
3078 relxType.getInnerType(),
3079 relxType.getSpecializedClass()).pretty().c_str());
3080 assert(deps[loc->location] == loc);
3081 deps[loc->location]->rtt = relxType.getRuntimeType();
3083 FTRACE(3, "relaxDeps finished\n");
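// Worked example (added for clarity, not part of the original source):
// if a tracelet's only use of local 0 is "CGetL; PopC" and the live
// type of the local is Int, constrainDep sees DataTypeCountnessInit
// from the CGetL and DataTypeCountness from the PopC of its output, so
// the dependency's guard is relaxed from KindOfInt64 to
// KindOfUncountedInit: any uncounted, initialized value satisfies it.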
3086 bool callDestroysLocals(const NormalizedInstruction& inst,
3087 const Func* caller) {
3088 auto* unit = caller->unit();
3089 auto checkTaintId = [&](Id id) {
3090 static const StringData* s_extract = makeStaticString("extract");
3091 return unit->lookupLitstrId(id)->isame(s_extract);
3094 if (inst.op() == OpFCallBuiltin) return checkTaintId(inst.imm[2].u_SA);
3095 if (!isFCallStar(inst.op())) return false;
3097 const FPIEnt *fpi = caller->findFPI(inst.source.offset());
3098 assert(fpi);
3099 Op* fpushPc = (Op*)unit->at(fpi->m_fpushOff);
3100 auto const op = *fpushPc;
3102 if (op == OpFPushFunc) {
3103 // If the call has any arguments, the FPushFunc will be in a different
3104 // tracelet -- the tracelet will break on every FPass* because the reffiness
3105 // of the callee isn't knowable. So we have to say the call destroys locals,
3106 // to be conservative. If there aren't any arguments, then it can't destroy
3107 // locals -- even if the call is to extract(), there's no argument, so it
3108 // won't do anything.
3109 auto const numArgs = inst.imm[0].u_IVA;
3110 return (numArgs != 0);
3112 if (op == OpFPushFuncD) return checkTaintId(getImm(fpushPc, 1).u_SA);
3113 if (op == OpFPushFuncU) {
3114 return checkTaintId(getImm(fpushPc, 1).u_SA) ||
3115 checkTaintId(getImm(fpushPc, 2).u_SA);
3118 return false;
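// Illustrative note (added for clarity; the exact bytecode below is an
// assumption, not taken from this file): a direct "extract($arr)" call
// compiles to roughly FPushFuncD 1 "extract"; FPass* 0; FCall 1, so
// checkTaintId() fires on the FPushFuncD's name literal and the
// caller's locals are treated as clobbered. A call through a variable,
// "$f($arr)", uses FPushFunc with a non-zero argument count, which puts
// the FPush in a different tracelet, so we conservatively report that
// the call may destroy locals.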
3122  * Check whether a given FCall should be analyzed for possible
3123 * inlining or not.
3125 bool shouldAnalyzeCallee(const NormalizedInstruction* fcall,
3126 const FPIEnt* fpi,
3127 const Op pushOp,
3128 const int depth) {
3129 auto const numArgs = fcall->imm[0].u_IVA;
3130 auto const target = fcall->funcd;
3132 if (!RuntimeOption::RepoAuthoritative) return false;
3134 if (pushOp != OpFPushFuncD && pushOp != OpFPushObjMethodD
3135 && pushOp != OpFPushCtorD && pushOp != OpFPushCtor
3136 && pushOp != OpFPushClsMethodD) {
3137 FTRACE(1, "analyzeCallee: push op ({}) was not supported\n",
3138 opcodeToName(pushOp));
3139 return false;
3142 if (!target) {
3143 FTRACE(1, "analyzeCallee: target func not known\n");
3144 return false;
3146 if (target->isCPPBuiltin()) {
3147 FTRACE(1, "analyzeCallee: target func is a builtin\n");
3148 return false;
3151 if (depth + 1 > RuntimeOption::EvalHHIRInliningMaxDepth) {
3152 FTRACE(1, "analyzeCallee: max inlining depth reached\n");
3153 return false;
3156 // TODO(2716400): support __call and friends
3157 if (numArgs != target->numParams()) {
3158 FTRACE(1, "analyzeCallee: param count mismatch {} != {}\n",
3159 numArgs, target->numParams());
3160 return false;
3163 if (pushOp == OpFPushClsMethodD && target->mayHaveThis()) {
3164 FTRACE(1, "analyzeCallee: not inlining static calls which may have a "
3165 "this pointer\n");
3166 return false;
3169 // Find the fpush and ensure it's in this tracelet---refuse to
3170 // inline if any calls are needed in order to prepare arguments.
3171 for (auto* ni = fcall->prev; ni; ni = ni->prev) {
3172 if (ni->source.offset() == fpi->m_fpushOff) {
3173 if (ni->op() == OpFPushObjMethodD ||
3174 ni->op() == OpFPushObjMethod) {
3175 if (!ni->inputs[ni->op() == OpFPushObjMethod]->isObject()) {
3177 * In this case, we're going to throw or fatal when we
3178 * execute the FPush*. But we have statically proven that
3179 * if we get to the FCall, then target is the Func that will
3180 * be called. So the FCall is unreachable - but unfortunately,
3181 * various assumptions by the jit will be violated if we try
3182 * to inline it. So just don't inline in that case.
3184 return false;
3187 return true;
3189 if (isFCallStar(ni->op()) || ni->op() == OpFCallBuiltin) {
3190 FTRACE(1, "analyzeCallee: fpi region contained other calls\n");
3191 return false;
3194 FTRACE(1, "analyzeCallee: push instruction was in a different "
3195 "tracelet\n");
3196 return false;
3199 void Translator::analyzeCallee(TraceletContext& tas,
3200 Tracelet& parent,
3201 NormalizedInstruction* fcall) {
3202 auto const callerFunc = fcall->func();
3203 auto const fpi = callerFunc->findFPI(fcall->source.offset());
3204 auto const pushOp = fcall->m_unit->getOpcode(fpi->m_fpushOff);
3206 if (!shouldAnalyzeCallee(fcall, fpi, pushOp, analysisDepth())) return;
3208 auto const numArgs = fcall->imm[0].u_IVA;
3209 auto const target = fcall->funcd;
3212 * Prepare a map for all the known information about the argument
3213 * types.
3215 * Also, fill out KindOfUninit for any remaining locals. The point
3216 * here is that the subtrace can't call liveType for a local or
3217 * stack location (since our ActRec is fake), so we need them all in
3218 * the TraceletContext.
3220 * If any of the argument types are unknown (including inner-types
3221 * of KindOfRefs), we don't really try to analyze the callee. It
3222 * might be possible to do this but we'll need to modify the
3223 * analyzer to support unknown input types before there are any
3224 * NormalizedInstructions in the Tracelet.
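 * Illustrative example (added for clarity, not part of the original
 * comment): for a one-argument call f($x) made with an Int on the
 * caller's stack, the loop below seeds initialMap with the callee's
 * Local 0 :: Int, fills KindOfUninit for each remaining callee local,
 * and records the caller's stack slot in callerArgLocs so the FCall can
 * later be made to depend on it.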
3226 TypeMap initialMap;
3227 LocationSet callerArgLocs;
3228 for (int i = 0; i < numArgs; ++i) {
3229 auto callerLoc = Location(Location::Stack, fcall->stackOffset - i - 1);
3230 auto calleeLoc = Location(Location::Local, numArgs - i - 1);
3231 auto type = tas.currentType(callerLoc);
3233 callerArgLocs.insert(callerLoc);
3235 if (type.isVagueValue()) {
3236 FTRACE(1, "analyzeCallee: {} has unknown type\n", callerLoc.pretty());
3237 return;
3239 if (type.isValue() && type.isRef() && type.innerType() == KindOfAny) {
3240 FTRACE(1, "analyzeCallee: {} has unknown inner-refdata type\n",
3241 callerLoc.pretty());
3242 return;
3245 FTRACE(2, "mapping arg{} locs {} -> {} :: {}\n",
3246 numArgs - i - 1,
3247 callerLoc.pretty(),
3248 calleeLoc.pretty(),
3249 type.pretty());
3250 initialMap[calleeLoc] = type;
3252 for (int i = numArgs; i < target->numLocals(); ++i) {
3253 initialMap[Location(Location::Local, i)] = RuntimeType(KindOfUninit);
3257 * When reentering analyze to generate a Tracelet for a callee,
3258 * currently we handle this by creating a fake ActRec on the stack.
3260 * This is mostly a compromise to deal with existing code during the
3261 * analysis phase which pretty liberally inspects live VM state.
3263 ActRec fakeAR;
3264 fakeAR.m_savedRbp = reinterpret_cast<uintptr_t>(liveFrame());
3265 fakeAR.m_savedRip = 0xbaabaa; // should never be inspected
3266 fakeAR.m_func = fcall->funcd;
3267 fakeAR.m_soff = 0xb00b00; // should never be inspected
3268 fakeAR.m_numArgsAndCtorFlag = numArgs;
3269 fakeAR.m_varEnv = nullptr;
3272 * Even when inlining an object method, we can leave the m_this as
3273 * null. See outThisObjectType().
3275 fakeAR.m_this = nullptr;
3277 FTRACE(1, "analyzing sub trace =================================\n");
3278 auto const oldFP = vmfp();
3279 auto const oldSP = vmsp();
3280 auto const oldPC = vmpc();
3281 auto const oldAnalyzeCalleeDepth = m_analysisDepth++;
3282 vmpc() = nullptr; // should never be used
3283 vmsp() = nullptr; // should never be used
3284 vmfp() = reinterpret_cast<Cell*>(&fakeAR);
3285 auto restoreFrame = [&]{
3286 vmfp() = oldFP;
3287 vmsp() = oldSP;
3288 vmpc() = oldPC;
3289 m_analysisDepth = oldAnalyzeCalleeDepth;
3291 SCOPE_EXIT {
3292 // It's ok to restoreFrame() twice---we have it in this scope
3293 // handler to ensure it still happens if we exit via an exception.
3294 restoreFrame();
3295 FTRACE(1, "finished sub trace ===================================\n");
3298 auto subTrace = analyze(SrcKey(target, target->base()), initialMap);
3301 * Verify the target trace actually ended with a return, or we have
3302 * no business doing anything based on it right now.
3304 if (!subTrace->m_instrStream.last ||
3305 (subTrace->m_instrStream.last->op() != OpRetC &&
3306 subTrace->m_instrStream.last->op() != OpRetV)) {
3307 FTRACE(1, "analyzeCallee: callee did not end in a return\n");
3308 return;
3312 * If the IR can't inline this, give up now. Below we're going to
3313 * start making changes to the tracelet that is making the call
3314 * (potentially increasing the specificity of guards), and we don't
3315 * want to do that unnecessarily.
3317 if (!JIT::shouldIRInline(callerFunc, target, *subTrace)) {
3318 if (UNLIKELY(Stats::enabledAny() && getenv("HHVM_STATS_FAILEDINL"))) {
3319 subTrace->m_inliningFailed = true;
3320 // Save the trace for stats purposes but don't waste time doing any
3321 // further processing since we know we won't inline it.
3322 fcall->calleeTrace = std::move(subTrace);
3324 return;
3328 * Disabled for now:
3330 * Propagate the return type to our caller. If the return type is
3331 * not vague, it will hold if we can inline the trace.
3333 * This isn't really a sensible thing to do if we aren't also going
3334 * to inline the callee, however, because the return type may only
3335 * be what it is due to other output predictions (CGetMs or FCall)
3336 * inside the callee. This means we would need to check the return
3337 * value in the caller still as if it were a predicted return type.
3339 Location retVal(Location::Stack, 0);
3340 auto it = subTrace->m_changes.find(retVal);
3341 assert(it != subTrace->m_changes.end());
3342 FTRACE(1, "subtrace return: {}\n", it->second->pretty());
3343 if (false) {
3344 if (!it->second->rtt.isVagueValue() && !it->second->rtt.isRef()) {
3345 FTRACE(1, "changing callee's return type from {} to {}\n",
3346 fcall->outStack->rtt.pretty(),
3347 it->second->pretty());
3349 fcall->outputPredicted = true;
3350 fcall->outputPredictionStatic = false;
3351 fcall->outStack = parent.newDynLocation(fcall->outStack->location,
3352 it->second->rtt);
3353 tas.recordWrite(fcall->outStack);
3358 * In order for relaxDeps not to relax guards on some things we may
3359 * potentially have depended on here, we need to ensure that the
3360 * call instruction depends on all the inputs we've used.
3362 * (We could do better by letting relaxDeps look through the
3363 * callee.)
3365 restoreFrame();
3366 for (auto& loc : callerArgLocs) {
3367 fcall->inputs.push_back(tas.recordRead(InputInfo(loc), true));
3370 FTRACE(1, "analyzeCallee: inline candidate\n");
3371 fcall->calleeTrace = std::move(subTrace);
3374 static bool instrBreaksProfileBB(const NormalizedInstruction* instr) {
3375 return (instrIsNonCallControlFlow(instr->op()) ||
3376 instr->outputPredicted ||
3377 instr->op() == OpClsCnsD); // side exits if misses in the RDS
3381 * analyze --
3383 * Given a sequence of bytecodes, return our tracelet IR.
3385  * The purpose of this analysis is to determine:
3387 * 1. Pre-conditions: What locations get read before they get written to:
3388 * we will need typechecks for these and we will want to load them into
3389 * registers. (m_dependencies)
3391 * 2. Post-conditions: the locations that have been written to and are
3392 * still live at the end of the tracelet. We need to allocate registers
3393  * for these and we need to spill them at the end of the tracelet.
3394 * (m_changes)
3396 * 3. Determine the runtime types for each instruction's input locations
3397 * and output locations.
3399 * The main analysis works by doing a single pass over the instructions. It
3400 * effectively simulates the execution of each instruction, updating its
3401 * knowledge about types as it goes.
3403 * The TraceletContext class is used to keep track of the current state of
3404 * the world. Initially it is empty, and when the inputs for the first
3405 * instruction are analyzed we call recordRead(). The recordRead() function
3406 * in turn inspects the live types of the inputs and adds them to the type
3407 * map. This serves two purposes: (1) it figures out what typechecks this
3408 * tracelet needs; and (2) it guarantees that the code we generate will
3409 * satisfy the live types that are about to be passed in.
3411 * Over time the TraceletContext's type map will change. However, we need to
3412 * record what the types _were_ right before and right after a given
3413 * instruction executes. This is where the NormalizedInstruction class comes
3414 * in. We store the RuntimeTypes from the TraceletContext right before an
3415 * instruction executes into the NormalizedInstruction's 'inputs' field, and
3416 * we store the RuntimeTypes from the TraceletContext right after the
3417 * instruction executes into the various output fields.
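 * Illustrative example (added for clarity; the bytecode sketch is an
 * approximation, not taken from this file): for "$b = $a + 1;" the body
 * is roughly CGetL $a; Int 1; Add; SetL $b; PopC. The CGetL records a
 * read of $a, so $a's live type (say Int) lands in m_dependencies and
 * becomes an entry type-check; the SetL records a write, so $b :: Int
 * ends up in the change set as a post-condition; and the types flowing
 * through the Add are captured on its NormalizedInstruction's inputs
 * and output fields.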
3419 std::unique_ptr<Tracelet> Translator::analyze(SrcKey sk,
3420 const TypeMap& initialTypes) {
3421 std::unique_ptr<Tracelet> retval(new Tracelet());
3422 auto func = sk.func();
3423 auto unit = sk.unit();
3424 auto& t = *retval;
3425 t.m_sk = sk;
3427 DEBUG_ONLY const char* file = unit->filepath()->data();
3428 DEBUG_ONLY const int lineNum = unit->getLineNumber(t.m_sk.offset());
3429 DEBUG_ONLY const char* funcName = func->fullName()->data();
3431 TRACE(1, "Translator::analyze %s:%d %s\n", file, lineNum, funcName);
3432 TraceletContext tas(&t, initialTypes);
3433 int stackFrameOffset = 0;
3434 int oldStackFrameOffset = 0;
3436 // numOpcodes counts the original number of opcodes in a tracelet
3437 // before the translator does any optimization
3438 t.m_numOpcodes = 0;
3439 Unit::MetaHandle metaHand;
3441 for (;; sk.advance(unit)) {
3442 head:
3443 NormalizedInstruction* ni = t.newNormalizedInstruction();
3444 ni->source = sk;
3445 ni->stackOffset = stackFrameOffset;
3446 ni->funcd = t.m_arState.knownFunc();
3447 ni->m_unit = unit;
3448 ni->breaksTracelet = false;
3449 ni->changesPC = opcodeChangesPC(ni->op());
3450 ni->fuseBranch = false;
3452 assert(!t.m_analysisFailed);
3453 oldStackFrameOffset = stackFrameOffset;
3454 populateImmediates(*ni);
3456 SKTRACE(1, sk, "stack args: virtual sfo now %d\n", stackFrameOffset);
3458 // Translation could fail entirely (because of an unknown opcode), or
3459 // encounter an input that cannot be computed.
3460 try {
3461 if (isTypeAssert(ni->op()) || isTypePredict(ni->op())) {
3462 handleAssertionEffects(t, *ni, tas, stackFrameOffset);
3464 preInputApplyMetaData(metaHand, ni);
3465 InputInfos inputInfos;
3466 getInputsImpl(
3467 t.m_sk, ni, stackFrameOffset, inputInfos, sk.func(),
3468 [&](int i) {
3469 return Type(
3470 tas.currentType(Location(Location::Local, i)));
3474 bool noOp = applyInputMetaData(metaHand, ni, tas, inputInfos);
3475 if (noOp) {
3476 t.m_instrStream.append(ni);
3477 ++t.m_numOpcodes;
3478 stackFrameOffset = oldStackFrameOffset;
3479 continue;
3481 if (inputInfos.needsRefCheck) {
3482 // Drive the arState machine; if it is going to throw an input
3483 // exception, do so here.
3484 int argNum = ni->imm[0].u_IVA;
3485 // instrSpToArDelta() returns the delta relative to the sp at the
3486 // beginning of the instruction, but checkByRef() wants the delta
3487 // relative to the sp at the beginning of the tracelet, so we adjust
3488 // by subtracting ni->stackOffset
3489 int entryArDelta = instrSpToArDelta((Op*)ni->pc()) - ni->stackOffset;
3490 ni->preppedByRef = t.m_arState.checkByRef(argNum, entryArDelta,
3491 &t.m_refDeps);
3492 SKTRACE(1, sk, "passing arg%d by %s\n", argNum,
3493 ni->preppedByRef ? "reference" : "value");
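      // Record a read of each input location so the TraceletContext picks up
      // the type guards and dependencies this tracelet needs, bailing out on
      // inputs whose type we cannot usefully guard on.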
3496 for (unsigned int i = 0; i < inputInfos.size(); i++) {
3497 SKTRACE(2, sk, "typing input %d\n", i);
3498 const InputInfo& ii = inputInfos[i];
3499 DynLocation* dl = tas.recordRead(ii, true);
3500 const RuntimeType& rtt = dl->rtt;
3501 // Some instructions are able to handle an input with an unknown type
3502 if (!ii.dontBreak && !ii.dontGuard) {
3503 if (rtt.isVagueValue()) {
3504 // Consumed a "poisoned" output: e.g., result of an array
3505 // deref.
3506 throwUnknownInput();
3508 if (!ni->ignoreInnerType && !ii.dontGuardInner) {
3509 if (rtt.isValue() && rtt.isRef() &&
3510 rtt.innerType() == KindOfAny) {
3511 throwUnknownInput();
3514 if ((m_mode == TransProfile || m_mode == TransOptimize) &&
3515 t.m_numOpcodes > 0) {
3516 // We want to break blocks at every instruction that consumes a ref,
3517 // so that we avoid side exits. Therefore, instructions that consume
3518 // a ref can only be the first in the tracelet/block.
3519 if (rtt.isValue() && rtt.isRef()) {
3520 throwUnknownInput();
3524 ni->inputs.push_back(dl);
3526 } catch (TranslationFailedExc& tfe) {
3527 SKTRACE(1, sk, "Translator fail: %s\n", tfe.what());
3528 if (!t.m_numOpcodes) {
3529 t.m_analysisFailed = true;
3530 t.m_instrStream.append(ni);
3531 ++t.m_numOpcodes;
3533 goto breakBB;
3534 } catch (UnknownInputExc& uie) {
3535 // Subtle: if this instruction consumes an unknown runtime type,
3536 // break the BB on the *previous* instruction. We know that a
3537 // previous instruction exists, because the KindOfAny must
3538 // have come from somewhere.
3539 always_assert(t.m_instrStream.last);
3540 SKTRACE(2, sk, "Consumed unknown input (%s:%d); breaking BB at "
3541 "predecessor\n", uie.m_file, uie.m_line);
3542 goto breakBB;
3545 SKTRACE(2, sk, "stack args: virtual sfo now %d\n", stackFrameOffset);
3547 bool doVarEnvTaint; // initialized by reference.
3548 try {
3549 getOutputs(t, ni, stackFrameOffset, doVarEnvTaint);
3550 } catch (TranslationFailedExc& tfe) {
3551 SKTRACE(1, sk, "Translator getOutputs fail: %s\n", tfe.what());
3552 if (!t.m_numOpcodes) {
3553 t.m_analysisFailed = true;
3554 t.m_instrStream.append(ni);
3555 ++t.m_numOpcodes;
3557 goto breakBB;
3560 if (isFCallStar(ni->op())) t.m_arState.pop();
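    // Calls that can scribble on the caller's frame (e.g. via extract())
    // taint the variable environment, invalidating our local type info.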
3561 if (doVarEnvTaint || callDestroysLocals(*ni, func)) tas.varEnvTaint();
3563 DynLocation* outputs[] = { ni->outStack,
3564 ni->outLocal, ni->outLocal2,
3565 ni->outStack2, ni->outStack3 };
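    // Record every output this instruction produces so that subsequent
    // instructions in the tracelet see the new types.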
3566 for (size_t i = 0; i < sizeof(outputs) / sizeof(*outputs); ++i) {
3567 if (outputs[i]) {
3568 DynLocation* o = outputs[i];
3569 SKTRACE(2, sk, "inserting output t(%d->%d) #(%s, %" PRId64 ")\n",
3570 o->rtt.outerType(), o->rtt.innerType(),
3571 o->location.spaceName(), o->location.offset);
3572 tas.recordWrite(o);
3575 if (ni->op() == OpCreateCont || ni->op() == OpAsyncESuspend) {
3576 // CreateCont stores Uninit to all locals but NormalizedInstruction
3577 // doesn't have enough output fields, so we special case it here.
3578 auto const numLocals = ni->func()->numLocals();
3579 for (unsigned i = 0; i < numLocals; ++i) {
3580 tas.recordWrite(t.newDynLocation(Location(Location::Local, i),
3581 KindOfUninit));
3585 SKTRACE(1, sk, "stack args: virtual sfo now %d\n", stackFrameOffset);
3587 // This assert failing means that your instruction has an
3588 // inconsistent row in the InstrInfo table; the stackDelta doesn't
3589 // agree with the inputs and outputs.
3590 assert(getStackDelta(*ni) == (stackFrameOffset - oldStackFrameOffset));
3591 // If this instruction decreased the depth of the stack, mark the
3592 // appropriate stack locations as "dead". But we need to leave
3593 // them in the TraceletContext until after analyzeCallee (if this
3594 // is an FCall).
3595 if (stackFrameOffset < oldStackFrameOffset) {
3596 for (int i = stackFrameOffset; i < oldStackFrameOffset; ++i) {
3597 ni->deadLocs.push_back(Location(Location::Stack, i));
3601 if (ni->outputPredicted) {
3602 assert(ni->outStack);
3603 ni->outPred = Type(ni->outStack);
3606 t.m_stackChange += getStackDelta(*ni);
3608 t.m_instrStream.append(ni);
3609 ++t.m_numOpcodes;
3612 * The annotation step attempts to track Func*'s associated with
3613 * given FCalls when the FPush is in a different tracelet.
3615 * When we're analyzing a callee, we can't do this because we may
3616 * have class information in some of our RuntimeTypes that is only
3617 * true because of who the caller was. (Normally it is only there
3618 * if it came from static analysis.)
3620 if (analysisDepth() == 0) {
3621 annotate(ni);
3624 if (ni->op() == OpFCall) {
3625 analyzeCallee(tas, t, ni);
3628 for (auto& l : ni->deadLocs) {
3629 tas.recordDelete(l);
3632 if (m_mode == TransProfile && instrBreaksProfileBB(ni)) {
3633 SKTRACE(1, sk, "BB broken\n");
3634 sk.advance(unit);
3635 goto breakBB;
3638 // Check if we need to break the tracelet.
3640 // If we've gotten this far, it mostly boils down to control-flow
3641 // instructions. However, we'll trace through a few unconditional jmps.
3642 if (ni->op() == OpJmp &&
3643 ni->imm[0].u_BA > 0 &&
3644 tas.m_numJmps < MaxJmpsTracedThrough) {
3645 // Continue tracing through jumps. To prevent pathologies, only trace
3646 // through a finite number of forward jumps.
3647 SKTRACE(1, sk, "greedily continuing through %dth jmp + %d\n",
3648 tas.m_numJmps, ni->imm[0].u_IA);
3649 tas.recordJmp();
3650 sk = SrcKey(func, sk.offset() + ni->imm[0].u_IA);
3651 goto head; // don't advance sk
3652 } else if (opcodeBreaksBB(ni->op()) ||
3653 (dontGuardAnyInputs(ni->op()) && opcodeChangesPC(ni->op()))) {
3654 SKTRACE(1, sk, "BB broken\n");
3655 sk.advance(unit);
3656 goto breakBB;
3658 postAnalyze(ni, sk, t, tas);
3660 breakBB:
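  // Epilogue: trim trailing instructions that are better left to the next
  // tracelet, optionally relax guards, and copy the accumulated dependency
  // and change sets into the Tracelet.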
3661 NormalizedInstruction* ni = t.m_instrStream.last;
3662 while (ni) {
3663 // We don't want to end a tracelet with a literal; it will cause the literal
3664 // to be pushed on the stack, and the next tracelet will have to guard on
3665 // the type. Similarly, This, Self and Parent will lose type information
3666 // that's only useful in the following tracelet.
3667 if (isLiteral(ni->op()) ||
3668 isThisSelfOrParent(ni->op()) ||
3669 isTypeAssert(ni->op()) || isTypePredict(ni->op())) {
3670 ni = ni->prev;
3671 continue;
3673 break;
3675 if (ni) {
3676 while (ni != t.m_instrStream.last) {
3677 t.m_stackChange -= getStackDelta(*t.m_instrStream.last);
3678 sk = t.m_instrStream.last->source;
3679 t.m_instrStream.remove(t.m_instrStream.last);
3680 --t.m_numOpcodes;
3684 // translateRegion doesn't support guard relaxation/specialization yet
3685 if (RuntimeOption::EvalHHBCRelaxGuards &&
3686 m_mode != TransProfile && m_mode != TransOptimize) {
3687 relaxDeps(t, tas);
3690 // Mark the last instruction appropriately
3691 assert(t.m_instrStream.last);
3692 t.m_instrStream.last->breaksTracelet = true;
3693 // Populate t.m_changes, t.m_resolvedDeps, t.m_dependencies
3694 t.m_dependencies = tas.m_dependencies;
3695 t.m_resolvedDeps = tas.m_resolvedDeps;
3696 t.m_changes.clear();
3697 LocationSet::iterator it = tas.m_changeSet.begin();
3698 for (; it != tas.m_changeSet.end(); ++it) {
3699 t.m_changes[*it] = tas.m_currentMap[*it];
3702 TRACE(1, "Tracelet done: stack delta %d\n", t.m_stackChange);
3703 return retval;
3706 Translator::Translator()
3707 : uniqueStubs{}
3708 , m_createdTime(Timer::GetCurrentTimeMicros())
3709 , m_mode(TransInvalid)
3710 , m_profData(nullptr)
3711 , m_analysisDepth(0)
3713 initInstrInfo();
3714 if (RuntimeOption::EvalJitPGO) {
3715 m_profData = new ProfData();
3719 Translator::~Translator() {
3720 delete m_profData;
3721 m_profData = nullptr;
3724 bool
3725 Translator::isSrcKeyInBL(const SrcKey& sk) {
3726 auto unit = sk.unit();
3727 if (unit->isInterpretOnly()) return true;
3728 Lock l(m_dbgBlacklistLock);
3729 if (m_dbgBLSrcKey.find(sk) != m_dbgBLSrcKey.end()) {
3730 return true;
3732 for (PC pc = unit->at(sk.offset()); !opcodeBreaksBB(toOp(*pc));
3733 pc += instrLen((Op*)pc)) {
3734 if (m_dbgBLPC.checkPC(pc)) {
3735 m_dbgBLSrcKey.insert(sk);
3736 return true;
3739 return false;
3742 void
3743 Translator::clearDbgBL() {
3744 Lock l(m_dbgBlacklistLock);
3745 m_dbgBLSrcKey.clear();
3746 m_dbgBLPC.clear();
3749 bool
3750 Translator::addDbgBLPC(PC pc) {
3751 Lock l(m_dbgBlacklistLock);
3752 if (m_dbgBLPC.checkPC(pc)) {
3753 // already there
3754 return false;
3756 m_dbgBLPC.addPC(pc);
3757 return true;
3760 void populateImmediates(NormalizedInstruction& inst) {
3761 for (int i = 0; i < numImmediates(inst.op()); i++) {
3762 inst.imm[i] = getImm((Op*)inst.pc(), i);
3764 if (hasImmVector(toOp(*inst.pc()))) {
3765 inst.immVec = getImmVector((Op*)inst.pc());
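  // FCallArray carries no argument-count immediate; fake one so downstream
  // code that reads imm[0].u_IVA can treat it like a one-argument FCall.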
3767 if (inst.op() == OpFCallArray) {
3768 inst.imm[0].u_IVA = 1;
3772 const char* Translator::translateResultName(TranslateResult r) {
3773 static const char* const names[] = {
3774 "Failure",
3775 "Retry",
3776 "Success",
3778 return names[r];
3782 * Similar to applyInputMetaData, but designed to be used during IR
3783 * generation. Reads and writes types of values using hhbcTrans. This will
3784 * eventually replace applyInputMetaData.
3786 void readMetaData(Unit::MetaHandle& handle, NormalizedInstruction& inst,
3787 HhbcTranslator& hhbcTrans, MetaMode metaMode /* = Normal */) {
3788 if (isAlwaysNop(inst.op())) {
3789 inst.noOp = true;
3790 return;
3793 if (!handle.findMeta(inst.unit(), inst.offset())) return;
3795 Unit::MetaInfo info;
3796 if (!handle.nextArg(info)) return;
3799 * We need to adjust the indexes in MetaInfo::m_arg if this instruction takes
3800 * stack arguments other than those related to the MVector. (For example,
3801 * the rhs of an assignment.)
3803 auto const& iInfo = instrInfo[inst.op()];
3804 if (iInfo.in & AllLocals) {
3806 * RetC/RetV don't care about their stack input, but it may have been
3807 * annotated. Skip it (because RetC/RetV pretend they don't have a stack
3808 * input).
3810 return;
3812 if (iInfo.in == FuncdRef) {
3814 * FPassC* pretend to have no inputs
3816 return;
3818 const int base = !(iInfo.in & MVector) ? 0 :
3819 !(iInfo.in & Stack1) ? 0 :
3820 !(iInfo.in & Stack2) ? 1 :
3821 !(iInfo.in & Stack3) ? 2 : 3;
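  // MetaInfo args tagged with VectorArg are relative to the MVector's
  // inputs; 'base' above counts the instruction's other stack inputs so such
  // indices can be rebased onto the full input list when computing 'arg'
  // below.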
3823 auto stackFilter = [metaMode, &inst](Location loc) {
3824 if (metaMode == MetaMode::Legacy && loc.space == Location::Stack) {
3825 loc.offset = -(loc.offset + 1) + inst.stackOffset;
3827 return loc;
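  // Walk every MetaInfo record attached to this instruction's offset.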
3830 do {
3831 SKTRACE(3, inst.source, "considering MetaInfo of kind %d\n", info.m_kind);
3833 int arg = info.m_arg & Unit::MetaInfo::VectorArg ?
3834 base + (info.m_arg & ~Unit::MetaInfo::VectorArg) : info.m_arg;
3835 auto updateType = [&]{
3836 /* don't update input rtt for Legacy mode */
3837 if (metaMode == MetaMode::Legacy) return;
3838 auto& input = *inst.inputs[arg];
3839 input.rtt = hhbcTrans.rttFromLocation(stackFilter(input.location));
3842 switch (info.m_kind) {
3843 case Unit::MetaInfo::Kind::NoSurprise:
3844 inst.noSurprise = true;
3845 break;
3846 case Unit::MetaInfo::Kind::GuardedCls:
3847 inst.guardedCls = true;
3848 break;
3849 case Unit::MetaInfo::Kind::DataTypePredicted: {
3850 // When we're translating a Tracelet from Translator::analyze(), the
3851 // information from these predictions has been added to the
3852 // NormalizedInstructions in the instruction stream, so they aren't
3853 // necessary (and they caused a perf regression). HHIR guard relaxation
3854 // is capable of eliminating unnecessary predictions and the
3855 // information added here is valuable to it.
3856 if (metaMode == MetaMode::Legacy &&
3857 !RuntimeOption::EvalHHIRRelaxGuards) {
3858 break;
3860 auto const loc = stackFilter(inst.inputs[arg]->location).
3861 toLocation(inst.stackOffset);
3862 auto const t = Type(DataType(info.m_data));
3863 auto const offset = inst.source.offset();
3865 // These 'predictions' mean the type is InitNull or the predicted type,
3866 // so we assert InitNull | t, then guard t. This allows certain
3867 // optimizations in the IR.
3868 hhbcTrans.assertType(loc, Type::InitNull | t);
3869 hhbcTrans.checkType(loc, t, offset);
3870 updateType();
3871 break;
3873 case Unit::MetaInfo::Kind::DataTypeInferred: {
3874 hhbcTrans.assertType(
3875 stackFilter(inst.inputs[arg]->location).toLocation(inst.stackOffset),
3876 Type(DataType(info.m_data)));
3877 updateType();
3878 break;
3880 case Unit::MetaInfo::Kind::String: {
3881 hhbcTrans.assertString(
3882 stackFilter(inst.inputs[arg]->location).toLocation(inst.stackOffset),
3883 inst.unit()->lookupLitstrId(info.m_data));
3884 updateType();
3885 break;
3887 case Unit::MetaInfo::Kind::Class: {
3888 auto& rtt = inst.inputs[arg]->rtt;
3889 auto const& location = inst.inputs[arg]->location;
3890 if (rtt.valueType() != KindOfObject) break;
3892 const StringData* metaName = inst.unit()->lookupLitstrId(info.m_data);
3893 const StringData* rttName =
3894 rtt.valueClass() ? rtt.valueClass()->name() : nullptr;
3895 // The two classes might not be exactly the same, which is ok
3896 // as long as metaCls is more derived than rttCls.
3897 Class* metaCls = Unit::lookupUniqueClass(metaName);
3898 Class* rttCls = rttName ? Unit::lookupUniqueClass(rttName) : nullptr;
3899 if (!metaCls || (rttCls && metaCls != rttCls &&
3900 !metaCls->classof(rttCls))) {
3901 // Runtime type is more derived
3902 metaCls = rttCls;
3904 if (!metaCls) break;
3905 if (location.space != Location::This) {
3906 hhbcTrans.assertClass(
3907 stackFilter(location).toLocation(inst.stackOffset), metaCls);
3908 } else {
3909 assert(metaCls->classof(hhbcTrans.curClass()));
3912 if (metaCls == rttCls) break;
3913 SKTRACE(1, inst.source, "replacing input %d with a MetaInfo-supplied "
3914 "class of %s; old type = %s\n",
3915 arg, metaName->data(), rtt.pretty().c_str());
3916 if (rtt.isRef()) {
3917 rtt = RuntimeType(KindOfRef, KindOfObject, metaCls);
3918 } else {
3919 rtt = RuntimeType(KindOfObject, KindOfNone, metaCls);
3921 break;
3923 case Unit::MetaInfo::Kind::MVecPropClass: {
3924 const StringData* metaName = inst.unit()->lookupLitstrId(info.m_data);
3925 Class* metaCls = Unit::lookupUniqueClass(metaName);
3926 if (metaCls) {
3927 inst.immVecClasses[arg] = metaCls;
3929 break;
3932 case Unit::MetaInfo::Kind::GuardedThis:
3933 case Unit::MetaInfo::Kind::NonRefCounted:
3934 // fallthrough; these are handled in preInputApplyMetaData.
3935 case Unit::MetaInfo::Kind::None:
3936 break;
3938 } while (handle.nextArg(info));
3941 bool instrMustInterp(const NormalizedInstruction& inst) {
3942 if (RuntimeOption::EvalJitAlwaysInterpOne) return true;
3944 switch (inst.op()) {
3945 // Generate a case for each instruction we support at least partially.
3946 # define CASE(name) case Op::name:
3947 INSTRS
3948 # undef CASE
3949 # define NOTHING(...) // PSEUDOINSTR_DISPATCH has the cases in it
3950 PSEUDOINSTR_DISPATCH(NOTHING)
3951 # undef NOTHING
3952 return false;
3954 default:
3955 return true;
3959 void Translator::traceStart(Offset initBcOffset, Offset initSpOffset) {
3960 assert(!m_irTrans);
3962 FTRACE(1, "{}{:-^40}{}\n",
3963 color(ANSI_COLOR_BLACK, ANSI_BGCOLOR_GREEN),
3964 " HHIR during translation ",
3965 color(ANSI_COLOR_END));
3967 m_irTrans.reset(new JIT::IRTranslator(initBcOffset, initSpOffset,
3968 liveFunc()));
3971 void Translator::traceEnd() {
3972 assert(!m_irTrans->hhbcTrans().isInlining());
3973 m_irTrans->hhbcTrans().end();
3974 FTRACE(1, "{}{:-^40}{}\n",
3975 color(ANSI_COLOR_BLACK, ANSI_BGCOLOR_GREEN),
3977 color(ANSI_COLOR_END));
3980 void Translator::traceFree() {
3981 FTRACE(1, "HHIR free: arena size: {}\n",
3982 m_irTrans->hhbcTrans().unit().arena().size());
3983 m_irTrans.reset();
3986 Translator::TranslateResult
3987 Translator::translateRegion(const RegionDesc& region,
3988 RegionBlacklist& toInterp) {
3989 FTRACE(1, "translateRegion starting with:\n{}\n", show(region));
3990 HhbcTranslator& ht = m_irTrans->hhbcTrans();
3991 assert(!region.blocks.empty());
3992 const SrcKey startSk = region.blocks.front()->start();
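  // Walk the region block by block and instruction by instruction, emitting
  // the recorded guards and predictions before each instruction's body.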
3994 for (auto b = 0; b < region.blocks.size(); b++) {
3995 auto const& block = region.blocks[b];
3996 Unit::MetaHandle metaHand;
3997 SrcKey sk = block->start();
3998 const Func* topFunc = nullptr;
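    // These walkers replay the per-SrcKey annotations recorded when the
    // region was selected: type predictions, by-ref parameter info,
    // reffiness guards, and known Func*s.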
3999 auto typePreds = makeMapWalker(block->typePreds());
4000 auto byRefs = makeMapWalker(block->paramByRefs());
4001 auto refPreds = makeMapWalker(block->reffinessPreds());
4002 auto knownFuncs = makeMapWalker(block->knownFuncs());
4004 for (unsigned i = 0; i < block->length(); ++i, sk.advance(block->unit())) {
4005 // Update bcOff here so any guards or assertions from metadata are
4006 // attributed to this instruction.
4007 ht.setBcOff(sk.offset(), false);
4009 // Emit prediction guards. If this is the first instruction in the
4010 // region the guards will go to a retranslate request. Otherwise, they'll
4011 // go to a side exit.
4012 bool isFirstRegionInstr = block == region.blocks.front() && i == 0;
4013 while (typePreds.hasNext(sk)) {
4014 auto const& pred = typePreds.next();
4015 auto type = pred.type;
4016 auto loc = pred.location;
4017 if (type <= Type::Cls) {
4018 // Do not generate guards for class types; instead assert the type
4019 assert(loc.tag() == JIT::RegionDesc::Location::Tag::Stack);
4020 ht.assertType(loc, type);
4021 } else if (isFirstRegionInstr) {
4022 bool checkOuterTypeOnly = m_mode != TransProfile;
4023 ht.guardTypeLocation(loc, type, checkOuterTypeOnly);
4024 } else {
4025 ht.checkType(loc, type, sk.offset());
4029 // Emit reffiness guards. For now, we only support reffiness guards at
4030 // the beginning of the region.
4031 while (refPreds.hasNext(sk)) {
4032 assert(sk == startSk);
4033 auto const& pred = refPreds.next();
4034 ht.guardRefs(pred.arSpOffset, pred.mask, pred.vals);
4037 if (RuntimeOption::EvalJitTransCounters && isFirstRegionInstr) {
4038 ht.emitIncTransCounter();
4041 // Update the current funcd, if we have a new one.
4042 if (knownFuncs.hasNext(sk)) {
4043 topFunc = knownFuncs.next();
4046 // Create and initialize the instruction.
4047 NormalizedInstruction inst;
4048 inst.source = sk;
4049 inst.m_unit = block->unit();
4050 inst.breaksTracelet =
4051 i == block->length() - 1 && block == region.blocks.back();
4052 inst.changesPC = opcodeChangesPC(inst.op());
4053 inst.funcd = topFunc;
4054 inst.nextOffset = kInvalidOffset;
4055 if (instrIsNonCallControlFlow(inst.op()) && !inst.breaksTracelet) {
4056 assert(b < region.blocks.size());
4057 inst.nextOffset = region.blocks[b+1]->start().offset();
4059 inst.outputPredicted = false;
4060 populateImmediates(inst);
4062 // If this block ends with an inlined FCall, we don't emit anything for
4063 // the FCall and instead set up HhbcTranslator for inlining. Blocks from
4064 // the callee will be next in the region.
4065 if (i == block->length() - 1 &&
4066 inst.op() == OpFCall && block->inlinedCallee()) {
4067 auto const* callee = block->inlinedCallee();
4068 FTRACE(1, "\nstarting inlined call from {} to {} with {} args "
4069 "and stack:\n{}\n",
4070 block->func()->fullName()->data(),
4071 callee->fullName()->data(),
4072 inst.imm[0].u_IVA,
4073 ht.showStack());
4074 auto returnSk = inst.nextSk();
4075 auto returnFuncOff = returnSk.offset() - block->func()->base();
4076 ht.beginInlining(inst.imm[0].u_IVA, callee, returnFuncOff);
4077 continue;
4080 // We can get a more precise output type for interpOne if we know all of
4081 // its inputs, so we still populate the rest of the instruction even if
4082 // this is true.
4083 inst.interp = toInterp.count(sk);
4085 // Apply the first round of metadata from the repo and get a list of
4086 // input locations.
4087 preInputApplyMetaData(metaHand, &inst);
4089 InputInfos inputInfos;
4090 getInputs(startSk, inst, inputInfos, block->func(), [&](int i) {
4091 return ht.traceBuilder().localType(i, DataTypeGeneric);
4094 // Populate the NormalizedInstruction's input vector, using types from
4095 // HhbcTranslator.
4096 std::vector<DynLocation> dynLocs;
4097 dynLocs.reserve(inputInfos.size());
4098 auto newDynLoc = [&](const InputInfo& ii) {
4099 dynLocs.emplace_back(ii.loc, ht.rttFromLocation(ii.loc));
4100 FTRACE(2, "rttFromLocation: {} -> {}\n",
4101 ii.loc.pretty(), dynLocs.back().rtt.pretty());
4102 return &dynLocs.back();
4104 FTRACE(2, "populating inputs for {}\n", inst.toString());
4105 for (auto const& ii : inputInfos) {
4106 inst.inputs.push_back(newDynLoc(ii));
4109 // Apply the remaining metadata. This may change the types of some of
4110 // inst's inputs.
4111 readMetaData(metaHand, inst, ht);
4112 if (!inst.noOp && inputInfos.needsRefCheck) {
4113 assert(byRefs.hasNext(sk));
4114 inst.preppedByRef = byRefs.next();
4117 // Check for a type prediction. Put it in the NormalizedInstruction so
4118 // the emit* method can use it if needed.
4119 auto const doPrediction = outputIsPredicted(startSk, inst);
4121 // Emit IR for the body of the instruction.
4122 try {
4123 m_irTrans->translateInstr(inst);
4124 } catch (const JIT::FailedIRGen& exn) {
4125 FTRACE(1, "ir generation for {} failed with {}\n",
4126 inst.toString(), exn.what());
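        // Blacklist this SrcKey so the retry interprets it (interpOne)
        // instead of emitting IR for it again, then retry the whole region.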
4127 always_assert(!toInterp.count(sk));
4128 toInterp.insert(sk);
4129 return Retry;
4132 // Check the prediction. If the predicted type is less specific than what
4133 // is currently on the eval stack, checkType won't emit any code.
4134 if (doPrediction) {
4135 ht.checkTypeStack(0, inst.outPred,
4136 sk.advanced(block->unit()).offset());
4140 assert(!typePreds.hasNext());
4141 assert(!byRefs.hasNext());
4142 assert(!refPreds.hasNext());
4143 assert(!knownFuncs.hasNext());
4146 traceEnd();
4147 try {
4148 traceCodeGen();
4149 } catch (const JIT::FailedCodeGen& exn) {
4150 FTRACE(1, "code generation failed with {}\n", exn.what());
4151 SrcKey sk{exn.vmFunc, exn.bcOff};
4152 always_assert(!toInterp.count(sk));
4153 toInterp.insert(sk);
4154 return Retry;
4157 return Success;
4160 uint64_t* Translator::getTransCounterAddr() {
4161 if (!isTransDBEnabled()) return nullptr;
4163 TransID id = m_translations.size();
4165 // allocate a new chunk of counters if necessary
4166 if (id >= m_transCounters.size() * transCountersPerChunk) {
4167 uint32_t size = sizeof(uint64_t) * transCountersPerChunk;
4168 auto *chunk = (uint64_t*)malloc(size);
4169 bzero(chunk, size);
4170 m_transCounters.push_back(chunk);
4172 assert(id / transCountersPerChunk < m_transCounters.size());
4173 return &(m_transCounters[id / transCountersPerChunk]
4174 [id % transCountersPerChunk]);
4177 void Translator::addTranslation(const TransRec& transRec) {
4178 if (Trace::moduleEnabledRelease(Trace::trans, 1)) {
4179 // Log the translation's creation time, SrcKey, code sizes, and kind
4180 Trace::traceRelease("New translation: %" PRId64 " %s %u %u %d\n",
4181 Timer::GetCurrentTimeMicros() - m_createdTime,
4182 folly::format("{}:{}:{}",
4183 transRec.src.unit()->filepath()->data(),
4184 transRec.src.getFuncId(),
4185 transRec.src.offset()).str().c_str(),
4186 transRec.aLen,
4187 transRec.astubsLen,
4188 transRec.kind);
4191 if (!isTransDBEnabled()) return;
4192 uint32_t id = getCurrentTransID();
4193 m_translations.push_back(transRec);
4194 m_translations[id].setID(id);
4196 if (transRec.aLen > 0) {
4197 m_transDB[transRec.aStart] = id;
4199 if (transRec.astubsLen > 0) {
4200 m_transDB[transRec.astubsStart] = id;
4204 uint64_t Translator::getTransCounter(TransID transId) const {
4205 if (!isTransDBEnabled()) return -1ul;
4206 assert(transId < m_translations.size());
4208 uint64_t counter;
4210 if (transId / transCountersPerChunk >= m_transCounters.size()) {
4211 counter = 0;
4212 } else {
4213 counter = m_transCounters[transId / transCountersPerChunk]
4214 [transId % transCountersPerChunk];
4216 return counter;
4219 namespace {
4221 struct DeferredPathInvalidate : public DeferredWorkItem {
4222 const std::string m_path;
4223 explicit DeferredPathInvalidate(const std::string& path) : m_path(path) {
4224 assert(m_path.size() >= 1 && m_path[0] == '/');
4226 void operator()() {
4227 String spath(m_path);
4229 * inotify saw this path change. Now poke the file repository;
4230 * it will notice the underlying PhpFile* has changed.
4232 * We don't actually need to *do* anything with the PhpFile* from
4233 * this lookup; since the path has changed, the file we'll get out is
4234 * going to be some new file, not the old file that needs invalidation.
4236 (void)g_vmContext->lookupPhpFile(spath.get(), "");
4242 static const char *transKindStr[] = {
4243 #define DO(KIND) #KIND,
4244 TRANS_KINDS
4245 #undef DO
4248 const char *getTransKindName(TransKind kind) {
4249 assert(kind >= 0 && kind < TransInvalid);
4250 return transKindStr[kind];
4253 TransRec::TransRec(SrcKey s,
4254 MD5 _md5,
4255 TransKind _kind,
4256 const Tracelet* t,
4257 TCA _aStart,
4258 uint32_t _aLen,
4259 TCA _astubsStart,
4260 uint32_t _astubsLen,
4261 vector<TransBCMapping> _bcMapping)
4262 : id(0)
4263 , kind(_kind)
4264 , src(s)
4265 , md5(_md5)
4266 , bcStopOffset(t ? t->nextSk().offset() : 0)
4267 , aStart(_aStart)
4268 , aLen(_aLen)
4269 , astubsStart(_astubsStart)
4270 , astubsLen(_astubsLen)
4271 , bcMapping(_bcMapping) {
4272 if (t != nullptr) {
4273 for (auto dep : t->m_dependencies) {
4274 dependencies.push_back(*dep.second);
4280 string
4281 TransRec::print(uint64_t profCount) const {
4282 std::string ret;
4284 // Split up the call to prevent template explosion
4285 ret += folly::format(
4286 "Translation {} {{\n"
4287 " src.md5 = {}\n"
4288 " src.funcId = {}\n"
4289 " src.startOffset = {}\n"
4290 " src.stopOffset = {}\n",
4291 id, md5, src.getFuncId(), src.offset(), bcStopOffset).str();
4293 ret += folly::format(
4294 " kind = {} ({})\n"
4295 " aStart = {}\n"
4296 " aLen = {:#x}\n"
4297 " stubStart = {}\n"
4298 " stubLen = {:#x}\n",
4299 static_cast<uint32_t>(kind), getTransKindName(kind),
4300 aStart, aLen, astubsStart, astubsLen).str();
4302 ret += folly::format(
4303 " profCount = {}\n"
4304 " bcMapping = {}\n",
4305 profCount, bcMapping.size()).str();
4307 for (auto const& info : bcMapping) {
4308 ret += folly::format(
4309 " {} {} {} {}\n",
4310 info.md5, info.bcStart,
4311 info.aStart, info.astubsStart).str();
4314 ret += "}\n\n";
4315 return ret;
4318 void
4319 ActRecState::pushFunc(const NormalizedInstruction& inst) {
4320 assert(isFPush(inst.op()));
4321 if (inst.op() == OpFPushFuncD || inst.op() == OpFPushFuncU) {
4322 const Unit& unit = *inst.unit();
4323 Id funcId = inst.imm[1].u_SA;
4324 auto const& nep = unit.lookupNamedEntityPairId(funcId);
4325 auto const func = Unit::lookupFunc(nep.second);
4326 if (func) func->validate();
4327 if (func && func->isNameBindingImmutable(&unit)) {
4328 pushFuncD(func);
4329 return;
4332 pushDynFunc();
4335 void
4336 ActRecState::pushFuncD(const Func* func) {
4337 TRACE(2, "ActRecState: pushStatic func %p(%s)\n", func, func->name()->data());
4338 func->validate();
4339 Record r;
4340 r.m_state = State::KNOWN;
4341 r.m_topFunc = func;
4342 r.m_entryArDelta = InvalidEntryArDelta;
4343 m_arStack.push_back(r);
4346 void
4347 ActRecState::pushDynFunc() {
4348 TRACE(2, "ActRecState: pushDynFunc\n");
4349 Record r;
4350 r.m_state = State::UNKNOWABLE;
4351 r.m_topFunc = nullptr;
4352 r.m_entryArDelta = InvalidEntryArDelta;
4353 m_arStack.push_back(r);
4356 void
4357 ActRecState::pop() {
4358 if (!m_arStack.empty()) {
4359 m_arStack.pop_back();
4364 * checkByRef() returns true if the parameter specified by argNum is passed
4365 * by reference; otherwise it returns false. This function may also throw an
4366 * UnknownInputExc if the reffiness cannot be determined.
4368 * Note that the 'entryArDelta' parameter specifies the delta between sp at
4369 * the beginning of the tracelet and ar.
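 *
 * Illustrative example (assuming zero-based argument numbering): for a known
 * callee declared as function f($a, &$b), checkByRef(0, ...) would return
 * false and checkByRef(1, ...) would return true.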
4371 bool
4372 ActRecState::checkByRef(int argNum, int entryArDelta, RefDeps* refDeps) {
4373 FTRACE(2, "ActRecState: getting reffiness for arg {}, arDelta {}\n",
4374 argNum, entryArDelta);
4375 if (m_arStack.empty()) {
4376 // The ActRec in question was pushed before the beginning of the
4377 // tracelet, so we can make a guess about parameter reffiness and
4378 // record our assumptions as tracelet guards.
4380 const ActRec* ar = arFromSpOffset((ActRec*)vmsp(), entryArDelta);
4381 Record r;
4382 r.m_state = State::GUESSABLE;
4383 r.m_entryArDelta = entryArDelta;
4384 ar->m_func->validate();
4385 r.m_topFunc = ar->m_func;
4386 m_arStack.push_back(r);
4388 Record& r = m_arStack.back();
4389 if (r.m_state == State::UNKNOWABLE) {
4390 TRACE(2, "ActRecState: unknowable, throwing in the towel\n");
4391 throwUnknownInput();
4392 not_reached();
4394 assert(r.m_topFunc);
4395 bool retval = r.m_topFunc->byRef(argNum);
4396 if (r.m_state == State::GUESSABLE) {
4397 assert(r.m_entryArDelta != InvalidEntryArDelta);
4398 TRACE(2, "ActRecState: guessing arg%d -> %d\n", argNum, retval);
4399 refDeps->addDep(r.m_entryArDelta, argNum, retval);
4401 return retval;
4404 const Func*
4405 ActRecState::knownFunc() {
4406 if (currentState() != State::KNOWN) return nullptr;
4407 assert(!m_arStack.empty());
4408 return m_arStack.back().m_topFunc;
4411 ActRecState::State
4412 ActRecState::currentState() {
4413 if (m_arStack.empty()) return State::GUESSABLE;
4414 return m_arStack.back().m_state;
4417 const Func* lookupImmutableMethod(const Class* cls, const StringData* name,
4418 bool& magicCall, bool staticLookup,
4419 Class* ctx) {
4420 if (!cls || RuntimeOption::EvalJitEnableRenameFunction) return nullptr;
4421 if (cls->attrs() & AttrInterface) return nullptr;
4422 bool privateOnly = false;
4423 if (!RuntimeOption::RepoAuthoritative ||
4424 !(cls->preClass()->attrs() & AttrUnique)) {
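    // Without a unique class in a repo-authoritative build we can't trust
    // 'cls' to be the class that will actually be loaded here, so only
    // proceed when the calling context is known to be a subclass, and then
    // restrict non-static lookups to private methods.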
4425 if (!ctx || !ctx->classof(cls)) {
4426 return nullptr;
4428 if (!staticLookup) privateOnly = true;
4431 const Func* func;
4432 MethodLookup::LookupResult res = staticLookup ?
4433 g_vmContext->lookupClsMethod(func, cls, name, nullptr, ctx, false) :
4434 g_vmContext->lookupObjMethod(func, cls, name, ctx, false);
4436 if (res == MethodLookup::LookupResult::MethodNotFound) return nullptr;
4438 assert(res == MethodLookup::LookupResult::MethodFoundWithThis ||
4439 res == MethodLookup::LookupResult::MethodFoundNoThis ||
4440 (staticLookup ?
4441 res == MethodLookup::LookupResult::MagicCallStaticFound :
4442 res == MethodLookup::LookupResult::MagicCallFound));
4444 magicCall =
4445 res == MethodLookup::LookupResult::MagicCallStaticFound ||
4446 res == MethodLookup::LookupResult::MagicCallFound;
4448 if ((privateOnly && (!(func->attrs() & AttrPrivate) || magicCall)) ||
4449 func->isAbstract() ||
4450 func->attrs() & AttrDynamicInvoke) {
4451 return nullptr;
4454 if (staticLookup) {
4455 if (magicCall) {
4457 * i) We can't tell if a magic call would go to __call or __callStatic
4458 *    - Could deal with this by checking for the existence of __call
4460 * ii) hphp semantics are that in the case of an object call, we look
4461 * for __call in the scope of the object (this is incompatible
4462 * with zend) which means we would have to know that there is no
4463 * __call higher up in the tree
4464 * - Could deal with this by checking for AttrNoOverride on the
4465 * class
4467 func = nullptr;
4469 } else if (!(func->attrs() & AttrPrivate)) {
4470 if (magicCall || func->attrs() & AttrStatic) {
4471 if (!(cls->preClass()->attrs() & AttrNoOverride)) {
4472 func = nullptr;
4474 } else if (!(func->attrs() & AttrNoOverride && !func->hasStaticLocals()) &&
4475 !(cls->preClass()->attrs() & AttrNoOverride)) {
4476 func = nullptr;
4479 return func;
4482 std::string traceletShape(const Tracelet& trace) {
4483 std::string ret;
4485 for (auto ni = trace.m_instrStream.first; ni; ni = ni->next) {
4486 using folly::toAppend;
4488 toAppend(opcodeToName(ni->op()), &ret);
4489 if (ni->immVec.isValid()) {
4490 toAppend(
4491 "<",
4492 locationCodeString(ni->immVec.locationCode()),
4493 &ret);
4494 for (auto& mc : ni->immVecM) {
4495 toAppend(" ", memberCodeString(mc), &ret);
4497 toAppend(">", &ret);
4499 toAppend(" ", &ret);
4502 return ret;
4505 } // HPHP::Transl
4507 void invalidatePath(const std::string& path) {
4508 TRACE(1, "invalidatePath: abspath %s\n", path.c_str());
4509 PendQ::defer(new DeferredPathInvalidate(path));
4512 } // HPHP