Slightly relax FPushFunc's locals-destroying semantics
[hiphop-php.git] / hphp / runtime / vm / jit / translator.cpp
blob f7c8ab5bbf941657a7dd0e93605cc912468744fc
1 /*
2 +----------------------------------------------------------------------+
3 | HipHop for PHP |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
15 */
16 #include "hphp/runtime/vm/jit/translator.h"
18 // Translator front-end: parse instruction stream into basic blocks, decode
19 // and normalize instructions. Propagate run-time type info to instructions
20 // to annotate their inputs and outputs with types.
21 #include <cinttypes>
22 #include <assert.h>
23 #include <stdint.h>
24 #include <stdarg.h>
26 #include <vector>
27 #include <string>
29 #include "folly/Conv.h"
31 #include "hphp/util/trace.h"
32 #include "hphp/util/biased-coin.h"
33 #include "hphp/util/map-walker.h"
34 #include "hphp/runtime/base/file-repository.h"
35 #include "hphp/runtime/base/runtime-option.h"
36 #include "hphp/runtime/base/stats.h"
37 #include "hphp/runtime/base/types.h"
38 #include "hphp/runtime/ext/ext_continuation.h"
39 #include "hphp/runtime/ext/ext_collections.h"
40 #include "hphp/runtime/vm/hhbc.h"
41 #include "hphp/runtime/vm/bytecode.h"
42 #include "hphp/runtime/vm/jit/annotation.h"
43 #include "hphp/runtime/vm/jit/hhbc-translator.h"
44 #include "hphp/runtime/vm/jit/ir-unit.h"
45 #include "hphp/runtime/vm/jit/ir-translator.h"
46 #include "hphp/runtime/vm/jit/normalized-instruction.h"
47 #include "hphp/runtime/vm/jit/region-selection.h"
48 #include "hphp/runtime/base/rds.h"
49 #include "hphp/runtime/vm/jit/tracelet.h"
50 #include "hphp/runtime/vm/jit/translator-inline.h"
51 #include "hphp/runtime/vm/jit/translator-x64.h"
52 #include "hphp/runtime/vm/jit/type.h"
53 #include "hphp/runtime/vm/pendq.h"
54 #include "hphp/runtime/vm/treadmill.h"
55 #include "hphp/runtime/vm/type-profile.h"
56 #include "hphp/runtime/vm/runtime.h"
58 #define KindOfUnknown DontUseKindOfUnknownInThisFile
59 #define KindOfInvalid DontUseKindOfInvalidInThisFile
61 namespace HPHP {
62 namespace Transl {
64 using namespace HPHP;
65 using HPHP::JIT::Type;
66 using HPHP::JIT::HhbcTranslator;
68 TRACE_SET_MOD(trans)
70 static __thread BiasedCoin *dbgTranslateCoin;
71 Translator* transl;
72 Lease Translator::s_writeLease;
74 struct TraceletContext {
75 TraceletContext() = delete;
77 TraceletContext(Tracelet* t, const TypeMap& initialTypes)
78 : m_t(t)
79 , m_numJmps(0)
80 , m_aliasTaint(false)
81 , m_varEnvTaint(false)
83 for (auto& kv : initialTypes) {
84 TRACE(1, "%s\n",
85 Trace::prettyNode("InitialType", kv.first, kv.second).c_str());
86 m_currentMap[kv.first] = t->newDynLocation(kv.first, kv.second);
90 Tracelet* m_t;
91 ChangeMap m_currentMap;
92 DepMap m_dependencies;
93 DepMap m_resolvedDeps; // dependencies resolved by static analysis
94 LocationSet m_changeSet;
95 LocationSet m_deletedSet;
96 int m_numJmps;
97 bool m_aliasTaint;
98 bool m_varEnvTaint;
100 RuntimeType currentType(const Location& l) const;
101 DynLocation* recordRead(const InputInfo& l, bool useHHIR,
102 DataType staticType = KindOfAny);
103 void recordWrite(DynLocation* dl);
104 void recordDelete(const Location& l);
105 void recordJmp();
106 void aliasTaint();
107 void varEnvTaint();
110 void InstrStream::append(NormalizedInstruction* ni) {
111 if (last) {
112 assert(first);
113 last->next = ni;
114 ni->prev = last;
115 ni->next = nullptr;
116 last = ni;
117 return;
119 assert(!first);
120 first = ni;
121 last = ni;
122 ni->prev = nullptr;
123 ni->next = nullptr;
126 void InstrStream::remove(NormalizedInstruction* ni) {
127 if (ni->prev) {
128 ni->prev->next = ni->next;
129 } else {
130 first = ni->next;
132 if (ni->next) {
133 ni->next->prev = ni->prev;
134 } else {
135 last = ni->prev;
137 ni->prev = nullptr;
138 ni->next = nullptr;
141 /*
142 * locPhysicalOffset --
144 *   Return offset, in cells, of this location from its base
145 *   pointer. It needs a function descriptor to see how many locals
146 *   to skip for iterators; if the current frame pointer is not the context
147 *   you're looking for, be sure to pass in a non-default f.
148 */
149 int locPhysicalOffset(Location l, const Func* f) {
150 f = f ? f : liveFunc();
151 assert_not_implemented(l.space == Location::Stack ||
152 l.space == Location::Local ||
153 l.space == Location::Iter);
154 int localsToSkip = l.space == Location::Iter ? f->numLocals() : 0;
155 int iterInflator = l.space == Location::Iter ? kNumIterCells : 1;
156 return -((l.offset + 1) * iterInflator + localsToSkip);
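// Worked example (a sketch, not in the original source): with
// f->numLocals() == 3, locals live directly below the frame pointer and
// iterators below all locals, so
//   locPhysicalOffset(Location(Location::Local, 1), f) == -2
//   locPhysicalOffset(Location(Location::Iter, 0), f)  == -(kNumIterCells + 3)
// Stack locations use an inflator of 1, so stack offset 0 lives at base[-1].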
159 RuntimeType Translator::liveType(Location l,
160 const Unit& u,
161 bool specialize) {
162 Cell *outer;
163 switch (l.space) {
164 case Location::Stack:
165 // Stack accesses must be to addresses pushed before
166 // translation time; if they are to addresses pushed after,
167 // they should be hitting in the changemap.
168 assert(locPhysicalOffset(l) >= 0);
169 // fallthru
170 case Location::Local: {
171 Cell *base;
172 int offset = locPhysicalOffset(l);
173 base = l.space == Location::Stack ? vmsp() : vmfp();
174 outer = &base[offset];
175 } break;
176 case Location::Iter: {
177 const Iter *it = frame_iter(liveFrame(), l.offset);
178 TRACE(1, "Iter input: fp %p, iter %p, offset %" PRId64 "\n", vmfp(),
179 it, l.offset);
180 return RuntimeType(it);
181 } break;
182 case Location::Litstr: {
183 return RuntimeType(u.lookupLitstrId(l.offset));
184 } break;
185 case Location::Litint: {
186 return RuntimeType(l.offset);
187 } break;
188 case Location::This: {
189 return outThisObjectType();
190 } break;
191 default: {
192 not_reached();
195 assert(IS_REAL_TYPE(outer->m_type));
196 return liveType(outer, l, specialize);
199 RuntimeType
200 Translator::liveType(const Cell* outer, const Location& l, bool specialize) {
201 always_assert(analysisDepth() == 0);
203 if (!outer) {
204 // An undefined global; starts out as a variant null
205 return RuntimeType(KindOfRef, KindOfNull);
207 DataType outerType = (DataType)outer->m_type;
208 assert(IS_REAL_TYPE(outerType));
209 DataType valueType = outerType;
210 DataType innerType = KindOfNone;
211 const Cell* valCell = outer;
212 if (outerType == KindOfRef) {
213 // Variant. Pick up the inner type, too.
214 valCell = outer->m_data.pref->tv();
215 innerType = valCell->m_type;
216 assert(IS_REAL_TYPE(innerType));
217 valueType = innerType;
218 assert(innerType != KindOfRef);
219 FTRACE(2, "liveType {}: Var -> {}\n", l.pretty(), tname(innerType));
220 } else {
221 FTRACE(2, "liveType {}: {}\n", l.pretty(), tname(outerType));
223 RuntimeType retval = RuntimeType(outerType, innerType);
224 const Class *klass = nullptr;
225 if (specialize) {
226 // Only infer the class/array kind if specialization requested
227 if (valueType == KindOfObject) {
228 klass = valCell->m_data.pobj->getVMClass();
229 if (klass != nullptr) {
230 retval = retval.setKnownClass(klass);
232 } else if (valueType == KindOfArray) {
233 ArrayData::ArrayKind arrayKind = valCell->m_data.parr->kind();
234 retval = retval.setArrayKind(arrayKind);
237 return retval;
240 RuntimeType Translator::outThisObjectType() {
241 /*
242 * Use the current method's context class (ctx) as a constraint.
243 * For instance methods, if $this is non-null, we are guaranteed
244 * that $this is an instance of ctx or a class derived from
245 * ctx. Zend allows this assumption to be violated but we have
246 * deliberately chosen to diverge from them here.
248 * Note that if analysisDepth() != 0 we'll have !hasThis() here,
249 * because our fake ActRec has no $this, but we'll still return the
250 * correct object type because arGetContextClass() looks at
251 * ar->m_func's class for methods.
252 */
253 const Class *ctx = liveFunc()->isMethod() ?
254 arGetContextClass(liveFrame()) : nullptr;
255 if (ctx) {
256 assert(!liveFrame()->hasThis() ||
257 liveFrame()->getThis()->getVMClass()->classof(ctx));
258 TRACE(2, "OutThisObject: derived from Class \"%s\"\n",
259 ctx->name()->data());
260 return RuntimeType(KindOfObject, KindOfNone, ctx);
262 return RuntimeType(KindOfObject, KindOfNone);
265 bool Translator::liveFrameIsPseudoMain() {
266 ActRec* ar = (ActRec*)vmfp();
267 return ar->hasVarEnv() && ar->getVarEnv()->isGlobalScope();
270 static int64_t typeToMask(DataType t) {
271 return (t == KindOfAny) ? 1 : (1 << (1 + getDataTypeIndex(t)));
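// Example (sketch): KindOfAny claims bit 0 and every concrete DataType a
// distinct higher bit, so a set of input types folds into a single bitmask:
//   typeToMask(KindOfInt64) == 1 << (1 + getDataTypeIndex(KindOfInt64))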
274 struct InferenceRule {
275 int64_t mask;
276 DataType result;
279 static DataType inferType(const InferenceRule* rules,
280 const vector<DynLocation*>& inputs) {
281 int inputMask = 0;
282 // We generate the inputMask by ORing together the mask for each input's
283 // type.
284 for (unsigned int i = 0; i < inputs.size(); ++i) {
285 DataType inType = inputs[i]->rtt.valueType();
286 inputMask |= typeToMask(inType);
288 // This loop checks each rule in order, looking for the first rule that
289 // applies. Note that we assume there's a "catch-all" at the end.
290 for (unsigned int i = 0; ; ++i) {
291 if (rules[i].mask == 0 || (rules[i].mask & inputMask) != 0) {
292 return rules[i].result;
295 // We return KindOfAny by default if none of the rules applied.
296 return KindOfAny;
299 /*
300 * Inference rules used for OutArith. These are applied in order
301 * row-by-row.
302 */
304 #define TYPE_MASK(name) \
305 static const int64_t name ## Mask = typeToMask(KindOf ## name);
306 TYPE_MASK(Any);
307 TYPE_MASK(Uninit);
308 TYPE_MASK(Null);
309 TYPE_MASK(Boolean);
310 static const int64_t IntMask = typeToMask(KindOfInt64);
311 TYPE_MASK(Double);
312 static const int64_t StringMask = typeToMask(KindOfString) |
313 typeToMask(KindOfStaticString);
314 TYPE_MASK(Array);
315 TYPE_MASK(Object);
317 static const InferenceRule ArithRules[] = {
318 { DoubleMask, KindOfDouble },
319 { ArrayMask, KindOfArray },
320 // If one of the inputs is known to be a String or if one of the input
321 // types is unknown, the output type is Unknown
322 { StringMask | AnyMask, KindOfAny },
323 // Default to Int64
324 { 0, KindOfInt64 },
327 static const int NumArithRules = sizeof(ArithRules) / sizeof(InferenceRule);
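// Example (sketch): an Add whose inputs are Int64 and Double builds the
// input mask IntMask|DoubleMask; the first row matches DoubleMask, so the
// inferred output is KindOfDouble. Two Int64 inputs match no explicit row
// and fall through to the {0, KindOfInt64} catch-all.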
329 /*
330 * Returns the type of the output of a bitwise operator on the two
331 * DynLocs. The only case that doesn't result in KindOfInt64 is String
332 * op String.
333 */
334 static const InferenceRule BitOpRules[] = {
335 { UninitMask | NullMask | BooleanMask |
336 IntMask | DoubleMask | ArrayMask | ObjectMask,
337 KindOfInt64 },
338 { StringMask, KindOfString },
339 { 0, KindOfAny },
342 static RuntimeType bitOpType(DynLocation* a, DynLocation* b) {
343 vector<DynLocation*> ins;
344 ins.push_back(a);
345 if (b) ins.push_back(b);
346 return RuntimeType(inferType(BitOpRules, ins));
349 static uint32_t m_w = 1; /* must not be zero */
350 static uint32_t m_z = 1; /* must not be zero */
352 static uint32_t get_random()
354 m_z = 36969 * (m_z & 65535) + (m_z >> 16);
355 m_w = 18000 * (m_w & 65535) + (m_w >> 16);
356 return (m_z << 16) + m_w; /* 32-bit result */
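// (This is Marsaglia's multiply-with-carry generator: two 16-bit MWC
// streams concatenated into one 32-bit result. The nonzero-seed
// requirement above exists because a zero state never leaves zero.)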
359 static const int kTooPolyPred = 2;
360 static const int kTooPolyRet = 6;
362 bool
363 isNormalPropertyAccess(const NormalizedInstruction& i,
364 int propInput,
365 int objInput) {
366 const LocationCode lcode = i.immVec.locationCode();
367 return
368 i.immVecM.size() == 1 &&
369 (lcode == LC || lcode == LL || lcode == LR || lcode == LH) &&
370 mcodeMaybePropName(i.immVecM[0]) &&
371 i.inputs[propInput]->isString() &&
372 i.inputs[objInput]->valueType() == KindOfObject;
375 bool
376 mInstrHasUnknownOffsets(const NormalizedInstruction& ni, Class* context) {
377 const MInstrInfo& mii = getMInstrInfo(ni.mInstrOp());
378 unsigned mi = 0;
379 unsigned ii = mii.valCount() + 1;
380 for (; mi < ni.immVecM.size(); ++mi) {
381 MemberCode mc = ni.immVecM[mi];
382 if (mcodeMaybePropName(mc)) {
383 const Class* cls = nullptr;
384 if (getPropertyOffset(ni, context, cls, mii, mi, ii).offset == -1) {
385 return true;
387 ++ii;
388 } else {
389 return true;
393 return false;
396 PropInfo getPropertyOffset(const NormalizedInstruction& ni,
397 Class* ctx,
398 const Class*& baseClass,
399 const MInstrInfo& mii,
400 unsigned mInd, unsigned iInd) {
401 if (mInd == 0) {
402 auto const baseIndex = mii.valCount();
403 baseClass = ni.inputs[baseIndex]->rtt.isObject()
404 ? ni.inputs[baseIndex]->rtt.valueClass()
405 : nullptr;
406 } else {
407 baseClass = ni.immVecClasses[mInd - 1];
409 if (!baseClass) return PropInfo();
411 if (!ni.inputs[iInd]->rtt.isString()) {
412 return PropInfo();
414 auto* const name = ni.inputs[iInd]->rtt.valueString();
415 if (!name) return PropInfo();
417 bool accessible;
418   // If we are not in repo-authoritative mode, we need to check that
419 // baseClass cannot change in between requests
420 if (!RuntimeOption::RepoAuthoritative ||
421 !(baseClass->preClass()->attrs() & AttrUnique)) {
422 if (!ctx) return PropInfo();
423 if (!ctx->classof(baseClass)) {
424 if (baseClass->classof(ctx)) {
425 // baseClass can change on us in between requests, but since
426 // ctx is an ancestor of baseClass we can make the weaker
427 // assumption that the object is an instance of ctx
428 baseClass = ctx;
429 } else {
430 // baseClass can change on us in between requests and it is
431 // not related to ctx, so bail out
432 return PropInfo();
436 // Lookup the index of the property based on ctx and baseClass
437 Slot idx = baseClass->getDeclPropIndex(ctx, name, accessible);
438 // If we couldn't find a property that is accessible in the current
439 // context, bail out
440 if (idx == kInvalidSlot || !accessible) {
441 return PropInfo();
443 // If it's a declared property we're good to go: even if a subclass
444 // redefines an accessible property with the same name it's guaranteed
445 // to be at the same offset
446 return PropInfo(
447 baseClass->declPropOffset(idx),
448 baseClass->declPropHphpcType(idx)
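// Example (sketch, hypothetical classes): with class B { public $p; } and
// class C extends B, compiling in ctx C against a base tracked only as B
// keeps baseClass == B through the classof() checks above; since a declared
// property keeps its slot in subclasses, the burned-in offset of $p is
// valid for any object we actually see.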
452 PropInfo getFinalPropertyOffset(const NormalizedInstruction& ni,
453 Class* context,
454 const MInstrInfo& mii) {
455 unsigned mInd = ni.immVecM.size() - 1;
456 unsigned iInd = mii.valCount() + 1 + mInd;
458 const Class* cls = nullptr;
459 return getPropertyOffset(ni, context, cls, mii, mInd, iInd);
462 static std::pair<DataType,double>
463 predictMVec(const NormalizedInstruction* ni) {
464 auto info = getFinalPropertyOffset(*ni,
465 ni->func()->cls(),
466 getMInstrInfo(ni->mInstrOp()));
467 if (info.offset != -1 && info.hphpcType != KindOfNone) {
468 FTRACE(1, "prediction for CGetM prop: {}, hphpc\n",
469 int(info.hphpcType));
470 return std::make_pair(info.hphpcType, 1.0);
473 auto& immVec = ni->immVec;
474 StringData* name;
475 MemberCode mc;
476 if (immVec.decodeLastMember(ni->m_unit, name, mc)) {
477 auto pred = predictType(TypeProfileKey(mc, name));
478 TRACE(1, "prediction for CGetM %s named %s: %d, %f\n",
479 mc == MET ? "elt" : "prop",
480 name->data(),
481 pred.first,
482 pred.second);
483 return pred;
486 return std::make_pair(KindOfAny, 0.0);
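// The pair returned above is (predicted DataType, confidence in [0,1]);
// callers such as predictOutputs compare the confidence against an
// acceptance threshold before trusting the prediction.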
489 /*
490 * predictOutputs --
492 *   Provide a best guess for the output type of this instruction.
493 */
494 static DataType
495 predictOutputs(SrcKey startSk,
496 const NormalizedInstruction* ni) {
497 if (!RuntimeOption::EvalJitTypePrediction) return KindOfAny;
499 if (RuntimeOption::EvalJitStressTypePredPercent &&
500 RuntimeOption::EvalJitStressTypePredPercent > int(get_random() % 100)) {
501 int dt;
502 while (true) {
503 dt = get_random() % (KindOfRef + 1);
504 switch (dt) {
505 case KindOfNull:
506 case KindOfBoolean:
507 case KindOfInt64:
508 case KindOfDouble:
509 case KindOfString:
510 case KindOfArray:
511 case KindOfObject:
512 case KindOfResource:
513 break;
514 // KindOfRef and KindOfUninit can't happen for lots of predicted
515 // types.
516 case KindOfRef:
517 case KindOfUninit:
518 default:
519 continue;
521 break;
523 return DataType(dt);
526 if (ni->op() == OpCns ||
527 ni->op() == OpCnsE ||
528 ni->op() == OpCnsU) {
529 StringData* sd = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
530 TypedValue* tv = Unit::lookupCns(sd);
531 if (tv) {
532 return tv->m_type;
536 if (ni->op() == OpMod) {
537 // x % 0 returns boolean false, so we don't know for certain, but it's
538 // probably an int.
539 return KindOfInt64;
542 if (ni->op() == OpSqrt) {
543 // sqrt returns a double, unless you pass something nasty to it.
544 return KindOfDouble;
547 if (ni->op() == OpDiv) {
548 // Integers can produce integers if there's no residue, but $i / $j in
549     // general produces a double. $i / 0 produces boolean false, so we
550     // actually have to check the result.
551 auto lhs = ni->inputs[0];
552 auto rhs = ni->inputs[1];
554 if (lhs->valueType() == KindOfDouble || rhs->valueType() == KindOfDouble) {
555 return KindOfDouble;
558 if (rhs->isLiteral()) {
559 if (ni->imm[1].u_I64A == 0) return KindOfBoolean;
560 if (ni->imm[1].u_I64A == 1) return lhs->valueType();
562       if (lhs->isLiteral()) {
563 return ni->imm[0].u_I64A % ni->imm[1].u_I64A ? KindOfDouble
564 : KindOfInt64;
568 return KindOfDouble;
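// Example (sketch): 7 / 2 with both operands literal ints has a residue,
// so we predict KindOfDouble; 6 / 2 predicts KindOfInt64; a literal 0
// divisor predicts KindOfBoolean because $i / 0 really evaluates to
// boolean false.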
571 if (ni->op() == OpAbs) {
572 if (ni->inputs[0]->valueType() == KindOfDouble) {
573 return KindOfDouble;
576     // Some types can't be converted to integers and will return false here.
577 if (ni->inputs[0]->valueType() == KindOfArray) {
578 return KindOfBoolean;
581     // If the type is not numeric we need to convert it to a numeric type;
582     // a string can be converted to an Int64 or a Double, but most other
583     // types will end up being integral.
584 return KindOfInt64;
587 if (ni->op() == OpClsCnsD) {
588 const NamedEntityPair& cne =
589 ni->unit()->lookupNamedEntityPairId(ni->imm[1].u_SA);
590 StringData* cnsName = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
591 Class* cls = cne.second->getCachedClass();
592 if (cls) {
593 DataType dt = cls->clsCnsType(cnsName);
594 if (dt != KindOfUninit) {
595 TRACE(1, "clscnsd: %s:%s prediction type %d\n",
596 cne.first->data(), cnsName->data(), dt);
597 return dt;
602 if (ni->op() == OpSetM) {
603     /*
604      * SetM pushes null for certain rare combinations of input types, a string
605      * if the base was a string, or (most commonly) its first stack input. We
606      * mark the output as predicted here and do a very rough approximation of
607      * what really happens; most of the time the prediction will be a noop
608      * since MInstrTranslator side exits in all uncommon cases.
609      */
611 // If the base is a string, the output is probably a string.
612 Type baseType;
613 switch (ni->immVec.locationCode()) {
614 case LGL: case LGC:
615 case LNL: case LNC:
616 case LSL: case LSC:
617 baseType = Type::Gen;
618 break;
620 default:
621 baseType = Type(ni->inputs[1]->rtt);
623 if (baseType.isString()) return KindOfString;
625 // Otherwise, it's probably the input type.
626 return ni->inputs[0]->rtt.valueType();
629 static const double kAccept = 1.0;
630 std::pair<DataType, double> pred = std::make_pair(KindOfAny, 0.0);
631 // Type predictions grow tracelets, and can have a side effect of making
632   // them combinatorially explode if they bring in preconditions that vary a
633 // lot. Get more conservative as evidence mounts that this is a
634 // polymorphic tracelet.
635 if (tx64->numTranslations(startSk) >= kTooPolyPred) return KindOfAny;
636 if (ni->op() == OpCGetS) {
637 const StringData* propName = ni->inputs[1]->rtt.valueStringOrNull();
638 if (propName) {
639 pred = predictType(TypeProfileKey(TypeProfileKey::StaticPropName,
640 propName));
641 TRACE(1, "prediction for static fields named %s: %d, %f\n",
642 propName->data(),
643 pred.first,
644 pred.second);
646 } else if (hasImmVector(ni->op())) {
647 pred = predictMVec(ni);
649 if (pred.second < kAccept) {
650 if (const StringData* invName = fcallToFuncName(ni)) {
651 pred = predictType(TypeProfileKey(TypeProfileKey::MethodName, invName));
652 TRACE(1, "prediction for methods named %s: %d, %f\n",
653 invName->data(),
654 pred.first,
655 pred.second);
658 if (pred.second >= kAccept) {
659 TRACE(1, "accepting prediction of type %d\n", pred.first);
660 assert(pred.first != KindOfUninit);
661 return pred.first;
663 return KindOfAny;
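// Note: kAccept above is 1.0, so a profile-driven prediction is only taken
// when the profiler reports full confidence for that key; anything weaker
// falls back to KindOfAny and leaves the output unpredicted.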
666 /*
667 * Returns the type of the value a SetOpL will store into the local.
668 */
669 static RuntimeType setOpOutputType(NormalizedInstruction* ni,
670 const vector<DynLocation*>& inputs) {
671 assert(inputs.size() == 2);
672 const int kValIdx = 0;
673 const int kLocIdx = 1;
674 unsigned char op = ni->imm[1].u_OA;
675 DynLocation locLocation(inputs[kLocIdx]->location,
676 inputs[kLocIdx]->rtt.unbox());
677 assert(inputs[kLocIdx]->location.isLocal());
678 switch (op) {
679 case SetOpPlusEqual:
680 case SetOpMinusEqual:
681 case SetOpMulEqual: {
682 // Same as OutArith, except we have to fiddle with inputs a bit.
683 vector<DynLocation*> arithInputs;
684 arithInputs.push_back(&locLocation);
685 arithInputs.push_back(inputs[kValIdx]);
686 return RuntimeType(inferType(ArithRules, arithInputs));
688 case SetOpConcatEqual: return RuntimeType(KindOfString);
689 case SetOpDivEqual:
690 case SetOpModEqual: return RuntimeType(KindOfAny);
691 case SetOpAndEqual:
692 case SetOpOrEqual:
693 case SetOpXorEqual: return bitOpType(&locLocation, inputs[kValIdx]);
694 case SetOpSlEqual:
695 case SetOpSrEqual: return RuntimeType(KindOfInt64);
696 default:
697 not_reached();
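// Example (sketch): $l += $v with $l:Double and $v:Int64 routes through
// ArithRules and yields KindOfDouble, while $l .= $v always yields
// KindOfString regardless of the input types.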
701 static RuntimeType
702 getDynLocType(const SrcKey startSk,
703 NormalizedInstruction* ni,
704 InstrFlags::OutTypeConstraints constraint,
705 TransKind mode) {
706 using namespace InstrFlags;
707 auto const& inputs = ni->inputs;
708 assert(constraint != OutFInputL);
710 switch (constraint) {
711 #define CS(OutXLike, KindOfX) \
712 case OutXLike: \
713 return RuntimeType(KindOfX);
714 CS(OutInt64, KindOfInt64);
715 CS(OutBoolean, KindOfBoolean);
716 CS(OutDouble, KindOfDouble);
717 CS(OutString, KindOfString);
718 CS(OutNull, KindOfNull);
719 CS(OutUnknown, KindOfAny); // Subtle interaction with BB-breaking.
720 CS(OutFDesc, KindOfAny); // Unclear if OutFDesc has a purpose.
721 CS(OutArray, KindOfArray);
722 CS(OutObject, KindOfObject);
723 CS(OutResource, KindOfResource);
724 #undef CS
726 case OutCns: {
727 // If it's a system constant, burn in its type. Otherwise we have
728 // to accept prediction; use the translation-time value, or fall back
729 // to the targetcache if none exists.
730 StringData* sd = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
731 assert(sd);
732 const TypedValue* tv = Unit::lookupPersistentCns(sd);
733 if (tv) {
734 return RuntimeType(tv->m_type);
736 } // Fall through
737 case OutPred: {
738 // In TransProfile mode, disable type prediction to avoid side exits.
739 auto dt = mode == TransProfile ? KindOfAny : predictOutputs(startSk, ni);
740 if (dt != KindOfAny) ni->outputPredicted = true;
741 return RuntimeType(dt, dt == KindOfRef ? KindOfAny : KindOfNone);
744 case OutClassRef: {
745 Op op = Op(ni->op());
746 if ((op == OpAGetC && inputs[0]->isString())) {
747 const StringData* sd = inputs[0]->rtt.valueString();
748 if (sd) {
749 Class *klass = Unit::lookupUniqueClass(sd);
750 TRACE(3, "KindOfClass: derived class \"%s\" from string literal\n",
751 klass ? klass->preClass()->name()->data() : "NULL");
752 return RuntimeType(klass);
754 } else if (op == OpSelf) {
755 return RuntimeType(liveClass());
756 } else if (op == OpParent) {
757 Class* clss = liveClass();
758 if (clss != nullptr)
759 return RuntimeType(clss->parent());
761 return RuntimeType(KindOfClass);
764 case OutNullUninit: {
765 assert(ni->op() == OpNullUninit);
766 return RuntimeType(KindOfUninit);
769 case OutStringImm: {
770 assert(ni->op() == OpString);
771 StringData* sd = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
772 assert(sd);
773 return RuntimeType(sd);
776 case OutArrayImm: {
777 assert(ni->op() == OpArray);
778 ArrayData *ad = ni->m_unit->lookupArrayId(ni->imm[0].u_AA);
779 assert(ad);
780 return RuntimeType(ad);
783 case OutBooleanImm: {
784 assert(ni->op() == OpTrue || ni->op() == OpFalse);
785 return RuntimeType(ni->op() == OpTrue);
788 case OutThisObject: {
789 return Translator::outThisObjectType();
792 case OutVUnknown: {
793 return RuntimeType(KindOfRef, KindOfAny);
796 case OutArith: {
797 return RuntimeType(inferType(ArithRules, inputs));
800 case OutSameAsInput: {
801       /*
802        * Relies closely on the order that inputs are pushed in
803        * getInputs(). (Pushing top of stack first for multi-stack
804        * consumers, stack elements before M-vectors and locals, etc.)
805        */
806 assert(inputs.size() >= 1);
807 auto op = ni->op();
808 ASSERT_NOT_IMPLEMENTED(
809 // Sets and binds that take multiple arguments have the rhs
810 // pushed first. In the case of the M-vector versions, the
811 // rhs comes before the M-vector elements.
812 op == OpSetL || op == OpSetN || op == OpSetG || op == OpSetS ||
813 op == OpBindL || op == OpBindG || op == OpBindS || op == OpBindN ||
814 op == OpBindM ||
815 // Dup takes a single element.
816 op == OpDup
819 const int idx = 0; // all currently supported cases.
821 if (debug) {
822 if (!inputs[idx]->rtt.isVagueValue()) {
823 if (op == OpBindG || op == OpBindN || op == OpBindS ||
824 op == OpBindM || op == OpBindL) {
825 assert(inputs[idx]->rtt.isRef() && !inputs[idx]->isLocal());
826 } else {
827 assert(inputs[idx]->rtt.valueType() ==
828 inputs[idx]->rtt.outerType());
832 return inputs[idx]->rtt;
835 case OutCInputL: {
836 assert(inputs.size() >= 1);
837 const DynLocation* in = inputs[inputs.size() - 1];
838 RuntimeType retval;
839 if (in->rtt.outerType() == KindOfUninit) {
840 // Locals can be KindOfUninit, so we need to convert
841 // this to KindOfNull
842 retval = RuntimeType(KindOfNull);
843 } else {
844 retval = in->rtt.unbox();
846 TRACE(2, "Input (%d, %d) -> (%d, %d)\n",
847 in->rtt.outerType(), in->rtt.innerType(),
848 retval.outerType(), retval.innerType());
849 return retval;
852 case OutIncDec: {
853 const RuntimeType &inRtt = ni->inputs[0]->rtt;
854 // TODO: instead of KindOfAny this should track the actual
855 // type we will get from interping a non-int IncDec.
856 return RuntimeType(IS_INT_TYPE(inRtt.valueType()) ?
857 KindOfInt64 : KindOfAny);
860 case OutStrlen: {
861 auto const& rtt = ni->inputs[0]->rtt;
862 return RuntimeType(rtt.isString() ? KindOfInt64 : KindOfAny);
865 case OutCInput: {
866 assert(inputs.size() >= 1);
867 const DynLocation* in = inputs[inputs.size() - 1];
868 if (in->rtt.outerType() == KindOfRef) {
869 return in->rtt.unbox();
871 return in->rtt;
874 case OutBitOp: {
875 assert(inputs.size() == 2 ||
876 (inputs.size() == 1 && ni->op() == OpBitNot));
877 if (inputs.size() == 2) {
878 return bitOpType(inputs[0], inputs[1]);
879 } else {
880 return bitOpType(inputs[0], nullptr);
884 case OutSetOp: {
885 return setOpOutputType(ni, inputs);
888 case OutVInput:
889 case OutVInputL:
890 case OutFInputL:
891 case OutFInputR:
892 case OutFPushCufSafe: {
893 return RuntimeType(KindOfAny);
896 case OutNone: not_reached();
898 always_assert(false && "Invalid output type constraint");
901 /*
902 * NB: this opcode structure is sparse; it cannot just be indexed by
903 * opcode.
904 */
905 using namespace InstrFlags;
906 static const struct {
907 Op op;
908 InstrInfo info;
909 } instrInfoSparse [] = {
911 // Op Inputs Outputs OutputTypes Stack delta
912 // -- ------ ------- ----------- -----------
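// Reading a row (sketch): { OpPopC, {Stack1|DontGuardStack1, None,
// OutNone, -1 }} says PopC consumes one stack cell without guarding its
// type, writes no output location, has no output type constraint, and
// shrinks the eval stack by one cell.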
914 /*** 1. Basic instructions ***/
916 { OpNop, {None, None, OutNone, 0 }},
917 { OpPopA, {Stack1, None, OutNone, -1 }},
918 { OpPopC, {Stack1|
919 DontGuardStack1, None, OutNone, -1 }},
920 { OpPopV, {Stack1|
921 DontGuardStack1|
922 IgnoreInnerType, None, OutNone, -1 }},
923 { OpPopR, {Stack1|
924 DontGuardStack1|
925 IgnoreInnerType, None, OutNone, -1 }},
926 { OpDup, {Stack1, StackTop2, OutSameAsInput, 1 }},
927 { OpBox, {Stack1, Stack1, OutVInput, 0 }},
928 { OpUnbox, {Stack1, Stack1, OutCInput, 0 }},
929 { OpBoxR, {Stack1, Stack1, OutVInput, 0 }},
930 { OpBoxRNop, {None, None, OutNone, 0 }},
931 { OpUnboxR, {Stack1, Stack1, OutCInput, 0 }},
932 { OpUnboxRNop, {None, None, OutNone, 0 }},
934 /*** 2. Literal and constant instructions ***/
936 { OpNull, {None, Stack1, OutNull, 1 }},
937 { OpNullUninit, {None, Stack1, OutNullUninit, 1 }},
938 { OpTrue, {None, Stack1, OutBooleanImm, 1 }},
939 { OpFalse, {None, Stack1, OutBooleanImm, 1 }},
940 { OpInt, {None, Stack1, OutInt64, 1 }},
941 { OpDouble, {None, Stack1, OutDouble, 1 }},
942 { OpString, {None, Stack1, OutStringImm, 1 }},
943 { OpArray, {None, Stack1, OutArrayImm, 1 }},
944 { OpNewArray, {None, Stack1, OutArray, 1 }},
945 { OpNewArrayReserve, {None, Stack1, OutArray, 1 }},
946 { OpNewPackedArray, {StackN, Stack1, OutArray, 0 }},
947 { OpAddElemC, {StackTop3, Stack1, OutArray, -2 }},
948 { OpAddElemV, {StackTop3, Stack1, OutArray, -2 }},
949 { OpAddNewElemC, {StackTop2, Stack1, OutArray, -1 }},
950 { OpAddNewElemV, {StackTop2, Stack1, OutArray, -1 }},
951 { OpNewCol, {None, Stack1, OutObject, 1 }},
952 { OpColAddElemC, {StackTop3, Stack1, OutObject, -2 }},
953 { OpColAddNewElemC, {StackTop2, Stack1, OutObject, -1 }},
954 { OpCns, {None, Stack1, OutCns, 1 }},
955 { OpCnsE, {None, Stack1, OutCns, 1 }},
956 { OpCnsU, {None, Stack1, OutCns, 1 }},
957 { OpClsCns, {Stack1, Stack1, OutUnknown, 0 }},
958 { OpClsCnsD, {None, Stack1, OutPred, 1 }},
959 { OpFile, {None, Stack1, OutString, 1 }},
960 { OpDir, {None, Stack1, OutString, 1 }},
962 /*** 3. Operator instructions ***/
964 /* Binary string */
965 { OpConcat, {StackTop2, Stack1, OutString, -1 }},
966 /* Arithmetic ops */
967 { OpAbs, {Stack1, Stack1, OutPred, 0 }},
968 { OpAdd, {StackTop2, Stack1, OutArith, -1 }},
969 { OpSub, {StackTop2, Stack1, OutArith, -1 }},
970 { OpMul, {StackTop2, Stack1, OutArith, -1 }},
971 /* Div and mod might return boolean false. Sigh. */
972 { OpDiv, {StackTop2, Stack1, OutPred, -1 }},
973 { OpMod, {StackTop2, Stack1, OutPred, -1 }},
974 { OpSqrt, {Stack1, Stack1, OutPred, 0 }},
975 /* Logical ops */
976 { OpXor, {StackTop2, Stack1, OutBoolean, -1 }},
977 { OpNot, {Stack1, Stack1, OutBoolean, 0 }},
978 { OpSame, {StackTop2, Stack1, OutBoolean, -1 }},
979 { OpNSame, {StackTop2, Stack1, OutBoolean, -1 }},
980 { OpEq, {StackTop2, Stack1, OutBoolean, -1 }},
981 { OpNeq, {StackTop2, Stack1, OutBoolean, -1 }},
982 { OpLt, {StackTop2, Stack1, OutBoolean, -1 }},
983 { OpLte, {StackTop2, Stack1, OutBoolean, -1 }},
984 { OpGt, {StackTop2, Stack1, OutBoolean, -1 }},
985 { OpGte, {StackTop2, Stack1, OutBoolean, -1 }},
986 /* Bitwise ops */
987 { OpBitAnd, {StackTop2, Stack1, OutBitOp, -1 }},
988 { OpBitOr, {StackTop2, Stack1, OutBitOp, -1 }},
989 { OpBitXor, {StackTop2, Stack1, OutBitOp, -1 }},
990 { OpBitNot, {Stack1, Stack1, OutBitOp, 0 }},
991 { OpShl, {StackTop2, Stack1, OutInt64, -1 }},
992 { OpShr, {StackTop2, Stack1, OutInt64, -1 }},
993 /* Cast instructions */
994 { OpCastBool, {Stack1, Stack1, OutBoolean, 0 }},
995 { OpCastInt, {Stack1, Stack1, OutInt64, 0 }},
996 { OpCastDouble, {Stack1, Stack1, OutDouble, 0 }},
997 { OpCastString, {Stack1, Stack1, OutString, 0 }},
998 { OpCastArray, {Stack1, Stack1, OutArray, 0 }},
999 { OpCastObject, {Stack1, Stack1, OutObject, 0 }},
1000 { OpInstanceOf, {StackTop2, Stack1, OutBoolean, -1 }},
1001 { OpInstanceOfD, {Stack1, Stack1, OutBoolean, 0 }},
1002 { OpPrint, {Stack1, Stack1, OutInt64, 0 }},
1003 { OpClone, {Stack1, Stack1, OutObject, 0 }},
1004 { OpExit, {Stack1, None, OutNone, -1 }},
1005 { OpFatal, {Stack1, None, OutNone, -1 }},
1007 /*** 4. Control flow instructions ***/
1009 { OpJmp, {None, None, OutNone, 0 }},
1010 { OpJmpZ, {Stack1, None, OutNone, -1 }},
1011 { OpJmpNZ, {Stack1, None, OutNone, -1 }},
1012 { OpSwitch, {Stack1, None, OutNone, -1 }},
1013 { OpSSwitch, {Stack1, None, OutNone, -1 }},
1014   /*
1015    * RetC and RetV are special. Their manipulation of the runtime stack is
1016    * outside the boundaries of the tracelet abstraction; since they always end
1017    * a basic block, they behave more like "glue" between BBs than the
1018    * instructions in the body of a BB.
1020    * RetC and RetV consume a value from the stack, and this value's type needs
1021    * to be known at compile-time.
1022    */
1023 { OpRetC, {AllLocals, None, OutNone, 0 }},
1024 { OpRetV, {AllLocals, None, OutNone, 0 }},
1025 { OpThrow, {Stack1, None, OutNone, -1 }},
1026 { OpUnwind, {None, None, OutNone, 0 }},
1028 /*** 5. Get instructions ***/
1030 { OpCGetL, {Local, Stack1, OutCInputL, 1 }},
1031 { OpCGetL2, {Stack1|Local, StackIns1, OutCInputL, 1 }},
1032 { OpCGetL3, {StackTop2|Local, StackIns2, OutCInputL, 1 }},
1033 { OpCGetN, {Stack1, Stack1, OutUnknown, 0 }},
1034 { OpCGetG, {Stack1, Stack1, OutUnknown, 0 }},
1035 { OpCGetS, {StackTop2, Stack1, OutPred, -1 }},
1036 { OpCGetM, {MVector, Stack1, OutPred, 1 }},
1037 { OpVGetL, {Local, Stack1, OutVInputL, 1 }},
1038 { OpVGetN, {Stack1, Stack1, OutVUnknown, 0 }},
1039 // TODO: In pseudo-main, the VGetG instruction invalidates what we know
1040 // about the types of the locals because it could cause any one of the
1041 // local variables to become "boxed". We need to add logic to tracelet
1042 // analysis to deal with this properly.
1043 { OpVGetG, {Stack1, Stack1, OutVUnknown, 0 }},
1044 { OpVGetS, {StackTop2, Stack1, OutVUnknown, -1 }},
1045 { OpVGetM, {MVector, Stack1|Local, OutVUnknown, 1 }},
1046 { OpAGetC, {Stack1, Stack1, OutClassRef, 0 }},
1047 { OpAGetL, {Local, Stack1, OutClassRef, 1 }},
1049 /*** 6. Isset, Empty, and type querying instructions ***/
1051 { OpAKExists, {StackTop2, Stack1, OutBoolean, -1 }},
1052 { OpIssetL, {Local, Stack1, OutBoolean, 1 }},
1053 { OpIssetN, {Stack1, Stack1, OutBoolean, 0 }},
1054 { OpIssetG, {Stack1, Stack1, OutBoolean, 0 }},
1055 { OpIssetS, {StackTop2, Stack1, OutBoolean, -1 }},
1056 { OpIssetM, {MVector, Stack1, OutBoolean, 1 }},
1057 { OpEmptyL, {Local, Stack1, OutBoolean, 1 }},
1058 { OpEmptyN, {Stack1, Stack1, OutBoolean, 0 }},
1059 { OpEmptyG, {Stack1, Stack1, OutBoolean, 0 }},
1060 { OpEmptyS, {StackTop2, Stack1, OutBoolean, -1 }},
1061 { OpEmptyM, {MVector, Stack1, OutBoolean, 1 }},
1062 { OpIsNullC, {Stack1, Stack1, OutBoolean, 0 }},
1063 { OpIsBoolC, {Stack1, Stack1, OutBoolean, 0 }},
1064 { OpIsIntC, {Stack1, Stack1, OutBoolean, 0 }},
1065 { OpIsDoubleC, {Stack1, Stack1, OutBoolean, 0 }},
1066 { OpIsStringC, {Stack1, Stack1, OutBoolean, 0 }},
1067 { OpIsArrayC, {Stack1, Stack1, OutBoolean, 0 }},
1068 { OpIsObjectC, {Stack1, Stack1, OutBoolean, 0 }},
1069 { OpIsNullL, {Local, Stack1, OutBoolean, 1 }},
1070 { OpIsBoolL, {Local, Stack1, OutBoolean, 1 }},
1071 { OpIsIntL, {Local, Stack1, OutBoolean, 1 }},
1072 { OpIsDoubleL, {Local, Stack1, OutBoolean, 1 }},
1073 { OpIsStringL, {Local, Stack1, OutBoolean, 1 }},
1074 { OpIsArrayL, {Local, Stack1, OutBoolean, 1 }},
1075 { OpIsObjectL, {Local, Stack1, OutBoolean, 1 }},
1077 /*** 7. Mutator instructions ***/
1079 { OpSetL, {Stack1|Local, Stack1|Local, OutSameAsInput, 0 }},
1080 { OpSetN, {StackTop2, Stack1|Local, OutSameAsInput, -1 }},
1081 { OpSetG, {StackTop2, Stack1, OutSameAsInput, -1 }},
1082 { OpSetS, {StackTop3, Stack1, OutSameAsInput, -2 }},
1083 { OpSetM, {MVector|Stack1, Stack1|Local, OutPred, 0 }},
1084 { OpSetWithRefLM,{MVector|Local , Local, OutNone, 0 }},
1085 { OpSetWithRefRM,{MVector|Stack1, Local, OutNone, -1 }},
1086 { OpSetOpL, {Stack1|Local, Stack1|Local, OutSetOp, 0 }},
1087 { OpSetOpN, {StackTop2, Stack1|Local, OutUnknown, -1 }},
1088 { OpSetOpG, {StackTop2, Stack1, OutUnknown, -1 }},
1089 { OpSetOpS, {StackTop3, Stack1, OutUnknown, -2 }},
1090 { OpSetOpM, {MVector|Stack1, Stack1|Local, OutUnknown, 0 }},
1091 { OpIncDecL, {Local, Stack1|Local, OutIncDec, 1 }},
1092 { OpIncDecN, {Stack1, Stack1|Local, OutUnknown, 0 }},
1093 { OpIncDecG, {Stack1, Stack1, OutUnknown, 0 }},
1094 { OpIncDecS, {StackTop2, Stack1, OutUnknown, -1 }},
1095 { OpIncDecM, {MVector, Stack1, OutUnknown, 1 }},
1096 { OpBindL, {Stack1|Local|
1097 IgnoreInnerType, Stack1|Local, OutSameAsInput, 0 }},
1098 { OpBindN, {StackTop2, Stack1|Local, OutSameAsInput, -1 }},
1099 { OpBindG, {StackTop2, Stack1, OutSameAsInput, -1 }},
1100 { OpBindS, {StackTop3, Stack1, OutSameAsInput, -2 }},
1101 { OpBindM, {MVector|Stack1, Stack1|Local, OutSameAsInput, 0 }},
1102 { OpUnsetL, {Local, Local, OutNone, 0 }},
1103 { OpUnsetN, {Stack1, Local, OutNone, -1 }},
1104 { OpUnsetG, {Stack1, None, OutNone, -1 }},
1105 { OpUnsetM, {MVector, Local, OutNone, 0 }},
1107 /*** 8. Call instructions ***/
1109 { OpFPushFunc, {Stack1, FStack, OutFDesc,
1110 kNumActRecCells - 1 }},
1111 { OpFPushFuncD, {None, FStack, OutFDesc,
1112 kNumActRecCells }},
1113 { OpFPushFuncU, {None, FStack, OutFDesc,
1114 kNumActRecCells }},
1115 { OpFPushObjMethod,
1116 {StackTop2, FStack, OutFDesc,
1117 kNumActRecCells - 2 }},
1118 { OpFPushObjMethodD,
1119 {Stack1, FStack, OutFDesc,
1120 kNumActRecCells - 1 }},
1121 { OpFPushClsMethod,
1122 {StackTop2, FStack, OutFDesc,
1123 kNumActRecCells - 2 }},
1124 { OpFPushClsMethodF,
1125 {StackTop2, FStack, OutFDesc,
1126 kNumActRecCells - 2 }},
1127 { OpFPushClsMethodD,
1128 {None, FStack, OutFDesc,
1129 kNumActRecCells }},
1130 { OpFPushCtor, {Stack1, Stack1|FStack,OutObject,
1131 kNumActRecCells }},
1132 { OpFPushCtorD, {None, Stack1|FStack,OutObject,
1133 kNumActRecCells + 1 }},
1134 { OpFPushCufIter,{None, FStack, OutFDesc,
1135 kNumActRecCells }},
1136 { OpFPushCuf, {Stack1, FStack, OutFDesc,
1137 kNumActRecCells - 1 }},
1138 { OpFPushCufF, {Stack1, FStack, OutFDesc,
1139 kNumActRecCells - 1 }},
1140 { OpFPushCufSafe,{StackTop2|DontGuardAny,
1141 StackTop2|FStack, OutFPushCufSafe,
1142 kNumActRecCells }},
1143 { OpFPassC, {FuncdRef, None, OutSameAsInput, 0 }},
1144 { OpFPassCW, {FuncdRef, None, OutSameAsInput, 0 }},
1145 { OpFPassCE, {FuncdRef, None, OutSameAsInput, 0 }},
1146 { OpFPassVNop, {None, None, OutNone, 0 }},
1147 { OpFPassV, {Stack1|FuncdRef, Stack1, OutUnknown, 0 }},
1148 { OpFPassR, {Stack1|FuncdRef, Stack1, OutFInputR, 0 }},
1149 { OpFPassL, {Local|FuncdRef, Stack1, OutFInputL, 1 }},
1150 { OpFPassN, {Stack1|FuncdRef, Stack1, OutUnknown, 0 }},
1151 { OpFPassG, {Stack1|FuncdRef, Stack1, OutFInputR, 0 }},
1152 { OpFPassS, {StackTop2|FuncdRef,
1153 Stack1, OutUnknown, -1 }},
1154 { OpFPassM, {MVector|FuncdRef, Stack1|Local, OutUnknown, 1 }},
1155   /*
1156    * FCall is special. Like the Ret* instructions, its manipulation of the
1157    * runtime stack is outside the boundaries of the tracelet abstraction.
1158    */
1159 { OpFCall, {FStack, Stack1, OutPred, 0 }},
1160 { OpFCallArray, {FStack, Stack1, OutPred,
1161 -(int)kNumActRecCells }},
1162 // TODO: output type is known
1163 { OpFCallBuiltin,{BStackN, Stack1, OutPred, 0 }},
1164 { OpCufSafeArray,{StackTop3|DontGuardAny,
1165 Stack1, OutArray, -2 }},
1166 { OpCufSafeReturn,{StackTop3|DontGuardAny,
1167 Stack1, OutUnknown, -2 }},
1168 { OpDecodeCufIter,{Stack1, None, OutNone, -1 }},
1170 /*** 11. Iterator instructions ***/
1172 { OpIterInit, {Stack1, Local, OutUnknown, -1 }},
1173 { OpMIterInit, {Stack1, Local, OutUnknown, -1 }},
1174 { OpWIterInit, {Stack1, Local, OutUnknown, -1 }},
1175 { OpIterInitK, {Stack1, Local, OutUnknown, -1 }},
1176 { OpMIterInitK, {Stack1, Local, OutUnknown, -1 }},
1177 { OpWIterInitK, {Stack1, Local, OutUnknown, -1 }},
1178 { OpIterNext, {None, Local, OutUnknown, 0 }},
1179 { OpMIterNext, {None, Local, OutUnknown, 0 }},
1180 { OpWIterNext, {None, Local, OutUnknown, 0 }},
1181 { OpIterNextK, {None, Local, OutUnknown, 0 }},
1182 { OpMIterNextK, {None, Local, OutUnknown, 0 }},
1183 { OpWIterNextK, {None, Local, OutUnknown, 0 }},
1184 { OpIterFree, {None, None, OutNone, 0 }},
1185 { OpMIterFree, {None, None, OutNone, 0 }},
1186 { OpCIterFree, {None, None, OutNone, 0 }},
1187 { OpIterBreak, {None, None, OutNone, 0 }},
1189 /*** 12. Include, eval, and define instructions ***/
1191 { OpIncl, {Stack1, Stack1, OutUnknown, 0 }},
1192 { OpInclOnce, {Stack1, Stack1, OutUnknown, 0 }},
1193 { OpReq, {Stack1, Stack1, OutUnknown, 0 }},
1194 { OpReqOnce, {Stack1, Stack1, OutUnknown, 0 }},
1195 { OpReqDoc, {Stack1, Stack1, OutUnknown, 0 }},
1196 { OpEval, {Stack1, Stack1, OutUnknown, 0 }},
1197 { OpDefFunc, {None, None, OutNone, 0 }},
1198 { OpDefTypeAlias,{None, None, OutNone, 0 }},
1199 { OpDefCls, {None, None, OutNone, 0 }},
1200 { OpNopDefCls, {None, None, OutNone, 0 }},
1201 { OpDefCns, {Stack1, Stack1, OutBoolean, 0 }},
1203 /*** 13. Miscellaneous instructions ***/
1205 { OpThis, {None, Stack1, OutThisObject, 1 }},
1206 { OpBareThis, {None, Stack1, OutUnknown, 1 }},
1207 { OpCheckThis, {This, None, OutNone, 0 }},
1208 { OpInitThisLoc,
1209 {None, Local, OutUnknown, 0 }},
1210 { OpStaticLoc,
1211 {None, Stack1, OutBoolean, 1 }},
1212 { OpStaticLocInit,
1213 {Stack1, Local, OutVUnknown, -1 }},
1214 { OpCatch, {None, Stack1, OutObject, 1 }},
1215 { OpVerifyParamType,
1216 {Local, None, OutNone, 0 }},
1217 { OpClassExists, {StackTop2, Stack1, OutBoolean, -1 }},
1218 { OpInterfaceExists,
1219 {StackTop2, Stack1, OutBoolean, -1 }},
1220 { OpTraitExists, {StackTop2, Stack1, OutBoolean, -1 }},
1221 { OpSelf, {None, Stack1, OutClassRef, 1 }},
1222 { OpParent, {None, Stack1, OutClassRef, 1 }},
1223 { OpLateBoundCls,{None, Stack1, OutClassRef, 1 }},
1224 { OpNativeImpl, {None, None, OutNone, 0 }},
1225 { OpCreateCl, {BStackN, Stack1, OutObject, 1 }},
1226 { OpStrlen, {Stack1, Stack1, OutStrlen, 0 }},
1227 { OpIncStat, {None, None, OutNone, 0 }},
1228 { OpArrayIdx, {StackTop3, Stack1, OutUnknown, -2 }},
1229 { OpFloor, {Stack1, Stack1, OutDouble, 0 }},
1230 { OpCeil, {Stack1, Stack1, OutDouble, 0 }},
1231 { OpAssertTL, {None, None, OutNone, 0 }},
1232 { OpAssertTStk, {None, None, OutNone, 0 }},
1233 { OpAssertObjL, {None, None, OutNone, 0 }},
1234 { OpAssertObjStk,{None, None, OutNone, 0 }},
1235 { OpBreakTraceHint,{None, None, OutNone, 0 }},
1237 /*** 14. Continuation instructions ***/
1239 { OpCreateCont, {None, Stack1|Local, OutObject, 1 }},
1240 { OpCreateAsync, {Stack1, Stack1|Local, OutObject, 0 }},
1241 { OpContEnter, {Stack1, None, OutNone, -1 }},
1242 { OpUnpackCont, {None, StackTop2, OutInt64, 2 }},
1243 { OpContSuspend, {Stack1, None, OutNone, -1 }},
1244 { OpContSuspendK,{StackTop2, None, OutNone, -2 }},
1245 { OpContRetC, {Stack1, None, OutNone, -1 }},
1246 { OpContCheck, {None, None, OutNone, 0 }},
1247 { OpContRaise, {None, None, OutNone, 0 }},
1248 { OpContValid, {None, Stack1, OutBoolean, 1 }},
1249 { OpContKey, {None, Stack1, OutUnknown, 1 }},
1250 { OpContCurrent, {None, Stack1, OutUnknown, 1 }},
1251 { OpContStopped, {None, None, OutNone, 0 }},
1252 { OpContHandle, {Stack1, None, OutNone, -1 }},
1255 static hphp_hash_map<Op, InstrInfo> instrInfo;
1256 static bool instrInfoInited;
1257 static void initInstrInfo() {
1258 if (!instrInfoInited) {
1259 for (size_t i = 0; i < sizeof(instrInfoSparse) / sizeof(instrInfoSparse[0]);
1260 i++) {
1261 instrInfo[instrInfoSparse[i].op] = instrInfoSparse[i].info;
1264 instrInfoInited = true;
1268 const InstrInfo& getInstrInfo(Op op) {
1269 assert(instrInfoInited);
1270 return instrInfo[op];
1273 static int numHiddenStackInputs(const NormalizedInstruction& ni) {
1274 assert(ni.immVec.isValid());
1275 return ni.immVec.numStackValues();
1278 namespace {
1279 int64_t countOperands(uint64_t mask) {
1280 const uint64_t ignore = FuncdRef | Local | Iter | AllLocals |
1281 DontGuardStack1 | IgnoreInnerType | DontGuardAny | This;
1282 mask &= ~ignore;
1284 static const uint64_t counts[][2] = {
1285 {Stack3, 1},
1286 {Stack2, 1},
1287 {Stack1, 1},
1288 {StackIns1, 2},
1289 {StackIns2, 3},
1290 {FStack, kNumActRecCells},
1293 int64_t count = 0;
1294 for (auto const& pair : counts) {
1295 if (mask & pair[0]) {
1296 count += pair[1];
1297 mask &= ~pair[0];
1300 assert(mask == 0);
1301 return count;
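// Example (sketch): a mask of Stack1|StackIns1 counts 1 + 2 = 3 cells,
// FStack adds kNumActRecCells, and the ignored flags (Local, This, ...)
// contribute nothing because they don't occupy eval-stack cells.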
1305 int64_t getStackPopped(const NormalizedInstruction& ni) {
1306 switch (ni.op()) {
1307 case OpFCall: return ni.imm[0].u_IVA + kNumActRecCells;
1308 case OpFCallArray: return kNumActRecCells + 1;
1310 case OpFCallBuiltin:
1311 case OpNewPackedArray:
1312 case OpCreateCl: return ni.imm[0].u_IVA;
1314 default: break;
1317 uint64_t mask = getInstrInfo(ni.op()).in;
1318 int64_t count = 0;
1320 if (mask & MVector) {
1321 count += ni.immVec.numStackValues();
1322 mask &= ~MVector;
1324 if (mask & (StackN | BStackN)) {
1325 count += ni.imm[0].u_IVA;
1326 mask &= ~(StackN | BStackN);
1329 return count + countOperands(mask);
1332 int64_t getStackPushed(const NormalizedInstruction& ni) {
1333 return countOperands(getInstrInfo(ni.op()).out);
1336 int getStackDelta(const NormalizedInstruction& ni) {
1337 int hiddenStackInputs = 0;
1338 initInstrInfo();
1339 auto op = ni.op();
1340 switch (op) {
1341 case OpFCall: {
1342 int numArgs = ni.imm[0].u_IVA;
1343 return 1 - numArgs - kNumActRecCells;
1346 case OpFCallBuiltin:
1347 case OpNewPackedArray:
1348 case OpCreateCl:
1349 return 1 - ni.imm[0].u_IVA;
1351 default:
1352 break;
1354 const InstrInfo& info = instrInfo[op];
1355 if (info.in & MVector) {
1356 hiddenStackInputs = numHiddenStackInputs(ni);
1357 SKTRACE(2, ni.source, "Has %d hidden stack inputs\n", hiddenStackInputs);
1359 int delta = instrInfo[op].numPushed - hiddenStackInputs;
1360 return delta;
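// Example (sketch): FCall with 2 args pops those 2 cells plus the
// kNumActRecCells-cell ActRec and pushes one return value, giving a
// delta of 1 - 2 - kNumActRecCells.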
1363 static NormalizedInstruction* findInputSrc(NormalizedInstruction* ni,
1364 DynLocation* dl) {
1365 while (ni != nullptr) {
1366 if (ni->outStack == dl ||
1367 ni->outLocal == dl ||
1368 ni->outLocal2 == dl ||
1369 ni->outStack2 == dl ||
1370 ni->outStack3 == dl) {
1371 break;
1373 ni = ni->prev;
1375 return ni;
1378 bool outputIsPredicted(SrcKey startSk,
1379 NormalizedInstruction& inst) {
1380 auto const& iInfo = getInstrInfo(inst.op());
1381 auto doPrediction =
1382 (iInfo.type == OutPred || iInfo.type == OutCns) && !inst.breaksTracelet;
1383 if (doPrediction) {
1384 // All OutPred ops except for SetM have a single stack output for now.
1385 assert(iInfo.out == Stack1 || inst.op() == OpSetM);
1386 auto dt = predictOutputs(startSk, &inst);
1387 if (dt != KindOfAny) {
1388 inst.outPred = Type(dt, dt == KindOfRef ? KindOfAny : KindOfNone);
1389 } else {
1390 doPrediction = false;
1394 return doPrediction;
1397 /*
1398 * For MetaData information that affects whether we even want to put a
1399 * value in ni->inputs, we need to look at it before we call
1400 * getInputs(), so this is separate from applyInputMetaData.
1402 * We also check GuardedThis here, since RetC is short-circuited in
1403 * applyInputMetaData.
1404 */
1405 void preInputApplyMetaData(Unit::MetaHandle metaHand,
1406 NormalizedInstruction* ni) {
1407 if (!metaHand.findMeta(ni->unit(), ni->offset())) return;
1409 Unit::MetaInfo info;
1410 while (metaHand.nextArg(info)) {
1411 switch (info.m_kind) {
1412 case Unit::MetaInfo::Kind::NonRefCounted:
1413 ni->nonRefCountedLocals.resize(ni->func()->numLocals());
1414 ni->nonRefCountedLocals[info.m_data] = 1;
1415 break;
1416 case Unit::MetaInfo::Kind::GuardedThis:
1417 ni->guardedThis = true;
1418 break;
1419 default:
1420 break;
1425 static bool isTypeAssert(Op op) {
1426 return op == Op::AssertTL || op == Op::AssertTStk ||
1427 op == Op::AssertObjL || op == Op::AssertObjStk;
1430 static bool isAlwaysNop(Op op) {
1431 if (isTypeAssert(op)) return true;
1432 switch (op) {
1433 case Op::UnboxRNop:
1434 case Op::BoxRNop:
1435 case Op::FPassVNop:
1436 case Op::FPassC:
1437 return true;
1438 default:
1439 return false;
1443 void Translator::handleAssertionEffects(Tracelet& t,
1444 const NormalizedInstruction& ni,
1445 TraceletContext& tas,
1446 int currentStackOffset) {
1447 assert(isTypeAssert(ni.op()));
1449 auto const loc = [&] {
1450 switch (ni.op()) {
1451 case Op::AssertTL:
1452 case Op::AssertObjL:
1453 return Location(Location::Local, ni.imm[0].u_LA);
1454 case Op::AssertTStk:
1455 case Op::AssertObjStk:
1456 return Location(Location::Stack,
1457 currentStackOffset - 1 - ni.imm[0].u_IVA);
1458 default:
1459 not_reached();
1461 }();
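// Example (sketch): with currentStackOffset == 3, AssertTStk 0 names the
// top of the eval stack, Location(Stack, 2); AssertTStk 1 names the cell
// just below it.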
1462 if (loc.isInvalid()) return;
1464 auto const rt = [&]() -> folly::Optional<RuntimeType> {
1465 if (ni.op() == Op::AssertObjStk || ni.op() == Op::AssertObjL) {
1466       /*
1467        * Even though the class must be defined at the point of the
1468        * AssertObj, we might not have defined it yet in this tracelet,
1469        * or it might not be unique. For now just restrict this to
1470        * unique classes (we could also check parent of current
1471        * context).
1473        * There's nothing we can do with the 'exact' bit right now.
1474        */
1475 auto const cls = Unit::lookupUniqueClass(
1476 ni.m_unit->lookupLitstrId(ni.imm[2].u_SA)
1478 if (cls && (cls->attrs() & AttrUnique)) {
1479 return RuntimeType{KindOfObject, KindOfNone, cls};
1481 return folly::none;
1484 switch (static_cast<AssertTOp>(ni.imm[1].u_OA)) {
1485 case AssertTOp::Uninit: return RuntimeType{KindOfUninit};
1486 case AssertTOp::InitNull: return RuntimeType{KindOfNull};
1487 case AssertTOp::Int: return RuntimeType{KindOfInt64};
1488 case AssertTOp::Dbl: return RuntimeType{KindOfDouble};
1489 case AssertTOp::Res: return RuntimeType{KindOfResource};
1490 case AssertTOp::Null: return folly::none;
1491 case AssertTOp::Bool: return RuntimeType{KindOfBoolean};
1492 case AssertTOp::SStr: return RuntimeType{KindOfString};
1493 case AssertTOp::Str: return RuntimeType{KindOfString};
1494 case AssertTOp::SArr: return RuntimeType{KindOfArray};
1495 case AssertTOp::Arr: return RuntimeType{KindOfArray};
1496 case AssertTOp::Obj: return RuntimeType{KindOfObject};
1498 // We can turn these into information in hhbc-translator but can't
1499 // really remove guards, since it can be more than one DataType,
1500 // so don't do anything here.
1501 case AssertTOp::OptInt:
1502 case AssertTOp::OptDbl:
1503 case AssertTOp::OptRes:
1504 case AssertTOp::OptBool:
1505 case AssertTOp::OptSStr:
1506 case AssertTOp::OptStr:
1507 case AssertTOp::OptSArr:
1508 case AssertTOp::OptArr:
1509 case AssertTOp::OptObj:
1510 return folly::none;
1512 case AssertTOp::Ref:
1513 // We should be able to use this to avoid the outer-type guards
1514 // on KindOfRefs, but for now we don't because of complications
1515 // with communicating the predicted inner type to
1516 // hhbc-translator.
1517 return folly::none;
1519 // There's really not much we can do with a Cell assertion at
1520 // translation time, right now.
1521 case AssertTOp::Cell:
1522 return folly::none;
1524 // Since these don't correspond to data types, there's not much we
1525 // can do in the current situation.
1526 case AssertTOp::InitUnc:
1527 case AssertTOp::Unc:
1528 case AssertTOp::InitCell:
1529 // These could also remove guards, but it's a little too hard to
1530 // get this information to hhbc-translator with this legacy
1531 // tracelet stuff since they don't map directly to a DataType.
1532 return folly::none;
1534 not_reached();
1535 }();
1536 if (!rt) return;
1538 auto const dl = t.newDynLocation(loc, *rt);
1540 // No need for m_resolvedDeps---because we're in the bytecode stream
1541 // we don't need to tell hhbc-translator about it out of band.
1542 auto& curVal = tas.m_currentMap[dl->location];
1543 if (curVal && !curVal->rtt.isVagueValue()) {
1544 if (curVal->rtt.outerType() != dl->rtt.outerType()) {
1545       /*
1546        * The tracked type disagrees with ahead-of-time analysis. A
1547        * similar case occurs in applyInputMetaData.
1549        * Either static analysis is wrong, this was a mispredicted type
1550        * from warmup profiling, or the code is unreachable because we're
1551        * about to fatal (e.g. a VerifyParamType is about to throw).
1553        * Punt this opcode to end the trace.
1554        */
1555 FTRACE(1, "punting for {}\n", loc.pretty());
1556 punt();
1559 auto const isSpecializedObj =
1560 rt->outerType() == KindOfObject && rt->valueClass();
1561 if (!isSpecializedObj || curVal->rtt.valueClass()) {
1562 // Otherwise, we may have more information in the curVal
1563 // RuntimeType than would come from the AssertT if we were
1564 // tracking a literal value or something.
1565 FTRACE(1, "assertion leaving curVal alone {}\n", curVal->pretty());
1566 return;
1569 FTRACE(1, "assertion effects {} -> {}\n",
1570 curVal ? curVal->pretty() : std::string{},
1571 dl->pretty());
1572 curVal = dl;
1575 bool Translator::applyInputMetaData(Unit::MetaHandle& metaHand,
1576 NormalizedInstruction* ni,
1577 TraceletContext& tas,
1578 InputInfos &inputInfos) {
1579 if (isAlwaysNop(ni->op())) {
1580 ni->noOp = true;
1581 return true;
1584 if (!metaHand.findMeta(ni->unit(), ni->offset())) return false;
1586 Unit::MetaInfo info;
1587 if (!metaHand.nextArg(info)) return false;
1589   /*
1590    * We need to adjust the indexes in MetaInfo::m_arg if this
1591    * instruction takes other stack arguments than those related to the
1592    * MVector. (For example, the rhs of an assignment.)
1593    */
1594 const InstrInfo& iInfo = instrInfo[ni->op()];
1595 if (iInfo.in & AllLocals) {
1596     /*
1597      * RetC/RetV don't care about their stack input, but it may have
1598      * been annotated. Skip it (because RetC/RetV pretend they don't
1599      * have a stack input).
1600      */
1601 return false;
1603 if (iInfo.in == FuncdRef) {
1604     /*
1605      * FPassC* pretend to have no inputs.
1606      */
1607 return false;
1609 const int base = !(iInfo.in & MVector) ? 0 :
1610 !(iInfo.in & Stack1) ? 0 :
1611 !(iInfo.in & Stack2) ? 1 :
1612 !(iInfo.in & Stack3) ? 2 : 3;
1614 do {
1615 SKTRACE(3, ni->source, "considering MetaInfo of kind %d\n", info.m_kind);
1617 int arg = info.m_arg & Unit::MetaInfo::VectorArg ?
1618 base + (info.m_arg & ~Unit::MetaInfo::VectorArg) : info.m_arg;
1620 switch (info.m_kind) {
1621 case Unit::MetaInfo::Kind::NoSurprise:
1622 ni->noSurprise = true;
1623 break;
1624 case Unit::MetaInfo::Kind::GuardedCls:
1625 ni->guardedCls = true;
1626 break;
1627 case Unit::MetaInfo::Kind::DataTypePredicted: {
1628 // In TransProfile mode, disable type predictions to avoid side exits.
1629 if (m_mode == TransProfile) break;
1631 // If the original type was invalid or predicted, then use the
1632 // prediction in the meta-data.
1633 assert((unsigned) arg < inputInfos.size());
1635 SKTRACE(1, ni->source, "MetaInfo DataTypePredicted for input %d; "
1636 "newType = %d\n", arg, DataType(info.m_data));
1637 InputInfo& ii = inputInfos[arg];
1638 DynLocation* dl = tas.recordRead(ii, false, KindOfAny);
1639 NormalizedInstruction* src = findInputSrc(tas.m_t->m_instrStream.last,
1640 dl);
1641 if (src) {
1642 // Update the rtt and mark src's output as predicted if either:
1643 // a) we don't have type information yet (ie, it's KindOfAny), or
1644 // b) src's output was predicted. This is assuming that the
1645 // front-end's prediction is more accurate.
1646 if (dl->rtt.outerType() == KindOfAny || src->outputPredicted) {
1647 SKTRACE(1, ni->source, "MetaInfo DataTypePredicted for input %d; "
1648 "replacing oldType = %d with newType = %d\n", arg,
1649 dl->rtt.outerType(), DataType(info.m_data));
1650 dl->rtt = RuntimeType((DataType)info.m_data);
1651 src->outputPredicted = true;
1652 src->outputPredictionStatic = true;
1655 break;
1657 case Unit::MetaInfo::Kind::DataTypeInferred: {
1658 assert((unsigned)arg < inputInfos.size());
1659 SKTRACE(1, ni->source, "MetaInfo DataTypeInferred for input %d; "
1660 "newType = %d\n", arg, DataType(info.m_data));
1661 InputInfo& ii = inputInfos[arg];
1662 ii.dontGuard = true;
1663 DynLocation* dl = tas.recordRead(ii, true, (DataType)info.m_data);
1664 if (dl->rtt.outerType() != info.m_data &&
1665 (!dl->isString() || info.m_data != KindOfString)) {
1666 if (dl->rtt.outerType() != KindOfAny) {
1667           // Either static analysis is wrong, or
1668           // this was mis-predicted by the type
1669           // profiler, or this code is unreachable,
1670           // and there's an earlier bytecode in the tracelet
1671           // that's going to fatal.
1672 NormalizedInstruction *src = nullptr;
1673 if (mapContains(tas.m_changeSet, dl->location)) {
1674 src = findInputSrc(tas.m_t->m_instrStream.last, dl);
1675 if (src && src->outputPredicted) {
1676 src->outputPredicted = false;
1677 } else {
1678 src = nullptr;
1681 if (!src) {
1682 // Not a type-profiler mis-predict
1683 if (tas.m_t->m_instrStream.first) {
1684 // We're not the first instruction, so punt
1685 // If this bytecode /is/ reachable, we'll
1686 // get here again, and that time, we will
1687 // be the first instruction
1688 punt();
1690 not_reached();
1693 dl->rtt = RuntimeType((DataType)info.m_data);
1694 ni->markInputInferred(arg);
1695 } else {
1696         /*
1697          * Static inference confirmed the expected type, but if the expected
1698          * type was provided by the type profiler we want to clear
1699          * outputPredicted to avoid unneeded guards.
1700          */
1702 if (mapContains(tas.m_changeSet, dl->location)) {
1703 NormalizedInstruction *src =
1704 findInputSrc(tas.m_t->m_instrStream.last, dl);
1705 if (src->outputPredicted) {
1706 src->outputPredicted = false;
1707 ni->markInputInferred(arg);
1711 break;
1714 case Unit::MetaInfo::Kind::String: {
1715 const StringData* sd = ni->unit()->lookupLitstrId(info.m_data);
1716 assert((unsigned)arg < inputInfos.size());
1717 InputInfo& ii = inputInfos[arg];
1718 ii.dontGuard = true;
1719 DynLocation* dl = tas.recordRead(ii, true, KindOfString);
1720 assert(!dl->rtt.isString() || !dl->rtt.valueString() ||
1721 dl->rtt.valueString() == sd);
1722 SKTRACE(1, ni->source, "MetaInfo on input %d; old type = %s\n",
1723 arg, dl->pretty().c_str());
1724 dl->rtt = RuntimeType(sd);
1725 break;
1728 case Unit::MetaInfo::Kind::Class: {
1729 assert((unsigned)arg < inputInfos.size());
1730 InputInfo& ii = inputInfos[arg];
1731 DynLocation* dl = tas.recordRead(ii, true);
1732 if (dl->rtt.valueType() != KindOfObject) {
1733 continue;
1736 const StringData* metaName = ni->unit()->lookupLitstrId(info.m_data);
1737 const StringData* rttName =
1738 dl->rtt.valueClass() ? dl->rtt.valueClass()->name() : nullptr;
1739 // The two classes might not be exactly the same, which is ok
1740 // as long as metaCls is more derived than rttCls.
1741 Class* metaCls = Unit::lookupUniqueClass(metaName);
1742 Class* rttCls = rttName ? Unit::lookupUniqueClass(rttName) : nullptr;
1743 if (metaCls && rttCls && metaCls != rttCls &&
1744 !metaCls->classof(rttCls)) {
1745 // Runtime type is more derived
1746 metaCls = 0;
1748 if (metaCls && metaCls != rttCls) {
1749 SKTRACE(1, ni->source, "replacing input %d with a MetaInfo-supplied "
1750 "class of %s; old type = %s\n",
1751 arg, metaName->data(), dl->pretty().c_str());
1752 if (dl->rtt.isRef()) {
1753 dl->rtt = RuntimeType(KindOfRef, KindOfObject, metaCls);
1754 } else {
1755 dl->rtt = RuntimeType(KindOfObject, KindOfNone, metaCls);
1758 break;
1761 case Unit::MetaInfo::Kind::MVecPropClass: {
1762 const StringData* metaName = ni->unit()->lookupLitstrId(info.m_data);
1763 Class* metaCls = Unit::lookupUniqueClass(metaName);
1764 if (metaCls) {
1765 ni->immVecClasses[arg] = metaCls;
1767 break;
1770 case Unit::MetaInfo::Kind::GuardedThis:
1771 case Unit::MetaInfo::Kind::NonRefCounted:
1772 // fallthrough; these are handled in preInputApplyMetaData.
1773 case Unit::MetaInfo::Kind::None:
1774 break;
1776 } while (metaHand.nextArg(info));
1778 return false;
1779 }
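// A minimal usage sketch (not from this hunk; it assumes MetaHandle's
// findMeta lookup, which is defined elsewhere in the Unit API): the
// callers in analyze()/preInputApplyMetaData drive this consumer
// roughly as follows.
//
//   Unit::MetaInfo info;
//   Unit::MetaHandle hand;
//   if (hand.findMeta(ni->unit(), ni->offset())) {
//     do {
//       // dispatch on info.m_kind, as in applyInputMetaData above
//     } while (hand.nextArg(info));
//   }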
1781 static void addMVectorInputs(NormalizedInstruction& ni,
1782 int& currentStackOffset,
1783 std::vector<InputInfo>& inputs) {
1784 assert(ni.immVec.isValid());
1785 ni.immVecM.reserve(ni.immVec.size());
1787 int UNUSED stackCount = 0;
1788 int UNUSED localCount = 0;
1790 currentStackOffset -= ni.immVec.numStackValues();
1791 int localStackOffset = currentStackOffset;
1793 auto push_stack = [&] {
1794 ++stackCount;
1795 inputs.emplace_back(Location(Location::Stack, localStackOffset++));
1796 };
1797 auto push_local = [&] (int imm) {
1798 ++localCount;
1799 inputs.emplace_back(Location(Location::Local, imm));
1800 };
1802 /*
1803 * Note that we have to push as we go so that the arguments come in
1804 * the order expected for the M-vector.
1806 * Indexes into these argument lists must also be in the same order
1807 * as the information in Unit::MetaInfo, because the analysis phase
1808 * may replace some of them with literals.
1812 * Also note: if we eventually have immediates that are not local
1813 * ids (i.e. string ids), this analysis step is going to have to be
1814 * a bit wiser.
1815 */
1816 auto opPtr = (const Op*)ni.source.pc();
1817 auto const location = getMLocation(opPtr);
1818 auto const lcode = location.lcode;
1820 const bool trailingClassRef = lcode == LSL || lcode == LSC;
1822 switch (numLocationCodeStackVals(lcode)) {
1823 case 0: {
1824 if (lcode == LH) {
1825 inputs.emplace_back(Location(Location::This));
1826 } else {
1827 assert(lcode == LL || lcode == LGL || lcode == LNL);
1828 if (location.hasImm()) {
1829 push_local(location.imm);
1832 } break;
1833 case 1:
1834 if (lcode == LSL) {
1835 // We'll get the trailing stack value after pushing all the
1836 // member vector elements.
1837 assert(location.hasImm());
1838 push_local(location.imm);
1839 } else {
1840 push_stack();
1842 break;
1843 case 2:
1844 push_stack();
1845 if (!trailingClassRef) {
1846 // This one is actually at the back.
1847 push_stack();
1849 break;
1850 default: not_reached();
1853 // Now push all the members in the correct order.
1854 for (auto const& member : getMVector(opPtr)) {
1855 auto const mcode = member.mcode;
1856 ni.immVecM.push_back(mcode);
1858 if (mcode == MW) {
1859 // No stack and no locals.
1860 continue;
1861 } else if (member.hasImm()) {
1862 int64_t imm = member.imm;
1863 if (memberCodeImmIsLoc(mcode)) {
1864 push_local(imm);
1865 } else if (memberCodeImmIsString(mcode)) {
1866 inputs.emplace_back(Location(Location::Litstr, imm));
1867 } else {
1868 assert(memberCodeImmIsInt(mcode));
1869 inputs.emplace_back(Location(Location::Litint, imm));
1871 } else {
1872 push_stack();
1874 inputs.back().dontGuardInner = true;
1877 if (trailingClassRef) {
1878 push_stack();
1881 ni.immVecClasses.resize(ni.immVecM.size());
1883 assert(stackCount == ni.immVec.numStackValues());
1885 SKTRACE(2, ni.source, "M-vector using %d hidden stack "
1886 "inputs, %d locals\n", stackCount, localCount);
1889 void getInputs(SrcKey startSk, NormalizedInstruction& inst, InputInfos& infos,
1890 const Func* func, const LocalTypeFn& localType) {
1891 // TranslatorX64 expects top of stack to be index -1, with indexes growing
1892 // down from there. hhir defines top of stack to be index 0, with indexes
1893 // growing up from there. To compensate we start with a stack offset of 1
1894 // and negate the index of any stack input after the call to getInputsImpl.
1895 int stackOff = 1;
1896 getInputsImpl(startSk, &inst, stackOff, infos, func, localType);
1897 for (auto& info : infos) {
1898 if (info.loc.isStack()) info.loc.offset = -info.loc.offset;
1899 }
1900 }
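// A small worked example of the sign flip (offsets illustrative): with
// the initial stackOff of 1, an instruction with two stack inputs has
// them recorded at offsets 0 and -1 by getInputsImpl; the loop above
// rewrites those to 0 and 1, matching hhir's convention that the top
// of the stack is index 0 and indexes grow upward.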
1902 /*
1903 * getInputsImpl --
1904 * Returns locations for this instruction's inputs.
1905 *
1906 * Throws:
1907 * TranslationFailedExc:
1908 * Unimplemented functionality, probably an opcode.
1909 *
1910 * UnknownInputExc:
1911 * Consumed a datum whose type or value could not be constrained at
1912 * translation time, because the tracelet has already modified it.
1913 * Truncate the tracelet at the preceding instruction, which must
1914 * exist because *something* modified something in it.
1915 */
1916 void getInputsImpl(SrcKey startSk,
1917 NormalizedInstruction* ni,
1918 int& currentStackOffset,
1919 InputInfos& inputs,
1920 const Func* func,
1921 const LocalTypeFn& localType) {
1922 #ifdef USE_TRACE
1923 const SrcKey& sk = ni->source;
1924 #endif
1925 assert(inputs.empty());
1926 if (debug && !mapContains(instrInfo, ni->op())) {
1927 fprintf(stderr, "Translator does not understand "
1928 "instruction %s\n", opcodeToName(ni->op()));
1929 assert(false);
1931 const InstrInfo& info = instrInfo[ni->op()];
1932 Operands input = info.in;
1933 if (input & FuncdRef) {
1934 inputs.needsRefCheck = true;
1936 if (input & Iter) {
1937 inputs.emplace_back(Location(Location::Iter, ni->imm[0].u_IVA));
1939 if (input & FStack) {
1940 currentStackOffset -= ni->imm[0].u_IVA; // arguments consumed
1941 currentStackOffset -= kNumActRecCells; // ActRec is torn down as well
1943 if (input & IgnoreInnerType) ni->ignoreInnerType = true;
1944 if (input & Stack1) {
1945 SKTRACE(1, sk, "getInputs: stack1 %d\n", currentStackOffset - 1);
1946 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1947 if (input & DontGuardStack1) inputs.back().dontGuard = true;
1948 if (input & Stack2) {
1949 SKTRACE(1, sk, "getInputs: stack2 %d\n", currentStackOffset - 1);
1950 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1951 if (input & Stack3) {
1952 SKTRACE(1, sk, "getInputs: stack3 %d\n", currentStackOffset - 1);
1953 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1957 if (input & StackN) {
1958 int numArgs = ni->imm[0].u_IVA;
1959 SKTRACE(1, sk, "getInputs: stackN %d %d\n", currentStackOffset - 1,
1960 numArgs);
1961 for (int i = 0; i < numArgs; i++) {
1962 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1963 inputs.back().dontGuard = true;
1964 inputs.back().dontBreak = true;
1967 if (input & BStackN) {
1968 int numArgs = ni->imm[0].u_IVA;
1969 SKTRACE(1, sk, "getInputs: BStackN %d %d\n", currentStackOffset - 1,
1970 numArgs);
1971 for (int i = 0; i < numArgs; i++) {
1972 inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
1975 if (input & MVector) {
1976 addMVectorInputs(*ni, currentStackOffset, inputs);
1978 if (input & Local) {
1979 // Instructions that take a Local encode its index in an immediate;
1980 // for most it is imm[0], with the exceptions handled below.
1981 int loc;
1982 auto insertAt = inputs.end();
1983 switch (ni->op()) {
1984 case OpSetWithRefLM:
1985 insertAt = inputs.begin();
1986 // fallthrough
1987 case OpFPassL:
1988 loc = ni->imm[1].u_IVA;
1989 break;
1991 default:
1992 loc = ni->imm[0].u_IVA;
1993 break;
1995 SKTRACE(1, sk, "getInputs: local %d\n", loc);
1996 inputs.emplace(insertAt, Location(Location::Local, loc));
1999 auto wantInlineReturn = [&] {
2000 const int localCount = ni->func()->numLocals();
2001 // Inline return causes us to guard this tracelet more precisely. If
2002 // we're already chaining to get here, just do a generic return in the
2003 // hopes of avoiding further specialization. The localCount constraint
2004 // is an unfortunate consequence of the current generic machinery not
2005 // working for 0 locals.
2006 if (tx64->numTranslations(startSk) >= kTooPolyRet && localCount > 0) {
2007 return false;
2009 ni->nonRefCountedLocals.resize(localCount);
2010 int numRefCounted = 0;
2011 for (int i = 0; i < localCount; ++i) {
2012 auto curType = localType(i);
2013 if (ni->nonRefCountedLocals[i]) {
2014 assert(curType.notCounted() && "Static analysis was wrong");
2016 if (curType.maybeCounted()) {
2017 numRefCounted++;
2020 return numRefCounted <= RuntimeOption::EvalHHIRInliningMaxReturnDecRefs;
2023 if ((input & AllLocals) && wantInlineReturn()) {
2024 ni->inlineReturn = true;
2025 ni->ignoreInnerType = true;
2026 int n = ni->func()->numLocals();
2027 for (int i = 0; i < n; ++i) {
2028 if (!ni->nonRefCountedLocals[i]) {
2029 inputs.emplace_back(Location(Location::Local, i));
2034 SKTRACE(1, sk, "stack args: virtual sfo now %d\n", currentStackOffset);
2035 TRACE(1, "%s\n", Trace::prettyNode("Inputs", inputs).c_str());
2037 if (inputs.size() &&
2038 ((input & DontGuardAny) || dontGuardAnyInputs(ni->op()))) {
2039 for (int i = inputs.size(); i--; ) {
2040 inputs[i].dontGuard = true;
2043 if (input & This) {
2044 inputs.emplace_back(Location(Location::This));
2048 bool dontGuardAnyInputs(Op op) {
2049 switch (op) {
2050 #define CASE(iNm) case Op ## iNm:
2051 #define NOOP(...)
2052 INSTRS
2053 PSEUDOINSTR_DISPATCH(NOOP)
2054 return false;
2056 default:
2057 return true;
2058 }
2059 #undef NOOP
2060 #undef CASE
2061 }
2063 bool outputDependsOnInput(const Op instr) {
2064 switch (instrInfo[instr].type) {
2065 case OutNull:
2066 case OutNullUninit:
2067 case OutString:
2068 case OutStringImm:
2069 case OutDouble:
2070 case OutBoolean:
2071 case OutBooleanImm:
2072 case OutInt64:
2073 case OutArray:
2074 case OutArrayImm:
2075 case OutObject:
2076 case OutResource:
2077 case OutThisObject:
2078 case OutUnknown:
2079 case OutVUnknown:
2080 case OutClassRef:
2081 case OutPred:
2082 case OutCns:
2083 case OutStrlen:
2084 case OutNone:
2085 return false;
2087 case OutFDesc:
2088 case OutSameAsInput:
2089 case OutCInput:
2090 case OutVInput:
2091 case OutCInputL:
2092 case OutVInputL:
2093 case OutFInputL:
2094 case OutFInputR:
2095 case OutArith:
2096 case OutBitOp:
2097 case OutSetOp:
2098 case OutIncDec:
2099 case OutFPushCufSafe:
2100 return true;
2101 }
2102 not_reached();
2103 }
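// For example: SetL is OutSameAsInput, so outputDependsOnInput(OpSetL)
// is true and guard relaxation has to chase the value through SetL's
// outputs; PopC is OutNone, so a chain of uses ends there.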
2105 /*
2106 * getOutputs --
2107 * Builds a vector describing this instruction's outputs. Also
2108 * records any write to a value that *might* alias a local.
2109 *
2110 * Throws:
2111 * TranslationFailedExc:
2112 * Unimplemented functionality, probably an opcode.
2113 */
2114 void Translator::getOutputs(/*inout*/ Tracelet& t,
2115 /*inout*/ NormalizedInstruction* ni,
2116 /*inout*/ int& currentStackOffset,
2117 /*out*/ bool& varEnvTaint) {
2118 varEnvTaint = false;
2120 const vector<DynLocation*>& inputs = ni->inputs;
2121 const Op op = ni->op();
2123 initInstrInfo();
2124 assert_not_implemented(instrInfo.find(op) != instrInfo.end());
2125 const Operands outLocs = instrInfo[op].out;
2126 const OutTypeConstraints typeInfo = instrInfo[op].type;
2128 SKTRACE(1, ni->source, "output flavor %d\n", typeInfo);
2129 if (typeInfo == OutFInputL || typeInfo == OutFInputR ||
2130 typeInfo == OutVInputL) {
2131 // Variable number of outputs. If we box the loc we're reading,
2132 // we need to write out its boxed-ness.
2133 assert(inputs.size() >= 1);
2134 const DynLocation* in = inputs[inputs.size() - 1];
2135 DynLocation* outDynLoc = t.newDynLocation(in->location, in->rtt);
2136 outDynLoc->location = Location(Location::Stack, currentStackOffset++);
2137 bool isRef;
2138 if (typeInfo == OutVInputL) {
2139 isRef = true;
2140 } else {
2141 assert(typeInfo == OutFInputL || typeInfo == OutFInputR);
2142 isRef = ni->preppedByRef;
2144 if (isRef) {
2145 // Locals can be KindOfUninit, so we need to convert
2146 // this to KindOfNull
2147 if (in->rtt.outerType() == KindOfUninit) {
2148 outDynLoc->rtt = RuntimeType(KindOfRef, KindOfNull);
2149 } else {
2150 outDynLoc->rtt = in->rtt.box();
2152 SKTRACE(1, ni->source, "boxed type: %d -> %d\n",
2153 outDynLoc->rtt.outerType(), outDynLoc->rtt.innerType());
2154 } else {
2155 if (outDynLoc->rtt.outerType() == KindOfUninit) {
2156 outDynLoc->rtt = RuntimeType(KindOfNull);
2157 } else {
2158 outDynLoc->rtt = outDynLoc->rtt.unbox();
2160 SKTRACE(1, ni->source, "unboxed type: %d\n",
2161 outDynLoc->rtt.outerType());
2163 assert(outDynLoc->location.isStack());
2164 ni->outStack = outDynLoc;
2166 if (isRef && in->rtt.outerType() != KindOfRef &&
2167 typeInfo != OutFInputR &&
2168 in->location.isLocal()) {
2169 // VGetL or FPassL boxing a local
2170 DynLocation* smashedLocal =
2171 t.newDynLocation(in->location, outDynLoc->rtt);
2172 assert(smashedLocal->location.isLocal());
2173 ni->outLocal = smashedLocal;
2175 // Other things that might be getting boxed here include globals
2176 // and array values; since we don't attempt to track these things'
2177 // types in symbolic execution anyway, we can ignore them.
2178 return;
2181 int opnd = None;
2182 for (int outLocsCopy = (int)outLocs;
2183 outLocsCopy != (int)None;
2184 outLocsCopy &= ~opnd) {
2185 opnd = 1 << (ffs(outLocsCopy) - 1);
2186 assert(opnd != None && opnd != Stack3); // no instr produces 3 values
2187 assert(opnd != FuncdRef); // reffiness is immutable
2188 Location loc;
2189 switch (opnd) {
2190 // Pseudo-outputs that affect translator state
2191 case FStack: {
2192 currentStackOffset += kNumActRecCells;
2193 t.m_arState.pushFunc(*ni);
2194 } continue; // no instr-associated output
2196 case Local: {
2197 if (op == OpSetN || op == OpSetOpN || op == OpIncDecN ||
2198 op == OpBindN || op == OpUnsetN) {
2199 varEnvTaint = true;
2200 continue;
2202 if (op == OpCreateCont || op == OpCreateAsync) {
2203 // CreateCont stores Uninit to all locals but NormalizedInstruction
2204 // doesn't have enough output fields, so we special case it in
2205 // analyze().
2206 continue;
2209 ASSERT_NOT_IMPLEMENTED(op == OpSetOpL ||
2210 op == OpSetM || op == OpSetOpM ||
2211 op == OpBindM ||
2212 op == OpSetWithRefLM || op == OpSetWithRefRM ||
2213 op == OpUnsetM ||
2214 op == OpIncDecL ||
2215 op == OpVGetM || op == OpFPassM ||
2216 op == OpStaticLocInit || op == OpInitThisLoc ||
2217 op == OpSetL || op == OpBindL ||
2218 op == OpUnsetL ||
2219 op == OpIterInit || op == OpIterInitK ||
2220 op == OpMIterInit || op == OpMIterInitK ||
2221 op == OpWIterInit || op == OpWIterInitK ||
2222 op == OpIterNext || op == OpIterNextK ||
2223 op == OpMIterNext || op == OpMIterNextK ||
2224 op == OpWIterNext || op == OpWIterNextK);
2225 if (op == OpFPassM && !ni->preppedByRef) {
2226 // Equivalent to CGetM. Won't mutate the base.
2227 continue;
2229 if (op == OpIncDecL) {
2230 assert(ni->inputs.size() == 1);
2231 const RuntimeType &inRtt = ni->inputs[0]->rtt;
2232 RuntimeType rtt =
2233 IS_INT_TYPE(inRtt.valueType()) ? inRtt : RuntimeType(KindOfAny);
2234 DynLocation* incDecLoc =
2235 t.newDynLocation(ni->inputs[0]->location, rtt);
2236 assert(incDecLoc->location.isLocal());
2237 ni->outLocal = incDecLoc;
2238 continue; // Doesn't mutate a loc's types for int. Carry on.
2240 if (op == OpUnsetL) {
2241 assert(ni->inputs.size() == 1);
2242 DynLocation* inLoc = ni->inputs[0];
2243 assert(inLoc->location.isLocal());
2244 RuntimeType newLhsRtt = RuntimeType(KindOfUninit);
2245 Location locLocation = inLoc->location;
2246 SKTRACE(2, ni->source, "(%s, %" PRId64 ") <- type %d\n",
2247 locLocation.spaceName(), locLocation.offset,
2248 newLhsRtt.valueType());
2249 DynLocation* unsetLoc = t.newDynLocation(locLocation, newLhsRtt);
2250 assert(unsetLoc->location.isLocal());
2251 ni->outLocal = unsetLoc;
2252 continue;
2254 if (op == OpStaticLocInit || op == OpInitThisLoc) {
2255 ni->outLocal = t.newDynLocation(Location(Location::Local,
2256 ni->imm[0].u_OA),
2257 KindOfAny);
2258 continue;
2260 if (op == OpSetM || op == OpSetOpM ||
2261 op == OpVGetM || op == OpBindM ||
2262 op == OpSetWithRefLM || op == OpSetWithRefRM ||
2263 op == OpUnsetM || op == OpFPassM) {
2264 switch (ni->immVec.locationCode()) {
2265 case LL: {
2266 const int kVecStart = (op == OpSetM ||
2267 op == OpSetOpM ||
2268 op == OpBindM ||
2269 op == OpSetWithRefLM ||
2270 op == OpSetWithRefRM) ?
2271 1 : 0; // 0 is rhs for SetM/SetOpM
2272 DynLocation* inLoc = ni->inputs[kVecStart];
2273 assert(inLoc->location.isLocal());
2274 Location locLoc = inLoc->location;
2275 if (op == OpUnsetM) {
2276 // UnsetM can change the value of its base local when it's an
2277 // array. Output a new DynLocation with the same type to
2278 // reflect the new value.
2279 ni->outLocal = t.newDynLocation(locLoc, inLoc->rtt);
2280 } else if (inLoc->rtt.isString() ||
2281 inLoc->rtt.valueType() == KindOfBoolean) {
2282 // Strings and bools produce value-dependent results; "" and
2283 // false upgrade to an array successfully, while other values
2284 // fail and leave the lhs unmodified.
2285 DynLocation* baseLoc = t.newDynLocation(locLoc, KindOfAny);
2286 assert(baseLoc->isLocal());
2287 ni->outLocal = baseLoc;
2288 } else if (inLoc->rtt.valueType() == KindOfUninit ||
2289 inLoc->rtt.valueType() == KindOfNull) {
2290 RuntimeType newLhsRtt = inLoc->rtt.setValueType(
2291 mcodeMaybePropName(ni->immVecM[0]) ?
2292 KindOfObject : KindOfArray);
2293 SKTRACE(2, ni->source, "(%s, %" PRId64 ") <- type %d\n",
2294 locLoc.spaceName(), locLoc.offset,
2295 newLhsRtt.valueType());
2296 DynLocation* baseLoc = t.newDynLocation(locLoc, newLhsRtt);
2297 assert(baseLoc->location.isLocal());
2298 ni->outLocal = baseLoc;
2300 // Note (if we start translating pseudo-mains):
2302 // A SetM in pseudo-main might alias a local whose type we're
2303 // remembering:
2305 // $GLOBALS['a'] = 123; // $a :: Int
2307 // and more deviously:
2309 // $loc['b'][17] = $GLOBALS; $x = 'b'; $y = 17;
2310 // $loc[$x][$y]['a'] = 123; // $a :: Int
2311 break;
2313 case LNL:
2314 case LNC:
2315 varEnvTaint = true;
2316 break;
2317 case LGL:
2318 case LGC:
2319 break;
2320 default:
2321 break;
2323 continue;
2325 if (op == OpSetOpL) {
2326 const int kLocIdx = 1;
2327 DynLocation* inLoc = ni->inputs[kLocIdx];
2328 assert(inLoc->location.isLocal());
2329 DynLocation* dl = t.newDynLocation();
2330 dl->location = inLoc->location;
2331 dl->rtt = setOpOutputType(ni, ni->inputs);
2332 if (inLoc->isRef()) {
2333 dl->rtt = dl->rtt.box();
2335 SKTRACE(2, ni->source, "(%s, %" PRId64 ") <- type %d\n",
2336 inLoc->location.spaceName(), inLoc->location.offset,
2337 dl->rtt.valueType());
2338 assert(dl->location.isLocal());
2339 ni->outLocal = dl;
2340 continue;
2342 if (op >= OpIterInit && op <= OpWIterNextK) {
2343 assert(op == OpIterInit || op == OpIterInitK ||
2344 op == OpMIterInit || op == OpMIterInitK ||
2345 op == OpWIterInit || op == OpWIterInitK ||
2346 op == OpIterNext || op == OpIterNextK ||
2347 op == OpMIterNext || op == OpMIterNextK ||
2348 op == OpWIterNext || op == OpWIterNextK);
2349 const int kValImmIdx = 2;
2350 const int kKeyImmIdx = 3;
2351 DynLocation* outVal = t.newDynLocation();
2352 int off = ni->imm[kValImmIdx].u_IVA;
2353 outVal->location = Location(Location::Local, off);
2354 if (op == OpMIterInit || op == OpMIterInitK ||
2355 op == OpMIterNext || op == OpMIterNextK) {
2356 outVal->rtt = RuntimeType(KindOfRef, KindOfAny);
2357 } else {
2358 outVal->rtt = RuntimeType(KindOfAny);
2360 ni->outLocal = outVal;
2361 if (op == OpIterInitK || op == OpIterNextK ||
2362 op == OpWIterInitK || op == OpWIterNextK ||
2363 op == OpMIterInitK || op == OpMIterNextK) {
2364 DynLocation* outKey = t.newDynLocation();
2365 int keyOff = getImm((Op*)ni->pc(), kKeyImmIdx).u_IVA;
2366 outKey->location = Location(Location::Local, keyOff);
2367 outKey->rtt = RuntimeType(KindOfAny);
2368 ni->outLocal2 = outKey;
2370 continue;
2372 assert(ni->inputs.size() == 2);
2373 const int kValIdx = 0;
2374 const int kLocIdx = 1;
2375 DynLocation* inLoc = ni->inputs[kLocIdx];
2376 DynLocation* inVal = ni->inputs[kValIdx];
2377 Location locLocation = inLoc->location;
2378 // Variant RHS possible only when binding.
2379 assert(inVal->rtt.isVagueValue() ||
2380 (op == OpBindL) ==
2381 (inVal->rtt.outerType() == KindOfRef));
2382 assert(!inVal->location.isLocal());
2383 assert(inLoc->location.isLocal());
2384 RuntimeType newLhsRtt = inVal->rtt.isVagueValue() || op == OpBindL ?
2385 inVal->rtt :
2386 inLoc->rtt.setValueType(inVal->rtt.outerType());
2387 if (inLoc->rtt.outerType() == KindOfRef) {
2388 assert(newLhsRtt.outerType() == KindOfRef);
2389 } else {
2390 assert(op == OpBindL ||
2391 newLhsRtt.outerType() != KindOfRef);
2393 SKTRACE(2, ni->source, "(%s, %" PRId64 ") <- type %d\n",
2394 locLocation.spaceName(), locLocation.offset,
2395 inVal->rtt.valueType());
2396 DynLocation* outLhsLoc = t.newDynLocation(locLocation, newLhsRtt);
2397 assert(outLhsLoc->location.isLocal());
2398 ni->outLocal = outLhsLoc;
2399 } continue; // already pushed an output for the local
2401 case Stack1:
2402 case Stack2: {
2403 loc = Location(Location::Stack, currentStackOffset++);
2404 if (ni->op() == OpFPushCufSafe) {
2405 // FPushCufSafe pushes its first stack input, then a bool.
2406 if (opnd == Stack2) {
2407 assert(ni->outStack == nullptr);
2408 auto* dl = t.newDynLocation(loc, ni->inputs[0]->rtt);
2409 ni->outStack = dl;
2410 } else {
2411 assert(ni->outStack2 == nullptr);
2412 auto* dl = t.newDynLocation(loc, KindOfBoolean);
2413 ni->outStack2 = dl;
2415 continue;
2417 } break;
2418 case StackIns1: {
2419 // First stack output is where the inserted element will go.
2420 // The output code for the instruction will affect what we
2421 // think about this location.
2422 loc = Location(Location::Stack, currentStackOffset++);
2424 // The existing top is just being moved up a notch. This one
2425 // always functions as if it were OutSameAsInput.
2426 assert(ni->inputs.size() >= 1);
2427 ni->outStack2 = t.newDynLocation(
2428 Location(Location::Stack, currentStackOffset++),
2429 ni->inputs[0]->rtt
2431 } break;
2432 case StackIns2: {
2433 // Similar to StackIns1.
2434 loc = Location(Location::Stack, currentStackOffset++);
2436 // Move the top two locations up a slot.
2437 assert(ni->inputs.size() >= 2);
2438 ni->outStack2 = t.newDynLocation(
2439 Location(Location::Stack, currentStackOffset++),
2440 ni->inputs[1]->rtt
2442 ni->outStack3 = t.newDynLocation(
2443 Location(Location::Stack, currentStackOffset++),
2444 ni->inputs[0]->rtt
2446 } break;
2447 default:
2448 not_reached();
2450 DynLocation* dl = t.newDynLocation();
2451 dl->location = loc;
2452 dl->rtt = getDynLocType(t.m_sk, ni, typeInfo, m_mode);
2453 SKTRACE(2, ni->source, "recording output t(%d->%d) #(%s, %" PRId64 ")\n",
2454 dl->rtt.outerType(), dl->rtt.innerType(),
2455 dl->location.spaceName(), dl->location.offset);
2456 assert(dl->location.isStack());
2457 ni->outStack = dl;
2461 void
2462 Translator::requestResetHighLevelTranslator() {
2463 if (dbgTranslateCoin) {
2464 dbgTranslateCoin->reset();
2468 bool DynLocation::canBeAliased() const {
2469 return isValue() &&
2470 ((Translator::liveFrameIsPseudoMain() && isLocal()) || isRef());
2473 // Test the type of a location without recording it as a read yet.
2474 RuntimeType TraceletContext::currentType(const Location& l) const {
2475 DynLocation* dl;
2476 if (!mapGet(m_currentMap, l, &dl)) {
2477 assert(!mapContains(m_deletedSet, l));
2478 assert(!mapContains(m_changeSet, l));
2479 return tx64->liveType(l, *liveUnit());
2481 return dl->rtt;
2484 DynLocation* TraceletContext::recordRead(const InputInfo& ii,
2485 bool useHHIR,
2486 DataType staticType) {
2487 if (staticType == KindOfNone) staticType = KindOfAny;
2489 DynLocation* dl;
2490 const Location& l = ii.loc;
2491 if (!mapGet(m_currentMap, l, &dl)) {
2492 // We should never try to read a location that has been deleted
2493 assert(!mapContains(m_deletedSet, l));
2494 // If the given location was not in m_currentMap, then it shouldn't
2495 // be in m_changeSet either
2496 assert(!mapContains(m_changeSet, l));
2497 if (ii.dontGuard && !l.isLiteral()) {
2498 assert(!useHHIR || staticType != KindOfRef);
2499 dl = m_t->newDynLocation(l, RuntimeType(staticType));
2500 if (useHHIR && staticType != KindOfAny) {
2501 m_resolvedDeps[l] = dl;
2503 } else {
2504 // TODO: Once the region translator supports guard relaxation
2505 // (task #2598894), we can enable specialization for all modes.
2506 const bool specialize = tx64->mode() == TransLive;
2507 RuntimeType rtt = tx64->liveType(l, *liveUnit(), specialize);
2508 assert(rtt.isIter() || !rtt.isVagueValue());
2509 // Allocate a new DynLocation to represent this and store it in the
2510 // current map.
2511 dl = m_t->newDynLocation(l, rtt);
2513 if (!l.isLiteral()) {
2514 if (m_varEnvTaint && dl->isValue() && dl->isLocal()) {
2515 dl->rtt = RuntimeType(KindOfAny);
2516 } else if ((m_aliasTaint && dl->canBeAliased()) ||
2517 (rtt.isValue() && rtt.isRef() && ii.dontGuardInner)) {
2518 dl->rtt = rtt.setValueType(KindOfAny);
2520 // Record that we depend on the live type of the specified location
2521 // as well (and remember what the live type was)
2522 m_dependencies[l] = dl;
2525 m_currentMap[l] = dl;
2527 TRACE(2, "recordRead: %s : %s\n", l.pretty().c_str(),
2528 dl->rtt.pretty().c_str());
2529 return dl;
2530 }
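// Usage sketch (mirroring the call sites in analyze() and
// applyInputMetaData; the Location is illustrative):
//
//   InputInfo ii(Location(Location::Local, 0));
//   DynLocation* dl = tas.recordRead(ii, true);
//
// The first such read of a guardable, non-literal location snapshots
// its live type into m_dependencies, which is what later becomes a
// guard; subsequent reads just return the DynLocation already tracked
// in m_currentMap.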
2532 void TraceletContext::recordWrite(DynLocation* dl) {
2533 TRACE(2, "recordWrite: %s : %s\n", dl->location.pretty().c_str(),
2534 dl->rtt.pretty().c_str());
2535 m_currentMap[dl->location] = dl;
2536 m_changeSet.insert(dl->location);
2537 m_deletedSet.erase(dl->location);
2540 void TraceletContext::recordDelete(const Location& l) {
2541 // We should not be trying to delete the rtt of a location that is
2542 // not in m_currentMap.
2543 TRACE(2, "recordDelete: %s\n", l.pretty().c_str());
2544 m_currentMap.erase(l);
2545 m_changeSet.erase(l);
2546 m_deletedSet.insert(l);
2549 void TraceletContext::aliasTaint() {
2550 m_aliasTaint = true;
2551 for (ChangeMap::iterator it = m_currentMap.begin();
2552 it != m_currentMap.end(); ++it) {
2553 DynLocation* dl = it->second;
2554 if (dl->canBeAliased()) {
2555 TRACE(1, "(%s, %" PRId64 ") <- inner type invalidated\n",
2556 it->first.spaceName(), it->first.offset);
2557 RuntimeType newRtt = dl->rtt.setValueType(KindOfAny);
2558 it->second = m_t->newDynLocation(dl->location, newRtt);
2563 void TraceletContext::varEnvTaint() {
2564 m_varEnvTaint = true;
2565 for (ChangeMap::iterator it = m_currentMap.begin();
2566 it != m_currentMap.end(); ++it) {
2567 DynLocation* dl = it->second;
2568 if (dl->isValue() && dl->isLocal()) {
2569 TRACE(1, "(%s, %" PRId64 ") <- type invalidated\n",
2570 it->first.spaceName(), it->first.offset);
2571 it->second = m_t->newDynLocation(dl->location,
2572 RuntimeType(KindOfAny));
2577 void TraceletContext::recordJmp() {
2578 m_numJmps++;
2581 void Translator::postAnalyze(NormalizedInstruction* ni, SrcKey& sk,
2582 Tracelet& t, TraceletContext& tas) {
2583 if (ni->op() == OpBareThis &&
2584 ni->outStack->rtt.isVagueValue()) {
2585 SrcKey src = sk;
2586 const Unit* unit = ni->m_unit;
2587 src.advance(unit);
2588 Op next = toOp(*unit->at(src.offset()));
2589 if (next == OpInstanceOfD || next == OpIsNullC) {
2590 ni->outStack->rtt = RuntimeType(KindOfObject);
2592 return;
2596 static bool isPop(const NormalizedInstruction* instr) {
2597 auto opc = instr->op();
2598 return (opc == OpPopC ||
2599 opc == OpPopV ||
2600 opc == OpPopR);
2603 GuardType::GuardType(DataType outer, DataType inner)
2604 : outerType(outer), innerType(inner), klass(nullptr) {
2607 GuardType::GuardType(const RuntimeType& rtt) {
2608 assert(rtt.isValue());
2609 outerType = rtt.outerType();
2610 innerType = rtt.innerType();
2611 if (rtt.hasKnownClass()) {
2612 klass = rtt.knownClass();
2613 } else if (rtt.hasArrayKind()) {
2614 arrayKindValid = true;
2615 arrayKind = rtt.arrayKind();
2616 } else {
2617 klass = nullptr;
2621 GuardType::GuardType(const GuardType& other) {
2622 *this = other;
2625 const DataType GuardType::getOuterType() const {
2626 return outerType;
2629 const DataType GuardType::getInnerType() const {
2630 return innerType;
2633 const Class* GuardType::getSpecializedClass() const {
2634 return klass;
2637 bool GuardType::isSpecific() const {
2638 return outerType > KindOfNone;
2641 bool GuardType::isSpecialized() const {
2642 return (outerType == KindOfObject && klass != nullptr) ||
2643 (outerType == KindOfArray && arrayKindValid);
2646 bool GuardType::isRelaxed() const {
2647 switch (outerType) {
2648 case KindOfAny:
2649 case KindOfUncounted:
2650 case KindOfUncountedInit:
2651 return true;
2652 default:
2653 return false;
2657 bool GuardType::isGeneric() const {
2658 return outerType == KindOfAny;
2661 bool GuardType::isCounted() const {
2662 switch (outerType) {
2663 case KindOfAny:
2664 case KindOfStaticString:
2665 case KindOfString:
2666 case KindOfArray:
2667 case KindOfObject:
2668 case KindOfResource:
2669 case KindOfRef:
2670 return true;
2671 default:
2672 return false;
2676 bool GuardType::isMoreRefinedThan(const GuardType& other) const {
2677 return getCategory() > other.getCategory();
2680 DataTypeCategory GuardType::getCategory() const {
2681 switch (outerType) {
2682 case KindOfAny: return DataTypeGeneric;
2683 case KindOfUncounted: return DataTypeCountness;
2684 case KindOfUncountedInit: return DataTypeCountnessInit;
2685 default: return (klass != nullptr || arrayKindValid) ?
2686 DataTypeSpecialized :
2687 DataTypeSpecific;
2688 }
2689 }
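// For instance, a KindOfObject guard with a known Class* (or a
// KindOfArray guard with a known array kind) categorizes as
// DataTypeSpecialized; the same outer types without specialization
// are just DataTypeSpecific, and KindOfAny is DataTypeGeneric.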
2691 bool GuardType::mayBeUninit() const {
2692 switch (outerType) {
2693 case KindOfAny:
2694 case KindOfUncounted:
2695 case KindOfUninit:
2696 return true;
2697 default:
2698 return false;
2702 GuardType GuardType::getCountness() const {
2703 // Note that translations need to be able to handle KindOfString and
2704 // KindOfStaticString interchangeably. This implies that KindOfStaticString
2705 // needs to be treated as KindOfString, i.e. as possibly counted.
2706 assert(isSpecific());
2707 switch (outerType) {
2708 case KindOfUninit:
2709 case KindOfNull:
2710 case KindOfBoolean:
2711 case KindOfInt64:
2712 case KindOfDouble: return GuardType(KindOfUncounted);
2713 default: return GuardType(outerType, innerType);
2714 }
2715 }
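// Examples: GuardType(KindOfInt64).getCountness() relaxes to
// GuardType(KindOfUncounted), while GuardType(KindOfString) comes
// back unchanged since, per the note above, strings must be treated
// as possibly counted.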
2717 GuardType GuardType::dropSpecialization() const {
2718 return GuardType(outerType, innerType);
2721 RuntimeType GuardType::getRuntimeType() const {
2722 if (outerType == KindOfObject && klass != nullptr) {
2723 return RuntimeType(outerType, innerType).setKnownClass(klass);
2725 if (outerType == KindOfArray && arrayKindValid) {
2726 return RuntimeType(outerType, innerType).setArrayKind(arrayKind);
2728 return RuntimeType(outerType, innerType);
2731 bool GuardType::isEqual(GuardType other) const {
2732 return outerType == other.outerType &&
2733 innerType == other.innerType &&
2734 klass == other.klass;
2737 GuardType GuardType::getCountnessInit() const {
2738 assert(isSpecific());
2739 switch (outerType) {
2740 case KindOfNull:
2741 case KindOfBoolean:
2742 case KindOfInt64:
2743 case KindOfDouble: return GuardType(KindOfUncountedInit);
2744 default: return GuardType(outerType, innerType);
2748 bool GuardType::hasArrayKind() const {
2749 return arrayKindValid;
2752 ArrayData::ArrayKind GuardType::getArrayKind() const {
2753 return arrayKind;
2754 }
2756 /*
2757 * Returns true iff loc is consumed by a Pop* instruction in the sequence
2758 * starting at instr.
2759 */
2760 bool isPopped(DynLocation* loc, NormalizedInstruction* instr) {
2761 for (; instr ; instr = instr->next) {
2762 for (size_t i = 0; i < instr->inputs.size(); i++) {
2763 if (instr->inputs[i] == loc) {
2764 return isPop(instr);
2768 return false;
2771 DataTypeCategory
2772 Translator::getOperandConstraintCategory(NormalizedInstruction* instr,
2773 size_t opndIdx,
2774 const GuardType& specType) {
2775 auto opc = instr->op();
2777 switch (opc) {
2778 case OpSetS:
2779 case OpSetG:
2780 case OpSetL: {
2781 if (opndIdx == 0) { // stack value
2782 // If the output on the stack is simply popped, then we don't
2783 // even care whether the type is ref-counted or not because
2784 // the ref-count is transferred to the target location.
2785 if (!instr->outStack || isPopped(instr->outStack, instr->next)) {
2786 return DataTypeGeneric;
2788 return DataTypeCountness;
2790 if (opc == OpSetL) {
2791 // old local value is dec-refed
2792 assert(opndIdx == 1);
2793 return DataTypeCountness;
2795 return DataTypeSpecific;
2798 case OpCGetL:
2799 return DataTypeCountnessInit;
2801 case OpRetC:
2802 case OpRetV:
2803 return DataTypeCountness;
2805 case OpFCall:
2806 // Note: instead of pessimizing calls that may be inlined with
2807 // DataTypeSpecific, we could apply the operand constraints of
2808 // the callee in constrainDep.
2809 return (instr->calleeTrace && !instr->calleeTrace->m_inliningFailed)
2810 ? DataTypeSpecific
2811 : DataTypeGeneric;
2813 case OpFCallArray:
2814 return DataTypeGeneric;
2816 case OpPopC:
2817 case OpPopV:
2818 case OpPopR:
2819 return DataTypeCountness;
2821 case OpContSuspend:
2822 case OpContSuspendK:
2823 case OpContRetC:
2824 // The stack input is teleported to the continuation's m_value field
2825 return DataTypeGeneric;
2827 case OpContHandle:
2828 // This always calls the interpreter
2829 return DataTypeGeneric;
2831 case OpAddElemC:
2832 // The stack input is teleported to the array
2833 return opndIdx == 0 ? DataTypeGeneric : DataTypeSpecific;
2835 case OpArrayIdx:
2836 // The default value (w/ opndIdx 0) is simply passed to a helper,
2837 // which takes care of dec-refing it if needed
2838 return opndIdx == 0 ? DataTypeGeneric : DataTypeSpecific;
2841 // Collections and Iterator related specializations
2843 case OpCGetM:
2844 case OpIssetM:
2845 case OpFPassM:
2846 if (specType.getOuterType() == KindOfArray) {
2847 if (instr->inputs.size() == 2 && opndIdx == 0) {
2848 if (specType.hasArrayKind() &&
2849 specType.getArrayKind() == ArrayData::ArrayKind::kPackedKind &&
2850 instr->inputs[1]->isInt()) {
2851 return DataTypeSpecialized;
2854 } else if (specType.getOuterType() == KindOfObject) {
2855 if (instr->inputs.size() == 2 && opndIdx == 0) {
2856 const Class* klass = specType.getSpecializedClass();
2857 if (klass != nullptr && isOptimizableCollectionClass(klass)) {
2858 return DataTypeSpecialized;
2862 return DataTypeSpecific;
2863 case OpSetM:
2864 if (specType.getOuterType() == KindOfObject) {
2865 if (instr->inputs.size() == 3 && opndIdx == 1) {
2866 const Class* klass = specType.getSpecializedClass();
2867 if (klass != nullptr && isOptimizableCollectionClass(klass)) {
2868 return DataTypeSpecialized;
2872 /* Input of SetM only needs a countness guard,
2873 * for the cases when the base is a String or an Object. In these
2874 * cases, we may need to incref/decref the input. */
2875 if (opndIdx == 0) {
2876 return DataTypeCountness;
2878 return DataTypeSpecific;
2880 default:
2881 return DataTypeSpecific;
2882 }
2883 }
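// Concretely, for the common SetL pattern (a sketch):
//
//   SetL L:0 ; PopC
//
// the stack result of the SetL is immediately popped, so operand 0
// (the assigned value) relaxes to DataTypeGeneric, while operand 1
// (the old local value, which may need a dec-ref) still demands
// DataTypeCountness.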
2885 GuardType
2886 Translator::getOperandConstraintType(NormalizedInstruction* instr,
2887 size_t opndIdx,
2888 const GuardType& specType) {
2889 DataTypeCategory dtCategory = getOperandConstraintCategory(instr,
2890 opndIdx,
2891 specType);
2892 switch (dtCategory) {
2893 case DataTypeGeneric: return GuardType(KindOfAny);
2894 case DataTypeCountness: return specType.getCountness();
2895 case DataTypeCountnessInit: return specType.getCountnessInit();
2896 case DataTypeSpecific: return specType.dropSpecialization();
2897 case DataTypeSpecialized:
2898 return specType;
2900 return specType;
2903 void Translator::constrainOperandType(GuardType& relxType,
2904 NormalizedInstruction* instr,
2905 size_t opndIdx,
2906 const GuardType& specType) {
2907 if (relxType.isEqual(specType)) return; // Can't constrain any further
2909 GuardType consType = getOperandConstraintType(instr, opndIdx, specType);
2910 if (consType.isMoreRefinedThan(relxType)) {
2911 relxType = consType;
2912 }
2913 }
2915 /*
2916 * This method looks at every use of loc in the stream of instructions
2917 * starting at firstInstr and constrains the relxType towards specType
2918 * according to each use. Note that this method not only looks at
2919 * direct uses of loc, but it also recursively looks at any other
2920 * DynLocs whose type depends on loc's type.
2921 */
2922 void Translator::constrainDep(const DynLocation* loc,
2923 NormalizedInstruction* firstInstr,
2924 GuardType specType,
2925 GuardType& relxType) {
2926 if (relxType.isEqual(specType)) return; // can't constrain it any further
2928 for (NormalizedInstruction* instr = firstInstr; instr; instr = instr->next) {
2929 if (instr->noOp) continue;
2930 auto opc = instr->op();
2931 size_t nInputs = instr->inputs.size();
2932 for (size_t i = 0; i < nInputs; i++) {
2933 DynLocation* usedLoc = instr->inputs[i];
2934 if (usedLoc == loc) {
2935 constrainOperandType(relxType, instr, i, specType);
2937 // If the instruction's input doesn't propagate to its output,
2938 // then we're done. Otherwise, we need to constrain relxType
2939 // based on the uses of the output.
2940 if (!outputDependsOnInput(opc)) continue;
2942 bool outputIsStackInput = false;
2943 const DynLocation* outStack = instr->outStack;
2944 const DynLocation* outLocal = instr->outLocal;
2946 switch (instrInfo[opc].type) {
2947 case OutSameAsInput:
2948 outputIsStackInput = true;
2949 break;
2951 case OutCInput:
2952 outputIsStackInput = true;
2953 // fall-through
2954 case OutCInputL:
2955 if (specType.getOuterType() == KindOfRef &&
2956 instr->isAnyOutputUsed()) {
2957 // Value gets unboxed along the way. Pessimize it for now.
2958 if (!relxType.isSpecialized()) {
2959 relxType = specType.dropSpecialization();
2961 return;
2963 break;
2965 default:
2966 if (!relxType.isSpecialized()) {
2967 relxType = specType.dropSpecialization();
2969 return;
2972 // The instruction input's type propagates to the outputs.
2973 // So constrain the dependence further based on uses of outputs.
2974 if ((i == 0 && outputIsStackInput) || // stack input @ [0]
2975 (i == nInputs - 1 && !outputIsStackInput)) { // local input is last
2976 if (outStack && !outStack->rtt.isVagueValue()) {
2977 // For SetL, getOperandConstraintCategory() generates
2978 // DataTypeGeneric if the stack output is popped. In this
2979 // case, don't further constrain the stack output,
2980 // otherwise the Pop* would make it a DataTypeCountness.
2981 if (opc != OpSetL || !relxType.isGeneric()) {
2982 constrainDep(outStack, instr->next, specType, relxType);
2985 if (outLocal && !outLocal->rtt.isVagueValue()) {
2986 constrainDep(outLocal, instr->next, specType, relxType);
2994 /*
2995 * This method looks at all the uses of the tracelet dependencies in the
2996 * instruction stream and tries to relax the type associated with each location.
2997 */
2998 void Translator::relaxDeps(Tracelet& tclet, TraceletContext& tctxt) {
2999 DynLocTypeMap locRelxTypeMap;
3001 // Initialize type maps. Relaxed types start off very relaxed, and then
3002 // they may get more specific depending on how the instructions use them.
3003 DepMap& deps = tctxt.m_dependencies;
3004 for (auto depIt = deps.begin(); depIt != deps.end(); depIt++) {
3005 DynLocation* loc = depIt->second;
3006 const RuntimeType& rtt = depIt->second->rtt;
3007 if (rtt.isValue() && !rtt.isVagueValue() && !rtt.isClass() &&
3008 !loc->location.isThis()) {
3009 GuardType relxType = GuardType(KindOfAny);
3010 GuardType specType = GuardType(rtt);
3011 constrainDep(loc, tclet.m_instrStream.first, specType, relxType);
3012 if (!specType.isEqual(relxType)) {
3013 locRelxTypeMap[loc] = relxType;
3018 // For each dependency, if we found a more relaxed type for it, use
3019 // that type.
3020 for (auto& kv : locRelxTypeMap) {
3021 DynLocation* loc = kv.first;
3022 const GuardType& relxType = kv.second;
3023 TRACE(1, "relaxDeps: Loc: %s oldType: %s => newType: %s\n",
3024 loc->location.pretty().c_str(),
3025 deps[loc->location]->rtt.pretty().c_str(),
3026 RuntimeType(relxType.getOuterType(),
3027 relxType.getInnerType(),
3028 relxType.getSpecializedClass()).pretty().c_str());
3029 assert(deps[loc->location] == loc);
3030 deps[loc->location]->rtt = relxType.getRuntimeType();
3031 }
3032 }
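// A worked example (sketch): if local L:0 is live as KindOfInt64 and
// the tracelet is just
//
//   CGetL L:0 ; PopC
//
// then CGetL constrains the dependency only to DataTypeCountnessInit
// (and the PopC of the copy to DataTypeCountness), so the guard on
// L:0 relaxes from a full Int64 check to an "uncounted, initialized"
// check, letting more live types reuse the same translation.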
3034 bool callDestroysLocals(const NormalizedInstruction& inst,
3035 const Func* caller) {
3036 auto* unit = caller->unit();
3037 auto checkTaintId = [&](Id id) {
3038 static const StringData* s_extract = makeStaticString("extract");
3039 return unit->lookupLitstrId(id)->isame(s_extract);
3040 };
3042 if (inst.op() == OpFCallBuiltin) return checkTaintId(inst.imm[2].u_SA);
3043 if (!isFCallStar(inst.op())) return false;
3045 const FPIEnt *fpi = caller->findFPI(inst.source.offset());
3046 assert(fpi);
3047 Op* fpushPc = (Op*)unit->at(fpi->m_fpushOff);
3048 auto const op = *fpushPc;
3050 if (op == OpFPushFunc) {
3051 // If the call has any arguments, the FPushFunc will be in a different
3052 // tracelet -- the tracelet will break on every FPass* because the reffiness
3053 // of the callee isn't knowable. So we have to say the call destroys locals,
3054 // to be conservative. If there aren't any arguments, then it can't destroy
3055 // locals -- even if the call is to extract(), there's no argument, so it
3056 // won't do anything.
3057 auto const numArgs = inst.imm[0].u_IVA;
3058 return (numArgs != 0);
3060 if (op == OpFPushFuncD) return checkTaintId(getImm(fpushPc, 1).u_SA);
3061 if (op == OpFPushFuncU) {
3062 return checkTaintId(getImm(fpushPc, 1).u_SA) ||
3063 checkTaintId(getImm(fpushPc, 2).u_SA);
3066 return false;
3067 }
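// To illustrate the FPushFunc case (in the PHP-snippet style used by
// other comments in this file):
//
//   function f() {
//     $x = 1;
//     $g = 'extract';
//     $g(array('x' => 2)); // dynamic call with one arg: may write $x
//   }
//
// With zero arguments, even a dynamic callee cannot rewrite the
// caller's frame locals, which is why numArgs == 0 is let through
// above.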
3069 /*
3070 * Check whether a given FCall should be analyzed for possible
3071 * inlining or not.
3072 */
3073 bool shouldAnalyzeCallee(const NormalizedInstruction* fcall,
3074 const FPIEnt* fpi,
3075 const Op pushOp,
3076 const int depth) {
3077 auto const numArgs = fcall->imm[0].u_IVA;
3078 auto const target = fcall->funcd;
3080 if (!RuntimeOption::RepoAuthoritative) return false;
3082 if (pushOp != OpFPushFuncD && pushOp != OpFPushObjMethodD
3083 && pushOp != OpFPushCtorD && pushOp != OpFPushCtor
3084 && pushOp != OpFPushClsMethodD) {
3085 FTRACE(1, "analyzeCallee: push op ({}) was not supported\n",
3086 opcodeToName(pushOp));
3087 return false;
3090 if (!target) {
3091 FTRACE(1, "analyzeCallee: target func not known\n");
3092 return false;
3094 if (target->isCPPBuiltin()) {
3095 FTRACE(1, "analyzeCallee: target func is a builtin\n");
3096 return false;
3099 if (depth + 1 > RuntimeOption::EvalHHIRInliningMaxDepth) {
3100 FTRACE(1, "analyzeCallee: max inlining depth reached\n");
3101 return false;
3104 // TODO(2716400): support __call and friends
3105 if (numArgs != target->numParams()) {
3106 FTRACE(1, "analyzeCallee: param count mismatch {} != {}\n",
3107 numArgs, target->numParams());
3108 return false;
3111 if (pushOp == OpFPushClsMethodD && target->mayHaveThis()) {
3112 FTRACE(1, "analyzeCallee: not inlining static calls which may have a "
3113 "this pointer\n");
3114 return false;
3117 // Find the fpush and ensure it's in this tracelet---refuse to
3118 // inline if there are any calls in order to prepare arguments.
3119 for (auto* ni = fcall->prev; ni; ni = ni->prev) {
3120 if (ni->source.offset() == fpi->m_fpushOff) {
3121 return true;
3123 if (isFCallStar(ni->op()) || ni->op() == OpFCallBuiltin) {
3124 FTRACE(1, "analyzeCallee: fpi region contained other calls\n");
3125 return false;
3128 FTRACE(1, "analyzeCallee: push instruction was in a different "
3129 "tracelet\n");
3130 return false;
3133 void Translator::analyzeCallee(TraceletContext& tas,
3134 Tracelet& parent,
3135 NormalizedInstruction* fcall) {
3136 auto const callerFunc = fcall->func();
3137 auto const fpi = callerFunc->findFPI(fcall->source.offset());
3138 auto const pushOp = fcall->m_unit->getOpcode(fpi->m_fpushOff);
3140 if (!shouldAnalyzeCallee(fcall, fpi, pushOp, analysisDepth())) return;
3142 auto const numArgs = fcall->imm[0].u_IVA;
3143 auto const target = fcall->funcd;
3145 /*
3146 * Prepare a map for all the known information about the argument
3147 * types.
3149 * Also, fill out KindOfUninit for any remaining locals. The point
3150 * here is that the subtrace can't call liveType for a local or
3151 * stack location (since our ActRec is fake), so we need them all in
3152 * the TraceletContext.
3154 * If any of the argument types are unknown (including inner-types
3155 * of KindOfRefs), we don't really try to analyze the callee. It
3156 * might be possible to do this but we'll need to modify the
3157 * analyzer to support unknown input types before there are any
3158 * NormalizedInstructions in the Tracelet.
3159 */
3160 TypeMap initialMap;
3161 LocationSet callerArgLocs;
3162 for (int i = 0; i < numArgs; ++i) {
3163 auto callerLoc = Location(Location::Stack, fcall->stackOffset - i - 1);
3164 auto calleeLoc = Location(Location::Local, numArgs - i - 1);
3165 auto type = tas.currentType(callerLoc);
3167 callerArgLocs.insert(callerLoc);
3169 if (type.isVagueValue()) {
3170 FTRACE(1, "analyzeCallee: {} has unknown type\n", callerLoc.pretty());
3171 return;
3173 if (type.isValue() && type.isRef() && type.innerType() == KindOfAny) {
3174 FTRACE(1, "analyzeCallee: {} has unknown inner-refdata type\n",
3175 callerLoc.pretty());
3176 return;
3179 FTRACE(2, "mapping arg{} locs {} -> {} :: {}\n",
3180 numArgs - i - 1,
3181 callerLoc.pretty(),
3182 calleeLoc.pretty(),
3183 type.pretty());
3184 initialMap[calleeLoc] = type;
3186 for (int i = numArgs; i < target->numLocals(); ++i) {
3187 initialMap[Location(Location::Local, i)] = RuntimeType(KindOfUninit);
3188 }
3190 /*
3191 * When reentering analyze to generate a Tracelet for a callee,
3192 * currently we handle this by creating a fake ActRec on the stack.
3194 * This is mostly a compromise to deal with existing code during the
3195 * analysis phase which pretty liberally inspects live VM state.
3196 */
3197 ActRec fakeAR;
3198 fakeAR.m_savedRbp = reinterpret_cast<uintptr_t>(liveFrame());
3199 fakeAR.m_savedRip = 0xbaabaa; // should never be inspected
3200 fakeAR.m_func = fcall->funcd;
3201 fakeAR.m_soff = 0xb00b00; // should never be inspected
3202 fakeAR.m_numArgsAndCtorFlag = numArgs;
3203 fakeAR.m_varEnv = nullptr;
3205 /*
3206 * Even when inlining an object method, we can leave the m_this as
3207 * null. See outThisObjectType().
3208 */
3209 fakeAR.m_this = nullptr;
3211 FTRACE(1, "analyzing sub trace =================================\n");
3212 auto const oldFP = vmfp();
3213 auto const oldSP = vmsp();
3214 auto const oldPC = vmpc();
3215 auto const oldAnalyzeCalleeDepth = m_analysisDepth++;
3216 vmpc() = nullptr; // should never be used
3217 vmsp() = nullptr; // should never be used
3218 vmfp() = reinterpret_cast<Cell*>(&fakeAR);
3219 auto restoreFrame = [&]{
3220 vmfp() = oldFP;
3221 vmsp() = oldSP;
3222 vmpc() = oldPC;
3223 m_analysisDepth = oldAnalyzeCalleeDepth;
3224 };
3225 SCOPE_EXIT {
3226 // It's ok to restoreFrame() twice---we have it in this scope
3227 // handler to ensure it still happens if we exit via an exception.
3228 restoreFrame();
3229 FTRACE(1, "finished sub trace ===================================\n");
3232 auto subTrace = analyze(SrcKey(target, target->base()), initialMap);
3234 /*
3235 * Verify the target trace actually ended with a return, or we have
3236 * no business doing anything based on it right now.
3237 */
3238 if (!subTrace->m_instrStream.last ||
3239 (subTrace->m_instrStream.last->op() != OpRetC &&
3240 subTrace->m_instrStream.last->op() != OpRetV)) {
3241 FTRACE(1, "analyzeCallee: callee did not end in a return\n");
3242 return;
3243 }
3245 /*
3246 * If the IR can't inline this, give up now. Below we're going to
3247 * start making changes to the tracelet that is making the call
3248 * (potentially increasing the specificity of guards), and we don't
3249 * want to do that unnecessarily.
3250 */
3251 if (!JIT::shouldIRInline(callerFunc, target, *subTrace)) {
3252 if (UNLIKELY(Stats::enabledAny() && getenv("HHVM_STATS_FAILEDINL"))) {
3253 subTrace->m_inliningFailed = true;
3254 // Save the trace for stats purposes but don't waste time doing any
3255 // further processing since we know we won't inline it.
3256 fcall->calleeTrace = std::move(subTrace);
3258 return;
3259 }
3261 /*
3262 * Disabled for now:
3264 * Propagate the return type to our caller. If the return type is
3265 * not vague, it will hold if we can inline the trace.
3267 * This isn't really a sensible thing to do if we aren't also going
3268 * to inline the callee, however, because the return type may only
3269 * be what it is due to other output predictions (CGetMs or FCall)
3270 * inside the callee. This means we would need to check the return
3271 * value in the caller still as if it were a predicted return type.
3272 */
3273 Location retVal(Location::Stack, 0);
3274 auto it = subTrace->m_changes.find(retVal);
3275 assert(it != subTrace->m_changes.end());
3276 FTRACE(1, "subtrace return: {}\n", it->second->pretty());
3277 if (false) {
3278 if (!it->second->rtt.isVagueValue() && !it->second->rtt.isRef()) {
3279 FTRACE(1, "changing callee's return type from {} to {}\n",
3280 fcall->outStack->rtt.pretty(),
3281 it->second->pretty());
3283 fcall->outputPredicted = true;
3284 fcall->outputPredictionStatic = false;
3285 fcall->outStack = parent.newDynLocation(fcall->outStack->location,
3286 it->second->rtt);
3287 tas.recordWrite(fcall->outStack);
3288 }
3289 }
3291 /*
3292 * In order for relaxDeps not to relax guards on some things we may
3293 * potentially have depended on here, we need to ensure that the
3294 * call instruction depends on all the inputs we've used.
3296 * (We could do better by letting relaxDeps look through the
3297 * callee.)
3298 */
3299 restoreFrame();
3300 for (auto& loc : callerArgLocs) {
3301 fcall->inputs.push_back(tas.recordRead(InputInfo(loc), true));
3304 FTRACE(1, "analyzeCallee: inline candidate\n");
3305 fcall->calleeTrace = std::move(subTrace);
3308 static bool instrBreaksProfileBB(const NormalizedInstruction* instr) {
3309 return (instrIsNonCallControlFlow(instr->op()) ||
3310 instr->outputPredicted ||
3311 instr->op() == OpClsCnsD); // side exits if misses in the RDS
3314 /*
3315 * analyze --
3317 * Given a sequence of bytecodes, return our tracelet IR.
3319 * The purpose of this analysis is to determine:
3321 * 1. Pre-conditions: What locations get read before they get written to:
3322 * we will need typechecks for these and we will want to load them into
3323 * registers. (m_dependencies)
3325 * 2. Post-conditions: the locations that have been written to and are
3326 * still live at the end of the tracelet. We need to allocate registers
3327 * for these and we need to spill them at the end of the tracelet.
3328 * (m_changes)
3330 * 3. Determine the runtime types for each instruction's input locations
3331 * and output locations.
3333 * The main analysis works by doing a single pass over the instructions. It
3334 * effectively simulates the execution of each instruction, updating its
3335 * knowledge about types as it goes.
3337 * The TraceletContext class is used to keep track of the current state of
3338 * the world. Initially it is empty, and when the inputs for the first
3339 * instruction are analyzed we call recordRead(). The recordRead() function
3340 * in turn inspects the live types of the inputs and adds them to the type
3341 * map. This serves two purposes: (1) it figures out what typechecks this
3342 * tracelet needs; and (2) it guarantees that the code we generate will
3343 * satisfy the live types that are about to be passed in.
3345 * Over time the TraceletContext's type map will change. However, we need to
3346 * record what the types _were_ right before and right after a given
3347 * instruction executes. This is where the NormalizedInstruction class comes
3348 * in. We store the RuntimeTypes from the TraceletContext right before an
3349 * instruction executes into the NormalizedInstruction's 'inputs' field, and
3350 * we store the RuntimeTypes from the TraceletContext right after the
3351 * instruction executes into the various output fields.
3352 */
3353 std::unique_ptr<Tracelet> Translator::analyze(SrcKey sk,
3354 const TypeMap& initialTypes) {
3355 std::unique_ptr<Tracelet> retval(new Tracelet());
3356 auto func = sk.func();
3357 auto unit = sk.unit();
3358 auto& t = *retval;
3359 t.m_sk = sk;
3361 DEBUG_ONLY const char* file = unit->filepath()->data();
3362 DEBUG_ONLY const int lineNum = unit->getLineNumber(t.m_sk.offset());
3363 DEBUG_ONLY const char* funcName = func->fullName()->data();
3365 TRACE(1, "Translator::analyze %s:%d %s\n", file, lineNum, funcName);
3366 TraceletContext tas(&t, initialTypes);
3367 int stackFrameOffset = 0;
3368 int oldStackFrameOffset = 0;
3370 // numOpcodes counts the original number of opcodes in a tracelet
3371 // before the translator does any optimization
3372 t.m_numOpcodes = 0;
3373 Unit::MetaHandle metaHand;
3375 for (;; sk.advance(unit)) {
3376 head:
3377 NormalizedInstruction* ni = t.newNormalizedInstruction();
3378 ni->source = sk;
3379 ni->stackOffset = stackFrameOffset;
3380 ni->funcd = t.m_arState.knownFunc();
3381 ni->m_unit = unit;
3382 ni->breaksTracelet = false;
3383 ni->changesPC = opcodeChangesPC(ni->op());
3384 ni->fuseBranch = false;
3386 assert(!t.m_analysisFailed);
3387 oldStackFrameOffset = stackFrameOffset;
3388 populateImmediates(*ni);
3390 SKTRACE(1, sk, "stack args: virtual sfo now %d\n", stackFrameOffset);
3392 // Translation could fail entirely (because of an unknown opcode), or
3393 // encounter an input that cannot be computed.
3394 try {
3395 if (isTypeAssert(ni->op())) {
3396 handleAssertionEffects(t, *ni, tas, stackFrameOffset);
3398 preInputApplyMetaData(metaHand, ni);
3399 InputInfos inputInfos;
3400 getInputsImpl(
3401 t.m_sk, ni, stackFrameOffset, inputInfos, sk.func(),
3402 [&](int i) {
3403 return Type(
3404 tas.currentType(Location(Location::Local, i)));
3408 bool noOp = applyInputMetaData(metaHand, ni, tas, inputInfos);
3409 if (noOp) {
3410 t.m_instrStream.append(ni);
3411 ++t.m_numOpcodes;
3412 stackFrameOffset = oldStackFrameOffset;
3413 continue;
3415 if (inputInfos.needsRefCheck) {
3416 // Drive the arState machine; if it is going to throw an input
3417 // exception, do so here.
3418 int argNum = ni->imm[0].u_IVA;
3419 // instrSpToArDelta() returns the delta relative to the sp at the
3420 // beginning of the instruction, but checkByRef() wants the delta
3421 // relative to the sp at the beginning of the tracelet, so we adjust
3422 // by subtracting ni->stackOff
3423 int entryArDelta = instrSpToArDelta((Op*)ni->pc()) - ni->stackOffset;
3424 ni->preppedByRef = t.m_arState.checkByRef(argNum, entryArDelta,
3425 &t.m_refDeps);
3426 SKTRACE(1, sk, "passing arg%d by %s\n", argNum,
3427 ni->preppedByRef ? "reference" : "value");
3430 for (unsigned int i = 0; i < inputInfos.size(); i++) {
3431 SKTRACE(2, sk, "typing input %d\n", i);
3432 const InputInfo& ii = inputInfos[i];
3433 DynLocation* dl = tas.recordRead(ii, true);
3434 const RuntimeType& rtt = dl->rtt;
3435 // Some instructions are able to handle an input with an unknown type
3436 if (!ii.dontBreak && !ii.dontGuard) {
3437 if (rtt.isVagueValue()) {
3438 // Consumed a "poisoned" output: e.g., result of an array
3439 // deref.
3440 throwUnknownInput();
3442 if (!ni->ignoreInnerType && !ii.dontGuardInner) {
3443 if (rtt.isValue() && rtt.isRef() &&
3444 rtt.innerType() == KindOfAny) {
3445 throwUnknownInput();
3448 if ((m_mode == TransProfile || m_mode == TransOptimize) &&
3449 t.m_numOpcodes > 0) {
3450 // We want to break blocks at every instruction that consumes a ref,
3451 // so that we avoid side exits. Therefore, instructions that consume
3452 // a ref can only be the first in the tracelet/block.
3453 if (rtt.isValue() && rtt.isRef()) {
3454 throwUnknownInput();
3458 ni->inputs.push_back(dl);
3460 } catch (TranslationFailedExc& tfe) {
3461 SKTRACE(1, sk, "Translator fail: %s\n", tfe.what());
3462 if (!t.m_numOpcodes) {
3463 t.m_analysisFailed = true;
3464 t.m_instrStream.append(ni);
3465 ++t.m_numOpcodes;
3467 goto breakBB;
3468 } catch (UnknownInputExc& uie) {
3469 // Subtle: if this instruction consumes an unknown runtime type,
3470 // break the BB on the *previous* instruction. We know that a
3471 // previous instruction exists, because the KindOfAny must
3472 // have come from somewhere.
3473 always_assert(t.m_instrStream.last);
3474 SKTRACE(2, sk, "Consumed unknown input (%s:%d); breaking BB at "
3475 "predecessor\n", uie.m_file, uie.m_line);
3476 goto breakBB;
3479 SKTRACE(2, sk, "stack args: virtual sfo now %d\n", stackFrameOffset);
3481 bool doVarEnvTaint; // initialized by reference.
3482 try {
3483 getOutputs(t, ni, stackFrameOffset, doVarEnvTaint);
3484 } catch (TranslationFailedExc& tfe) {
3485 SKTRACE(1, sk, "Translator getOutputs fail: %s\n", tfe.what());
3486 if (!t.m_numOpcodes) {
3487 t.m_analysisFailed = true;
3488 t.m_instrStream.append(ni);
3489 ++t.m_numOpcodes;
3491 goto breakBB;
3494 if (isFCallStar(ni->op())) t.m_arState.pop();
3495 if (doVarEnvTaint || callDestroysLocals(*ni, func)) tas.varEnvTaint();
3497 DynLocation* outputs[] = { ni->outStack,
3498 ni->outLocal, ni->outLocal2,
3499 ni->outStack2, ni->outStack3 };
3500 for (size_t i = 0; i < sizeof(outputs) / sizeof(*outputs); ++i) {
3501 if (outputs[i]) {
3502 DynLocation* o = outputs[i];
3503 SKTRACE(2, sk, "inserting output t(%d->%d) #(%s, %" PRId64 ")\n",
3504 o->rtt.outerType(), o->rtt.innerType(),
3505 o->location.spaceName(), o->location.offset);
3506 tas.recordWrite(o);
3509 if (ni->op() == OpCreateCont || ni->op() == OpCreateAsync) {
3510 // CreateCont stores Uninit to all locals but NormalizedInstruction
3511 // doesn't have enough output fields, so we special-case it here.
3512 auto const numLocals = ni->func()->numLocals();
3513 for (unsigned i = 0; i < numLocals; ++i) {
3514 tas.recordWrite(t.newDynLocation(Location(Location::Local, i),
3515 KindOfUninit));
3519 SKTRACE(1, sk, "stack args: virtual sfo now %d\n", stackFrameOffset);
3521 // This assert failing means that your instruction has an
3522 // inconsistent row in the InstrInfo table; the stackDelta doesn't
3523 // agree with the inputs and outputs.
3524 assert(getStackDelta(*ni) == (stackFrameOffset - oldStackFrameOffset));
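// For instance, Add consumes two stack cells and pushes one, so its
// InstrInfo row must carry a stackDelta of -1; after processing an Add,
// stackFrameOffset must be exactly one less than oldStackFrameOffset or
// the table row is inconsistent.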
3525 // If this instruction decreased the depth of the stack, mark the
3526 // appropriate stack locations as "dead". But we need to leave
3527 // them in the TraceletContext until after analyzeCallee (if this
3528 // is an FCall).
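// Illustrative case: if an Add lowers the offset from 2 to 1, the single
// location Stack:1 is recorded in deadLocs below, but its type remains
// visible to analyzeCallee; the recorded deletes are only replayed into
// the TraceletContext afterwards.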
3529 if (stackFrameOffset < oldStackFrameOffset) {
3530 for (int i = stackFrameOffset; i < oldStackFrameOffset; ++i) {
3531 ni->deadLocs.push_back(Location(Location::Stack, i));
3535 if (ni->outputPredicted) {
3536 assert(ni->outStack);
3537 ni->outPred = Type(ni->outStack);
3540 t.m_stackChange += getStackDelta(*ni);
3542 t.m_instrStream.append(ni);
3543 ++t.m_numOpcodes;
3545 /*
3546 * The annotation step attempts to track Func*'s associated with
3547 * given FCalls when the FPush is in a different tracelet.
3548 *
3549 * When we're analyzing a callee, we can't do this because we may
3550 * have class information in some of our RuntimeTypes that is only
3551 * true because of who the caller was. (Normally it is only there
3552 * if it came from static analysis.)
3553 */
3554 if (analysisDepth() == 0) {
3555 annotate(ni);
3558 if (ni->op() == OpFCall) {
3559 analyzeCallee(tas, t, ni);
3562 for (auto& l : ni->deadLocs) {
3563 tas.recordDelete(l);
3566 if (m_mode == TransProfile && instrBreaksProfileBB(ni)) {
3567 SKTRACE(1, sk, "BB broken\n");
3568 sk.advance(unit);
3569 goto breakBB;
3572 // Check if we need to break the tracelet.
3574 // If we've gotten this far, it mostly boils down to control-flow
3575 // instructions. However, we'll trace through a few unconditional jmps.
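// Illustrative bytecode (hypothetical offsets):
//
//   CGetL 0
//   Jmp +5     <- forward jmp: traced through, up to MaxJmpsTracedThrough
//   ...
//   RetC       <- opcodeBreaksBB() is true: the tracelet ends here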
3576 if (ni->op() == OpJmp &&
3577 ni->imm[0].u_BA > 0 &&
3578 tas.m_numJmps < MaxJmpsTracedThrough) {
3579 // Continue tracing through jumps. To prevent pathologies, only trace
3580 // through a finite number of forward jumps.
3581 SKTRACE(1, sk, "greedily continuing through %dth jmp + %d\n",
3582 tas.m_numJmps, ni->imm[0].u_IA);
3583 tas.recordJmp();
3584 sk = SrcKey(func, sk.offset() + ni->imm[0].u_IA);
3585 goto head; // don't advance sk
3586 } else if (opcodeBreaksBB(ni->op()) ||
3587 (dontGuardAnyInputs(ni->op()) && opcodeChangesPC(ni->op()))) {
3588 SKTRACE(1, sk, "BB broken\n");
3589 sk.advance(unit);
3590 goto breakBB;
3592 postAnalyze(ni, sk, t, tas);
3594 breakBB:
3595 NormalizedInstruction* ni = t.m_instrStream.last;
3596 while (ni) {
3597 // We don't want to end a tracelet with a literal; it will cause the literal
3598 // to be pushed on the stack, and the next tracelet will have to guard on
3599 // the type. Similarly, This, Self and Parent will lose type information
3600 // that's only useful in the following tracelet.
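// Illustrative tail: a tracelet ending in "SetL 0; PopC; Int 1" is trimmed
// back past the Int 1, since the literal's type is statically known here
// but would otherwise have to be re-guarded by the next tracelet.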
3601 if (isLiteral(ni->op()) ||
3602 isThisSelfOrParent(ni->op()) ||
3603 isTypeAssert(ni->op())) {
3604 ni = ni->prev;
3605 continue;
3607 break;
3609 if (ni) {
3610 while (ni != t.m_instrStream.last) {
3611 t.m_stackChange -= getStackDelta(*t.m_instrStream.last);
3612 sk = t.m_instrStream.last->source;
3613 t.m_instrStream.remove(t.m_instrStream.last);
3614 --t.m_numOpcodes;
3618 // translateRegion doesn't support guard relaxation/specialization yet
3619 if (RuntimeOption::EvalHHBCRelaxGuards &&
3620 m_mode != TransProfile && m_mode != TransOptimize) {
3621 relaxDeps(t, tas);
3624 // Mark the last instruction appropriately
3625 assert(t.m_instrStream.last);
3626 t.m_instrStream.last->breaksTracelet = true;
3627 // Populate t.m_changes, t.intermediates, t.m_dependencies
3628 t.m_dependencies = tas.m_dependencies;
3629 t.m_resolvedDeps = tas.m_resolvedDeps;
3630 t.m_changes.clear();
3631 LocationSet::iterator it = tas.m_changeSet.begin();
3632 for (; it != tas.m_changeSet.end(); ++it) {
3633 t.m_changes[*it] = tas.m_currentMap[*it];
3636 TRACE(1, "Tracelet done: stack delta %d\n", t.m_stackChange);
3637 return retval;
3640 Translator::Translator()
3641 : uniqueStubs{}
3642 , m_createdTime(Timer::GetCurrentTimeMicros())
3643 , m_mode(TransInvalid)
3644 , m_profData(nullptr)
3645 , m_analysisDepth(0)
3647 initInstrInfo();
3648 if (RuntimeOption::EvalJitPGO) {
3649 m_profData = new ProfData();
3653 Translator::~Translator() {
3654 delete m_profData;
3655 m_profData = nullptr;
3658 Translator*
3659 Translator::Get() {
3660 return TranslatorX64::Get();
3663 bool
3664 Translator::isSrcKeyInBL(const SrcKey& sk) {
3665 auto unit = sk.unit();
3666 if (unit->isInterpretOnly()) return true;
3667 Lock l(m_dbgBlacklistLock);
3668 if (m_dbgBLSrcKey.find(sk) != m_dbgBLSrcKey.end()) {
3669 return true;
3671 for (PC pc = unit->at(sk.offset()); !opcodeBreaksBB(toOp(*pc));
3672 pc += instrLen((Op*)pc)) {
3673 if (m_dbgBLPC.checkPC(pc)) {
3674 m_dbgBLSrcKey.insert(sk);
3675 return true;
3678 return false;
3681 void
3682 Translator::clearDbgBL() {
3683 Lock l(m_dbgBlacklistLock);
3684 m_dbgBLSrcKey.clear();
3685 m_dbgBLPC.clear();
3688 bool
3689 Translator::addDbgBLPC(PC pc) {
3690 Lock l(m_dbgBlacklistLock);
3691 if (m_dbgBLPC.checkPC(pc)) {
3692 // already there
3693 return false;
3695 m_dbgBLPC.addPC(pc);
3696 return true;
3699 void populateImmediates(NormalizedInstruction& inst) {
3700 for (int i = 0; i < numImmediates(inst.op()); i++) {
3701 inst.imm[i] = getImm((Op*)inst.pc(), i);
3703 if (hasImmVector(toOp(*inst.pc()))) {
3704 inst.immVec = getImmVector((Op*)inst.pc());
3706 if (inst.op() == OpFCallArray) {
3707 inst.imm[0].u_IVA = 1;
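// That is, FCallArray is normalized to look like a one-argument FCall, so
// downstream code can uniformly read imm[0].u_IVA as the argument count.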
3711 const char* Translator::translateResultName(TranslateResult r) {
3712 static const char* const names[] = {
3713 "Failure",
3714 "Retry",
3715 "Success",
3717 return names[r];
3720 /*
3721 * Similar to applyInputMetaData, but designed to be used during ir
3722 * generation. Reads and writes types of values using hhbcTrans. This will
3723 * eventually replace applyInputMetaData.
3724 */
3725 void readMetaData(Unit::MetaHandle& handle, NormalizedInstruction& inst,
3726 HhbcTranslator& hhbcTrans, MetaMode metaMode /* = Normal */) {
3727 if (isAlwaysNop(inst.op())) {
3728 inst.noOp = true;
3729 return;
3732 if (!handle.findMeta(inst.unit(), inst.offset())) return;
3734 Unit::MetaInfo info;
3735 if (!handle.nextArg(info)) return;
3737 /*
3738 * We need to adjust the indexes in MetaInfo::m_arg if this instruction
3739 * takes stack arguments other than those related to the MVector. (For
3740 * example, the rhs of an assignment.)
3741 */
3742 auto const& iInfo = instrInfo[inst.op()];
3743 if (iInfo.in & AllLocals) {
3744 /*
3745 * RetC/RetV don't care about their stack input, but it may have been
3746 * annotated. Skip it (because RetC/RetV pretend they don't have a stack
3747 * input).
3748 */
3749 return;
3751 if (iInfo.in == FuncdRef) {
3752 /*
3753 * FPassC* pretend to have no inputs.
3754 */
3755 return;
3757 const int base = !(iInfo.in & MVector) ? 0 :
3758 !(iInfo.in & Stack1) ? 0 :
3759 !(iInfo.in & Stack2) ? 1 :
3760 !(iInfo.in & Stack3) ? 2 : 3;
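// Example: a SetM-style instruction whose inputs are MVector|Stack1 yields
// base == 1, so a MetaInfo arg tagged VectorArg 0 maps to input index 1,
// just past the single non-vector stack input (the assignment's rhs).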
3762 auto stackFilter = [metaMode, &inst](Location loc) {
3763 if (metaMode == MetaMode::Legacy && loc.space == Location::Stack) {
3764 loc.offset = -(loc.offset + 1) + inst.stackOffset;
3766 return loc;
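// Worked example for Legacy mode (hypothetical values): with
// inst.stackOffset == 2, a metadata location Stack:0 (top of stack at this
// instruction) is rewritten to -(0 + 1) + 2 == 1, the tracelet-relative
// slot the legacy analysis machinery expects.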
3769 do {
3770 SKTRACE(3, inst.source, "considering MetaInfo of kind %d\n", info.m_kind);
3772 int arg = info.m_arg & Unit::MetaInfo::VectorArg ?
3773 base + (info.m_arg & ~Unit::MetaInfo::VectorArg) : info.m_arg;
3774 auto updateType = [&]{
3775 /* don't update input rtt for Legacy mode */
3776 if (metaMode == MetaMode::Legacy) return;
3777 auto& input = *inst.inputs[arg];
3778 input.rtt = hhbcTrans.rttFromLocation(stackFilter(input.location));
3781 switch (info.m_kind) {
3782 case Unit::MetaInfo::Kind::NoSurprise:
3783 inst.noSurprise = true;
3784 break;
3785 case Unit::MetaInfo::Kind::GuardedCls:
3786 inst.guardedCls = true;
3787 break;
3788 case Unit::MetaInfo::Kind::DataTypePredicted: {
3789 // When we're translating a Tracelet from Translator::analyze(), the
3790 // information from these predictions has been added to the
3791 // NormalizedInstructions in the instruction stream, so they aren't
3792 // necessary (and they caused a perf regression). HHIR guard relaxation
3793 // is capable of eliminating unnecessary predictions and the
3794 // information added here is valuable to it.
3795 if (metaMode == MetaMode::Legacy &&
3796 !RuntimeOption::EvalHHIRRelaxGuards) {
3797 break;
3799 auto const loc = stackFilter(inst.inputs[arg]->location).
3800 toLocation(inst.stackOffset);
3801 auto const t = Type(DataType(info.m_data));
3802 auto const offset = inst.source.offset();
3804 // These 'predictions' mean the type is InitNull or the predicted type,
3805 // so we assert InitNull | t, then guard t. This allows certain
3806 // optimizations in the IR.
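// Concretely, for a hypothetical prediction of KindOfInt64 on a stack
// slot, this emits assertType(loc, InitNull | Int) followed by
// checkType(loc, Int, offset): the assert lets the optimizer assume
// "null or int", and the check side-exits when the value isn't an Int.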
3807 hhbcTrans.assertType(loc, Type::InitNull | t);
3808 hhbcTrans.checkType(loc, t, offset);
3809 updateType();
3810 break;
3812 case Unit::MetaInfo::Kind::DataTypeInferred: {
3813 hhbcTrans.assertType(
3814 stackFilter(inst.inputs[arg]->location).toLocation(inst.stackOffset),
3815 Type(DataType(info.m_data)));
3816 updateType();
3817 break;
3819 case Unit::MetaInfo::Kind::String: {
3820 hhbcTrans.assertString(
3821 stackFilter(inst.inputs[arg]->location).toLocation(inst.stackOffset),
3822 inst.unit()->lookupLitstrId(info.m_data));
3823 updateType();
3824 break;
3826 case Unit::MetaInfo::Kind::Class: {
3827 auto& rtt = inst.inputs[arg]->rtt;
3828 auto const& location = inst.inputs[arg]->location;
3829 if (rtt.valueType() != KindOfObject) break;
3831 const StringData* metaName = inst.unit()->lookupLitstrId(info.m_data);
3832 const StringData* rttName =
3833 rtt.valueClass() ? rtt.valueClass()->name() : nullptr;
3834 // The two classes might not be exactly the same, which is ok
3835 // as long as metaCls is more derived than rttCls.
3836 Class* metaCls = Unit::lookupUniqueClass(metaName);
3837 Class* rttCls = rttName ? Unit::lookupUniqueClass(rttName) : nullptr;
3838 if (!metaCls || (rttCls && metaCls != rttCls &&
3839 !metaCls->classof(rttCls))) {
3840 // Runtime type is more derived
3841 metaCls = rttCls;
3843 if (!metaCls) break;
3844 if (location.space != Location::This) {
3845 hhbcTrans.assertClass(
3846 stackFilter(location).toLocation(inst.stackOffset), metaCls);
3847 } else {
3848 assert(metaCls->classof(hhbcTrans.curClass()));
3851 if (metaCls == rttCls) break;
3852 SKTRACE(1, inst.source, "replacing input %d with a MetaInfo-supplied "
3853 "class of %s; old type = %s\n",
3854 arg, metaName->data(), rtt.pretty().c_str());
3855 if (rtt.isRef()) {
3856 rtt = RuntimeType(KindOfRef, KindOfObject, metaCls);
3857 } else {
3858 rtt = RuntimeType(KindOfObject, KindOfNone, metaCls);
3860 break;
3862 case Unit::MetaInfo::Kind::MVecPropClass: {
3863 const StringData* metaName = inst.unit()->lookupLitstrId(info.m_data);
3864 Class* metaCls = Unit::lookupUniqueClass(metaName);
3865 if (metaCls) {
3866 inst.immVecClasses[arg] = metaCls;
3868 break;
3871 case Unit::MetaInfo::Kind::GuardedThis:
3872 case Unit::MetaInfo::Kind::NonRefCounted:
3873 // fallthrough; these are handled in preInputApplyMetaData.
3874 case Unit::MetaInfo::Kind::None:
3875 break;
3877 } while (handle.nextArg(info));
3880 bool instrMustInterp(const NormalizedInstruction& inst) {
3881 if (RuntimeOption::EvalJitAlwaysInterpOne) return true;
3883 switch (inst.op()) {
3884 // Generate a case for each instruction we support at least partially.
3885 # define CASE(name) case Op::name:
3886 INSTRS
3887 # undef CASE
3888 # define NOTHING(...) // PSEUDOINSTR_DISPATCH has the cases in it
3889 PSEUDOINSTR_DISPATCH(NOTHING)
3890 # undef NOTHING
3891 return false;
3893 default:
3894 return true;
3898 void Translator::traceStart(Offset initBcOffset, Offset initSpOffset) {
3899 assert(!m_irTrans);
3901 FTRACE(1, "{}{:-^40}{}\n",
3902 color(ANSI_COLOR_BLACK, ANSI_BGCOLOR_GREEN),
3903 " HHIR during translation ",
3904 color(ANSI_COLOR_END));
3906 m_irTrans.reset(new JIT::IRTranslator(initBcOffset, initSpOffset,
3907 liveFunc()));
3910 void Translator::traceEnd() {
3911 assert(!m_irTrans->hhbcTrans().isInlining());
3912 m_irTrans->hhbcTrans().end();
3913 FTRACE(1, "{}{:-^40}{}\n",
3914 color(ANSI_COLOR_BLACK, ANSI_BGCOLOR_GREEN),
3915 " HHIR after translation ",
3916 color(ANSI_COLOR_END));
3919 void Translator::traceFree() {
3920 FTRACE(1, "HHIR free: arena size: {}\n",
3921 m_irTrans->hhbcTrans().unit().arena().size());
3922 m_irTrans.reset();
3925 Translator::TranslateResult
3926 Translator::translateRegion(const RegionDesc& region,
3927 RegionBlacklist& toInterp) {
3928 FTRACE(1, "translateRegion starting with:\n{}\n", show(region));
3929 HhbcTranslator& ht = m_irTrans->hhbcTrans();
3930 assert(!region.blocks.empty());
3931 const SrcKey startSk = region.blocks.front()->start();
3933 for (auto b = 0; b < region.blocks.size(); b++) {
3934 auto const& block = region.blocks[b];
3935 Unit::MetaHandle metaHand;
3936 SrcKey sk = block->start();
3937 const Func* topFunc = nullptr;
3938 auto typePreds = makeMapWalker(block->typePreds());
3939 auto byRefs = makeMapWalker(block->paramByRefs());
3940 auto refPreds = makeMapWalker(block->reffinessPreds());
3941 auto knownFuncs = makeMapWalker(block->knownFuncs());
3943 for (unsigned i = 0; i < block->length(); ++i, sk.advance(block->unit())) {
3944 // Update bcOff here so any guards or assertions from metadata are
3945 // attributed to this instruction.
3946 ht.setBcOff(sk.offset(), false);
3948 // Emit prediction guards. If this is the first instruction in the
3949 // region the guards will go to a retranslate request. Otherwise, they'll
3950 // go to a side exit.
3951 bool isFirstRegionInstr = block == region.blocks.front() && i == 0;
3952 while (typePreds.hasNext(sk)) {
3953 auto const& pred = typePreds.next();
3954 auto type = pred.type;
3955 auto loc = pred.location;
3956 if (type <= Type::Cls) {
3957 // Do not generate guards for Cls values; instead assert the type
3958 assert(loc.tag() == JIT::RegionDesc::Location::Tag::Stack);
3959 ht.assertType(loc, type);
3960 } else if (isFirstRegionInstr) {
3961 bool checkOuterTypeOnly = m_mode != TransProfile;
3962 ht.guardTypeLocation(loc, type, checkOuterTypeOnly);
3963 } else {
3964 ht.checkType(loc, type, sk.offset());
3968 // Emit reffiness guards. For now, we only support reffiness guards at
3969 // the beginning of the region.
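// Illustrative encoding (hypothetical values): mask 0b101 with vals 0b001
// asserts that, for the ActRec at arSpOffset, parameter 0 is passed by
// reference and parameter 2 by value; parameter 1 (mask bit clear) is
// left unconstrained.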
3970 while (refPreds.hasNext(sk)) {
3971 assert(sk == startSk);
3972 auto const& pred = refPreds.next();
3973 ht.guardRefs(pred.arSpOffset, pred.mask, pred.vals);
3976 if (RuntimeOption::EvalJitTransCounters && isFirstRegionInstr) {
3977 ht.emitIncTransCounter();
3980 // Update the current funcd, if we have a new one.
3981 if (knownFuncs.hasNext(sk)) {
3982 topFunc = knownFuncs.next();
3985 // Create and initialize the instruction.
3986 NormalizedInstruction inst;
3987 inst.source = sk;
3988 inst.m_unit = block->unit();
3989 inst.breaksTracelet =
3990 i == block->length() - 1 && block == region.blocks.back();
3991 inst.changesPC = opcodeChangesPC(inst.op());
3992 inst.funcd = topFunc;
3993 inst.nextOffset = kInvalidOffset;
3994 if (instrIsNonCallControlFlow(inst.op()) && !inst.breaksTracelet) {
3995 assert(b + 1 < region.blocks.size());
3996 inst.nextOffset = region.blocks[b+1]->start().offset();
3998 inst.outputPredicted = false;
3999 populateImmediates(inst);
4001 // If this block ends with an inlined FCall, we don't emit anything for
4002 // the FCall and instead set up HhbcTranslator for inlining. Blocks from
4003 // the callee will be next in the region.
4004 if (i == block->length() - 1 &&
4005 inst.op() == OpFCall && block->inlinedCallee()) {
4006 auto const* callee = block->inlinedCallee();
4007 FTRACE(1, "\nstarting inlined call from {} to {} with {} args "
4008 "and stack:\n{}\n",
4009 block->func()->fullName()->data(),
4010 callee->fullName()->data(),
4011 inst.imm[0].u_IVA,
4012 ht.showStack());
4013 auto returnSk = inst.nextSk();
4014 auto returnFuncOff = returnSk.offset() - block->func()->base();
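// Hypothetical numbers: if the instruction after the FCall sits at unit
// offset 120 and the caller's base is 100, returnFuncOff == 20, telling
// beginInlining() where control resumes in the caller once the callee's
// blocks have been translated.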
4015 ht.beginInlining(inst.imm[0].u_IVA, callee, returnFuncOff);
4016 continue;
4019 // We can get a more precise output type for interpOne if we know all of
4020 // its inputs, so we still populate the rest of the instruction even if
4021 // this is true.
4022 inst.interp = toInterp.count(sk);
4024 // Apply the first round of metadata from the repo and get a list of
4025 // input locations.
4026 preInputApplyMetaData(metaHand, &inst);
4028 InputInfos inputInfos;
4029 getInputs(startSk, inst, inputInfos, block->func(), [&](int i) {
4030 return ht.traceBuilder().localType(i, DataTypeGeneric);
4033 // Populate the NormalizedInstruction's input vector, using types from
4034 // HhbcTranslator.
4035 std::vector<DynLocation> dynLocs;
4036 dynLocs.reserve(inputInfos.size());
4037 auto newDynLoc = [&](const InputInfo& ii) {
4038 dynLocs.emplace_back(ii.loc, ht.rttFromLocation(ii.loc));
4039 FTRACE(2, "rttFromLocation: {} -> {}\n",
4040 ii.loc.pretty(), dynLocs.back().rtt.pretty());
4041 return &dynLocs.back();
4043 FTRACE(2, "populating inputs for {}\n", inst.toString());
4044 for (auto const& ii : inputInfos) {
4045 inst.inputs.push_back(newDynLoc(ii));
4048 // Apply the remaining metadata. This may change the types of some of
4049 // inst's inputs.
4050 readMetaData(metaHand, inst, ht);
4051 if (!inst.noOp && inputInfos.needsRefCheck) {
4052 assert(byRefs.hasNext(sk));
4053 inst.preppedByRef = byRefs.next();
4056 // Check for a type prediction. Put it in the NormalizedInstruction so
4057 // the emit* method can use it if needed.
4058 auto const doPrediction = outputIsPredicted(startSk, inst);
4060 // Emit IR for the body of the instruction.
4061 try {
4062 m_irTrans->translateInstr(inst);
4063 } catch (const JIT::FailedIRGen& exn) {
4064 FTRACE(1, "ir generation for {} failed with {}\n",
4065 inst.toString(), exn.what());
4066 always_assert(!toInterp.count(sk));
4067 toInterp.insert(sk);
4068 return Retry;
4071 // Check the prediction. If the predicted type is less specific than what
4072 // is currently on the eval stack, checkType won't emit any code.
4073 if (doPrediction) {
4074 ht.checkTypeStack(0, inst.outPred,
4075 sk.advanced(block->unit()).offset());
4079 assert(!typePreds.hasNext());
4080 assert(!byRefs.hasNext());
4081 assert(!refPreds.hasNext());
4082 assert(!knownFuncs.hasNext());
4085 traceEnd();
4086 try {
4087 traceCodeGen();
4088 } catch (const JIT::FailedCodeGen& exn) {
4089 FTRACE(1, "code generation failed with {}\n", exn.what());
4090 SrcKey sk{exn.vmFunc, exn.bcOff};
4091 always_assert(!toInterp.count(sk));
4092 toInterp.insert(sk);
4093 return Retry;
4096 return Success;
4099 uint64_t* Translator::getTransCounterAddr() {
4100 if (!isTransDBEnabled()) return nullptr;
4102 TransID id = m_translations.size();
4104 // allocate a new chunk of counters if necessary
4105 if (id >= m_transCounters.size() * transCountersPerChunk) {
4106 uint32_t size = sizeof(uint64_t) * transCountersPerChunk;
4107 auto *chunk = (uint64_t*)malloc(size);
4108 bzero(chunk, size);
4109 m_transCounters.push_back(chunk);
4111 assert(id / transCountersPerChunk < m_transCounters.size());
4112 return &(m_transCounters[id / transCountersPerChunk]
4113 [id % transCountersPerChunk]);
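// Chunking arithmetic, with a hypothetical transCountersPerChunk of 1024:
// translation id 2500 lives in chunk 2500 / 1024 == 2, at slot
// 2500 % 1024 == 452. Each chunk is a raw allocation whose address never
// moves, so the returned pointer stays valid as more chunks are appended.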
4116 void Translator::addTranslation(const TransRec& transRec) {
4117 if (Trace::moduleEnabledRelease(Trace::trans, 1)) {
4118 // Log the translation's creation time, SrcKey, sizes, and kind
4119 Trace::traceRelease("New translation: %" PRId64 " %s %u %u %d\n",
4120 Timer::GetCurrentTimeMicros() - m_createdTime,
4121 folly::format("{}:{}:{}",
4122 transRec.src.unit()->filepath()->data(),
4123 transRec.src.getFuncId(),
4124 transRec.src.offset()).str().c_str(),
4125 transRec.aLen,
4126 transRec.astubsLen,
4127 transRec.kind);
4130 if (!isTransDBEnabled()) return;
4131 uint32_t id = getCurrentTransID();
4132 m_translations.push_back(transRec);
4133 m_translations[id].setID(id);
4135 if (transRec.aLen > 0) {
4136 m_transDB[transRec.aStart] = id;
4138 if (transRec.astubsLen > 0) {
4139 m_transDB[transRec.astubsStart] = id;
4143 uint64_t Translator::getTransCounter(TransID transId) const {
4144 if (!isTransDBEnabled()) return -1ul;
4145 assert(transId < m_translations.size());
4147 uint64_t counter;
4149 if (transId / transCountersPerChunk >= m_transCounters.size()) {
4150 counter = 0;
4151 } else {
4152 counter = m_transCounters[transId / transCountersPerChunk]
4153 [transId % transCountersPerChunk];
4155 return counter;
4158 namespace {
4160 struct DeferredPathInvalidate : public DeferredWorkItem {
4161 const std::string m_path;
4162 explicit DeferredPathInvalidate(const std::string& path) : m_path(path) {
4163 assert(m_path.size() >= 1 && m_path[0] == '/');
4165 void operator()() {
4166 String spath(m_path);
4167 /*
4168 * inotify saw this path change. Now poke the file repository;
4169 * it will notice the underlying PhpFile* has changed.
4170 *
4171 * We don't actually need to *do* anything with the PhpFile* from
4172 * this lookup; since the path has changed, the file we'll get out is
4173 * going to be some new file, not the old file that needs invalidation.
4174 */
4175 (void)g_vmContext->lookupPhpFile(spath.get(), "");
4181 static const char *transKindStr[] = {
4182 #define DO(KIND) #KIND,
4183 TRANS_KINDS
4184 #undef DO
4187 const char *getTransKindName(TransKind kind) {
4188 assert(kind >= 0 && kind < TransInvalid);
4189 return transKindStr[kind];
4192 TransRec::TransRec(SrcKey s,
4193 MD5 _md5,
4194 TransKind _kind,
4195 const Tracelet* t,
4196 TCA _aStart,
4197 uint32_t _aLen,
4198 TCA _astubsStart,
4199 uint32_t _astubsLen,
4200 vector<TransBCMapping> _bcMapping)
4201 : id(0)
4202 , kind(_kind)
4203 , src(s)
4204 , md5(_md5)
4205 , bcStopOffset(t ? t->nextSk().offset() : 0)
4206 , aStart(_aStart)
4207 , aLen(_aLen)
4208 , astubsStart(_astubsStart)
4209 , astubsLen(_astubsLen)
4210 , bcMapping(_bcMapping) {
4211 if (t != nullptr) {
4212 for (auto dep : t->m_dependencies) {
4213 dependencies.push_back(*dep.second);
4219 string
4220 TransRec::print(uint64_t profCount) const {
4221 std::string ret;
4223 // Split up the call to prevent template explosion
4224 ret += folly::format(
4225 "Translation {} {{\n"
4226 " src.md5 = {}\n"
4227 " src.funcId = {}\n"
4228 " src.startOffset = {}\n"
4229 " src.stopOffset = {}\n",
4230 id, md5, src.getFuncId(), src.offset(), bcStopOffset).str();
4232 ret += folly::format(
4233 " kind = {} ({})\n"
4234 " aStart = {}\n"
4235 " aLen = {:#x}\n"
4236 " stubStart = {}\n"
4237 " stubLen = {:#x}\n",
4238 static_cast<uint32_t>(kind), getTransKindName(kind),
4239 aStart, aLen, astubsStart, astubsLen).str();
4241 ret += folly::format(
4242 " profCount = {}\n"
4243 " bcMapping = {}\n",
4244 profCount, bcMapping.size()).str();
4246 for (auto const& info : bcMapping) {
4247 ret += folly::format(
4248 " {} {} {} {}\n",
4249 info.md5, info.bcStart,
4250 info.aStart, info.astubsStart).str();
4253 ret += "}\n\n";
4254 return ret;
4257 void
4258 ActRecState::pushFunc(const NormalizedInstruction& inst) {
4259 assert(isFPush(inst.op()));
4260 if (inst.op() == OpFPushFuncD || inst.op() == OpFPushFuncU) {
4261 const Unit& unit = *inst.unit();
4262 Id funcId = inst.imm[1].u_SA;
4263 auto const& nep = unit.lookupNamedEntityPairId(funcId);
4264 auto const func = Unit::lookupFunc(nep.second);
4265 if (func) func->validate();
4266 if (func && func->isNameBindingImmutable(&unit)) {
4267 pushFuncD(func);
4268 return;
4271 pushDynFunc();
4274 void
4275 ActRecState::pushFuncD(const Func* func) {
4276 TRACE(2, "ActRecState: pushStatic func %p(%s)\n", func, func->name()->data());
4277 func->validate();
4278 Record r;
4279 r.m_state = State::KNOWN;
4280 r.m_topFunc = func;
4281 r.m_entryArDelta = InvalidEntryArDelta;
4282 m_arStack.push_back(r);
4285 void
4286 ActRecState::pushDynFunc() {
4287 TRACE(2, "ActRecState: pushDynFunc\n");
4288 Record r;
4289 r.m_state = State::UNKNOWABLE;
4290 r.m_topFunc = nullptr;
4291 r.m_entryArDelta = InvalidEntryArDelta;
4292 m_arStack.push_back(r);
4295 void
4296 ActRecState::pop() {
4297 if (!m_arStack.empty()) {
4298 m_arStack.pop_back();
4302 /*
4303 * checkByRef() returns true if the parameter specified by argNum is passed
4304 * by reference, otherwise it returns false. This function may also throw an
4305 * UnknownInputExc if the reffiness cannot be determined.
4306 *
4307 * Note that the 'entryArDelta' parameter specifies the delta between sp at
4308 * the beginning of the tracelet and ar.
4309 */
4310 bool
4311 ActRecState::checkByRef(int argNum, int entryArDelta, RefDeps* refDeps) {
4312 FTRACE(2, "ActRecState: getting reffiness for arg {}, arDelta {}\n",
4313 argNum, entryArDelta);
4314 if (m_arStack.empty()) {
4315 // The ActRec in question was pushed before the beginning of the
4316 // tracelet, so we can only guess at parameter reffiness, and we
4317 // record those assumptions as tracelet guards.
4319 const ActRec* ar = arFromSpOffset((ActRec*)vmsp(), entryArDelta);
4320 Record r;
4321 r.m_state = State::GUESSABLE;
4322 r.m_entryArDelta = entryArDelta;
4323 ar->m_func->validate();
4324 r.m_topFunc = ar->m_func;
4325 m_arStack.push_back(r);
4327 Record& r = m_arStack.back();
4328 if (r.m_state == State::UNKNOWABLE) {
4329 TRACE(2, "ActRecState: unknowable, throwing in the towel\n");
4330 throwUnknownInput();
4331 not_reached();
4333 assert(r.m_topFunc);
4334 bool retval = r.m_topFunc->byRef(argNum);
4335 if (r.m_state == State::GUESSABLE) {
4336 assert(r.m_entryArDelta != InvalidEntryArDelta);
4337 TRACE(2, "ActRecState: guessing arg%d -> %d\n", argNum, retval);
4338 refDeps->addDep(r.m_entryArDelta, argNum, retval);
4340 return retval;
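// Minimal usage sketch (illustrative only; 'delta' and 'deps' are
// placeholders, not names from this file):
//
//   ActRecState as;
//   as.pushDynFunc();                 // e.g. FPushFunc: callee unknowable
//   as.checkByRef(0, delta, &deps);   // throws via throwUnknownInput()
//
//   as.pushFuncD(f);                  // FPushFuncD, immutable name binding
//   as.checkByRef(0, delta, &deps);   // returns f->byRef(0); no guard added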
4343 const Func*
4344 ActRecState::knownFunc() {
4345 if (currentState() != State::KNOWN) return nullptr;
4346 assert(!m_arStack.empty());
4347 return m_arStack.back().m_topFunc;
4350 ActRecState::State
4351 ActRecState::currentState() {
4352 if (m_arStack.empty()) return State::GUESSABLE;
4353 return m_arStack.back().m_state;
4356 const Func* lookupImmutableMethod(const Class* cls, const StringData* name,
4357 bool& magicCall, bool staticLookup,
4358 Class* ctx) {
4359 if (!cls || RuntimeOption::EvalJitEnableRenameFunction) return nullptr;
4360 if (cls->attrs() & AttrInterface) return nullptr;
4361 bool privateOnly = false;
4362 if (!RuntimeOption::RepoAuthoritative ||
4363 !(cls->preClass()->attrs() & AttrUnique)) {
4364 if (!ctx || !ctx->classof(cls)) {
4365 return nullptr;
4367 if (!staticLookup) privateOnly = true;
4370 const Func* func;
4371 MethodLookup::LookupResult res = staticLookup ?
4372 g_vmContext->lookupClsMethod(func, cls, name, nullptr, ctx, false) :
4373 g_vmContext->lookupObjMethod(func, cls, name, ctx, false);
4375 if (res == MethodLookup::LookupResult::MethodNotFound) return nullptr;
4377 assert(res == MethodLookup::LookupResult::MethodFoundWithThis ||
4378 res == MethodLookup::LookupResult::MethodFoundNoThis ||
4379 (staticLookup ?
4380 res == MethodLookup::LookupResult::MagicCallStaticFound :
4381 res == MethodLookup::LookupResult::MagicCallFound));
4383 magicCall =
4384 res == MethodLookup::LookupResult::MagicCallStaticFound ||
4385 res == MethodLookup::LookupResult::MagicCallFound;
4387 if ((privateOnly && (!(func->attrs() & AttrPrivate) || magicCall)) ||
4388 func->isAbstract() ||
4389 func->attrs() & AttrDynamicInvoke) {
4390 return nullptr;
4393 if (staticLookup) {
4394 if (magicCall) {
4395 /*
4396 * i) We can't tell if a magic call would go to __call or __callStatic.
4397 * - Could deal with this by checking for the existence of __call.
4398 *
4399 * ii) hphp's semantics are that, in the case of an object call, we look
4400 * for __call in the scope of the object (this is incompatible
4401 * with zend), which means we would have to know that there is no
4402 * __call higher up in the tree.
4403 * - Could deal with this by checking for AttrNoOverride on the
4404 * class.
4405 */
4406 func = nullptr;
4408 } else if (!(func->attrs() & AttrPrivate)) {
4409 if (magicCall || func->attrs() & AttrStatic) {
4410 if (!(cls->preClass()->attrs() & AttrNoOverride)) {
4411 func = nullptr;
4413 } else if (!(func->attrs() & AttrNoOverride && !func->hasStaticLocals()) &&
4414 !(cls->preClass()->attrs() & AttrNoOverride)) {
4415 func = nullptr;
4418 return func;
4421 std::string traceletShape(const Tracelet& trace) {
4422 std::string ret;
4424 for (auto ni = trace.m_instrStream.first; ni; ni = ni->next) {
4425 using folly::toAppend;
4427 toAppend(opcodeToName(ni->op()), &ret);
4428 if (ni->immVec.isValid()) {
4429 toAppend(
4430 "<",
4431 locationCodeString(ni->immVec.locationCode()),
4432 &ret);
4433 for (auto& mc : ni->immVecM) {
4434 toAppend(" ", memberCodeString(mc), &ret);
4436 toAppend(">", &ret);
4438 toAppend(" ", &ret);
4441 return ret;
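// Example output for a hypothetical tracelet: CGetL, then a SetM with a
// local base and one element member, then RetC would render roughly as
//
//   "CGetL SetM<L EC> RetC "
//
// (one trailing space per instruction; the exact member-code strings come
// from memberCodeString()).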
4444 } // HPHP::Transl
4446 void invalidatePath(const std::string& path) {
4447 TRACE(1, "invalidatePath: abspath %s\n", path.c_str());
4448 PendQ::defer(new DeferredPathInvalidate(path));
4451 } // HPHP