From e282644ddfa14ad17471bb59725054bbfc1c7841 Mon Sep 17 00:00:00 2001 From: bsimmers Date: Tue, 30 Jul 2013 17:00:07 -0700 Subject: [PATCH] Add inlining support to the tracelet region selector The tracelet region selector now supports creating regions with inlined calls. When it sees an FCall with a known callee, it invokes a new instance of the tracelet region selector on the callee. The callee is evaluated using the same cost metric as Translator::analyze. If it passes, the callee is inlined for real, which allows creation of longer tracelets based on the return value from the callee. Differential Revision: D909359 --- hphp/runtime/vm/hhbc.cpp | 31 ++- hphp/runtime/vm/hhbc.h | 22 ++ hphp/runtime/vm/jit/annotation.cpp | 20 +- hphp/runtime/vm/jit/check.cpp | 12 +- hphp/runtime/vm/jit/hhbc-translator.cpp | 19 +- hphp/runtime/vm/jit/hhbc-translator.h | 3 +- hphp/runtime/vm/jit/ir-translator.cpp | 110 +++++---- hphp/runtime/vm/jit/ir-translator.h | 19 +- hphp/runtime/vm/jit/region-selection.cpp | 195 ++++++++++------ hphp/runtime/vm/jit/region-selection.h | 29 +-- hphp/runtime/vm/jit/region-tracelet.cpp | 296 +++++++++++++++++++++---- hphp/runtime/vm/jit/trace-builder.cpp | 4 +- hphp/runtime/vm/jit/translator.cpp | 25 ++- hphp/runtime/vm/srckey.cpp | 12 + hphp/runtime/vm/srckey.h | 21 +- hphp/test/quick/vector-clscns.php | 10 + hphp/test/quick/vector-clscns.php.expectf | 1 + hphp/test/slow/ir_inlining/recurse.php | 7 + hphp/test/slow/ir_inlining/recurse.php.expectf | 1 + 19 files changed, 639 insertions(+), 198 deletions(-) create mode 100644 hphp/test/quick/vector-clscns.php create mode 100644 hphp/test/quick/vector-clscns.php.expectf create mode 100644 hphp/test/slow/ir_inlining/recurse.php create mode 100644 hphp/test/slow/ir_inlining/recurse.php.expectf diff --git a/hphp/runtime/vm/hhbc.cpp b/hphp/runtime/vm/hhbc.cpp index decc8ac8c04..4f74af4b12f 100644 --- a/hphp/runtime/vm/hhbc.cpp +++ b/hphp/runtime/vm/hhbc.cpp @@ -964,7 +964,36 @@ ImmVector getImmVector(const Op* opcode) { } } - NOT_REACHED(); + not_reached(); +} + +MInstrLocation getMLocation(const Op* opcode) { + auto immVec = getImmVector(opcode); + auto vec = immVec.vec(); + auto const lcode = LocationCode(*vec++); + auto const imm = numLocationCodeImms(lcode) ? decodeVariableSizeImm(&vec) + : 0; + return {lcode, imm}; +} + +std::vector getMVector(const Op* opcode) { + auto immVec = getImmVector(opcode); + std::vector result; + auto it = immVec.vec(); + auto end = it + immVec.size(); + + // Skip the LocationCode and its immediate + auto const lcode = LocationCode(*it++); + if (numLocationCodeImms(lcode)) decodeVariableSizeImm(&it); + + while (it < end) { + auto const mcode = MemberCode(*it++); + auto const imm = memberCodeHasImm(mcode) ? decodeMemberCodeImm(&it, mcode) + : 0; + result.push_back({mcode, imm}); + } + + return result; } const uint8_t* ImmVector::findLastMember() const { diff --git a/hphp/runtime/vm/hhbc.h b/hphp/runtime/vm/hhbc.h index 09196695b91..fb4b5988fc0 100644 --- a/hphp/runtime/vm/hhbc.h +++ b/hphp/runtime/vm/hhbc.h @@ -723,6 +723,28 @@ private: // Must be an opcode that actually has an ImmVector. 
ImmVector getImmVector(const Op* opcode); +struct MInstrLocation { + LocationCode lcode; + int64_t imm; + + bool hasImm() const { + auto count = numLocationCodeImms(lcode); + assert(count == 0 || count == 1); + return count; + } +}; +MInstrLocation getMLocation(const Op* opcode); + +struct MVectorItem { + MemberCode mcode; + int64_t imm; + + bool hasImm() const { + return memberCodeHasImm(mcode); + } +}; +std::vector getMVector(const Op* opcode); + /* Some decoding helper functions. */ int numImmediates(Op opcode); ArgType immType(Op opcode, int idx); diff --git a/hphp/runtime/vm/jit/annotation.cpp b/hphp/runtime/vm/jit/annotation.cpp index a1dbc625085..79b44e327a3 100644 --- a/hphp/runtime/vm/jit/annotation.cpp +++ b/hphp/runtime/vm/jit/annotation.cpp @@ -64,7 +64,7 @@ decodeNameAndArgs(const StringData* enc, string& outName, int& outNumArgs) { outName = name; } -static void recordNameAndArgs(const SrcKey& sk, +static void recordNameAndArgs(const SrcKey sk, const StringData* name, int numArgs) { CallRecord cr; @@ -73,11 +73,10 @@ static void recordNameAndArgs(const SrcKey& sk, s_callDB.insert(std::make_pair(sk, cr)); } -static void recordFunc(NormalizedInstruction& i, - const SrcKey& sk, +static void recordFunc(const SrcKey sk, const Func* func) { FTRACE(2, "annotation: recordFunc: {}@{} {}\n", - i.m_unit->filepath()->data(), + sk.unit()->filepath()->data(), sk.offset(), func->fullName()->data()); @@ -87,12 +86,11 @@ static void recordFunc(NormalizedInstruction& i, s_callDB.insert(std::make_pair(sk, cr)); } -static void recordActRecPush(NormalizedInstruction& i, - const Unit* unit, +static void recordActRecPush(const SrcKey sk, const StringData* name, const StringData* clsName, bool staticCall) { - const SrcKey& sk = i.source; + auto unit = sk.unit(); FTRACE(2, "annotation: recordActRecPush: {}@{} {}{}{} ({}static)\n", unit->filepath()->data(), sk.offset(), @@ -112,11 +110,11 @@ static void recordActRecPush(NormalizedInstruction& i, if (clsName) { const Class* cls = Unit::lookupUniqueClass(clsName); bool magic = false; - Class* ctx = i.source.func()->cls(); + Class* ctx = sk.func()->cls(); const Func* func = lookupImmutableMethod(cls, name, magic, staticCall, ctx); if (func) { - recordFunc(i, fcall, func); + recordFunc(fcall, func); } return; } @@ -125,7 +123,7 @@ static void recordActRecPush(NormalizedInstruction& i, // this will never go into a call cache, so we dont need to // encode the args. it will be used in OpFCall below to // set the i->funcd. - recordFunc(i, fcall, func); + recordFunc(fcall, func); } else { // It's not enough to remember the function name; we also need to encode // the number of arguments and current flag disposition. 
@@ -179,7 +177,7 @@ void annotate(NormalizedInstruction* i) { funcName = cls->getCtor()->name(); } assert(funcName->isStatic()); - recordActRecPush(*i, i->m_unit, funcName, className, + recordActRecPush(i->source, funcName, className, i->op() == OpFPushClsMethodD || i->op() == OpFPushClsMethodF); } break; diff --git a/hphp/runtime/vm/jit/check.cpp b/hphp/runtime/vm/jit/check.cpp index 433012c9cb9..fb903d49cbe 100644 --- a/hphp/runtime/vm/jit/check.cpp +++ b/hphp/runtime/vm/jit/check.cpp @@ -272,11 +272,13 @@ bool checkTmpsSpanningCalls(IRTrace* trace, const IRFactory& irFactory) { if (src->isA(Type::FramePtr)) continue; if (src->isConst()) continue; if (!state[src]) { - FTRACE(1, "checkTmpsSpanningCalls failed\n" - " instruction: {}\n" - " src: {}\n", - inst.toString(), - src->toString()); + auto msg = folly::format("checkTmpsSpanningCalls failed\n" + " instruction: {}\n" + " src: {}\n", + inst.toString(), + src->toString()).str(); + std::cerr << msg; + FTRACE(1, "{}", msg); isValid = false; } } diff --git a/hphp/runtime/vm/jit/hhbc-translator.cpp b/hphp/runtime/vm/jit/hhbc-translator.cpp index 620f15490e8..70fb5646b2f 100755 --- a/hphp/runtime/vm/jit/hhbc-translator.cpp +++ b/hphp/runtime/vm/jit/hhbc-translator.cpp @@ -364,6 +364,10 @@ bool HhbcTranslator::isInlining() const { return m_bcStateStack.size() > 1; } +int HhbcTranslator::inliningDepth() const { + return m_bcStateStack.size() - 1; +} + BCMarker HhbcTranslator::makeMarker(Offset bcOff) { int32_t stackOff = m_tb->spOffset() + m_evalStack.numCells() - m_stackDeficit; @@ -4025,14 +4029,22 @@ void HhbcTranslator::emitInterpOne(Type outType, int popped) { } void HhbcTranslator::emitInterpOne(Type outType, int popped, int pushed) { + auto unit = curFunc()->unit(); auto sp = spillStack(); - Unit *u = curFunc()->unit(); + auto op = unit->getOpcode(bcOff()); + + auto& iInfo = getInstrInfo(op); + if (iInfo.type == Transl::InstrFlags::OutFDesc) { + m_fpiStack.emplace(sp, m_tb->spOffset()); + } else if (isFCallStar(op) && !m_fpiStack.empty()) { + m_fpiStack.pop(); + } InterpOneData idata; idata.bcOff = bcOff(); idata.cellsPopped = popped; idata.cellsPushed = pushed; - idata.opcode = u->getOpcode(bcOff()); + idata.opcode = op; auto const changesPC = opcodeChangesPC(idata.opcode); gen(changesPC ? 
InterpOneCF : InterpOne, outType, idata, m_tb->fp(), sp); @@ -4077,7 +4089,8 @@ std::string HhbcTranslator::showStack() const { msg << "ActRec from "; curUnit()->prettyPrint(msg, Unit::PrintOpts().range(fpushOff, after) .noLineNumbers() - .indent(0)); + .indent(0) + .noFuncs()); auto msgStr = msg.str(); assert(msgStr.back() == '\n'); msgStr.erase(msgStr.size() - 1); diff --git a/hphp/runtime/vm/jit/hhbc-translator.h b/hphp/runtime/vm/jit/hhbc-translator.h index 8a00c7ba9fc..6ed9c1a7fa8 100755 --- a/hphp/runtime/vm/jit/hhbc-translator.h +++ b/hphp/runtime/vm/jit/hhbc-translator.h @@ -151,6 +151,7 @@ struct HhbcTranslator { const Func* target, Offset returnBcOffset); bool isInlining() const; + int inliningDepth() const; void profileFunctionEntry(const char* category); void profileInlineFunctionShape(const std::string& str); void profileSmallFunctionShape(const std::string& str); @@ -786,6 +787,7 @@ public: Offset bcOff() const { return m_bcStateStack.back().bcOff; } SrcKey curSrcKey() const { return SrcKey(curFunc(), bcOff()); } size_t spOffset() const; + Type topType(uint32_t i, DataTypeCategory c = DataTypeSpecific) const; private: /* @@ -844,7 +846,6 @@ private: return top(Type::Cell, i, cat); } SSATmp* topV(uint32_t i = 0) { return top(Type::BoxedCell, i); } - Type topType(uint32_t i, DataTypeCategory c = DataTypeSpecific) const; std::vector peekSpillValues() const; SSATmp* emitSpillStack(SSATmp* sp, const std::vector& spillVals); diff --git a/hphp/runtime/vm/jit/ir-translator.cpp b/hphp/runtime/vm/jit/ir-translator.cpp index 5019f21d4ac..d093ef1ba6f 100644 --- a/hphp/runtime/vm/jit/ir-translator.cpp +++ b/hphp/runtime/vm/jit/ir-translator.cpp @@ -1147,78 +1147,112 @@ IRTranslator::translateFCallBuiltin(const NormalizedInstruction& i) { HHIR_EMIT(FCallBuiltin, numArgs, numNonDefault, funcId); } -bool shouldIRInline(const Func* curFunc, - const Func* func, - const Tracelet& callee) { +bool shouldIRInline(const Func* caller, const Func* callee, RegionIter& iter) { if (!RuntimeOption::EvalHHIREnableGenTimeInlining) { return false; } - const NormalizedInstruction* cursor; - Op current; - auto refuse = [&](const char* why) -> bool { FTRACE(1, "shouldIRInline: refusing {} [NI = {}]\n", - func->fullName()->data(), why, cursor->toString()); + callee->fullName()->data(), why, + iter.finished() ? 
"" : iter.sk().showInst()); return false; }; auto accept = [&](const char* kind) -> bool { FTRACE(1, "shouldIRInline: inlining {} \n", - func->fullName()->data(), kind); + callee->fullName()->data(), kind); return true; }; - if (func->numIterators() != 0) { + if (callee->numIterators() != 0) { return refuse("iterators"); } - if (func->isMagic() || Func::isSpecial(func->name())) { + if (callee->isMagic() || Func::isSpecial(callee->name())) { return refuse("special or magic function"); } - if (func->attrs() & AttrMayUseVV) { + if (callee->attrs() & AttrMayUseVV) { return refuse("may use dynamic environment"); } - if (!(func->attrs() & AttrHot) && (curFunc->attrs() & AttrHot)) { + if (!(callee->attrs() & AttrHot) && (caller->attrs() & AttrHot)) { return refuse("inlining cold func into hot func"); } - auto resetCursor = [&] { - cursor = callee.m_instrStream.first; - current = cursor->op(); - }; - auto next = [&]() -> Op { - auto op = cursor->op(); - cursor = cursor->next; - current = cursor->op(); - return op; - }; - - auto atRet = [&] { - return current == OpRetC || current == OpRetV; - }; - - resetCursor(); + //////////// uint64_t cost = 0; - for (; !atRet(); next()) { - if (current == OpFCallArray) return refuse("FCallArray"); + int inlineDepth = 0; + Op op = OpLowInvalid; + const Func* func = nullptr; + + for (; !iter.finished(); iter.advance()) { + // If func has changed after an FCall, we've started an inlined call. This + // will have to change when we support inlining recursive calls. + if (func && func != iter.sk().func()) { + assert(isRet(op) || op == OpFCall); + if (op == OpFCall) { + ++inlineDepth; + } + } + op = iter.sk().op(); + func = iter.sk().func(); + + // If we hit a RetC/V while inlining, leave that level and + // continue. Otherwise, accept the tracelet. 
+ if (isRet(op)) { + if (inlineDepth > 0) { + --inlineDepth; + continue; + } else { + assert(inlineDepth == 0); + return accept("entire function fits in one region"); + } + } + + if (op == OpFCallArray) return refuse("FCallArray"); cost += 1; + if (hasImmVector(op)) { + cost += getMVector(reinterpret_cast<const Op*>(iter.sk().pc())).size(); + // static cost + scale factor for vector ops + } - // static cost + scale factor for vector ops - if (cursor->immVecM.size()) { - cost += cursor->immVecM.size(); + if (cost > RuntimeOption::EvalHHIRInliningMaxCost) { + return refuse("too expensive"); } - if (cursor->breaksTracelet) { + if (Transl::opcodeBreaksBB(op)) { return refuse("breaks tracelet"); } + } - if (cost > RuntimeOption::EvalHHIRInliningMaxCost) { - return refuse("too expensive"); - } + return refuse("region doesn't end in RetC/RetV"); +} + +struct TraceletIter : public RegionIter { + explicit TraceletIter(const Tracelet& tlet) + : m_current(tlet.m_instrStream.first) + {} + + bool finished() const { return m_current == nullptr; } + + SrcKey sk() const { + assert(!finished()); + return m_current->source; } - return accept("function is okay"); + void advance() { + assert(!finished()); + m_current = m_current->next; + } + + private: + const NormalizedInstruction* m_current; +}; + +bool shouldIRInline(const Func* caller, const Func* callee, + const Tracelet& tlet) { + TraceletIter iter(tlet); + return shouldIRInline(caller, callee, iter); } void diff --git a/hphp/runtime/vm/jit/ir-translator.h b/hphp/runtime/vm/jit/ir-translator.h index 9cb487e58c1..be4c0ab9336 100644 --- a/hphp/runtime/vm/jit/ir-translator.h +++ b/hphp/runtime/vm/jit/ir-translator.h @@ -29,8 +29,23 @@ struct RuntimeType; namespace JIT { using Transl::NormalizedInstruction; -bool shouldIRInline(const Func* curFunc, const Func* func, - const Transl::Tracelet& callee); +/* + * RegionIter is a temporary class used to traverse a region of hhbc + * instructions that may be more than just a straight-line series of + * instructions. It is used by shouldIRInline to traverse both Tracelets and + * RegionDescs.
+ */ +struct RegionIter { + virtual ~RegionIter() {} + + virtual bool finished() const = 0; + virtual SrcKey sk() const = 0; + virtual void advance() = 0; +}; +bool shouldIRInline(const Func* caller, const Func* callee, + RegionIter& iter); +bool shouldIRInline(const Func* caller, const Func* callee, + const Transl::Tracelet& tlet); /* * IRTranslator is used to convert hhbc instructions to an IRTrace of hhir diff --git a/hphp/runtime/vm/jit/region-selection.cpp b/hphp/runtime/vm/jit/region-selection.cpp index 475494b0931..ff964242399 100644 --- a/hphp/runtime/vm/jit/region-selection.cpp +++ b/hphp/runtime/vm/jit/region-selection.cpp @@ -41,7 +41,7 @@ using Transl::TranslatorX64; extern RegionDescPtr selectMethod(const RegionContext&); extern RegionDescPtr selectOneBC(const RegionContext&); -extern RegionDescPtr selectTracelet(const RegionContext&); +extern RegionDescPtr selectTracelet(const RegionContext&, int inlineDepth); extern RegionDescPtr selectHotBlock(TransID transId, const ProfData* profData, const TransCFG& cfg); @@ -83,31 +83,105 @@ RegionMode regionMode() { return RegionMode::None; } +template +void truncateMap(Container& c, SrcKey final) { + c.erase(c.upper_bound(final), c.end()); +} } ////////////////////////////////////////////////////////////////////// +RegionDesc::Block::Block(const Func* func, Offset start, int length) + : m_func(func) + , m_start(start) + , m_last(kInvalidOffset) + , m_length(length) + , m_inlinedCallee(nullptr) +{ + assert(length >= 0); + if (length > 0) { + SrcKey sk(func, start); + for (unsigned i = 1; i < length; ++i) sk.advance(); + m_last = sk.offset(); + } + checkInstructions(); + checkMetadata(); +} + +bool RegionDesc::Block::contains(SrcKey sk) const { + return sk >= start() && sk <= last(); +} + +void RegionDesc::Block::addInstruction() { + if (m_length > 0) checkInstruction(last().op()); + assert((m_last == kInvalidOffset) == (m_length == 0)); + + ++m_length; + if (m_length == 1) { + m_last = m_start; + } else { + m_last = last().advanced().offset(); + } +} + +void RegionDesc::Block::truncateAfter(SrcKey final) { + assert_not_implemented(!m_inlinedCallee); + + auto skIter = start(); + int newLen = -1; + for (int i = 0; i < m_length; ++i, skIter.advance(unit())) { + if (skIter == final) { + newLen = i + 1; + break; + } + } + assert(newLen != -1); + m_length = newLen; + m_last = final.offset(); + + truncateMap(m_typePreds, final); + truncateMap(m_byRefs, final); + truncateMap(m_refPreds, final); + truncateMap(m_knownFuncs, final); + + checkInstructions(); + checkMetadata(); +} + void RegionDesc::Block::addPredicted(SrcKey sk, TypePred pred) { + FTRACE(2, "Block::addPredicted({}, {})\n", showShort(sk), show(pred)); assert(pred.type.subtypeOf(Type::Gen | Type::Cls)); + assert(contains(sk)); m_typePreds.insert(std::make_pair(sk, pred)); - checkInvariants(); } void RegionDesc::Block::setParamByRef(SrcKey sk, bool byRef) { + FTRACE(2, "Block::setParamByRef({}, {})\n", showShort(sk), + byRef ? "by ref" : "by val"); assert(m_byRefs.find(sk) == m_byRefs.end()); + assert(contains(sk)); m_byRefs.insert(std::make_pair(sk, byRef)); - checkInvariants(); } void RegionDesc::Block::addReffinessPred(SrcKey sk, const ReffinessPred& pred) { + FTRACE(2, "Block::addReffinessPred({}, {})\n", showShort(sk), show(pred)); + assert(contains(sk)); m_refPreds.insert(std::make_pair(sk, pred)); - checkInvariants(); } void RegionDesc::Block::setKnownFunc(SrcKey sk, const Func* func) { + FTRACE(2, "Block::setKnownFunc({}, {})\n", showShort(sk), + func ? 
func->fullName()->data() : "nullptr"); assert(m_knownFuncs.find(sk) == m_knownFuncs.end()); + assert(contains(sk)); + auto it = m_knownFuncs.lower_bound(sk); + if (it != m_knownFuncs.begin() && (--it)->second == func) { + // Adding func at this sk won't add any new information. + FTRACE(2, " func exists at {}, not adding\n", showShort(it->first)); + return; + } + m_knownFuncs.insert(std::make_pair(sk, func)); - checkInvariants(); } void RegionDesc::Block::setPostConditions(const PostConditions& conds) { @@ -115,58 +189,60 @@ void RegionDesc::Block::setPostConditions(const PostConditions& conds) { } /* - * Check invariants on a RegionDesc::Block. + * Check invariants about the bytecode instructions in this Block. * * 1. Single entry, single exit (aside from exceptions). I.e. no * non-fallthrough instructions mid-block and no control flow (not * counting calls as control flow). * - * 2. Each SrcKey in m_typePreds, m_byRefs, m_refPreds, and m_knownFuncs is - * within the bounds of the block. - * - * 3. Each local id referred to in the type prediction list is valid. - * - * 4. (Unchecked) each stack offset in the type prediction list is - * valid. */ -void RegionDesc::Block::checkInvariants() const { +void RegionDesc::Block::checkInstructions() const { if (!debug || length() == 0) return; - smart::set keysInRange; - auto firstKey = [&] { return *keysInRange.begin(); }; - auto lastKey = [&] { - assert(!keysInRange.empty()); - return *--keysInRange.end(); - }; - keysInRange.insert(start()); + auto u = unit(); + auto sk = start(); + for (int i = 1; i < length(); ++i) { - if (i != length() - 1) { - auto const pc = unit()->at(lastKey().offset()); - if (instrFlags(toOp(*pc)) & TF) { - FTRACE(1, "Bad block: {}\n", show(*this)); - assert(!"Block may not contain non-fallthrough instruction unless " - "they are last"); - } - if (instrIsNonCallControlFlow(toOp(*pc))) { - FTRACE(1, "Bad block: {}\n", show(*this)); - assert(!"Block may not contain control flow instructions unless " - "they are last"); - } - } - keysInRange.insert(lastKey().advanced(unit())); + if (i != length() - 1) checkInstruction(sk.op()); + sk.advance(u); } - assert(keysInRange.size() == length()); + assert(sk.offset() == m_last); +} + +void RegionDesc::Block::checkInstruction(Op op) const { + if (instrFlags(op) & TF) { + FTRACE(1, "Bad block: {}\n", show(*this)); + assert(!"Block may not contain non-fallthrough instruction unless " + "they are last"); + } + if (instrIsNonCallControlFlow(op)) { + FTRACE(1, "Bad block: {}\n", show(*this)); + assert(!"Block may not contain control flow instructions unless " + "they are last"); + } +} - auto rangeCheck = [&](const char* type, SrcKey sk) { - if (!keysInRange.count(sk)) { +/* + * Check invariants about the metadata for this Block. + * + * 1. Each SrcKey in m_typePreds, m_byRefs, m_refPreds, and m_knownFuncs is + * within the bounds of the block. + * + * 2. Each local id referred to in the type prediction list is valid. + * + * 3. (Unchecked) each stack offset in the type prediction list is + * valid. 
+*/ +void RegionDesc::Block::checkMetadata() const { + auto rangeCheck = [&](const char* type, Offset o) { + if (o < m_start || o > m_last) { std::cerr << folly::format("{} at {} outside range [{}, {}]\n", - type, show(sk), - show(firstKey()), show(lastKey())); + type, o, m_start, m_last); assert(!"Region::Block contained out-of-range metadata"); } }; for (auto& tpred : m_typePreds) { - rangeCheck("type prediction", tpred.first); + rangeCheck("type prediction", tpred.first.offset()); auto& loc = tpred.second.location; switch (loc.tag()) { case Location::Tag::Local: assert(loc.localId() < m_func->numLocals()); @@ -177,13 +253,13 @@ void RegionDesc::Block::checkInvariants() const { } for (auto& byRef : m_byRefs) { - rangeCheck("parameter reference flag", byRef.first); + rangeCheck("parameter reference flag", byRef.first.offset()); } for (auto& refPred : m_refPreds) { - rangeCheck("reffiness prediction", refPred.first); + rangeCheck("reffiness prediction", refPred.first.offset()); } for (auto& func : m_knownFuncs) { - rangeCheck("known Func*", func.first); + rangeCheck("known Func*", func.first.offset()); } } @@ -323,24 +399,8 @@ RegionDescPtr selectRegion(const RegionContext& context, auto const mode = regionMode(); FTRACE(1, - "Select region: {}@{} mode={} context:\n{}{}", - context.func->fullName()->data(), - context.bcOffset, - static_cast(mode), - [&]{ - std::string ret; - for (auto& t : context.liveTypes) { - folly::toAppend(" ", show(t), "\n", &ret); - } - return ret; - }(), - [&]{ - std::string ret; - for (auto& ar : context.preLiveARs) { - folly::toAppend(" ", show(ar), "\n", &ret); - } - return ret; - }() + "Select region: mode={} context:\n{}", + static_cast(mode), show(context) ); auto region = [&]{ @@ -349,7 +409,7 @@ RegionDescPtr selectRegion(const RegionContext& context, case RegionMode::None: return RegionDescPtr{nullptr}; case RegionMode::OneBC: return selectOneBC(context); case RegionMode::Method: return selectMethod(context); - case RegionMode::Tracelet: return selectTracelet(context); + case RegionMode::Tracelet: return selectTracelet(context, 0); case RegionMode::Legacy: always_assert(t); return selectTraceletLegacy(*t); case RegionMode::HotBlock: @@ -460,6 +520,15 @@ std::string show(RegionContext::PreLiveAR ar) { ).str(); } +std::string show(const RegionContext& ctx) { + std::string ret; + folly::toAppend(ctx.func->fullName()->data(), "@", ctx.bcOffset, "\n", &ret); + for (auto& t : ctx.liveTypes) folly::toAppend(" ", show(t), "\n", &ret); + for (auto& ar : ctx.preLiveARs) folly::toAppend(" ", show(ar), "\n", &ret); + + return ret; +} + std::string show(const RegionDesc::Block& b) { std::string ret{"Block "}; folly::toAppend( diff --git a/hphp/runtime/vm/jit/region-selection.h b/hphp/runtime/vm/jit/region-selection.h index 2ee9a60cdaf..4cc914e4f20 100644 --- a/hphp/runtime/vm/jit/region-selection.h +++ b/hphp/runtime/vm/jit/region-selection.h @@ -167,14 +167,7 @@ class RegionDesc::Block { typedef flat_map KnownFuncMap; public: - explicit Block(const Func* func, Offset start, int length) - : m_func(func) - , m_start(start) - , m_length(length) - , m_inlinedCallee(nullptr) - { - checkInvariants(); - } + explicit Block(const Func* func, Offset start, int length); Block& operator=(const Block&) = delete; @@ -185,7 +178,10 @@ public: const Unit* unit() const { return m_func->unit(); } const Func* func() const { return m_func; } SrcKey start() const { return SrcKey { m_func, m_start }; } + SrcKey last() const { return SrcKey { m_func, m_last }; } int length() const { return 
m_length; } + bool empty() const { return length() == 0; } + bool contains(SrcKey sk) const; /* * Set and get whether or not this block ends with an inlined FCall. Inlined @@ -195,7 +191,6 @@ public: void setInlinedCallee(const Func* callee) { assert(callee); m_inlinedCallee = callee; - checkInvariants(); } const Func* inlinedCallee() const { return m_inlinedCallee; @@ -204,10 +199,12 @@ public: /* * Increase the length of the Block by 1. */ - void addInstruction() { - ++m_length; - checkInvariants(); - } + void addInstruction(); + + /* + * Remove all instructions after sk from the block. + */ + void truncateAfter(SrcKey sk); /* * Add a predicted type to this block. @@ -251,11 +248,14 @@ public: const PostConditions& postConds() const { return m_postConds; } private: - void checkInvariants() const; + void checkInstructions() const; + void checkInstruction(Op op) const; + void checkMetadata() const; private: const Func* m_func; const Offset m_start; + Offset m_last; int m_length; const Func* m_inlinedCallee; @@ -347,6 +347,7 @@ std::string show(RegionDesc::TypePred); std::string show(const RegionDesc::ReffinessPred&); std::string show(RegionContext::LiveType); std::string show(RegionContext::PreLiveAR); +std::string show(const RegionContext&); std::string show(const RegionDesc::Block&); std::string show(const RegionDesc&); diff --git a/hphp/runtime/vm/jit/region-tracelet.cpp b/hphp/runtime/vm/jit/region-tracelet.cpp index bf0be0c379a..a63f7636ce5 100644 --- a/hphp/runtime/vm/jit/region-tracelet.cpp +++ b/hphp/runtime/vm/jit/region-tracelet.cpp @@ -34,53 +34,102 @@ using Transl::RefDeps; TRACE_SET_MOD(region); typedef hphp_hash_set InterpSet; +RegionDescPtr selectTracelet(const RegionContext& ctx, int inlineDepth); namespace { +struct RegionDescIter : public RegionIter { + explicit RegionDescIter(const RegionDesc& region) + : m_blocks(region.blocks) + , m_blockIter(region.blocks.begin()) + , m_sk(m_blockIter == m_blocks.end() ? 
SrcKey() : (*m_blockIter)->start()) + {} + + bool finished() const { return m_blockIter == m_blocks.end(); } + + SrcKey sk() const { + assert(!finished()); + return m_sk; + } + + void advance() { + assert(!finished()); + assert(m_sk.func() == (*m_blockIter)->func()); + + if (m_sk == (*m_blockIter)->last()) { + ++m_blockIter; + if (!finished()) m_sk = (*m_blockIter)->start(); + } else { + m_sk.advance(); + } + } + + private: + const smart::vector& m_blocks; + smart::vector::const_iterator m_blockIter; + SrcKey m_sk; +}; + struct RegionFormer { - RegionFormer(const RegionContext& ctx, InterpSet& interp); + RegionFormer(const RegionContext& ctx, InterpSet& interp, int inlineDepth); RegionDescPtr go(); private: const RegionContext& m_ctx; InterpSet& m_interp; - const Func* m_curFunc; - const Unit* m_curUnit; SrcKey m_sk; const SrcKey m_startSk; NormalizedInstruction m_inst; RegionDescPtr m_region; RegionDesc::Block* m_curBlock; bool m_blockFinished; - int m_pendingLiterals; IRTranslator m_irTrans; HhbcTranslator& m_ht; Unit::MetaHandle m_metaHand; - ActRecState m_arState; + smart::vector m_arStates; RefDeps m_refDeps; + const int m_inlineDepth; + + const Func* curFunc() const; + const Unit* curUnit() const; + int inliningDepth() const; bool prepareInstruction(); void addInstruction(); bool consumeInput(int i, const Transl::InputInfo& ii); + bool tryInline(); void recordDependencies(); + void truncateLiterals(); }; -RegionFormer::RegionFormer(const RegionContext& ctx, InterpSet& interp) +RegionFormer::RegionFormer(const RegionContext& ctx, InterpSet& interp, + int inlineDepth) : m_ctx(ctx) , m_interp(interp) - , m_curFunc(ctx.func) - , m_curUnit(m_curFunc->unit()) - , m_sk(m_curFunc, ctx.bcOffset) + , m_sk(ctx.func, ctx.bcOffset) , m_startSk(m_sk) , m_region(smart::make_unique()) - , m_curBlock(m_region->addBlock(m_curFunc, m_sk.offset(), 0)) + , m_curBlock(m_region->addBlock(ctx.func, m_sk.offset(), 0)) , m_blockFinished(false) - , m_pendingLiterals(0) , m_irTrans(ctx.bcOffset, ctx.spOffset, ctx.func) , m_ht(m_irTrans.hhbcTrans()) + , m_arStates(1) + , m_inlineDepth(inlineDepth) { } +const Func* RegionFormer::curFunc() const { + return m_ht.curFunc(); +} + +const Unit* RegionFormer::curUnit() const { + return m_ht.curUnit(); +} + +int RegionFormer::inliningDepth() const { + return m_inlineDepth + m_ht.inliningDepth(); +} + RegionDescPtr RegionFormer::go() { uint32_t numJmps = 0; for (auto const& lt : m_ctx.liveTypes) { @@ -95,7 +144,6 @@ RegionDescPtr RegionFormer::go() { while (true) { if (!prepareInstruction()) break; - Transl::annotate(&m_inst); // Instead of translating a Jmp, go to its destination. if (m_inst.op() == OpJmp && m_inst.imm[0].u_BA > 0 && @@ -105,13 +153,38 @@ RegionDescPtr RegionFormer::go() { m_sk.setOffset(m_sk.offset() + m_inst.imm[0].u_BA); m_blockFinished = true; - m_ht.setBcOff(m_sk.offset(), false); continue; } + m_curBlock->setKnownFunc(m_sk, m_inst.funcd); + m_inst.interp = m_interp.count(m_sk); auto const doPrediction = Transl::outputIsPredicted(m_startSk, m_inst); + if (tryInline()) { + // If m_inst is an FCall and the callee is suitable for inlining, we can + // translate the callee and potentially use its return type to extend the + // tracelet. 
+ + auto callee = m_inst.funcd; + FTRACE(1, "\nselectTracelet starting inlined call from {} to " + "{} with stack:\n{}\n", curFunc()->fullName()->data(), + callee->fullName()->data(), m_ht.showStack()); + auto returnSk = m_inst.nextSk(); + auto returnFuncOff = returnSk.offset() - curFunc()->base(); + + m_arStates.back().pop(); + m_arStates.emplace_back(); + m_curBlock->setInlinedCallee(callee); + m_ht.beginInlining(m_inst.imm[0].u_IVA, callee, returnFuncOff); + m_metaHand = Unit::MetaHandle(); + + m_sk = m_ht.curSrcKey(); + m_blockFinished = true; + continue; + } + + auto const inlineReturn = m_ht.isInlining() && isRet(m_inst.op()); try { m_irTrans.translateInstr(m_inst); } catch (const FailedIRGen& exn) { @@ -125,10 +198,23 @@ RegionDescPtr RegionFormer::go() { if (m_inst.breaksTracelet) break; - if (isFCallStar(m_inst.op())) m_arState.pop(); + if (inlineReturn) { + // If we just translated an inlined RetC, grab the updated SrcKey from + // m_ht and clean up. + m_metaHand = Unit::MetaHandle(); + m_sk = m_ht.curSrcKey().advanced(curUnit()); + m_arStates.pop_back(); + m_blockFinished = true; + continue; + } else { + assert(m_sk.func() == m_ht.curFunc()); + } + + if (isFCallStar(m_inst.op())) m_arStates.back().pop(); // Advance sk and check the prediction, if any. - m_sk.advance(m_curBlock->unit()); + m_sk.advance(curUnit()); + if (doPrediction) m_ht.checkTypeStack(0, m_inst.outPred, m_sk.offset()); } @@ -137,6 +223,9 @@ RegionDescPtr RegionFormer::go() { if (m_region && !m_region->blocks.empty()) recordDependencies(); + assert(!m_ht.isInlining()); + + truncateLiterals(); return std::move(m_region); } @@ -149,14 +238,15 @@ bool RegionFormer::prepareInstruction() { m_inst.~NormalizedInstruction(); new (&m_inst) NormalizedInstruction(); m_inst.source = m_sk; - m_inst.m_unit = m_curUnit; + m_inst.m_unit = curUnit(); m_inst.breaksTracelet = Transl::opcodeBreaksBB(m_inst.op()) || (Transl::dontGuardAnyInputs(m_inst.op()) && Transl::opcodeChangesPC(m_inst.op())); m_inst.changesPC = Transl::opcodeChangesPC(m_inst.op()); - m_inst.funcd = m_arState.knownFunc(); + m_inst.funcd = m_arStates.back().knownFunc(); Transl::populateImmediates(m_inst); Transl::preInputApplyMetaData(m_metaHand, &m_inst); + m_ht.setBcOff(m_sk.offset(), false); Transl::InputInfos inputInfos; getInputs(m_startSk, m_inst, inputInfos, m_curBlock->func(), [&](int i) { @@ -187,10 +277,12 @@ bool RegionFormer::prepareInstruction() { size_t entryArDelta = instrSpToArDelta((Op*)m_inst.pc()) - (m_ht.spOffset() - m_ctx.spOffset); try { - m_inst.preppedByRef = m_arState.checkByRef(argNum, entryArDelta, - &m_refDeps); + m_inst.preppedByRef = m_arStates.back().checkByRef(argNum, entryArDelta, + &m_refDeps); } catch (const Transl::UnknownInputExc& exn) { // We don't have a guess for the current ActRec. + FTRACE(1, "selectTracelet: don't have reffiness guess for {}\n", + m_inst.toString()); return false; } addInstruction(); @@ -199,35 +291,149 @@ bool RegionFormer::prepareInstruction() { addInstruction(); } - if (isFPush(m_inst.op())) m_arState.pushFunc(m_inst); + if (isFPush(m_inst.op())) m_arStates.back().pushFunc(m_inst); return true; } /* - * Add the current instruction to the region. Instructions that push constant - * values aren't pushed unless more instructions come after them. + * Add the current instruction to the region. 
*/ void RegionFormer::addInstruction() { if (m_blockFinished) { - m_curBlock = m_region->addBlock(m_curFunc, m_inst.source.offset(), 0); + FTRACE(2, "selectTracelet adding new block at {} after:\n{}\n", + showShort(m_sk), show(*m_curBlock)); + m_curBlock = m_region->addBlock(curFunc(), m_sk.offset(), 0); m_blockFinished = false; } - auto op = m_curUnit->getOpcode(m_inst.source.offset()); - if (isLiteral(op) || isThisSelfOrParent(op)) { - // Don't finish a region with literal values or values that have a class - // related to the current context class. They produce valuable information - // for optimizations that's lost across region boundaries. - ++m_pendingLiterals; - } else { - // This op isn't a literal so add any that are pending before the current - // instruction. - for (; m_pendingLiterals; --m_pendingLiterals) { - m_curBlock->addInstruction(); + FTRACE(2, "selectTracelet adding instruction {}\n", m_inst.toString()); + m_curBlock->addInstruction(); +} + +bool RegionFormer::tryInline() { + if (!RuntimeOption::RepoAuthoritative || m_inst.op() != OpFCall) return false; + + auto refuse = [this](const std::string& str) { + FTRACE(2, "selectTracelet not inlining {}: {}\n", + m_inst.toString(), str); + return false; + }; + + if (inliningDepth() >= RuntimeOption::EvalHHIRInliningMaxDepth) { + return refuse("inlining level would be too deep"); + } + + auto callee = m_inst.funcd; + if (!callee || callee->info()) { + return refuse("don't know callee or callee is builtin"); + } + + if (callee == curFunc()) { + return refuse("call is recursive"); + } + + if (m_inst.imm[0].u_IVA != callee->numParams()) { + return refuse("numArgs doesn't match numParams of callee"); + } + + // For analysis purposes, we require that the FPush* instruction is in the + // same region. + auto fpi = curFunc()->findFPI(m_sk.offset()); + const SrcKey pushSk{curFunc(), fpi->m_fpushOff}; + int pushBlock = -1; + auto& blocks = m_region->blocks; + for (unsigned i = 0; i < blocks.size(); ++i) { + if (blocks[i]->contains(pushSk)) { + pushBlock = i; + break; + } + } + if (pushBlock == -1) { + return refuse("FPush* is not in the current region"); + } + + // Calls invalidate all live SSATmps, so don't allow any in the fpi region + auto findFCall = [&] { + for (unsigned i = pushBlock; i < blocks.size(); ++i) { + auto& block = *blocks[i]; + auto sk = i == pushBlock ? pushSk.advanced() : block.start(); + while (sk <= block.last()) { + if (sk == m_sk) return false; + + auto op = sk.op(); + if (isFCallStar(op) || op == OpFCallBuiltin) return true; + sk.advance(); + } } - m_curBlock->addInstruction(); + not_reached(); + }; + if (findFCall()) { + return refuse("fpi region contains another call"); } + + switch (pushSk.op()) { + case OpFPushClsMethodD: + if (callee->mayHaveThis()) return refuse("callee may have this pointer"); + // fallthrough + case OpFPushFuncD: + case OpFPushObjMethodD: + case OpFPushCtorD: + case OpFPushCtor: + break; + + default: + return refuse(folly::format("unsupported push op {}", + opcodeToName(pushSk.op())).str()); + } + + // Set up the region context, mapping stack slots in the caller to locals in + // the callee. + RegionContext ctx; + ctx.func = callee; + ctx.bcOffset = callee->base(); + ctx.spOffset = callee->isGenerator() ? 0 : callee->numSlotsInFrame(); + for (int i = 0; i < callee->numParams(); ++i) { + // DataTypeGeneric is used because we're just passing the locals into the + // callee. It's up to the callee to constraint further if needed. 
+ auto type = m_ht.topType(i, DataTypeGeneric); + uint32_t paramIdx = callee->numParams() - 1 - i; + typedef RegionDesc::Location Location; + ctx.liveTypes.push_back({Location::Local{paramIdx}, type}); + } + + FTRACE(1, "selectTracelet analyzing callee {} with context:\n{}", + callee->fullName()->data(), show(ctx)); + auto region = selectTracelet(ctx, m_inlineDepth + 1); + if (!region) { + return refuse("failed to select region in callee"); + } + + RegionDescIter iter(*region); + return shouldIRInline(curFunc(), callee, iter); +} + +void RegionFormer::truncateLiterals() { + if (!m_region || m_region->blocks.empty() || + m_region->blocks.back()->empty()) return; + + // Don't finish a region with literal values or values that have a class + // related to the current context class. They produce valuable information + // for optimizations that's lost across region boundaries. + auto& lastBlock = *m_region->blocks.back(); + auto sk = lastBlock.start(); + auto endSk = sk; + auto unit = lastBlock.unit(); + for (int i = 0, len = lastBlock.length(); i < len; ++i, sk.advance(unit)) { + auto const op = sk.op(); + if (!isLiteral(op) && !isThisSelfOrParent(op)) { + if (i == len - 1) return; + endSk = sk; + } + } + FTRACE(1, "selectTracelet truncating block after offset {}:\n{}\n", + endSk.offset(), show(lastBlock)); + lastBlock.truncateAfter(endSk); } /* @@ -296,22 +502,28 @@ void RegionFormer::recordDependencies() { /* * Region selector that attempts to form the longest possible region using the * given context. The region will be broken before the first instruction that - * attempts to consume an input with an insufficiently precise type. + * attempts to consume an input with an insufficiently precise type, or after + * most control flow instructions. * - * Always returns a RegionDesc containing at least one instruction. + * May return a null region if the given RegionContext doesn't have + * enough information to translate at least one instruction. */ -RegionDescPtr selectTracelet(const RegionContext& ctx) { +RegionDescPtr selectTracelet(const RegionContext& ctx, int inlineDepth) { InterpSet interp; RegionDescPtr region; uint32_t tries = 1; - while (!(region = RegionFormer(ctx, interp).go())) { + while (!(region = RegionFormer(ctx, interp, inlineDepth).go())) { ++tries; } - FTRACE(1, "regionTracelet returning after {} tries:\n{}\n", - tries, show(*region)); - assert(region->blocks.size() > 0 && region->blocks.front()->length() > 0); + if (region->blocks.size() == 0 || region->blocks.front()->length() == 0) { + FTRACE(1, "selectTracelet giving up after {} tries\n", tries); + return RegionDescPtr { nullptr }; + } + + FTRACE(1, "selectTracelet returning after {} tries:\n{}\n", + tries, show(*region)); if (region->blocks.back()->length() == 0) { // If the final block is empty because it would've only contained // instructions producing literal values, kill it. 
diff --git a/hphp/runtime/vm/jit/trace-builder.cpp b/hphp/runtime/vm/jit/trace-builder.cpp index 707a4c286af..ee8ae7d7ec8 100644 --- a/hphp/runtime/vm/jit/trace-builder.cpp +++ b/hphp/runtime/vm/jit/trace-builder.cpp @@ -894,6 +894,7 @@ SSATmp* TraceBuilder::optimizeWork(IRInstruction* inst, if (m_inlineSavedStates.size() && !m_needsFPAnchor) { if (inst->isNative() || inst->mayRaiseError()) { m_needsFPAnchor = true; + always_assert(m_fpValue != nullptr); gen(InlineFPAnchor, m_fpValue); FTRACE(2, "Anchor for: {}\n", inst->toString()); } @@ -985,7 +986,8 @@ void TraceBuilder::reoptimize() { FTRACE(5, "ReOptimize:vvvvvvvvvvvvvvvvvvvv\n"); SCOPE_EXIT { FTRACE(5, "ReOptimize:^^^^^^^^^^^^^^^^^^^^\n"); }; assert(m_curTrace == m_mainTrace.get()); - assert(m_savedTraces.size() == 0); + assert(m_savedTraces.empty()); + assert(m_inlineSavedStates.empty()); m_enableCse = RuntimeOption::EvalHHIRCse; m_enableSimplification = RuntimeOption::EvalHHIRSimplification; diff --git a/hphp/runtime/vm/jit/translator.cpp b/hphp/runtime/vm/jit/translator.cpp index 04b5ced6aec..dc03a4d9dd5 100755 --- a/hphp/runtime/vm/jit/translator.cpp +++ b/hphp/runtime/vm/jit/translator.cpp @@ -1646,8 +1646,9 @@ static void addMVectorInputs(NormalizedInstruction& ni, * ids (i.e. string ids), this analysis step is going to have to be * a bit wiser. */ - const uint8_t* vec = ni.immVec.vec(); - const LocationCode lcode = LocationCode(*vec++); + auto opPtr = (const Op*)ni.source.pc(); + auto const location = getMLocation(opPtr); + auto const lcode = location.lcode; const bool trailingClassRef = lcode == LSL || lcode == LSC; @@ -1657,9 +1658,8 @@ static void addMVectorInputs(NormalizedInstruction& ni, inputs.emplace_back(Location(Location::This)); } else { assert(lcode == LL || lcode == LGL || lcode == LNL); - int numImms = numLocationCodeImms(lcode); - for (int i = 0; i < numImms; ++i) { - push_local(decodeVariableSizeImm(&vec)); + if (location.hasImm()) { + push_local(location.imm); } } } break; @@ -1667,7 +1667,8 @@ static void addMVectorInputs(NormalizedInstruction& ni, if (lcode == LSL) { // We'll get the trailing stack value after pushing all the // member vector elements. - push_local(decodeVariableSizeImm(&vec)); + assert(location.hasImm()); + push_local(location.imm); } else { push_stack(); } @@ -1683,14 +1684,14 @@ static void addMVectorInputs(NormalizedInstruction& ni, } // Now push all the members in the correct order. - while (vec - ni.immVec.vec() < ni.immVec.size()) { - const MemberCode mcode = MemberCode(*vec++); + for (auto const& member : getMVector(opPtr)) { + auto const mcode = member.mcode; ni.immVecM.push_back(mcode); if (mcode == MW) { // No stack and no locals. - } else if (memberCodeHasImm(mcode)) { - int64_t imm = decodeMemberCodeImm(&vec, mcode); + } else if (member.hasImm()) { + int64_t imm = member.imm; if (memberCodeImmIsLoc(mcode)) { push_local(imm); } else if (memberCodeImmIsString(mcode)) { @@ -1711,7 +1712,6 @@ static void addMVectorInputs(NormalizedInstruction& ni, ni.immVecClasses.resize(ni.immVecM.size()); - assert(vec - ni.immVec.vec() == ni.immVec.size()); assert(stackCount == ni.immVec.numStackValues()); SKTRACE(2, ni.source, "M-vector using %d hidden stack " @@ -3058,7 +3058,7 @@ void Translator::analyzeCallee(TraceletContext& tas, /* * If the IR can't inline this, give up now. 
Below we're going to - * start making changes to the traclet that is making the call + * start making changes to the tracelet that is making the call * (potentially increasing the specificity of guards), and we don't * want to do that unnecessarily. */ @@ -3702,6 +3702,7 @@ void Translator::traceStart(Offset bcStartOffset) { } void Translator::traceEnd() { + assert(!m_irTrans->hhbcTrans().isInlining()); m_irTrans->hhbcTrans().end(); FTRACE(1, "{}{:-^40}{}\n", color(ANSI_COLOR_BLACK, ANSI_BGCOLOR_GREEN), diff --git a/hphp/runtime/vm/srckey.cpp b/hphp/runtime/vm/srckey.cpp index df1d465e58b..ebb0ab0a2c4 100644 --- a/hphp/runtime/vm/srckey.cpp +++ b/hphp/runtime/vm/srckey.cpp @@ -18,8 +18,14 @@ #include "folly/Format.h" +#include "hphp/runtime/vm/hhbc.h" + namespace HPHP { +std::string SrcKey::showInst() const { + return instrToString(reinterpret_cast<const Op*>(unit()->at(offset()))); +} + std::string show(SrcKey sk) { auto func = sk.func(); auto unit = sk.unit(); @@ -34,6 +40,12 @@ std::string show(SrcKey sk) { (unsigned long long)sk.getFuncId(), sk.offset()).str(); } +std::string showShort(SrcKey sk) { + return folly::format("{}(id 0x{:#x})@{}", + sk.func()->fullName()->data(), sk.getFuncId(), + sk.offset()).str(); +} + void sktrace(SrcKey sk, const char *fmt, ...) { if (!Trace::enabled) return; diff --git a/hphp/runtime/vm/srckey.h b/hphp/runtime/vm/srckey.h index 61e9556c83a..11908239195 100644 --- a/hphp/runtime/vm/srckey.h +++ b/hphp/runtime/vm/srckey.h @@ -81,6 +81,16 @@ struct SrcKey : private boost::totally_ordered<SrcKey> { return func()->unit(); } + Op op() const { + return unit()->getOpcode(offset()); + } + + PC pc() const { + return unit()->at(offset()); + } + + std::string showInst() const; + void setOffset(Offset o) { m_offset = o; } @@ -96,15 +106,15 @@ struct SrcKey : private boost::totally_ordered<SrcKey> { * will advance past the end of the function, and potentially * contain an invalid bytecode offset. */ - void advance(const Unit* u) { - m_offset += instrLen((Op*)u->at(offset())); + void advance(const Unit* u = nullptr) { + m_offset += instrLen((Op*)(u ? u : unit())->at(offset())); } /* * Return a SrcKey representing the next instruction, without * mutating this SrcKey. */ - SrcKey advanced(const Unit* u) const { + SrcKey advanced(const Unit* u = nullptr) const { auto tmp = *this; tmp.advance(u); return tmp; } @@ -116,8 +126,8 @@ struct SrcKey : private boost::totally_ordered<SrcKey> { } bool operator<(const SrcKey& r) const { - return std::make_tuple(offset(), getFuncId()) < - std::make_tuple(r.offset(), r.getFuncId()); + return std::make_tuple(getFuncId(), offset()) < + std::make_tuple(r.getFuncId(), r.offset()); } std::string getSymbol() const; @@ -138,6 +148,7 @@ typedef hphp_hash_set<SrcKey, SrcKey::Hasher> SrcKeySet; ////////////////////////////////////////////////////////////////////// std::string show(SrcKey sk); +std::string showShort(SrcKey sk); void sktrace(SrcKey sk, const char *fmt, ...) ATTRIBUTE_PRINTF(2,3); #define SKTRACE(level, sk, ...) \
diff --git a/hphp/test/quick/vector-clscns.php b/hphp/test/quick/vector-clscns.php new file mode 100644 index 00000000000..cf08f337688 --- /dev/null +++ b/hphp/test/quick/vector-clscns.php @@ -0,0 +1,10 @@ + 'success'))); diff --git a/hphp/test/quick/vector-clscns.php.expectf b/hphp/test/quick/vector-clscns.php.expectf new file mode 100644 index 00000000000..abaf45c2d7d --- /dev/null +++ b/hphp/test/quick/vector-clscns.php.expectf @@ -0,0 +1 @@ +HipHop Fatal error: Couldn't find constant c::BAR in %s/test/quick/vector-clscns.php on line 8 diff --git a/hphp/test/slow/ir_inlining/recurse.php b/hphp/test/slow/ir_inlining/recurse.php new file mode 100644 index 00000000000..f0ef17ac105 --- /dev/null +++ b/hphp/test/slow/ir_inlining/recurse.php @@ -0,0 +1,7 @@ +