/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com)     |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,     |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/runtime/vm/jit/translator.h"

#include <folly/Conv.h>
#include <folly/MapUtil.h>

#include "hphp/util/map-walker.h"
#include "hphp/util/ringbuffer.h"
#include "hphp/util/timer.h"
#include "hphp/util/trace.h"

#include "hphp/runtime/base/arch.h"
#include "hphp/runtime/base/repo-auth-type-codec.h"
#include "hphp/runtime/base/runtime-option.h"
#include "hphp/runtime/base/stats.h"
#include "hphp/runtime/base/types.h"
#include "hphp/runtime/base/unit-cache.h"
#include "hphp/runtime/ext/ext_collections.h"
#include "hphp/runtime/ext/ext_generator.h"
#include "hphp/runtime/vm/bytecode.h"
#include "hphp/runtime/vm/hhbc.h"
#include "hphp/runtime/vm/runtime.h"
#include "hphp/runtime/vm/treadmill.h"
#include "hphp/runtime/vm/type-profile.h"
#include "hphp/runtime/vm/bc-pattern.h"

#include "hphp/runtime/vm/jit/annotation.h"
#include "hphp/runtime/vm/jit/ir-unit.h"
#include "hphp/runtime/vm/jit/mc-generator.h"
#include "hphp/runtime/vm/jit/normalized-instruction.h"
#include "hphp/runtime/vm/jit/print.h"
#include "hphp/runtime/vm/jit/prof-data.h"
#include "hphp/runtime/vm/jit/punt.h"
#include "hphp/runtime/vm/jit/region-selection.h"
#include "hphp/runtime/vm/jit/timer.h"
#include "hphp/runtime/vm/jit/translator-inline.h"
#include "hphp/runtime/vm/jit/type.h"
#include "hphp/runtime/vm/jit/inlining-decider.h"
#include "hphp/runtime/vm/jit/translate-region.h"
#include "hphp/runtime/vm/jit/irgen.h"
namespace HPHP { namespace jit {

///////////////////////////////////////////////////////////////////////////////

Lease Translator::s_writeLease;
int locPhysicalOffset(Location l, const Func* f) {
  f = f ? f : liveFunc();
  assert_not_implemented(l.space == Location::Stack ||
                         l.space == Location::Local ||
                         l.space == Location::Iter);
  int localsToSkip = l.space == Location::Iter ? f->numLocals() : 0;
  int iterInflator = l.space == Location::Iter ? kNumIterCells : 1;
  return -((l.offset + 1) * iterInflator + localsToSkip);
}
static uint32_t m_w = 1;    /* must not be zero */
static uint32_t m_z = 1;    /* must not be zero */

static uint32_t get_random() {
  m_z = 36969 * (m_z & 65535) + (m_z >> 16);
  m_w = 18000 * (m_w & 65535) + (m_w >> 16);
  return (m_z << 16) + m_w;  /* 32-bit result */
}
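/*
 * Try to resolve a property access to a fixed slot at translation time.
 * Returns an empty PropInfo whenever the property (or its base class)
 * can't be pinned down from the type information available here.
 */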
PropInfo getPropertyOffset(const NormalizedInstruction& ni,
                           const Class* ctx, const Class*& baseClass,
                           const MInstrInfo& mii,
                           unsigned mInd, unsigned iInd) {
  auto const baseIndex = mii.valCount();
  baseClass = ni.inputs[baseIndex]->rtt < Type::Obj
    ? ni.inputs[baseIndex]->rtt.getClass()
    : nullptr;
  if (!baseClass) return PropInfo();

  auto keyType = ni.inputs[iInd]->rtt;
  if (!keyType.isConst(Type::Str)) return PropInfo();
  auto const name = keyType.strVal();

  // If we are not in repo-authoritative mode, we need to check that
  // baseClass cannot change in between requests.
  if (!RuntimeOption::RepoAuthoritative ||
      !(baseClass->preClass()->attrs() & AttrUnique)) {
    if (!ctx) return PropInfo();
    if (!ctx->classof(baseClass)) {
      if (baseClass->classof(ctx)) {
        // baseClass can change on us in between requests, but since
        // ctx is an ancestor of baseClass we can make the weaker
        // assumption that the object is an instance of ctx.
        baseClass = ctx;
      } else {
        // baseClass can change on us in between requests and it is
        // not related to ctx, so bail out.
        return PropInfo();
      }
    }
  }

  // Look up the index of the property based on ctx and baseClass.
  auto const lookup = baseClass->getDeclPropIndex(ctx, name);
  auto const idx = lookup.prop;

  // If we couldn't find a property that is accessible in the current context,
  // bail out.
  if (idx == kInvalidSlot || !lookup.accessible) return PropInfo();

  // If it's a declared property we're good to go: even if a subclass redefines
  // an accessible property with the same name it's guaranteed to be at the same
  // offset.
  return PropInfo(
    baseClass->declPropOffset(idx),
    baseClass->declPropRepoAuthType(idx)
  );
}
PropInfo getFinalPropertyOffset(const NormalizedInstruction& ni,
                                Class* ctx, const MInstrInfo& mii) {
  unsigned mInd = ni.immVecM.size() - 1;
  unsigned iInd = mii.valCount() + 1 + mInd;

  const Class* cls = nullptr;
  return getPropertyOffset(ni, ctx, cls, mii, mInd, iInd);
}
///////////////////////////////////////////////////////////////////////////////

/*
 * Pair of (predicted type, confidence).
 *
 * A folly::none prediction means mixed/unknown.
 */
using TypePred = std::pair<MaybeDataType, double>;
MaybeDataType predictionForRepoAuthType(RepoAuthType repoTy) {
  using T = RepoAuthType::Tag;
  switch (repoTy.tag()) {
  case T::OptBool:  return KindOfBoolean;
  case T::OptInt:   return KindOfInt64;
  case T::OptDbl:   return KindOfDouble;
  case T::OptRes:   return KindOfResource;

  default:          return folly::none;
  }
}
TypePred predictMVec(const NormalizedInstruction* ni) {
  auto info = getFinalPropertyOffset(*ni,
                                     ni->func()->cls(),
                                     getMInstrInfo(ni->mInstrOp()));
  if (info.offset != -1) {
    auto const predTy = predictionForRepoAuthType(info.repoAuthType);
    if (predTy) {
      FTRACE(1, "prediction for CGetM prop: {}, hphpc\n",
             static_cast<int>(*predTy));
      return std::make_pair(predTy, 1.0);
    }
    // If the RepoAuthType converts to an exact data type, there's no
    // point in having a prediction because we know its type with 100%
    // accuracy. Disable it in that case here.
    if (convertToDataType(info.repoAuthType)) {
      return std::make_pair(folly::none, 0.0);
    }
  }

  auto& immVec = ni->immVec;
  StringData* name;
  MemberCode mc;
  if (immVec.decodeLastMember(ni->m_unit, name, mc)) {
    auto pred = predictType(TypeProfileKey(mc, name));
    TRACE(1, "prediction for CGetM %s named %s: %d, %f\n",
          mc == MET ? "elt" : "prop",
          name->data(),
          pred.first ? *pred.first : -1,
          pred.second);
    return pred;
  }

  return std::make_pair(folly::none, 0.0);
}
/*
 * Provide a best guess for the output type of this instruction.
 */
MaybeDataType predictOutputs(const NormalizedInstruction* ni) {
  if (!RuntimeOption::EvalJitTypePrediction) return folly::none;

  if (RuntimeOption::EvalJitStressTypePredPercent &&
      RuntimeOption::EvalJitStressTypePredPercent > int(get_random() % 100)) {
    DataType dt;
    while (true) {
      dt = getDataTypeValue(get_random() % (kMaxDataType + 1));
      switch (dt) {
        case KindOfNull:
        case KindOfBoolean:
        case KindOfInt64:
        case KindOfDouble:
        case KindOfString:
        case KindOfArray:
        case KindOfObject:
        case KindOfResource:
          break;
        // KindOfRef and KindOfUninit can't happen for lots of predicted types.
        case KindOfRef:
        case KindOfUninit:
        case KindOfStaticString:
        default:
          continue;
      }
      break;
    }
    return dt;
  }
  if (ni->op() == OpCns ||
      ni->op() == OpCnsE ||
      ni->op() == OpCnsU) {
    StringData* sd = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
    auto const tv = Unit::lookupCns(sd);
    if (tv) return tv->m_type;
  }
  if (ni->op() == OpMod) {
    // x % 0 returns boolean false, so we don't know for certain, but it's
    // probably an int.
    return KindOfInt64;
  }
  if (ni->op() == OpPow) {
    // int ** int => int, unless result > 2 ** 52, then it's a double
    // anything ** double => double
    // double ** anything => double
    // anything ** anything => int
    auto lhs = ni->inputs[0]->rtt;
    auto rhs = ni->inputs[1]->rtt;

    if (lhs <= Type::Int && rhs <= Type::Int) {
      // Best guess, since overflowing isn't common
      return KindOfInt64;
    }

    if (lhs <= Type::Dbl || rhs <= Type::Dbl) {
      return KindOfDouble;
    }

    return KindOfInt64;
  }
  if (ni->op() == OpSqrt) {
    // sqrt returns a double, unless you pass something nasty to it.
    return KindOfDouble;
  }
  if (ni->op() == OpDiv) {
    // Integers can produce integers if there's no residue, but $i / $j in
    // general produces a double. $i / 0 produces boolean false, so we have
    // to actually check the result.
    return KindOfDouble;
  }
  if (ni->op() == OpAbs) {
    if (ni->inputs[0]->rtt <= Type::Dbl) {
      return KindOfDouble;
    }

    // some types can't be converted to integers and will return false here
    if (ni->inputs[0]->rtt <= Type::Arr) {
      return KindOfBoolean;
    }

    // If the type is not numeric we need to convert it to a numeric type,
    // a string can be converted to an Int64 or a Double but most other types
    // will end up being integral.
    return KindOfInt64;
  }
  if (ni->op() == OpClsCnsD) {
    const NamedEntityPair& cne =
      ni->unit()->lookupNamedEntityPairId(ni->imm[1].u_SA);
    StringData* cnsName = ni->m_unit->lookupLitstrId(ni->imm[0].u_SA);
    Class* cls = cne.second->getCachedClass();
    if (cls) {
      DataType dt = cls->clsCnsType(cnsName);
      if (dt != KindOfUninit) {
        TRACE(1, "clscnsd: %s:%s prediction type %d\n",
              cne.first->data(), cnsName->data(), dt);
        return dt;
      }
    }
  }
  if (ni->op() == OpSetM) {
    /*
     * SetM pushes null for certain rare combinations of input types, a string
     * if the base was a string, or (most commonly) its first stack input. We
     * mark the output as predicted here and do a very rough approximation of
     * what really happens; most of the time the prediction will be a noop
     * since MInstrTranslator side exits in all uncommon cases.
     */

    auto inType = ni->inputs[0]->rtt;
    auto const inDt = inType.isKnownDataType()
      ? MaybeDataType(inType.toDataType())
      : folly::none;
    // If the base is a string, the output is probably a string. Unless the
    // member code is MW, then we're either going to fatal or promote the
    // string to an array.
    Type baseType;
    switch (ni->immVec.locationCode()) {
      case LGL: case LGC:
      case LNL: case LNC:
      case LSL: case LSC:
        baseType = Type::Gen;
        break;

      default:
        baseType = ni->inputs[1]->rtt;
    }
    if (baseType <= Type::Str && ni->immVecM.size() == 1) {
      return ni->immVecM[0] == MW ? inDt : KindOfString;
    }

    // Otherwise, it's probably the input type.
    return inDt;
  }
  auto const op = ni->op();
  static const double kAccept = 1.0;
  std::pair<MaybeDataType, double> pred = std::make_pair(folly::none, 0.0);

  if (op == OpCGetS) {
    auto nameType = ni->inputs[1]->rtt;
    if (nameType.isConst(Type::Str)) {
      auto propName = nameType.strVal();
      pred = predictType(TypeProfileKey(TypeProfileKey::StaticPropName,
                                        propName));
      TRACE(1, "prediction for static fields named %s: %d, %f\n",
            propName->data(),
            pred.first ? *pred.first : -1,
            pred.second);
    }
  } else if (op == OpCGetM) {
    pred = predictMVec(ni);
  }
  if (pred.second < kAccept) {
    const StringData* const invName
      = ni->op() == Op::FCallD
      ? ni->m_unit->lookupLitstrId(ni->imm[2].u_SA)
      : nullptr;
    if (invName) {
      pred = predictType(TypeProfileKey(TypeProfileKey::MethodName, invName));
      FTRACE(1, "prediction for methods named {}: {}, {:.2}\n",
             invName->data(),
             pred.first ? *pred.first : -1,
             pred.second);
    }
  }
  if (pred.second >= kAccept) {
    FTRACE(1, "accepting prediction of type {}\n",
           pred.first ? *pred.first : -1);
    assert(!pred.first || *pred.first != KindOfUninit);
    return pred.first;
  }
  return folly::none;
}
///////////////////////////////////////////////////////////////////////////////

/*
 * NB: this opcode structure is sparse; it cannot just be indexed by
 * opcode.
 */
using namespace InstrFlags;
static const struct {
  Op op;
  InstrInfo info;
} instrInfoSparse[] = {
  // Op               Inputs            Outputs       OutputTypes   Stack delta
  // --               ------            -------       -----------   -----------

  /*** 1. Basic instructions ***/

  { OpPopA,        {Stack1,           None,         OutNone,          -1 }},
  { OpPopC,        {Stack1|
                    DontGuardStack1,  None,         OutNone,          -1 }},
  { OpPopV,        {Stack1|
                    DontGuardStack1|
                    IgnoreInnerType,  None,         OutNone,          -1 }},
  { OpPopR,        {Stack1|
                    DontGuardStack1|
                    IgnoreInnerType,  None,         OutNone,          -1 }},
  { OpDup,         {Stack1,           StackTop2,    OutSameAsInput,    1 }},
  { OpBox,         {Stack1,           Stack1,       OutVInput,         0 }},
  { OpUnbox,       {Stack1,           Stack1,       OutCInput,         0 }},
  { OpBoxR,        {Stack1,           Stack1,       OutVInput,         0 }},
  { OpUnboxR,      {Stack1,           Stack1,       OutCInput,         0 }},
  /*** 2. Literal and constant instructions ***/

  { OpNull,        {None,             Stack1,       OutNull,           1 }},
  { OpNullUninit,  {None,             Stack1,       OutNullUninit,     1 }},
  { OpTrue,        {None,             Stack1,       OutBooleanImm,     1 }},
  { OpFalse,       {None,             Stack1,       OutBooleanImm,     1 }},
  { OpInt,         {None,             Stack1,       OutInt64,          1 }},
  { OpDouble,      {None,             Stack1,       OutDouble,         1 }},
  { OpString,      {None,             Stack1,       OutStringImm,      1 }},
  { OpArray,       {None,             Stack1,       OutArrayImm,       1 }},
  { OpNewArray,    {None,             Stack1,       OutArray,          1 }},
  { OpNewMixedArray, {None,           Stack1,       OutArray,          1 }},
  { OpNewVArray,   {None,             Stack1,       OutArray,          1 }},
  { OpNewMIArray,  {None,             Stack1,       OutArray,          1 }},
  { OpNewMSArray,  {None,             Stack1,       OutArray,          1 }},
  { OpNewLikeArrayL, {None,           Stack1,       OutArray,          1 }},
  { OpNewPackedArray, {StackN,        Stack1,       OutArray,          0 }},
  { OpNewStructArray, {StackN,        Stack1,       OutArray,          0 }},
  { OpAddElemC,    {StackTop3,        Stack1,       OutArray,         -2 }},
  { OpAddElemV,    {StackTop3,        Stack1,       OutArray,         -2 }},
  { OpAddNewElemC, {StackTop2,        Stack1,       OutArray,         -1 }},
  { OpAddNewElemV, {StackTop2,        Stack1,       OutArray,         -1 }},
  { OpNewCol,      {None,             Stack1,       OutObject,         1 }},
  { OpColAddElemC, {StackTop3,        Stack1,       OutObject,        -2 }},
  { OpColAddNewElemC, {StackTop2,     Stack1,       OutObject,        -1 }},
  { OpCns,         {None,             Stack1,       OutCns,            1 }},
  { OpCnsE,        {None,             Stack1,       OutCns,            1 }},
  { OpCnsU,        {None,             Stack1,       OutCns,            1 }},
  { OpClsCns,      {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpClsCnsD,     {None,             Stack1,       OutPred,           1 }},
  { OpFile,        {None,             Stack1,       OutString,         1 }},
  { OpDir,         {None,             Stack1,       OutString,         1 }},
  { OpNameA,       {Stack1,           Stack1,       OutString,         0 }},
  /*** 3. Operator instructions ***/

  { OpConcat,      {StackTop2,        Stack1,       OutString,        -1 }},
  { OpConcatN,     {StackN,           Stack1,       OutString,         0 }},

  { OpAbs,         {Stack1,           Stack1,       OutPred,           0 }},
  { OpAdd,         {StackTop2,        Stack1,       OutArith,         -1 }},
  { OpSub,         {StackTop2,        Stack1,       OutArith,         -1 }},
  { OpMul,         {StackTop2,        Stack1,       OutArith,         -1 }},
  /* Arithmetic ops that overflow ints to floats */
  { OpAddO,        {StackTop2,        Stack1,       OutArithO,        -1 }},
  { OpSubO,        {StackTop2,        Stack1,       OutArithO,        -1 }},
  { OpMulO,        {StackTop2,        Stack1,       OutArithO,        -1 }},
  /* Div and mod might return boolean false. Sigh. */
  { OpDiv,         {StackTop2,        Stack1,       OutPred,          -1 }},
  { OpMod,         {StackTop2,        Stack1,       OutPred,          -1 }},
  { OpPow,         {StackTop2,        Stack1,       OutPred,          -1 }},
  { OpSqrt,        {Stack1,           Stack1,       OutPred,           0 }},

  { OpXor,         {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpNot,         {Stack1,           Stack1,       OutBoolean,        0 }},
  { OpSame,        {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpNSame,       {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpEq,          {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpNeq,         {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpLt,          {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpLte,         {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpGt,          {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpGte,         {StackTop2,        Stack1,       OutBoolean,       -1 }},

  { OpBitAnd,      {StackTop2,        Stack1,       OutBitOp,         -1 }},
  { OpBitOr,       {StackTop2,        Stack1,       OutBitOp,         -1 }},
  { OpBitXor,      {StackTop2,        Stack1,       OutBitOp,         -1 }},
  { OpBitNot,      {Stack1,           Stack1,       OutBitOp,          0 }},
  { OpShl,         {StackTop2,        Stack1,       OutInt64,         -1 }},
  { OpShr,         {StackTop2,        Stack1,       OutInt64,         -1 }},
  /* Cast instructions */
  { OpCastBool,    {Stack1,           Stack1,       OutBoolean,        0 }},
  { OpCastInt,     {Stack1,           Stack1,       OutInt64,          0 }},
  { OpCastDouble,  {Stack1,           Stack1,       OutDouble,         0 }},
  { OpCastString,  {Stack1,           Stack1,       OutString,         0 }},
  { OpCastArray,   {Stack1,           Stack1,       OutArray,          0 }},
  { OpCastObject,  {Stack1,           Stack1,       OutObject,         0 }},
  { OpInstanceOf,  {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpInstanceOfD, {Stack1,           Stack1,       OutPredBool,       0 }},
  { OpPrint,       {Stack1,           Stack1,       OutInt64,          0 }},
  { OpClone,       {Stack1,           Stack1,       OutObject,         0 }},
  { OpExit,        {Stack1,           Stack1,       OutNull,           0 }},
  { OpFatal,       {Stack1,           None,         OutNone,          -1 }},
  /*** 4. Control flow instructions ***/

  { OpJmp,         {None,             None,         OutNone,           0 }},
  { OpJmpNS,       {None,             None,         OutNone,           0 }},
  { OpJmpZ,        {Stack1,           None,         OutNone,          -1 }},
  { OpJmpNZ,       {Stack1,           None,         OutNone,          -1 }},
  { OpSwitch,      {Stack1,           None,         OutNone,          -1 }},
  { OpSSwitch,     {Stack1,           None,         OutNone,          -1 }},
  /*
   * RetC and RetV are special. Their manipulation of the runtime stack is
   * outside the boundaries of the tracelet abstraction; since they always end
   * a basic block, they behave more like "glue" between BBs than the
   * instructions in the body of a BB.
   *
   * RetC and RetV consume a value from the stack, and this value's type needs
   * to be known at compile-time.
   */
  { OpRetC,        {AllLocals,        None,         OutNone,           0 }},
  { OpRetV,        {AllLocals,        None,         OutNone,           0 }},
  { OpThrow,       {Stack1,           None,         OutNone,          -1 }},
  { OpUnwind,      {None,             None,         OutNone,           0 }},
  /*** 5. Get instructions ***/

  { OpCGetL,       {Local,            Stack1,       OutCInputL,        1 }},
  { OpCGetL2,      {Stack1|Local,     StackIns1,    OutCInputL,        1 }},
  { OpCGetL3,      {StackTop2|Local,  StackIns2,    OutCInputL,        1 }},
  { OpPushL,       {Local,            Stack1|Local, OutCInputL,        1 }},
  { OpCGetN,       {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpCGetG,       {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpCGetS,       {StackTop2,        Stack1,       OutPred,          -1 }},
  { OpCGetM,       {MVector,          Stack1,       OutPred,           1 }},
  { OpVGetL,       {Local,            Stack1|Local, OutVInputL,        1 }},
  { OpVGetN,       {Stack1,           Stack1|Local, OutVUnknown,       0 }},
  // TODO: In pseudo-main, the VGetG instruction invalidates what we know
  // about the types of the locals because it could cause any one of the
  // local variables to become "boxed". We need to add logic to tracelet
  // analysis to deal with this properly.
  { OpVGetG,       {Stack1,           Stack1,       OutVUnknown,       0 }},
  { OpVGetS,       {StackTop2,        Stack1,       OutVUnknown,      -1 }},
  { OpVGetM,       {MVector,          Stack1|Local, OutVUnknown,       1 }},
  { OpAGetC,       {Stack1,           Stack1,       OutClassRef,       0 }},
  { OpAGetL,       {Local,            Stack1,       OutClassRef,       1 }},
  /*** 6. Isset, Empty, and type querying instructions ***/

  { OpAKExists,    {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpIssetL,      {Local,            Stack1,       OutBoolean,        1 }},
  { OpIssetN,      {Stack1,           Stack1,       OutBoolean,        0 }},
  { OpIssetG,      {Stack1,           Stack1,       OutBoolean,        0 }},
  { OpIssetS,      {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpIssetM,      {MVector,          Stack1,       OutBoolean,        1 }},
  { OpEmptyL,      {Local,            Stack1,       OutBoolean,        1 }},
  { OpEmptyN,      {Stack1,           Stack1,       OutBoolean,        0 }},
  { OpEmptyG,      {Stack1,           Stack1,       OutBoolean,        0 }},
  { OpEmptyS,      {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpEmptyM,      {MVector,          Stack1,       OutBoolean,        1 }},
  { OpIsTypeC,     {Stack1|
                    DontGuardStack1,  Stack1,       OutBoolean,        0 }},
  { OpIsTypeL,     {Local,            Stack1,       OutIsTypeL,        1 }},
  /*** 7. Mutator instructions ***/

  { OpSetL,        {Stack1|Local,     Stack1|Local, OutSameAsInput,    0 }},
  { OpSetN,        {StackTop2,        Stack1|Local, OutSameAsInput,   -1 }},
  { OpSetG,        {StackTop2,        Stack1,       OutSameAsInput,   -1 }},
  { OpSetS,        {StackTop3,        Stack1,       OutSameAsInput,   -2 }},
  { OpSetM,        {MVector|Stack1,   Stack1|Local, OutPred,           0 }},
  { OpSetWithRefLM,{MVector|Local,    Local,        OutNone,           0 }},
  { OpSetWithRefRM,{MVector|Stack1,   Local,        OutNone,          -1 }},
  { OpSetOpL,      {Stack1|Local,     Stack1|Local, OutSetOp,          0 }},
  { OpSetOpN,      {StackTop2,        Stack1|Local, OutUnknown,       -1 }},
  { OpSetOpG,      {StackTop2,        Stack1,       OutUnknown,       -1 }},
  { OpSetOpS,      {StackTop3,        Stack1,       OutUnknown,       -2 }},
  { OpSetOpM,      {MVector|Stack1,   Stack1|Local, OutUnknown,        0 }},
  { OpIncDecL,     {Local,            Stack1|Local, OutIncDec,         1 }},
  { OpIncDecN,     {Stack1,           Stack1|Local, OutUnknown,        0 }},
  { OpIncDecG,     {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpIncDecS,     {StackTop2,        Stack1,       OutUnknown,       -1 }},
  { OpIncDecM,     {MVector,          Stack1|Local, OutUnknown,        1 }},
  { OpBindL,       {Stack1|Local|
                    IgnoreInnerType,  Stack1|Local, OutSameAsInput,    0 }},
  { OpBindN,       {StackTop2,        Stack1|Local, OutSameAsInput,   -1 }},
  { OpBindG,       {StackTop2,        Stack1,       OutSameAsInput,   -1 }},
  { OpBindS,       {StackTop3,        Stack1,       OutSameAsInput,   -2 }},
  { OpBindM,       {MVector|Stack1,   Stack1|Local, OutSameAsInput,    0 }},
  { OpUnsetL,      {Local,            Local,        OutNone,           0 }},
  { OpUnsetN,      {Stack1,           Local,        OutNone,          -1 }},
  { OpUnsetG,      {Stack1,           None,         OutNone,          -1 }},
  { OpUnsetM,      {MVector,          Local,        OutNone,           0 }},
  /*** 8. Call instructions ***/

  { OpFPushFunc,   {Stack1,           FStack,       OutFDesc,
                                                     kNumActRecCells - 1 }},
  { OpFPushFuncD,  {None,             FStack,       OutFDesc,
                                                     kNumActRecCells }},
  { OpFPushFuncU,  {None,             FStack,       OutFDesc,
                                                     kNumActRecCells }},
  { OpFPushObjMethod,
                   {StackTop2,        FStack,       OutFDesc,
                                                     kNumActRecCells - 2 }},
  { OpFPushObjMethodD,
                   {Stack1,           FStack,       OutFDesc,
                                                     kNumActRecCells - 1 }},
  { OpFPushClsMethod,
                   {StackTop2,        FStack,       OutFDesc,
                                                     kNumActRecCells - 2 }},
  { OpFPushClsMethodF,
                   {StackTop2,        FStack,       OutFDesc,
                                                     kNumActRecCells - 2 }},
  { OpFPushClsMethodD,
                   {None,             FStack,       OutFDesc,
                                                     kNumActRecCells }},
  { OpFPushCtor,   {Stack1,           Stack1|FStack,OutObject,
                                                     kNumActRecCells }},
  { OpFPushCtorD,  {None,             Stack1|FStack,OutObject,
                                                     kNumActRecCells + 1 }},
  { OpFPushCufIter,{None,             FStack,       OutFDesc,
                                                     kNumActRecCells }},
  { OpFPushCuf,    {Stack1,           FStack,       OutFDesc,
                                                     kNumActRecCells - 1 }},
  { OpFPushCufF,   {Stack1,           FStack,       OutFDesc,
                                                     kNumActRecCells - 1 }},
  { OpFPushCufSafe,{StackTop2|DontGuardAny,
                    StackTop2|FStack, OutFPushCufSafe,
                                                     kNumActRecCells }},
  { OpFPassCW,     {FuncdRef,         None,         OutSameAsInput,    0 }},
  { OpFPassCE,     {FuncdRef,         None,         OutSameAsInput,    0 }},
  { OpFPassV,      {Stack1|FuncdRef,  Stack1,       OutUnknown,        0 }},
  { OpFPassR,      {Stack1|FuncdRef,  Stack1,       OutFInputR,        0 }},
  { OpFPassL,      {Local|FuncdRef,   Stack1,       OutFInputL,        1 }},
  { OpFPassN,      {Stack1|FuncdRef,  Stack1,       OutUnknown,        0 }},
  { OpFPassG,      {Stack1|FuncdRef,  Stack1,       OutUnknown,        0 }},
  { OpFPassS,      {StackTop2|FuncdRef,
                    Stack1,           OutUnknown,       -1 }},
  { OpFPassM,      {MVector|FuncdRef, Stack1|Local, OutUnknown,        1 }},
  /*
   * FCall is special. Like the Ret* instructions, its manipulation of the
   * runtime stack is outside the boundaries of the tracelet abstraction.
   */
  { OpFCall,       {FStack,           Stack1,       OutPred,           0 }},
  { OpFCallD,      {FStack,           Stack1,       OutPred,           0 }},
  { OpFCallUnpack, {FStack,           Stack1,       OutPred,           0 }},
  { OpFCallArray,  {FStack,           Stack1,       OutPred,
                                                   -(int)kNumActRecCells }},
  // TODO: output type is known
  { OpFCallBuiltin,{BStackN,          Stack1,       OutPred,           0 }},
  { OpCufSafeArray,{StackTop3|DontGuardAny,
                    Stack1,           OutArray,         -2 }},
  { OpCufSafeReturn,{StackTop3|DontGuardAny,
                    Stack1,           OutUnknown,       -2 }},
  { OpDecodeCufIter,{Stack1,          None,         OutNone,          -1 }},
  /*** 11. Iterator instructions ***/

  { OpIterInit,    {Stack1,           Local,        OutUnknown,       -1 }},
  { OpMIterInit,   {Stack1,           Local,        OutUnknown,       -1 }},
  { OpWIterInit,   {Stack1,           Local,        OutUnknown,       -1 }},
  { OpIterInitK,   {Stack1,           Local,        OutUnknown,       -1 }},
  { OpMIterInitK,  {Stack1,           Local,        OutUnknown,       -1 }},
  { OpWIterInitK,  {Stack1,           Local,        OutUnknown,       -1 }},
  { OpIterNext,    {None,             Local,        OutUnknown,        0 }},
  { OpMIterNext,   {None,             Local,        OutUnknown,        0 }},
  { OpWIterNext,   {None,             Local,        OutUnknown,        0 }},
  { OpIterNextK,   {None,             Local,        OutUnknown,        0 }},
  { OpMIterNextK,  {None,             Local,        OutUnknown,        0 }},
  { OpWIterNextK,  {None,             Local,        OutUnknown,        0 }},
  { OpIterFree,    {None,             None,         OutNone,           0 }},
  { OpMIterFree,   {None,             None,         OutNone,           0 }},
  { OpCIterFree,   {None,             None,         OutNone,           0 }},
  { OpIterBreak,   {None,             None,         OutNone,           0 }},
  /*** 12. Include, eval, and define instructions ***/

  { OpIncl,        {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpInclOnce,    {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpReq,         {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpReqOnce,     {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpReqDoc,      {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpEval,        {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpDefFunc,     {None,             None,         OutNone,           0 }},
  { OpDefTypeAlias,{None,             None,         OutNone,           0 }},
  { OpDefCls,      {None,             None,         OutNone,           0 }},
  { OpDefCns,      {Stack1,           Stack1,       OutBoolean,        0 }},
  /*** 13. Miscellaneous instructions ***/

  { OpThis,        {None,             Stack1,       OutThisObject,     1 }},
  { OpBareThis,    {None,             Stack1,       OutUnknown,        1 }},
  { OpCheckThis,   {This,             None,         OutNone,           0 }},
  { OpInitThisLoc,
                   {None,             Local,        OutUnknown,        0 }},
  { OpStaticLoc,
                   {None,             Stack1,       OutBoolean,        1 }},
  { OpStaticLocInit,
                   {Stack1,           Local,        OutVUnknown,      -1 }},
  { OpCatch,       {None,             Stack1,       OutObject,         1 }},
  { OpVerifyParamType,
                   {Local,            Local,        OutUnknown,        0 }},
  { OpVerifyRetTypeV,
                   {Stack1,           Stack1,       OutSameAsInput,    0 }},
  { OpVerifyRetTypeC,
                   {Stack1,           Stack1,       OutSameAsInput,    0 }},
  { OpOODeclExists,
                   {StackTop2,        Stack1,       OutBoolean,       -1 }},
  { OpSelf,        {None,             Stack1,       OutClassRef,       1 }},
  { OpParent,      {None,             Stack1,       OutClassRef,       1 }},
  { OpLateBoundCls,{None,             Stack1,       OutClassRef,       1 }},
  { OpNativeImpl,  {None,             None,         OutNone,           0 }},
  { OpCreateCl,    {BStackN,          Stack1,       OutObject,         1 }},
  { OpStrlen,      {Stack1,           Stack1,       OutStrlen,         0 }},
  { OpIncStat,     {None,             None,         OutNone,           0 }},
  { OpIdx,         {StackTop3,        Stack1,       OutUnknown,       -2 }},
  { OpArrayIdx,    {StackTop3,        Stack1,       OutUnknown,       -2 }},
  { OpFloor,       {Stack1,           Stack1,       OutDouble,         0 }},
  { OpCeil,        {Stack1,           Stack1,       OutDouble,         0 }},
  { OpCheckProp,   {None,             Stack1,       OutBoolean,        1 }},
  { OpInitProp,    {Stack1,           None,         OutNone,          -1 }},
  { OpSilence,     {Local|DontGuardAny,
                    Local,            OutNone,           0 }},
  { OpAssertRATL,  {None,             None,         OutNone,           0 }},
  { OpAssertRATStk,{None,             None,         OutNone,           0 }},
  { OpBreakTraceHint,{None,           None,         OutNone,           0 }},
  /*** 14. Generator instructions ***/

  { OpCreateCont,  {None,             Stack1,       OutNull,           1 }},
  { OpContEnter,   {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpContRaise,   {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpYield,       {Stack1,           Stack1,       OutUnknown,        0 }},
  { OpYieldK,      {StackTop2,        Stack1,       OutUnknown,       -1 }},
  { OpContCheck,   {None,             None,         OutNone,           0 }},
  { OpContValid,   {None,             Stack1,       OutBoolean,        1 }},
  { OpContKey,     {None,             Stack1,       OutUnknown,        1 }},
  { OpContCurrent, {None,             Stack1,       OutUnknown,        1 }},

  /*** 15. Async functions instructions ***/

  { OpAwait,       {Stack1,           Stack1,       OutUnknown,        0 }},
};
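// The sparse table above gets flattened into this dense hash map by
// initInstrInfo() before any lookups are performed.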
static hphp_hash_map<Op, InstrInfo> instrInfo;
static bool instrInfoInited;
static void initInstrInfo() {
  if (!instrInfoInited) {
    for (size_t i = 0; i < sizeof(instrInfoSparse) / sizeof(instrInfoSparse[0]);
         i++) {
      instrInfo[instrInfoSparse[i].op] = instrInfoSparse[i].info;
    }
    if (!RuntimeOption::EvalCheckReturnTypeHints) {
      for (size_t j = 0; j < 2; ++j) {
        auto& ii = instrInfo[j == 0 ? OpVerifyRetTypeC : OpVerifyRetTypeV];
        ii.in = ii.out = None;
        ii.type = OutNone;
      }
    }
    instrInfoInited = true;
  }
}
const InstrInfo& getInstrInfo(Op op) {
  assert(instrInfoInited);
  return instrInfo[op];
}

static int numHiddenStackInputs(const NormalizedInstruction& ni) {
  assert(ni.immVec.isValid());
  return ni.immVec.numStackValues();
}
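/*
 * Count the stack cells represented by the operand bits in `mask', after
 * masking off the flag bits that don't correspond to stack cells.
 */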
int64_t countOperands(uint64_t mask) {
  const uint64_t ignore = FuncdRef | Local | Iter | AllLocals |
    DontGuardStack1 | IgnoreInnerType | DontGuardAny | This;
  mask &= ~ignore;

  static const uint64_t counts[][2] = {
    {Stack3,       1},
    {Stack2,       1},
    {Stack1,       1},
    {StackIns1,    2},
    {StackIns2,    3},
    {FStack,       kNumActRecCells},
  };

  int64_t count = 0;
  for (auto const& pair : counts) {
    if (mask & pair[0]) {
      count += pair[1];
      mask &= ~pair[0];
    }
  }
  assert(mask == 0);
  return count;
}
int64_t getStackPopped(PC pc) {
  auto const op = *reinterpret_cast<const Op*>(pc);
  switch (op) {
    case Op::FCall:        return getImm((Op*)pc, 0).u_IVA + kNumActRecCells;
    case Op::FCallD:       return getImm((Op*)pc, 0).u_IVA + kNumActRecCells;
    case Op::FCallArray:   return kNumActRecCells + 1;

    case Op::NewPackedArray:
    case Op::ConcatN:
    case Op::FCallBuiltin:
    case Op::CreateCl:     return getImm((Op*)pc, 0).u_IVA;

    case Op::NewStructArray: return getImmVector((Op*)pc).size();

    default:               break;
  }

  uint64_t mask = getInstrInfo(op).in;
  int64_t count = 0;

  // All instructions with these properties are handled above
  assert((mask & (StackN | BStackN)) == 0);

  if (mask & MVector) {
    count += getImmVector((Op*)pc).numStackValues();
    mask &= ~MVector;
  }

  return count + countOperands(mask);
}
int64_t getStackPushed(PC pc) {
  return countOperands(getInstrInfo(*reinterpret_cast<const Op*>(pc)).out);
}
int getStackDelta(const NormalizedInstruction& ni) {
  int hiddenStackInputs = 0;
  initInstrInfo();
  auto op = ni.op();
  switch (op) {
    case Op::FCall:
    case Op::FCallD: {
      int numArgs = ni.imm[0].u_IVA;
      return 1 - numArgs - kNumActRecCells;
    }

    case Op::NewPackedArray:
    case Op::ConcatN:
    case Op::FCallBuiltin:
    case Op::CreateCl:
      return 1 - ni.imm[0].u_IVA;

    case Op::NewStructArray:
      return 1 - ni.immVec.numStackValues();

    default:
      break;
  }
  const InstrInfo& info = instrInfo[op];
  if (info.in & MVector) {
    hiddenStackInputs = numHiddenStackInputs(ni);
    SKTRACE(2, ni.source, "Has %d hidden stack inputs\n", hiddenStackInputs);
  }
  int delta = instrInfo[op].numPushed - hiddenStackInputs;
  return delta;
}
// Task #3449943: This returns true even if there's meta-data telling
// that the value was inferred.
bool outputIsPredicted(NormalizedInstruction& inst) {
  auto const& iInfo = getInstrInfo(inst.op());
  auto doPrediction =
    (iInfo.type == OutPred || iInfo.type == OutCns) && !inst.endsRegion;
  if (doPrediction) {
    // All OutPred ops except for SetM have a single stack output for now.
    assert(iInfo.out == Stack1 || inst.op() == OpSetM);
    auto dt = predictOutputs(&inst);
    if (dt) {
      inst.outPred = *dt == KindOfRef ? Type(*dt, KindOfAny{}) : Type(*dt);
      inst.outputPredicted = true;
    } else {
      doPrediction = false;
    }
  }

  return doPrediction;
}
bool isAlwaysNop(Op op) {
  switch (op) {
  case Op::BoxRNop:
  case Op::DefClsNop:
  case Op::FPassC:
  case Op::FPassVNop:
  case Op::Nop:
  case Op::RGetCNop:
  case Op::UnboxRNop:
    return true;
  case Op::VerifyRetTypeC:
  case Op::VerifyRetTypeV:
    return !RuntimeOption::EvalCheckReturnTypeHints;
  default:
    return false;
  }
}
static void addMVectorInputs(NormalizedInstruction& ni,
                             int& currentStackOffset,
                             std::vector<InputInfo>& inputs) {
  assert(ni.immVec.isValid());
  ni.immVecM.reserve(ni.immVec.size());

  int UNUSED stackCount = 0;
  int UNUSED localCount = 0;

  currentStackOffset -= ni.immVec.numStackValues();
  int localStackOffset = currentStackOffset;

  auto push_stack = [&] {
    ++stackCount;
    inputs.emplace_back(Location(Location::Stack, localStackOffset++));
  };
  auto push_local = [&] (int imm) {
    ++localCount;
    inputs.emplace_back(Location(Location::Local, imm));
  };

  /*
   * Note that we have to push as we go so that the arguments come in
   * the order expected for the M-vector.
   *
   * Also note: if we eventually have immediates that are not local
   * ids (i.e. string ids), this analysis step is going to have to be
   * changed to handle that.
   */
  auto opPtr = (const Op*)ni.source.pc();
  auto const location = getMLocation(opPtr);
  auto const lcode = location.lcode;

  const bool trailingClassRef = lcode == LSL || lcode == LSC;

  switch (numLocationCodeStackVals(lcode)) {
  case 0: {
    if (lcode == LH) {
      inputs.emplace_back(Location(Location::This));
    } else {
      assert(lcode == LL || lcode == LGL || lcode == LNL);
      if (location.hasImm()) {
        push_local(location.imm);
      }
    }
  } break;
  case 1:
    if (lcode == LSL) {
      // We'll get the trailing stack value after pushing all the
      // member vector elements.
      assert(location.hasImm());
      push_local(location.imm);
    } else {
      push_stack();
    }
    break;
  case 2:
    push_stack();
    if (!trailingClassRef) {
      // This one is actually at the back.
      push_stack();
    }
    break;
  default: not_reached();
  }

  // Now push all the members in the correct order.
  for (auto const& member : getMVector(opPtr)) {
    auto const mcode = member.mcode;
    ni.immVecM.push_back(mcode);

    if (mcode == MW) {
      // No stack and no locals.
      continue;
    } else if (member.hasImm()) {
      int64_t imm = member.imm;
      if (memberCodeImmIsLoc(mcode)) {
        push_local(imm);
      } else if (memberCodeImmIsString(mcode)) {
        inputs.emplace_back(Location(Location::Litstr, imm));
      } else {
        assert(memberCodeImmIsInt(mcode));
        inputs.emplace_back(Location(Location::Litint, imm));
      }
    } else {
      push_stack();
    }
    inputs.back().dontGuardInner = true;
  }

  if (trailingClassRef) {
    push_stack();
  }

  assert(stackCount == ni.immVec.numStackValues());

  SKTRACE(2, ni.source, "M-vector using %d hidden stack "
          "inputs, %d locals\n", stackCount, localCount);
}
/*
 * getInputsImpl --
 *   Returns locations for this instruction's inputs.
 *
 * Throws:
 *   TranslationFailedExc:
 *     Unimplemented functionality, probably an opcode.
 *
 *   UnknownInputExc:
 *     Consumed a datum whose type or value could not be constrained at
 *     translation time, because the tracelet has already modified it.
 *     Truncate the tracelet at the preceding instruction, which must
 *     exist because *something* modified something in it.
 */
static void getInputsImpl(SrcKey startSk,
                          NormalizedInstruction* ni,
                          int& currentStackOffset,
                          InputInfoVec& inputs) {
  auto sk = ni->source;

  if (isAlwaysNop(ni->op())) return;

  assert(inputs.empty());
  always_assert_flog(
    instrInfo.count(ni->op()),
    "Invalid opcode in getInputsImpl: {}\n",
    opcodeToName(ni->op())
  );
  const InstrInfo& info = instrInfo[ni->op()];
  Operands input = info.in;
  if (input & FuncdRef) {
    inputs.needsRefCheck = true;
  }
  if (input & Iter) {
    inputs.emplace_back(Location(Location::Iter, ni->imm[0].u_IVA));
  }
  if (input & FStack) {
    currentStackOffset -= ni->imm[0].u_IVA;      // arguments consumed
    currentStackOffset -= kNumActRecCells;       // ActRec is torn down as well
  }
  if (input & IgnoreInnerType) ni->ignoreInnerType = true;
  if (input & Stack1) {
    SKTRACE(1, sk, "getInputs: stack1 %d\n", currentStackOffset - 1);
    inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
    if (input & DontGuardStack1) inputs.back().dontGuard = true;
    if (input & Stack2) {
      SKTRACE(1, sk, "getInputs: stack2 %d\n", currentStackOffset - 1);
      inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
      if (input & Stack3) {
        SKTRACE(1, sk, "getInputs: stack3 %d\n", currentStackOffset - 1);
        inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
      }
    }
  }
  if (input & StackN) {
    int numArgs = (ni->op() == Op::NewPackedArray ||
                   ni->op() == Op::ConcatN)
      ? ni->imm[0].u_IVA
      : ni->immVec.numStackValues();

    SKTRACE(1, sk, "getInputs: stackN %d %d\n",
            currentStackOffset - 1, numArgs);
    for (int i = 0; i < numArgs; i++) {
      inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
      inputs.back().dontGuard = true;
      inputs.back().dontBreak = true;
    }
  }
  if (input & BStackN) {
    int numArgs = ni->imm[0].u_IVA;
    SKTRACE(1, sk, "getInputs: BStackN %d %d\n", currentStackOffset - 1,
            numArgs);
    for (int i = 0; i < numArgs; i++) {
      inputs.emplace_back(Location(Location::Stack, --currentStackOffset));
    }
  }
  if (input & MVector) {
    addMVectorInputs(*ni, currentStackOffset, inputs);
  }
  if (input & Local) {
    // (Almost) all instructions that take a Local have its index at
    // their first immediate.
    int loc;
    auto insertAt = inputs.end();
    switch (ni->op()) {
      case OpSetWithRefLM:
        insertAt = inputs.begin();
        // fallthrough
      case OpFPassL:
        loc = ni->imm[1].u_IVA;
        break;

      default:
        loc = ni->imm[0].u_IVA;
        break;
    }
    SKTRACE(1, sk, "getInputs: local %d\n", loc);
    inputs.emplace(insertAt, Location(Location::Local, loc));
  }

  if (input & AllLocals) {
    ni->ignoreInnerType = true;
  }

  SKTRACE(1, sk, "stack args: virtual sfo now %d\n", currentStackOffset);
  TRACE(1, "%s\n", Trace::prettyNode("Inputs", inputs).c_str());

  if (inputs.size() &&
      ((input & DontGuardAny) || dontGuardAnyInputs(ni->op()))) {
    for (int i = inputs.size(); i--; ) {
      inputs[i].dontGuard = true;
    }
  }
  if (input & This) {
    inputs.emplace_back(Location(Location::This));
  }
}
InputInfoVec getInputs(SrcKey startSk, NormalizedInstruction& inst) {
  InputInfoVec infos;
  // MCGenerator expected top of stack to be index -1, with indexes growing
  // down from there. hhir defines top of stack to be index 0, with indexes
  // growing up from there. To compensate we start with a stack offset of 1 and
  // negate the index of any stack input after the call to getInputs.
  int stackOff = 1;
  getInputsImpl(startSk, &inst, stackOff, infos);
  for (auto& info : infos) {
    if (info.loc.isStack()) info.loc.offset = -info.loc.offset;
  }
  return infos;
}
bool dontGuardAnyInputs(Op op) {
  switch (op) {
  case Op::DecodeCufIter:
  case Op::WIterInitK:
  case Op::WIterNextK:
  case Op::MIterInitK:
  case Op::MIterNextK:
  case Op::SetWithRefLM:
  case Op::SetWithRefRM:
  case Op::FCallArray:
  case Op::FCallBuiltin:
  case Op::NewStructArray:
  case Op::BreakTraceHint:
  case Op::FPushCufSafe:
  case Op::AssertRATL:
  case Op::AssertRATStk:
  case Op::CreateCont:
  case Op::AddNewElemC:
  case Op::CastDouble:
  case Op::CastObject:
  case Op::CastString:
  case Op::ColAddElemC:
  case Op::ColAddNewElemC:
  case Op::ContCurrent:
  case Op::FPushClsMethodD:
  case Op::FPushClsMethod:
  case Op::FPushClsMethodF:
  case Op::FPushCtorD:
  case Op::FPushCufIter:
  case Op::FPushFuncD:
  case Op::FPushFuncU:
  case Op::FPushObjMethodD:
  case Op::InitThisLoc:
  case Op::InstanceOf:
  case Op::InstanceOfD:
  case Op::LateBoundCls:
  case Op::NativeImpl:
  case Op::NewLikeArrayL:
  case Op::NewMixedArray:
  case Op::NewMIArray:
  case Op::NewMSArray:
  case Op::NewPackedArray:
  case Op::NullUninit:
  case Op::OODeclExists:
  case Op::StaticLocInit:
  case Op::VerifyParamType:
  case Op::VerifyRetTypeC:
  case Op::VerifyRetTypeV:
    return false;

  // These are instructions that are always interp-one'd, or are always no-ops.
  case Op::LowInvalid:
  case Op::AddNewElemV:
  case Op::FPushObjMethod:
  case Op::FCallUnpack:
  case Op::CufSafeArray:
  case Op::CufSafeReturn:
  case Op::DefTypeAlias:
  case Op::HighInvalid:
    return true;
  }

  always_assert_flog(0, "invalid opcode {}\n", static_cast<uint32_t>(op));
}
const StaticString s_http_response_header("http_response_header");
const StaticString s_php_errormsg("php_errormsg");
const StaticString s_extract("extract");
const StaticString s_extractNative("__SystemLib\\extract");
const StaticString s_parse_str("parse_str");
const StaticString s_parse_strNative("__SystemLib\\parse_str");
const StaticString s_assert("assert");
const StaticString s_assertNative("__SystemLib\\assert");
bool funcByNameDestroysLocals(const StringData* fname) {
  if (!fname) return false;
  return fname->isame(s_extract.get()) ||
         fname->isame(s_extractNative.get()) ||
         fname->isame(s_parse_str.get()) ||
         fname->isame(s_parse_strNative.get()) ||
         fname->isame(s_assert.get()) ||
         fname->isame(s_assertNative.get());
}

bool builtinFuncDestroysLocals(const Func* callee) {
  assert(callee && callee->isCPPBuiltin());
  auto const fname = callee->name();
  return funcByNameDestroysLocals(fname);
}
bool callDestroysLocals(const NormalizedInstruction& inst,
                        const Func* caller) {
  // We don't handle these two cases, because we don't compile functions
  // containing them.
  assert(caller->lookupVarId(s_php_errormsg.get()) == -1);
  assert(caller->lookupVarId(s_http_response_header.get()) == -1);

  auto* unit = caller->unit();
  auto checkTaintId = [&](Id id) {
    auto const str = unit->lookupLitstrId(id);
    return funcByNameDestroysLocals(str);
  };

  if (inst.op() == OpFCallBuiltin) return checkTaintId(inst.imm[2].u_SA);
  if (!isFCallStar(inst.op()))     return false;

  const FPIEnt* fpi = caller->findFPI(inst.source.offset());
  assert(fpi);
  Op* fpushPc = (Op*)unit->at(fpi->m_fpushOff);
  auto const op = *fpushPc;

  if (op == OpFPushFunc) {
    // If the call has any arguments, the FPushFunc will be in a different
    // tracelet -- the tracelet will break on every FPass* because the
    // reffiness of the callee isn't knowable. So we have to say the call
    // destroys locals, to be conservative. If there aren't any arguments,
    // then it can't destroy locals -- even if the call is to extract(),
    // there's no argument, so it won't do anything.
    auto const numArgs = inst.imm[0].u_IVA;
    return (numArgs != 0);
  }
  if (op == OpFPushFuncD) return checkTaintId(getImm(fpushPc, 1).u_SA);
  if (op == OpFPushFuncU) {
    return checkTaintId(getImm(fpushPc, 1).u_SA) ||
           checkTaintId(getImm(fpushPc, 2).u_SA);
  }

  return false;
}
bool instrBreaksProfileBB(const NormalizedInstruction* inst) {
  if (instrIsNonCallControlFlow(inst->op()) ||
      inst->outputPredicted ||
      inst->op() == OpAwait || // may branch to scheduler and suspend execution
      inst->op() == OpClsCnsD) { // side exits if misses in the RDS
    return true;
  }
  // In profiling mode, don't trace through a control flow merge point;
  // however, allow inlining of default parameter funclets.
  if (mcg->tx().profData()->anyBlockEndsAt(inst->func(), inst->offset()) &&
      !inst->func()->isEntry(inst->nextSk().offset())) {
    return true;
  }
  return false;
}
Translator::Translator()
  : uniqueStubs{}
  , m_createdTime(HPHP::Timer::GetCurrentTimeMicros())
  , m_mode(TransKind::Invalid)
  , m_profData(nullptr)
  , m_useAHot(RuntimeOption::RepoAuthoritative &&
              RuntimeOption::EvalJitAHotSize > 0)
{
  initInstrInfo();
  if (RuntimeOption::EvalJitPGO) {
    m_profData.reset(new ProfData());
  }
}
bool
Translator::isSrcKeyInBL(SrcKey sk) {
  auto unit = sk.unit();
  if (unit->isInterpretOnly()) return true;
  Lock l(m_dbgBlacklistLock);
  if (m_dbgBLSrcKey.find(sk) != m_dbgBLSrcKey.end()) {
    return true;
  }

  // Loop until the end of the basic block inclusively. This is useful for
  // function exit breakpoints, which are implemented by blacklisting the RetC
  // opcodes.
  PC pc = nullptr;
  do {
    pc = (pc == nullptr) ?
      unit->at(sk.offset()) : pc + instrLen((Op*) pc);
    if (m_dbgBLPC.checkPC(pc)) {
      m_dbgBLSrcKey.insert(sk);
      return true;
    }
  } while (!opcodeBreaksBB(*reinterpret_cast<const Op*>(pc)));
  return false;
}
void
Translator::clearDbgBL() {
  Lock l(m_dbgBlacklistLock);
  m_dbgBLSrcKey.clear();
  m_dbgBLPC.clear();
}

bool
Translator::addDbgBLPC(PC pc) {
  Lock l(m_dbgBlacklistLock);
  if (m_dbgBLPC.checkPC(pc)) {
    // already there
    return false;
  }
  m_dbgBLPC.addPC(pc);
  return true;
}
const char* show(TranslateResult r) {
  switch (r) {
  case TranslateResult::Failure: return "Failure";
  case TranslateResult::Retry:   return "Retry";
  case TranslateResult::Success: return "Success";
  }
  not_reached();
}
/*
 * Create a map from RegionDesc::BlockId -> IR Block* for all region blocks.
 */
static void createBlockMap(HTS& hts,
                           const RegionDesc& region,
                           BlockIdToIRBlockMap& blockIdToIRBlock) {
  auto& irb = *hts.irb;
  blockIdToIRBlock.clear();
  auto const& blocks = region.blocks();
  for (unsigned i = 0; i < blocks.size(); i++) {
    auto rBlock = blocks[i];
    auto id = rBlock->id();
    DEBUG_ONLY Offset bcOff = rBlock->start().offset();
    assert(IMPLIES(i == 0, bcOff == irb.unit().bcOff()));

    // NB: This maps the region entry block to a new IR block, even though
    // we've already constructed an IR entry block. We'll make the IR entry
    // block jump to this block.
    Block* iBlock = irb.unit().defBlock();

    blockIdToIRBlock[id] = iBlock;
    FTRACE(1,
           "createBlockMaps: RegionBlock {} => IRBlock {} (BC offset = {})\n",
           id, iBlock->id(), bcOff);
  }
}
/*
 * Set IRBuilder's Block associated to blockId's block according to
 * the mapping in blockIdToIRBlock.
 */
static void setIRBlock(HTS& hts,
                       RegionDesc::BlockId blockId,
                       const RegionDesc& region,
                       const BlockIdToIRBlockMap& blockIdToIRBlock) {
  auto& irb = *hts.irb;
  auto rBlock = region.block(blockId);
  Offset bcOffset = rBlock->start().offset();

  auto iit = blockIdToIRBlock.find(blockId);
  assert(iit != blockIdToIRBlock.end());

  assert(!irb.hasBlock(bcOffset));
  FTRACE(3, "  setIRBlock: blockId {}, offset {} => IR Block {}\n",
         blockId, bcOffset, iit->second->id());
  irb.setBlock(bcOffset, iit->second);
}
/*
 * Set IRBuilder's Blocks for srcBlockId's successors' offsets within
 * the region.
 */
static void setSuccIRBlocks(HTS& hts,
                            const RegionDesc& region,
                            RegionDesc::BlockId srcBlockId,
                            const BlockIdToIRBlockMap& blockIdToIRBlock) {
  FTRACE(3, "setSuccIRBlocks: srcBlockId = {}\n", srcBlockId);
  auto& irb = *hts.irb;
  irb.resetOffsetMapping();
  for (auto dstBlockId : region.succs(srcBlockId)) {
    setIRBlock(hts, dstBlockId, region, blockIdToIRBlock);
  }
}
/*
 * Check if `i' is an FPush{Func,ClsMethod}D followed by an FCall{,D} to a
 * function with a singleton pattern, and if so, inline it. Returns true if
 * this succeeds, else false.
 */
static bool tryTranslateSingletonInline(HTS& hts,
                                        const NormalizedInstruction& i,
                                        const Func* funcd) {
  using Atom = BCPattern::Atom;
  using Captures = BCPattern::CaptureVec;

  if (!funcd) return false;

  // Make sure we have an acceptable FPush and non-null callee.
  assert(i.op() == Op::FPushFuncD ||
         i.op() == Op::FPushClsMethodD);

  auto fcall = i.nextSk();

  // Check if the next instruction is an acceptable FCall.
  if ((fcall.op() != Op::FCall && fcall.op() != Op::FCallD) ||
      funcd->isResumable() || funcd->isReturnRef()) {
    return false;
  }

  // First, check for the static local singleton pattern...

  // Lambda to check if CGetL and StaticLocInit refer to the same local.
  auto has_same_local = [] (PC pc, const Captures& captures) {
    if (captures.size() == 0) return false;

    auto cgetl = (const Op*)pc;
    auto sli = (const Op*)captures[0];

    assert(*cgetl == Op::CGetL);
    assert(*sli == Op::StaticLocInit);

    return (getImm(sli, 0).u_IVA == getImm(cgetl, 0).u_IVA);
  };

  auto cgetl = Atom(Op::CGetL).onlyif(has_same_local);
  auto retc  = Atom(Op::RetC);

  // Look for a static local singleton pattern.
  auto result = BCPattern {
    Atom(Op::Null),
    Atom(Op::StaticLocInit).capture(),
    Atom(Op::IsTypeL),
    Atom::alt(
      Atom(Op::JmpZ).taken({cgetl, retc}),
      Atom::seq(Atom(Op::JmpNZ), cgetl, retc)
    )
  }.ignore(
    {Op::AssertRATL, Op::AssertRATStk}
  ).matchAnchored(funcd);

  if (result.found()) {
    try {
      irgen::inlSingletonSLoc(
        hts,
        funcd,
        (const Op*)result.getCapture(0)
      );
    } catch (const FailedIRGen& e) {
      return false;
    } catch (const FailedCodeGen& e) {
      return false;
    }
    TRACE(1, "[singleton-sloc] %s <- %s\n",
          funcd->fullName()->data(),
          fcall.func()->fullName()->data());
    return true;
  }

  // Not found; check for the static property pattern.

  // Factory for String atoms that are required to match another captured
  // String opcode.
  auto same_string_as = [&] (int i) {
    return Atom(Op::String).onlyif([=] (PC pc, const Captures& captures) {
      auto string1 = (const Op*)pc;
      auto string2 = (const Op*)captures[i];
      assert(*string1 == Op::String);
      assert(*string2 == Op::String);

      auto const unit = funcd->unit();
      auto sd1 = unit->lookupLitstrId(getImmPtr(string1, 0)->u_SA);
      auto sd2 = unit->lookupLitstrId(getImmPtr(string2, 0)->u_SA);

      return (sd1 && sd1 == sd2);
    });
  };

  auto stringProp = same_string_as(0);
  auto stringCls  = same_string_as(1);
  auto agetc = Atom(Op::AGetC);
  auto cgets = Atom(Op::CGetS);

  // Look for a class static singleton pattern.
  result = BCPattern {
    Atom(Op::String).capture(),
    Atom(Op::String).capture(),
    agetc,
    cgets,
    Atom(Op::IsTypeC),
    Atom::alt(
      Atom(Op::JmpZ).taken({stringProp, stringCls, agetc, cgets, retc}),
      Atom::seq(Atom(Op::JmpNZ), stringProp, stringCls, agetc, cgets, retc)
    )
  }.ignore(
    {Op::AssertRATL, Op::AssertRATStk}
  ).matchAnchored(funcd);

  if (result.found()) {
    try {
      irgen::inlSingletonSProp(
        hts,
        funcd,
        (const Op*)result.getCapture(1),
        (const Op*)result.getCapture(0)
      );
    } catch (const FailedIRGen& e) {
      return false;
    } catch (const FailedCodeGen& e) {
      return false;
    }
    TRACE(1, "[singleton-sprop] %s <- %s\n",
          funcd->fullName()->data(),
          fcall.func()->fullName()->data());
    return true;
  }

  return false;
}
/*
 * Returns whether offset is a control-flow merge within region.
 */
static bool isMergePoint(Offset offset, const RegionDesc& region) {
  for (auto const block : region.blocks()) {
    auto const bid = block->id();
    if (block->start().offset() == offset) {
      auto inCount = region.preds(bid).size();
      // NB: The entry block is a merge point if it has one predecessor.
      if (block == region.entry()) ++inCount;
      if (inCount >= 2) return true;
    }
  }
  return false;
}
static bool blockIsLoopHeader(
  const RegionDesc& region,
  RegionDesc::BlockId blockId,
  const RegionDesc::BlockIdSet& processedBlocks
) {
  for (auto predId : region.preds(blockId)) {
    if (processedBlocks.count(predId) == 0) {
      return true;
    }
  }
  return false;
}
/*
 * Returns whether any instruction following inst (whether by fallthrough or
 * branch target) is a merge in region.
 */
static bool nextIsMerge(const NormalizedInstruction& inst,
                        const RegionDesc& region) {
  Offset fallthruOffset = inst.offset() + instrLen((Op*)(inst.pc()));
  if (instrHasConditionalBranch(inst.op())) {
    auto offsetPtr = instrJumpOffset((Op*)inst.pc());
    Offset takenOffset = inst.offset() + *offsetPtr;
    return fallthruOffset == takenOffset
      || isMergePoint(takenOffset, region)
      || isMergePoint(fallthruOffset, region);
  }
  if (isUnconditionalJmp(inst.op())) {
    auto offsetPtr = instrJumpOffset((Op*)inst.pc());
    Offset takenOffset = inst.offset() + *offsetPtr;
    return isMergePoint(takenOffset, region);
  }
  return isMergePoint(fallthruOffset, region);
}
//////////////////////////////////////////////////////////////////////

#define IMM_MA(n)      0 /* ignored, but we need something (for commas) */
#define IMM_BLA(n)     ni.immVec
#define IMM_SLA(n)     ni.immVec
#define IMM_ILA(n)     ni.immVec
#define IMM_VSA(n)     ni.immVec
#define IMM_IVA(n)     ni.imm[n].u_IVA
#define IMM_I64A(n)    ni.imm[n].u_I64A
#define IMM_LA(n)      ni.imm[n].u_LA
#define IMM_IA(n)      ni.imm[n].u_IA
#define IMM_DA(n)      ni.imm[n].u_DA
#define IMM_SA(n)      ni.unit()->lookupLitstrId(ni.imm[n].u_SA)
#define IMM_RATA(n)    ni.imm[n].u_RATA
#define IMM_AA(n)      ni.unit()->lookupArrayId(ni.imm[n].u_AA)
#define IMM_BA(n)      ni.imm[n].u_BA
#define IMM_OA_IMPL(n) ni.imm[n].u_OA
#define IMM_OA(subop)  (subop)IMM_OA_IMPL

#define ONE(x0)           , IMM_##x0(0)
#define TWO(x0,x1)        , IMM_##x0(0), IMM_##x1(1)
#define THREE(x0,x1,x2)   , IMM_##x0(0), IMM_##x1(1), IMM_##x2(2)
#define FOUR(x0,x1,x2,x3) , IMM_##x0(0), IMM_##x1(1), IMM_##x2(2), IMM_##x3(3)
#define NA
static void translateDispatch(HTS& hts,
                              const NormalizedInstruction& ni) {
#define O(nm, imms, ...) case Op::nm: irgen::emit##nm(hts imms); return;
  switch (ni.op()) { OPCODES }
#undef O
}

#undef NA
#undef ONE
#undef TWO
#undef THREE
#undef FOUR

#undef IMM_MA
#undef IMM_BLA
#undef IMM_SLA
#undef IMM_ILA
#undef IMM_VSA
#undef IMM_IVA
#undef IMM_I64A
#undef IMM_LA
#undef IMM_IA
#undef IMM_DA
#undef IMM_SA
#undef IMM_RATA
#undef IMM_AA
#undef IMM_BA
#undef IMM_OA_IMPL
#undef IMM_OA

//////////////////////////////////////////////////////////////////////
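/*
 * Map an eval-stack flavor from the bytecode spec to the widest HHIR type
 * a value of that flavor may have.
 */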
static Type flavorToType(FlavorDesc f) {
  switch (f) {
    case NOV: not_reached();

    case CV: return Type::Cell;  // TODO(#3029148) this could be InitCell
    case UV: return Type::Uninit;
    case VV: return Type::BoxedCell;
    case AV: return Type::Cls;
    case RV: case FV: case CVV: case CVUV: return Type::Gen;
  }
  not_reached();
}
& hts
, const NormalizedInstruction
& ni
) {
1929 ni
.endsRegion
&& !irgen::isInlining(hts
)
1931 FTRACE(1, "\n{:-^60}\n", folly::format("Translating {}: {} with stack:\n{}",
1932 ni
.offset(), ni
.toString(),
1934 // When profiling, we disable type predictions to avoid side exits
1935 assert(IMPLIES(mcg
->tx().mode() == TransKind::Profile
, !ni
.outputPredicted
));
1937 irgen::ringbuffer(hts
, Trace::RBTypeBytecodeStart
, ni
.source
, 2);
1938 irgen::emitIncStat(hts
, Stats::Instr_TC
, 1);
1940 auto pc
= reinterpret_cast<const Op
*>(ni
.pc());
1941 for (auto i
= 0, num
= instrNumPops(pc
); i
< num
; ++i
) {
1942 auto const type
= flavorToType(instrInputFlavor(pc
, i
));
1943 if (type
!= Type::Gen
) {
1944 // TODO(#5706706): want to use assertTypeLocation, but Location::Stack
1945 // is a little unsure of itself.
1946 irgen::assertTypeStack(hts
, i
, type
);
1950 if (RuntimeOption::EvalHHIRGenerateAsserts
>= 2) {
1951 hts
.irb
->gen(DbgAssertRetAddr
);
1954 if (isAlwaysNop(ni
.op())) {
1956 } else if (ni
.interp
|| RuntimeOption::EvalJitAlwaysInterpOne
) {
1957 irgen::interpOne(hts
, ni
);
1959 translateDispatch(hts
, ni
);
1963 //////////////////////////////////////////////////////////////////////
1965 TranslateResult
translateRegion(HTS
& hts
,
1966 const RegionDesc
& region
,
1967 RegionBlacklist
& toInterp
,
1968 TransFlags trflags
) {
1969 const Timer
translateRegionTimer(Timer::translateRegion
);
1970 FTRACE(1, "translateRegion starting with:\n{}\n", show(region
));
1972 std::string errorMsg
;
1973 always_assert_log(check(region
, errorMsg
),
1974 [&] { return errorMsg
+ "\n" + show(region
); });
1976 auto& irb
= *hts
.irb
;
1978 auto const startSk
= region
.start();
1980 BlockIdToIRBlockMap blockIdToIRBlock
;
1981 if (RuntimeOption::EvalHHIRBytecodeControlFlow
) {
1982 hts
.mode
= IRGenMode::CFG
;
1983 createBlockMap(hts
, region
, blockIdToIRBlock
);
1985 // Make the IR entry block jump to the IR block we mapped the region entry
1986 // block to (they are not the same!).
1988 auto const entry
= irb
.unit().entry();
1989 irb
.startBlock(entry
, entry
->front().marker(), false /* isLoopHeader */);
1991 auto const irBlock
= blockIdToIRBlock
[region
.entry()->id()];
1992 always_assert(irBlock
!= entry
);
1994 irb
.gen(Jmp
, irBlock
);
1997 RegionDesc::BlockIdSet processedBlocks
;
1999 Timer
irGenTimer(Timer::translateRegion_irGeneration
);
2000 auto& blocks
= region
.blocks();
2001 for (auto b
= 0; b
< blocks
.size(); b
++) {
2002 auto const& block
= blocks
[b
];
2003 auto const blockId
= block
->id();
2004 auto sk
= block
->start();
2005 auto typePreds
= makeMapWalker(block
->typePreds());
2006 auto byRefs
= makeMapWalker(block
->paramByRefs());
2007 auto refPreds
= makeMapWalker(block
->reffinessPreds());
2008 auto knownFuncs
= makeMapWalker(block
->knownFuncs());
2009 auto skipTrans
= false;
2011 const Func
* topFunc
= nullptr;
2012 TransID profTransId
= getTransId(blockId
);
2013 hts
.profTransID
= profTransId
;
2015 bool isLoopHeader
= false;
2017 if (hts
.mode
== IRGenMode::CFG
) {
2018 Block
* irBlock
= blockIdToIRBlock
[blockId
];
2019 isLoopHeader
= blockIsLoopHeader(region
, blockId
, processedBlocks
);
2020 always_assert(IMPLIES(isLoopHeader
, RuntimeOption::EvalJitLoops
));
2022 BCMarker
marker(sk
, block
->initialSpOffset(), profTransId
);
2023 if (!irb
.startBlock(irBlock
, marker
, isLoopHeader
)) {
2024 FTRACE(1, "translateRegion: block {} is unreachable, skipping\n",
2026 processedBlocks
.insert(blockId
);
2029 setSuccIRBlocks(hts
, region
, blockId
, blockIdToIRBlock
);
2032 for (unsigned i
= 0; i
< block
->length(); ++i
, sk
.advance(block
->unit())) {
2033 // Update bcOff here so any guards or assertions from metadata are
2034 // attributed to this instruction.
2035 irgen::updateBCOff(hts
, nullptr, sk
.offset(), false);
2037 // Emit prediction guards. If this is the first instruction in the
2038 // region, and the region's entry block is not a loop header, the guards
2039 // will go to a retranslate request. Otherwise, they'll go to a side
2041 auto const isEntry
= block
== region
.entry();
2042 auto const useGuards
= (isEntry
&& !isLoopHeader
&& i
== 0);
2043 if (useGuards
) irgen::ringbuffer(hts
, Trace::RBTypeTraceletGuards
, sk
);
2045 // Emit type guards.
2046 while (typePreds
.hasNext(sk
)) {
2047 auto const& pred
= typePreds
.next();
2048 auto type
= pred
.type
;
2049 auto loc
= pred
.location
;
2050 if (type
<= Type::Cls
) {
2051 // Do not generate guards for class; instead assert the type.
2052 assert(loc
.tag() == RegionDesc::Location::Tag::Stack
);
2053 irgen::assertTypeLocation(hts
, loc
, type
);
2054 } else if (useGuards
) {
2055 bool checkOuterTypeOnly
= mcg
->tx().mode() != TransKind::Profile
;
2056 irgen::guardTypeLocation(hts
, loc
, type
, checkOuterTypeOnly
);
2058 irgen::checkTypeLocation(hts
, loc
, type
, sk
.offset());
2062 while (refPreds
.hasNext(sk
)) {
2063 auto const& pred
= refPreds
.next();
2065 irgen::guardRefs(hts
, pred
.arSpOffset
, pred
.mask
, pred
.vals
);
2067 irgen::checkRefs(hts
, pred
.arSpOffset
, pred
.mask
, pred
.vals
,
2072 // Finish emitting guards, and emit profiling counters.
2075 if (RuntimeOption::EvalJitTransCounters
) {
2076 irgen::incTransCounter(hts
);
        if (mcg->tx().mode() == TransKind::Profile) {
          if (block->func()->isEntry(block->start().offset())) {
            irgen::checkCold(hts, mcg->tx().profData()->curTransID());
          } else {
            irgen::incProfCounter(hts, mcg->tx().profData()->curTransID());
          }
        }
        irgen::ringbuffer(hts, Trace::RBTypeTraceletBody, sk);
        // In the entry block, hhbc-translator gets a chance to emit some code
        // immediately after the initial guards/checks on the first
        // instruction.
        if (isEntry && i == 0) {
          switch (arch()) {
            case Arch::X64:
              irgen::prepareEntry(hts);
              break;
            case Arch::ARM:
              // Don't do this for ARM, because it can lead to interpOne on
              // the first SrcKey in a translation, which isn't allowed.
              break;
          }
        }
      }
      // Update the current funcd, if we have a new one.
      if (knownFuncs.hasNext(sk)) {
        topFunc = knownFuncs.next();
      }
      // Create and initialize the instruction.
      NormalizedInstruction inst(sk, block->unit());
      inst.funcd = topFunc;
      if (i == block->length() - 1) {
        inst.endsRegion = region.isExit(blockId);
        inst.nextIsMerge = nextIsMerge(inst, region);
        if (hts.mode == IRGenMode::Trace &&
            instrIsNonCallControlFlow(inst.op()) &&
            b < blocks.size() - 1) {
          inst.nextOffset = blocks[b + 1]->start().offset();
        }
      }
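      // If a previous pass over this region threw FailedIRGen at this
      // SrcKey, the Retry path below recorded it in toInterp; setting
      // inst.interp makes translateInstr emit an InterpOne for just this
      // instruction instead.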
      // We can get a more precise output type for interpOne if we know all
      // of its inputs, so we still populate the rest of the instruction even
      // if this is true.
      inst.interp = toInterp.count(ProfSrcKey{profTransId, sk});
      auto const inputInfos = getInputs(startSk, inst);
      // Populate the NormalizedInstruction's input vector, using types from
      // irgen's tracked state.
      std::vector<DynLocation> dynLocs;
      dynLocs.reserve(inputInfos.size());
      auto newDynLoc = [&] (const InputInfo& ii) {
        dynLocs.emplace_back(
          ii.loc,
          irgen::predictedTypeFromLocation(hts, ii.loc)
        );
        FTRACE(2, "predictedTypeFromLocation: {} -> {}\n",
               ii.loc.pretty(), dynLocs.back().rtt);
        return &dynLocs.back();
      };
      FTRACE(2, "populating inputs for {}\n", inst.toString());
      for (auto const& ii : inputInfos) {
        inst.inputs.push_back(newDynLoc(ii));
      }

      if (inputInfos.needsRefCheck) {
        assert(byRefs.hasNext(sk));
        inst.preppedByRef = byRefs.next();
      }
      /*
       * Check for a type prediction. Put it in the NormalizedInstruction so
       * the emit* method can use it if needed. In PGO mode, we don't really
       * need the values coming from the interpreter type profiler.
       * TransKind::Profile translations end whenever there's a side-exit,
       * and type predictions incur side-exits. And when we stitch multiple
       * TransKind::Profile translations together to form a larger region (in
       * TransKind::Optimize mode), the guard for the top of the stack
       * essentially plays the role of type prediction. And, if the value is
       * also inferred, then the guard is omitted.
       */
      auto const doPrediction = mcg->tx().mode() == TransKind::Live &&
                                outputIsPredicted(inst);
      // If this block ends with an inlined FCall, we don't emit anything for
      // the FCall and instead set up HhbcTranslator for inlining. Blocks
      // from the callee will be next in the region.
      if (i == block->length() - 1 &&
          (inst.op() == Op::FCall || inst.op() == Op::FCallD) &&
          block->inlinedCallee()) {
        auto const* callee = block->inlinedCallee();
        FTRACE(1, "\nstarting inlined call from {} to {} with {} args\n",
               block->func()->fullName()->data(),
               callee->fullName()->data(),
               inst.imm[0].u_IVA);

        auto returnSk = inst.nextSk();
        auto returnFuncOff = returnSk.offset() - block->func()->base();
        irgen::beginInlining(hts, inst.imm[0].u_IVA, callee, returnFuncOff,
                             doPrediction ? inst.outPred : Type::Gen);
        // "Fallthrough" into the callee's first block
        irgen::endBlock(hts, blocks[b + 1]->start().offset(),
                        inst.nextIsMerge);
        continue;
      }
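      // Note the continue: no HHIR is emitted for the FCall itself. Control
      // simply "falls" into the callee's first block, and beginInlining has
      // already recorded the func-relative return offset needed to wire up
      // the inlined return.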
      // Singleton inlining optimization.
      if (RuntimeOption::EvalHHIRInlineSingletons) {
        bool didInlineSingleton = [&] {
          if (!RuntimeOption::RepoAuthoritative) return false;

          // I don't really want to inline my arm, thanks.
          if (arch() != Arch::X64) return false;

          // Don't inline if we're retranslating due to a side-exit from an
          // inlined singleton.
          if (trflags.noinlineSingleton && startSk == inst.source) {
            return false;
          }

          // Bail early if this isn't a push.
          if (inst.op() != Op::FPushFuncD &&
              inst.op() != Op::FPushClsMethodD) {
            return false;
          }

          // ...and also if this is the end of the block.
          if (i == block->length() - 1) return false;

          auto nextSK = inst.nextSk();

          // If the normal machinery is already inlining this function, don't
          // do anything here.
          if (i == block->length() - 2 &&
              (nextSK.op() == Op::FCall || nextSK.op() == Op::FCallD) &&
              block->inlinedCallee()) {
            return false;
          }

          // This is safe to do even if singleton inlining fails; we just
          // won't change topFunc in the next pass since hasNext() will
          // return false.
          if (knownFuncs.hasNext(nextSK)) {
            topFunc = knownFuncs.next();
          }

          // Detect a singleton pattern and inline it if found.
          return tryTranslateSingletonInline(hts, inst, topFunc);
        }();
        // Skip the translation of this instruction (the FPush) -and- the
        // next instruction (the FCall) if we succeeded at singleton
        // inlining. We still want the fallthrough and prediction logic,
        // though.
        if (didInlineSingleton) {
          skipTrans = true;
          continue;
        }
      }
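      // The shape tryTranslateSingletonInline recognizes is, roughly, an
      // accessor that lazily initializes and returns a cached instance; in
      // PHP, something like:
      //
      //   public static function get() {
      //     if (!self::$instance) self::$instance = new static;
      //     return self::$instance;
      //   }
      //
      // (Illustrative only; the exact bytecode shapes are matched with the
      // helpers in bc-pattern.h.)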
      // Emit IR for the body of the instruction.
      try {
        if (!skipTrans) translateInstr(hts, inst);
      } catch (const FailedIRGen& exn) {
        ProfSrcKey psk{profTransId, sk};
        always_assert_log(
          !toInterp.count(psk),
          [&] {
            std::ostringstream oss;
            oss << folly::format("IR generation failed with {}\n", exn.what());
            print(oss, hts.unit);
            return oss.str();
          });
        toInterp.insert(psk);
        return TranslateResult::Retry;
      }

      skipTrans = false;
      // In CFG mode, insert a fallthrough jump at the end of each block.
      if (hts.mode == IRGenMode::CFG && i == block->length() - 1) {
        if (instrAllowsFallThru(inst.op())) {
          auto nextOffset = inst.offset() + instrLen((Op*)(inst.pc()));
          // prepareForSideExit is done later in Trace mode, but it
          // needs to happen here or else we generate the SpillStack
          // after the fallthrough jump, which is just weird.
          if (b < blocks.size() - 1 && region.isSideExitingBlock(blockId)) {
            irgen::prepareForSideExit(hts);
          }
          irgen::endBlock(hts, nextOffset, inst.nextIsMerge);
        } else if (b < blocks.size() - 1 &&
                   (isRet(inst.op()) || inst.op() == OpNativeImpl)) {
          // "Fallthrough" from inlined return to the next block
          irgen::endBlock(hts, blocks[b + 1]->start().offset(),
                          inst.nextIsMerge);
        }
        if (region.isExit(blockId)) {
          irgen::endRegion(hts);
        }
      }
      // Check the prediction. If the predicted type is less specific than
      // what is currently on the eval stack, checkType won't emit any code.
      if (doPrediction &&
          // TODO(#5710339): would be nice to remove the following check
          irgen::publicTopType(hts, 0).maybe(inst.outPred)) {
        irgen::checkTypeStack(hts, 0, inst.outPred,
                              sk.advanced(block->unit()).offset());
      }
    }
    if (hts.mode == IRGenMode::Trace) {
      if (b < blocks.size() - 1 && region.isSideExitingBlock(blockId)) {
        irgen::prepareForSideExit(hts);
      }
    }

    processedBlocks.insert(blockId);

    assert(!typePreds.hasNext());
    assert(!byRefs.hasNext());
    assert(!refPreds.hasNext());
    assert(!knownFuncs.hasNext());
  }
  if (hts.mode == IRGenMode::Trace) irgen::endRegion(hts);

  try {
    mcg->traceCodeGen(hts);
    if (mcg->tx().mode() == TransKind::Profile) {
      mcg->tx().profData()->setProfiling(startSk.func()->getFuncId());
    }
  } catch (const FailedCodeGen& exn) {
    SrcKey sk{exn.vmFunc, exn.bcOff, exn.resumed};
    ProfSrcKey psk{exn.profTransId, sk};
    always_assert_log(
      !toInterp.count(psk),
      [&] {
        std::ostringstream oss;
        oss << folly::format("code generation failed with {}\n", exn.what());
        print(oss, hts.irb->unit());
        return oss.str();
      });
    toInterp.insert(psk);
    return TranslateResult::Retry;
  } catch (const DataBlockFull& dbFull) {
    if (dbFull.name == "hot") {
      assert(mcg->tx().useAHot());
      mcg->tx().setUseAHot(false);
      // We can't return Retry here because the code block selection
      // will still say hot.
      return TranslateResult::Failure;
    }
    always_assert_flog(0, "data block = {}\nmessage: {}\n",
                       dbFull.name, dbFull.what());
  }

  return TranslateResult::Success;
}
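
/*
 * Translation counters are stored in fixed-size chunks; a TransID's counter
 * lives at m_transCounters[id / transCountersPerChunk]
 *                         [id % transCountersPerChunk].
 * For illustration: if transCountersPerChunk were 1024, TransID 2500 would
 * land in chunk 2, slot 452 (2500 / 1024 == 2, 2500 % 1024 == 452).
 */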
uint64_t* Translator::getTransCounterAddr() {
  if (!isTransDBEnabled()) return nullptr;

  TransID id = m_translations.size();

  // Allocate a new chunk of counters if necessary.
  if (id >= m_transCounters.size() * transCountersPerChunk) {
    uint32_t size = sizeof(uint64_t) * transCountersPerChunk;
    auto* chunk = (uint64_t*)malloc(size);
    bzero(chunk, size);
    m_transCounters.push_back(chunk);
  }

  assert(id / transCountersPerChunk < m_transCounters.size());
  return &(m_transCounters[id / transCountersPerChunk]
                          [id % transCountersPerChunk]);
}
void Translator::addTranslation(const TransRec& transRec) {
  if (Trace::moduleEnabledRelease(Trace::trans, 1)) {
    // Log the translation's creation time, SrcKey, and sizes.
    Trace::traceRelease("New translation: %" PRId64 " %s %u %u %d\n",
                        HPHP::Timer::GetCurrentTimeMicros() - m_createdTime,
                        folly::format("{}:{}:{}",
                          transRec.src.unit()->filepath()->data(),
                          transRec.src.getFuncId(),
                          transRec.src.offset()).str().c_str(),
                        transRec.aLen,
                        transRec.acoldLen,
                        static_cast<int>(transRec.kind));
  }
  if (!isTransDBEnabled()) return;
  uint32_t id = getCurrentTransID();
  m_translations.emplace_back(transRec);
  m_translations[id].id = id;

  if (transRec.aLen > 0) {
    m_transDB[transRec.aStart] = id;
  }
  if (transRec.acoldLen > 0) {
    m_transDB[transRec.acoldStart] = id;
  }
}
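
// m_transDB keys the start address of each translation's main and cold code
// to its TransID, so a code address (e.g. from a perf sample or a crash
// dump) can be resolved to the translation that owns it.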
uint64_t Translator::getTransCounter(TransID transId) const {
  if (!isTransDBEnabled()) return -1ul;
  assert(transId < m_translations.size());

  uint64_t counter;
  if (transId / transCountersPerChunk >= m_transCounters.size()) {
    // No counter chunk has been allocated for this translation yet.
    counter = 0;
  } else {
    counter = m_transCounters[transId / transCountersPerChunk]
                             [transId % transCountersPerChunk];
  }
  return counter;
}
const Func* lookupImmutableMethod(const Class* cls, const StringData* name,
                                  bool& magicCall, bool staticLookup,
                                  Class* ctx) {
  if (!cls || RuntimeOption::EvalJitEnableRenameFunction) return nullptr;
  if (cls->attrs() & AttrInterface) return nullptr;
  bool privateOnly = false;
  if (!RuntimeOption::RepoAuthoritative ||
      !(cls->preClass()->attrs() & AttrUnique)) {
    if (!ctx || !ctx->classof(cls)) {
      return nullptr;
    }
    if (!staticLookup) privateOnly = true;
  }

  const Func* func;
  LookupResult res = staticLookup ?
    g_context->lookupClsMethod(func, cls, name, nullptr, ctx, false) :
    g_context->lookupObjMethod(func, cls, name, ctx, false);

  if (res == LookupResult::MethodNotFound) return nullptr;

  assert(res == LookupResult::MethodFoundWithThis ||
         res == LookupResult::MethodFoundNoThis ||
         (staticLookup ?
          res == LookupResult::MagicCallStaticFound :
          res == LookupResult::MagicCallFound));

  magicCall =
    res == LookupResult::MagicCallStaticFound ||
    res == LookupResult::MagicCallFound;
  if ((privateOnly && (!(func->attrs() & AttrPrivate) || magicCall)) ||
      func->isAbstract() ||
      func->attrs() & AttrInterceptable) {
    return nullptr;
  }

  if (magicCall) {
    /*
     * Magic calls can't be used as immutable targets:
     *
     *  i) We can't tell if a magic call would go to __call or __callStatic
     *     - Could deal with this by checking for the existence of __call
     *
     * ii) hphp semantics is that in the case of an object call, we look
     *     for __call in the scope of the object (this is incompatible
     *     with zend) which means we would have to know that there is no
     *     __call higher up in the tree
     *     - Could deal with this by checking for AttrNoOverride on the
     *       class
     */
    func = nullptr;
  } else if (!(func->attrs() & AttrPrivate)) {
    if (magicCall || func->attrs() & AttrStatic) {
      if (!(cls->preClass()->attrs() & AttrNoOverride)) {
        func = nullptr;
      }
    } else if (!(func->attrs() & AttrNoOverride && !func->hasStaticLocals()) &&
               !(cls->preClass()->attrs() & AttrNoOverride)) {
      // Even if a func has AttrNoOverride, if it has static locals it
      // is cloned into subclasses (to give them different copies of
      // the static locals), so we need to skip this.
      func = nullptr;
    }
  }

  return func;
}
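
/*
 * Illustration (not part of the original source): in RepoAuthoritative mode,
 * given
 *
 *   class C { public function f() {} }
 *
 * where C is unique and nothing can override f, a call $obj->f() on a
 * receiver statically known to be a C can be bound to C::f at JIT time.
 * Whenever an override or a magic-call fallback might exist, the lookup
 * above returns nullptr and the call site stays virtual.
 */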
///////////////////////////////////////////////////////////////////////////////

void invalidatePath(const std::string& path) {
  TRACE(1, "invalidatePath: abspath %s\n", path.c_str());
  assert(path.size() >= 1 && path[0] == '/');
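  // Treadmill::enqueue defers the work below until every request currently
  // in flight has finished, so no running request can have the unit swapped
  // out from under it.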
  Treadmill::enqueue([path] {
    /*
     * inotify saw this path change. Now poke the unit loader; it will
     * notice the underlying php file has changed.
     *
     * We don't actually need to *do* anything with the Unit* from this
     * lookup; since the path has changed, the file we'll get out is going
     * to be some new file, not the old file that needs invalidation.
     */
    String spath(path);
    lookupUnit(spath.get(), "", nullptr /* initial_opt */);
  });
}
///////////////////////////////////////////////////////////////////////////////