2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #include "hphp/runtime/vm/jit/codegen.h"
22 #include "folly/ScopeGuard.h"
23 #include "folly/Format.h"
24 #include "hphp/util/trace.h"
25 #include "hphp/util/util.h"
27 #include "hphp/runtime/base/array/hphp_array.h"
28 #include "hphp/runtime/base/comparisons.h"
29 #include "hphp/runtime/base/complex_types.h"
30 #include "hphp/runtime/base/runtime_option.h"
31 #include "hphp/runtime/base/string_data.h"
32 #include "hphp/runtime/base/types.h"
33 #include "hphp/runtime/ext/ext_closure.h"
34 #include "hphp/runtime/ext/ext_continuation.h"
35 #include "hphp/runtime/ext/ext_collections.h"
36 #include "hphp/runtime/vm/bytecode.h"
37 #include "hphp/runtime/vm/runtime.h"
38 #include "hphp/runtime/base/stats.h"
39 #include "hphp/runtime/vm/jit/targetcache.h"
40 #include "hphp/runtime/vm/jit/translator-inline.h"
41 #include "hphp/runtime/vm/jit/translator-x64.h"
42 #include "hphp/runtime/vm/jit/translator-x64-internal.h"
43 #include "hphp/runtime/vm/jit/translator.h"
44 #include "hphp/runtime/vm/jit/types.h"
45 #include "hphp/runtime/vm/jit/x64-util.h"
46 #include "hphp/runtime/vm/jit/ir.h"
47 #include "hphp/runtime/vm/jit/linearscan.h"
48 #include "hphp/runtime/vm/jit/nativecalls.h"
49 #include "hphp/runtime/vm/jit/print.h"
50 #include "hphp/runtime/vm/jit/layout.h"
52 using HPHP::Transl::TCA
;
53 using namespace HPHP::Transl::TargetCache
;
60 //////////////////////////////////////////////////////////////////////
63 using namespace Transl::reg
;
68 * It's not normally ok to directly use tracelet abi registers in
69 * codegen, unless you're directly dealing with an instruction that
70 * does near-end-of-tracelet glue. (Or also we sometimes use them
71 * just for some static_assertions relating to calls to helpers from
72 * tx64 that hardcode these registers.)
77 const size_t kTypeWordOffset
= (offsetof(TypedValue
, m_type
) % 8);
78 const size_t kTypeShiftBits
= kTypeWordOffset
* CHAR_BIT
;
80 // left shift an immediate DataType, for type, to the correct position
81 // within one of the registers used to pass a TypedValue by value.
82 uint64_t toDataTypeForCall(Type type
) {
83 return uint64_t(type
.toDataType()) << kTypeShiftBits
;
86 int64_t spillSlotsToSize(int n
) {
87 return n
* sizeof(int64_t);
90 void cgPunt(const char* file
, int line
, const char* func
, uint32_t bcOff
) {
91 if (dumpIREnabled()) {
92 HPHP::Trace::trace("--------- CG_PUNT %s %d %s bcOff: %d \n",
93 file
, line
, func
, bcOff
);
95 throw FailedCodeGen(file
, line
, func
, bcOff
);
98 #define CG_PUNT(instr) cgPunt(__FILE__, __LINE__, #instr, m_curBcOff)
106 enum Kind
{ Move
, Xchg
};
108 MoveInfo(Kind kind
, int reg1
, int reg2
):
109 m_kind(kind
), m_reg1(reg1
), m_reg2(reg2
) {}
112 PhysReg m_reg1
, m_reg2
;
116 static bool cycleHasXMMReg(const CycleInfo
& cycle
,
117 const int (&moves
)[N
]) {
118 int first
= cycle
.node
;
121 if (PhysReg(node
).isXMM()) return true;
123 } while (node
!= first
);
128 void doRegMoves(int (&moves
)[N
], int rTmp
,
129 std::vector
<MoveInfo
>& howTo
) {
130 assert(howTo
.empty());
134 // Iterate over the nodes filling in outDegree[] and cycles[] as we go
137 for (int node
= 0; node
< N
; ++node
) {
138 // If a node's source is itself, its a nop
139 if (moves
[node
] == node
) moves
[node
] = -1;
140 if (node
== rTmp
&& moves
[node
] >= 0) {
141 // ERROR: rTmp cannot be referenced in moves[].
148 for (int startNode
= 0; startNode
< N
; ++startNode
) {
149 // If startNode has not been visited yet, begin walking
150 // a path from start node
151 if (index
[startNode
] < 0) {
152 int node
= startNode
;
154 index
[node
] = nextIndex
++;
155 if (moves
[node
] >= 0) {
156 int nextNode
= moves
[node
];
157 ++outDegree
[nextNode
];
158 if (index
[nextNode
] < 0) {
159 // If there is an edge from v to nextNode and nextNode has not been
160 // visited, extend the current path to include nextNode and recurse
164 // There is an edge from v to nextNode but nextNode has already been
165 // visited, check if nextNode is on the current path
166 if (index
[nextNode
] >= index
[startNode
]) {
167 // nextNode is on the current path so we've found a cycle
168 int length
= nextIndex
- index
[nextNode
];
169 CycleInfo ci
= { nextNode
, length
};
170 cycles
[numCycles
] = ci
;
177 // Handle all moves that aren't part of a cycle
181 for (int node
= 0; node
< N
; ++node
) {
182 if (outDegree
[node
] == 0) {
187 for (int i
= 0; i
< qBack
; ++i
) {
189 if (moves
[node
] >= 0) {
190 int nextNode
= moves
[node
];
191 howTo
.push_back(MoveInfo(MoveInfo::Move
, nextNode
, node
));
192 --outDegree
[nextNode
];
193 if (outDegree
[nextNode
] == 0) {
200 // Deal with any cycles we encountered
201 for (int i
= 0; i
< numCycles
; ++i
) {
202 // can't use xchg if one of the registers is XMM
203 bool hasXMMReg
= cycleHasXMMReg(cycles
[i
], moves
);
204 if (cycles
[i
].length
== 2 && !hasXMMReg
) {
205 int v
= cycles
[i
].node
;
207 howTo
.push_back(MoveInfo(MoveInfo::Xchg
, w
, v
));
208 } else if (cycles
[i
].length
== 3 && !hasXMMReg
) {
209 int v
= cycles
[i
].node
;
211 howTo
.push_back(MoveInfo(MoveInfo::Xchg
, w
, v
));
213 howTo
.push_back(MoveInfo(MoveInfo::Xchg
, x
, w
));
215 int v
= cycles
[i
].node
;
216 howTo
.push_back(MoveInfo(MoveInfo::Move
, v
, rTmp
));
220 howTo
.push_back(MoveInfo(MoveInfo::Move
, x
, w
));
224 howTo
.push_back(MoveInfo(MoveInfo::Move
, rTmp
, w
));
229 const char* getContextName(Class
* ctx
) {
230 return ctx
? ctx
->name()->data() : ":anonymous:";
233 } // unnamed namespace
235 //////////////////////////////////////////////////////////////////////
237 ArgDesc::ArgDesc(SSATmp
* tmp
, const RegisterInfo
& info
, bool val
)
238 : m_imm(-1), m_zeroExtend(false), m_done(false) {
239 if (tmp
->type() == Type::None
) {
244 if (tmp
->inst()->op() == DefConst
) {
245 m_srcReg
= InvalidReg
;
247 m_imm
= tmp
->getValBits();
249 m_imm
= toDataTypeForCall(tmp
->type());
254 if (tmp
->type().isNull()) {
255 m_srcReg
= InvalidReg
;
259 m_imm
= toDataTypeForCall(tmp
->type());
264 if (val
|| tmp
->numNeededRegs() > 1) {
265 auto reg
= info
.reg(val
? 0 : 1);
266 assert(reg
!= InvalidReg
);
269 // If val is false then we're passing tmp's type. TypeReg lets
270 // CodeGenerator know that the value might require some massaging
271 // to be in the right format for the call.
272 m_kind
= val
? Reg
: TypeReg
;
273 // zero extend any boolean value that we pass to the helper in case
274 // the helper expects it (e.g., as TypedValue)
275 if (val
&& tmp
->isA(Type::Bool
)) m_zeroExtend
= true;
279 m_srcReg
= InvalidReg
;
280 m_imm
= toDataTypeForCall(tmp
->type());
284 const Func
* CodeGenerator::curFunc() const {
285 always_assert(m_state
.lastMarker
&&
286 "We shouldn't be looking for a func when we have no marker");
287 return m_state
.lastMarker
->func
;
291 * Select a scratch register to use in the given instruction, prefering the
292 * lower registers which don't require a REX prefix. The selected register
293 * must not be any of the instructions inputs or outputs, and neither a register
294 * that is alive across this instruction.
296 PhysReg
CodeGenerator::selectScratchReg(IRInstruction
* inst
) {
297 static const RegSet kLowGPRegs
= RegSet()
304 RegSet liveRegs
= m_state
.liveRegs
[inst
];
305 for (const auto& tmp
: inst
->srcs()) {
306 liveRegs
|= m_regs
[tmp
].regs();
308 for (const auto& tmp
: inst
->dsts()) {
309 liveRegs
|= m_regs
[tmp
].regs();
312 if ((kLowGPRegs
- liveRegs
).findFirst(selectedReg
)) {
318 Address
CodeGenerator::cgInst(IRInstruction
* inst
) {
319 Opcode opc
= inst
->op();
320 auto const start
= m_as
.code
.frontier
;
321 m_rScratch
= selectScratchReg(inst
);
322 if (inst
->taken() && inst
->taken()->trace()->isCatch()) {
323 m_state
.catchTrace
= inst
->taken()->trace();
325 m_state
.catchTrace
= nullptr;
329 #define O(name, dsts, srcs, flags) \
330 case name: FTRACE(7, "cg" #name "\n"); \
332 return m_as.code.frontier == start ? nullptr : start;
342 #define NOOP_OPCODE(opcode) \
343 void CodeGenerator::cg##opcode(IRInstruction*) {}
345 #define CALL_OPCODE(opcode) \
346 void CodeGenerator::cg##opcode(IRInstruction* i) { cgCallNative(i); }
348 #define CALL_STK_OPCODE(opcode) \
349 CALL_OPCODE(opcode) \
350 CALL_OPCODE(opcode ## Stk)
352 NOOP_OPCODE(DefConst
)
355 NOOP_OPCODE(AssertLoc
)
356 NOOP_OPCODE(OverrideLoc
)
357 NOOP_OPCODE(AssertStk
)
359 NOOP_OPCODE(DefLabel
)
360 NOOP_OPCODE(ExceptionBarrier
)
362 CALL_OPCODE(AddElemStrKey
)
363 CALL_OPCODE(AddElemIntKey
)
364 CALL_OPCODE(AddNewElem
)
365 CALL_OPCODE(ArrayAdd
)
368 CALL_OPCODE(ConvBoolToArr
);
369 CALL_OPCODE(ConvDblToArr
);
370 CALL_OPCODE(ConvIntToArr
);
371 CALL_OPCODE(ConvObjToArr
);
372 CALL_OPCODE(ConvStrToArr
);
373 CALL_OPCODE(ConvCellToArr
);
375 CALL_OPCODE(ConvArrToBool
);
376 CALL_OPCODE(ConvStrToBool
);
377 CALL_OPCODE(ConvCellToBool
);
379 CALL_OPCODE(ConvArrToDbl
);
380 CALL_OPCODE(ConvObjToDbl
);
381 CALL_OPCODE(ConvStrToDbl
);
382 CALL_OPCODE(ConvCellToDbl
);
384 CALL_OPCODE(ConvArrToInt
);
385 CALL_OPCODE(ConvDblToInt
);
386 CALL_OPCODE(ConvObjToInt
);
387 CALL_OPCODE(ConvStrToInt
);
388 CALL_OPCODE(ConvCellToInt
);
390 CALL_OPCODE(ConvCellToObj
);
392 CALL_OPCODE(ConvDblToStr
);
393 CALL_OPCODE(ConvIntToStr
);
394 CALL_OPCODE(ConvObjToStr
);
395 CALL_OPCODE(ConvCellToStr
);
397 CALL_OPCODE(CreateCont
)
398 CALL_OPCODE(FillContLocals
)
399 CALL_OPCODE(NewArray
)
400 CALL_OPCODE(NewTuple
)
401 CALL_OPCODE(AllocObj
)
402 CALL_OPCODE(LdClsCtor
);
403 CALL_OPCODE(CreateCl
)
404 CALL_OPCODE(PrintStr
)
405 CALL_OPCODE(PrintInt
)
406 CALL_OPCODE(PrintBool
)
407 CALL_OPCODE(DbgAssertPtr
)
408 CALL_OPCODE(LdSwitchDblIndex
)
409 CALL_OPCODE(LdSwitchStrIndex
)
410 CALL_OPCODE(LdSwitchObjIndex
)
411 CALL_OPCODE(VerifyParamCallable
)
412 CALL_OPCODE(VerifyParamFail
)
413 CALL_OPCODE(RaiseUninitLoc
)
414 CALL_OPCODE(WarnNonObjProp
)
415 CALL_OPCODE(ThrowNonObjProp
)
416 CALL_OPCODE(RaiseUndefProp
)
417 CALL_OPCODE(RaiseError
)
418 CALL_OPCODE(RaiseWarning
)
419 CALL_OPCODE(IncStatGrouped
)
420 CALL_OPCODE(StaticLocInit
)
421 CALL_OPCODE(StaticLocInitCached
)
423 CALL_OPCODE(ArrayIdx
)
425 // Vector instruction helpers
428 CALL_STK_OPCODE(PropDX
)
429 CALL_OPCODE(CGetProp
)
430 CALL_STK_OPCODE(VGetProp
)
431 CALL_STK_OPCODE(BindProp
)
432 CALL_STK_OPCODE(SetProp
)
433 CALL_OPCODE(UnsetProp
)
434 CALL_STK_OPCODE(SetOpProp
)
435 CALL_STK_OPCODE(IncDecProp
)
436 CALL_OPCODE(EmptyProp
)
437 CALL_OPCODE(IssetProp
)
439 CALL_STK_OPCODE(ElemDX
)
440 CALL_STK_OPCODE(ElemUX
)
441 CALL_OPCODE(ArrayGet
)
442 CALL_OPCODE(VectorGet
)
444 CALL_OPCODE(CGetElem
)
445 CALL_STK_OPCODE(VGetElem
)
446 CALL_STK_OPCODE(BindElem
)
447 CALL_STK_OPCODE(SetWithRefElem
)
448 CALL_STK_OPCODE(SetWithRefNewElem
)
449 CALL_OPCODE(ArraySet
)
450 CALL_OPCODE(VectorSet
)
452 CALL_OPCODE(ArraySetRef
)
453 CALL_STK_OPCODE(SetElem
)
454 CALL_STK_OPCODE(UnsetElem
)
455 CALL_STK_OPCODE(SetOpElem
)
456 CALL_STK_OPCODE(IncDecElem
)
457 CALL_STK_OPCODE(SetNewElem
)
458 CALL_STK_OPCODE(BindNewElem
)
459 CALL_OPCODE(ArrayIsset
)
460 CALL_OPCODE(VectorIsset
)
461 CALL_OPCODE(MapIsset
)
462 CALL_OPCODE(IssetElem
)
463 CALL_OPCODE(EmptyElem
)
467 // Thread chain of patch locations using the 4 byte space in each jmp/jcc
468 static void prependPatchAddr(CodegenState
& state
,
471 auto &patches
= state
.patches
;
472 ssize_t diff
= patches
[block
] ? (patchAddr
- (TCA
)patches
[block
]) : 0;
473 assert(deltaFits(diff
, sz::dword
));
474 *(int32_t*)(patchAddr
) = (int32_t)diff
;
475 patches
[block
] = patchAddr
;
478 static void emitFwdJmp(Asm
& a
, Block
* target
, CodegenState
& state
) {
479 if (auto addr
= state
.addresses
[target
]) {
480 return a
.jmpAuto(addr
);
483 // TODO(#2101926): it'd be nice to get 1-byte forward jumps here
484 a
.jmp(a
.code
.frontier
);
485 TCA immPtr
= a
.code
.frontier
- 4;
486 prependPatchAddr(state
, target
, immPtr
);
489 void CodeGenerator::emitFwdJcc(Asm
& a
, ConditionCode cc
, Block
* target
) {
490 if (auto addr
= m_state
.addresses
[target
]) {
491 return a
.jccAuto(cc
, addr
);
494 // TODO(#2101926): it'd be nice to get 1-byte forward jumps here
495 a
.jcc(cc
, a
.code
.frontier
);
496 TCA immPtr
= a
.code
.frontier
- 4;
497 prependPatchAddr(m_state
, target
, immPtr
);
500 void CodeGenerator::emitFwdJcc(ConditionCode cc
, Block
* target
) {
501 emitFwdJcc(m_as
, cc
, target
);
504 void emitLoadImm(CodeGenerator::Asm
& as
, int64_t val
, PhysReg dstReg
) {
505 as
.emitImmReg(val
, dstReg
);
509 emitMovRegReg(CodeGenerator::Asm
& as
, PhysReg srcReg
, PhysReg dstReg
) {
510 assert(srcReg
!= InvalidReg
);
511 assert(dstReg
!= InvalidReg
);
513 if (srcReg
== dstReg
) return;
516 if (dstReg
.isGP()) { // GP => GP
517 as
.movq(srcReg
, dstReg
);
518 } else { // GP => XMM
519 // This generates a movq x86 instruction, which zero extends
520 // the 64-bit value in srcReg into a 128-bit XMM register
521 as
.mov_reg64_xmm(srcReg
, dstReg
);
524 if (dstReg
.isGP()) { // XMM => GP
525 as
.mov_xmm_reg64(srcReg
, dstReg
);
526 } else { // XMM => XMM
527 // This copies all 128 bits in XMM,
528 // thus avoiding partial register stalls
529 as
.movdqa(srcReg
, dstReg
);
534 void CodeGenerator::emitLoadImm(CodeGenerator::Asm
& as
, int64_t val
,
536 assert(dstReg
!= InvalidReg
);
538 as
.emitImmReg(val
, dstReg
);
540 assert(dstReg
.isXMM());
542 as
.pxor_xmm_xmm(dstReg
, dstReg
);
544 // Can't move immediate directly into XMM register, so use m_rScratch
545 as
.emitImmReg(val
, m_rScratch
);
546 emitMovRegReg(as
, m_rScratch
, dstReg
);
551 static void emitLea(CodeGenerator::Asm
& as
, MemoryRef mr
, PhysReg dst
) {
552 if (dst
== InvalidReg
) return;
553 if (mr
.r
.disp
== 0) {
554 emitMovRegReg(as
, mr
.r
.base
, dst
);
561 static void emitLoadReg(CodeGenerator::Asm
& as
, Mem mem
, PhysReg reg
) {
562 assert(reg
!= InvalidReg
);
571 static void emitStoreReg(CodeGenerator::Asm
& as
, PhysReg reg
, Mem mem
) {
572 assert(reg
!= InvalidReg
);
580 static void shuffle2(CodeGenerator::Asm
& a
,
581 PhysReg s0
, PhysReg s1
, PhysReg d0
, PhysReg d1
) {
583 if (d0
== s1
&& d1
!= InvalidReg
) {
588 a
. movq (s1
, d1
); // save s1 first; d1 != s0
592 if (d0
!= InvalidReg
) emitMovRegReg(a
, s0
, d0
); // d0 != s1
593 if (d1
!= InvalidReg
) emitMovRegReg(a
, s1
, d1
);
597 static void zeroExtendBool(X64Assembler
& as
, const RegisterInfo
& info
) {
598 auto reg
= info
.reg();
599 if (reg
!= InvalidReg
) {
600 // zero-extend the bool from a byte to a quad
601 // note: movzbl actually extends the value to 64 bits.
602 as
.movzbl(rbyte(reg
), r32(reg
));
606 static void zeroExtendIfBool(X64Assembler
& as
, const SSATmp
* src
,
607 const RegisterInfo
& info
) {
608 if (src
->isA(Type::Bool
)) {
609 zeroExtendBool(as
, info
);
613 static int64_t convIntToDouble(int64_t i
) {
623 * Returns a XMM register containing the value of SSATmp tmp,
624 * which can be either a bool, an int, or a double.
625 * If the value is already in a XMM register, simply returns it.
626 * Otherwise, the value is moved into rCgXMM, which is returned.
627 * If instructions to convert to a double at runtime are needed,
628 * they're emitted in 'as'.
630 PhysReg
CodeGenerator::prepXMMReg(const SSATmp
* tmp
,
632 const RegAllocInfo
& allocInfo
,
634 assert(tmp
->isA(Type::Bool
) || tmp
->isA(Type::Int
) || tmp
->isA(Type::Dbl
));
636 PhysReg reg
= allocInfo
[tmp
].reg();
638 // Case 1: tmp is already in a XMM register
639 if (reg
.isXMM()) return reg
;
641 // Case 2: tmp is in a GP register
642 if (reg
!= InvalidReg
) {
643 // Case 2.a: Dbl stored in GP reg
644 if (tmp
->isA(Type::Dbl
)) {
645 emitMovRegReg(as
, reg
, rCgXMM
);
648 // Case 2.b: Bool or Int stored in GP reg
649 assert(tmp
->isA(Type::Bool
) || tmp
->isA(Type::Int
));
650 zeroExtendIfBool(as
, tmp
, allocInfo
[tmp
]);
651 as
.pxor_xmm_xmm(rCgXMM
, rCgXMM
);
652 as
.cvtsi2sd_reg64_xmm(reg
, rCgXMM
);
656 // Case 3: tmp is a constant
657 assert(tmp
->isConst());
659 int64_t val
= tmp
->getValRawInt();
660 if (!tmp
->isA(Type::Dbl
)) {
661 assert(tmp
->isA(Type::Bool
| Type::Int
));
662 if (tmp
->isA(Type::Bool
)) val
= val
!= 0; // see task #2401790
663 val
= convIntToDouble(val
);
665 emitLoadImm(as
, val
, m_rScratch
);
666 emitMovRegReg(as
, m_rScratch
, rCgXMM
);
670 void CodeGenerator::doubleCmp(X64Assembler
& a
, RegXMM xmmReg0
, RegXMM xmmReg1
) {
671 a
. ucomisd_xmm_xmm(xmmReg0
, xmmReg1
);
674 // PF means the doubles were unordered. We treat this as !equal, so
676 a
. or_imm32_reg64(1, m_rScratch
);
680 static ConditionCode
opToConditionCode(Opcode opc
) {
681 using namespace HPHP::Transl
;
684 case JmpGt
: return CC_G
;
685 case JmpGte
: return CC_GE
;
686 case JmpLt
: return CC_L
;
687 case JmpLte
: return CC_LE
;
688 case JmpEq
: return CC_E
;
689 case JmpNeq
: return CC_NE
;
690 case JmpSame
: return CC_E
;
691 case JmpNSame
: return CC_NE
;
692 case JmpInstanceOfBitmask
: return CC_NZ
;
693 case JmpNInstanceOfBitmask
: return CC_Z
;
694 case JmpIsType
: return CC_NZ
;
695 case JmpIsNType
: return CC_Z
;
696 case JmpZero
: return CC_Z
;
697 case JmpNZero
: return CC_NZ
;
698 case ReqBindJmpGt
: return CC_G
;
699 case ReqBindJmpGte
: return CC_GE
;
700 case ReqBindJmpLt
: return CC_L
;
701 case ReqBindJmpLte
: return CC_LE
;
702 case ReqBindJmpEq
: return CC_E
;
703 case ReqBindJmpNeq
: return CC_NE
;
704 case ReqBindJmpSame
: return CC_E
;
705 case ReqBindJmpNSame
: return CC_NE
;
706 case ReqBindJmpInstanceOfBitmask
: return CC_NZ
;
707 case ReqBindJmpNInstanceOfBitmask
: return CC_Z
;
708 case ReqBindJmpZero
: return CC_Z
;
709 case ReqBindJmpNZero
: return CC_NZ
;
715 void CodeGenerator::emitCompare(SSATmp
* src1
, SSATmp
* src2
) {
716 auto const src1Type
= src1
->type();
717 auto const src2Type
= src2
->type();
719 // can't generate CMP instructions correctly for anything that isn't
720 // a bool or a numeric, and we can't mix bool/numerics because
721 // -1 == true in PHP, but not in HHIR binary representation
722 if (!((src1Type
== Type::Int
&& src2Type
== Type::Int
) ||
723 ((src1Type
== Type::Int
|| src1Type
== Type::Dbl
) &&
724 (src2Type
== Type::Int
|| src2Type
== Type::Dbl
)) ||
725 (src1Type
== Type::Bool
&& src2Type
== Type::Bool
) ||
726 (src1Type
== Type::Cls
&& src2Type
== Type::Cls
))) {
727 CG_PUNT(emitCompare
);
729 if (src1Type
== Type::Dbl
|| src2Type
== Type::Dbl
) {
730 PhysReg srcReg1
= prepXMMReg(src1
, m_as
, m_regs
, rCgXMM0
);
731 PhysReg srcReg2
= prepXMMReg(src2
, m_as
, m_regs
, rCgXMM1
);
732 assert(srcReg1
!= rCgXMM1
&& srcReg2
!= rCgXMM0
);
733 doubleCmp(m_as
, srcReg1
, srcReg2
);
735 auto srcReg1
= m_regs
[src1
].reg();
736 auto srcReg2
= m_regs
[src2
].reg();
738 // Note: when both src1 and src2 are constants, we should transform the
739 // branch into an unconditional jump earlier in the IR.
740 if (src1
->isConst()) {
741 // TODO: use compare with immediate or make sure simplifier
742 // canonicalizes this so that constant is src2
743 srcReg1
= m_rScratch
;
744 m_as
. mov_imm64_reg(src1
->getValRawInt(), srcReg1
);
746 if (src2
->isConst()) {
747 if (src1Type
.subtypeOf(Type::Bool
)) {
748 m_as
. cmpb (src2
->getValRawInt(), Reg8(int(srcReg1
)));
750 m_as
. cmp_imm64_reg64(src2
->getValRawInt(), srcReg1
);
753 // Note the reverse syntax in the assembler.
754 // This cmp will compute srcReg1 - srcReg2
755 if (src1Type
.subtypeOf(Type::Bool
)) {
756 m_as
. cmpb (Reg8(int(srcReg2
)), Reg8(int(srcReg1
)));
758 m_as
. cmp_reg64_reg64(srcReg2
, srcReg1
);
764 void CodeGenerator::emitReqBindJcc(ConditionCode cc
,
765 const ReqBindJccData
* extra
) {
767 assert(&m_as
!= &m_astubs
&&
768 "ReqBindJcc only makes sense outside of astubs");
770 prepareForTestAndSmash(a
, 0, kAlignJccAndJmp
);
771 auto const patchAddr
= a
.code
.frontier
;
773 auto const jccStub
= m_astubs
.code
.frontier
;
776 // TODO(#2404398): move the setcc into the generic stub code so we
777 // don't need SRFlags::Persistent.
778 a
. setcc (cc
, rbyte(serviceReqArgRegs
[4]));
779 m_tx64
->emitServiceReq(
781 REQ_BIND_JMPCC_FIRST
,
784 uint64_t(extra
->taken
),
785 uint64_t(extra
->notTaken
),
790 a
. jcc (cc
, jccStub
);
794 void CodeGenerator::cgAssertNonNull(IRInstruction
* inst
) {
795 auto srcReg
= m_regs
[inst
->src(0)].reg();
796 auto dstReg
= m_regs
[inst
->dst()].reg();
797 if (RuntimeOption::EvalHHIRGenerateAsserts
) {
799 m_as
.testq (srcReg
, srcReg
);
802 asm_label(m_as
, nonNull
);
804 emitMovRegReg(m_as
, srcReg
, dstReg
);
807 void CodeGenerator::cgLdUnwinderValue(IRInstruction
* inst
) {
808 cgLoad(rVmTl
, TargetCache::kUnwinderTvOff
, inst
);
811 void CodeGenerator::cgBeginCatch(IRInstruction
* inst
) {
812 auto const& info
= m_state
.catches
[inst
->block()];
813 assert(info
.afterCall
);
815 m_tx64
->registerCatchTrace(info
.afterCall
, m_as
.code
.frontier
);
817 Stats::emitInc(m_as
, Stats::TC_CatchTrace
);
819 // We want to restore state as though the call had completed
820 // successfully, so skip over any stack arguments and pop any
822 if (info
.rspOffset
) {
823 m_as
.addq(info
.rspOffset
, rsp
);
825 PhysRegSaverParity::emitPops(m_as
, info
.savedRegs
);
828 void CodeGenerator::cgEndCatch(IRInstruction
* inst
) {
829 m_as
.cmpb (0, rVmTl
[TargetCache::kUnwinderSideExitOff
]);
830 unlikelyIfBlock(CC_E
,
831 [&](Asm
& as
) { // doSideExit == false, so call _Unwind_Resume
832 as
.loadq(rVmTl
[TargetCache::kUnwinderScratchOff
], rdi
);
833 as
.call ((TCA
)_Unwind_Resume
); // pass control back to the unwinder
837 // doSideExit == true, so fall through to the side exit code
838 Stats::emitInc(m_as
, Stats::TC_CatchSideExit
);
841 void CodeGenerator::cgDeleteUnwinderException(IRInstruction
* inst
) {
842 m_as
.loadq(rVmTl
[TargetCache::kUnwinderScratchOff
], rdi
);
843 m_as
.call ((TCA
)_Unwind_DeleteException
);
846 void CodeGenerator::cgJcc(IRInstruction
* inst
) {
847 emitCompare(inst
->src(0), inst
->src(1));
848 emitFwdJcc(opToConditionCode(inst
->op()), inst
->taken());
851 void CodeGenerator::cgReqBindJcc(IRInstruction
* inst
) {
852 // TODO(#2404427): prepareForTestAndSmash?
853 emitCompare(inst
->src(0), inst
->src(1));
854 emitReqBindJcc(opToConditionCode(inst
->op()),
855 inst
->extra
<ReqBindJccData
>());
859 void CodeGenerator::cgReqBind##x(IRInstruction* i) { cgReqBindJcc(i); } \
860 void CodeGenerator::cg##x (IRInstruction* i) { cgJcc(i); }
875 * Once the arg sources and dests are all assigned; emit moves and exchanges to
876 * put all the args in desired registers. Any arguments that don't fit in
877 * registers will be put on the stack. In addition to moves and exchanges,
878 * shuffleArgs also handles adding lea-offsets for dest registers (dest = src +
879 * lea-offset) and zero extending bools (dest = zeroExtend(src)).
881 typedef Transl::X64Assembler Asm
;
882 static int64_t shuffleArgs(Asm
& a
, ArgGroup
& args
) {
883 // Compute the move/shuffle plan.
885 ArgDesc
* argDescs
[kNumRegs
];
886 memset(moves
, -1, sizeof moves
);
887 memset(argDescs
, 0, sizeof argDescs
);
888 for (size_t i
= 0; i
< args
.numRegArgs(); ++i
) {
889 auto kind
= args
[i
].kind();
890 if (!(kind
== ArgDesc::Reg
||
891 kind
== ArgDesc::Addr
||
892 kind
== ArgDesc::TypeReg
)) {
895 auto dstReg
= args
[i
].dstReg();
896 auto srcReg
= args
[i
].srcReg();
897 if (dstReg
!= srcReg
) {
898 moves
[int(dstReg
)] = int(srcReg
);
899 argDescs
[int(dstReg
)] = &args
[i
];
902 std::vector
<MoveInfo
> howTo
;
903 doRegMoves(moves
, int(rCgGP
), howTo
);
906 for (size_t i
= 0; i
< howTo
.size(); ++i
) {
907 if (howTo
[i
].m_kind
== MoveInfo::Move
) {
908 if (howTo
[i
].m_reg2
== rCgGP
) {
909 emitMovRegReg(a
, howTo
[i
].m_reg1
, howTo
[i
].m_reg2
);
911 ArgDesc
* argDesc
= argDescs
[int(howTo
[i
].m_reg2
)];
912 ArgDesc::Kind kind
= argDesc
->kind();
913 if (kind
== ArgDesc::Reg
|| kind
== ArgDesc::TypeReg
) {
914 if (argDesc
->isZeroExtend()) {
915 assert(howTo
[i
].m_reg1
.isGP());
916 assert(howTo
[i
].m_reg2
.isGP());
917 a
. movzbl (rbyte(howTo
[i
].m_reg1
), r32(howTo
[i
].m_reg2
));
919 emitMovRegReg(a
, howTo
[i
].m_reg1
, howTo
[i
].m_reg2
);
922 assert(kind
== ArgDesc::Addr
);
923 assert(howTo
[i
].m_reg1
.isGP());
924 assert(howTo
[i
].m_reg2
.isGP());
925 a
. lea (howTo
[i
].m_reg1
[argDesc
->imm().q()],
928 if (kind
!= ArgDesc::TypeReg
) {
933 assert(howTo
[i
].m_reg1
.isGP());
934 assert(howTo
[i
].m_reg2
.isGP());
935 a
. xchgq (howTo
[i
].m_reg1
, howTo
[i
].m_reg2
);
938 // Handle const-to-register moves, type shifting,
939 // load-effective address and zero extending for bools.
940 // Ignore args that have been handled by the
942 for (size_t i
= 0; i
< args
.numRegArgs(); ++i
) {
943 if (!args
[i
].done()) {
944 ArgDesc::Kind kind
= args
[i
].kind();
945 PhysReg dst
= args
[i
].dstReg();
947 if (kind
== ArgDesc::Imm
) {
948 a
.emitImmReg(args
[i
].imm().q(), dst
);
949 } else if (kind
== ArgDesc::TypeReg
) {
950 a
. shlq (kTypeShiftBits
, dst
);
951 } else if (kind
== ArgDesc::Addr
) {
952 a
. addq (args
[i
].imm(), dst
);
953 } else if (args
[i
].isZeroExtend()) {
954 a
. movzbl (rbyte(dst
), r32(dst
));
955 } else if (RuntimeOption::EvalHHIRGenerateAsserts
&&
956 kind
== ArgDesc::None
) {
957 a
.emitImmReg(0xbadbadbadbadbad, dst
);
962 // Store any remaining arguments to the stack
963 for (int i
= args
.numStackArgs() - 1; i
>= 0; --i
) {
964 auto& arg
= args
.stk(i
);
965 auto srcReg
= arg
.srcReg();
966 assert(arg
.dstReg() == InvalidReg
);
967 switch (arg
.kind()) {
969 if (arg
.isZeroExtend()) {
970 a
. movzbl(rbyte(srcReg
), r32(rCgGP
));
973 if (srcReg
.isXMM()) {
974 emitMovRegReg(a
, srcReg
, rCgGP
);
982 case ArgDesc::TypeReg
:
983 static_assert(kTypeWordOffset
== 4 || kTypeWordOffset
== 1,
984 "kTypeWordOffset value not supported");
985 assert(srcReg
.isGP());
986 // x86 stacks grow down, so push higher offset items first
987 if (kTypeWordOffset
== 4) {
988 a
. pushl(r32(srcReg
));
989 // 4 bytes of garbage:
992 // 4 bytes of garbage:
994 // get the type in the right place in rCgGP before pushing it
995 a
. movb (rbyte(srcReg
), rbyte(rCgGP
));
996 a
. shll (CHAR_BIT
, r32(rCgGP
));
997 a
. pushl(r32(rCgGP
));
1002 a
. emitImmReg(arg
.imm(), rCgGP
);
1011 if (RuntimeOption::EvalHHIRGenerateAsserts
) {
1012 a
. storeq(0xbadbadbadbadbad, *rsp
);
1017 return args
.numStackArgs() * sizeof(int64_t);
1020 void CodeGenerator::cgCallNative(Asm
& a
, IRInstruction
* inst
) {
1021 using namespace NativeCalls
;
1022 Opcode opc
= inst
->op();
1023 always_assert(CallMap::hasInfo(opc
));
1025 const CallInfo
& info
= CallMap::info(opc
);
1026 ArgGroup
argGroup(m_regs
);
1027 for (auto const& arg
: info
.args
) {
1028 SSATmp
* src
= inst
->src(arg
.srcIdx
);
1034 argGroup
.typedValue(src
);
1037 argGroup
.vectorKeyS(src
);
1040 argGroup
.vectorKeyIS(src
);
1043 always_assert(0 && "We can't generate a native call for this");
1049 switch (info
.func
.type
) {
1051 addr
= info
.func
.ptr
;
1054 addr
= inst
->src(info
.func
.srcIdx
)->getValTCA();
1059 info
.dest
!= DestType::None
? inst
->dst(0) : nullptr,
1065 void CodeGenerator::cgCallHelper(Asm
& a
,
1070 DestType destType
) {
1071 PhysReg dstReg0
= InvalidReg
;
1072 PhysReg dstReg1
= InvalidReg
;
1074 auto &info
= m_regs
[dst
];
1075 dstReg0
= info
.reg(0);
1076 dstReg1
= info
.reg(1);
1078 return cgCallHelper(a
, Transl::CppCall(addr
), dstReg0
, dstReg1
, sync
, args
,
1082 void CodeGenerator::cgCallHelper(Asm
& a
,
1087 DestType destType
) {
1088 cgCallHelper(a
, Transl::CppCall(addr
), dstReg
, InvalidReg
, sync
, args
,
1092 void CodeGenerator::cgCallHelper(Asm
& a
,
1093 const Transl::CppCall
& call
,
1097 DestType destType
) {
1098 cgCallHelper(a
, call
, dstReg
, InvalidReg
, sync
, args
, destType
);
1101 void CodeGenerator::cgCallHelper(Asm
& a
,
1102 const Transl::CppCall
& call
,
1107 DestType destType
) {
1108 cgCallHelper(a
, call
, dstReg0
, dstReg1
, sync
, args
,
1109 m_state
.liveRegs
[m_curInst
], destType
);
1112 void CodeGenerator::cgCallHelper(Asm
& a
,
1113 const Transl::CppCall
& call
,
1119 DestType destType
) {
1120 assert(m_curInst
->isNative());
1122 // Save the caller-saved registers that are live across this
1123 // instruction. The number of regs to save and the number of args
1124 // being passed on the stack affect the parity of the PhysRegSaver,
1125 // so we use the generic version here.
1126 toSave
= toSave
& kCallerSaved
;
1127 assert((toSave
& RegSet().add(dstReg0
).add(dstReg1
)).empty());
1128 PhysRegSaverParity
regSaver(1 + args
.numStackArgs(), a
, toSave
);
1130 // Assign registers to the arguments then prepare them for the call.
1131 for (size_t i
= 0; i
< args
.numRegArgs(); i
++) {
1132 args
[i
].setDstReg(argNumToRegName
[i
]);
1134 regSaver
.bytesPushed(shuffleArgs(a
, args
));
1136 // do the call; may use a trampoline
1137 m_tx64
->emitCall(a
, call
);
1138 if (sync
!= kNoSyncPoint
) {
1139 recordSyncPoint(a
, sync
);
1142 if (m_state
.catchTrace
) {
1143 auto& info
= m_state
.catches
[m_state
.catchTrace
->front()];
1144 assert(!info
.afterCall
);
1145 info
.afterCall
= a
.code
.frontier
;
1146 info
.savedRegs
= toSave
;
1147 info
.rspOffset
= regSaver
.rspAdjustment();
1150 // copy the call result to the destination register(s)
1151 if (destType
== DestType::TV
) {
1152 // rax contains m_type and m_aux but we're expecting just the
1153 // type in the lower bits, so shift the type result register.
1154 auto rval
= packed_tv
? reg::rdx
: reg::rax
;
1155 auto rtyp
= packed_tv
? reg::rax
: reg::rdx
;
1156 if (kTypeShiftBits
> 0) a
.shrq(kTypeShiftBits
, rtyp
);
1157 shuffle2(a
, rval
, rtyp
, dstReg0
, dstReg1
);
1158 } else if (destType
== DestType::SSA
) {
1159 // copy the single-register result to dstReg0
1160 assert(dstReg1
== InvalidReg
);
1161 if (dstReg0
!= InvalidReg
) emitMovRegReg(a
, reg::rax
, dstReg0
);
1163 // void return type, no registers have values
1164 assert(dstReg0
== InvalidReg
&& dstReg1
== InvalidReg
);
1169 * This doesn't really produce any code; it just keeps track of the current
1172 void CodeGenerator::cgMarker(IRInstruction
* inst
) {
1173 m_curBcOff
= inst
->extra
<MarkerData
>()->bcOff
;
1176 void CodeGenerator::cgMov(IRInstruction
* inst
) {
1177 assert(!m_regs
[inst
->src(0)].hasReg(1));//TODO: t2082361: handle Gen & Cell
1178 SSATmp
* dst
= inst
->dst();
1179 SSATmp
* src
= inst
->src(0);
1180 auto dstReg
= m_regs
[dst
].reg();
1181 if (!m_regs
[src
].hasReg(0)) {
1182 assert(src
->isConst());
1183 if (src
->type() == Type::Bool
) {
1184 emitLoadImm(m_as
, (int64_t)src
->getValBool(), dstReg
);
1186 emitLoadImm(m_as
, src
->getValRawInt(), dstReg
);
1189 auto srcReg
= m_regs
[src
].reg();
1190 emitMovRegReg(m_as
, srcReg
, dstReg
);
1194 template<class OpInstr
, class Oper
>
1195 void CodeGenerator::cgUnaryIntOp(SSATmp
* dst
,
1199 if (src
->type() != Type::Int
&& src
->type() != Type::Bool
) {
1200 assert(0); CG_PUNT(UnaryIntOp
);
1202 auto dstReg
= m_regs
[dst
].reg();
1203 auto srcReg
= m_regs
[src
].reg();
1204 assert(dstReg
!= InvalidReg
);
1207 // Integer operations require 64-bit representations
1208 zeroExtendIfBool(a
, src
, m_regs
[src
]);
1210 if (srcReg
!= InvalidReg
) {
1211 emitMovRegReg(a
, srcReg
, dstReg
);
1212 (a
.*instr
) (dstReg
);
1214 assert(src
->isConst());
1215 emitLoadImm(a
, oper(src
->getValRawInt()), dstReg
);
1219 void CodeGenerator::cgNegateWork(SSATmp
* dst
, SSATmp
* src
) {
1220 cgUnaryIntOp(dst
, src
, &Asm::neg
, [](int64_t i
) { return -i
; });
1223 inline static Reg8
convertToReg8(PhysReg reg
) { return rbyte(reg
); }
1224 inline static Reg64
convertToReg64(PhysReg reg
) { return reg
; }
1226 template<class Oper
, class RegType
>
1227 void CodeGenerator::cgBinaryIntOp(IRInstruction
* inst
,
1228 void (Asm::*instrIR
)(Immed
, RegType
),
1229 void (Asm::*instrRR
)(RegType
, RegType
),
1230 void (Asm::*movInstr
)(RegType
, RegType
),
1232 RegType (*convertReg
)(PhysReg
),
1233 Commutativity commuteFlag
) {
1234 const SSATmp
* dst
= inst
->dst();
1235 const SSATmp
* src1
= inst
->src(0);
1236 const SSATmp
* src2
= inst
->src(1);
1237 if (!(src1
->isA(Type::Bool
) || src1
->isA(Type::Int
)) ||
1238 !(src2
->isA(Type::Bool
) || src2
->isA(Type::Int
))) {
1239 CG_PUNT(cgBinaryIntOp
);
1242 bool const commutative
= commuteFlag
== Commutative
;
1243 auto const dstReg
= m_regs
[dst
].reg();
1244 auto const src1Reg
= m_regs
[src1
].reg();
1245 auto const src2Reg
= m_regs
[src2
].reg();
1248 auto const dstOpReg
= convertReg(dstReg
);
1249 auto const src1OpReg
= convertReg(src1Reg
);
1250 auto const src2OpReg
= convertReg(src2Reg
);
1251 auto const rOpScratch
= convertReg(m_rScratch
);
1254 if (src1Reg
!= InvalidReg
&& src2Reg
!= InvalidReg
) {
1255 if (dstReg
== src1Reg
) {
1256 (a
.*instrRR
) (src2OpReg
, dstOpReg
);
1257 } else if (dstReg
== src2Reg
) {
1259 (a
.*instrRR
) (src1OpReg
, dstOpReg
);
1261 (a
.*movInstr
)(src1OpReg
, rOpScratch
);
1262 (a
.*instrRR
) (src2OpReg
, rOpScratch
);
1263 (a
.*movInstr
)(rOpScratch
, dstOpReg
);
1266 emitMovRegReg(a
, src1Reg
, dstReg
);
1267 (a
.*instrRR
) (src2OpReg
, dstOpReg
);
1273 if (src1Reg
== InvalidReg
&& src2Reg
== InvalidReg
) {
1274 assert(src1
->isConst() && src2
->isConst());
1275 int64_t value
= oper(src1
->getValRawInt(), src2
->getValRawInt());
1276 emitLoadImm(a
, value
, dstReg
);
1280 // One register, and one immediate.
1282 auto immedSrc
= (src2Reg
== InvalidReg
? src2
: src1
);
1283 auto immed
= immedSrc
->getValRawInt();
1284 auto srcReg
= m_regs
[(src2Reg
== InvalidReg
? src1
: src2
)].reg();
1285 if (srcReg
== dstReg
) {
1286 (a
.*instrIR
) (immed
, dstOpReg
);
1288 emitLoadImm(a
, immed
, dstReg
);
1289 (a
.*instrRR
) (convertReg(srcReg
), dstOpReg
);
1295 if (src1Reg
== InvalidReg
) {
1296 if (dstReg
== src2Reg
) {
1297 emitLoadImm(a
, src1
->getValRawInt(), m_rScratch
);
1298 (a
.*instrRR
) (src2OpReg
, rOpScratch
);
1299 (a
.*movInstr
)(rOpScratch
, dstOpReg
);
1301 emitLoadImm(a
, src1
->getValRawInt(), dstReg
);
1302 (a
.*instrRR
) (src2OpReg
, dstOpReg
);
1307 assert(src2Reg
== InvalidReg
);
1308 emitMovRegReg(a
, src1Reg
, dstReg
);
1309 (a
.*instrIR
) (src2
->getValRawInt(), dstOpReg
);
1312 template<class Oper
, class RegType
>
1313 void CodeGenerator::cgBinaryOp(IRInstruction
* inst
,
1314 void (Asm::*instrIR
)(Immed
, RegType
),
1315 void (Asm::*instrRR
)(RegType
, RegType
),
1316 void (Asm::*movInstr
)(RegType
, RegType
),
1317 void (Asm::*fpInstr
)(RegXMM
, RegXMM
),
1319 RegType (*convertReg
)(PhysReg
),
1320 Commutativity commuteFlag
) {
1321 const SSATmp
* dst
= inst
->dst();
1322 const SSATmp
* src1
= inst
->src(0);
1323 const SSATmp
* src2
= inst
->src(1);
1324 if (!(src1
->isA(Type::Bool
) || src1
->isA(Type::Int
) || src1
->isA(Type::Dbl
))
1326 !(src2
->isA(Type::Bool
) || src2
->isA(Type::Int
) || src2
->isA(Type::Dbl
)) )
1328 CG_PUNT(cgBinaryOp
);
1330 if (src1
->isA(Type::Dbl
) || src2
->isA(Type::Dbl
)) {
1331 PhysReg dstReg
= m_regs
[dst
].reg();
1332 PhysReg resReg
= dstReg
.isXMM() && dstReg
!= m_regs
[src2
].reg() ?
1333 dstReg
: PhysReg(rCgXMM0
);
1334 assert(resReg
.isXMM());
1336 PhysReg srcReg1
= prepXMMReg(src1
, m_as
, m_regs
, resReg
);
1337 PhysReg srcReg2
= prepXMMReg(src2
, m_as
, m_regs
, rCgXMM1
);
1338 assert(srcReg1
!= rCgXMM1
&& srcReg2
!= rCgXMM0
);
1340 emitMovRegReg(m_as
, srcReg1
, resReg
);
1342 (m_as
.*fpInstr
)(srcReg2
, resReg
);
1344 emitMovRegReg(m_as
, resReg
, dstReg
);
1347 cgBinaryIntOp(inst
, instrIR
, instrRR
, movInstr
,
1348 oper
, convertReg
, commuteFlag
);
1351 bool CodeGenerator::emitIncDecHelper(SSATmp
* dst
, SSATmp
* src1
, SSATmp
* src2
,
1352 void(Asm::*emitFunc
)(Reg64
)) {
1353 if (m_regs
[src1
].reg() != InvalidReg
&&
1354 m_regs
[dst
].reg() != InvalidReg
&&
1355 src1
->isA(Type::Int
) &&
1357 src2
->isConst() && src2
->isA(Type::Int
) && src2
->getValInt() == 1) {
1358 emitMovRegReg(m_as
, m_regs
[src1
].reg(), m_regs
[dst
].reg());
1359 (m_as
.*emitFunc
)(m_regs
[dst
].reg());
1366 * If src2 is 1, this generates dst = src1 + 1 using the "inc" x86 instruction.
1367 * The return value is whether or not the instruction could be generated.
1369 bool CodeGenerator::emitInc(SSATmp
* dst
, SSATmp
* src1
, SSATmp
* src2
) {
1370 return emitIncDecHelper(dst
, src1
, src2
, &Asm::incq
);
1374 * If src2 is 1, this generates dst = src1 - 1 using the "dec" x86 instruction.
1375 * The return value is whether or not the instruction could be generated.
1377 bool CodeGenerator::emitDec(SSATmp
* dst
, SSATmp
* src1
, SSATmp
* src2
) {
1378 return emitIncDecHelper(dst
, src1
, src2
, &Asm::decq
);
1381 void CodeGenerator::cgOpAdd(IRInstruction
* inst
) {
1382 SSATmp
* dst
= inst
->dst();
1383 SSATmp
* src1
= inst
->src(0);
1384 SSATmp
* src2
= inst
->src(1);
1386 // Special cases: x = y + 1
1387 if (emitInc(dst
, src1
, src2
) || emitInc(dst
, src2
, src1
)) return;
1393 &Asm::addsd_xmm_xmm
,
1394 std::plus
<int64_t>(),
1399 void CodeGenerator::cgOpSub(IRInstruction
* inst
) {
1400 SSATmp
* dst
= inst
->dst();
1401 SSATmp
* src1
= inst
->src(0);
1402 SSATmp
* src2
= inst
->src(1);
1404 if (emitDec(dst
, src1
, src2
)) return;
1406 if (src1
->isConst() && src1
->isA(Type::Int
) && src1
->getValInt() == 0 &&
1407 src2
->isA(Type::Int
)) {
1408 cgNegateWork(dst
, src2
);
1416 &Asm::subsd_xmm_xmm
,
1417 std::minus
<int64_t>(),
1422 void CodeGenerator::cgOpDiv(IRInstruction
* inst
) {
1426 void CodeGenerator::cgOpBitAnd(IRInstruction
* inst
) {
1431 [] (int64_t a
, int64_t b
) { return a
& b
; },
1436 void CodeGenerator::cgOpBitOr(IRInstruction
* inst
) {
1441 [] (int64_t a
, int64_t b
) { return a
| b
; },
1446 void CodeGenerator::cgOpBitXor(IRInstruction
* inst
) {
1451 [] (int64_t a
, int64_t b
) { return a
^ b
; },
1456 void CodeGenerator::cgOpBitNot(IRInstruction
* inst
) {
1457 cgUnaryIntOp(inst
->dst(),
1460 [](int64_t i
) { return ~i
; });
1463 void CodeGenerator::cgOpLogicXor(IRInstruction
* inst
) {
1468 [] (bool a
, bool b
) { return a
^ b
; },
1473 void CodeGenerator::cgOpMul(IRInstruction
* inst
) {
1478 &Asm::mulsd_xmm_xmm
,
1479 std::multiplies
<int64_t>(),
1484 void CodeGenerator::cgOpNot(IRInstruction
* inst
) {
1485 auto const src
= inst
->src(0);
1486 auto const dstReg
= m_regs
[inst
->dst()].reg();
1489 if (src
->isConst()) {
1490 a
. movb (!src
->getValBool(), rbyte(dstReg
));
1492 if (dstReg
!= m_regs
[src
].reg()) {
1493 a
. movb (rbyte(m_regs
[src
].reg()), rbyte(dstReg
));
1495 a
. xorb (1, rbyte(dstReg
));
1499 ///////////////////////////////////////////////////////////////////////////////
1500 // Comparison Operators
1501 ///////////////////////////////////////////////////////////////////////////////
1503 #define DISPATCHER(name) \
1504 HOT_FUNC_VM int64_t ccmp_ ## name (StringData* a1, StringData* a2) \
1505 { return name(a1, a2); } \
1506 HOT_FUNC_VM int64_t ccmp_ ## name (StringData* a1, int64_t a2) \
1507 { return name(a1, a2); } \
1508 HOT_FUNC_VM int64_t ccmp_ ## name (StringData* a1, ObjectData* a2) \
1509 { return name(a1, Object(a2)); } \
1510 HOT_FUNC_VM int64_t ccmp_ ## name (ObjectData* a1, ObjectData* a2) \
1511 { return name(Object(a1), Object(a2)); } \
1512 HOT_FUNC_VM int64_t ccmp_ ## name (ObjectData* a1, int64_t a2) \
1513 { return name(Object(a1), a2); } \
1514 HOT_FUNC_VM int64_t ccmp_ ## name (ArrayData* a1, ArrayData* a2) \
1515 { return name(Array(a1), Array(a2)); }
1524 template <typename A
, typename B
>
1525 inline int64_t ccmp_nsame(A a
, B b
) { return !ccmp_same(a
, b
); }
1527 template <typename A
, typename B
>
1528 inline int64_t ccmp_nequal(A a
, B b
) { return !ccmp_equal(a
, b
); }
1530 template <typename A
, typename B
>
1531 inline int64_t ccmp_lte(A a
, B b
) { return !ccmp_more(a
, b
); }
1533 template <typename A
, typename B
>
1534 inline int64_t ccmp_gte(A a
, B b
) { return !ccmp_less(a
, b
); }
1536 #define CG_OP_CMP(inst, setter, name) \
1537 cgOpCmpHelper(inst, &Asm:: setter, ccmp_ ## name, ccmp_ ## name, \
1538 ccmp_ ## name, ccmp_ ## name, ccmp_ ## name, ccmp_ ## name)
1540 // SRON - string, resource, object, or number
1541 static bool typeIsSRON(Type t
) {
1543 || t
== Type::Obj
// encompases object and resource
1549 void CodeGenerator::cgOpCmpHelper(
1550 IRInstruction
* inst
,
1551 void (Asm::*setter
)(Reg8
),
1552 int64_t (*str_cmp_str
)(StringData
*, StringData
*),
1553 int64_t (*str_cmp_int
)(StringData
*, int64_t),
1554 int64_t (*str_cmp_obj
)(StringData
*, ObjectData
*),
1555 int64_t (*obj_cmp_obj
)(ObjectData
*, ObjectData
*),
1556 int64_t (*obj_cmp_int
)(ObjectData
*, int64_t),
1557 int64_t (*arr_cmp_arr
)( ArrayData
*, ArrayData
*)
1559 SSATmp
* dst
= inst
->dst();
1560 SSATmp
* src1
= inst
->src(0);
1561 SSATmp
* src2
= inst
->src(1);
1563 Type type1
= src1
->type();
1564 Type type2
= src2
->type();
1566 auto src1Reg
= m_regs
[src1
].reg();
1567 auto src2Reg
= m_regs
[src2
].reg();
1568 auto dstReg
= m_regs
[dst
].reg();
1570 auto setFromFlags
= [&] {
1571 (m_as
.*setter
)(rbyte(dstReg
));
1573 // It is possible that some pass has been done after simplification; if such
1574 // a pass invalidates our invariants, then just punt.
1576 // simplifyCmp has done const-const optimization
1578 // If the types are the same and there is only one constant,
1579 // simplifyCmp has moved it to the right.
1580 if (src1
->isConst()) {
1581 CG_PUNT(cgOpCmpHelper_const
);
1584 /////////////////////////////////////////////////////////////////////////////
1585 // case 1: null/string cmp string
1586 // simplifyCmp has converted the null to ""
1587 if (type1
.isString() && type2
.isString()) {
1588 ArgGroup
args(m_regs
);
1589 args
.ssa(src1
).ssa(src2
);
1590 cgCallHelper(m_as
, (TCA
)str_cmp_str
, dst
, kSyncPoint
, args
);
1593 /////////////////////////////////////////////////////////////////////////////
1594 // case 2: bool/null cmp anything
1595 // simplifyCmp has converted all args to bool
1596 else if (type1
== Type::Bool
&& type2
== Type::Bool
) {
1597 if (src2
->isConst()) {
1598 m_as
. cmpb (src2
->getValBool(), Reg8(int(src1Reg
)));
1600 m_as
. cmpb (Reg8(int(src2Reg
)), Reg8(int(src1Reg
)));
1605 /////////////////////////////////////////////////////////////////////////////
1606 // case 3, 4, and 7: string/resource/object/number (sron) cmp sron
1607 // These cases must be amalgamated because Type::Obj can refer to an object
1608 // or to a resource.
1609 // strings are canonicalized to the left, ints to the right
1610 else if (typeIsSRON(type1
) && typeIsSRON(type2
)) {
1611 // the common case: int cmp int
1612 if (type1
== Type::Int
&& type2
== Type::Int
) {
1613 if (src2
->isConst()) {
1614 m_as
.cmp_imm64_reg64(src2
->getValInt(), src1Reg
);
1616 m_as
.cmp_reg64_reg64(src2Reg
, src1Reg
);
1621 else if (type1
== Type::Dbl
|| type2
== Type::Dbl
) {
1622 if ((type1
== Type::Dbl
|| type1
== Type::Int
) &&
1623 (type2
== Type::Dbl
|| type2
== Type::Int
)) {
1624 PhysReg srcReg1
= prepXMMReg(src1
, m_as
, m_regs
, rCgXMM0
);
1625 PhysReg srcReg2
= prepXMMReg(src2
, m_as
, m_regs
, rCgXMM1
);
1626 assert(srcReg1
!= rCgXMM1
&& srcReg2
!= rCgXMM0
);
1627 doubleCmp(m_as
, srcReg1
, srcReg2
);
1630 CG_PUNT(cgOpCmpHelper_Dbl
);
1634 else if (type1
.isString()) {
1635 // string cmp string is dealt with in case 1
1636 // string cmp double is punted above
1638 if (type2
== Type::Int
) {
1639 ArgGroup
args(m_regs
);
1640 args
.ssa(src1
).ssa(src2
);
1641 cgCallHelper(m_as
, (TCA
)str_cmp_int
, dst
, kSyncPoint
, args
);
1642 } else if (type2
== Type::Obj
) {
1643 ArgGroup
args(m_regs
);
1644 args
.ssa(src1
).ssa(src2
);
1645 cgCallHelper(m_as
, (TCA
)str_cmp_obj
, dst
, kSyncPoint
, args
);
1647 CG_PUNT(cgOpCmpHelper_sx
);
1651 else if (type1
== Type::Obj
) {
1652 // string cmp object/resource is dealt with above
1653 // object cmp double is punted above
1655 if (type2
== Type::Obj
) {
1656 ArgGroup
args(m_regs
);
1657 args
.ssa(src1
).ssa(src2
);
1658 cgCallHelper(m_as
, (TCA
)obj_cmp_obj
, dst
, kSyncPoint
, args
);
1659 } else if (type2
== Type::Int
) {
1660 ArgGroup
args(m_regs
);
1661 args
.ssa(src1
).ssa(src2
);
1662 cgCallHelper(m_as
, (TCA
)obj_cmp_int
, dst
, kSyncPoint
, args
);
1664 CG_PUNT(cgOpCmpHelper_ox
);
1671 /////////////////////////////////////////////////////////////////////////////
1672 // case 5: array cmp array
1673 else if (type1
.isArray() && type2
.isArray()) {
1674 ArgGroup
args(m_regs
);
1675 args
.ssa(src1
).ssa(src2
);
1676 cgCallHelper(m_as
, (TCA
)arr_cmp_arr
, dst
, kSyncPoint
, args
);
1679 /////////////////////////////////////////////////////////////////////////////
1680 // case 6: array cmp anything
1681 // simplifyCmp has already dealt with this case.
1683 /////////////////////////////////////////////////////////////////////////////
1685 // We have a type which is not a common type. It might be a cell or a box.
1686 CG_PUNT(cgOpCmpHelper_unimplemented
);
1690 void CodeGenerator::cgOpEq(IRInstruction
* inst
) {
1691 CG_OP_CMP(inst
, sete
, equal
);
1694 void CodeGenerator::cgOpNeq(IRInstruction
* inst
) {
1695 CG_OP_CMP(inst
, setne
, nequal
);
1698 void CodeGenerator::cgOpSame(IRInstruction
* inst
) {
1699 CG_OP_CMP(inst
, sete
, same
);
1702 void CodeGenerator::cgOpNSame(IRInstruction
* inst
) {
1703 CG_OP_CMP(inst
, setne
, nsame
);
1706 void CodeGenerator::cgOpLt(IRInstruction
* inst
) {
1707 CG_OP_CMP(inst
, setl
, less
);
1710 void CodeGenerator::cgOpGt(IRInstruction
* inst
) {
1711 CG_OP_CMP(inst
, setg
, more
);
1714 void CodeGenerator::cgOpLte(IRInstruction
* inst
) {
1715 CG_OP_CMP(inst
, setle
, lte
);
1718 void CodeGenerator::cgOpGte(IRInstruction
* inst
) {
1719 CG_OP_CMP(inst
, setge
, gte
);
1722 ///////////////////////////////////////////////////////////////////////////////
1723 // Type check operators
1724 ///////////////////////////////////////////////////////////////////////////////
1726 // Overloads to put the ObjectData* into a register so emitTypeTest
1727 // can cmp to the Class* expected by the specialized Type
1729 // Nothing to do, return the register that contain the ObjectData already
1730 Reg64
getObjectDataEnregistered(Asm
& as
, PhysReg dataSrc
, Reg64 scratch
) {
1734 // Enregister the meoryRef so it can be used with an offset by the
1736 Reg64
getObjectDataEnregistered(Asm
& as
,
1739 as
.loadq(dataSrc
, scratch
);
1743 template<class Loc1
, class Loc2
, class JmpFn
>
1744 void CodeGenerator::emitTypeTest(Type type
, Loc1 typeSrc
, Loc2 dataSrc
,
1746 assert(!type
.subtypeOf(Type::Cls
));
1748 if (type
.isString()) {
1749 emitTestTVType(m_as
, KindOfStringBit
, typeSrc
);
1751 } else if (type
.equals(Type::UncountedInit
)) {
1752 emitTestTVType(m_as
, KindOfUncountedInitBit
, typeSrc
);
1754 } else if (type
.equals(Type::Uncounted
)) {
1755 emitCmpTVType(m_as
, KindOfRefCountThreshold
, typeSrc
);
1757 } else if (type
.equals(Type::Cell
)) {
1758 emitCmpTVType(m_as
, KindOfRef
, typeSrc
);
1760 } else if (type
.equals(Type::Gen
)) {
1764 DataType dataType
= type
.toDataType();
1765 assert(dataType
== KindOfRef
||
1766 (dataType
>= KindOfUninit
&& dataType
<= KindOfObject
));
1767 emitCmpTVType(m_as
, dataType
, typeSrc
);
1771 if (type
.strictSubtypeOf(Type::Obj
)) {
1772 // emit the specific class test
1773 assert(type
.getClass()->attrs() & AttrFinal
);
1774 auto reg
= getObjectDataEnregistered(m_as
, dataSrc
, m_rScratch
);
1775 m_as
.cmpq(type
.getClass(), reg
[ObjectData::getVMClassOffset()]);
1780 template<class JmpFn
>
1781 void CodeGenerator::emitIsTypeTest(IRInstruction
* inst
, JmpFn doJcc
) {
1782 auto const src
= inst
->src(0);
1784 // punt if specialized object for now
1785 if (inst
->typeParam().strictSubtypeOf(Type::Obj
)) {
1786 CG_PUNT(IsType
-SpecializedUnsupported
);
1788 if (inst
->typeParam().equals(Type::Obj
)) {
1789 auto const srcReg
= m_regs
[src
].reg();
1790 if (src
->isA(Type::PtrToGen
)) {
1791 emitTestTVType(m_as
, KindOfObject
, srcReg
[TVOFF(m_type
)]);
1792 TCA toPatch
= m_as
.code
.frontier
;
1793 m_as
. jne8(toPatch
); // 1
1795 // Get the ObjectData*
1796 emitDeref(m_as
, srcReg
, m_rScratch
);
1797 m_as
. cmpq(SystemLib::s_resourceClass
,
1798 m_rScratch
[ObjectData::getVMClassOffset()]);
1800 m_as
.patchJcc8(toPatch
, m_as
.code
.frontier
);
1802 // Cases where src isn't an Obj should have been simplified away
1803 if (!src
->isA(Type::Obj
)) {
1804 CG_PUNT(IsType
-KnownWrongType
);
1806 m_as
. cmpq(SystemLib::s_resourceClass
,
1807 srcReg
[ObjectData::getVMClassOffset()]);
1809 // At this point, the flags say "equal" if is_object is false.
1814 if (src
->isA(Type::PtrToGen
)) {
1815 PhysReg base
= m_regs
[src
].reg();
1816 emitTypeTest(inst
->typeParam(), base
[TVOFF(m_type
)],
1817 base
[TVOFF(m_data
)],
1818 [&](ConditionCode cc
) { doJcc(cc
); });
1821 assert(src
->isA(Type::Gen
));
1822 assert(!src
->isConst());
1824 PhysReg typeSrcReg
= m_regs
[src
].reg(1); // type register
1825 if (typeSrcReg
== InvalidReg
) {
1826 CG_PUNT(IsType
-KnownType
);
1828 PhysReg dataSrcReg
= m_regs
[src
].reg(); // data register
1829 emitTypeTest(inst
->typeParam(), typeSrcReg
, dataSrcReg
,
1830 [&](ConditionCode cc
) { doJcc(cc
); });
1834 void CodeGenerator::emitTypeCheck(Type type
,
1838 emitTypeTest(type
, typeSrc
, dataSrc
,
1839 [&](ConditionCode cc
) {
1840 emitFwdJcc(ccNegate(cc
), taken
);
1845 void CodeGenerator::emitTypeGuard(Type type
, Loc typeSrc
, Loc dataSrc
) {
1846 emitTypeTest(type
, typeSrc
, dataSrc
,
1847 [&](ConditionCode cc
) {
1848 auto const destSK
= SrcKey(curFunc(), m_curTrace
->bcOff());
1849 auto const destSR
= m_tx64
->getSrcRec(destSK
);
1850 m_tx64
->emitFallbackCondJmp(m_as
, *destSR
, ccNegate(cc
));
1854 void CodeGenerator::emitSetCc(IRInstruction
* inst
, ConditionCode cc
) {
1855 m_as
.setcc(cc
, rbyte(m_regs
[inst
->dst()].reg()));
1858 void CodeGenerator::cgIsTypeMemCommon(IRInstruction
* inst
, bool negate
) {
1859 bool called
= false; // check emitSetCc is called only once
1860 emitIsTypeTest(inst
,
1861 [&](ConditionCode cc
) {
1863 emitSetCc(inst
, negate
? ccNegate(cc
) : cc
);
1868 void CodeGenerator::cgIsTypeCommon(IRInstruction
* inst
, bool negate
) {
1869 bool called
= false; // check emitSetCc is called only once
1870 emitIsTypeTest(inst
,
1871 [&](ConditionCode cc
) {
1873 emitSetCc(inst
, negate
? ccNegate(cc
) : cc
);
1878 void CodeGenerator::cgJmpIsTypeCommon(IRInstruction
* inst
, bool negate
) {
1879 emitIsTypeTest(inst
,
1880 [&](ConditionCode cc
) {
1881 emitFwdJcc(negate
? ccNegate(cc
) : cc
, inst
->taken());
1885 void CodeGenerator::cgIsType(IRInstruction
* inst
) {
1886 cgIsTypeCommon(inst
, false);
1889 void CodeGenerator::cgIsNType(IRInstruction
* inst
) {
1890 cgIsTypeCommon(inst
, true);
1893 // TODO(#2404341): remove JmpIs{N,}Type
1895 void CodeGenerator::cgJmpIsType(IRInstruction
* inst
) {
1896 cgJmpIsTypeCommon(inst
, false);
1899 void CodeGenerator::cgJmpIsNType(IRInstruction
* inst
) {
1900 cgJmpIsTypeCommon(inst
, true);
1903 void CodeGenerator::cgIsTypeMem(IRInstruction
* inst
) {
1904 cgIsTypeMemCommon(inst
, false);
1907 void CodeGenerator::cgIsNTypeMem(IRInstruction
* inst
) {
1908 cgIsTypeMemCommon(inst
, true);
1911 ///////////////////////////////////////////////////////////////////////////////
1913 HOT_FUNC_VM
static bool instanceOfHelper(const Class
* objClass
,
1914 const Class
* testClass
) {
1915 return testClass
&& objClass
->classof(testClass
);
1918 void CodeGenerator::cgInstanceOf(IRInstruction
* inst
) {
1920 TCA(instanceOfHelper
),
1925 .ssa(inst
->src(1)));
1929 * Check instanceof using instance bitmasks.
1931 * Note it's not necessary to check whether the test class is defined:
1932 * if it doesn't exist than the candidate can't be an instance of it
1933 * and will fail this check.
1935 void CodeGenerator::emitInstanceBitmaskCheck(IRInstruction
* inst
) {
1936 auto const rObjClass
= m_regs
[inst
->src(0)].reg(0);
1937 auto const testClassName
= inst
->src(1)->getValStr();
1942 if (!Class::getInstanceBitMask(testClassName
, offset
, mask
)) {
1943 always_assert(!"cgInstanceOfBitmask had no bitmask");
1945 a
. testb (int8_t(mask
), rObjClass
[offset
]);
1948 void CodeGenerator::cgInstanceOfBitmask(IRInstruction
* inst
) {
1950 emitInstanceBitmaskCheck(inst
);
1951 a
. setnz (rbyte(m_regs
[inst
->dst()].reg()));
1954 void CodeGenerator::cgNInstanceOfBitmask(IRInstruction
* inst
) {
1956 emitInstanceBitmaskCheck(inst
);
1957 a
. setz (rbyte(m_regs
[inst
->dst()].reg()));
1960 void CodeGenerator::cgJmpInstanceOfBitmask(IRInstruction
* inst
) {
1961 emitInstanceBitmaskCheck(inst
);
1962 emitFwdJcc(CC_NZ
, inst
->taken());
1965 void CodeGenerator::cgJmpNInstanceOfBitmask(IRInstruction
* inst
) {
1966 emitInstanceBitmaskCheck(inst
);
1967 emitFwdJcc(CC_Z
, inst
->taken());
1970 void CodeGenerator::cgReqBindJmpInstanceOfBitmask(IRInstruction
* inst
) {
1971 emitInstanceBitmaskCheck(inst
);
1972 emitReqBindJcc(opToConditionCode(inst
->op()),
1973 inst
->extra
<ReqBindJccData
>());
1976 void CodeGenerator::cgReqBindJmpNInstanceOfBitmask(IRInstruction
* inst
) {
1977 emitInstanceBitmaskCheck(inst
);
1978 emitReqBindJcc(opToConditionCode(inst
->op()),
1979 inst
->extra
<ReqBindJccData
>());
1983 * Check instanceof using the superclass vector on the end of the
1986 void CodeGenerator::cgExtendsClass(IRInstruction
* inst
) {
1987 auto const rObjClass
= m_regs
[inst
->src(0)].reg();
1988 auto const testClass
= inst
->src(1)->getValClass();
1989 auto rTestClass
= m_regs
[inst
->src(1)].reg();
1990 auto const rdst
= rbyte(m_regs
[inst
->dst()].reg());
1997 if (rTestClass
== InvalidReg
) { // TODO(#2031606)
1998 rTestClass
= m_rScratch
; // careful below about asm-x64 smashing this
1999 emitLoadImm(a
, (int64_t)testClass
, rTestClass
);
2002 // Test if it is the exact same class. TODO(#2044801): we should be
2003 // doing this control flow at the IR level.
2004 if (!(testClass
->attrs() & AttrAbstract
)) {
2005 a
. cmpq (rTestClass
, rObjClass
);
2011 auto const vecOffset
= Class::classVecOff() +
2012 sizeof(Class
*) * (testClass
->classVecLen() - 1);
2014 // Check the length of the class vectors---if the candidate's is at
2015 // least as long as the potential base (testClass) it might be a
2017 asm_label(a
, notExact
);
2018 a
. cmpl (testClass
->classVecLen(),
2019 rObjClass
[Class::classVecLenOff()]);
2020 a
. jb8 (falseLabel
);
2022 // If it's a subclass, rTestClass must be at the appropriate index.
2023 a
. cmpq (rTestClass
, rObjClass
[vecOffset
]);
2027 asm_label(a
, falseLabel
);
2028 a
. xorl (r32(rdst
), r32(rdst
));
2033 void CodeGenerator::cgConvDblToBool(IRInstruction
* inst
) {
2034 SSATmp
* dst
= inst
->dst();
2035 auto dstReg
= m_regs
[dst
].reg();
2036 assert(dstReg
!= InvalidReg
);
2037 SSATmp
* src
= inst
->src(0);
2038 auto srcReg
= m_regs
[src
].reg();
2039 if (srcReg
== InvalidReg
) {
2040 assert(src
->isConst());
2041 double constVal
= src
->getValDbl();
2042 if (constVal
== 0.0) {
2043 m_as
.xor_reg64_reg64(dstReg
, dstReg
);
2045 m_as
.mov_imm64_reg(1, dstReg
);
2048 emitMovRegReg(m_as
, srcReg
, dstReg
);
2049 m_as
.shlq(1, dstReg
); // 0.0 stays zero and -0.0 is now 0.0
2050 m_as
.setne(rbyte(dstReg
)); // lower byte becomes 1 if dstReg != 0
2051 m_as
.movzbl(rbyte(dstReg
), r32(dstReg
));
2055 void CodeGenerator::cgConvIntToBool(IRInstruction
* inst
) {
2056 SSATmp
* dst
= inst
->dst();
2057 auto dstReg
= m_regs
[dst
].reg();
2058 assert(dstReg
!= InvalidReg
);
2059 SSATmp
* src
= inst
->src(0);
2060 auto srcReg
= m_regs
[src
].reg();
2062 if (srcReg
== InvalidReg
) {
2063 assert(src
->isConst());
2064 int64_t constVal
= src
->getValInt();
2065 if (constVal
== 0) {
2066 m_as
.xor_reg64_reg64(dstReg
, dstReg
);
2068 m_as
.mov_imm64_reg(1, dstReg
);
2071 m_as
.test_reg64_reg64(srcReg
, srcReg
);
2072 m_as
.setne(rbyte(dstReg
));
2073 m_as
.movzbl(rbyte(dstReg
), r32(dstReg
));
void CodeGenerator::emitConvBoolOrIntToDbl(IRInstruction* inst) {
  SSATmp* src = inst->src(0);
  SSATmp* dst = inst->dst();
  PhysReg dstReg = m_regs[dst].reg();
  assert(src->isA(Type::Bool) || src->isA(Type::Int));
  assert(dstReg != InvalidReg);
  if (src->isConst()) {
    int64_t constVal = src->getValRawInt();
    if (src->isA(Type::Bool)) constVal = constVal != 0; // see task #2401790
    constVal = convIntToDouble(constVal);
    emitLoadImm(m_as, constVal, dstReg);
  } else {
    // cvtsi2sd doesn't modify the high bits of its target, which can
    // cause false dependencies to prevent register renaming from kicking
    // in. Break the dependency chain by zeroing out the XMM reg.
    PhysReg srcReg = m_regs[src].reg();
    PhysReg xmmReg = dstReg.isXMM() ? dstReg : PhysReg(rCgXMM0);
    m_as.pxor_xmm_xmm(xmmReg, xmmReg);
    // Zero-extend a Bool source before converting, so stale upper bits
    // in the source register can't leak into the conversion.
    zeroExtendIfBool(m_as, src, m_regs[src]);
    m_as.cvtsi2sd_reg64_xmm(srcReg, xmmReg);
    emitMovRegReg(m_as, xmmReg, dstReg);
  }
}

void CodeGenerator::cgConvBoolToDbl(IRInstruction* inst) {
  emitConvBoolOrIntToDbl(inst);
}

void CodeGenerator::cgConvIntToDbl(IRInstruction* inst) {
  emitConvBoolOrIntToDbl(inst);
}
void CodeGenerator::cgConvBoolToInt(IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  auto dstReg = m_regs[dst].reg();
  assert(dstReg != InvalidReg);
  SSATmp* src = inst->src(0);
  auto srcReg = m_regs[src].reg();
  assert(src->isConst() == (srcReg == InvalidReg));
  if (srcReg == InvalidReg) {
    int64_t constVal = src->getValRawInt();
    if (constVal == 0) {
      m_as.xor_reg64_reg64(dstReg, dstReg);
    } else {
      m_as.mov_imm64_reg(1, dstReg);
    }
  } else {
    m_as.movzbl(rbyte(srcReg), r32(dstReg));
  }
}
void CodeGenerator::cgConvBoolToStr(IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  auto dstReg = m_regs[dst].reg();
  assert(dstReg != InvalidReg);
  SSATmp* src = inst->src(0);
  auto srcReg = m_regs[src].reg();
  assert(src->isConst() == (srcReg == InvalidReg));
  if (srcReg == InvalidReg) {
    auto constVal = src->getValBool();
    if (!constVal) {
      m_as.mov_imm64_reg((uint64_t)StringData::GetStaticString(""), dstReg);
    } else {
      m_as.mov_imm64_reg((uint64_t)StringData::GetStaticString("1"), dstReg);
    }
  } else {
    m_as.testb(Reg8(int(srcReg)), Reg8(int(srcReg)));
    m_as.mov_imm64_reg((uint64_t)StringData::GetStaticString(""), dstReg);
    m_as.mov_imm64_reg((uint64_t)StringData::GetStaticString("1"), m_rScratch);
    m_as.cmov_reg64_reg64(CC_NZ, m_rScratch, dstReg);
  }
}
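
// Unboxing: UnboxPtr turns a pointer to a possibly-boxed cell into a
// pointer to the inner cell, while Unbox loads the cell itself out of
// the RefData when the source is KindOfRef and otherwise copies the
// value/type pair through unchanged.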
void CodeGenerator::cgUnboxPtr(IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  SSATmp* src = inst->src(0);

  auto srcReg = m_regs[src].reg();
  auto dstReg = m_regs[dst].reg();

  assert(srcReg != InvalidReg);
  assert(dstReg != InvalidReg);

  emitMovRegReg(m_as, srcReg, dstReg);
  emitDerefIfVariant(m_as, PhysReg(dstReg));
}

void CodeGenerator::cgUnbox(IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  SSATmp* src = inst->src(0);
  auto dstValReg  = m_regs[dst].reg(0);
  auto dstTypeReg = m_regs[dst].reg(1);
  auto srcValReg  = m_regs[src].reg(0);
  auto srcTypeReg = m_regs[src].reg(1);

  assert(dstValReg != dstTypeReg);
  assert(src->type().equals(Type::Gen));
  assert(dst->type().notBoxed());

  emitCmpTVType(m_as, HPHP::KindOfRef, srcTypeReg);
  ifThenElse(CC_E, [&] {
    // srcTypeReg == KindOfRef; srcValReg is RefData*
    const size_t ref_tv_off = RefData::tvOffset();
    if (dstValReg != srcValReg) {
      emitLoadReg(m_as, srcValReg[ref_tv_off + TVOFF(m_data)], dstValReg);
      emitLoadTVType(m_as, srcValReg[ref_tv_off + TVOFF(m_type)],
                     r32(dstTypeReg));
    } else {
      emitLoadTVType(m_as, srcValReg[ref_tv_off + TVOFF(m_type)],
                     r32(dstTypeReg));
      m_as.loadq(srcValReg[ref_tv_off + TVOFF(m_data)], dstValReg);
    }
  }, [&] {
    // srcTypeReg != KindOfRef; copy src -> dst
    shuffle2(m_as, srcValReg, srcTypeReg, dstValReg, dstTypeReg);
  });
}
void CodeGenerator::cgLdFuncCachedCommon(IRInstruction* inst) {
  SSATmp* dst        = inst->dst();
  SSATmp* methodName = inst->src(0);

  const StringData* name = methodName->getValStr();
  CacheHandle ch = TargetCache::allocFixedFunction(name);
  size_t funcCacheOff = ch + offsetof(FixedFuncCache, m_func);

  auto dstReg = m_regs[dst].reg();
  if (dstReg == InvalidReg) {
    // happens if LdFixedFunc and FCall not in same trace
    m_as.   cmpq  (0, rVmTl[funcCacheOff]);
  } else {
    m_as.   loadq (rVmTl[funcCacheOff], dstReg);
    m_as.   testq (dstReg, dstReg);
  }
}

void CodeGenerator::cgLdFuncCached(IRInstruction* inst) {
  cgLdFuncCachedCommon(inst);
  // jz off to the helper call in astubs
  unlikelyIfBlock(CC_Z, [&] (Asm& a) {
    // this helper tries the autoload map, and fatals on failure
    cgCallNative(a, inst);
  });
}

void CodeGenerator::cgLdFuncCachedSafe(IRInstruction* inst) {
  cgLdFuncCachedCommon(inst);
  if (Block* taken = inst->taken()) {
    emitFwdJcc(m_as, CC_Z, taken);
  }
}
void CodeGenerator::cgLdFunc(IRInstruction* inst) {
  SSATmp* dst        = inst->dst();
  SSATmp* methodName = inst->src(0);

  TargetCache::CacheHandle ch = TargetCache::FuncCache::alloc();
  // raises an error if function not found
  cgCallHelper(m_as, (TCA)FuncCache::lookup, m_regs[dst].reg(), kSyncPoint,
               ArgGroup(m_regs).imm(ch).ssa(methodName));
}

static void emitLdObjClass(CodeGenerator::Asm& a,
                           PhysReg objReg,
                           PhysReg dstReg) {
  a.loadq  (objReg[ObjectData::getVMClassOffset()], dstReg);
}

void CodeGenerator::cgLdObjClass(IRInstruction* inst) {
  auto dstReg = m_regs[inst->dst()].reg();
  auto objReg = m_regs[inst->src(0)].reg();

  emitLdObjClass(m_as, objReg, dstReg);
}
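
// Method dispatch fast path: cgLdObjMethod probes the one-line
// MethodCache inline.  On a class-pointer hit the cached Func* is
// stored straight into the ActRec; on a miss we call the
// methodCacheSlowPath helper.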
void CodeGenerator::cgLdObjMethod(IRInstruction *inst) {
  auto cls       = inst->src(0);
  auto clsReg    = m_regs[cls].reg();
  auto name      = inst->src(1);
  auto actRec    = inst->src(2);
  auto actRecReg = m_regs[actRec].reg();
  CacheHandle handle = Transl::TargetCache::MethodCache::alloc();

  // lookup in the targetcache
  assert(MethodCache::kNumLines == 1);
  if (debug) {
    MethodCache::Pair p;
    static_assert(sizeof(p.m_value) == 8,
                  "MethodCache::Pair::m_value assumed to be 8 bytes");
    static_assert(sizeof(p.m_key) == 8,
                  "MethodCache::Pair::m_key assumed to be 8 bytes");
  }

  // preload handle->m_value
  m_as.loadq(rVmTl[handle + offsetof(MethodCache::Pair, m_value)], m_rScratch);
  m_as.cmpq (rVmTl[handle + offsetof(MethodCache::Pair, m_key)], clsReg);
  ifThenElse(CC_E, // if handle->key == cls
             [&] { // then actReg->m_func = handle->value
               m_as.storeq(m_rScratch, actRecReg[AROFF(m_func)]);
             },
             [&] { // else call slow path helper
               cgCallHelper(m_as, (TCA)methodCacheSlowPath, InvalidReg,
                            kSyncPoint,
                            ArgGroup(m_regs).addr(rVmTl, handle)
                                            .ssa(actRec)
                                            .ssa(name)
                                            .reg(clsReg));
             });
}
void CodeGenerator::cgStRetVal(IRInstruction* inst) {
  auto  const rFp = m_regs[inst->src(0)].reg();
  auto* const val = inst->src(1);
  cgStore(rFp, AROFF(m_r), val);
}

void CodeGenerator::cgRetAdjustStack(IRInstruction* inst) {
  auto const rFp   = m_regs[inst->src(0)].reg();
  auto const dstSp = m_regs[inst->dst()].reg();
  auto& a = m_as;
  a.    lea   (rFp[AROFF(m_r)], dstSp);
}

void CodeGenerator::cgLdRetAddr(IRInstruction* inst) {
  auto fpReg = m_regs[inst->src(0)].reg(0);
  assert(fpReg != InvalidReg);
  m_as.push(fpReg[AROFF(m_savedRip)]);
}
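
// The helpers below are debug aids: checkFrame sanity-checks an ActRec
// and (optionally) its locals, and traceRet validates fp/sp and the
// return value on every return when EvalHHIRGenerateAsserts is on.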
void checkFrame(ActRec* fp, Cell* sp, bool checkLocals) {
  const Func* func = fp->m_func;
  if (fp->hasVarEnv()) {
    assert(fp->getVarEnv()->getCfp() == fp);
  }
  // TODO: validate this pointer from actrec
  int numLocals = func->numLocals();
  assert(sp <= (Cell*)fp - func->numSlotsInFrame()
         || func->isGenerator());
  if (checkLocals) {
    int numParams = func->numParams();
    for (int i = 0; i < numLocals; i++) {
      if (i >= numParams && func->isGenerator() && i < func->numNamedLocals()) {
        continue;
      }
      assert(checkTv(frame_local(fp, i)));
    }
  }
  // We unfortunately can't do the same kind of check for the stack
  // without knowing about FPI regions, because it may contain
  // pre-live ActRecs.
}

void traceRet(ActRec* fp, Cell* sp, void* rip) {
  if (rip == TranslatorX64::Get()->getCallToExit()) {
    return;
  }
  checkFrame(fp, sp, /*checkLocals*/ false);
  assert(sp <= (Cell*)fp || fp->m_func->isGenerator());
  // check return value if stack not empty
  if (sp < (Cell*)fp) assertTv(sp);
}

void CodeGenerator::emitTraceRet(CodeGenerator::Asm& a) {
  // call to a trace function
  a.    movq  (rVmFp, rdi);
  a.    movq  (rVmSp, rsi);
  a.    loadq (*rsp, rdx); // return ip from native stack
  // do the call; may use a trampoline
  m_tx64->emitCall(a, TCA(traceRet));
}
void CodeGenerator::cgRetCtrl(IRInstruction* inst) {
  SSATmp* sp = inst->src(0);
  SSATmp* fp = inst->src(1);

  // Make sure rVmFp and rVmSp are set appropriately
  emitMovRegReg(m_as, m_regs[sp].reg(), rVmSp);
  emitMovRegReg(m_as, m_regs[fp].reg(), rVmFp);

  // Return control to caller
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    emitTraceRet(m_as);
  }
  m_as.ret();
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    m_as.ud2();
  }
}

void CodeGenerator::emitReqBindAddr(const Func* func,
                                    TCA& dest,
                                    Offset offset) {
  dest = m_tx64->emitServiceReq(SRFlags::None,
                                REQ_BIND_ADDR,
                                2ull,
                                &dest,
                                offset);
}
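
// cgJmpSwitchDest lowers a switch.  For a non-constant index it
// rebases and bounds-checks the index, binds a service request for the
// out-of-range default, then jumps indirectly through a table of
// re-bindable translation addresses; a constant index is resolved to a
// direct bound jump at translation time.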
void CodeGenerator::cgJmpSwitchDest(IRInstruction* inst) {
  JmpSwitchData* data = inst->extra<JmpSwitchDest>();
  SSATmp* index       = inst->src(0);
  auto indexReg       = m_regs[index].reg();

  if (!index->isConst()) {
    if (data->bounded) {
      if (data->base) {
        m_as.  subq(data->base, indexReg);
      }
      m_as.    cmpq(data->cases - 2, indexReg);
      prepareForSmash(m_as, kJmpccLen);
      TCA def = m_tx64->emitServiceReq(REQ_BIND_JMPCC_SECOND, 3,
                                       m_as.code.frontier, data->defaultOff,
                                       CC_AE);
      m_as.    jae(def);
    }

    TCA* table = m_tx64->allocData<TCA>(sizeof(TCA), data->cases);
    TCA afterLea = m_as.code.frontier + kLeaRipLen;
    ptrdiff_t diff = (TCA)table - afterLea;
    assert(deltaFits(diff, sz::dword));
    m_as.   lea(rip[diff], m_rScratch);
    assert(m_as.code.frontier == afterLea);
    m_as.   jmp(m_rScratch[indexReg*8]);

    for (int i = 0; i < data->cases; i++) {
      emitReqBindAddr(data->func, table[i], data->targets[i]);
    }
  } else {
    int64_t indexVal = index->getValInt();

    if (data->bounded) {
      indexVal -= data->base;
      if (indexVal >= data->cases - 2 || indexVal < 0) {
        m_tx64->emitBindJmp(m_as, SrcKey(data->func, data->defaultOff));
        return;
      }
    }
    m_tx64->emitBindJmp(m_as, SrcKey(data->func, data->targets[indexVal]));
  }
}
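
// String switches come in two flavors: the fast helper looks the
// string up in a FixedStringMap of re-bindable targets, while the slow
// helper below converts the operand to a cell and compares it against
// each case string with full equality semantics.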
typedef FixedStringMap<TCA,true> SSwitchMap;

static TCA sswitchHelperFast(const StringData* val,
                             const SSwitchMap* table,
                             TCA* def) {
  TCA* dest = table->find(val);
  return dest ? *dest : *def;
}

void CodeGenerator::cgLdSSwitchDestFast(IRInstruction* inst) {
  auto data = inst->extra<LdSSwitchDestFast>();

  auto table = m_tx64->allocData<SSwitchMap>(64);
  table->init(data->numCases);
  for (int64_t i = 0; i < data->numCases; ++i) {
    table->add(data->cases[i].str, nullptr);
    TCA* addr = table->find(data->cases[i].str);
    emitReqBindAddr(data->func, *addr, data->cases[i].dest);
  }
  TCA* def = m_tx64->allocData<TCA>(sizeof(TCA), 1);
  emitReqBindAddr(data->func, *def, data->defaultOff);

  cgCallHelper(m_as,
               TCA(sswitchHelperFast),
               inst->dst(),
               kNoSyncPoint,
               ArgGroup(m_regs)
                 .ssa(inst->src(0))
                 .immPtr(table)
                 .immPtr(def));
}
static TCA sswitchHelperSlow(TypedValue typedVal,
                             const StringData** strs,
                             int numStrs,
                             TCA* jmptab) {
  TypedValue* cell = tvToCell(&typedVal);
  for (int i = 0; i < numStrs; ++i) {
    if (tvAsCVarRef(cell).equal(strs[i])) return jmptab[i];
  }
  return jmptab[numStrs]; // default case
}

void CodeGenerator::cgLdSSwitchDestSlow(IRInstruction* inst) {
  auto data = inst->extra<LdSSwitchDestSlow>();

  auto strtab = m_tx64->allocData<const StringData*>(
    sizeof(const StringData*), data->numCases);
  auto jmptab = m_tx64->allocData<TCA>(sizeof(TCA), data->numCases + 1);
  for (int i = 0; i < data->numCases; ++i) {
    strtab[i] = data->cases[i].str;
    emitReqBindAddr(data->func, jmptab[i], data->cases[i].dest);
  }
  emitReqBindAddr(data->func, jmptab[data->numCases], data->defaultOff);

  cgCallHelper(m_as,
               TCA(sswitchHelperSlow),
               inst->dst(),
               kSyncPoint,
               ArgGroup(m_regs)
                 .typedValue(inst->src(0))
                 .immPtr(strtab)
                 .imm(data->numCases)
                 .immPtr(jmptab));
}
/*
 * It'd be nice not to have the cgMov here (and just copy propagate
 * the source or something), but for now we're keeping it allocated to
 * rVmFp so inlined calls to C++ helpers that use the rbp chain to
 * find the caller's ActRec will work correctly.
 *
 * This instruction primarily exists to assist in optimizing away
 * unused activation records, so it's usually not going to happen
 * anyway.
 */
void CodeGenerator::cgDefInlineFP(IRInstruction* inst) {
  auto const fp       = m_regs[inst->src(0)].reg();
  auto const fakeRet  = m_tx64->getRetFromInlinedFrame();
  auto const retBCOff = inst->extra<DefInlineFP>()->retBCOff;

  m_as.    storeq (fakeRet, fp[AROFF(m_savedRip)]);
  m_as.    storel (retBCOff, fp[AROFF(m_soff)]);

  cgMov(inst);
}
void CodeGenerator::cgInlineReturn(IRInstruction* inst) {
  auto fpReg = m_regs[inst->src(0)].reg();
  assert(fpReg == rVmFp);
  m_as.    loadq  (fpReg[AROFF(m_savedRbp)], rVmFp);
}

void CodeGenerator::cgReDefSP(IRInstruction* inst) {
  // TODO(#2288359): this instruction won't be necessary (for
  // non-generator frames) when we don't track rVmSp independently
  // from rVmFp.  In generator frames we'll have to track offsets from
  // a DefGeneratorSP or something similar.
  auto fp  = m_regs[inst->src(0)].reg();
  auto dst = m_regs[inst->dst()].reg();
  auto off = -inst->extra<ReDefSP>()->offset * sizeof(Cell);
  emitLea(m_as, fp[off], dst);
}

void CodeGenerator::cgStashGeneratorSP(IRInstruction* inst) {
  cgMov(inst);
}

void CodeGenerator::cgReDefGeneratorSP(IRInstruction* inst) {
  cgMov(inst);
}

void CodeGenerator::cgFreeActRec(IRInstruction* inst) {
  m_as.loadq(m_regs[inst->src(0)].reg()[AROFF(m_savedRbp)],
             m_regs[inst->dst()].reg());
}
void CodeGenerator::cgSpill(IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  SSATmp* src = inst->src(0);

  assert(dst->numNeededRegs() == src->numNeededRegs());
  for (int locIndex = 0; locIndex < m_regs[src].numAllocatedRegs();
       ++locIndex) {
    // We do not need to mask booleans, since the IR will reload the spill
    auto srcReg = m_regs[src].reg(locIndex);
    auto sinfo  = m_regs[dst].spillInfo(locIndex);
    if (m_regs[src].isFullXMM()) {
      m_as.movdqa(srcReg, reg::rsp[sinfo.offset()]);
    } else {
      int offset = sinfo.offset();
      if (locIndex == 0 || packed_tv) {
        emitStoreReg(m_as, srcReg, reg::rsp[offset]);
      } else {
        // Note that type field is shifted in memory
        assert(srcReg.isGP());
        offset += TVOFF(m_type) - (TVOFF(m_data) + sizeof(Value));
        emitStoreTVType(m_as, srcReg, reg::rsp[offset]);
      }
    }
  }
}

void CodeGenerator::cgReload(IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  SSATmp* src = inst->src(0);

  assert(dst->numNeededRegs() == src->numNeededRegs());
  for (int locIndex = 0; locIndex < m_regs[dst].numAllocatedRegs();
       ++locIndex) {
    auto dstReg = m_regs[dst].reg(locIndex);
    auto sinfo  = m_regs[src].spillInfo(locIndex);
    if (m_regs[dst].isFullXMM()) {
      assert(dstReg.isXMM());
      m_as.movdqa(reg::rsp[sinfo.offset()], dstReg);
    } else {
      int offset = sinfo.offset();
      if (locIndex == 0 || packed_tv) {
        emitLoadReg(m_as, reg::rsp[offset], dstReg);
      } else {
        // Note that type field is shifted in memory
        offset += TVOFF(m_type) - (TVOFF(m_data) + sizeof(Value));
        assert(dstReg.isGP());
        emitLoadTVType(m_as, reg::rsp[offset], dstReg);
      }
    }
  }
}
void CodeGenerator::cgStPropWork(IRInstruction* inst, bool genTypeStore) {
  SSATmp* obj  = inst->src(0);
  SSATmp* prop = inst->src(1);
  SSATmp* src  = inst->src(2);
  cgStore(m_regs[obj].reg(), prop->getValInt(), src, genTypeStore);
}

void CodeGenerator::cgStProp(IRInstruction* inst) {
  cgStPropWork(inst, true);
}

void CodeGenerator::cgStPropNT(IRInstruction* inst) {
  cgStPropWork(inst, false);
}

void CodeGenerator::cgStMemWork(IRInstruction* inst, bool genStoreType) {
  SSATmp* addr   = inst->src(0);
  SSATmp* offset = inst->src(1);
  SSATmp* src    = inst->src(2);
  cgStore(m_regs[addr].reg(), offset->getValInt(), src, genStoreType);
}

void CodeGenerator::cgStMem(IRInstruction* inst) {
  cgStMemWork(inst, true);
}

void CodeGenerator::cgStMemNT(IRInstruction* inst) {
  cgStMemWork(inst, false);
}

void CodeGenerator::cgStRefWork(IRInstruction* inst, bool genStoreType) {
  auto destReg = m_regs[inst->dst()].reg();
  auto addrReg = m_regs[inst->src(0)].reg();
  SSATmp* src  = inst->src(1);
  always_assert(!m_regs[src].isFullXMM());
  cgStore(addrReg, RefData::tvOffset(), src, genStoreType);
  if (destReg != InvalidReg) emitMovRegReg(m_as, addrReg, destReg);
}

void CodeGenerator::cgStRef(IRInstruction* inst) {
  cgStRefWork(inst, true);
}

void CodeGenerator::cgStRefNT(IRInstruction* inst) {
  cgStRefWork(inst, false);
}
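
// Frame layout helpers: locals grow downward from the frame pointer,
// so local i lives at fp - (i + 1) * sizeof(Cell), and iterators are
// packed below all of the locals.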
static int64_t localOffset(int64_t index) {
  return -cellsToBytes(index + 1);
}

static int64_t localOffset(SSATmp* index) {
  return localOffset(index->getValInt());
}

int CodeGenerator::iterOffset(SSATmp* tmp) {
  const Func* func = curFunc();
  int64_t index = tmp->getValInt();
  return -cellsToBytes(((index + 1) * kNumIterCells + func->numLocals()));
}

int CodeGenerator::iterOffset(uint32_t id) {
  const Func* func = curFunc();
  return -cellsToBytes(((id + 1) * kNumIterCells + func->numLocals()));
}

void CodeGenerator::cgStLoc(IRInstruction* inst) {
  cgStore(m_regs[inst->src(0)].reg(),
          localOffset(inst->extra<StLoc>()->locId),
          inst->src(1),
          true /* store type */);
}

void CodeGenerator::cgStLocNT(IRInstruction* inst) {
  cgStore(m_regs[inst->src(0)].reg(),
          localOffset(inst->extra<StLocNT>()->locId),
          inst->src(1),
          false /* store type */);
}
void CodeGenerator::cgSyncABIRegs(IRInstruction* inst) {
  emitMovRegReg(m_as, m_regs[inst->src(0)].reg(), rVmFp);
  emitMovRegReg(m_as, m_regs[inst->src(1)].reg(), rVmSp);
}

void CodeGenerator::cgReqBindJmp(IRInstruction* inst) {
  m_tx64->emitBindJmp(
    m_as,
    SrcKey(curFunc(), inst->extra<ReqBindJmp>()->offset)
  );
}

static void emitExitNoIRStats(Asm& a,
                              TranslatorX64* tx64,
                              const Func* func,
                              SrcKey dest) {
  if (RuntimeOption::EnableInstructionCounts ||
      HPHP::Trace::moduleEnabled(HPHP::Trace::stats, 3)) {
    Stats::emitInc(a,
                   Stats::opcodeToIRPreStatCounter(
                     Op(*func->unit()->at(dest.offset()))),
                   -1,
                   Transl::CC_None,
                   true);
  }
}

void CodeGenerator::cgReqBindJmpNoIR(IRInstruction* inst) {
  auto const dest = SrcKey(curFunc(),
                           inst->extra<ReqBindJmpNoIR>()->offset);
  emitExitNoIRStats(m_as, m_tx64, curFunc(), dest);
  m_tx64->emitBindJmp(m_as, dest, REQ_BIND_JMP_NO_IR);
}

void CodeGenerator::cgReqRetranslateNoIR(IRInstruction* inst) {
  auto const dest = SrcKey(curFunc(),
                           inst->extra<ReqRetranslateNoIR>()->offset);
  emitExitNoIRStats(m_as, m_tx64, curFunc(), dest);
  m_tx64->emitReqRetransNoIR(m_as, dest);
}

void CodeGenerator::cgReqRetranslate(IRInstruction* inst) {
  auto const destSK = SrcKey(curFunc(), m_curTrace->bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);
  m_tx64->emitFallbackUncondJmp(m_as, *destSR);
}
static void emitAssertFlagsNonNegative(CodeGenerator::Asm& as) {
  ifThen(as, CC_NGE, [&] { as.ud2(); });
}

static void emitAssertRefCount(CodeGenerator::Asm& as, PhysReg base) {
  as.cmpl(HPHP::RefCountStaticValue, base[FAST_REFCOUNT_OFFSET]);
  ifThen(as, CC_NBE, [&] { as.ud2(); });
}
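
// Reference counting.  IncRef must never touch a static value (one
// whose count is RefCountStaticValue), so callers either prove
// staticness impossible from the type or guard on the count first.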
static void emitIncRef(CodeGenerator::Asm& as, PhysReg base) {
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    emitAssertRefCount(as, base);
  }
  as.incl(base[FAST_REFCOUNT_OFFSET]);
  if (RuntimeOption::EvalHHIRGenerateAsserts) {
    // Assert that the ref count is greater than zero
    emitAssertFlagsNonNegative(as);
  }
}

void CodeGenerator::cgIncRefWork(Type type, SSATmp* src) {
  assert(type.maybeCounted());
  auto increfMaybeStatic = [&] {
    auto base = m_regs[src].reg(0);
    if (!type.needsStaticBitCheck()) {
      emitIncRef(m_as, base);
    } else {
      m_as.cmpl(RefCountStaticValue, base[FAST_REFCOUNT_OFFSET]);
      ifThen(m_as, CC_NE, [&] { emitIncRef(m_as, base); });
    }
  };

  if (type.isKnownDataType()) {
    assert(IS_REFCOUNTED_TYPE(type.toDataType()));
    increfMaybeStatic();
  } else {
    emitCmpTVType(m_as, KindOfRefCountThreshold, m_regs[src].reg(1));
    ifThen(m_as, CC_NLE, [&] { increfMaybeStatic(); });
  }
}
void CodeGenerator::cgIncRef(IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  SSATmp* src = inst->src(0);
  Type type   = src->type();

  cgIncRefWork(type, src);
  shuffle2(m_as, m_regs[src].reg(0), m_regs[src].reg(1),
           m_regs[dst].reg(0), m_regs[dst].reg(1));
}

void CodeGenerator::cgDecRefStack(IRInstruction* inst) {
  cgDecRefMem(inst->typeParam(),
              m_regs[inst->src(0)].reg(),
              cellsToBytes(inst->extra<DecRefStack>()->offset),
              nullptr);
}
void CodeGenerator::cgDecRefThis(IRInstruction* inst) {
  SSATmp* fp  = inst->src(0);
  Block* exit = inst->taken();
  auto fpReg = m_regs[fp].reg();
  auto scratchReg = m_rScratch;

  // Load AR->m_this into m_rScratch
  m_as.loadq(fpReg[AROFF(m_this)], scratchReg);

  auto decrefIfAvailable = [&] {
    // Check if this is available and we're not in a static context instead
    m_as.testb(1, rbyte(scratchReg));
    ifThen(m_as, CC_Z, [&] {
      cgDecRefStaticType(
        Type::Obj,
        scratchReg,
        exit,
        true /* genZeroCheck */
      );
    });
  };

  if (curFunc()->isPseudoMain()) {
    // In pseudo-mains, emit check for presence of m_this
    m_as.testq(scratchReg, scratchReg);
    ifThen(m_as, CC_NZ, [&] { decrefIfAvailable(); });
  } else {
    decrefIfAvailable();
  }
}

void CodeGenerator::cgDecRefLoc(IRInstruction* inst) {
  cgDecRefMem(inst->typeParam(),
              m_regs[inst->src(0)].reg(),
              localOffset(inst->extra<DecRefLoc>()->locId),
              inst->taken());
}
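
// cgGenericRetDecRefs calls the shared free-locals stubs, which use a
// custom ABI: the frame pointer must be in rVmFp, the destination is
// rVmSp, r15 is clobbered (so it is saved here when live), and the
// callee returns with retq $0x8, hence the stack parity adjustment.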
void CodeGenerator::cgGenericRetDecRefs(IRInstruction* inst) {
  auto const rFp       = m_regs[inst->src(0)].reg();
  auto const numLocals = inst->src(1)->getValInt();
  auto const rDest     = m_regs[inst->dst()].reg();
  auto& a = m_as;

  assert(rFp == rVmFp &&
         "free locals helper assumes the frame pointer is rVmFp");
  assert(rDest == rVmSp &&
         "free locals helper adjusts rVmSp, which must be our dst reg");

  if (numLocals == 0) return;

  // The helpers called below use a special ABI, in which r15 is not saved.
  // So save r15 on the stack if it's live.
  bool saveR15 = m_state.liveRegs[inst].contains(r15);

  int stackAdjust = 8;
  if (saveR15) {
    a.push(r15);
    stackAdjust = 16;
  }

  auto const target = numLocals > kNumFreeLocalsHelpers
    ? m_tx64->m_freeManyLocalsHelper
    : m_tx64->m_freeLocalsHelpers[numLocals - 1];

  a.subq(stackAdjust, rsp); // For parity; callee does retq $0x8.
  a.lea(rFp[-numLocals * sizeof(TypedValue)], rDest);
  a.call(target);
  recordSyncPoint(a);

  if (saveR15) {
    a.addq(8, rsp);
    a.pop(r15);
  }
}
static void tv_release_generic(TypedValue* tv) {
  assert(Transl::tx64->stateIsDirty());
  assert(tv->m_type >= KindOfString && tv->m_type <= KindOfRef);
  g_destructors[typeToDestrIndex(tv->m_type)](tv->m_data.pref);
}

static void tv_release_typed(RefData* pv, DataType dt) {
  assert(Transl::tx64->stateIsDirty());
  assert(dt >= KindOfString && dt <= KindOfRef);
  g_destructors[typeToDestrIndex(dt)](pv);
}

Address CodeGenerator::getDtorGeneric() {
  return (Address)tv_release_generic;
}

Address CodeGenerator::getDtorTyped() {
  return (Address)tv_release_typed;
}
//
// This method generates code that checks the static bit and jumps if the bit
// is set.  If regIsCount is true, reg contains the _count field.  Otherwise,
// it's assumed to contain the m_data field.
//
// Return value: the address to be patched with the address to jump to in case
// the static bit is set.  If the check is unnecessary, this method returns
// NULL.
Address CodeGenerator::cgCheckStaticBit(Type type,
                                        PhysReg reg,
                                        bool regIsCount) {
  if (!type.needsStaticBitCheck()) return NULL;

  if (regIsCount) {
    // reg has the _count value
    m_as.cmp_imm32_reg32(RefCountStaticValue, reg);
  } else {
    // reg has the data pointer
    m_as.cmp_imm32_disp_reg32(RefCountStaticValue, FAST_REFCOUNT_OFFSET, reg);
  }

  Address addrToPatch = m_as.code.frontier;
  m_as.jcc8(CC_E, addrToPatch);
  return addrToPatch;
}

//
// Using the given dataReg, this method generates code that checks the static
// bit out of dataReg, and emits a DecRef if needed.
// NOTE: the flags are left with the result of the DecRef's subtraction,
// which can then be tested immediately after this.
//
// Return value: the address to be patched if a RefCountedStaticValue check is
// emitted; NULL otherwise.
//
Address CodeGenerator::cgCheckStaticBitAndDecRef(Type type,
                                                 PhysReg dataReg,
                                                 Block* exit) {
  assert(type.maybeCounted());

  Address patchStaticCheck = nullptr;
  const auto scratchReg = m_rScratch;

  bool canUseScratch = dataReg != scratchReg;

  // TODO: run experiments to check whether the 'if' code sequence
  // is any better than the 'else' branch below; otherwise, always
  // use the 'else' code sequence
  if (type.needsStaticBitCheck() && canUseScratch) {
    // If we need to check for static value, then load the _count into a
    // register to avoid doing two loads. The generated sequence is:
    //
    //     scratchReg = [dataReg + offset(_count)]
    //     if scratchReg == RefCountStaticValue then skip DecRef
    //     scratchReg = scratchReg - 1
    //     ( if exit != NULL, emit:
    //           jz exit
    //     )
    //     [dataReg + offset(_count)] = scratchReg
    if (RuntimeOption::EvalHHIRGenerateAsserts) {
      emitAssertRefCount(m_as, dataReg);
    }
    // Load _count in scratchReg
    m_as.loadl(dataReg[FAST_REFCOUNT_OFFSET], r32(scratchReg));

    // Check for RefCountStaticValue
    patchStaticCheck = cgCheckStaticBit(type, scratchReg,
                                        true /* reg has _count */);

    // Decrement count and store it back in memory.
    // If there's an exit, emit jump to it when _count would get down to 0
    m_as.decq(scratchReg);
    if (exit) {
      emitFwdJcc(CC_E, exit);
    }
    if (RuntimeOption::EvalHHIRGenerateAsserts) {
      // Assert that the ref count is greater than zero
      emitAssertFlagsNonNegative(m_as);
    }
    m_as.store_reg32_disp_reg64(scratchReg, FAST_REFCOUNT_OFFSET, dataReg);
  } else {
    // Can't use scratch reg, so emit code that operates directly in
    // memory. Compared to the sequence above, this will result in one
    // extra load, but it has the advantage of producing a smaller
    // instruction sequence.
    //
    //     ( if needStaticBitCheck, emit :
    //           cmp [dataReg + offset(_count)], RefCountStaticValue
    //           je  LabelAfterDecRef
    //     )
    //     ( if exit != NULL, emit:
    //           cmp [dataReg + offset(_count)], 1
    //           jz  exit
    //     )
    //     sub [dataReg + offset(_count)], 1

    // If necessary, check for RefCountStaticValue
    patchStaticCheck = cgCheckStaticBit(type, dataReg,
                                        false /* passing dataReg */);

    // If there's an exit, emit jump to it if _count would get down to 0
    if (exit) {
      m_as.cmp_imm32_disp_reg32(1, FAST_REFCOUNT_OFFSET, dataReg);
      emitFwdJcc(CC_E, exit);
    }
    if (RuntimeOption::EvalHHIRGenerateAsserts) {
      emitAssertRefCount(m_as, dataReg);
    }

    // Decrement _count
    m_as.decl(dataReg[FAST_REFCOUNT_OFFSET]);

    if (RuntimeOption::EvalHHIRGenerateAsserts) {
      // Assert that the ref count is not less than zero
      emitAssertFlagsNonNegative(m_as);
    }
  }

  return patchStaticCheck;
}
//
// Returns the address to be patched with the address to jump to in case
// the type is not ref-counted.
//
Address CodeGenerator::cgCheckRefCountedType(PhysReg typeReg) {
  emitCmpTVType(m_as, KindOfRefCountThreshold, typeReg);
  Address addrToPatch = m_as.code.frontier;
  m_as.jcc8(CC_LE, addrToPatch);
  return addrToPatch;
}

Address CodeGenerator::cgCheckRefCountedType(PhysReg baseReg, int64_t offset) {
  emitCmpTVType(m_as, KindOfRefCountThreshold, baseReg[offset + TVOFF(m_type)]);
  Address addrToPatch = m_as.code.frontier;
  m_as.jcc8(CC_LE, addrToPatch);
  return addrToPatch;
}
//
// Generates dec-ref of a typed value with statically known type.
//
void CodeGenerator::cgDecRefStaticType(Type type,
                                       PhysReg dataReg,
                                       Block* exit,
                                       bool genZeroCheck) {
  assert(type != Type::Cell && type != Type::Gen);
  assert(type.isKnownDataType());

  if (type.notCounted()) return;

  // Check for RefCountStaticValue if needed, do the actual DecRef,
  // and leave flags set based on the subtract result, which is
  // tested below
  Address patchStaticCheck;
  if (genZeroCheck) {
    patchStaticCheck = cgCheckStaticBitAndDecRef(type, dataReg, exit);
  } else {
    // Set exit as NULL so that the code doesn't jump to error checking.
    patchStaticCheck = cgCheckStaticBitAndDecRef(type, dataReg, nullptr);
  }

  // If not exiting on count down to zero, emit the zero-check and
  // release call
  if (genZeroCheck && exit == nullptr) {
    // Emit jump to m_astubs (to call release) if count got down to zero
    unlikelyIfBlock(CC_Z, [&] (Asm& a) {
      // Emit the call to release in m_astubs
      cgCallHelper(a, m_tx64->getDtorCall(type.toDataType()),
                   InvalidReg, InvalidReg, kSyncPoint,
                   ArgGroup(m_regs).reg(dataReg));
    });
  }
  if (patchStaticCheck) {
    m_as.patchJcc8(patchStaticCheck, m_as.code.frontier);
  }
}

//
// Generates dec-ref of a typed value with dynamic (statically unknown) type,
// when the type is stored in typeReg.
//
void CodeGenerator::cgDecRefDynamicType(PhysReg typeReg,
                                        PhysReg dataReg,
                                        Block* exit,
                                        bool genZeroCheck) {
  // Emit check for ref-counted type
  Address patchTypeCheck = cgCheckRefCountedType(typeReg);

  // Emit check for RefCountStaticValue and the actual DecRef
  Address patchStaticCheck;
  if (genZeroCheck) {
    patchStaticCheck = cgCheckStaticBitAndDecRef(Type::Cell, dataReg, exit);
  } else {
    patchStaticCheck = cgCheckStaticBitAndDecRef(Type::Cell, dataReg, nullptr);
  }

  // If not exiting on count down to zero, emit the zero-check and release call
  if (genZeroCheck && exit == nullptr) {
    // Emit jump to m_astubs (to call release) if count got down to zero
    unlikelyIfBlock(CC_Z, [&] (Asm& a) {
      // Emit call to release in m_astubs
      cgCallHelper(a, getDtorTyped(), InvalidReg, kSyncPoint,
                   ArgGroup(m_regs).reg(dataReg).reg(typeReg));
    });
  }
  // Patch checks to jump around the DecRef
  if (patchTypeCheck)   m_as.patchJcc8(patchTypeCheck,   m_as.code.frontier);
  if (patchStaticCheck) m_as.patchJcc8(patchStaticCheck, m_as.code.frontier);
}
//
// Generates dec-ref of a typed value with dynamic (statically
// unknown) type, when all we have is the baseReg and offset of
// the typed value. This method assumes that baseReg is not the
// scratch register.
//
void CodeGenerator::cgDecRefDynamicTypeMem(PhysReg baseReg,
                                           int64_t offset,
                                           Block* exit) {
  auto scratchReg = m_rScratch;

  assert(baseReg != scratchReg);

  // Emit check for ref-counted type
  Address patchTypeCheck = cgCheckRefCountedType(baseReg, offset);
  if (exit == nullptr && RuntimeOption::EvalHHIRGenericDtorHelper) {
    {
      // This PhysRegSaverStub saves rdi redundantly if
      // !liveRegs[m_curInst].contains(rdi), but it's
      // necessary to maintain stack alignment. We can do better
      // by making the helpers adjust the stack for us in the cold
      // path, which calls the destructor.
      PhysRegSaverStub regSaver(m_as, RegSet(rdi));

      /*
       * rVmSp is ok here because this is part of the special
       * ABI to m_irPopRHelper.  We're not using a secret dependency
       * on the frame or stack---we're only going to use that ABI if
       * we happen to have that register allocated for baseReg.
       */
      if (offset == 0 && baseReg == rVmSp) {
        // Decref'ing top of vm stack, very likely a popR
        m_tx64->emitCall(m_as, m_tx64->m_irPopRHelper);
      } else {
        if (baseReg == rsp) {
          // Because we just pushed %rdi, %rsp is 8 bytes below where
          // offset is expecting it to be.
          offset += sizeof(int64_t);
        }
        m_as.lea(baseReg[offset], rdi);
        m_tx64->emitCall(m_as, m_tx64->m_dtorGenericStub);
      }
      recordSyncPoint(m_as);
    }
    if (patchTypeCheck) {
      m_as.patchJcc8(patchTypeCheck, m_as.code.frontier);
    }
    return;
  }

  // Load m_data into the scratch reg
  m_as.loadq(baseReg[offset + TVOFF(m_data)], scratchReg);

  // Emit check for RefCountStaticValue and the actual DecRef
  Address patchStaticCheck = cgCheckStaticBitAndDecRef(Type::Cell, scratchReg,
                                                       exit);

  // If not exiting on count down to zero, emit the zero-check and release call
  if (exit == nullptr) {
    // Emit jump to m_astubs (to call release) if count got down to zero
    unlikelyIfBlock(CC_Z, [&] (Asm& a) {
      // Emit call to release in m_astubs
      a.lea(baseReg[offset], scratchReg);
      cgCallHelper(a, getDtorGeneric(), InvalidReg, kSyncPoint,
                   ArgGroup(m_regs).reg(scratchReg));
    });
  }

  // Patch checks to jump around the DecRef
  if (patchTypeCheck)   m_as.patchJcc8(patchTypeCheck,   m_as.code.frontier);
  if (patchStaticCheck) m_as.patchJcc8(patchStaticCheck, m_as.code.frontier);
}
//
// Generates the dec-ref of a typed value in memory address [baseReg + offset].
// This handles cases where type is either static or dynamic.
//
void CodeGenerator::cgDecRefMem(Type type,
                                PhysReg baseReg,
                                int64_t offset,
                                Block* exit) {
  auto scratchReg = m_rScratch;
  assert(baseReg != scratchReg);

  if (type.needsReg()) {
    // The type is dynamic, but we don't have two registers available
    // to load the type and the data.
    cgDecRefDynamicTypeMem(baseReg, offset, exit);
  } else if (type.maybeCounted()) {
    m_as.loadq(baseReg[offset + TVOFF(m_data)], scratchReg);
    cgDecRefStaticType(type, scratchReg, exit, true);
  }
}

void CodeGenerator::cgDecRefMem(IRInstruction* inst) {
  assert(inst->src(0)->type().isPtr());
  cgDecRefMem(inst->typeParam(),
              m_regs[inst->src(0)].reg(),
              inst->src(1)->getValInt(),
              inst->taken());
}

void CodeGenerator::cgDecRefWork(IRInstruction* inst, bool genZeroCheck) {
  SSATmp* src = inst->src(0);
  if (!isRefCounted(src)) return;
  Block* exit = inst->taken();
  Type type = src->type();
  if (type.isKnownDataType()) {
    cgDecRefStaticType(type, m_regs[src].reg(), exit, genZeroCheck);
  } else {
    cgDecRefDynamicType(m_regs[src].reg(1),
                        m_regs[src].reg(0),
                        exit,
                        genZeroCheck);
  }
}

void CodeGenerator::cgDecRef(IRInstruction *inst) {
  // DecRef may bring the count to zero, and run the destructor.
  // Generate code for this.
  assert(!inst->taken());
  cgDecRefWork(inst, true);
}

void CodeGenerator::cgDecRefNZ(IRInstruction* inst) {
  // DecRefNZ cannot bring the count to zero.
  // Therefore, we don't generate zero-checking code.
  assert(!inst->taken());
  cgDecRefWork(inst, false);
}

void CodeGenerator::cgDecRefNZOrBranch(IRInstruction* inst) {
  assert(inst->taken());
  cgDecRefWork(inst, true);
}
void CodeGenerator::cgCufIterSpillFrame(IRInstruction* inst) {
  auto const sp     = inst->src(0);
  auto const fp     = inst->src(1);
  auto const nArgs  = inst->extra<CufIterSpillFrame>()->args;
  auto const iterId = inst->extra<CufIterSpillFrame>()->iterId;
  auto const itOff  = iterOffset(iterId);

  const int64_t spOffset = -kNumActRecCells * sizeof(Cell);
  auto spReg = m_regs[sp].reg();
  auto fpReg = m_regs[fp].reg();

  m_as.loadq   (fpReg[itOff + CufIter::funcOff()], m_rScratch);
  m_as.storeq  (m_rScratch, spReg[spOffset + int(AROFF(m_func))]);

  m_as.loadq   (fpReg[itOff + CufIter::ctxOff()], m_rScratch);
  m_as.storeq  (m_rScratch, spReg[spOffset + int(AROFF(m_this))]);

  m_as.shrq    (1, m_rScratch);
  ifThen(m_as, CC_NBE, [this] {
    m_as.shlq(1, m_rScratch);
    emitIncRef(m_as, m_rScratch);
  });
  m_as.loadq   (fpReg[itOff + CufIter::nameOff()], m_rScratch);
  m_as.testq   (m_rScratch, m_rScratch);
  ifThen(m_as, CC_NZ, [this] {
    m_as.cmpl(RefCountStaticValue, m_rScratch[FAST_REFCOUNT_OFFSET]);
    ifThen(m_as, CC_NE, [&] { emitIncRef(m_as, m_rScratch); });
    m_as.orq (ActRec::kInvNameBit, m_rScratch);
  });
  m_as.storeq  (m_rScratch, spReg[spOffset + int(AROFF(m_invName))]);
  m_as.storeq  (fpReg, spReg[spOffset + int(AROFF(m_savedRbp))]);
  m_as.storel  (nArgs, spReg[spOffset + int(AROFF(m_numArgsAndCtorFlag))]);

  emitAdjustSp(spReg,
               m_regs[inst->dst()].reg(),
               spOffset);
}
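
// cgSpillFrame materializes a pre-live ActRec on the VM stack: m_this
// or m_cls (a tagged union), the optional magic-call m_invName, m_func
// (and m_cls for a FuncCtx), the saved frame pointer, and the argument
// count, then adjusts the stack pointer over the new record.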
void CodeGenerator::cgSpillFrame(IRInstruction* inst) {
  auto const sp        = inst->src(0);
  auto const fp        = inst->src(1);
  auto const func      = inst->src(2);
  auto const objOrCls  = inst->src(3);
  auto const magicName = inst->extra<SpillFrame>()->invName;
  auto const nArgs     = inst->extra<SpillFrame>()->numArgs;

  const int64_t spOffset = -kNumActRecCells * sizeof(Cell);

  DEBUG_ONLY bool setThis = true;

  auto spReg = m_regs[sp].reg();
  // actRec->m_this
  if (objOrCls->isA(Type::Cls)) {
    // store class
    if (objOrCls->isConst()) {
      m_as.store_imm64_disp_reg64(uintptr_t(objOrCls->getValClass()) | 1,
                                  spOffset + int(AROFF(m_this)),
                                  spReg);
    } else {
      Reg64 clsPtrReg = m_regs[objOrCls].reg();
      m_as.movq  (clsPtrReg, m_rScratch);
      m_as.orq   (1, m_rScratch);
      m_as.storeq(m_rScratch, spReg[spOffset + int(AROFF(m_this))]);
    }
  } else if (objOrCls->isA(Type::Obj)) {
    // store this pointer
    m_as.store_reg64_disp_reg64(m_regs[objOrCls].reg(),
                                spOffset + int(AROFF(m_this)),
                                spReg);
  } else if (objOrCls->isA(Type::Ctx)) {
    // Stores either a this pointer or a Cctx -- statically unknown.
    Reg64 objOrClsPtrReg = m_regs[objOrCls].reg();
    m_as.storeq(objOrClsPtrReg, spReg[spOffset + int(AROFF(m_this))]);
  } else {
    assert(objOrCls->isA(Type::InitNull));
    // no obj or class; this happens in FPushFunc
    int offset_m_this = spOffset + int(AROFF(m_this));
    // When func is either Type::FuncCls or Type::FuncCtx,
    // m_this/m_cls will be initialized below
    if (!func->isConst() && (func->isA(Type::FuncCtx))) {
      // m_this is unioned with m_cls and will be initialized below
      setThis = false;
    } else {
      m_as.store_imm64_disp_reg64(0, offset_m_this, spReg);
    }
  }
  // actRec->m_invName
  // ActRec::m_invName is encoded as a pointer with bit kInvNameBit
  // set to distinguish it from m_varEnv and m_extrArgs
  uintptr_t invName = !magicName
    ? 0
    : reinterpret_cast<uintptr_t>(magicName) | ActRec::kInvNameBit;
  m_as.store_imm64_disp_reg64(invName,
                              spOffset + int(AROFF(m_invName)),
                              spReg);
  // actRec->m_func and possibly actRec->m_cls
  // Note m_cls is unioned with m_this and may overwrite previous value
  if (func->type().isNull()) {
    assert(func->isConst());
  } else if (func->isConst()) {
    const Func* f = func->getValFunc();
    m_as. mov_imm64_reg((uint64_t)f, m_rScratch);
    m_as.store_reg64_disp_reg64(m_rScratch,
                                spOffset + int(AROFF(m_func)),
                                spReg);
    if (func->isA(Type::FuncCtx)) {
      // Fill in m_cls if provided with both func* and class*
      CG_PUNT(cgAllocActRec);
    }
  } else {
    int offset_m_func = spOffset + int(AROFF(m_func));
    m_as.store_reg64_disp_reg64(m_regs[func].reg(0),
                                offset_m_func,
                                spReg);
    if (func->isA(Type::FuncCtx)) {
      int offset_m_cls = spOffset + int(AROFF(m_cls));
      m_as.store_reg64_disp_reg64(m_regs[func].reg(1),
                                  offset_m_cls,
                                  spReg);
      setThis = true; /* m_this and m_cls are in a union */
    }
  }
  assert(setThis);
  // actRec->m_savedRbp
  m_as.store_reg64_disp_reg64(m_regs[fp].reg(),
                              spOffset + int(AROFF(m_savedRbp)),
                              spReg);

  // actRec->m_numArgsAndCtorFlag
  m_as.store_imm32_disp_reg(nArgs,
                            spOffset + int(AROFF(m_numArgsAndCtorFlag)),
                            spReg);

  emitAdjustSp(spReg,
               m_regs[inst->dst()].reg(),
               spOffset);
}
const Func* loadClassCtor(Class* cls) {
  const Func* f = cls->getCtor();
  if (UNLIKELY(!(f->attrs() & AttrPublic))) {
    VMRegAnchor _;
    UNUSED MethodLookup::LookupResult res =
      g_vmContext->lookupCtorMethod(f, cls, true /*raise*/);
    assert(res == MethodLookup::MethodFoundWithThis);
  }
  return f;
}

Instance* createClHelper(Class* cls, int numArgs, ActRec* ar, TypedValue* sp) {
  Instance* newObj = newInstance(cls);
  newObj->incRefCount();
  return static_cast<c_Closure*>(newObj)->init(numArgs, ar, sp);
}
void CodeGenerator::cgAllocObjFast(IRInstruction* inst) {
  const Class* cls = inst->src(0)->getValClass();
  auto dstReg = m_regs[inst->dst()].reg();

  // First, make sure our property init vectors are all set up
  bool props  = cls->pinitVec().size() > 0;
  bool sprops = cls->numStaticProperties() > 0;
  assert((props || sprops) == cls->needInitialization());
  if (cls->needInitialization()) {
    if (props) {
      cls->initPropHandle();
      m_as.testq(-1, rVmTl[cls->propHandle()]);
      unlikelyIfBlock(CC_Z, [&] (Asm& a) {
        cgCallHelper(a,
                     (TCA)getMethodPtr(&Class::initProps),
                     InvalidReg,
                     kSyncPoint,
                     ArgGroup(m_regs).imm((uint64_t)cls));
      });
    }
    if (sprops) {
      cls->initSPropHandle();
      m_as.testq(-1, rVmTl[cls->sPropHandle()]);
      unlikelyIfBlock(CC_Z, [&] (Asm& a) {
        cgCallHelper(a,
                     (TCA)getMethodPtr(&Class::initSProps),
                     InvalidReg,
                     kSyncPoint,
                     ArgGroup(m_regs).imm((uint64_t)cls));
      });
    }
  }

  // Next, allocate the object
  if (cls->instanceCtor()) {
    cgCallHelper(m_as,
                 (TCA)cls->instanceCtor(),
                 dstReg,
                 kSyncPoint,
                 ArgGroup(m_regs).imm((uint64_t)cls));
  } else {
    size_t size = Instance::sizeForNProps(cls->numDeclProperties());
    int allocator = object_alloc_size_to_index(size);
    assert(allocator != -1);
    cgCallHelper(m_as,
                 (TCA)getMethodPtr(&Instance::newInstanceRaw),
                 dstReg,
                 kSyncPoint,
                 ArgGroup(m_regs).imm((uint64_t)cls).imm(allocator));
  }

  // Set the attributes, if any
  int odAttrs = cls->getODAttrs();
  if (odAttrs) {
    // o_attribute is 16 bits but the fact that we're or-ing a mask makes it ok
    assert(!(odAttrs & 0xffff0000));
    m_as.orq(odAttrs, dstReg[ObjectData::attributeOff()]);
  }

  // Initialize the properties
  size_t nProps = cls->numDeclProperties();
  if (nProps > 0) {
    m_as.subq(8, reg::rsp);
    if (cls->pinitVec().size() == 0) {
      // Fast case: copy from a known address in the Class
      ArgGroup args = ArgGroup(m_regs)
        .addr(dstReg, sizeof(ObjectData) + cls->builtinPropSize())
        .imm(int64_t(&cls->declPropInit()[0]))
        .imm(cellsToBytes(nProps));
      cgCallHelper(m_as,
                   (TCA)memcpy,
                   InvalidReg,
                   kNoSyncPoint,
                   args);
    } else {
      // Slower case: we have to load the src address from the targetcache
      auto rPropData = m_rScratch;
      // Load the Class's propInitVec from the targetcache
      m_as.loadq(rVmTl[cls->propHandle()], rPropData);
      // propData holds the PropInitVec. We want &(*propData)[0]
      m_as.loadq(rPropData[Class::PropInitVec::dataOff()], rPropData);
      if (!cls->hasDeepInitProps()) {
        ArgGroup args = ArgGroup(m_regs)
          .addr(dstReg, sizeof(ObjectData) + cls->builtinPropSize())
          .reg(rPropData)
          .imm(cellsToBytes(nProps));
        cgCallHelper(m_as,
                     (TCA)memcpy,
                     InvalidReg,
                     kNoSyncPoint,
                     args);
      } else {
        ArgGroup args = ArgGroup(m_regs)
          .addr(dstReg, sizeof(ObjectData) + cls->builtinPropSize())
          .reg(rPropData)
          .imm(nProps);
        cgCallHelper(m_as,
                     (TCA)deepInitHelper,
                     InvalidReg,
                     kNoSyncPoint,
                     args);
      }
    }
    m_as.addq(8, reg::rsp);
  }

  if (cls->callsCustomInstanceInit()) {
    // callCustomInstanceInit returns the instance in rax
    cgCallHelper(m_as,
                 (TCA)getMethodPtr(&Instance::callCustomInstanceInit),
                 dstReg,
                 kSyncPoint,
                 ArgGroup(m_regs).reg(dstReg));
  }
}
void CodeGenerator::cgInlineCreateCont(IRInstruction* inst) {
  auto const& data = *inst->extra<InlineCreateCont>();
  auto const helper = data.origFunc->isMethod()
    ? &VMExecutionContext::createContinuationHelper<true>
    : &VMExecutionContext::createContinuationHelper<false>;

  cgCallHelper(
    m_as,
    reinterpret_cast<TCA>(helper),
    inst->dst(),
    kSyncPoint,
    ArgGroup(m_regs)
      .immPtr(data.origFunc)
      .immPtr(data.genFunc)
      .ssa(inst->src(0))
      // Deliberately ignoring frameStaticClass parameter, because
      // it's unused if we have a $this pointer, and we don't inline
      // functions with a null $this.
      .imm(0)
  );
  if (data.origFunc->isMethod()) {
    // We can't support a null $this.
    assert(inst->src(0)->isA(Type::Obj));
  }
}

void CodeGenerator::cgCallArray(IRInstruction* inst) {
  Offset pc    = inst->extra<CallArray>()->pc;
  Offset after = inst->extra<CallArray>()->after;

  ArgGroup args(m_regs);
  args.imm(pc).imm(after);

  // fCallArrayHelper makes the actual call by smashing its return address.
  cgCallHelper(m_as, (TCA)TranslatorX64::fCallArrayHelper,
               nullptr, kSyncPoint, args);
}
void CodeGenerator::cgCall(IRInstruction* inst) {
  SSATmp* actRec         = inst->src(0);
  SSATmp* returnBcOffset = inst->src(1);
  SSATmp* func           = inst->src(2);
  SrcRange args          = inst->srcs().subpiece(3);
  int32_t numArgs        = args.size();

  auto spReg = m_regs[actRec].reg();
  // put all outgoing arguments onto the VM stack
  int64_t adjustment = (-(int64_t)numArgs) * sizeof(Cell);
  for (int32_t i = 0; i < numArgs; i++) {
    // Type::None here means that the simplifier proved that the value
    // matches the value already in memory, thus the store is redundant.
    if (args[i]->type() != Type::None) {
      cgStore(spReg, -(i + 1) * sizeof(Cell), args[i]);
    }
  }
  // store the return bytecode offset into the outgoing actrec
  uint64_t returnBc = returnBcOffset->getValInt();
  m_as.store_imm32_disp_reg(returnBc, AROFF(m_soff), spReg);
  if (adjustment != 0) {
    m_as.add_imm32_reg64(adjustment, spReg);
  }

  assert(m_state.lastMarker);
  SrcKey srcKey = SrcKey(m_state.lastMarker->func, m_state.lastMarker->bcOff);
  bool isImmutable = (func->isConst() && !func->type().isNull());
  const Func* funcd = isImmutable ? func->getValFunc() : nullptr;
  assert(&m_as == &m_tx64->getAsm());
  int32_t adjust = m_tx64->emitBindCall(srcKey, funcd, numArgs);
  if (adjust) {
    m_as.addq (adjust, rVmSp);
  }
}
void CodeGenerator::cgCastStk(IRInstruction *inst) {
  Type type       = inst->typeParam();
  SSATmp* sp      = inst->src(0);
  uint32_t offset = inst->extra<CastStk>()->offset;
  PhysReg spReg   = m_regs[sp].reg();

  ArgGroup args(m_regs);
  args.addr(spReg, cellsToBytes(offset));

  TCA tvCastHelper;
  if (type.subtypeOf(Type::Bool)) {
    tvCastHelper = (TCA)tvCastToBooleanInPlace;
  } else if (type.subtypeOf(Type::Int)) {
    // if casting to integer, pass 10 as the base for the conversion
    args.imm(10);
    tvCastHelper = (TCA)tvCastToInt64InPlace;
  } else if (type.subtypeOf(Type::Dbl)) {
    tvCastHelper = (TCA)tvCastToDoubleInPlace;
  } else if (type.subtypeOf(Type::Arr)) {
    tvCastHelper = (TCA)tvCastToArrayInPlace;
  } else if (type.subtypeOf(Type::Str)) {
    tvCastHelper = (TCA)tvCastToStringInPlace;
  } else if (type.subtypeOf(Type::Obj)) {
    tvCastHelper = (TCA)tvCastToObjectInPlace;
  } else {
    not_reached();
  }
  cgCallHelper(m_as, tvCastHelper, nullptr,
               kSyncPoint, args, DestType::None);
}
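
// cgCallBuiltin invokes a C++ builtin with the native ABI.  By-ref
// returns (String/Array/Object/Variant) are constructed into
// MInstrState::tvBuiltinReturn and copied out afterwards, folding a
// null pointer or KindOfUninit to KindOfNull; smart-ptr arguments are
// passed as a pointer to their m_data slot.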
void CodeGenerator::cgCallBuiltin(IRInstruction* inst) {
  SSATmp* f       = inst->src(0);
  auto args       = inst->srcs().subpiece(2);
  int32_t numArgs = args.size();
  SSATmp* dst     = inst->dst();
  auto dstReg     = m_regs[dst].reg(0);
  auto dstType    = m_regs[dst].reg(1);
  Type returnType = inst->typeParam();

  const Func* func = f->getValFunc();
  DataType funcReturnType = func->returnType();
  int returnOffset = HHIR_MISOFF(tvBuiltinReturn);

  if (TranslatorX64::eagerRecord(func)) {
    const uchar* pc = curUnit()->entry() + m_state.lastMarker->bcOff;
    // we have spilled all args to stack, so spDiff is 0
    m_tx64->emitEagerSyncPoint(m_as, pc, 0);
  }
  // RSP points to the MInstrState we need to use.
  // workaround the fact that rsp moves when we spill registers around call
  PhysReg misReg = m_rScratch;
  emitMovRegReg(m_as, reg::rsp, misReg);

  ArgGroup callArgs(m_regs);
  if (isCppByRef(funcReturnType)) {
    // first arg is pointer to storage for that return value
    if (isSmartPtrRef(funcReturnType)) {
      returnOffset += TVOFF(m_data);
    }
    // misReg is pointing to an MInstrState struct on the C stack.  Pass
    // the address of tvBuiltinReturn to the native function as the location
    // it can construct the return Array, String, Object, or Variant.
    callArgs.addr(misReg, returnOffset); // &misReg[returnOffset]
  }

  // non-pointer args are plain values passed by value.  String, Array,
  // Object, and Variant are passed by const&, ie a pointer to stack memory
  // holding the value, so expect PtrToT types for these.
  // Pointers to smartptr types (String, Array, Object) need adjusting to
  // point to &ptr->m_data.
  for (int i = 0; i < numArgs; i++) {
    const Func::ParamInfo& pi = func->params()[i];
    if (TVOFF(m_data) && isSmartPtrRef(pi.builtinType())) {
      assert(args[i]->type().isPtr() && m_regs[args[i]].reg() != InvalidReg);
      callArgs.addr(m_regs[args[i]].reg(), TVOFF(m_data));
    } else {
      callArgs.ssa(args[i]);
    }
  }

  // if the return value is returned by reference, we don't need the
  // return value from this call since we know where the value is.
  cgCallHelper(m_as, Transl::CppCall((TCA)func->nativeFuncPtr()),
               isCppByRef(funcReturnType) ? InvalidReg : dstReg,
               kSyncPoint, callArgs);

  // load return value from builtin
  // for primitive return types (int, bool), the return value
  // is already in dstReg (the builtin call returns in rax). For return
  // by reference (String, Object, Array, Variant), the builtin writes the
  // return value into MInstrState::tvBuiltinReturn TV, from where it
  // has to be tested and copied.
  if (dstReg == InvalidReg || returnType.isSimpleType()) {
    return;
  }
  // after the call, RSP is back pointing to MInstrState and m_rScratch
  // has been clobbered.
  misReg = rsp;

  if (returnType.isReferenceType()) {
    assert(isCppByRef(funcReturnType) && isSmartPtrRef(funcReturnType));
    // return type is String, Array, or Object; fold nullptr to KindOfNull
    m_as.   loadq (misReg[returnOffset], dstReg);
    emitLoadImm(m_as, returnType.toDataType(), dstType);
    emitLoadImm(m_as, KindOfNull, m_rScratch);
    m_as.   testq (dstReg, dstReg);
    m_as.   cmov_reg64_reg64 (CC_Z, m_rScratch, dstType);
    return;
  }

  if (returnType.subtypeOf(Type::Cell)
      || returnType.subtypeOf(Type::BoxedCell)) {
    // return type is Variant; fold KindOfUninit to KindOfNull
    assert(isCppByRef(funcReturnType) && !isSmartPtrRef(funcReturnType));
    assert(misReg != dstType);
    emitLoadTVType(m_as, misReg[returnOffset + TVOFF(m_type)], dstType);
    m_as.   loadq (misReg[returnOffset + TVOFF(m_data)], dstReg);
    emitLoadImm(m_as, KindOfNull, m_rScratch);
    static_assert(KindOfUninit == 0, "KindOfUninit must be 0 for test");
    m_as.   testb (rbyte(dstType), rbyte(dstType));
    m_as.   cmov_reg64_reg64 (CC_Z, m_rScratch, dstType);
    return;
  }

  not_reached();
}
void CodeGenerator::cgSpillStack(IRInstruction* inst) {
  SSATmp* dst             = inst->dst();
  SSATmp* sp              = inst->src(0);
  auto const spDeficit    = inst->src(1)->getValInt();
  auto const spillVals    = inst->srcs().subpiece(2);
  auto const numSpillSrcs = spillVals.size();
  auto const dstReg       = m_regs[dst].reg();
  auto const spReg        = m_regs[sp].reg();
  auto const spillCells   = spillValueCells(inst);

  int64_t adjustment = (spDeficit - spillCells) * sizeof(Cell);
  for (uint32_t i = 0; i < numSpillSrcs; ++i) {
    const int64_t offset = i * sizeof(Cell) + adjustment;
    if (spillVals[i]->type() == Type::None) {
      // The simplifier detected that we're storing the same value
      // already in there.
      continue;
    }

    auto* val = spillVals[i];
    auto* inst = val->inst();
    while (inst->isPassthrough()) {
      inst = inst->getPassthroughValue()->inst();
    }
    // If our value came from a LdStack on the same sp and offset,
    // we don't need to spill it.
    if (inst->op() == LdStack && inst->src(0) == sp &&
        inst->extra<LdStack>()->offset * sizeof(Cell) == offset) {
      FTRACE(6, "{}: Not spilling spill value {} from {}\n",
             __func__, i, inst->toString());
    } else {
      cgStore(spReg, offset, val);
    }
  }

  emitAdjustSp(spReg, dstReg, adjustment);
}

void CodeGenerator::emitAdjustSp(PhysReg spReg,
                                 PhysReg dstReg,
                                 int64_t adjustment /* bytes */) {
  if (adjustment != 0) {
    if (dstReg != spReg) {
      m_as.    lea  (spReg[adjustment], dstReg);
    } else {
      m_as.    addq (adjustment, dstReg);
    }
  } else {
    emitMovRegReg(m_as, spReg, dstReg);
  }
}
void CodeGenerator::cgNativeImpl(IRInstruction* inst) {
  SSATmp* func = inst->src(0);
  SSATmp* fp   = inst->src(1);

  assert(func->isConst());
  assert(func->type() == Type::Func);
  const Func* fn = func->getValFunc();

  BuiltinFunction builtinFuncPtr = func->getValFunc()->builtinFuncPtr();
  emitMovRegReg(m_as, m_regs[fp].reg(), argNumToRegName[0]);
  if (TranslatorX64::eagerRecord(fn)) {
    m_tx64->emitEagerSyncPoint(m_as, fn->getEntry(), 0);
  }
  m_as.call((TCA)builtinFuncPtr);
  recordSyncPoint(m_as);
}
void CodeGenerator::cgLdThis(IRInstruction* inst) {
  SSATmp* dst  = inst->dst();
  SSATmp* src  = inst->src(0);
  Block* label = inst->taken();
  // mov dst, [fp + 0x20]
  auto dstReg = m_regs[dst].reg();

  // the destination of LdThis could be dead but the instruction
  // itself still useful because of the checks that it does (if it has
  // a label).  So we need to make sure there is a dstReg for this
  // instruction.
  if (dstReg != InvalidReg) {
    // instruction's result is not dead
    m_as.loadq(m_regs[src].reg()[AROFF(m_this)], dstReg);
  }
  if (label == NULL) return;  // no need to perform its checks
  if (dstReg != InvalidReg) {
    m_as.testb(1, rbyte(dstReg));
  } else {
    m_as.testb(1, m_regs[src].reg()[AROFF(m_this)]);
  }
  emitFwdJcc(CC_NZ, label);
}

static void emitLdClsCctx(CodeGenerator::Asm& a,
                          PhysReg srcReg,
                          PhysReg dstReg) {
  emitMovRegReg(a, srcReg, dstReg);
  a.    decq(dstReg);
}

void CodeGenerator::cgLdClsCtx(IRInstruction* inst) {
  PhysReg srcReg = m_regs[inst->src(0)].reg();
  PhysReg dstReg = m_regs[inst->dst()].reg();
  // Context could be either a this object or a class ptr
  m_as.   testb(1, rbyte(srcReg));
  ifThenElse(CC_NZ,
             [&] { emitLdClsCctx(m_as, srcReg, dstReg);  }, // ctx is a class
             [&] { emitLdObjClass(m_as, srcReg, dstReg); }  // ctx is this ptr
            );
}

void CodeGenerator::cgLdClsCctx(IRInstruction* inst) {
  PhysReg srcReg = m_regs[inst->src(0)].reg();
  PhysReg dstReg = m_regs[inst->dst()].reg();
  emitLdClsCctx(m_as, srcReg, dstReg);
}
void CodeGenerator::cgLdCtx(IRInstruction* inst) {
  PhysReg dstReg = m_regs[inst->dst()].reg();
  PhysReg srcReg = m_regs[inst->src(0)].reg();
  if (dstReg != InvalidReg) {
    m_as.loadq(srcReg[AROFF(m_this)], dstReg);
  }
}

void CodeGenerator::cgLdCctx(IRInstruction* inst) {
  return cgLdCtx(inst);
}

void CodeGenerator::cgLdConst(IRInstruction* inst) {
  auto const dstReg = m_regs[inst->dst()].reg();
  auto const val    = inst->extra<LdConst>()->as<uintptr_t>();
  if (dstReg == InvalidReg) return;
  emitLoadImm(m_as, val, dstReg);
}

void CodeGenerator::cgLdARFuncPtr(IRInstruction* inst) {
  SSATmp* dst      = inst->dst();
  SSATmp* baseAddr = inst->src(0);
  SSATmp* offset   = inst->src(1);

  auto dstReg  = m_regs[dst].reg();
  auto baseReg = m_regs[baseAddr].reg();

  assert(offset->isConst());

  m_as.load_reg64_disp_reg64(baseReg,
                             offset->getValInt() + AROFF(m_func),
                             dstReg);
}
static int getNativeTypeSize(Type type) {
  if (type.subtypeOf(Type::Int | Type::Func)) return sz::qword;
  if (type.subtypeOf(Type::Bool))             return sz::byte;
  not_reached();
}

void CodeGenerator::cgLdRaw(IRInstruction* inst) {
  SSATmp* dest   = inst->dst();
  SSATmp* addr   = inst->src(0);
  SSATmp* offset = inst->src(1);

  assert(!(dest->isConst()));

  Reg64 addrReg = m_regs[addr].reg();
  PhysReg destReg = m_regs[dest].reg();

  if (addr->isConst()) {
    addrReg = m_rScratch;
    emitLoadImm(m_as, addr->getValRawInt(), addrReg);
  }

  if (offset->isConst()) {
    assert(offset->type() == Type::Int);
    int64_t kind = offset->getValInt();
    RawMemSlot& slot = RawMemSlot::Get(RawMemSlot::Kind(kind));
    int ldSize = slot.size();
    int64_t off = slot.offset();
    if (ldSize == sz::qword) {
      m_as.loadq (addrReg[off], destReg);
    } else if (ldSize == sz::dword) {
      m_as.loadl (addrReg[off], r32(destReg));
    } else {
      assert(ldSize == sz::byte);
      m_as.loadzbl (addrReg[off], r32(destReg));
    }
  } else {
    int ldSize = getNativeTypeSize(dest->type());
    Reg64 offsetReg = r64(m_regs[offset].reg());
    if (ldSize == sz::qword) {
      m_as.loadq (addrReg[offsetReg], destReg);
    } else {
      // Not yet supported by our assembler
      assert(ldSize == sz::byte);
      not_reached();
    }
  }
}
void CodeGenerator::cgStRaw(IRInstruction* inst) {
  auto baseReg  = m_regs[inst->src(0)].reg();
  int64_t kind  = inst->src(1)->getValInt();
  SSATmp* value = inst->src(2);

  RawMemSlot& slot = RawMemSlot::Get(RawMemSlot::Kind(kind));
  assert(value->type().equals(slot.type()));
  int stSize = slot.size();
  int64_t off = slot.offset();
  auto dest = baseReg[off];

  if (value->isConst()) {
    if (stSize == sz::qword) {
      m_as.storeq(value->getValRawInt(), dest);
    } else if (stSize == sz::dword) {
      m_as.storel(value->getValRawInt(), dest);
    } else {
      assert(stSize == sz::byte);
      m_as.storeb(value->getValBool(), dest);
    }
  } else {
    if (stSize == sz::qword) {
      m_as.storeq(r64(m_regs[value].reg()), dest);
    } else if (stSize == sz::dword) {
      m_as.storel(r32(m_regs[value].reg()), dest);
    } else {
      assert(stSize == sz::byte);
      m_as.storeb(rbyte(m_regs[value].reg()), dest);
    }
  }
}
void CodeGenerator::cgLdStaticLocCached(IRInstruction* inst) {
  auto ch     = inst->src(0)->getValRawInt();
  auto outReg = m_regs[inst->dst()].reg();

  m_as.loadq(rVmTl[ch], outReg);
  m_as.testq(outReg, outReg);
  emitFwdJcc(m_as, CC_Z, inst->taken());
}
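
// The rVmTl[ch] addressing above recurs throughout this file: rVmTl holds
// the base of the per-request target cache, and a CacheHandle is a byte
// offset into that area, so rVmTl[ch] names one cache slot. A rough C++
// rendering of what the two instructions do (names illustrative, not real
// APIs):
//
//   void** slot = (void**)(targetCacheBase + ch);
//   if (*slot == nullptr) goto taken;   // cache miss: take the exit branch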
// If label is set and type is not Gen, this method generates a check
// that bails to the label if the loaded typed value doesn't match type.
void CodeGenerator::cgLoadTypedValue(PhysReg base,
                                     int64_t off,
                                     IRInstruction* inst) {
  Block* label = inst->taken();
  Type type    = inst->typeParam();
  SSATmp* dst  = inst->dst();

  assert(type == dst->type());
  assert(type.needsReg());
  auto valueDstReg = m_regs[dst].reg(0);
  auto typeDstReg  = m_regs[dst].reg(1);

  if (valueDstReg.isXMM()) {
    // Whole typed value is stored in single XMM reg valueDstReg
    assert(RuntimeOption::EvalHHIRAllocXMMRegs);
    assert(typeDstReg == InvalidReg);
    m_as.movdqa(base[off + TVOFF(m_data)], valueDstReg);
    return;
  }

  if (valueDstReg == InvalidReg && typeDstReg == InvalidReg &&
      (label == nullptr || type == Type::Gen)) {
    // A dead load; nothing to do.
    return;
  }
  bool useScratchReg = (base == typeDstReg && valueDstReg != InvalidReg);
  if (useScratchReg) {
    // Save base to m_rScratch, because base will be overwritten.
    m_as.mov_reg64_reg64(base, m_rScratch);
  }

  // Load type if it's not dead
  if (typeDstReg != InvalidReg) {
    emitLoadTVType(m_as, base[off + TVOFF(m_type)], typeDstReg);
    if (label) {
      emitTypeCheck(inst->typeParam(), typeDstReg,
                    valueDstReg, inst->taken());
    }
  } else if (label) {
    emitTypeCheck(inst->typeParam(),
                  base[off + TVOFF(m_type)],
                  base[off + TVOFF(m_data)],
                  inst->taken());
  }

  // Load value if it's not dead
  if (valueDstReg == InvalidReg) return;

  if (useScratchReg) {
    m_as.loadq(m_rScratch[off + TVOFF(m_data)], valueDstReg);
  } else {
    m_as.loadq(base[off + TVOFF(m_data)], valueDstReg);
  }
}
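
// Register conventions assumed above: a TypedValue is 16 bytes (an 8-byte
// m_data word plus the word containing the m_type tag), so a loaded value
// normally occupies two GP registers, reg(0) for data and reg(1) for type.
// Under EvalHHIRAllocXMMRegs the allocator may instead hand out one 128-bit
// XMM register, letting a single movdqa move the whole TypedValue:
//
//   GP pair:  reg0 = tv.m_data;            reg1 = tv.m_type
//   XMM:      xmm[0..63] = tv.m_data;      xmm[64..127] = the type word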
void CodeGenerator::cgStoreTypedValue(PhysReg base,
                                      int64_t off,
                                      SSATmp* src) {
  assert(src->type().needsReg());
  auto srcReg0 = m_regs[src].reg(0);
  auto srcReg1 = m_regs[src].reg(1);
  if (srcReg0.isXMM()) {
    // Whole typed value is stored in single XMM reg srcReg0
    assert(RuntimeOption::EvalHHIRAllocXMMRegs);
    assert(srcReg1 == InvalidReg);
    m_as.movdqa(srcReg0, base[off + TVOFF(m_data)]);
    return;
  }
  m_as.storeq(srcReg0, base[off + TVOFF(m_data)]);
  emitStoreTVType(m_as, srcReg1, base[off + TVOFF(m_type)]);
}
void CodeGenerator::cgStore(PhysReg base,
                            int64_t off,
                            SSATmp* src,
                            bool genStoreType) {
  Type type = src->type();
  if (type.needsReg()) {
    cgStoreTypedValue(base, off, src);
    return;
  }
  if (genStoreType) {
    emitStoreTVType(m_as, type.toDataType(), base[off + TVOFF(m_type)]);
  }
  if (type.isNull()) {
    // no need to store a value for null or uninit
    return;
  }
  if (src->isConst()) {
    int64_t val = 0;
    if (type.subtypeOf(Type::Bool | Type::Int | Type::Dbl |
                       Type::Arr | Type::StaticStr | Type::Cls)) {
      val = src->getValBits();
    } else {
      not_reached();
    }
    m_as.storeq(val, base[off + TVOFF(m_data)]);
  } else {
    zeroExtendIfBool(m_as, src, m_regs[src]);
    emitStoreReg(m_as, m_regs[src].reg(), base[off + TVOFF(m_data)]);
  }
}
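
// Why the early return for null types above: for Null/Uninit the m_data
// word is never meaningfully read back, so writing just the type tag is
// sufficient. Storing a null into a local is therefore one small store of
// the type byte rather than a full 16-byte TypedValue write.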
void CodeGenerator::cgLoad(PhysReg base,
                           int64_t off,
                           IRInstruction* inst) {
  Type type = inst->typeParam();
  if (type.needsReg()) {
    return cgLoadTypedValue(base, off, inst);
  }
  Block* label = inst->taken();
  if (label != NULL) {
    emitTypeCheck(inst->typeParam(),
                  base[off + TVOFF(m_type)],
                  base[off + TVOFF(m_data)],
                  label);
  }
  if (type.isNull()) return; // these are constants
  auto dstReg = m_regs[inst->dst()].reg();
  // if dstReg == InvalidReg then the value of this load is dead
  if (dstReg == InvalidReg) return;

  if (type == Type::Bool) {
    m_as.load_reg64_disp_reg32(base, off + TVOFF(m_data), dstReg);
  } else {
    emitLoadReg(m_as, base[off + TVOFF(m_data)], dstReg);
  }
}
void CodeGenerator::cgLdProp(IRInstruction* inst) {
  cgLoad(m_regs[inst->src(0)].reg(), inst->src(1)->getValInt(), inst);
}

void CodeGenerator::cgLdMem(IRInstruction* inst) {
  cgLoad(m_regs[inst->src(0)].reg(), inst->src(1)->getValInt(), inst);
}

void CodeGenerator::cgLdRef(IRInstruction* inst) {
  cgLoad(m_regs[inst->src(0)].reg(), RefData::tvOffset(), inst);
}
void CodeGenerator::recordSyncPoint(Asm& as,
                                    SyncOptions sync /* = kSyncPoint */) {
  assert(m_state.lastMarker);

  Offset stackOff = m_state.lastMarker->stackOff;
  switch (sync) {
  case kSyncPointAdjustOne:
    // Bias the recorded stack depth by one cell (the unwinder will skip
    // over one eval-stack element).
    stackOff -= 1;
    break;
  default:
    break;
  }

  Offset pcOff = m_state.lastMarker->bcOff - m_state.lastMarker->func->base();

  FTRACE(5, "IR recordSyncPoint: {} {} {}\n", as.code.frontier, pcOff,
         stackOff);
  m_tx64->recordSyncPoint(as, pcOff, stackOff);
}
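
// Background on sync points: a helper call can re-enter the VM or throw,
// and the unwinder then needs the VM state (bytecode offset and eval-stack
// depth) that corresponds to the native return address of the call.
// recordSyncPoint associates the current code frontier with (pcOff,
// stackOff) so fixup lookup can answer "what was the VM doing at this
// machine PC?". kSyncPointAdjustOne biases the recorded depth by one cell;
// see the new_iter_object comment further down for the use case.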
void CodeGenerator::cgLdAddr(IRInstruction* inst) {
  auto base      = m_regs[inst->src(0)].reg();
  int64_t offset = inst->src(1)->getValInt();
  m_as.lea(base[offset], m_regs[inst->dst()].reg());
}

void CodeGenerator::cgLdLoc(IRInstruction* inst) {
  cgLoad(m_regs[inst->src(0)].reg(),
         localOffset(inst->extra<LdLoc>()->locId),
         inst);
}

void CodeGenerator::cgLdLocAddr(IRInstruction* inst) {
  auto const fpReg  = m_regs[inst->src(0)].reg();
  auto const offset = localOffset(inst->extra<LdLocAddr>()->locId);
  if (m_regs[inst->dst()].hasReg()) {
    m_as.lea(fpReg[offset], m_regs[inst->dst()].reg());
  }
}

void CodeGenerator::cgLdStackAddr(IRInstruction* inst) {
  auto const base   = m_regs[inst->src(0)].reg();
  auto const offset = cellsToBytes(inst->extra<LdStackAddr>()->offset);
  m_as.lea(base[offset], m_regs[inst->dst()].reg());
}

void CodeGenerator::cgLdStack(IRInstruction* inst) {
  assert(inst->taken() == nullptr);
  cgLoad(m_regs[inst->src(0)].reg(),
         cellsToBytes(inst->extra<LdStack>()->offset),
         inst);
}
void CodeGenerator::cgGuardStk(IRInstruction* inst) {
  auto const rSP     = m_regs[inst->src(0)].reg();
  auto const baseOff = cellsToBytes(inst->extra<GuardStk>()->offset);
  emitTypeGuard(inst->typeParam(),
                rSP[baseOff + TVOFF(m_type)],
                rSP[baseOff + TVOFF(m_data)]);
}

void CodeGenerator::cgCheckStk(IRInstruction* inst) {
  auto const rbase   = m_regs[inst->src(0)].reg();
  auto const baseOff = cellsToBytes(inst->extra<CheckStk>()->offset);
  emitTypeCheck(inst->typeParam(), rbase[baseOff + TVOFF(m_type)],
                rbase[baseOff + TVOFF(m_data)], inst->taken());
}

void CodeGenerator::cgGuardLoc(IRInstruction* inst) {
  auto const rFP     = m_regs[inst->src(0)].reg();
  auto const baseOff = localOffset(inst->extra<GuardLoc>()->locId);
  emitTypeGuard(inst->typeParam(),
                rFP[baseOff + TVOFF(m_type)],
                rFP[baseOff + TVOFF(m_data)]);
}

void CodeGenerator::cgCheckLoc(IRInstruction* inst) {
  auto const rbase   = m_regs[inst->src(0)].reg();
  auto const baseOff = localOffset(inst->extra<CheckLoc>()->locId);
  emitTypeCheck(inst->typeParam(), rbase[baseOff + TVOFF(m_type)],
                rbase[baseOff + TVOFF(m_data)], inst->taken());
}
void CodeGenerator::emitSideExitGuard(Type type,
                                      X64Assembler::MemoryRef typeSrc,
                                      X64Assembler::MemoryRef dataSrc,
                                      Offset taken) {
  emitTypeTest(type, typeSrc, dataSrc,
    [&](ConditionCode cc) {
      auto const sk = SrcKey(curFunc(), taken);
      m_tx64->emitBindJcc(m_as, ccNegate(cc), sk, REQ_BIND_SIDE_EXIT);
    });
}

void CodeGenerator::cgSideExitGuardLoc(IRInstruction* inst) {
  auto const fp    = m_regs[inst->src(0)].reg();
  auto const extra = inst->extra<SideExitGuardLoc>();
  emitSideExitGuard(inst->typeParam(),
                    fp[localOffset(extra->checkedSlot) + TVOFF(m_type)],
                    fp[localOffset(extra->checkedSlot) + TVOFF(m_data)],
                    extra->taken);
}

void CodeGenerator::cgSideExitGuardStk(IRInstruction* inst) {
  auto const sp    = m_regs[inst->src(0)].reg();
  auto const extra = inst->extra<SideExitGuardStk>();
  emitSideExitGuard(inst->typeParam(),
                    sp[cellsToBytes(extra->checkedSlot) + TVOFF(m_type)],
                    sp[cellsToBytes(extra->checkedSlot) + TVOFF(m_data)],
                    extra->taken);
}
void CodeGenerator::cgDefMIStateBase(IRInstruction* inst) {
  assert(inst->dst()->type() == Type::PtrToCell);
  assert(m_regs[inst->dst()].reg() == rsp);
}
void CodeGenerator::cgCheckType(IRInstruction* inst) {
  auto const src   = inst->src(0);
  auto const t     = inst->typeParam();
  auto const rData = m_regs[src].reg(0);
  auto const rType = m_regs[src].reg(1);

  auto doJcc = [&](ConditionCode cc) {
    emitFwdJcc(ccNegate(cc), inst->taken());
  };

  if (t.equals(Type::Nullptr)) {
    if (!src->type().equals(Type::Nullptr | Type::CountedStr)) {
      CG_PUNT(CheckType-Nullptr-UnsupportedType);
    }
    m_as.testq(rData, rData);
    doJcc(CC_Z);
  } else {
    emitTypeTest(inst->typeParam(), rType, rData, doJcc);
  }

  auto const dstReg = m_regs[inst->dst()].reg();
  if (dstReg != InvalidReg) {
    emitMovRegReg(m_as, rData, dstReg);
  }
}

void CodeGenerator::cgCheckTypeMem(IRInstruction* inst) {
  auto const reg = m_regs[inst->src(0)].reg();
  emitTypeCheck(inst->typeParam(), reg[TVOFF(m_type)],
                reg[TVOFF(m_data)], inst->taken());
}
void CodeGenerator::cgGuardRefs(IRInstruction* inst) {
  assert(inst->numSrcs() == 5);

  SSATmp* funcPtrTmp     = inst->src(0);
  SSATmp* nParamsTmp     = inst->src(1);
  SSATmp* firstBitNumTmp = inst->src(2);
  SSATmp* mask64Tmp      = inst->src(3);
  SSATmp* vals64Tmp      = inst->src(4);

  // Get values in place
  assert(funcPtrTmp->type() == Type::Func);
  auto funcPtrReg = m_regs[funcPtrTmp].reg();
  assert(funcPtrReg != InvalidReg);

  assert(nParamsTmp->type() == Type::Int);
  auto nParamsReg = m_regs[nParamsTmp].reg();
  assert(nParamsReg != InvalidReg || nParamsTmp->isConst());

  assert(firstBitNumTmp->isConst() && firstBitNumTmp->type() == Type::Int);
  uint32_t firstBitNum = (uint32_t)(firstBitNumTmp->getValInt());

  assert(mask64Tmp->type() == Type::Int);
  assert(mask64Tmp->isConst());
  auto mask64Reg = m_regs[mask64Tmp].reg();
  assert(mask64Reg != InvalidReg || mask64Tmp->inst()->op() != LdConst);
  uint64_t mask64 = mask64Tmp->getValInt();

  assert(vals64Tmp->type() == Type::Int);
  assert(vals64Tmp->isConst());
  auto vals64Reg = m_regs[vals64Tmp].reg();
  assert(vals64Reg != InvalidReg || vals64Tmp->inst()->op() != LdConst);
  uint64_t vals64 = vals64Tmp->getValInt();
  assert((vals64 & mask64) == vals64);

  auto const destSK = SrcKey(curFunc(), m_curTrace->bcOff());
  auto const destSR = m_tx64->getSrcRec(destSK);

  auto thenBody = [&] {
    auto bitsOff = sizeof(uint64_t) * (firstBitNum / 64);
    auto cond = CC_NE;
    auto bitsPtrReg = m_rScratch;

    if (firstBitNum == 0) {
      bitsOff = Func::refBitValOff();
      bitsPtrReg = funcPtrReg;
    } else {
      m_as.loadq(funcPtrReg[Func::sharedOff()], bitsPtrReg);
      bitsOff -= sizeof(uint64_t);
    }

    if (vals64 == 0 || (mask64 & (mask64 - 1)) == 0) {
      // If vals64 is zero, or we're testing a single
      // bit, we can get away with a single test,
      // rather than mask-and-compare
      if (mask64Reg != InvalidReg) {
        m_as.testq(mask64Reg, bitsPtrReg[bitsOff]);
      } else if (mask64 < 256) {
        m_as.testb((int8_t)mask64, bitsPtrReg[bitsOff]);
      } else {
        m_as.testl((int32_t)mask64, bitsPtrReg[bitsOff]);
      }
      if (vals64) cond = CC_E;
    } else {
      auto bitsValReg = m_rScratch;
      m_as.loadq(bitsPtrReg[bitsOff], bitsValReg);
      if (debug) bitsPtrReg = InvalidReg;

      // bitsValReg <- bitsValReg & mask64
      if (mask64Reg != InvalidReg) {
        m_as.andq(mask64Reg, bitsValReg);
      } else if (mask64 < 256) {
        m_as.andb((int8_t)mask64, rbyte(bitsValReg));
      } else {
        m_as.andl((int32_t)mask64, r32(bitsValReg));
      }

      // If bitsValReg != vals64, then goto Exit
      if (vals64Reg != InvalidReg) {
        m_as.cmpq(vals64Reg, bitsValReg);
      } else if (mask64 < 256) {
        assert(vals64 < 256);
        m_as.cmpb((int8_t)vals64, rbyte(bitsValReg));
      } else {
        m_as.cmpl((int32_t)vals64, r32(bitsValReg));
      }
    }
    m_tx64->emitFallbackCondJmp(m_as, *destSR, cond);
  };

  if (firstBitNum == 0) {
    assert(nParamsReg == InvalidReg);
    // This is the first 64 bits. No need to check nParams.
    thenBody();
  } else {
    assert(nParamsReg != InvalidReg);
    // Check number of args...
    m_as.cmpq(firstBitNum, nParamsReg);

    if (vals64 != 0 && vals64 != mask64) {
      // If we're beyond nParams, then either all params
      // are refs, or all params are non-refs, so if vals64
      // isn't 0 and isn't mask64, there's no possibility of
      // a match
      m_tx64->emitFallbackCondJmp(m_as, *destSR, CC_LE);
      thenBody();
    } else {
      ifThenElse(CC_NLE, thenBody, /* else */ [&] {
        // If not special builtin...
        m_as.testl(AttrVariadicByRef, funcPtrReg[Func::attrsOff()]);
        m_tx64->emitFallbackCondJmp(m_as, *destSR, vals64 ? CC_Z : CC_NZ);
      });
    }
  }
}
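
// A worked example of the mask/vals encoding above: to require parameter 0
// by-ref and parameter 2 by-value (within the first 64 params), mask64 =
// 0b101 selects the bits to inspect and vals64 = 0b001 gives their required
// values. mask64 has more than one bit set and vals64 != 0, so the general
// form runs: bits = refBits & mask64, compare bits against vals64, and take
// the fallback jump on mismatch (CC_NE). With a single-bit mask the cheaper
// form applies: one test instruction sets ZF, and the fallback fires on
// CC_E when the bit is required set (vals64 != 0) or on CC_NE when it is
// required clear (vals64 == 0).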
void CodeGenerator::cgLdPropAddr(IRInstruction* inst) {
  SSATmp* dst  = inst->dst();
  SSATmp* obj  = inst->src(0);
  SSATmp* prop = inst->src(1);

  assert(prop->isConst() && prop->type() == Type::Int);

  auto dstReg = m_regs[dst].reg();
  auto objReg = m_regs[obj].reg();

  assert(objReg != InvalidReg);
  assert(dstReg != InvalidReg);

  int64_t offset = prop->getValInt();
  m_as.lea_reg64_disp_reg64(objReg, offset, dstReg);
}
void CodeGenerator::cgLdClsMethod(IRInstruction* inst) {
  SSATmp* dst   = inst->dst();
  SSATmp* cls   = inst->src(0);
  SSATmp* mSlot = inst->src(1);

  assert(cls->type() == Type::Cls);
  assert(mSlot->isConst() && mSlot->type() == Type::Int);
  uint64_t mSlotInt64 = mSlot->getValRawInt();
  // We're going to multiply mSlotVal by sizeof(Func*) and use
  // it as a 32-bit offset (methOff) below.
  if (mSlotInt64 > (std::numeric_limits<uint32_t>::max() / sizeof(Func*))) {
    CG_PUNT(cgLdClsMethod_large_offset);
  }
  int32_t mSlotVal = (uint32_t) mSlotInt64;

  Reg64 dstReg = m_regs[dst].reg();
  assert(dstReg != InvalidReg);

  Reg64 clsReg = m_regs[cls].reg();
  if (clsReg == InvalidReg) {
    CG_PUNT(LdClsMethod);
  }

  Offset vecOff   = Class::getMethodsOffset() + Class::MethodMap::vecOff();
  int32_t methOff = mSlotVal * sizeof(Func*);
  m_as.loadq(clsReg[vecOff], dstReg);
  m_as.loadq(dstReg[methOff], dstReg);
}
void CodeGenerator::cgLdClsMethodCache(IRInstruction* inst) {
  SSATmp* dst        = inst->dst();
  SSATmp* className  = inst->src(0);
  SSATmp* methodName = inst->src(1);
  SSATmp* baseClass  = inst->src(2);
  Block*  label      = inst->taken();

  // Stats::emitInc(a, Stats::TgtCache_StaticMethodHit);
  const StringData* cls    = className->getValStr();
  const StringData* method = methodName->getValStr();
  auto const ne            = baseClass->getValNamedEntity();
  TargetCache::CacheHandle ch =
    TargetCache::StaticMethodCache::alloc(cls,
                                          method,
                                          getContextName(curClass()));
  auto funcDestReg   = m_regs[dst].reg(0);
  auto classDestReg  = m_regs[dst].reg(1);
  auto offsetof_func = offsetof(TargetCache::StaticMethodCache, m_func);
  auto offsetof_cls  = offsetof(TargetCache::StaticMethodCache, m_cls);

  assert(funcDestReg != InvalidReg && classDestReg != InvalidReg);
  // Attempt to retrieve the func* and class* from cache
  m_as.loadq(rVmTl[ch + offsetof_func], funcDestReg);
  m_as.loadq(rVmTl[ch + offsetof_cls], classDestReg);
  m_as.testq(funcDestReg, funcDestReg);
  // May have retrieved a NULL from the cache
  // handle case where method is not entered in the cache
  unlikelyIfBlock(CC_E, [&] (Asm& a) {
    if (false) { // typecheck
      const UNUSED Func* f = StaticMethodCache::lookupIR(ch, ne, cls, method);
    }
    // can raise an error if class is undefined
    cgCallHelper(a,
                 (TCA)StaticMethodCache::lookupIR,
                 funcDestReg,
                 kSyncPoint,
                 ArgGroup(m_regs).imm(ch)        // Handle ch
                                 .immPtr(ne)     // NamedEntity* np.second
                                 .immPtr(cls)    // className
                                 .immPtr(method) // methodName
                );
    // recordInstrCall is done in cgCallHelper
    a.testq(funcDestReg, funcDestReg);
    a.loadq(rVmTl[ch + offsetof_cls], classDestReg);
    // if StaticMethodCache::lookupIR() returned NULL, jmp to label
    emitFwdJcc(a, CC_Z, label);
  });
}
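
// The shape above is the standard target-cache miss protocol used by the
// Ld*Cached-style instructions in this file: load the slot, test for null,
// and only on the unlikely null path (emitted in astubs) call the C++
// lookup helper, which fills the cache and returns the value. The result is
// re-tested because the helper can legitimately come back null (e.g. the
// class is not defined), in which case control branches to the taken edge.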
/**
 * Helper to emit getting the value for ActRec's m_this/m_cls slot
 * from a This pointer depending on whether the callee method is
 * static or not.
 */
void CodeGenerator::emitGetCtxFwdCallWithThis(PhysReg ctxReg,
                                              bool staticCallee) {
  if (staticCallee) {
    // Load (this->m_cls | 0x1) into ctxReg.
    m_as.loadq(ctxReg[ObjectData::getVMClassOffset()], ctxReg);
    m_as.orq(1, ctxReg);
  } else {
    // Just incref $this.
    emitIncRef(m_as, ctxReg);
  }
}

/**
 * This method is similar to emitGetCtxFwdCallWithThis above, but
 * whether or not the callee is a static method is unknown at JIT
 * time, and that is determined dynamically by looking up into the
 * StaticMethodFCache.
 */
void CodeGenerator::emitGetCtxFwdCallWithThisDyn(PhysReg destCtxReg,
                                                 PhysReg thisReg,
                                                 TargetCache::CacheHandle ch) {
  Label NonStaticCall, End;

  // thisReg is holding $this. Should we pass it to the callee?
  m_as.cmpl(1, rVmTl[ch + offsetof(StaticMethodFCache, m_static)]);
  m_as.jcc8(CC_NE, NonStaticCall);
  // If calling a static method...
  {
    // Load (this->m_cls | 0x1) into destCtxReg
    m_as.loadq(thisReg[ObjectData::getVMClassOffset()], destCtxReg);
    m_as.orq(1, destCtxReg);
    m_as.jmp8(End);
  }
  // Else: calling non-static method
  {
    asm_label(m_as, NonStaticCall);
    emitMovRegReg(m_as, thisReg, destCtxReg);
    emitIncRef(m_as, destCtxReg);
  }
  asm_label(m_as, End);
}
void CodeGenerator::cgGetCtxFwdCall(IRInstruction* inst) {
  PhysReg destCtxReg = m_regs[inst->dst()].reg(0);
  SSATmp*  srcCtxTmp = inst->src(0);
  const Func* callee = inst->src(1)->getValFunc();
  bool      withThis = srcCtxTmp->isA(Type::Obj);

  // Eagerly move src into the dest reg
  emitMovRegReg(m_as, m_regs[srcCtxTmp].reg(0), destCtxReg);

  Label End;
  // If we don't know whether we have a This, we need to check dynamically
  if (!withThis) {
    m_as.testb(1, rbyte(destCtxReg));
    m_as.jcc8(CC_NZ, End);
  }

  // If we have a This pointer in destCtxReg, then select either This
  // or its Class based on whether callee is static or not
  emitGetCtxFwdCallWithThis(destCtxReg, (callee->attrs() & AttrStatic));

  asm_label(m_as, End);
}
void CodeGenerator::cgLdClsMethodFCache(IRInstruction* inst) {
  PhysReg funcDestReg = m_regs[inst->dst()].reg(0);
  PhysReg destCtxReg  = m_regs[inst->dst()].reg(1);
  const Class* cls    = inst->src(0)->getValClass();
  const StringData* methName = inst->src(1)->getValStr();
  SSATmp* srcCtxTmp   = inst->src(2);
  PhysReg srcCtxReg   = m_regs[srcCtxTmp].reg(0);
  Block* exitLabel    = inst->taken();
  const StringData* clsName = cls->name();
  CacheHandle ch = StaticMethodFCache::alloc(clsName, methName,
                                             getContextName(curClass()));

  assert(funcDestReg != InvalidReg && destCtxReg != InvalidReg);
  emitMovRegReg(m_as, srcCtxReg, destCtxReg);
  m_as.loadq(rVmTl[ch], funcDestReg);
  m_as.testq(funcDestReg, funcDestReg);

  Label End;

  // Handle case where method is not entered in the cache
  unlikelyIfBlock(CC_E, [&] (Asm& a) {
    const Func* (*lookup)(CacheHandle, const Class*, const StringData*) =
      StaticMethodFCache::lookupIR;
    // preserve destCtxReg across the call since it wouldn't be otherwise
    RegSet toSave = m_state.liveRegs[inst] | RegSet(destCtxReg);
    cgCallHelper(a, Transl::CppCall((TCA)lookup),
                 funcDestReg, InvalidReg,
                 kSyncPoint,
                 ArgGroup(m_regs).imm(ch)
                                 .immPtr(cls)
                                 .immPtr(methName),
                 toSave);
    // If entry found in target cache, jump back to m_as.
    // Otherwise, bail to exit label
    a.testq(funcDestReg, funcDestReg);
    emitFwdJcc(a, CC_Z, exitLabel);
  });

  auto t = srcCtxTmp->type();
  assert(!t.equals(Type::Cls));
  if (t.equals(Type::Cctx)) {
    return; // done: destCtxReg already has srcCtxReg
  } else if (t == Type::Obj) {
    // unconditionally run code produced by emitGetCtxFwdCallWithThisDyn below
  } else if (t == Type::Ctx) {
    // dynamically check if we have a This pointer and
    // call emitGetCtxFwdCallWithThisDyn below
    m_as.testb(1, rbyte(destCtxReg));
    m_as.jcc8(CC_NZ, End);
  } else {
    not_reached();
  }

  // If we have a 'this' pointer ...
  emitGetCtxFwdCallWithThisDyn(destCtxReg, destCtxReg, ch);

  asm_label(m_as, End);
}
void CodeGenerator::cgLdClsPropAddrCached(IRInstruction* inst) {
  using namespace Transl::TargetCache;
  SSATmp* dst      = inst->dst();
  SSATmp* cls      = inst->src(0);
  SSATmp* propName = inst->src(1);
  SSATmp* clsName  = inst->src(2);
  SSATmp* cxt      = inst->src(3);
  Block* target    = inst->taken();

  const StringData* propNameString = propName->getValStr();
  const StringData* clsNameString  = clsName->getValStr();

  string sds(Util::toLower(clsNameString->data()) + ":" +
             string(propNameString->data(), propNameString->size()));
  StackStringData sd(sds.c_str(), sds.size(), AttachLiteral);
  CacheHandle ch = SPropCache::alloc(&sd);

  auto dstReg = m_regs[dst].reg();
  // Cls is live in the slow path call to lookupIR, so we have to be
  // careful not to clobber it before the branch to slow path. So
  // use the scratch register as a temporary destination if cls is
  // assigned the same register as the dst register.
  auto tmpReg = dstReg;
  if (dstReg == InvalidReg || dstReg == m_regs[cls].reg()) {
    tmpReg = PhysReg(m_rScratch);
  }

  // Could be optimized to cmp against zero when !label && dstReg == InvalidReg
  m_as.loadq(rVmTl[ch], tmpReg);
  m_as.testq(tmpReg, tmpReg);
  unlikelyIfBlock(CC_E, [&] (Asm& a) {
    cgCallHelper(a,
                 target ? (TCA)SPropCache::lookupIR<false>
                        : (TCA)SPropCache::lookupIR<true>, // raise on error
                 tmpReg,
                 kSyncPoint, // could re-enter to initialize properties
                 ArgGroup(m_regs).imm(ch).ssa(cls).ssa(propName).ssa(cxt));
    if (target) {
      a.testq(tmpReg, tmpReg);
      emitFwdJcc(a, CC_Z, target);
    }
  });
  if (dstReg != InvalidReg) {
    emitMovRegReg(m_as, tmpReg, dstReg);
  }
}
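
// The cache key built above is the lowercased class name joined to the
// property name with a ':'; a lookup of Foo::$bar therefore keys on
// "foo:bar". Keying on the string pair lets distinct translations that
// reference the same static property share one SPropCache slot.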
void CodeGenerator::cgLdClsPropAddr(IRInstruction* inst) {
  SSATmp* dst   = inst->dst();
  SSATmp* cls   = inst->src(0);
  SSATmp* prop  = inst->src(1);
  SSATmp* ctx   = inst->src(2);
  Block* target = inst->taken();
  // If our label is a catch trace we pretend we don't have one, to
  // avoid emitting a jmp to it or calling the wrong helper.
  if (target && target->trace()->isCatch()) target = nullptr;

  auto dstReg = m_regs[dst].reg();
  if (dstReg == InvalidReg && target) {
    // result is unused but this instruction was not eliminated
    // because it's essential
    dstReg = m_rScratch;
  }
  cgCallHelper(m_as,
               target ? (TCA)SPropCache::lookupSProp<false>
                      : (TCA)SPropCache::lookupSProp<true>, // raise on error
               dstReg,
               kSyncPoint, // could re-enter to initialize properties
               ArgGroup(m_regs).ssa(cls).ssa(prop).ssa(ctx));
  if (target) {
    m_as.testq(dstReg, dstReg);
    emitFwdJcc(m_as, CC_Z, target);
  }
}
TargetCache::CacheHandle CodeGenerator::cgLdClsCachedCommon(
  IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  const StringData* className = inst->src(0)->getValStr();
  auto ch = TargetCache::allocKnownClass(className);
  auto dstReg = m_regs[dst].reg();
  if (dstReg == InvalidReg) {
    m_as.cmpq (0, rVmTl[ch]);
  } else {
    m_as.loadq(rVmTl[ch], dstReg);
    m_as.testq(dstReg, dstReg);
  }

  return ch;
}
void CodeGenerator::cgLdClsCached(IRInstruction* inst) {
  auto ch = cgLdClsCachedCommon(inst);
  unlikelyIfBlock(CC_E, [&] (Asm& a) {
    // Passing only two arguments to lookupKnownClass, since the
    // third is ignored in the checkOnly==false case.
    cgCallHelper(a,
                 (TCA)TargetCache::lookupKnownClass<false>,
                 inst->dst(),
                 kSyncPoint,
                 ArgGroup(m_regs).addr(rVmTl, intptr_t(ch)).ssas(inst, 0));
  });
}

void CodeGenerator::cgLdClsCachedSafe(IRInstruction* inst) {
  cgLdClsCachedCommon(inst);
  if (Block* taken = inst->taken()) {
    emitFwdJcc(CC_Z, taken);
  }
}

void CodeGenerator::cgLdCls(IRInstruction* inst) {
  SSATmp* dst       = inst->dst();
  SSATmp* className = inst->src(0);

  CacheHandle ch = ClassCache::alloc();
  cgCallHelper(m_as, (TCA)ClassCache::lookup, dst, kSyncPoint,
               ArgGroup(m_regs).imm(ch).ssa(className));
}
static StringData* fullConstName(const StringData* cls,
                                 const StringData* cnsName) {
  return StringData::GetStaticString(
    Util::toLower(cls->data()) + "::" + cnsName->data()
  );
}

void CodeGenerator::cgLdClsCns(IRInstruction* inst) {
  auto const extra    = inst->extra<LdClsCns>();
  auto const fullName = fullConstName(extra->clsName, extra->cnsName);
  auto const ch       = TargetCache::allocClassConstant(fullName);
  cgLoad(rVmTl, ch, inst);
}
void CodeGenerator::cgLookupClsCns(IRInstruction* inst) {
  auto const extra    = inst->extra<LookupClsCns>();
  auto const fullName = fullConstName(extra->clsName, extra->cnsName);
  auto const ch       = TargetCache::allocClassConstant(fullName);
  cgCallHelper(m_as,
               TCA(TargetCache::lookupClassConstantTv),
               inst->dst(),
               kSyncPoint,
               ArgGroup(m_regs)
                 .addr(rVmTl, intptr_t(ch))
                 .immPtr(Unit::GetNamedEntity(extra->clsName))
                 .immPtr(extra->clsName)
                 .immPtr(extra->cnsName),
               DestType::TV);
}

void CodeGenerator::cgLdCns(IRInstruction* inst) {
  const StringData* cnsName = inst->src(0)->getValStr();

  TargetCache::CacheHandle ch = StringData::DefCnsHandle(cnsName, false);
  // Has an unlikely branch to a LookupCns
  cgLoad(rVmTl, ch, inst);
}
static TypedValue lookupCnsHelper(const TypedValue* tv, StringData* nm) {
  assert(tv->m_type == KindOfUninit);
  TypedValue* cns = nullptr;
  TypedValue c1;
  if (UNLIKELY(tv->m_data.pref != nullptr)) {
    ClassInfo::ConstantInfo* ci =
      (ClassInfo::ConstantInfo*)(void*)tv->m_data.pref;
    cns = const_cast<Variant&>(ci->getDeferredValue()).asTypedValue();
    tvReadCell(cns, &c1);
  } else {
    if (UNLIKELY(TargetCache::s_constants != nullptr)) {
      cns = TargetCache::s_constants->HphpArray::nvGet(nm);
    }
    if (!cns) {
      cns = Unit::loadCns(const_cast<StringData*>(nm));
    }
    if (UNLIKELY(!cns)) {
      raise_notice(Strings::UNDEFINED_CONSTANT, nm->data(), nm->data());
      c1.m_data.pstr = const_cast<StringData*>(nm);
      c1.m_type = KindOfStaticString;
    } else {
      c1.m_type = cns->m_type;
      c1.m_data = cns->m_data;
    }
  }
  return c1;
}
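
// Lookup order implemented above, for reference: (1) a deferred ClassInfo
// constant stashed in the cache cell's m_data.pref, (2) the per-request
// dynamic constant table TargetCache::s_constants, (3) the unit-level table
// via Unit::loadCns. If everything misses, PHP semantics kick in: raise a
// notice and treat the bare name as its own string value, which is why c1
// ends up as a KindOfStaticString holding nm.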
void CodeGenerator::cgLookupCns(IRInstruction* inst) {
  SSATmp* cnsNameTmp = inst->src(0);

  assert(inst->typeParam() == Type::Cell);
  assert(cnsNameTmp->isConst() && cnsNameTmp->type() == Type::StaticStr);

  const StringData* cnsName = cnsNameTmp->getValStr();
  TargetCache::CacheHandle ch = StringData::DefCnsHandle(cnsName, false);

  ArgGroup args(m_regs);
  args.addr(rVmTl, ch)
      .immPtr(cnsName);

  cgCallHelper(m_as, TCA(lookupCnsHelper),
               inst->dst(), kSyncPoint, args, DestType::TV);
}
static inline int64_t ak_exist_string_helper(StringData* key, ArrayData* arr) {
  int64_t n;
  if (key->isStrictlyInteger(n)) {
    return arr->exists(n);
  }
  return arr->exists(StrNR(key));
}

static int64_t ak_exist_string(StringData* key, ArrayData* arr) {
  int64_t res = ak_exist_string_helper(key, arr);
  return res;
}

static int64_t ak_exist_int(int64_t key, ArrayData* arr) {
  bool res = arr->exists(key);
  return res;
}

static int64_t ak_exist_string_obj(StringData* key, ObjectData* obj) {
  if (obj->isCollection()) {
    return collectionOffsetContains(obj, key);
  }
  CArrRef arr = obj->o_toArray();
  int64_t res = ak_exist_string_helper(key, arr.get());
  return res;
}

static int64_t ak_exist_int_obj(int64_t key, ObjectData* obj) {
  if (obj->isCollection()) {
    return collectionOffsetContains(obj, key);
  }
  CArrRef arr = obj->o_toArray();
  bool res = arr.get()->exists(key);
  return res;
}
void CodeGenerator::cgAKExists(IRInstruction* inst) {
  SSATmp* arr = inst->src(0);
  SSATmp* key = inst->src(1);

  if (key->type().isNull()) {
    if (arr->isA(Type::Arr)) {
      cgCallHelper(m_as,
                   (TCA)ak_exist_string,
                   inst->dst(),
                   kNoSyncPoint,
                   ArgGroup(m_regs).immPtr(empty_string.get()).ssa(arr));
    } else {
      m_as.mov_imm64_reg(0, m_regs[inst->dst()].reg());
    }
    return;
  }

  TCA helper_func =
    arr->isA(Type::Obj)
    ? (key->isA(Type::Int) ? (TCA)ak_exist_int_obj : (TCA)ak_exist_string_obj)
    : (key->isA(Type::Int) ? (TCA)ak_exist_int : (TCA)ak_exist_string);

  cgCallHelper(m_as,
               helper_func,
               inst->dst(),
               kNoSyncPoint,
               ArgGroup(m_regs).ssa(key).ssa(arr));
}
HOT_FUNC_VM static TypedValue* ldGblAddrHelper(StringData* name) {
  return g_vmContext->m_globalVarEnv->lookup(name);
}

HOT_FUNC_VM static TypedValue* ldGblAddrDefHelper(StringData* name) {
  TypedValue* r = g_vmContext->m_globalVarEnv->lookupAdd(name);
  decRefStr(name);
  return r;
}

void CodeGenerator::cgLdGblAddr(IRInstruction* inst) {
  auto dstReg = m_regs[inst->dst()].reg();
  cgCallHelper(m_as, (TCA)ldGblAddrHelper, dstReg, kNoSyncPoint,
               ArgGroup(m_regs).ssa(inst->src(0)));
  m_as.testq(dstReg, dstReg);
  emitFwdJcc(CC_Z, inst->taken());
}

void CodeGenerator::cgLdGblAddrDef(IRInstruction* inst) {
  cgCallHelper(m_as, (TCA)ldGblAddrDefHelper, inst->dst(), kNoSyncPoint,
               ArgGroup(m_regs).ssa(inst->src(0)));
}
void CodeGenerator::emitTestZero(SSATmp* src) {
  auto& a = m_as;
  auto reg = m_regs[src].reg();

  /*
   * If src is const, normally an earlier optimization pass should have
   * converted the thing testing this condition into something
   * unconditional. So rather than supporting constants efficiently
   * here, we just materialize the value into a register.
   */
  if (reg == InvalidReg) {
    reg = m_rScratch;
    a.movq(src->getValBits(), reg);
  }

  if (src->isA(Type::Bool)) {
    a.testb(rbyte(reg), rbyte(reg));
  } else {
    a.testq(reg, reg);
  }
}
void CodeGenerator::cgJmpZero(IRInstruction* inst) {
  emitTestZero(inst->src(0));
  emitFwdJcc(CC_Z, inst->taken());
}

void CodeGenerator::cgJmpNZero(IRInstruction* inst) {
  emitTestZero(inst->src(0));
  emitFwdJcc(CC_NZ, inst->taken());
}

void CodeGenerator::cgReqBindJmpZero(IRInstruction* inst) {
  // TODO(#2404427): prepareForTestAndSmash?
  emitTestZero(inst->src(0));
  emitReqBindJcc(CC_Z, inst->extra<ReqBindJmpZero>());
}

void CodeGenerator::cgReqBindJmpNZero(IRInstruction* inst) {
  // TODO(#2404427): prepareForTestAndSmash?
  emitTestZero(inst->src(0));
  emitReqBindJcc(CC_NZ, inst->extra<ReqBindJmpNZero>());
}
void CodeGenerator::cgJmp_(IRInstruction* inst) {
  Block* target = inst->taken();
  if (unsigned n = inst->numSrcs()) {
    // Parallel-copy sources to the label's destination registers.
    // TODO: t2040286: this only works if all destinations fit in registers.
    auto srcs = inst->srcs();
    auto dsts = target->front()->dsts();
    ArgGroup args(m_regs);
    for (unsigned i = 0, j = 0; i < n; i++) {
      assert(srcs[i]->type().subtypeOf(dsts[i].type()));
      auto dst = &dsts[i];
      auto src = srcs[i];
      // Currently, full XMM registers cannot be assigned to SSATmps
      // passed from Jmp_ to DefLabel. If this changes, it'll require
      // teaching shuffleArgs() how to handle full XMM values.
      assert(!m_regs[src].isFullXMM() && !m_regs[dst].isFullXMM());
      if (m_regs[dst].reg(0) == InvalidReg) continue; // dst is unused.
      // first dst register
      args.ssa(src);
      args[j++].setDstReg(m_regs[dst].reg(0));
      // second dst register, if any
      if (dst->numNeededRegs() == 2) {
        if (src->numNeededRegs() < 2) {
          // src has known data type, but dst doesn't - pass immediate type
          assert(src->type().isKnownDataType());
          args.imm(src->type().toDataType());
        } else {
          // pass src's second register
          assert(m_regs[src].reg(1) != InvalidReg);
          args.reg(m_regs[src].reg(1));
        }
        args[j++].setDstReg(m_regs[dst].reg(1));
      }
    }
    assert(args.numStackArgs() == 0 &&
           "Jmp_ doesn't support passing arguments on the stack yet.");
    shuffleArgs(m_as, args);
  }
  if (!m_state.noTerminalJmp_) {
    emitFwdJmp(m_as, target, m_state);
  }
}
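
// Jmp_ with sources is this IR's phi: each source value must land in the
// register(s) of the corresponding DefLabel destination. Reusing ArgGroup
// plus shuffleArgs buys cycle-safe parallel copies: a transfer like
// (t1 -> r2, t2 -> r1) cannot be done with naive sequential moves, and the
// shuffler breaks such cycles (with an exchange or a scratch register)
// before the terminal jump is emitted.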
void CodeGenerator::cgJmpIndirect(IRInstruction* inst) {
  m_as.jmp(m_regs[inst->src(0)].reg());
}
void CodeGenerator::cgCheckInit(IRInstruction* inst) {
  Block* label = inst->taken();
  SSATmp* src = inst->src(0);

  if (src->type().isInit()) return;

  auto typeReg = m_regs[src].reg(1);
  assert(typeReg != InvalidReg);

  static_assert(KindOfUninit == 0, "cgCheckInit assumes KindOfUninit == 0");
  m_as.testb(rbyte(typeReg), rbyte(typeReg));
  emitFwdJcc(CC_Z, label);
}

void CodeGenerator::cgCheckInitMem(IRInstruction* inst) {
  Block* label = inst->taken();
  SSATmp* base = inst->src(0);
  int64_t offset = inst->src(1)->getValInt();
  Type t = base->type().deref();
  if (t.isInit()) return;
  auto basereg = m_regs[base].reg();
  emitCmpTVType(m_as, KindOfUninit, basereg[offset + TVOFF(m_type)]);
  emitFwdJcc(CC_Z, label);
}
void CodeGenerator::cgExitWhenSurprised(IRInstruction* inst) {
  Block* label = inst->taken();
  m_tx64->emitTestSurpriseFlags(m_as);
  emitFwdJcc(CC_NZ, label);
}

void CodeGenerator::cgExitOnVarEnv(IRInstruction* inst) {
  SSATmp* fp   = inst->src(0);
  Block* label = inst->taken();

  assert(!(fp->isConst()));

  auto fpReg = m_regs[fp].reg();
  m_as.cmpq(0, fpReg[AROFF(m_varEnv)]);
  emitFwdJcc(CC_NE, label);
}
void CodeGenerator::cgReleaseVVOrExit(IRInstruction* inst) {
  auto* const label = inst->taken();
  auto const rFp = m_regs[inst->src(0)].reg();

  m_as.cmpq(0, rFp[AROFF(m_varEnv)]);
  unlikelyIfBlock(CC_NZ, [&] (Asm& a) {
    a.testl(ActRec::kExtraArgsBit, rFp[AROFF(m_varEnv)]);
    emitFwdJcc(a, CC_Z, label);
    cgCallHelper(
      a,
      TCA(static_cast<void (*)(ActRec*)>(ExtraArgs::deallocate)),
      InvalidReg,
      kSyncPoint,
      ArgGroup(m_regs).reg(rFp)
    );
  });
}
void CodeGenerator::cgBoxPtr(IRInstruction* inst) {
  SSATmp* dst  = inst->dst();
  SSATmp* addr = inst->src(0);
  auto base    = m_regs[addr].reg();
  auto dstReg  = m_regs[dst].reg();
  emitMovRegReg(m_as, base, dstReg);
  emitTypeTest(Type::BoxedCell, base[TVOFF(m_type)],
               base[TVOFF(m_data)],
               [&](ConditionCode cc) {
                 ifThen(m_as, ccNegate(cc), [&] {
                   cgCallHelper(m_as, (TCA)tvBox, dstReg, kNoSyncPoint,
                                ArgGroup(m_regs).ssa(addr));
                 });
               });
}
* inst
) {
5036 UNUSED SSATmp
* dst
= inst
->dst();
5037 UNUSED SSATmp
* cnsName
= inst
->src(0);
5038 UNUSED SSATmp
* val
= inst
->src(1);
5039 using namespace TargetCache
;
// TODO: Kill this #2031980
static StringData* concat_value(TypedValue tv1, TypedValue tv2) {
  return concat_tv(tv1.m_type, tv1.m_data.num, tv2.m_type, tv2.m_data.num);
}

void CodeGenerator::cgConcat(IRInstruction* inst) {
  SSATmp* dst = inst->dst();
  SSATmp* tl  = inst->src(0);
  SSATmp* tr  = inst->src(1);

  Type lType = tl->type();
  Type rType = tr->type();
  // We have specialized helpers for concatenating two strings, a
  // string and an int, and an int and a string.
  void* fptr = nullptr;
  if (lType.isString() && rType.isString()) {
    fptr = (void*)concat_ss;
  } else if (lType.isString() && rType == Type::Int) {
    fptr = (void*)concat_si;
  } else if (lType == Type::Int && rType.isString()) {
    fptr = (void*)concat_is;
  }
  if (fptr) {
    cgCallHelper(m_as, (TCA)fptr, dst, kNoSyncPoint,
                 ArgGroup(m_regs).ssa(tl).ssa(tr));
  } else {
    if (lType.subtypeOf(Type::Obj) || lType.needsReg() ||
        rType.subtypeOf(Type::Obj) || rType.needsReg()) {
      CG_PUNT(cgConcat);
    }
    cgCallHelper(m_as, (TCA)concat_value, dst, kNoSyncPoint,
                 ArgGroup(m_regs).typedValue(tl).typedValue(tr));
  }
}
void CodeGenerator::cgInterpOne(IRInstruction* inst) {
  SSATmp* fp = inst->src(0);
  SSATmp* sp = inst->src(1);
  SSATmp* pcOffTmp = inst->src(2);
  SSATmp* spAdjustmentTmp = inst->src(3);
  int64_t pcOff = pcOffTmp->getValInt();

  auto opc = *(curFunc()->unit()->at(pcOff));
  void* interpOneHelper = interpOneEntryPoints[opc];

  auto dstReg = InvalidReg;
  cgCallHelper(m_as, (TCA)interpOneHelper, dstReg, kSyncPoint,
               ArgGroup(m_regs).ssa(fp).ssa(sp).imm(pcOff));

  auto newSpReg = m_regs[inst->dst()].reg();
  assert(newSpReg == m_regs[sp].reg());

  int64_t spAdjustBytes = cellsToBytes(spAdjustmentTmp->getValInt());
  if (spAdjustBytes != 0) {
    m_as.addq(spAdjustBytes, newSpReg);
  }
}

void CodeGenerator::cgInterpOneCF(IRInstruction* inst) {
  SSATmp* fp = inst->src(0);
  SSATmp* sp = inst->src(1);
  int64_t pcOff = inst->src(2)->getValInt();

  auto opc = *(curFunc()->unit()->at(pcOff));
  void* interpOneHelper = interpOneEntryPoints[opc];

  auto dstReg = InvalidReg;
  cgCallHelper(m_as, (TCA)interpOneHelper, dstReg, kSyncPoint,
               ArgGroup(m_regs).ssa(fp).ssa(sp).imm(pcOff));

  // The interpOne method returns a pointer to the current ExecutionContext
  // in rax. Use it to read the 'm_fp' and 'm_stack.m_top' fields into the
  // rVmFp and rVmSp registers.
  m_as.loadq(rax[offsetof(VMExecutionContext, m_fp)], rVmFp);
  m_as.loadq(rax[offsetof(VMExecutionContext, m_stack) +
             Stack::topOfStackOffset()], rVmSp);

  m_tx64->emitServiceReq(SRFlags::EmitInA, REQ_RESUME, 0ull);
}
void CodeGenerator::cgContEnter(IRInstruction* inst) {
  auto contAR    = inst->src(0);
  auto addr      = inst->src(1);
  auto returnOff = inst->src(2);
  auto curFp     = m_regs[inst->src(3)].reg();
  auto contARReg = m_regs[contAR].reg();

  m_as.storel(returnOff->getValInt(), contARReg[AROFF(m_soff)]);
  m_as.storeq(curFp, contARReg[AROFF(m_savedRbp)]);
  m_as.movq  (contARReg, rStashedAR);

  m_as.call  (m_regs[addr].reg());
}
void CodeGenerator::emitContVarEnvHelperCall(SSATmp* fp, TCA helper) {
  auto scratch = m_rScratch;

  m_as.loadq(m_regs[fp].reg()[AROFF(m_varEnv)], scratch);
  m_as.testq(scratch, scratch);
  unlikelyIfBlock(CC_NZ, [&] (Asm& a) {
    cgCallHelper(a, helper, InvalidReg, kNoSyncPoint,
                 ArgGroup(m_regs).ssa(fp));
  });
}

void CodeGenerator::cgUnlinkContVarEnv(IRInstruction* inst) {
  emitContVarEnvHelperCall(
    inst->src(0),
    (TCA)VMExecutionContext::packContVarEnvLinkage);
}

void CodeGenerator::cgLinkContVarEnv(IRInstruction* inst) {
  emitContVarEnvHelperCall(
    inst->src(0),
    (TCA)VMExecutionContext::unpackContVarEnvLinkage);
}
void CodeGenerator::cgContPreNext(IRInstruction* inst) {
  auto contReg = m_regs[inst->src(0)].reg();

  const Offset doneOffset = c_Continuation::doneOffset();
  static_assert((doneOffset + 1) == c_Continuation::runningOffset(),
                "done should immediately precede running");
  // Check done and running at the same time
  m_as.test_imm16_disp_reg16(0x0101, doneOffset, contReg);
  emitFwdJcc(CC_NZ, inst->taken());

  // Advance m_index and flag the continuation as running.
  m_as.add_imm64_disp_reg64(0x1, CONTOFF(m_index), contReg);
  m_as.store_imm8_disp_reg(0x1, c_Continuation::runningOffset(), contReg);
}
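
// The 0x0101 trick above: done and running are adjacent one-byte fields
// (the static_assert pins that layout), so a single 16-bit test against
// 0x0101 inspects bit 0 of both bytes at once:
//
//   [running byte][done byte] & 0x0101 != 0   <=>   done || running
//
// and CC_NZ routes either case to the taken edge before the iteration
// state is updated.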
void CodeGenerator::cgContStartedCheck(IRInstruction* inst) {
  m_as.cmp_imm64_disp_reg64(0, CONTOFF(m_index),
                            m_regs[inst->src(0)].reg());
  emitFwdJcc(CC_L, inst->taken());
}
void CodeGenerator::cgIterInit(IRInstruction* inst) {
  cgIterInitCommon(inst);
}

void CodeGenerator::cgIterInitK(IRInstruction* inst) {
  cgIterInitCommon(inst);
}

void CodeGenerator::cgWIterInit(IRInstruction* inst) {
  cgIterInitCommon(inst);
}

void CodeGenerator::cgWIterInitK(IRInstruction* inst) {
  cgIterInitCommon(inst);
}

void CodeGenerator::cgIterInitCommon(IRInstruction* inst) {
  bool isInitK = inst->op() == IterInitK || inst->op() == WIterInitK;
  bool isWInit = inst->op() == WIterInit || inst->op() == WIterInitK;

  PhysReg          fpReg = m_regs[inst->src(1)].reg();
  int64_t     iterOffset = this->iterOffset(inst->src(2));
  int64_t valLocalOffset = localOffset(inst->src(3));
  SSATmp*            src = inst->src(0);
  ArgGroup args(m_regs);
  args.addr(fpReg, iterOffset).ssa(src);
  if (src->isArray()) {
    args.addr(fpReg, valLocalOffset);
    if (isInitK) {
      args.addr(fpReg, localOffset(inst->src(4)));
    } else if (isWInit) {
      args.imm(0);
    }
    TCA helperAddr = isWInit ? (TCA)new_iter_array_key<true> :
      isInitK ? (TCA)new_iter_array_key<false> : (TCA)new_iter_array;
    cgCallHelper(m_as, helperAddr, inst->dst(), kSyncPoint, args);
  } else {
    assert(src->type() == Type::Obj);
    args.imm(uintptr_t(curClass())).addr(fpReg, valLocalOffset);
    if (isInitK) {
      args.addr(fpReg, localOffset(inst->src(4)));
    } else {
      args.imm(0);
    }
    // new_iter_object decrefs its src object if it propagates an
    // exception out, so we use kSyncPointAdjustOne, which adjusts the
    // stack pointer by 1 stack element on an unwind, skipping over
    // the src object.
    cgCallHelper(m_as, (TCA)new_iter_object, inst->dst(),
                 kSyncPointAdjustOne, args);
  }
}
void CodeGenerator::cgIterNext(IRInstruction* inst) {
  cgIterNextCommon(inst);
}

void CodeGenerator::cgIterNextK(IRInstruction* inst) {
  cgIterNextCommon(inst);
}

void CodeGenerator::cgWIterNext(IRInstruction* inst) {
  cgIterNextCommon(inst);
}

void CodeGenerator::cgWIterNextK(IRInstruction* inst) {
  cgIterNextCommon(inst);
}

void CodeGenerator::cgIterNextCommon(IRInstruction* inst) {
  bool isNextK = inst->op() == IterNextK || inst->op() == WIterNextK;
  bool isWNext = inst->op() == WIterNext || inst->op() == WIterNextK;
  PhysReg fpReg = m_regs[inst->src(0)].reg();
  ArgGroup args(m_regs);
  args.addr(fpReg, iterOffset(inst->src(1)))
      .addr(fpReg, localOffset(inst->src(2)));
  if (isNextK) {
    args.addr(fpReg, localOffset(inst->src(3)));
  } else if (isWNext) {
    args.imm(0);
  }
  TCA helperAddr = isWNext ? (TCA)iter_next_key<true> :
    isNextK ? (TCA)iter_next_key<false> : (TCA)iter_next;
  cgCallHelper(m_as, helperAddr, inst->dst(), kSyncPoint, args);
}
void iterFreeHelper(Iter* iter) {
  iter->free();
}

void citerFreeHelper(Iter* iter) {
  iter->cfree();
}

void CodeGenerator::cgIterFree(IRInstruction* inst) {
  PhysReg fpReg = m_regs[inst->src(0)].reg();
  int64_t offset = iterOffset(inst->extra<IterFree>()->iterId);
  cgCallHelper(m_as, (TCA)iterFreeHelper, InvalidReg, kSyncPoint,
               ArgGroup(m_regs).addr(fpReg, offset));
}

void CodeGenerator::cgDecodeCufIter(IRInstruction* inst) {
  PhysReg fpReg = m_regs[inst->src(1)].reg();
  int64_t offset = iterOffset(inst->extra<DecodeCufIter>()->iterId);
  cgCallHelper(m_as, (TCA)decodeCufIterHelper, inst->dst(), kSyncPoint,
               ArgGroup(m_regs).addr(fpReg, offset)
                               .typedValue(inst->src(0)));
}

void CodeGenerator::cgCIterFree(IRInstruction* inst) {
  PhysReg fpReg = m_regs[inst->src(0)].reg();
  int64_t offset = iterOffset(inst->extra<CIterFree>()->iterId);
  cgCallHelper(m_as, (TCA)citerFreeHelper, InvalidReg, kSyncPoint,
               ArgGroup(m_regs).addr(fpReg, offset));
}
void CodeGenerator::cgIncStat(IRInstruction *inst) {
  Stats::emitInc(m_as,
                 Stats::StatCounter(inst->src(0)->getValInt()),
                 inst->src(1)->getValInt(),
                 Transl::CC_None,
                 inst->src(2)->getValBool());
}

void CodeGenerator::cgIncTransCounter(IRInstruction* inst) {
  m_tx64->emitTransCounterInc(m_as);
}

void CodeGenerator::cgDbgAssertRefCount(IRInstruction* inst) {
  emitAssertRefCount(m_as, m_regs[inst->src(0)].reg());
}
void traceCallback(ActRec* fp, Cell* sp, int64_t pcOff, void* rip) {
  if (HPHP::Trace::moduleEnabled(HPHP::Trace::hhirTracelets)) {
    FTRACE(0, "{} {} {}\n", fp->m_func->fullName()->data(), pcOff, rip);
  }
  checkFrame(fp, sp, /*checkLocals*/true);
}

void CodeGenerator::cgDbgAssertType(IRInstruction* inst) {
  emitTypeTest(inst->typeParam(),
               m_regs[inst->src(0)].reg(1),
               m_regs[inst->src(0)].reg(0),
               [&](ConditionCode cc) {
                 ifThen(m_as, ccNegate(cc), [&] { m_as.ud2(); });
               });
}
* inst
) {
5330 SSATmp
* objClass
= inst
->src(0);
5331 assert(!objClass
->isConst());
5332 auto objClassReg
= m_regs
[objClass
].reg();
5333 SSATmp
* constraint
= inst
->src(1);
5335 if (constraint
->isConst()) {
5336 m_as
. cmpq(constraint
->getValClass(), objClassReg
);
5338 m_as
. cmpq(m_regs
[constraint
].reg(), objClassReg
);
5341 // The native call for this instruction is the slow path that does
5342 // proper subtype checking. The comparison above is just to
5343 // short-circuit the overhead when the Classes are an exact match.
5344 ifThen(m_as
, CC_NE
, [&]{ cgCallNative(inst
); });
5347 static void emitTraceCall(CodeGenerator::Asm
& as
,
5349 Transl::TranslatorX64
* tx64
) {
5350 // call to a trace function
5351 as
.mov_imm64_reg((int64_t)as
.code
.frontier
, reg::rcx
);
5352 as
.mov_reg64_reg64(rVmFp
, reg::rdi
);
5353 as
.mov_reg64_reg64(rVmSp
, reg::rsi
);
5354 as
.mov_imm64_reg(pcOff
, reg::rdx
);
5355 // do the call; may use a trampoline
5356 tx64
->emitCall(as
, (TCA
)traceCallback
);
void CodeGenerator::print() const {
  JIT::print(std::cout, m_curTrace, &m_state.regs, m_state.lifetime,
             m_state.asmInfo);
}

static void patchJumps(Asm& as, CodegenState& state, Block* block) {
  void* list = state.patches[block];
  Address labelAddr = as.code.frontier;
  while (list) {
    int32_t* toPatch = (int32_t*)list;
    int32_t diffToNext = *toPatch;
    ssize_t diff = labelAddr - ((Address)list + sizeof(int32_t));
    *toPatch = safe_cast<int32_t>(diff); // patch the jump address
    if (diffToNext == 0) break;
    void* next = (TCA)list - diffToNext;
    list = next;
  }
}
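
// How the patch list works: when a forward jump targets a block that has no
// address yet, the 4-byte displacement field of the emitted jump is linked
// into a chain rooted at state.patches[block], each field temporarily
// holding the distance back to the previously emitted one (zero marks the
// end of the chain). Once the block is laid out, patchJumps walks that
// chain and rewrites every field to the real relative offset, so no
// separate side table of fixups is needed.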
void CodeGenerator::cgBlock(Block* block, vector<TransBCMapping>* bcMap) {
  FTRACE(6, "cgBlock: {}\n", block->id());

  for (IRInstruction& instr : *block) {
    IRInstruction* inst = &instr;
    if (inst->op() == Marker) {
      m_state.lastMarker = inst->extra<Marker>();
      FTRACE(7, "lastMarker is now {}\n", inst->extra<Marker>()->show());
      if (m_tx64 && m_tx64->isTransDBEnabled() && bcMap) {
        bcMap->push_back((TransBCMapping){Offset(m_state.lastMarker->bcOff),
                                          m_as.code.frontier,
                                          m_astubs.code.frontier});
      }
    }
    m_curInst = inst;
    auto nuller = folly::makeGuard([&]{ m_curInst = nullptr; });
    auto* addr = cgInst(inst);
    if (m_state.asmInfo && addr) {
      m_state.asmInfo->instRanges[inst] = TcaRange(addr, m_as.code.frontier);
      m_state.asmInfo->asmRanges[block] =
        TcaRange(m_state.asmInfo->asmRanges[block].start(),
                 m_as.code.frontier);
    }
  }
}
/*
 * Compute and save registers that are live *across* each inst, not including
 * registers whose lifetimes end at inst, nor registers defined by inst.
 */
LiveRegs computeLiveRegs(const IRFactory* factory, const RegAllocInfo& regs,
                         Block* start_block) {
  StateVector<Block, RegSet> liveMap(factory, RegSet());
  LiveRegs live_regs(factory, RegSet());
  postorderWalk(
    [&](Block* block) {
      RegSet& live = liveMap[block];
      if (Block* taken = block->taken()) live  = liveMap[taken];
      if (Block* next  = block->next())  live |= liveMap[next];
      for (auto it = block->end(); it != block->begin(); ) {
        IRInstruction& inst = *--it;
        for (const SSATmp& dst : inst.dsts()) {
          live -= regs[dst].regs();
        }
        live_regs[inst] = live;
        for (SSATmp* src : inst.srcs()) {
          live |= regs[src].regs();
        }
      }
    },
    factory->numBlocks(),
    start_block);
  return live_regs;
}
* trace
,
5434 CodeGenerator::Asm
& as
,
5435 CodeGenerator::Asm
& astubs
,
5436 IRFactory
* irFactory
,
5437 vector
<TransBCMapping
>* bcMap
,
5438 Transl::TranslatorX64
* tx64
,
5439 const RegAllocInfo
& regs
,
5440 const LifetimeInfo
* lifetime
,
5442 assert(trace
->isMain());
5443 LiveRegs live_regs
= computeLiveRegs(irFactory
, regs
, trace
->front());
5444 CodegenState
state(irFactory
, regs
, live_regs
, lifetime
, asmInfo
);
5446 // Returns: whether a block has already been emitted.
5447 DEBUG_ONLY
auto isEmitted
= [&](Block
* block
) {
5448 return state
.addresses
[block
];
5452 * Emit the given block on the supplied assembler. The `nextBlock'
5453 * is the nextBlock that will be emitted on this assembler. If is
5454 * not the fallthrough block, emit a patchable jump to the
5455 * fallthrough block.
5457 auto emitBlock
= [&](Asm
& a
, Block
* block
, Block
* nextBlock
) {
5458 assert(!isEmitted(block
));
5460 FTRACE(6, "cgBlock {} on {}\n", block
->id(),
5461 &a
== &astubs
? "astubs" : "a");
5463 auto const aStart
= a
.code
.frontier
;
5464 auto const astubsStart
= astubs
.code
.frontier
;
5465 patchJumps(a
, state
, block
);
5466 state
.addresses
[block
] = aStart
;
5468 // If the block ends with a Jmp_ and the next block is going to be
5469 // its target, we don't need to actually emit it.
5470 IRInstruction
* last
= block
->back();
5471 state
.noTerminalJmp_
= last
->op() == Jmp_
&& nextBlock
== last
->taken();
5473 CodeGenerator
cg(trace
, a
, astubs
, tx64
, state
);
5474 if (state
.asmInfo
) {
5475 state
.asmInfo
->asmRanges
[block
] = TcaRange(aStart
, a
.code
.frontier
);
5478 cg
.cgBlock(block
, bcMap
);
5479 state
.lastMarker
= nullptr;
5480 if (auto next
= block
->next()) {
5481 if (next
!= nextBlock
) {
5482 // If there's a fallthrough block and it's not the next thing
5483 // going into this assembler, then emit a jump to it.
5484 emitFwdJmp(a
, next
, state
);
5488 if (state
.asmInfo
) {
5489 state
.asmInfo
->asmRanges
[block
] = TcaRange(aStart
, a
.code
.frontier
);
5490 if (&a
!= &astubs
) {
5491 state
.asmInfo
->astubRanges
[block
] = TcaRange(astubsStart
,
5492 astubs
.code
.frontier
);
5497 if (RuntimeOption::EvalHHIRGenerateAsserts
&& trace
->isMain()) {
5498 emitTraceCall(as
, trace
->bcOff(), tx64
);
5501 auto const linfo
= layoutBlocks(trace
, *irFactory
);
5503 for (auto it
= linfo
.blocks
.begin(); it
!= linfo
.astubsIt
; ++it
) {
5504 Block
* nextBlock
= boost::next(it
) != linfo
.astubsIt
5505 ? *boost::next(it
) : nullptr;
5506 emitBlock(as
, *it
, nextBlock
);
5508 for (auto it
= linfo
.astubsIt
; it
!= linfo
.blocks
.end(); ++it
) {
5509 Block
* nextBlock
= boost::next(it
) != linfo
.blocks
.end()
5510 ? *boost::next(it
) : nullptr;
5511 emitBlock(astubs
, *it
, nextBlock
);
5515 for (Block
* UNUSED block
: linfo
.blocks
) {
5516 assert(isEmitted(block
));
TypedValue& getDefaultIfNullCell(TypedValue* tv, TypedValue& def) {
  if (UNLIKELY(nullptr == tv)) {
    // refcount is already correct since def was never decrefed
    return def;
  }
  tvRefcountedDecRef(&def);
  TypedValue* ret = tvToCell(tv);
  tvRefcountedIncRef(ret);
  return *ret;
}

TypedValue arrayIdxS(ArrayData* a, StringData* key, TypedValue def) {
  return getDefaultIfNullCell(a->nvGet(key), def);
}

TypedValue arrayIdxSi(ArrayData* a, StringData* key, TypedValue def) {
  int64_t i;
  return UNLIKELY(key->isStrictlyInteger(i)) ?
         getDefaultIfNullCell(a->nvGet(i), def) :
         getDefaultIfNullCell(a->nvGet(key), def);
}

TypedValue arrayIdxI(ArrayData* a, int64_t key, TypedValue def) {
  return getDefaultIfNullCell(a->nvGet(key), def);
}