2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #include "hphp/runtime/vm/jit/irlower-internal.h"
19 #include "hphp/runtime/base/array-init.h"
20 #include "hphp/runtime/base/array-iterator.h"
21 #include "hphp/runtime/base/attr.h"
22 #include "hphp/runtime/base/countable.h"
23 #include "hphp/runtime/base/datatype.h"
24 #include "hphp/runtime/base/execution-context.h"
25 #include "hphp/runtime/base/header-kind.h"
26 #include "hphp/runtime/base/packed-array.h"
27 #include "hphp/runtime/base/runtime-option.h"
28 #include "hphp/runtime/base/tv-mutate.h"
29 #include "hphp/runtime/base/tv-variant.h"
30 #include "hphp/runtime/base/tv-refcount.h"
31 #include "hphp/runtime/base/type-array.h"
32 #include "hphp/runtime/base/typed-value.h"
33 #include "hphp/runtime/vm/act-rec.h"
34 #include "hphp/runtime/vm/bytecode.h"
35 #include "hphp/runtime/vm/func.h"
36 #include "hphp/runtime/vm/unit.h"
37 #include "hphp/runtime/vm/vm-regs.h"
39 #include "hphp/runtime/vm/jit/types.h"
40 #include "hphp/runtime/vm/jit/arg-group.h"
41 #include "hphp/runtime/vm/jit/bc-marker.h"
42 #include "hphp/runtime/vm/jit/call-spec.h"
43 #include "hphp/runtime/vm/jit/code-gen-cf.h"
44 #include "hphp/runtime/vm/jit/code-gen-helpers.h"
45 #include "hphp/runtime/vm/jit/extra-data.h"
46 #include "hphp/runtime/vm/jit/ir-instruction.h"
47 #include "hphp/runtime/vm/jit/ir-opcode.h"
48 #include "hphp/runtime/vm/jit/ssa-tmp.h"
49 #include "hphp/runtime/vm/jit/translator-inline.h"
50 #include "hphp/runtime/vm/jit/type.h"
51 #include "hphp/runtime/vm/jit/vasm-gen.h"
52 #include "hphp/runtime/vm/jit/vasm-instr.h"
53 #include "hphp/runtime/vm/jit/vasm-reg.h"
55 #include "hphp/util/trace.h"
56 #include "hphp/util/asm-x64.h"
58 namespace HPHP
{ namespace jit
{ namespace irlower
{
60 TRACE_SET_MOD(irlower
);
62 ///////////////////////////////////////////////////////////////////////////////
/*
 * Lower SpillFrame: write out a pre-live ActRec at extra->spOffset cells from
 * the stack pointer, filling in the ctx slot (m_thisUnsafe), m_invName,
 * m_func, and m_numArgsAndFlags from the instruction's sources.
 *
 * NOTE(review): this region of the chunk is truncated — a number of original
 * lines are missing (e.g. the `auto& v = vmain(env);` binding, several
 * `} else {` lines, and whole sub-expressions).  Surviving fragments are
 * reproduced below with structural braces restored where unambiguous; every
 * gap is flagged.  Confirm against upstream before relying on this text.
 */
void cgSpillFrame(IRLS& env, const IRInstruction* inst) {
  auto const sp = srcLoc(env, inst, 0).reg();
  auto const extra = inst->extra<SpillFrame>();
  auto const funcTmp = inst->src(1);     // TFunc | TNullptr (asserted below)
  auto const ctxTmp = inst->src(2);      // TCls | TCtx | TNullptr
  auto const invNameTmp = inst->src(3);  // TStr | TNullptr (asserted below)
  auto const isDynamic = inst->src(4);
  // NOTE(review): missing line — presumably `auto& v = vmain(env);`, since
  // `v` is used throughout below.

  // Memory operand addressing the ActRec being spilled.
  auto const ar = sp[cellsToBytes(extra->spOffset.offset)];

  /* ---- ctx slot (m_thisUnsafe) ---- */
  if (ctxTmp->isA(TCls)) {
    // Store the Class* as a Cctx.
    if (ctxTmp->hasConstVal()) {
      // NOTE(review): missing the head of this statement (presumably
      // `emitImmStoreq(v,`) — stores the tagged constant Class* immediate.
      uintptr_t(ctxTmp->clsVal()) | ActRec::kHasClassBit,
                    ar + AROFF(m_thisUnsafe));
    } else { // NOTE(review): brace line restored; missing in extraction.
      auto const cls = srcLoc(env, inst, 2).reg();
      auto const cctx = v.makeReg();
      // Tag the Class* with kHasClassBit to mark the slot as a Cctx.
      v << orqi{ActRec::kHasClassBit, cls, cctx, v.makeReg()};
      v << store{cctx, ar + AROFF(m_thisUnsafe)};
    }
  } else if (ctxTmp->isA(TNullptr)) {
    // No $this or class; this happens in FPushFunc.
    if (RuntimeOption::EvalHHIRGenerateAsserts) {
      emitImmStoreq(v, ActRec::kTrashedThisSlot, ar + AROFF(m_thisUnsafe));
    }
  } else { // NOTE(review): brace line restored.
    // It could be TCls | TCtx | TNullptr, but we can't distinguish TCls and
    // TCtx so assert it doesn't happen. We don't generate SpillFrames with such
    // (NOTE(review): tail of this original comment is missing.)
    assertx(ctxTmp->isA(TCtx | TNullptr));

    // We don't have to incref here
    auto const ctx = srcLoc(env, inst, 2).reg();
    v << store{ctx, ar + AROFF(m_thisUnsafe)};
    if (RuntimeOption::EvalHHIRGenerateAsserts &&
        ctxTmp->type().maybe(TNullptr)) {
      auto const sf = v.makeReg();
      v << testq{ctx, ctx, sf};
      // NOTE(review): missing line(s) — presumably a conditional (ifThen)
      // that trashes the slot only when ctx was null:
      emitImmStoreq(v, ActRec::kTrashedThisSlot, ar + AROFF(m_thisUnsafe));
    }
  }

  /* ---- m_invName ---- */
  if (invNameTmp->isA(TNullptr)) {
    if (RuntimeOption::EvalHHIRGenerateAsserts) {
      emitImmStoreq(v, ActRec::kTrashedVarEnvSlot, ar + AROFF(m_invName));
    }
  } else { // NOTE(review): brace line restored.
    assertx(invNameTmp->isA(TStr | TNullptr));

    // We don't have to incref here
    auto const invName = srcLoc(env, inst, 3).reg();
    v << store{invName, ar + AROFF(m_invName)};
    if (invNameTmp->type().maybe(TNullptr)) {
      if (RuntimeOption::EvalHHIRGenerateAsserts) {
        auto const sf = v.makeReg();
        v << testq{invName, invName, sf};
        // NOTE(review): missing line(s) before this trash-store (likely a
        // null-conditional wrapper, as for the ctx slot above):
        emitImmStoreq(v, ActRec::kTrashedVarEnvSlot, ar + AROFF(m_invName));
      }
    }
  }

  /* ---- m_func ---- */
  if (funcTmp->isA(TNullptr)) {
    if (RuntimeOption::EvalHHIRGenerateAsserts) {
      emitImmStoreq(v, ActRec::kTrashedFuncSlot, ar + AROFF(m_func));
    }
  } else { // NOTE(review): brace line restored.
    assertx(funcTmp->isA(TFunc | TNullptr));
    auto const func = srcLoc(env, inst, 1).reg();
    v << store{func, ar + AROFF(m_func)};
    if (RuntimeOption::EvalHHIRGenerateAsserts &&
        funcTmp->type().maybe(TNullptr)) {
      auto const sf = v.makeReg();
      v << testq{func, func, sf};
      // NOTE(review): missing line(s) before this trash-store.
      emitImmStoreq(v, ActRec::kTrashedFuncSlot, ar + AROFF(m_func));
    }
  }

  /*
   * ---- m_numArgsAndFlags ----
   *
   * Fold statically-known flags into `flags`; dynamicCheck/magicCheck
   * presumably select runtime fixups below (their assignments are among the
   * missing lines — confirm).
   */
  auto flags = ActRec::Flags::None;
  bool dynamicCheck = false;
  bool magicCheck = false;

  if (!isDynamic->hasConstVal()) {
    // NOTE(review): missing body — presumably `dynamicCheck = true;`.
  } else if (isDynamic->hasConstVal(true)) {
    flags = static_cast<ActRec::Flags>(flags | ActRec::Flags::DynamicCall);
  }

  if (!invNameTmp->type().maybe(TNullptr)) {
    flags = static_cast<ActRec::Flags>(flags | ActRec::Flags::MagicDispatch);
  } else if (!invNameTmp->isA(TNullptr)) {
    // NOTE(review): missing body — presumably `magicCheck = true;`.
  }

  // NOTE(review): missing enclosing statement — the statically-encoded
  // numArgsAndFlags immediate (likely bound to `naaf`):
  static_cast<int32_t>(ActRec::encodeNumArgsAndFlags(extra->numArgs, flags))

  // NOTE(review): missing `if (magicCheck)` wrapper and the cond() that ORs
  // MagicDispatch into naaf when invName is non-null; fragments:
  auto const invName = srcLoc(env, inst, 3).reg();
  auto const sf = v.makeReg();
  v << testq{invName, invName, sf};
  auto const dst = v.makeReg();
  static_cast<int32_t>(ActRec::Flags::MagicDispatch),
  [&] (Vout& v) { return naaf; }

  // NOTE(review): missing `if (dynamicCheck)` wrapper and the cond() that
  // ORs DynamicCall into naaf when the runtime bool is set; fragments:
  auto const dynamicReg = srcLoc(env, inst, 4).reg();
  auto const sf = v.makeReg();
  v << testb{dynamicReg, dynamicReg, sf};
  auto const dst = v.makeReg();
  static_cast<int32_t>(ActRec::Flags::DynamicCall),
  [&] (Vout& v) { return naaf; }

  // Final 32-bit store of the computed word into the ActRec.
  v << storel{naaf, ar + AROFF(m_numArgsAndFlags)};
}
240 ///////////////////////////////////////////////////////////////////////////////
// AssertARFunc emits no machine code — presumably it exists only to carry
// type/assertion information in the IR (inferred from the empty body).
void cgAssertARFunc(IRLS&, const IRInstruction*) {}
244 void cgLdARFuncPtr(IRLS
& env
, const IRInstruction
* inst
) {
245 auto const dst
= dstLoc(env
, inst
, 0).reg();
246 auto const sp
= srcLoc(env
, inst
, 0).reg();
247 auto const off
= cellsToBytes(inst
->extra
<LdARFuncPtr
>()->offset
.offset
);
248 vmain(env
) << load
{sp
[off
+ AROFF(m_func
)], dst
};
251 void cgLdARIsDynamic(IRLS
& env
, const IRInstruction
* inst
) {
252 auto const dst
= dstLoc(env
, inst
, 0).reg();
253 auto const sp
= srcLoc(env
, inst
, 0).reg();
254 auto const off
= cellsToBytes(inst
->extra
<LdARIsDynamic
>()->offset
.offset
);
256 auto& v
= vmain(env
);
257 auto const sf
= v
.makeReg();
259 static_cast<int32_t>(ActRec::Flags::DynamicCall
),
260 sp
[off
+ AROFF(m_numArgsAndFlags
)], sf
262 v
<< setcc
{CC_NZ
, sf
, dst
};
265 void cgLdARCtx(IRLS
& env
, const IRInstruction
* inst
) {
266 auto const dst
= dstLoc(env
, inst
, 0).reg();
267 auto const sp
= srcLoc(env
, inst
, 0).reg();
268 auto const off
= cellsToBytes(inst
->extra
<LdARCtx
>()->offset
.offset
);
269 vmain(env
) << load
{sp
[off
+ AROFF(m_thisUnsafe
)], dst
};
272 void cgLdARNumArgsAndFlags(IRLS
& env
, const IRInstruction
* inst
) {
273 auto const dst
= dstLoc(env
, inst
, 0).reg();
274 auto const fp
= srcLoc(env
, inst
, 0).reg();
275 vmain(env
) << loadzlq
{fp
[AROFF(m_numArgsAndFlags
)], dst
};
278 void cgStARNumArgsAndFlags(IRLS
& env
, const IRInstruction
* inst
) {
279 auto const fp
= srcLoc(env
, inst
, 0).reg();
280 auto const val
= srcLoc(env
, inst
, 1).reg();
281 auto &v
= vmain(env
);
283 auto const tmp
= v
.makeReg();
284 v
<< movtql
{val
, tmp
};
285 v
<< storel
{tmp
, fp
[AROFF(m_numArgsAndFlags
)]};
288 void cgLdARNumParams(IRLS
& env
, const IRInstruction
* inst
) {
289 auto const dst
= dstLoc(env
, inst
, 0).reg();
290 auto const fp
= srcLoc(env
, inst
, 0).reg();
291 auto& v
= vmain(env
);
293 auto const naaf
= v
.makeReg();
294 v
<< loadzlq
{fp
[AROFF(m_numArgsAndFlags
)], naaf
};
295 v
<< andqi
{ActRec::kNumArgsMask
, naaf
, dst
, v
.makeReg()};
298 void cgCheckARMagicFlag(IRLS
& env
, const IRInstruction
* inst
) {
299 auto const fp
= srcLoc(env
, inst
, 0).reg();
301 auto& v
= vmain(env
);
302 auto const sf
= v
.makeReg();
304 auto const mask
= static_cast<int32_t>(ActRec::Flags::MagicDispatch
);
306 if (mask
& (mask
- 1)) {
307 auto const tmp
= v
.makeReg();
308 auto const naaf
= v
.makeReg();
309 // We need to test multiple bits.
310 v
<< loadl
{fp
[AROFF(m_numArgsAndFlags
)], naaf
};
311 v
<< andli
{mask
, naaf
, tmp
, v
.makeReg()};
312 v
<< cmpli
{mask
, tmp
, sf
};
313 v
<< jcc
{CC_NZ
, sf
, {label(env
, inst
->next()), label(env
, inst
->taken())}};
315 v
<< testlim
{mask
, fp
[AROFF(m_numArgsAndFlags
)], sf
};
316 v
<< jcc
{CC_Z
, sf
, {label(env
, inst
->next()), label(env
, inst
->taken())}};
320 void cgLdCtx(IRLS
& env
, const IRInstruction
* inst
) {
321 assertx(!inst
->func() || inst
->ctx());
322 auto const dst
= dstLoc(env
, inst
, 0).reg();
323 auto const fp
= srcLoc(env
, inst
, 0).reg();
324 vmain(env
) << load
{fp
[AROFF(m_thisUnsafe
)], dst
};
327 void cgLdCctx(IRLS
& env
, const IRInstruction
* inst
) {
328 return cgLdCtx(env
, inst
);
331 void cgInitCtx(IRLS
& env
, const IRInstruction
* inst
) {
332 assertx(!inst
->func() || inst
->func()->isClosureBody());
333 auto const fp
= srcLoc(env
, inst
, 0).reg();
334 auto const ctx
= srcLoc(env
, inst
, 1).reg();
335 vmain(env
) << store
{ctx
, fp
[AROFF(m_thisUnsafe
)]};
338 void cgLdARInvName(IRLS
& env
, const IRInstruction
* inst
) {
339 auto const dst
= dstLoc(env
, inst
, 0).reg();
340 auto const fp
= srcLoc(env
, inst
, 0).reg();
341 vmain(env
) << load
{fp
[AROFF(m_invName
)], dst
};
344 void cgStARInvName(IRLS
& env
, const IRInstruction
* inst
) {
345 auto const fp
= srcLoc(env
, inst
, 0).reg();
346 auto const val
= srcLoc(env
, inst
, 1).reg();
347 vmain(env
) << store
{val
, fp
[AROFF(m_invName
)]};
350 ///////////////////////////////////////////////////////////////////////////////
355 * The standard VMRegAnchor treatment won't work for some cases called during
356 * function prologues.
358 * The fp sync machinery is fundamentally based on the notion that instruction
359 * pointers in the TC are uniquely associated with source HHBC instructions,
360 * and that source HHBC instructions are in turn uniquely associated with
363 * trimExtraArgs() is called from the prologue of the callee. The prologue is
364 * (a) still in the caller frame for now, and (b) shared across multiple call
365 * sites. (a) means that we have the fp from the caller's frame, and (b) means
366 * that this fp is not enough to figure out sp.
368 * However, the prologue passes us the callee ActRec, whose predecessor has to
369 * be the caller. So we can sync sp and fp by ourselves here. Geronimo!
/*
 * Sync the VM register state (stack top, pc, jitReturnAddr) from a pre-live
 * callee ActRec during a prologue, where the normal VMRegAnchor machinery
 * cannot be used (see the comment above).
 *
 * NOTE(review): this extraction is missing lines here — at least the `:`
 * arm of the fp conditional and (presumably) a `regs.fp = fp;` assignment.
 * Structural braces restored; confirm against upstream.
 */
static void sync_regstate_to_caller(ActRec* preLive) {
  assertx(tl_regState == VMRegState::DIRTY);
  auto const ec = g_context.getNoCheck();
  auto& regs = vmRegsUnsafe();

  // The eval stack top sits numArgs cells below the pre-live ActRec.
  regs.stack.top() = reinterpret_cast<TypedValue*>(preLive)
    - preLive->numArgs();
  auto fp = preLive == vmFirstAR()
    ? ec->m_nestedVMs.back().fp
  // NOTE(review): missing line — the ':' arm of this conditional
  // (presumably the caller frame reachable from preLive).

  // pc = caller's bytecode offset (m_soff is relative to the func's base).
  regs.pc = fp->func()->unit()->at(fp->func()->base() + preLive->m_soff);
  regs.jitReturnAddr = (TCA)preLive->m_savedRip;
  tl_regState = VMRegState::CLEAN;
}
389 * Perform the action specified by 'action1' on the range of TypedValues
390 * represented by 'tv' and 'limit'. If 'pred' ever returns true, sync the
 * register state and then start calling 'action2' instead.
/*
 * Apply 'action1' over a range of TypedValues; once 'pred' returns true,
 * sync the register state to the caller and apply 'action2' to the rest
 * (see the comment above).
 *
 * NOTE(review): most of this template's body is missing from this chunk —
 * the parameter-list tail (iterators, pred, actions), the first loop, and
 * the second loop's header.  Only surviving fragments are reproduced.
 */
template <typename Iter, typename Pred, typename Action1, typename Action2>
static void actionMayReenter(ActRec* ar,
    // NOTE(review): missing parameters and the pre-sync loop here.
    sync_regstate_to_caller(ar);
    // Go back to dirty (see the comments of sync_regstate_to_caller()).
    SCOPE_EXIT { tl_regState = VMRegState::DIRTY; };
  } while (tv != limit);
  // NOTE(review): missing lines between the two loop tails.
  } while (tv != limit);
/*
 * Common prologue for the extra-args helpers below.  Binds, from `ar`:
 *   f         - the callee Func*
 *   numParams - f's non-variadic parameter count
 *   numArgs   - the number of args actually passed (must exceed numParams)
 *   numExtra  - numArgs - numParams
 *   tvArgs    - pointer numArgs cells below the ActRec (the passed args)
 * (No comments inside the macro: they would break the '\' continuations.)
 */
#define SHUFFLE_EXTRA_ARGS_PRELUDE() \
  auto const f = ar->func(); \
  auto const numParams = f->numNonVariadicParams(); \
  auto const numArgs = ar->numArgs(); \
  assertx(numArgs > numParams); \
  auto const numExtra = numArgs - numParams; \
  TRACE(1, "extra args: %d args, function %s takes only %d, ar %p\n", \
        numArgs, f->name()->data(), numParams, ar); \
  auto tvArgs = reinterpret_cast<TypedValue*>(ar) - numArgs; \
  /* end SHUFFLE_EXTRA_ARGS_PRELUDE */
/*
 * Discard the arguments beyond the declared parameter count from a pre-live
 * ActRec, dec-reffing each one.  Used for callees with neither a variadic
 * capture param nor VarEnv/ExtraArgs support.
 *
 * NOTE(review): the statement that applies the lambdas below (presumably an
 * actionMayReenter call over the extra-arg range) is missing from this
 * chunk; only its lambda arguments survive.
 */
void trimExtraArgs(ActRec* ar) {
  SHUFFLE_EXTRA_ARGS_PRELUDE()
  assertx(!f->hasVariadicCaptureParam());
  assertx(!(f->attrs() & AttrMayUseVV));

    // NOTE(review): missing enclosing call here.
    [](TypedValue v){ return tvDecRefWillCallHelper(v); },
    [](TypedValue v){ tvDecRefGenNZ(v); },
    [](TypedValue v){ tvDecRefGen(v); }

  assertx(f->numParams() == (numArgs - numExtra));
  assertx(f->numParams() == numParams);
  // The frame now reports exactly the declared parameter count.
  ar->setNumArgs(numParams);
}
/*
 * Move the extra args into an ExtraArgs structure hung off the ActRec, for
 * functions that may use VarEnv/ExtraArgs but have no variadic capture
 * param.  allocateCopy presumably copies the values off the stack — note no
 * refcount ops are visible here; confirm its ownership semantics upstream.
 *
 * NOTE(review): closing brace restored; it was lost in extraction.
 */
void shuffleExtraArgsMayUseVV(ActRec* ar) {
  SHUFFLE_EXTRA_ARGS_PRELUDE()
  assertx(!f->hasVariadicCaptureParam());
  assertx(f->attrs() & AttrMayUseVV);

  ar->setExtraArgs(ExtraArgs::allocateCopy(tvArgs, numExtra));
}
/*
 * Pack the extra args into a varray (VArrayInit) and write the result into
 * the last (variadic) parameter slot, adjusting the frame's arg count.
 *
 * NOTE(review): this chunk is missing the enclosing calls around both
 * lambda groups (presumably actionMayReenter over the extra args) and part
 * of one comment.  Fragments reproduced as found; braces restored.
 */
void shuffleExtraArgsVariadic(ActRec* ar) {
  SHUFFLE_EXTRA_ARGS_PRELUDE()
  assertx(f->hasVariadicCaptureParam());
  assertx(!(f->attrs() & AttrMayUseVV));

  VArrayInit ai{numExtra};
    // NOTE(review): missing enclosing call; walks the extra-arg range via
    // reverse iterators (stack layout vs. array order — confirm direction).
    std::reverse_iterator<TypedValue*>(tvArgs + numExtra),
    std::reverse_iterator<TypedValue*>(tvArgs),
    [](TypedValue v) { return isRefType(v.m_type); },
    [&](TypedValue v) { ai.appendWithRef(v); },
    [&](TypedValue v) { ai.appendWithRef(v); }

  /* If the value wasn't a ref, we'll have definitely inc-reffed it, so we
     ... NOTE(review): remainder of this original comment is missing. */
    // NOTE(review): missing enclosing call (a dec-ref pass over the args).
    [](TypedValue v){ return isRefType(v.m_type); },
    [](TypedValue v){ tvDecRefGenNZ(v); },
    [](TypedValue v){ tvDecRefGen(v); }

  // Write into the last (variadic) param.
  auto tv = reinterpret_cast<TypedValue*>(ar) - numParams - 1;
  *tv = make_array_like_tv(ai.create());
  assertx(tv->m_data.parr->hasExactlyOneRef());

  // No incref is needed, since extra values are being transferred from the
  // stack to the last local.
  assertx(f->numParams() == (numArgs - numExtra + 1));
  assertx(f->numParams() == (numParams + 1));
  ar->setNumArgs(numParams + 1);
}
/*
 * Combination of the two helpers above: copy the extra args into an
 * ExtraArgs structure AND pack them into the variadic param's array.
 *
 * NOTE(review): this chunk is missing the enclosing call around the lambda
 * group, lines around the trailing cleanup (the deallocateRaw/resetExtraArgs
 * pair appears without its surrounding context — likely an error/unwind
 * path), and the function's tail.  Fragments reproduced as found.
 */
void shuffleExtraArgsVariadicAndVV(ActRec* ar) {
  SHUFFLE_EXTRA_ARGS_PRELUDE()
  assertx(f->hasVariadicCaptureParam());
  assertx(f->attrs() & AttrMayUseVV);

  ar->setExtraArgs(ExtraArgs::allocateCopy(tvArgs, numExtra));

  VArrayInit ai{numExtra};
    // NOTE(review): missing enclosing call; same reverse-iteration pattern
    // as shuffleExtraArgsVariadic above.
    std::reverse_iterator<TypedValue*>(tvArgs + numExtra),
    std::reverse_iterator<TypedValue*>(tvArgs),
    [](TypedValue v) { return isRefType(v.m_type); },
    [&](TypedValue v) { ai.appendWithRef(v); },
    [&](TypedValue v) { ai.appendWithRef(v); }

  // Write into the last (variadic) param.
  auto tv = reinterpret_cast<TypedValue*>(ar) - numParams - 1;
  *tv = make_array_like_tv(ai.create());
  assertx(tv->m_data.parr->hasExactlyOneRef());
  // Before, for each arg: refcount = n + 1 (stack).
  // After, for each arg: refcount = n + 2 (ExtraArgs, varArgsArray).

  // NOTE(review): missing context around these two cleanup calls:
  ExtraArgs::deallocateRaw(ar->getExtraArgs());
  ar->resetExtraArgs();
  // NOTE(review): missing function tail (likely setNumArgs and/or rethrow).
}
523 #undef SHUFFLE_EXTRA_ARGS_PRELUDE
525 ///////////////////////////////////////////////////////////////////////////////
/*
 * Lower InitExtraArgs: based on extra_args_action(func, argc), either emit
 * inline initialization or select one of the extra-args helpers above to
 * call at runtime.
 *
 * NOTE(review): several lines are missing here — at least one case label
 * (presumably `case Action::None:`), all `break;` statements, and the
 * cgCallHelper invocation that consumes `handler`.  Reproduced as found.
 */
void cgInitExtraArgs(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();
  auto const extra = inst->extra<InitExtraArgs>();
  auto const func = extra->func;
  auto const argc = extra->argc;

  using Action = ExtraArgsAction;

  auto& v = vmain(env);
  void (*handler)(ActRec*) = nullptr;

  switch (extra_args_action(func, argc)) {
    // NOTE(review): missing case label (presumably Action::None).
      if (func->attrs() & AttrMayUseVV) {
        // Zero the VarEnv/invName union slot.
        v << storeqi{0, fp[AROFF(m_invName)]};
      }
      // NOTE(review): missing `break;` here and after each case below.
    case Action::Discard:
      handler = trimExtraArgs;
    case Action::Variadic:
      handler = shuffleExtraArgsVariadic;
    case Action::MayUseVV:
      handler = shuffleExtraArgsMayUseVV;
    case Action::VarAndVV:
      handler = shuffleExtraArgsVariadicAndVV;
  }

  // NOTE(review): missing the enclosing helper-call statement (presumably
  // cgCallHelper with the fragments below as arguments):
    CallSpec::direct(handler),
    argGroup(env, inst).reg(fp)
}
/*
 * Lower PackMagicArgs: extract the arg count from m_numArgsAndFlags,
 * compute the address of the args below the frame pointer, and call a
 * PackedArray maker (MakeVec or MakeVArray, per EvalHackArrDVArrs) to pack
 * them into an array.
 *
 * NOTE(review): the tail of the argGroup() chain and the cgCallHelper
 * invocation are missing from this chunk.  Reproduced as found.
 */
void cgPackMagicArgs(IRLS& env, const IRInstruction* inst) {
  auto const fp = srcLoc(env, inst, 0).reg();

  auto& v = vmain(env);
  auto const naaf = v.makeReg();
  auto const num_args = v.makeReg();

  // num_args = m_numArgsAndFlags & kNumArgsMask.
  v << loadl{fp[AROFF(m_numArgsAndFlags)], naaf};
  v << andli{ActRec::kNumArgsMask, naaf, num_args, v.makeReg()};

  auto const offset = v.makeReg();
  auto const offsetq = v.makeReg();
  auto const values = v.makeReg();

  // values = fp - num_args * sizeof(Cell); shift-by-4 == multiply-by-16,
  // guarded by the static_assert.
  static_assert(sizeof(Cell) == 16, "");
  v << shlli{4, num_args, offset, v.makeReg()};
  v << movzlq{offset, offsetq};
  v << subq{offsetq, fp, values, v.makeReg()};

  auto const args = argGroup(env, inst)
  // NOTE(review): missing the remainder of this argument chain and the
  // enclosing cgCallHelper call; the surviving fragment picks the packer:
    RuntimeOption::EvalHackArrDVArrs
      ? CallSpec::direct(PackedArray::MakeVec)
      : CallSpec::direct(PackedArray::MakeVArray),
}