/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com)     |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#ifndef incl_HPHP_JIT_BACK_END_H
#define incl_HPHP_JIT_BACK_END_H

#include <iosfwd>
#include <memory>

#include "hphp/runtime/vm/jit/types.h"
#include "hphp/runtime/vm/jit/phys-reg.h"
#include "hphp/runtime/vm/jit/service-requests.h"
namespace HPHP { namespace jit {

// Forward declarations for types referenced below; the remaining types come
// from the includes above.
struct Abi;
struct AsmInfo;
struct CodeGenFixups;
struct IRInstruction;
struct IRUnit;
struct RelocationInfo;
struct UniqueStubs;
/*
 * This module supports both X64 and ARM behind a platform-agnostic interface.
 *
 * On X64, concurrent modification and execution of instructions is safe if all
 * of the following hold:
 *
 * 1) The modification is done with a single processor store
 *
 * 2) Only one instruction in the original stream is modified
 *
 * 3) The modified instruction does not cross a cacheline boundary
 */
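/*
 * For illustration (a sketch, not part of the interface): under rules
 * (1)-(3), redirecting an already-emitted x64 "jmp rel32" comes down to a
 * single 4-byte store into its displacement field:
 *
 *   // jmpAddr points at the 0xe9 opcode byte; newDest is the new target.
 *   auto disp = static_cast<int32_t>(newDest - (jmpAddr + 5));
 *   *reinterpret_cast<int32_t*>(jmpAddr + 1) = disp;
 *
 * The store only appears atomic to other cores fetching the instruction if
 * the instruction does not cross a cacheline boundary, hence rule (3).
 */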
struct TReqInfo {
  uintptr_t requestNum;
  uintptr_t args[5];

  // Some TC registers need to be preserved across service requests.
  uintptr_t saved_rStashedAr;

  // Stub addresses are passed back to allow us to recycle used stubs.
  TCA stubAddr;
};
enum class TestAndSmashFlags {
  kAlignJcc,
  kAlignJccImmediate,
  kAlignJccAndJmp,
};
enum class MoveToAlignFlags {
  kJmpTargetAlign,
  kNonFallthroughAlign,
  kCacheLineAlign,
};
class BackEnd;

std::unique_ptr<BackEnd> newBackEnd();

class BackEnd {
 protected:
  BackEnd();

 public:
  virtual ~BackEnd();
  virtual Abi abi() = 0;
  virtual size_t cacheLineSize() = 0;
  size_t cacheLineMask() {
    assert((cacheLineSize() & (cacheLineSize() - 1)) == 0);
    return cacheLineSize() - 1;
  }
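  /*
   * Example (sketch): with a 64-byte cache line, cacheLineMask() returns
   * 0x3f, and a code address can be rounded up to the next line boundary
   * with:
   *
   *   auto aligned = (addr + cacheLineMask()) & ~cacheLineMask();
   */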
  virtual PhysReg rSp() = 0;
  virtual PhysReg rVmSp() = 0;
  virtual PhysReg rVmFp() = 0;
  virtual PhysReg rVmTl() = 0;
  virtual bool storesCell(const IRInstruction& inst, uint32_t srcIdx) = 0;
  virtual bool loadsCell(const IRInstruction& inst) = 0;
  virtual void enterTCHelper(TCA start, TReqInfo& info) = 0;
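  /*
   * Sketch of how a dispatch loop drives enterTCHelper (illustrative, not
   * the actual call site):
   *
   *   TReqInfo info;
   *   backEnd->enterTCHelper(start, info);
   *   // On return, info identifies the service request that exited the TC,
   *   // and info.stubAddr lets the caller recycle the used stub.
   */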
  virtual void moveToAlign(CodeBlock& cb,
                           MoveToAlignFlags alignment =
                             MoveToAlignFlags::kJmpTargetAlign) = 0;
  virtual UniqueStubs emitUniqueStubs() = 0;
  virtual TCA emitServiceReqWork(CodeBlock& cb, TCA start, SRFlags flags,
                                 ServiceRequest req,
                                 const ServiceReqArgVec& argv) = 0;
  virtual void emitInterpReq(CodeBlock& mainCode, CodeBlock& coldCode,
                             SrcKey sk) = 0;
  virtual bool funcPrologueHasGuard(TCA prologue, const Func* func) = 0;
  virtual TCA funcPrologueToGuard(TCA prologue, const Func* func) = 0;
  virtual SrcKey emitFuncPrologue(CodeBlock& mainCode, CodeBlock& coldCode,
                                  Func* func, bool funcIsMagic, int nPassed,
                                  TCA& start, TCA& aStart) = 0;
  virtual TCA emitCallArrayPrologue(Func* func, DVFuncletsVec& dvs) = 0;
  virtual void funcPrologueSmashGuard(TCA prologue, const Func* func) = 0;
  virtual void emitIncStat(CodeBlock& cb, intptr_t disp, int n) = 0;
  virtual void emitTraceCall(CodeBlock& cb, Offset pcOff) = 0;
  /*
   * Returns true if the given current frontier can have an nBytes-long
   * instruction written that will be smashable later.
   */
  virtual bool isSmashable(Address frontier, int nBytes, int offset = 0) = 0;
  /*
   * Call before emitting a test-jcc sequence. Inserts a nop gap such that after
   * writing a testBytes-long instruction, the frontier will be smashable.
   */
  virtual void prepareForSmash(CodeBlock& cb, int nBytes, int offset = 0) = 0;
  virtual void prepareForTestAndSmash(CodeBlock& cb, int testBytes,
                                      TestAndSmashFlags flags) = 0;
  virtual void smashJmp(TCA jmpAddr, TCA newDest) = 0;
  virtual void smashCall(TCA callAddr, TCA newDest) = 0;
  virtual void smashJcc(TCA jccAddr, TCA newDest) = 0;
  /*
   * Emits a jump or call that satisfies the smash* routines above.
   */
  virtual void emitSmashableJump(CodeBlock& cb, TCA dest, ConditionCode cc) = 0;
  virtual void emitSmashableCall(CodeBlock& cb, TCA dest) = 0;
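  /*
   * Typical emit-then-smash flow (sketch; kJmpLen and the CC_None condition
   * code are illustrative):
   *
   *   backEnd->prepareForSmash(cb, kJmpLen);  // pad so the jmp is smashable
   *   TCA jmp = cb.frontier();
   *   backEnd->emitSmashableJump(cb, oldDest, CC_None);
   *   ...
   *   backEnd->smashJmp(jmp, newDest);        // safe while code is running
   */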
  /*
   * Find the start of a smashable call from the return address
   * observed in the callee.
   */
  virtual TCA smashableCallFromReturn(TCA returnAddr) = 0;
  /*
   * Decodes jump instructions and returns their target. This includes handling
   * for ARM's multi-instruction "smashable jump" sequences. If the code does
   * not encode the right kind of jump, these functions return nullptr.
   */
  virtual TCA jmpTarget(TCA jmp) = 0;
  virtual TCA jccTarget(TCA jmp) = 0;
  virtual TCA callTarget(TCA call) = 0;
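  /*
   * E.g. (sketch): continuing the flow sketched above, jmpTarget(jmp)
   * returns oldDest (or newDest once smashed), while jccTarget(jmp) and
   * callTarget(jmp) return nullptr because jmp is neither a conditional
   * jump nor a call.
   */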
  virtual void addDbgGuard(CodeBlock& codeMain, CodeBlock& codeCold,
                           SrcKey sk, size_t dbgOff) = 0;
  virtual void streamPhysReg(std::ostream& os, PhysReg reg) = 0;
  virtual void disasmRange(std::ostream& os, int indent, bool dumpIR,
                           TCA begin, TCA end) = 0;
  virtual void genCodeImpl(IRUnit& unit, AsmInfo*) = 0;

  virtual bool supportsRelocation() const { return false; }
  /*
   * Relocate code in the range start, end into dest, and record
   * information about what was done to rel.
   * On exit, internal references (references into the source range)
   * will have been adjusted (ie they are still references into the
   * relocated code). External code references continue to point to
   * the same address as before relocation.
   */
  virtual size_t relocate(RelocationInfo& rel, CodeBlock& dest,
                          TCA start, TCA end,
                          CodeGenFixups& fixups) {
    always_assert(false);
    return 0;
  }
  /*
   * This should be called after calling relocate on all relevant ranges. It
   * will adjust all references into the original src ranges to point into the
   * corresponding relocated ranges.
   */
  virtual void adjustForRelocation(RelocationInfo& rel) {
    always_assert(false);
  }
  /*
   * This will update a single range that was not relocated, but that
   * might refer to relocated code (such as the cold code corresponding
   * to a tracelet). Unless it's guaranteed to be all position independent,
   * its "fixups" should have been passed into a relocate call earlier.
   */
  virtual void adjustForRelocation(RelocationInfo& rel, TCA start, TCA end) {
    always_assert(false);
  }
  /*
   * Adjust the contents of fixups and asmInfo based on the relocation
   * already performed on rel. This will not cause any of the relocated
   * code to be "hooked up", and it's not safe to do so until all of the
   * CodeGenFixups have been processed.
   */
  virtual void adjustMetaDataForRelocation(RelocationInfo& rel,
                                           AsmInfo* asmInfo,
                                           CodeGenFixups& fixups) {
    always_assert(false);
  }
  /*
   * Adjust potentially live references that point into the relocated
   * area.
   * Must not be called until it's safe to run the relocated code.
   */
  virtual void adjustCodeForRelocation(RelocationInfo& rel,
                                       CodeGenFixups& fixups) {
    always_assert(false);
  }
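  /*
   * Overall relocation flow (sketch; variable names are illustrative):
   *
   *   backEnd->relocate(rel, destBlock, start, end, fixups);
   *   backEnd->adjustForRelocation(rel);             // other affected ranges
   *   backEnd->adjustMetaDataForRelocation(rel, asmInfo, fixups);
   *   backEnd->adjustCodeForRelocation(rel, fixups); // last: code now runnable
   */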
  virtual void findFixups(TCA start, TCA end, CodeGenFixups& fixups) {
    always_assert(false);
  }