2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
16 #ifndef incl_HPHP_TRANSLATOR_X64_INTERNAL_H_
17 #define incl_HPHP_TRANSLATOR_X64_INTERNAL_H_
19 #include <boost/optional.hpp>
20 #include <boost/filesystem.hpp>
21 #include <boost/utility/typed_in_place_factory.hpp>
23 #include "hphp/runtime/vm/jit/abi-x64.h"
// DataType constant used where string-ness is checked.
// NOTE(review): named "Bitwise" but initialized to plain KindOfString here —
// confirm this matches the DataType bit encoding this build expects.
29 static const DataType BitwiseKindOfString
= KindOfString
;
31 // Generate an if-then block into a. thenBlock is executed if cc is true.
// NOTE(review): the `template <class Then>` header and parts of the body
// (including the declaration of the `done` patch target and the invocation
// of thenBlock) fall on lines missing from this chunk.
33 void ifThen(Transl::X64Assembler
& a
, ConditionCode cc
, Then thenBlock
) {
// Jump over the then-block when cc does NOT hold (hence ccNegate).
35 a
.jcc8(ccNegate(cc
), done
);
40 // RAII aids to machine code.
44 // Branch to distant code (that we presumably don't expect to
45 // take). This helps keep hot paths compact.
47 // A common pattern using this involves patching the jump in astubs
48 // to jump past the normal control flow in a (as in the following
49 // example). Do this using DiamondReturn so the register allocator
50 // state will be properly maintained. (Spills/fills to keep the
51 // states in sync will be emitted on the unlikely path.)
56 // PhysReg inputParam = i.getReg(i.inputs[0]->location);
57 // a. test_reg_reg(inputParam, inputParam);
58 // DiamondReturn retFromStubs;
60 // UnlikelyIfBlock ifNotRax(CC_Z, a, astubs, &retFromStubs);
61 // EMIT_CALL(a, TCA(launch_nuclear_missiles));
63 // // The inputParam was non-zero, here is the likely branch:
64 // m_regMap.allocOutputRegs(i);
65 // emitMovRegReg(inputParam, m_regMap.getReg(i.outLocal->location));
66 // // ~DiamondReturn patches the jump, and reconciles the branch
67 // // with the main line. (In this case it will fill the outLocal
68 // // register since the main line thinks it is dirty.)
70 // // The two cases are joined here. We can do logic that was
71 // // independent of whether the branch was taken, if necessary.
72 // emitMovRegReg(i.outLocal, m_regMap.getReg(i.outStack->location));
74 // Note: it is ok to nest UnlikelyIfBlocks, as long as their
75 // corresponding DiamondReturns are correctly destroyed in reverse
76 // order. But also note that this can lead to more jumps on the
77 // unlikely branch (see ~DiamondReturn).
// RAII helper that emits cold-path code into a second ("unlikely")
// assembler.  Construction emits a jcc from the likely-path assembler to the
// unlikely assembler's current frontier and records the address just past
// that branch; the code visible at the end (presumably the destructor — its
// header is on a line missing from this chunk) jumps back to that address.
78 struct UnlikelyIfBlock
{
79 X64Assembler
& m_likely
;
80 X64Assembler
& m_unlikely
;
// Address in m_likely immediately after the branch; the unlikely path
// rejoins the main line here.
81 TCA m_likelyPostBranch
;
// NOTE(review): parts of the constructor (the `likely` parameter and the
// m_likely member initializer) are on lines missing from this chunk.
83 explicit UnlikelyIfBlock(ConditionCode cc
,
85 X64Assembler
& unlikely
)
87 , m_unlikely(unlikely
)
// Branch to the unlikely assembler's frontier when cc holds.
89 m_likely
.jcc(cc
, m_unlikely
.code
.frontier
);
90 m_likelyPostBranch
= m_likely
.code
.frontier
;
// Emitted on the unlikely side: jump back to the main line.
94 m_unlikely
.jmp(m_likelyPostBranch
);
98 // Helper structs for jcc vs. jcc8.
// 8-bit-displacement variant.  NOTE(review): the enclosing struct header
// (presumably `struct Jcc8 {`) and the body of branch() are on lines
// missing from this chunk.  patch() retargets a previously emitted jcc8 at
// `site` to `newDest`.
100 static void branch(X64Assembler
& a
, ConditionCode cc
, TCA dest
) {
103 static void patch(X64Assembler
& a
, TCA site
, TCA newDest
) {
104 a
.patchJcc8(site
, newDest
);
// 32-bit-displacement variant (presumably `struct Jcc`; the enclosing
// struct header and the body of branch() are on lines missing from this
// chunk).  patch() retargets a previously emitted jcc at `site`.
109 static void branch(X64Assembler
& a
, ConditionCode cc
, TCA dest
) {
112 static void patch(X64Assembler
& a
, TCA site
, TCA newDest
) {
113 a
.patchJcc(site
, newDest
);
118 // A raw condition-code block; assumes whatever comparison or ALU op
119 // that sets the Jcc has already executed.
// RAII: the constructor records the code frontier and emits a branch via
// J::branch; the destructor (its header is on a line missing from this
// chunk) patches that branch to the then-current frontier, i.e. to just
// past the block.  J selects the jcc vs. jcc8 helper above.
120 template <ConditionCode Jcc
, typename J
=Jcc8
>
// NOTE(review): declared mutable — the member use that requires it is not
// fully visible in this chunk.
122 mutable X64Assembler
* m_a
;
125 explicit JccBlock(X64Assembler
& a
)
// Remember where the branch will be emitted so it can be patched later.
127 m_jcc
= a
.code
.frontier
;
128 J::branch(a
, Jcc
, m_a
->code
.frontier
);
// Destructor-side patch: point the saved branch at the current frontier.
133 J::patch(*m_a
, m_jcc
, m_a
->code
.frontier
);
// Declared (presumably private and unimplemented) to make the block
// non-copyable.
138 JccBlock(const JccBlock
&);
139 JccBlock
& operator=(const JccBlock
&);
// Convenience wrapper — presumably scopes a JccBlock<Jcc> around body();
// the function body is on lines missing from this chunk.
142 template<ConditionCode Jcc
, class Lambda
>
143 void jccBlock(X64Assembler
& a
, Lambda body
) {
149 // A CondBlock is an RAII structure for emitting conditional code. It
150 // compares the source register at fieldOffset with fieldValue, and
151 // conditionally branches over the enclosing block of assembly on the
152 // passed-in condition-code.
156 // RefCountedOnly ifRefCounted(a, rdi, 0);
160 // will only execute emitIncRef if we find at runtime that rdi points at
161 // a ref-counted cell.
163 // It's ok to do reconcilable register operations in the body.
// NOTE(review): the template parameter list is truncated here — a FieldType
// parameter and the class header are on lines missing from this chunk.
164 template<unsigned FieldOffset
, unsigned FieldValue
, ConditionCode Jcc
,
171 CondBlock(X64Assembler
& a
, PhysReg reg
, int offset
= 0)
// Displacement of the compared field relative to the base register.
174 int typeDisp
= m_off
+ FieldOffset
;
175 static_assert(sizeof(FieldType
) == 1 || sizeof(FieldType
) == 4,
176 "CondBlock of unimplemented field size");
// Width of the compare instruction follows the field's size.
177 if (sizeof(FieldType
) == 4) {
178 a
. cmpl(FieldValue
, reg
[typeDisp
]);
179 } else if (sizeof(FieldType
) == 1) {
180 a
. cmpb(FieldValue
, reg
[typeDisp
]);
// Record the jcc8 site so the destructor can patch it past the block.
182 m_jcc8
= a
.code
.frontier
;
183 a
. jcc8(Jcc
, m_jcc8
);
// Destructor-side patch (its header is on a line missing from this chunk):
// point the jcc8 at the current frontier, i.e. just past the block.
188 m_a
.patchJcc8(m_jcc8
, m_a
.code
.frontier
);
193 // Emits if (IS_REFCOUNTED_TYPE()) { ... }
194 typedef CondBlock
<TVOFF(m_type
),
195 KindOfRefCountThreshold
,
// NOTE(review): the condition-code template argument is on a line missing
// from this chunk.
197 DataType
> IfRefCounted
;
// Two further CondBlock typedefs over the m_type field; their middle
// template arguments (field values, condition codes, and the first
// typedef's name) are on lines missing from this chunk.
199 typedef CondBlock
<TVOFF(m_type
),
204 typedef CondBlock
<TVOFF(m_type
),
207 DataType
> UnlessUninit
;
212 * Helper code for stack frames. The struct is a "template" in the
213 * non-C++ sense: we don't build source-level stack frames in C++
214 * for the most part, but its offsets tell us where to find fields
217 * If we were physically pushing stack frames, we would push them
218 * in reverse order to what you see here.
// Map a Location (Stack/Local/Iter slot) to a base register plus byte
// displacement.  NOTE(review): the return-type/`static` line and the
// closing brace are on lines missing from this chunk.
221 locToRegDisp(const Location
& l
, PhysReg
*outbase
, int *outdisp
,
222 const Func
* f
= nullptr) {
// Only these three location spaces are supported here.
223 assert_not_implemented((l
.space
== Location::Stack
||
224 l
.space
== Location::Local
||
225 l
.space
== Location::Iter
));
226 *outdisp
= cellsToBytes(Translator::locPhysicalOffset(l
, f
));
// Stack slots are addressed off rVmSp; locals/iterators off rVmFp.
227 *outbase
= l
.space
== Location::Stack
? rVmSp
: rVmFp
;
230 // Common code emission patterns.
// The TVType helpers below choose 4-byte vs. 1-byte instructions based on
// sizeof(DataType); any other size is unsupported.
232 static_assert(sizeof(DataType
) == 4 || sizeof(DataType
) == 1,
233 "Your DataType has an unsupported size.");
234 static inline Reg8
toByte(const Reg32
& x
) { return rbyte(x
); }
235 static inline Reg8
toByte(const Reg64
& x
) { return rbyte(x
); }
236 static inline Reg8
toByte(PhysReg x
) { return rbyte(x
); }
238 static inline Reg32
toReg32(const Reg64
& x
) { return r32(x
); }
239 static inline Reg32
toReg32(const Reg8
& x
) { return r32(x
); }
240 static inline Reg32
toReg32(PhysReg x
) { return r32(x
); }
// Fallback for non-register operand types: pass the operand through
// unchanged and let whatever conversions (or compile errors) exist
// handle it.
template<typename OpndType>
static OpndType toByte(const OpndType& operand) {
  return operand;
}
// Fallback for non-register operand types: identity pass-through, leaving
// any needed conversion (or a compile error) to the caller's context.
template<typename OpndType>
static OpndType toReg32(const OpndType& operand) {
  return operand;
}
// Default overload: non-memory operands carry no displacement to verify,
// so this is deliberately a no-op.
template<typename OpndType>
static inline void verifyTVOff(const OpndType& operand) { /* nop */ }
// Debug-build check that a MemoryRef operand addresses the m_type field of
// a TypedValue: its displacement modulo sizeof(TypedValue) must equal
// TVOFF(m_type).  NOTE(review): the closing lines of this function are
// missing from this chunk.
251 static inline void verifyTVOff(const MemoryRef
& mr
) {
252 DEBUG_ONLY
auto disp
= mr
.r
.disp
;
253 // Make sure that we're operating on the m_type field of a
255 assert((disp
& (sizeof(TypedValue
) - 1)) == TVOFF(m_type
));
// Emit a test against a TypedValue's type field: a 32-bit testl when
// DataType is 4 bytes wide, otherwise (on the else path, whose braces are
// on lines missing from this chunk) an 8-bit testb.  The return-type line
// is also missing here.
258 template<typename SrcType
, typename OpndType
>
260 emitTestTVType(X64Assembler
& a
, SrcType src
, OpndType tvOp
) {
262 if (sizeof(DataType
) == 4) {
263 a
. testl(src
, toReg32(tvOp
));
265 a
. testb(src
, toByte(tvOp
));
// Emit a load of a TypedValue's type field: a plain 32-bit loadl when
// DataType is 4 bytes, otherwise a zero-extending byte load (loadzbl) into
// the 32-bit register.  NOTE(review): the return-type line and else/closing
// braces are on lines missing from this chunk.
269 template<typename SrcType
, typename OpndType
>
271 emitLoadTVType(X64Assembler
& a
, SrcType src
, OpndType tvOp
) {
273 if (sizeof(DataType
) == 4) {
274 a
. loadl(src
, toReg32(tvOp
));
276 // Zero extend the type, just in case.
277 a
. loadzbl(src
, toReg32(tvOp
));
// Emit a compare against a TypedValue's type field, sized to match
// DataType: cmpl for a 4-byte DataType, cmpb for 1 byte.  NOTE(review):
// the return-type line and else/closing braces are on lines missing from
// this chunk.
281 template<typename SrcType
, typename OpndType
>
283 emitCmpTVType(X64Assembler
& a
, SrcType src
, OpndType tvOp
) {
285 if (sizeof(DataType
) == 4) {
286 a
. cmpl(src
, toReg32(tvOp
));
288 a
. cmpb(src
, toByte(tvOp
));
// Emit a store of a type tag to a TypedValue's type field, sized to match
// DataType: storel for 4 bytes, storeb for 1 byte.  NOTE(review): the
// return-type line and else/closing braces are on lines missing from this
// chunk.
292 template<typename DestType
, typename OpndType
>
294 emitStoreTVType(X64Assembler
& a
, OpndType tvOp
, DestType dest
) {
296 if (sizeof(DataType
) == 4) {
297 a
. storel(toReg32(tvOp
), dest
);
299 a
. storeb(toByte(tvOp
), dest
);
304 // emitStoreTypedValue --
305 // emitStoreUninitNull --
307 // Helpers for common cell operations.
309 // Dereference the var in the cell whose address lives in src into
// Load the inner cell's m_data through the RefData pointer in src, leaving
// the result in dest.  NOTE(review): the return-type line and closing brace
// are on lines missing from this chunk.
312 emitDeref(X64Assembler
&a
, PhysReg src
, PhysReg dest
) {
313 // src is a RefData, dest will be m_data field of inner gizmoom.
314 a
. loadq (src
[TVOFF(m_data
)], dest
);
// If the cell in reg holds a KindOfRef, replace reg with a pointer to the
// ref's inner TypedValue.  When RefData::tvOffset() == 0 a single
// conditional load suffices; otherwise an ifThen loads m_data and adds the
// offset.  NOTE(review): the return-type line, else line, and closing
// braces are on lines missing from this chunk.
318 emitDerefIfVariant(X64Assembler
&a
, PhysReg reg
) {
319 emitCmpTVType(a
, KindOfRef
, reg
[TVOFF(m_type
)]);
320 if (RefData::tvOffset() == 0) {
// Conditional move-style load: only taken when the compare set CC_E.
321 a
. cload_reg64_disp_reg64(CC_E
, reg
, TVOFF(m_data
), reg
);
323 ifThen(a
, CC_E
, [&] {
324 a
. loadq(reg
[TVOFF(m_data
)], reg
);
325 a
. addq(RefData::tvOffset(), reg
);
330 // NB: leaves count field unmodified. Does not store to m_data if type
// Store a (type, value) pair into the TypedValue at dest[disp]: m_type via
// emitStoreTVType, and m_data only for non-null types (null types carry no
// payload).  NOTE(review): the return-type line, the guard using the
// writeType parameter, and the closing braces are on lines missing from
// this chunk.
333 emitStoreTypedValue(X64Assembler
& a
, DataType type
, PhysReg val
,
334 int disp
, PhysReg dest
, bool writeType
= true) {
336 emitStoreTVType(a
, type
, dest
[disp
+ TVOFF(m_type
)]);
338 if (!IS_NULL_TYPE(type
)) {
// Null types have no payload, so a real value register is required here.
339 assert(val
!= reg::noreg
);
340 a
. storeq(val
, dest
[disp
+ TVOFF(m_data
)]);
// Store KindOfUninit into a cell's m_type, deliberately leaving
// m_data/m_aux as garbage.  NOTE(review): the return-type line and the
// remaining parameters (presumably disp and dest, used below) are on lines
// missing from this chunk.
345 emitStoreUninitNull(X64Assembler
& a
,
348 // OK to leave garbage in m_data, m_aux.
349 emitStoreTVType(a
, KindOfUninit
, dest
[disp
+ TVOFF(m_type
)]);
// Copy a TypedValue from src[srcOff] to dest[destOff] through a scratch
// register: m_data as a 64-bit load/store, m_type via the TVType helpers.
// m_aux is intentionally not copied.  NOTE(review): the return-type line
// and most of the parameter list (dest, offsets, scratch) are on lines
// missing from this chunk.
353 emitCopyTo(X64Assembler
& a
,
// The scratch register must not alias the source base register.
359 assert(src
!= scratch
);
360 // This is roughly how gcc compiles this. Blow off m_aux.
361 auto s64
= r64(scratch
);
362 auto s32
= r32(scratch
);
363 a
. loadq (src
[srcOff
+ TVOFF(m_data
)], s64
);
364 a
. storeq (s64
, dest
[destOff
+ TVOFF(m_data
)]);
365 emitLoadTVType(a
, src
[srcOff
+ TVOFF(m_type
)], s32
);
366 emitStoreTVType(a
, s32
, dest
[destOff
+ TVOFF(m_type
)]);
// Return the name of the current calling context's class, or ":anonymous:"
// when there is no class context.  (The closing brace falls outside this
// chunk.)
369 static inline const char* getContextName() {
370 Class
* ctx
= arGetContextClass(curFrame());
371 return ctx
? ctx
->name()->data() : ":anonymous:";