Fix spilling bug
[hiphop-php.git] hphp/runtime/vm/jit/translator-x64-internal.h
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2013 Facebook, Inc. (http://www.facebook.com)     |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#ifndef incl_HPHP_TRANSLATOR_X64_INTERNAL_H_
#define incl_HPHP_TRANSLATOR_X64_INTERNAL_H_

#include <boost/optional.hpp>
#include <boost/filesystem.hpp>
#include <boost/utility/typed_in_place_factory.hpp>

#include "hphp/runtime/vm/jit/abi-x64.h"

namespace HPHP {
namespace Transl {

TRACE_SET_MOD(tx64);

static const DataType BitwiseKindOfString = KindOfString;
// Generate an if-then block into a.  thenBlock is executed if cc is true.
template <class Then>
void ifThen(Transl::X64Assembler& a, ConditionCode cc, Then thenBlock) {
  Label done;
  a.jcc8(ccNegate(cc), done);
  thenBlock();
  asm_label(a, done);
}
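
// For instance, emitDerefIfVariant (later in this file) uses ifThen to
// follow a RefData's inner cell only on the equal path.  A minimal sketch,
// assuming the flags were just set by emitCmpTVType and using rdi purely
// for illustration:
//
//   emitCmpTVType(a, KindOfRef, rdi[TVOFF(m_type)]);
//   ifThen(a, CC_E, [&] {
//     a.  loadq(rdi[TVOFF(m_data)], rdi);
//   });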
// RAII aids to machine code.
//
// UnlikelyIfBlock:
//
//   Branch to distant code (that we presumably don't expect to
//   take). This helps keep hot paths compact.
//
//   A common pattern using this involves patching the jump in astubs
//   to jump past the normal control flow in a (as in the following
//   example). Do this using DiamondReturn so the register allocator
//   state will be properly maintained. (Spills/fills to keep the
//   states in sync will be emitted on the unlikely path.)
//
// Example:
//
//   {
//     PhysReg inputParam = i.getReg(i.inputs[0]->location);
//     a.   test_reg_reg(inputParam, inputParam);
//     DiamondReturn retFromStubs;
//     {
//       UnlikelyIfBlock ifNotRax(CC_Z, a, astubs, &retFromStubs);
//       EMIT_CALL(a, TCA(launch_nuclear_missiles));
//     }
//     // The inputParam was non-zero, here is the likely branch:
//     m_regMap.allocOutputRegs(i);
//     emitMovRegReg(inputParam, m_regMap.getReg(i.outLocal->location));
//     // ~DiamondReturn patches the jump, and reconciles the branch
//     // with the main line.  (In this case it will fill the outLocal
//     // register since the main line thinks it is dirty.)
//   }
//   // The two cases are joined here.  We can do logic that was
//   // independent of whether the branch was taken, if necessary.
//   emitMovRegReg(i.outLocal, m_regMap.getReg(i.outStack->location));
//
// Note: it is ok to nest UnlikelyIfBlocks, as long as their
// corresponding DiamondReturns are correctly destroyed in reverse
// order.  But also note that this can lead to more jumps on the
// unlikely branch (see ~DiamondReturn).
struct UnlikelyIfBlock {
  X64Assembler& m_likely;
  X64Assembler& m_unlikely;
  TCA m_likelyPostBranch;

  explicit UnlikelyIfBlock(ConditionCode cc,
                           X64Assembler& likely,
                           X64Assembler& unlikely)
    : m_likely(likely)
    , m_unlikely(unlikely)
  {
    m_likely.jcc(cc, m_unlikely.code.frontier);
    m_likelyPostBranch = m_likely.code.frontier;
  }

  ~UnlikelyIfBlock() {
    m_unlikely.jmp(m_likelyPostBranch);
  }
};
// Helper structs for jcc vs. jcc8.
struct Jcc8 {
  static void branch(X64Assembler& a, ConditionCode cc, TCA dest) {
    a.   jcc8(cc, dest);
  }
  static void patch(X64Assembler& a, TCA site, TCA newDest) {
    a.patchJcc8(site, newDest);
  }
};

struct Jcc32 {
  static void branch(X64Assembler& a, ConditionCode cc, TCA dest) {
    a.   jcc(cc, dest);
  }
  static void patch(X64Assembler& a, TCA site, TCA newDest) {
    a.patchJcc(site, newDest);
  }
};
// JccBlock --
//   A raw condition-code block; assumes whatever comparison or ALU op
//   that sets the Jcc has already executed.
template <ConditionCode Jcc, typename J = Jcc8>
struct JccBlock {
  mutable X64Assembler* m_a;
  TCA m_jcc;

  explicit JccBlock(X64Assembler& a)
    : m_a(&a) {
    m_jcc = a.code.frontier;
    J::branch(a, Jcc, m_a->code.frontier);
  }

  ~JccBlock() {
    if (m_a) {
      J::patch(*m_a, m_jcc, m_a->code.frontier);
    }
  }

private:
  JccBlock(const JccBlock&);
  JccBlock& operator=(const JccBlock&);
};
template<ConditionCode Jcc, class Lambda>
void jccBlock(X64Assembler& a, Lambda body) {
  Label exit;
  exit.jcc8(a, Jcc);
  body();
  asm_label(a, exit);
}
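
// Both styles emit a forward branch over the guarded code and resolve it to
// land just past the block.  A sketch, assuming the flags were already set
// and using rdi purely for illustration:
//
//   {
//     a.   test_reg_reg(rdi, rdi);
//     JccBlock<CC_Z> ifNonZero(a);
//     // ... runs at runtime only when rdi was non-zero ...
//   }
//
//   // or, equivalently, with the lambda form:
//   jccBlock<CC_Z>(a, [&] {
//     // ... runs at runtime only when rdi was non-zero ...
//   });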
// A CondBlock is an RAII structure for emitting conditional code.  It
// compares the source register at fieldOffset with fieldValue, and
// conditionally branches over the enclosing block of assembly on the
// passed-in condition-code.
//
// E.g.:
//   {
//     IfRefCounted ifRefCounted(a, rdi, 0);
//     emitIncRef(rdi);
//   }
//
// will only execute emitIncRef if we find at runtime that rdi points at
// a ref-counted cell.
//
// It's ok to do reconcilable register operations in the body.
template<unsigned FieldOffset, unsigned FieldValue, ConditionCode Jcc,
         typename FieldType>
struct CondBlock {
  X64Assembler& m_a;
  int m_off;
  TCA m_jcc8;

  CondBlock(X64Assembler& a, PhysReg reg, int offset = 0)
    : m_a(a)
    , m_off(offset) {
    int typeDisp = m_off + FieldOffset;
    static_assert(sizeof(FieldType) == 1 || sizeof(FieldType) == 4,
                  "CondBlock of unimplemented field size");
    if (sizeof(FieldType) == 4) {
      a.   cmpl(FieldValue, reg[typeDisp]);
    } else if (sizeof(FieldType) == 1) {
      a.   cmpb(FieldValue, reg[typeDisp]);
    }
    m_jcc8 = a.code.frontier;
    a.   jcc8(Jcc, m_jcc8);
    // ...
  }

  ~CondBlock() {
    m_a.patchJcc8(m_jcc8, m_a.code.frontier);
  }
};
// IfRefCounted --
//   Emits if (IS_REFCOUNTED_TYPE()) { ... }
typedef CondBlock <TVOFF(m_type),
                   KindOfRefCountThreshold,
                   CC_LE,
                   DataType> IfRefCounted;

typedef CondBlock <TVOFF(m_type),
                   KindOfRef,
                   CC_NZ,
                   DataType> IfVariant;

typedef CondBlock <TVOFF(m_type),
                   KindOfUninit,
                   CC_Z,
                   DataType> UnlessUninit;
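
// A sketch of the latter two, with rdi standing in for any register that
// holds a TypedValue* (the register choice is illustrative only):
//
//   {
//     UnlessUninit ifInited(a, rdi);
//     // ... runs at runtime only when rdi's m_type != KindOfUninit ...
//   }
//
//   {
//     IfVariant ifRef(a, rdi);
//     // ... runs at runtime only when rdi's m_type == KindOfRef ...
//   }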
/*
 * locToRegDisp --
 *
 * Helper code for stack frames. The struct is a "template" in the
 * non-C++ sense: we don't build source-level stack frames in C++
 * for the most part, but its offsets tell us where to find fields
 * in assembly.
 *
 * If we were physically pushing stack frames, we would push them
 * in reverse order to what you see here.
 */
static inline void
locToRegDisp(const Location& l, PhysReg *outbase, int *outdisp,
             const Func* f = nullptr) {
  assert_not_implemented((l.space == Location::Stack ||
                          l.space == Location::Local ||
                          l.space == Location::Iter));
  *outdisp = cellsToBytes(Translator::locPhysicalOffset(l, f));
  *outbase = l.space == Location::Stack ? rVmSp : rVmFp;
}
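
// A hypothetical use (the Location constructor form, local index, and
// registers are illustrative only): resolve local 0 of the current func to a
// base register plus byte displacement, then load its m_data.
//
//   PhysReg base;
//   int disp;
//   locToRegDisp(Location(Location::Local, 0), &base, &disp, curFunc());
//   a.   loadq(base[disp + TVOFF(m_data)], rax);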
// Common code emission patterns.

static_assert(sizeof(DataType) == 4 || sizeof(DataType) == 1,
              "Your DataType has an unsupported size.");

static inline Reg8 toByte(const Reg32& x)   { return rbyte(x); }
static inline Reg8 toByte(const Reg64& x)   { return rbyte(x); }
static inline Reg8 toByte(PhysReg x)        { return rbyte(x); }

static inline Reg32 toReg32(const Reg64& x) { return r32(x); }
static inline Reg32 toReg32(const Reg8& x)  { return r32(x); }
static inline Reg32 toReg32(PhysReg x)      { return r32(x); }

// For other operand types, let whatever conversions (or compile
// errors) exist handle it.
template<typename OpndType>
static OpndType toByte(const OpndType& x) { return x; }

template<typename OpndType>
static OpndType toReg32(const OpndType& x) { return x; }
template<typename OpndType>
static inline void verifyTVOff(const OpndType& op) { /* nop */ }
static inline void verifyTVOff(const MemoryRef& mr) {
  DEBUG_ONLY auto disp = mr.r.disp;
  // Make sure that we're operating on the m_type field of a
  // TypedValue*.
  assert((disp & (sizeof(TypedValue) - 1)) == TVOFF(m_type));
}
template<typename SrcType, typename OpndType>
static inline void
emitTestTVType(X64Assembler& a, SrcType src, OpndType tvOp) {
  verifyTVOff(src);
  if (sizeof(DataType) == 4) {
    a.  testl(src, toReg32(tvOp));
  } else {
    a.  testb(src, toByte(tvOp));
  }
}

template<typename SrcType, typename OpndType>
static inline void
emitLoadTVType(X64Assembler& a, SrcType src, OpndType tvOp) {
  verifyTVOff(src);
  if (sizeof(DataType) == 4) {
    a.  loadl(src, toReg32(tvOp));
  } else {
    // Zero extend the type, just in case.
    a.  loadzbl(src, toReg32(tvOp));
  }
}

template<typename SrcType, typename OpndType>
static inline void
emitCmpTVType(X64Assembler& a, SrcType src, OpndType tvOp) {
  verifyTVOff(src);
  if (sizeof(DataType) == 4) {
    a.  cmpl(src, toReg32(tvOp));
  } else {
    a.  cmpb(src, toByte(tvOp));
  }
}

template<typename DestType, typename OpndType>
static inline void
emitStoreTVType(X64Assembler& a, OpndType tvOp, DestType dest) {
  verifyTVOff(dest);
  if (sizeof(DataType) == 4) {
    a.  storel(toReg32(tvOp), dest);
  } else {
    a.  storeb(toByte(tvOp), dest);
  }
}
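
// These let a single call site emit either the 32-bit or the 8-bit form
// depending on sizeof(DataType).  A sketch, with the register and comparison
// purely illustrative:
//
//   emitLoadTVType(a, rdi[TVOFF(m_type)], rax);          // loadl or loadzbl
//   emitCmpTVType(a, KindOfString, rdi[TVOFF(m_type)]);  // cmpl or cmpb
//   {
//     JccBlock<CC_NZ> ifString(a);
//     // ... runs at runtime only when the cell's type was KindOfString ...
//   }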
// emitDeref --
// emitStoreTypedValue --
// emitStoreUninitNull --
//
//   Helpers for common cell operations.
//
//   Dereference the var in the cell whose address lives in src into
//   dest.
static inline void
emitDeref(X64Assembler &a, PhysReg src, PhysReg dest) {
  // src is a RefData, dest will be m_data field of inner gizmoom.
  a.    loadq (src[TVOFF(m_data)], dest);
}
static inline void
emitDerefIfVariant(X64Assembler &a, PhysReg reg) {
  emitCmpTVType(a, KindOfRef, reg[TVOFF(m_type)]);
  if (RefData::tvOffset() == 0) {
    a.    cload_reg64_disp_reg64(CC_E, reg, TVOFF(m_data), reg);
  } else {
    ifThen(a, CC_E, [&] {
      a.    loadq(reg[TVOFF(m_data)], reg);
      a.    addq(RefData::tvOffset(), reg);
    });
  }
}
// NB: leaves count field unmodified. Does not store to m_data if type
// is a null type.
static inline void
emitStoreTypedValue(X64Assembler& a, DataType type, PhysReg val,
                    int disp, PhysReg dest, bool writeType = true) {
  if (writeType) {
    emitStoreTVType(a, type, dest[disp + TVOFF(m_type)]);
  }
  if (!IS_NULL_TYPE(type)) {
    assert(val != reg::noreg);
    a.    storeq(val, dest[disp + TVOFF(m_data)]);
  }
}
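
// For example, to materialize an int cell at rVmSp[disp] from a value
// register (a sketch; the register and displacement are illustrative):
//
//   emitStoreTypedValue(a, KindOfInt64, reg::rax, disp, rVmSp);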
static inline void
emitStoreUninitNull(X64Assembler& a,
                    int disp,
                    PhysReg dest) {
  // OK to leave garbage in m_data, m_aux.
  emitStoreTVType(a, KindOfUninit, dest[disp + TVOFF(m_type)]);
}
static inline void
emitCopyTo(X64Assembler& a,
           Reg64 src,
           int srcOff,
           Reg64 dest,
           int destOff,
           PhysReg scratch) {
  assert(src != scratch);
  // This is roughly how gcc compiles this. Blow off m_aux.
  auto s64 = r64(scratch);
  auto s32 = r32(scratch);
  a.    loadq  (src[srcOff + TVOFF(m_data)], s64);
  a.    storeq (s64, dest[destOff + TVOFF(m_data)]);
  emitLoadTVType(a, src[srcOff + TVOFF(m_type)], s32);
  emitStoreTVType(a, s32, dest[destOff + TVOFF(m_type)]);
}
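
// A sketch of a cell-to-cell copy (registers purely illustrative): copy the
// TypedValue at rdi[0] to rsi[0], using a scratch register the caller knows
// is free.
//
//   emitCopyTo(a, reg::rdi, 0, reg::rsi, 0, reg::r10);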
static inline const char* getContextName() {
  Class* ctx = arGetContextClass(curFrame());
  return ctx ? ctx->name()->data() : ":anonymous:";
}

}}

#endif