// Extraction provenance (was bare, non-compiling text in the scraped copy):
//   commit: "Fix asserted type in simplifyAssertTypeOp"
//   path:   hphp/runtime/vm/jit/code-gen-x64.h
//   blob:   ee10d870a06b1bf729f6a59ea232884f1fba827c
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com)     |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
17 #ifndef incl_HPHP_VM_CG_H_
18 #define incl_HPHP_VM_CG_H_
20 #include <vector>
21 #include "hphp/runtime/vm/jit/ir.h"
22 #include "hphp/runtime/vm/jit/ir-unit.h"
23 #include "hphp/runtime/vm/jit/reg-alloc.h"
24 #include "hphp/runtime/base/rds.h"
25 #include "hphp/runtime/vm/jit/arg-group.h"
26 #include "hphp/runtime/vm/jit/code-gen-helpers.h"
27 #include "hphp/runtime/vm/jit/translator-x64.h"
28 #include "hphp/runtime/vm/jit/state-vector.h"
30 namespace HPHP { namespace JIT {
32 enum class SyncOptions {
33 kNoSyncPoint,
34 kSyncPoint,
35 kSyncPointAdjustOne,
36 kSmashableAndSyncPoint,
39 // Returned information from cgCallHelper
40 struct CallHelperInfo {
41 TCA returnAddress;
44 // Information about where code was generated, for pretty-printing.
45 struct AsmInfo {
46 explicit AsmInfo(const IRUnit& unit)
47 : instRanges(unit, TcaRange(nullptr, nullptr))
48 , asmRanges(unit, TcaRange(nullptr, nullptr))
49 , astubRanges(unit, TcaRange(nullptr, nullptr))
52 // Asm address info for each instruction and block
53 StateVector<IRInstruction,TcaRange> instRanges;
54 StateVector<Block,TcaRange> asmRanges;
55 StateVector<Block,TcaRange> astubRanges;
57 void updateForInstruction(IRInstruction* inst, TCA start, TCA end);
60 typedef StateVector<IRInstruction, RegSet> LiveRegs;
62 // Stuff we need to preserve between blocks while generating code,
63 // and address information produced during codegen.
64 struct CodegenState {
65 CodegenState(const IRUnit& unit, const RegAllocInfo& regs,
66 const LiveRegs& liveRegs, AsmInfo* asmInfo)
67 : patches(unit, nullptr)
68 , addresses(unit, nullptr)
69 , regs(regs)
70 , liveRegs(liveRegs)
71 , asmInfo(asmInfo)
72 , catches(unit, CatchInfo())
75 // Each block has a list of addresses to patch, and an address if
76 // it's already been emitted.
77 StateVector<Block,void*> patches;
78 StateVector<Block,TCA> addresses;
80 // True if this block's terminal Jmp has a desination equal to the
81 // next block in the same assmbler.
82 bool noTerminalJmp;
84 // output from register allocator
85 const RegAllocInfo& regs;
87 // for each instruction, holds the RegSet of registers that must be
88 // preserved across that instruction. This is for push/pop of caller-saved
89 // registers.
90 const LiveRegs& liveRegs;
92 // Output: start/end ranges of machine code addresses of each instruction.
93 AsmInfo* asmInfo;
95 // Used to pass information about the state of the world at native
96 // calls between cgCallHelper and cgBeginCatch.
97 StateVector<Block, CatchInfo> catches;
100 constexpr Reg64 rCgGP (reg::r11);
101 constexpr RegXMM rCgXMM0(reg::xmm0);
102 constexpr RegXMM rCgXMM1(reg::xmm1);
104 struct CodeGenerator {
105 typedef JIT::X64Assembler Asm;
107 CodeGenerator(const IRUnit& unit, CodeBlock& mainCode, CodeBlock& stubsCode,
108 JIT::TranslatorX64* tx64, CodegenState& state)
109 : m_unit(unit)
110 , m_mainCode(mainCode)
111 , m_stubsCode(stubsCode)
112 , m_as(mainCode)
113 , m_astubs(stubsCode)
114 , m_tx64(tx64)
115 , m_state(state)
116 , m_rScratch(InvalidReg)
117 , m_curInst(nullptr)
121 void cgBlock(Block* block, std::vector<TransBCMapping>* bcMap);
123 private:
124 Address cgInst(IRInstruction* inst);
126 const PhysLoc srcLoc(unsigned i) const {
127 return (*m_instRegs).src(i);
129 const PhysLoc dstLoc(unsigned i) const {
130 return (*m_instRegs).dst(i);
132 ArgGroup argGroup() const {
133 return ArgGroup(m_curInst, *m_instRegs);
136 // Autogenerate function declarations for each IR instruction in ir.h
137 #define O(name, dsts, srcs, flags) void cg##name(IRInstruction* inst);
138 IR_OPCODES
139 #undef O
141 void cgCallNative(Asm& a, IRInstruction* inst);
143 CallDest callDest(PhysReg reg0, PhysReg reg1 = InvalidReg) const;
144 CallDest callDest(const IRInstruction*) const;
145 CallDest callDestTV(const IRInstruction*) const;
146 CallDest callDest2(const IRInstruction*) const;
148 // Main call helper:
149 CallHelperInfo cgCallHelper(Asm& a,
150 CppCall call,
151 const CallDest& dstInfo,
152 SyncOptions sync,
153 ArgGroup& args,
154 RegSet toSave);
155 // Overload to make the toSave RegSet optional:
156 CallHelperInfo cgCallHelper(Asm& a,
157 CppCall call,
158 const CallDest& dstInfo,
159 SyncOptions sync,
160 ArgGroup& args);
161 void cgInterpOneCommon(IRInstruction* inst);
163 enum class Width { Value, Full };
164 template<class MemRef>
165 void cgStore(MemRef dst, SSATmp* src, PhysLoc src_loc, Width);
166 template<class MemRef>
167 void cgStoreTypedValue(MemRef dst, SSATmp* src, PhysLoc src_loc);
169 // helpers to load a value in dst. When label is not null a type check
170 // is performed against value to ensure it is of the type expected by dst
171 template<class BaseRef>
172 void cgLoad(SSATmp* dst, PhysLoc dstLoc, BaseRef value,
173 Block* label = nullptr);
174 template<class BaseRef>
175 void cgLoadTypedValue(SSATmp* dst, PhysLoc dstLoc, BaseRef base,
176 Block* label = nullptr);
178 // internal helpers to manage register conflicts from a source to a PhysReg
179 // destination.
180 // If the conflict cannot be resolved the out param isResolved is set to
181 // false and the caller should take proper action
182 IndexedMemoryRef resolveRegCollision(PhysReg dst,
183 IndexedMemoryRef value,
184 bool& isResolved);
185 MemoryRef resolveRegCollision(PhysReg dst,
186 MemoryRef value,
187 bool& isResolved);
189 template<class Loc1, class Loc2, class JmpFn>
190 void emitTypeTest(Type type, Loc1 typeSrc, Loc2 dataSrc, JmpFn doJcc,
191 OptType prevType = folly::none );
193 template<class DataLoc, class JmpFn>
194 void emitSpecializedTypeTest(Type type, DataLoc data, JmpFn doJcc);
196 template<class Loc>
197 void emitTypeCheck(Type type, Loc typeSrc, Loc dataSrc, Block* taken,
198 OptType prevType = folly::none);
199 template<class Loc>
200 void emitTypeGuard(Type type, Loc typeLoc, Loc dataLoc);
202 void cgIncRefWork(Type type, SSATmp* src, PhysLoc srcLoc);
203 void cgDecRefWork(IRInstruction* inst, bool genZeroCheck);
205 template<class OpInstr, class Oper>
206 void cgUnaryIntOp(PhysLoc dst, SSATmp* src, PhysLoc src_loc, OpInstr, Oper);
208 enum Commutativity { Commutative, NonCommutative };
210 void cgRoundCommon(IRInstruction* inst, RoundDirection dir);
212 template<class Oper, class RegType>
213 void cgBinaryIntOp(IRInstruction*,
214 void (Asm::*intImm)(Immed, RegType),
215 void (Asm::*intRR)(RegType, RegType),
216 void (Asm::*mov)(RegType, RegType),
217 Oper,
218 RegType (*conv)(PhysReg),
219 Commutativity);
220 void cgBinaryDblOp(IRInstruction*,
221 void (Asm::*fpRR)(RegXMM, RegXMM));
223 template<class Oper>
224 void cgShiftCommon(IRInstruction* inst,
225 void (Asm::*instrIR)(Immed, Reg64),
226 void (Asm::*instrR)(Reg64),
227 Oper oper);
229 void cgVerifyClsWork(IRInstruction* inst);
231 void emitGetCtxFwdCallWithThis(PhysReg ctxReg,
232 bool staticCallee);
234 void emitGetCtxFwdCallWithThisDyn(PhysReg destCtxReg,
235 PhysReg thisReg,
236 RDS::Handle ch);
238 void cgJcc(IRInstruction* inst); // helper
239 void cgReqBindJcc(IRInstruction* inst); // helper
240 void cgExitJcc(IRInstruction* inst); // helper
241 void cgJccInt(IRInstruction* inst); // helper
242 void cgReqBindJccInt(IRInstruction* inst); // helper
243 void cgExitJccInt(IRInstruction* inst); // helper
244 void emitCmpInt(IRInstruction* inst, ConditionCode);
245 void cgCmpHelper(IRInstruction* inst,
246 void (Asm::*setter)(Reg8),
247 int64_t (*str_cmp_str)(StringData*, StringData*),
248 int64_t (*str_cmp_int)(StringData*, int64_t),
249 int64_t (*str_cmp_obj)(StringData*, ObjectData*),
250 int64_t (*obj_cmp_obj)(ObjectData*, ObjectData*),
251 int64_t (*obj_cmp_int)(ObjectData*, int64_t),
252 int64_t (*arr_cmp_arr)(ArrayData*, ArrayData*));
254 template<class Loc>
255 void emitSideExitGuard(Type type, Loc typeLoc,
256 Loc dataLoc, Offset taken,
257 OptType prevType = folly::none);
258 void emitReqBindJcc(ConditionCode cc, const ReqBindJccData*);
260 void emitCompare(IRInstruction* inst);
261 void emitCompareInt(IRInstruction* inst);
262 void emitTestZero(SSATmp*, PhysLoc);
263 bool emitIncDecHelper(PhysLoc dst, SSATmp* src1, PhysLoc loc1,
264 SSATmp* src2, PhysLoc loc2,
265 void(Asm::*emitFunc)(Reg64));
267 private:
268 PhysReg selectScratchReg(IRInstruction* inst);
269 void emitLoadImm(Asm& as, int64_t val, PhysReg dstReg);
270 PhysReg prepXMMReg(const SSATmp* tmp,
271 Asm& as,
272 const PhysLoc&,
273 RegXMM rXMMScratch);
274 void emitSetCc(IRInstruction*, ConditionCode);
275 template<class JmpFn>
276 void emitIsTypeTest(IRInstruction* inst, JmpFn doJcc);
277 void doubleCmp(Asm& a, RegXMM xmmReg0, RegXMM xmmReg1);
278 void cgIsTypeCommon(IRInstruction* inst, bool negate);
279 void cgJmpIsTypeCommon(IRInstruction* inst, bool negate);
280 void cgIsTypeMemCommon(IRInstruction*, bool negate);
281 void emitInstanceBitmaskCheck(IRInstruction*);
282 void emitTraceRet(Asm& as);
283 void emitInitObjProps(PhysReg dstReg, const Class* cls, size_t nProps);
285 template <typename F>
286 Address cgCheckStaticBitAndDecRef(Type type,
287 PhysReg dataReg,
288 F destroy);
289 Address cgCheckStaticBitAndDecRef(Type type,
290 PhysReg dataReg);
291 Address cgCheckRefCountedType(PhysReg typeReg);
292 Address cgCheckRefCountedType(PhysReg baseReg,
293 int64_t offset);
294 void cgDecRefStaticType(Type type,
295 PhysReg dataReg,
296 bool genZeroCheck);
297 void cgDecRefDynamicType(PhysReg typeReg,
298 PhysReg dataReg,
299 bool genZeroCheck);
300 void cgDecRefDynamicTypeMem(PhysReg baseReg,
301 int64_t offset);
302 void cgDecRefMem(Type type,
303 PhysReg baseReg,
304 int64_t offset);
306 void cgIterNextCommon(IRInstruction* inst);
307 void cgIterInitCommon(IRInstruction* inst);
308 void cgMIterNextCommon(IRInstruction* inst);
309 void cgMIterInitCommon(IRInstruction* inst);
310 void cgLdFuncCachedCommon(IRInstruction* inst);
311 void cgLookupCnsCommon(IRInstruction* inst);
312 RDS::Handle cgLdClsCachedCommon(IRInstruction* inst);
313 void emitFwdJcc(ConditionCode cc, Block* target);
314 void emitFwdJcc(Asm& a, ConditionCode cc, Block* target);
315 const Func* curFunc() const;
316 Class* curClass() const { return curFunc()->cls(); }
317 const Unit* curUnit() const { return curFunc()->unit(); }
318 void recordSyncPoint(Asm& as, SyncOptions sync = SyncOptions::kSyncPoint);
319 int iterOffset(SSATmp* tmp) { return iterOffset(tmp->getValInt()); }
320 int iterOffset(uint32_t id);
321 void emitReqBindAddr(const Func* func, TCA& dest, Offset offset);
323 void emitAdjustSp(PhysReg spReg, PhysReg dstReg, int64_t adjustment);
324 void emitConvBoolOrIntToDbl(IRInstruction* inst);
325 void cgLdClsMethodCacheCommon(IRInstruction* inst, Offset offset);
328 * Generate an if-block that branches around some unlikely code, handling
329 * the cases when a == astubs and a != astubs. cc is the branch condition
330 * to run the unlikely block.
332 * Passes the proper assembler to use to the unlikely function.
334 template <class Block>
335 void unlikelyIfBlock(ConditionCode cc, Block unlikely) {
336 if (m_as.base() == m_astubs.base()) {
337 Label done;
338 m_as.jcc(ccNegate(cc), done);
339 unlikely(m_as);
340 asm_label(m_as, done);
341 } else {
342 Label unlikelyLabel, done;
343 m_as.jcc(cc, unlikelyLabel);
344 asm_label(m_astubs, unlikelyLabel);
345 unlikely(m_astubs);
346 m_astubs.jmp(done);
347 asm_label(m_as, done);
351 template <class Then>
352 void ifBlock(ConditionCode cc, Then thenBlock) {
353 Label done;
354 m_as.jcc8(ccNegate(cc), done);
355 thenBlock(m_as);
356 asm_label(m_as, done);
359 // Generate an if-then-else block
360 template <class Then, class Else>
361 void ifThenElse(Asm& a, ConditionCode cc, Then thenBlock, Else elseBlock) {
362 Label elseLabel, done;
363 a.jcc8(ccNegate(cc), elseLabel);
364 thenBlock();
365 a.jmp8(done);
366 asm_label(a, elseLabel);
367 elseBlock();
368 asm_label(a, done);
371 // Generate an if-then-else block into m_as.
372 template <class Then, class Else>
373 void ifThenElse(ConditionCode cc, Then thenBlock, Else elseBlock) {
374 ifThenElse(m_as, cc, thenBlock, elseBlock);
378 * Same as ifThenElse except the first block is off in astubs
380 template <class Then, class Else>
381 void unlikelyIfThenElse(ConditionCode cc, Then unlikely, Else elseBlock) {
382 if (m_as.base() == m_astubs.base()) {
383 Label elseLabel, done;
384 m_as.jcc8(ccNegate(cc), elseLabel);
385 unlikely(m_as);
386 m_as.jmp8(done);
387 asm_label(m_as, elseLabel);
388 elseBlock(m_as);
389 asm_label(m_as, done);
390 } else {
391 Label unlikelyLabel, done;
392 m_as.jcc(cc, unlikelyLabel);
393 elseBlock(m_as);
394 asm_label(m_astubs, unlikelyLabel);
395 unlikely(m_astubs);
396 m_astubs.jmp(done);
397 asm_label(m_as, done);
401 // This is for printing partially-generated traces when debugging
402 void print() const;
404 private:
405 const IRUnit& m_unit;
406 CodeBlock& m_mainCode;
407 CodeBlock& m_stubsCode;
408 Asm m_as; // current "main" assembler
409 Asm m_astubs; // for stubs and other cold code
410 TranslatorX64* m_tx64;
411 CodegenState& m_state;
412 Reg64 m_rScratch; // currently selected GP scratch reg
413 IRInstruction* m_curInst; // current instruction being generated
414 const RegAllocInfo::RegMap* m_instRegs; // registers for current m_curInst.
417 const Func* loadClassCtor(Class* cls);
419 ObjectData* createClHelper(Class*, int, ActRec*, TypedValue*);
421 void genCode(CodeBlock& mainCode,
422 CodeBlock& stubsCode,
423 IRUnit& unit,
424 std::vector<TransBCMapping>* bcMap,
425 TranslatorX64* tx64,
426 const RegAllocInfo& regs);
428 // Helpers to compute a reference to a TypedValue type and data
429 inline MemoryRef refTVType(PhysReg reg) {
430 return reg[TVOFF(m_type)];
433 inline MemoryRef refTVData(PhysReg reg) {
434 return reg[TVOFF(m_data)];
437 inline MemoryRef refTVType(MemoryRef ref) {
438 return *(ref.r + TVOFF(m_type));
441 inline MemoryRef refTVData(MemoryRef ref) {
442 return *(ref.r + TVOFF(m_data));
445 inline IndexedMemoryRef refTVType(IndexedMemoryRef ref) {
446 return *(ref.r + TVOFF(m_type));
449 inline IndexedMemoryRef refTVData(IndexedMemoryRef ref) {
450 return *(ref.r + TVOFF(m_data));
455 #endif