2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #ifndef incl_HPHP_VM_CODEGENHELPERS_X64_H_
18 #define incl_HPHP_VM_CODEGENHELPERS_X64_H_
20 #include "hphp/util/asm-x64.h"
21 #include "hphp/util/ringbuffer.h"
23 #include "hphp/runtime/base/types.h"
24 #include "hphp/runtime/vm/jit/abi-x64.h"
25 #include "hphp/runtime/vm/jit/code-gen-helpers.h"
26 #include "hphp/runtime/vm/jit/cpp-call.h"
27 #include "hphp/runtime/vm/jit/ir-opcode.h"
28 #include "hphp/runtime/vm/jit/phys-reg.h"
29 #include "hphp/runtime/vm/jit/service-requests-x64.h"
30 #include "hphp/runtime/vm/jit/service-requests.h"
31 #include "hphp/runtime/vm/jit/translator.h"
32 #include "hphp/runtime/vm/jit/vasm-x64.h"
35 //////////////////////////////////////////////////////////////////////
40 //////////////////////////////////////////////////////////////////////
46 //////////////////////////////////////////////////////////////////////
48 typedef X64Assembler Asm
;
50 constexpr size_t kJmpTargetAlign
= 16;
52 void moveToAlign(CodeBlock
& cb
, size_t alignment
= kJmpTargetAlign
);
54 void emitEagerSyncPoint(Asm
& as
, const Op
* pc
);
55 void emitEagerSyncPoint(Vout
& v
, const Op
* pc
);
56 void emitEagerVMRegSave(Asm
& as
, RegSaveFlags flags
);
57 void emitGetGContext(Asm
& as
, PhysReg dest
);
58 void emitGetGContext(Vout
& as
, Vreg dest
);
60 void emitTransCounterInc(Asm
& a
);
61 void emitTransCounterInc(Vout
&);
63 void emitIncRef(Asm
& as
, PhysReg base
);
64 void emitIncRef(Vout
& v
, Vreg base
);
65 void emitIncRefCheckNonStatic(Asm
& as
, PhysReg base
, DataType dtype
);
66 void emitIncRefGenericRegSafe(Asm
& as
, PhysReg base
, int disp
, PhysReg tmpReg
);
68 void emitAssertFlagsNonNegative(Vout
& v
, Vreg sf
);
69 void emitAssertRefCount(Vout
& v
, Vreg base
);
71 void emitMovRegReg(Asm
& as
, PhysReg srcReg
, PhysReg dstReg
);
72 void emitLea(Asm
& as
, MemoryRef mr
, PhysReg dst
);
74 Vreg
emitLdObjClass(Vout
& v
, Vreg objReg
, Vreg dstReg
);
75 Vreg
emitLdClsCctx(Vout
& v
, Vreg srcReg
, Vreg dstReg
);
77 void emitCall(Asm
& as
, TCA dest
, RegSet args
);
78 void emitCall(Asm
& as
, CppCall call
, RegSet args
);
79 void emitCall(Vout
& v
, CppCall call
, RegSet args
);
81 // store imm to the 8-byte memory location at ref. Warning: don't use this
82 // if you wanted an atomic store; large imms cause two stores.
83 void emitImmStoreq(Vout
& v
, Immed64 imm
, Vptr ref
);
84 void emitImmStoreq(Asm
& as
, Immed64 imm
, MemoryRef ref
);
86 void emitJmpOrJcc(Asm
& as
, ConditionCode cc
, TCA dest
);
88 void emitRB(Asm
& a
, Trace::RingBufferType t
, const char* msgm
);
90 void emitTraceCall(CodeBlock
& cb
, Offset pcOff
);
93 * Tests the surprise flags for the current thread. Should be used
94 * before a jnz to surprise handling code.
96 void emitTestSurpriseFlags(Asm
& as
);
97 Vreg
emitTestSurpriseFlags(Vout
&);
99 void emitCheckSurpriseFlagsEnter(Vout
& main
, Vout
& cold
, Fixup fixup
);
100 void emitCheckSurpriseFlagsEnter(CodeBlock
& mainCode
, CodeBlock
& coldCode
,
103 #ifdef USE_GCC_FAST_TLS
106 * TLS access: XXX we currently only support static-style TLS directly
109 * x86 terminology review: "Virtual addresses" are subject to both
110 * segmented translation and paged translation. "Linear addresses" are
111 * post-segmentation addresses, subject only to paging. C and C++ generally
112 * only have access to bitwise linear addresses.
114 * TLS data live at negative virtual addresses off FS: the first datum
115 * is typically at VA(FS:-sizeof(datum)). Linux's x64 ABI stores the linear
116 * address of the base of TLS at VA(FS:0). While this is just a convention, it
117 * is firm: gcc builds binaries that assume it when, e.g., evaluating
120 * The virtual addresses of TLS data are not exposed to C/C++. To figure it
121 * out, we take a datum's linear address, and subtract it from the linear
122 * address where TLS starts.
126 emitTLSLoad(Vout
& v
, const ThreadLocalNoCheck
<T
>& datum
, Vreg reg
) {
127 uintptr_t virtualAddress
= uintptr_t(&datum
.m_node
.m_p
) - tlsBase();
128 Vptr addr
{baseless(virtualAddress
), Vptr::FS
};
129 v
<< load
{addr
, reg
};
134 emitTLSLoad(X64Assembler
& a
, const ThreadLocalNoCheck
<T
>& datum
, Reg64 reg
) {
135 uintptr_t virtualAddress
= uintptr_t(&datum
.m_node
.m_p
) - tlsBase();
136 a
.fs().loadq(baseless(virtualAddress
), reg
);
139 #else // USE_GCC_FAST_TLS
143 emitTLSLoad(Vout
& v
, const ThreadLocalNoCheck
<T
>& datum
, Vreg dest
) {
144 PhysRegSaver(v
, kGPCallerSaved
); // we don't know for sure what's alive
145 v
<< ldimm
{datum
.m_key
, argNumToRegName
[0]};
146 const CodeAddress addr
= (CodeAddress
)pthread_getspecific
;
147 if (deltaFits((uintptr_t)addr
, sz::dword
)) {
148 v
<< call
{addr
, argSet(1)};
150 v
<< ldimm
{addr
, reg::rax
};
151 v
<< callr
{reg::rax
, argSet(1)};
153 if (dest
!= Vreg(reg::rax
)) {
154 v
<< movq
{reg::rax
, dest
};
160 emitTLSLoad(X64Assembler
& a
, const ThreadLocalNoCheck
<T
>& datum
, Reg64 dest
) {
161 PhysRegSaver(a
, kGPCallerSaved
); // we don't know for sure what's alive
162 a
. emitImmReg(datum
.m_key
, argNumToRegName
[0]);
163 const TCA addr
= (TCA
)pthread_getspecific
;
164 if (deltaFits((uintptr_t)addr
, sz::dword
)) {
167 a
. movq(addr
, reg::rax
);
170 if (dest
!= reg::rax
) {
171 a
. movq(reg::rax
, dest
);
175 #endif // USE_GCC_FAST_TLS
177 // Emit a load of a low pointer.
178 void emitLdLowPtr(Vout
& v
, Vptr mem
, Vreg reg
, size_t size
);
180 void emitCmpClass(Vout
& v
, Vreg sf
, const Class
* c
, Vptr mem
);
181 void emitCmpClass(Vout
& v
, Vreg sf
, Vreg reg
, Vptr mem
);
182 void emitCmpClass(Vout
& v
, Vreg sf
, Vreg reg1
, Vreg reg2
);
184 void copyTV(Vout
& v
, Vloc src
, Vloc dst
);
185 void pack2(Vout
& v
, Vreg s0
, Vreg s1
, Vreg d0
);
187 Vreg
zeroExtendIfBool(Vout
& v
, const SSATmp
* src
, Vreg reg
);
189 ConditionCode
opToConditionCode(Opcode opc
);
// NOTE(review): the body of jccBlock (original lines 193-198) is missing
// from this chunk of the paste; only the signature survives below. It
// cannot be reconstructed safely here — recover it from version control.
191 template<ConditionCode Jcc
, class Lambda
>
192 void jccBlock(Asm
& a
, Lambda body
) {
200 * callDestructor/jumpDestructor --
202 * Emit a call or jump to the appropriate destructor for a dynamically
205 * No registers are saved; most translated code should be using
206 * emitDecRefGeneric{Reg,} instead of this.
210 * - typeReg is destroyed and may not be argNumToRegName[0].
211 * - argNumToRegName[0] should contain the m_data for this value.
212 * - scratch is destroyed.
215 inline MemoryRef
lookupDestructor(X64Assembler
& a
, PhysReg typeReg
) {
216 auto const table
= reinterpret_cast<intptr_t>(g_destructors
);
217 always_assert_flog(deltaFits(table
, sz::dword
),
218 "Destructor function table is expected to be in the data "
219 "segment, with addresses less than 2^31"
221 static_assert((KindOfString
>> kShiftDataTypeToDestrIndex
== 1) &&
222 (KindOfArray
>> kShiftDataTypeToDestrIndex
== 2) &&
223 (KindOfObject
>> kShiftDataTypeToDestrIndex
== 3) &&
224 (KindOfResource
>> kShiftDataTypeToDestrIndex
== 4) &&
225 (KindOfRef
>> kShiftDataTypeToDestrIndex
== 5),
226 "lookup of destructors depends on KindOf* values");
227 a
. shrl (kShiftDataTypeToDestrIndex
, r32(typeReg
));
228 return baseless(typeReg
*8 + table
);
231 inline MemoryRef
lookupDestructor(Vout
& v
, PhysReg typeReg
) {
232 auto const table
= reinterpret_cast<intptr_t>(g_destructors
);
233 always_assert_flog(deltaFits(table
, sz::dword
),
234 "Destructor function table is expected to be in the data "
235 "segment, with addresses less than 2^31"
237 static_assert((KindOfString
>> kShiftDataTypeToDestrIndex
== 1) &&
238 (KindOfArray
>> kShiftDataTypeToDestrIndex
== 2) &&
239 (KindOfObject
>> kShiftDataTypeToDestrIndex
== 3) &&
240 (KindOfResource
>> kShiftDataTypeToDestrIndex
== 4) &&
241 (KindOfRef
>> kShiftDataTypeToDestrIndex
== 5),
242 "lookup of destructors depends on KindOf* values");
243 v
<< shrli
{kShiftDataTypeToDestrIndex
, typeReg
, typeReg
, v
.makeReg()};
244 return baseless(typeReg
*8 + table
);
247 //////////////////////////////////////////////////////////////////////