Relocate to arbitrary alignment
[hiphop-php.git] / hphp/runtime/vm/jit/service-requests-x64.cpp
blob 8d1acebad5d1a397e898a2ce0c9fc80be9dfd08b
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com)     |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#include "hphp/runtime/vm/jit/service-requests-x64.h"

#include "folly/Optional.h"

#include "hphp/runtime/vm/jit/code-gen-helpers-x64.h"
#include "hphp/runtime/vm/jit/back-end.h"
#include "hphp/runtime/vm/jit/back-end-x64.h"
#include "hphp/runtime/vm/jit/translator-inline.h"
#include "hphp/runtime/vm/jit/mc-generator.h"
#include "hphp/runtime/vm/jit/mc-generator-internal.h"
#include "hphp/runtime/vm/jit/service-requests-inline.h"
#include "hphp/runtime/vm/jit/types.h"
#include "hphp/runtime/vm/srckey.h"
#include "hphp/util/asm-x64.h"
#include "hphp/util/ringbuffer.h"

namespace HPHP { namespace JIT { namespace X64 {

using JIT::reg::rip;

TRACE_SET_MOD(servicereq);

// An intentionally funny-looking-in-core-dumps constant for uninitialized
// instruction pointers.
constexpr uint64_t kUninitializedRIP = 0xba5eba11acc01ade;

TCA
emitServiceReqImpl(TCA stubStart, TCA start, TCA& end, int maxStubSpace,
                   SRFlags flags, ServiceRequest req,
                   const ServiceReqArgVec& argv);

namespace {

void emitBindJ(CodeBlock& cb, CodeBlock& frozen,
               ConditionCode cc, SrcKey dest, ServiceRequest req) {
  mcg->backEnd().prepareForSmash(cb, cc == JIT::CC_None ? kJmpLen : kJmpccLen);
  TCA toSmash = cb.frontier();
  if (cb.base() == frozen.base()) {
    Asm a { cb };
    emitJmpOrJcc(a, cc, toSmash);
  }
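
  // When the jump and its service-request stub both live in frozen, the
  // branch just emitted is only a provisional self-jump; it is smashed to
  // the real stub address once the stub has been emitted below.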
  mcg->setJmpTransID(toSmash);

  TCA sr = emitEphemeralServiceReq(frozen,
                                   mcg->getFreeStub(frozen,
                                                    &mcg->cgFixups()),
                                   req, RipRelative(toSmash),
                                   dest.toAtomicInt());

  Asm a { cb };
  if (cb.base() == frozen.base()) {
    CodeCursor cursor(cb, toSmash);
    emitJmpOrJcc(a, cc, sr);
  } else {
    emitJmpOrJcc(a, cc, sr);
  }
}

/*
 * NativeImpl is a special operation in the sense that it must be the
 * only opcode in a function body, and also functions as the return.
 */
int32_t emitNativeImpl(CodeBlock& mainCode, const Func* func) {
  BuiltinFunction builtinFuncPtr = func->builtinFuncPtr();
  if (false) { // typecheck
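    // Never executed: this call exists only so the compiler type-checks
    // that builtinFuncPtr is callable with an ActRec*.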
    ActRec* ar = nullptr;
    builtinFuncPtr(ar);
  }

  TRACE(2, "calling builtin preClass %p func %p\n", func->preClass(),
        builtinFuncPtr);
  /*
   * Call the native implementation. This will free the locals for us in the
   * normal case. In the case where an exception is thrown, the VM unwinder
   * will handle it for us.
   */
  Asm a { mainCode };
  a.   movq  (rVmFp, argNumToRegName[0]);
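  // Some builtins are flagged in the fixup map as needing the VM registers
  // written back eagerly before the call, rather than recovered lazily.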
  if (mcg->fixupMap().eagerRecord(func)) {
    emitEagerSyncPoint(a, reinterpret_cast<const Op*>(func->getEntry()));
  }
  emitCall(a, (TCA)builtinFuncPtr);

  /*
   * We're sometimes calling this while curFunc() isn't really the
   * builtin---make sure to properly record the sync point as if we
   * are inside the builtin.
   *
   * The assumption here is that for builtins, the generated func
   * contains only a single opcode (NativeImpl), and there are no
   * non-argument locals.
   */
  assert(func->numIterators() == 0 && func->methInfo());
  assert(func->numLocals() == func->numParams());
  assert(*reinterpret_cast<const Op*>(func->getEntry()) == Op::NativeImpl);
  assert(instrLen((Op*)func->getEntry()) == func->past() - func->base());
  Offset pcOffset = 0;  // NativeImpl is the only instruction in the func
  Offset stackOff = func->numLocals(); // Builtin stubs have no
                                       // non-arg locals
  mcg->recordSyncPoint(mainCode.frontier(), pcOffset, stackOff);

  /*
   * The native implementation already put the return value on the
   * stack for us, and handled cleaning up the arguments. We have to
   * update the frame pointer and the stack pointer, and load the
   * return value into the return register so the trace we are
   * returning to has it where it expects.
   *
   * TODO(#1273094): we should probably modify the actual builtins to
   * return values via registers (rax:edx) using the C ABI and do a
   * reg-to-reg move.
   */
  int nLocalCells = func->numSlotsInFrame();
  a.   loadq  (rVmFp[AROFF(m_sfp)], rVmFp);

  emitRB(a, Trace::RBTypeFuncExit, func->fullName()->data());
  return sizeof(ActRec) + cellsToBytes(nLocalCells - 1);
}

static int maxStubSpace() {
  /* max space for moving to align, saving VM regs plus emitting args */
  static constexpr int
    kVMRegSpace = 0x14,
    kMovSize = 0xa,
    kNumServiceRegs = sizeof(serviceReqArgRegs) / sizeof(PhysReg),
    kMaxStubSpace = kJmpTargetAlign - 1 + kVMRegSpace +
      kNumServiceRegs * kMovSize;

  return kMaxStubSpace;
}

void emitBindCallHelper(CodeBlock& mainCode, CodeBlock& frozenCode,
                        SrcKey srcKey,
                        const Func* funcd,
                        int numArgs) {
  // Whatever prologue we're branching to will check at runtime that we
  // went to the right Func*, correcting if necessary. We treat the first
  // Func we encounter as a decent prediction. Make space to burn in a
  // TCA.
  ReqBindCall* req = mcg->globalData().alloc<ReqBindCall>();

  // Use some space from the beginning of the service
  // request stub to emit BIND_CALL specific code.
  TCA start = mcg->getFreeStub(frozenCode, &mcg->cgFixups());

  Asm a { mainCode };
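  // Emit a smashable call that initially targets the service-request stub;
  // once REQ_BIND_CALL has been serviced, the call is smashed to point at
  // the bound prologue (req->m_toSmash below records the address to patch).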
  mcg->backEnd().prepareForSmash(mainCode, kCallLen);
  TCA toSmash = mainCode.frontier();
  a.    call(start);

  TCA end;
  CodeBlock cb;
  auto stubSpace = maxStubSpace();
  cb.init(start, stubSpace, "stubTemp");
  Asm as { cb };
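
  // BIND_CALL-specific stub prologue: pass the predicted pre-live ActRec
  // (rStashedAR) in the second service-request argument register, and pop
  // the return address pushed by the smashed call into the ActRec via
  // emitPopRetIntoActRec.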
  as.    movq  (rStashedAR, serviceReqArgRegs[1]);
  emitPopRetIntoActRec(as);

  auto spaceLeft = stubSpace - (cb.frontier() - start);
  ServiceReqArgVec argv;
  packServiceReqArgs(argv, req);

  emitServiceReqImpl(start, cb.frontier(), end, spaceLeft,
                     SRFlags::None, JIT::REQ_BIND_CALL, argv);
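
  // If the stub was emitted at frozenCode's frontier rather than into a
  // recycled stub, claim the space we just wrote.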
  if (start == frozenCode.frontier()) {
    frozenCode.skip(end - start);
  }

  TRACE(1, "will bind static call: tca %p, funcd %p, acold %p\n",
        toSmash, funcd, frozenCode.frontier());
  mcg->cgFixups().m_codePointers.insert(&req->m_toSmash);
  req->m_toSmash = toSmash;
  req->m_nArgs = numArgs;
  req->m_sourceInstr = srcKey;
  req->m_isImmutable = (bool)funcd;
}

} // anonymous namespace

//////////////////////////////////////////////////////////////////////

TCA
emitServiceReqImpl(TCA stubStart, TCA start, TCA& end, int maxStubSpace,
                   SRFlags flags, ServiceRequest req,
                   const ServiceReqArgVec& argv) {
  assert(start);
  const bool align   = flags & SRFlags::Align;
  const bool persist = flags & SRFlags::Persist;

  DEBUG_ONLY static constexpr int kMovSize = 0xa;
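  // (0xa is the size of a 'mov r64, imm64'; only the size assert in the
  // CondCode case below consults it.)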

  CodeBlock cb;
  cb.init(start, maxStubSpace, "stubTemp");
  Asm as { cb };

  if (align) {
    moveToAlign(cb);
  }
  TCA retval = as.frontier();
  TRACE(3, "Emit Service Req @%p %s(", start, serviceReqName(req));
  /*
   * Move args into appropriate regs. Eager VMReg save may bash flags,
   * so set the CondCode arguments first.
   */
  for (int i = 0; i < argv.size(); ++i) {
    assert(i < kNumServiceReqArgRegs);
    auto reg = serviceReqArgRegs[i];
    const auto& argInfo = argv[i];
    switch (argv[i].m_kind) {
      case ServiceReqArgInfo::Immediate: {
        TRACE(3, "%" PRIx64 ", ", argInfo.m_imm);
        as.    emitImmReg(argInfo.m_imm, reg);
      } break;
      case ServiceReqArgInfo::RipRelative: {
        TRACE(3, "$rip(%" PRIx64 "), ", argInfo.m_imm);
        as.    lea(rip[argInfo.m_imm], reg);
      } break;
      case ServiceReqArgInfo::CondCode: {
        // Already set before VM reg save.
        DEBUG_ONLY TCA start = as.frontier();
        as.    setcc(argInfo.m_cc, rbyte(reg));
        assert(as.frontier() - start <= kMovSize);
        TRACE(3, "cc(%x), ", argInfo.m_cc);
      } break;
      default: not_reached();
    }
  }
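
  // Save the VM registers for the C++ request handler, then record this
  // stub's address in rAsm: persistent stubs pass 0, ephemeral ones pass
  // their own start address so the stub can be recycled once the request
  // has been serviced.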
  emitEagerVMRegSave(as, RegSaveFlags::SaveFP);
  if (persist) {
    as.  emitImmReg(0, JIT::X64::rAsm);
  } else {
    as.  lea(rip[(int64_t)stubStart], JIT::X64::rAsm);
  }
  TRACE(3, ")\n");
  as.  emitImmReg(req, JIT::reg::rdi);

  /*
   * Weird hand-shaking with enterTC: reverse-call a service routine.
   *
   * In the case of some special stubs (m_callToExit, m_retHelper), we
   * have already unbalanced the return stack by doing a ret to
   * something other than enterTCHelper. In that case
   * SRJmpInsteadOfRet indicates to fake the return.
   */
  if (flags & SRFlags::JmpInsteadOfRet) {
    as.  pop(JIT::reg::rax);
    as.  jmp(JIT::reg::rax);
  } else {
    as.  ret();
  }

  if (debug || !persist) {
    /*
     * Not reached. For re-usable stubs, this also marks the end of the
     * code, for the relocator's benefit.
     */
    as.ud2();
  }

  if (!persist) {
    /*
     * Recycled stubs need to be uniformly sized. Make space for the
     * maximal possible service requests.
     */
    assert(as.frontier() - start <= maxStubSpace);
    // Do not use nops, or the relocator will strip them out.
    while (as.frontier() - start <= maxStubSpace - 2) as.ud2();
    if (as.frontier() - start < maxStubSpace) as.int3();
    assert(as.frontier() - start == maxStubSpace);
  }

  end = cb.frontier();
  return retval;
}

TCA
emitServiceReqWork(CodeBlock& cb, TCA start, SRFlags flags,
                   ServiceRequest req, const ServiceReqArgVec& argv) {
  TCA end;
  auto ret = emitServiceReqImpl(start, start, end, maxStubSpace(), flags,
                                req, argv);
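
  // If the request was emitted at cb's frontier rather than into a recycled
  // stub, advance the frontier past what we just wrote.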
  if (start == cb.frontier()) {
    cb.skip(end - start);
  }
  return ret;
}

void emitBindSideExit(CodeBlock& cb, CodeBlock& frozen, JIT::ConditionCode cc,
                      SrcKey dest) {
  emitBindJ(cb, frozen, cc, dest, REQ_BIND_SIDE_EXIT);
}

void emitBindJcc(CodeBlock& cb, CodeBlock& frozen, JIT::ConditionCode cc,
                 SrcKey dest) {
  emitBindJ(cb, frozen, cc, dest, REQ_BIND_JCC);
}

void emitBindJmp(CodeBlock& cb, CodeBlock& frozen, SrcKey dest) {
  emitBindJ(cb, frozen, JIT::CC_None, dest, REQ_BIND_JMP);
}

int32_t emitBindCall(CodeBlock& mainCode, CodeBlock& coldCode,
                     CodeBlock& frozenCode, SrcKey srcKey,
                     const Func* funcd, int numArgs) {
  // If this is a call to a builtin and we don't need any argument
  // munging, we can skip the prologue system and do it inline.
  if (isNativeImplCall(funcd, numArgs)) {
    Asm a { mainCode };
    auto retAddr = (int64_t)mcg->tx().uniqueStubs.retHelper;
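    // Store the retHelper unique stub's address into the new ActRec's saved
    // RIP slot: as a 32-bit immediate when the address fits in a dword,
    // otherwise materialized through rax.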
    if (deltaFits(retAddr, sz::dword)) {
      a.storeq(int32_t(retAddr),
               rVmSp[cellsToBytes(numArgs) + AROFF(m_savedRip)]);
    } else {
      a.lea(rip[retAddr], reg::rax);
      a.storeq(reg::rax, rVmSp[cellsToBytes(numArgs) + AROFF(m_savedRip)]);
    }
    assert(funcd->numLocals() == funcd->numParams());
    assert(funcd->numIterators() == 0);
    emitLea(a, rVmSp[cellsToBytes(numArgs)], rVmFp);
    emitCheckSurpriseFlagsEnter(mainCode, coldCode, Fixup(0, numArgs));
    // rVmSp is already correctly adjusted, because there are no locals
    // other than the arguments passed.
    return emitNativeImpl(mainCode, funcd);
  }

  Asm a { mainCode };
  if (debug && RuntimeOption::EvalHHIRGenerateAsserts) {
    auto off = cellsToBytes(numArgs) + AROFF(m_savedRip);
    emitImmStoreq(a, kUninitializedRIP, rVmSp[off]);
  }
  // Stash the callee's rVmFp into rStashedAR for the callee's prologue.
  emitLea(a, rVmSp[cellsToBytes(numArgs)], rStashedAR);
  emitBindCallHelper(mainCode, frozenCode, srcKey, funcd, numArgs);
  return 0;
}

}}}