Remove SpillFrame, merge its memory effects into CallEffects and InlineEnterEffects
hphp/runtime/vm/jit/service-requests.cpp
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/runtime/vm/jit/service-requests.h"

#include "hphp/runtime/vm/jit/types.h"
#include "hphp/runtime/vm/jit/abi.h"
#include "hphp/runtime/vm/jit/align.h"
#include "hphp/runtime/vm/jit/stack-offsets.h"
#include "hphp/runtime/vm/jit/stub-alloc.h"
#include "hphp/runtime/vm/jit/tc.h"
#include "hphp/runtime/vm/jit/translator-inline.h"
#include "hphp/runtime/vm/jit/unique-stubs.h"
#include "hphp/runtime/vm/jit/vasm-gen.h"
#include "hphp/runtime/vm/jit/vasm-instr.h"
#include "hphp/runtime/vm/jit/vasm-unit.h"
#include "hphp/runtime/vm/resumable.h"

#include "hphp/util/arch.h"
#include "hphp/util/data-block.h"
#include "hphp/util/trace.h"

#include "hphp/vixl/a64/macro-assembler-a64.h"
#include "hphp/vixl/a64/disasm-a64.h"

#include "hphp/ppc64-asm/decoded-instr-ppc64.h"

#include <folly/Optional.h>
namespace HPHP { namespace jit { namespace svcreq {

///////////////////////////////////////////////////////////////////////////////

TRACE_SET_MOD(servicereq);

///////////////////////////////////////////////////////////////////////////////

namespace detail {

///////////////////////////////////////////////////////////////////////////////

/*
 * Service request stub emitter.
 *
 * Emit a service request stub of type `sr' at `start' in `cb'.
 */
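/*
 * Rough sketch of the emitted stub (instruction selection is ultimately up
 * to vasm and differs across architectures; x64-flavored pseudo-assembly is
 * shown here for illustration only):
 *
 *   lea  -cellsToBytes(spOff)(%rvmfp), %rvmsp   ; only when spOff is given
 *   mov  $arg[i], %r_svcreq_arg(i)              ; one per request argument
 *   lea  stub_addr(%rip), %r_svcreq_stub        ; or $0 for persistent stubs
 *   mov  $sr, %r_svcreq_req
 *   jmp  handleSRHelper
 */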
void emit_svcreq(CodeBlock& cb,
                 DataBlock& data,
                 CGMeta& meta,
                 TCA start,
                 bool persist,
                 folly::Optional<FPInvOffset> spOff,
                 ServiceRequest sr,
                 const ArgVec& argv) {
  FTRACE(2, "svcreq @{} {}(", start, to_name(sr));

  auto const is_reused = start != cb.frontier();

  if (!is_reused) cb.assertCanEmit(stub_size());

  CodeBlock stub;
  auto const realAddr = is_reused ? start : cb.toDestAddress(start);
  stub.init(start, realAddr, stub_size(), "svcreq_stub");

  {
    Vauto vasm{stub, stub, data, meta};
    auto& v = vasm.main();

    // If we have an spOff, materialize rvmsp() so that handleSRHelper() can do
    // a VM reg sync.  (When we don't have an spOff, the caller of the service
    // request was responsible for making sure rvmsp already contained the top
    // of the stack.)
    if (spOff) {
      v << lea{rvmfp()[-cellsToBytes(spOff->offset)], rvmsp()};
    }

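    // Registers that must stay live across the tail jump into handleSRHelper
    // below; leave_trace_regs() is the base set that is live whenever control
    // leaves translated code, and each argument register is added to it as it
    // is filled in.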
    auto live_out = leave_trace_regs();

    assertx(argv.size() <= kMaxArgs);

    // Pick up CondCode arguments first---vasm may optimize immediate loads
    // into operations which clobber status flags.
    for (auto i = 0; i < argv.size(); ++i) {
      auto const& arg = argv[i];
      if (arg.kind != Arg::Kind::CondCode) continue;

      FTRACE(2, "c({}), ", cc_names[arg.cc]);
      v << setcc{arg.cc, r_svcreq_sf(), rbyte(r_svcreq_arg(i))};
    }

    for (auto i = 0; i < argv.size(); ++i) {
      auto const& arg = argv[i];
      auto const r = r_svcreq_arg(i);

      switch (arg.kind) {
        case Arg::Kind::Immed:
          FTRACE(2, "{}, ", arg.imm);
          v << copy{v.cns(arg.imm), r};
          break;
        case Arg::Kind::Address:
          FTRACE(2, "{}(%rip), ", arg.imm);
          v << leap{reg::rip[arg.imm], r};
          break;
        case Arg::Kind::CondCode:
          break;
      }
      live_out |= r;
    }

    FTRACE(2, ") : stub@");

    if (persist) {
      FTRACE(2, "<none>");
      v << copy{v.cns(0), r_svcreq_stub()};
    } else {
      FTRACE(2, "{}", stub.base());
      v << leap{reg::rip[int64_t(stub.base())], r_svcreq_stub()};
    }
    v << copy{v.cns(sr), r_svcreq_req()};

    live_out |= r_svcreq_stub();
    live_out |= r_svcreq_req();

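    // Everything is in place; tail-call into the handleSRHelper unique stub,
    // which saves the argument registers and hands the request off to the
    // runtime's service request handler.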
    v << jmpi{tc::ustubs().handleSRHelper, live_out};

    // We pad ephemeral stubs unconditionally.  This is required for
    // correctness by the x64 code relocator.
    vasm.unit().padding = !persist;
  }
  // NB: the Vauto's destructor runs at the end of the scope above; that is
  // what actually assembles the unit into `stub', so stub.used() below
  // reflects the finished code.

  if (!is_reused) cb.skip(stub.used());
}
///////////////////////////////////////////////////////////////////////////////

}  // namespace detail

///////////////////////////////////////////////////////////////////////////////
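/*
 * Stub emitters for the specific service request types.
 *
 * Persistent stubs are emitted in place and pass a null stub address to
 * handleSRHelper, so they are never reclaimed; ephemeral stubs live in TC
 * stub space obtained from allocTCStub() and pass their own address (see
 * emit_svcreq above) so that the stub can be reclaimed once the request has
 * been serviced.
 */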
TCA emit_bindjmp_stub(CodeBlock& cb, DataBlock& data, CGMeta& fixups,
                      FPInvOffset spOff,
                      TCA jmp, SrcKey target, TransFlags trflags) {
  return emit_ephemeral(
    cb,
    data,
    fixups,
    allocTCStub(cb, &fixups),
    target.resumeMode() != ResumeMode::None
      ? folly::none : folly::make_optional(spOff),
    REQ_BIND_JMP,
    jmp,
    target.toAtomicInt(),
    trflags.packed
  );
}
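// Note: in these emitters, when the target is in a resumed context (an async
// function or generator body), the VM stack is not at a fixed offset from the
// frame pointer, so no spOff is passed and rvmsp() is assumed to already be
// correct; this mirrors the spOff handling in emit_svcreq().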
TCA emit_bindaddr_stub(CodeBlock& cb, DataBlock& data, CGMeta& fixups,
                       FPInvOffset spOff,
                       TCA* addr, SrcKey target, TransFlags trflags) {
  // Right now it's possible that addr isn't PIC addressable, as it may point
  // into the heap (SSwitchMap binds addresses directly into its heap memory;
  // see #10347945).  Passing a TCA generates an RIP-relative address which can
  // be handled by the relocation logic, while a TCA* will generate an
  // immediate address which will not be remapped.
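  // An RIP-relative displacement is a signed 32-bit quantity, so only encode
  // addr RIP-relatively when its distance from the code being emitted fits in
  // a dword; otherwise fall back to passing the raw TCA* as an immediate.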
  if (deltaFits((TCA)addr - cb.frontier(), sz::dword)) {
    return emit_ephemeral(
      cb,
      data,
      fixups,
      allocTCStub(cb, &fixups),
      target.resumeMode() != ResumeMode::None
        ? folly::none : folly::make_optional(spOff),
      REQ_BIND_ADDR,
      (TCA)addr, // needs to be RIP relative so that we can relocate it
      target.toAtomicInt(),
      trflags.packed
    );
  }

  return emit_ephemeral(
    cb,
    data,
    fixups,
    allocTCStub(cb, &fixups),
    target.resumeMode() != ResumeMode::None
      ? folly::none : folly::make_optional(spOff),
    REQ_BIND_ADDR,
    addr,
    target.toAtomicInt(),
    trflags.packed
  );
}
TCA emit_retranslate_stub(CodeBlock& cb, DataBlock& data, CGMeta& fixups,
                          FPInvOffset spOff,
                          SrcKey target, TransFlags trflags) {
  return emit_persistent(
    cb,
    data,
    fixups,
    target.resumeMode() != ResumeMode::None
      ? folly::none : folly::make_optional(spOff),
    REQ_RETRANSLATE,
    target.offset(),
    trflags.packed
  );
}
TCA emit_retranslate_opt_stub(CodeBlock& cb, DataBlock& data, CGMeta& fixups,
                              FPInvOffset spOff,
                              SrcKey sk) {
  return emit_persistent(
    cb,
    data,
    fixups,
    sk.resumeMode() != ResumeMode::None
      ? folly::none : folly::make_optional(spOff),
    REQ_RETRANSLATE_OPT,
    sk.toAtomicInt()
  );
}

///////////////////////////////////////////////////////////////////////////////

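/*
 * Recover the spOff that a service request stub was emitted with, by decoding
 * the stub's first instruction (the rvmsp-from-rvmfp adjustment emitted by
 * emit_svcreq() above) in an architecture-specific way.  If that instruction
 * was optimized into a plain move, the offset was zero.
 */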
FPInvOffset extract_spoff(TCA stub) {
  switch (arch()) {
    case Arch::X64: {
      HPHP::jit::x64::DecodedInstruction instr(stub);

      // If it's not a lea, vasm optimized a lea{rvmfp, rvmsp} to a mov, so
      // the offset was 0.
      if (!instr.isLea()) return FPInvOffset{0};

      auto const offBytes = safe_cast<int32_t>(instr.offset());
      always_assert((offBytes % sizeof(Cell)) == 0);
      return FPInvOffset{-(offBytes / int32_t{sizeof(Cell)})};
    }

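    // On ARM, a small offset is applied with a single add/sub-immediate from
    // rvmfp; larger offsets are materialized with a movz/movn followed by a
    // shifted add/sub, in which case the immediate is recovered from the
    // move-wide instruction instead.  (This is a reading aid for the cases
    // handled below, not a guarantee of every sequence vasm may choose.)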
    case Arch::ARM: {
      auto instr = reinterpret_cast<vixl::Instruction*>(stub);

      if (instr->IsAddSubImmediate()) {
        auto const offBytes = safe_cast<int32_t>(instr->ImmAddSub());
        always_assert((offBytes % sizeof(Cell)) == 0);

        if (instr->Mask(vixl::AddSubImmediateMask) == vixl::SUB_w_imm ||
            instr->Mask(vixl::AddSubImmediateMask) == vixl::SUB_x_imm) {
          return FPInvOffset{offBytes / int32_t{sizeof(Cell)}};
        } else if (instr->Mask(vixl::AddSubImmediateMask) == vixl::ADD_w_imm ||
                   instr->Mask(vixl::AddSubImmediateMask) == vixl::ADD_x_imm) {
          return FPInvOffset{-(offBytes / int32_t{sizeof(Cell)})};
        }
      } else if (instr->IsMovn()) {
        auto next = instr->NextInstruction();
        always_assert(next->Mask(vixl::AddSubShiftedMask) == vixl::ADD_w_shift ||
                      next->Mask(vixl::AddSubShiftedMask) == vixl::ADD_x_shift);
        auto const offBytes = safe_cast<int32_t>(~instr->ImmMoveWide());
        always_assert((offBytes % sizeof(Cell)) == 0);
        return FPInvOffset{-(offBytes / int32_t{sizeof(Cell)})};
      } else if (instr->IsMovz()) {
        auto next = instr->NextInstruction();
        always_assert(next->Mask(vixl::AddSubShiftedMask) == vixl::SUB_w_shift ||
                      next->Mask(vixl::AddSubShiftedMask) == vixl::SUB_x_shift);
        auto const offBytes = safe_cast<int32_t>(instr->ImmMoveWide());
        always_assert((offBytes % sizeof(Cell)) == 0);
        return FPInvOffset{offBytes / int32_t{sizeof(Cell)}};
      } else {
        always_assert(false && "Expected an instruction that offsets SP");
      }
    }

    case Arch::PPC64: {
      ppc64_asm::DecodedInstruction instr(stub);
      if (!instr.isSpOffsetInstr()) {
        return FPInvOffset{0};
      } else {
        auto const offBytes = safe_cast<int32_t>(instr.offset());
        return FPInvOffset{-(offBytes / int32_t{sizeof(Cell)})};
      }
    }
  }
  not_reached();
}

///////////////////////////////////////////////////////////////////////////////

}}}