Move vmfp, vmsp, and vmpc into RDS
[hiphop-php.git] / hphp/runtime/vm/jit/fixup.cpp
blob 871100f1c151ade83f7bc8d849d0e37ca4259161
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2014 Facebook, Inc. (http://www.facebook.com)     |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#include "hphp/runtime/vm/jit/fixup.h"

#include <cstring>

#include "hphp/vixl/a64/simulator-a64.h"

#include "hphp/runtime/vm/vm-regs.h"
#include "hphp/runtime/vm/jit/abi-arm.h"
#include "hphp/runtime/vm/jit/mc-generator.h"
#include "hphp/runtime/vm/jit/translator-inline.h"
#include "hphp/util/data-block.h"

namespace HPHP {
namespace JIT {

bool
FixupMap::getFrameRegs(const ActRec* ar, const ActRec* prevAr,
                       VMRegs* outVMRegs) const {
  CTCA tca = (CTCA)ar->m_savedRip;
  // Non-obvious off-by-one fun: if the *return address* points into the TC,
  // then the frame we were running on in the TC is actually the previous
  // frame.
  ar = ar->m_sfp;
  auto* ent = m_fixups.find(tca);
  if (!ent) return false;
  if (ent->isIndirect()) {
    // Note: if indirect fixups happen frequently enough, we could
    // just compare savedRip to be less than some threshold where
    // stubs in a.code stop.
    assert(prevAr);
    auto pRealRip = ent->indirect.returnIpDisp +
      uintptr_t(prevAr->m_sfp);
    ent = m_fixups.find(*reinterpret_cast<CTCA*>(pRealRip));
    assert(ent && !ent->isIndirect());
  }
  regsFromActRec(tca, ar, ent->fixup, outVMRegs);
  return true;
}
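
// To make the off-by-one above concrete, a minimal sketch (illustration
// only, not part of the original file) of how the two frames relate when a
// native helper has been called from translated code:
//
//   [native frame]  m_savedRip -> address in the TC  (the m_fixups key)
//                   m_sfp      -> [vm frame]         (the ActRec that was
//                                                     live in the TC)
//
// The fixup entry is found via the return address of the *native* frame,
// while the VM registers are reconstructed against the frame behind it.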

void
FixupMap::recordIndirectFixup(CodeAddress frontier, int dwordsPushed) {
  recordIndirectFixup(frontier, IndirectFixup((2 + dwordsPushed) * 8));
}
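
// A worked example of the displacement recorded above (illustration only,
// assuming the two fixed 8-byte slots are the saved frame pointer and the
// return address, per the x64 calling convention): with dwordsPushed == 3,
// IndirectFixup stores (2 + 3) * 8 == 40, i.e. the two fixed slots plus the
// three pushed dwords, so getFrameRegs() can recover the real return IP at
// prevAr->m_sfp + 40.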

namespace {

// If this function asserts or crashes, it is usually because VMRegAnchor was
// not used to force a sync prior to calling a runtime function.
bool isVMFrame(const ExecutionContext* ec, const ActRec* ar) {
  assert(ar);
  // Determine whether the frame pointer is outside the native stack, cleverly
  // using a single unsigned comparison to do both halves of the bounds check.
  bool ret = uintptr_t(ar) - s_stackLimit >= s_stackSize;
  assert(!ret || isValidVMStackAddress(ar) ||
         (ar->m_func->validate(), ar->resumed()));
  return ret;
}

}
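
// The single unsigned comparison in isVMFrame() works because an address
// below s_stackLimit wraps around under unsigned subtraction to a huge value
// that also fails the bound test. A standalone sketch (illustration only;
// outsideRange is hypothetical, not a helper in this file):
//
//   bool outsideRange(uintptr_t addr, uintptr_t base, size_t size) {
//     // Equivalent to: addr < base || addr >= base + size
//     return addr - base >= size;
//   }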

void
FixupMap::fixupWork(ExecutionContext* ec, ActRec* rbp) const {
  assert(RuntimeOption::EvalJit);

  TRACE(1, "fixup(begin):\n");

  auto* nextRbp = rbp;
  rbp = 0;
  do {
    auto* prevRbp = rbp;
    rbp = nextRbp;
    assert(rbp && "Missing fixup for native call");
    nextRbp = rbp->m_sfp;
    TRACE(2, "considering frame %p, %p\n", rbp, (void*)rbp->m_savedRip);

    if (isVMFrame(ec, nextRbp)) {
      TRACE(2, "fixup checking vm frame %s\n",
            nextRbp->m_func->name()->data());
      VMRegs regs;
      if (getFrameRegs(rbp, prevRbp, &regs)) {
        TRACE(2, "fixup(end): func %s fp %p sp %p pc %p\n",
              regs.m_fp->m_func->name()->data(),
              regs.m_fp, regs.m_sp, regs.m_pc);
        auto& vmRegs = vmRegsUnsafe();
        vmRegs.fp = const_cast<ActRec*>(regs.m_fp);
        vmRegs.pc = reinterpret_cast<PC>(regs.m_pc);
        vmRegs.stack.top() = regs.m_sp;
        return;
      }
    }
  } while (rbp && rbp != nextRbp);

  // OK, we've exhausted the entire actRec chain. We are only
  // invoking ::fixup() from contexts that were known to be called out
  // of the TC, so this cannot happen.
  always_assert(false);
}
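
// Shape of the walk above (illustration only): each iteration slides a
// three-frame window up the native call chain, so that when nextRbp is the
// first VM frame, rbp is the native frame whose saved return address keys
// the fixup, and prevRbp is available for resolving indirect fixups:
//
//   prevRbp -> [native frame N-1]
//   rbp     -> [native frame N]   m_savedRip points into the TC
//   nextRbp -> [vm frame]         first frame on the VM stack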

void
FixupMap::fixupWorkSimulated(ExecutionContext* ec) const {
  TRACE(1, "fixup(begin):\n");

  auto isVMFrame = [] (ActRec* ar, const vixl::Simulator* sim) {
    // If this assert is failing, you may have forgotten a sync point somewhere
    assert(ar);
    bool ret =
      uintptr_t(ar) - s_stackLimit >= s_stackSize &&
      !sim->is_on_stack(ar);
    assert(!ret ||
           (ar >= vmStack().getStackLowAddress() &&
            ar < vmStack().getStackHighAddress()) ||
           ar->resumed());
    return ret;
  };

  // For each nested simulator (corresponding to nested VM invocations), look
  // at its PC to find a potential fixup key.
  //
  // Callstack walking is necessary, because we may get called from a
  // uniqueStub.
  for (int i = ec->m_activeSims.size() - 1; i >= 0; --i) {
    auto const* sim = ec->m_activeSims[i];
    auto* rbp = reinterpret_cast<ActRec*>(sim->xreg(JIT::ARM::rVmFp.code()));
    auto tca = reinterpret_cast<TCA>(sim->pc());
    TRACE(2, "considering frame %p, %p\n", rbp, tca);

    while (rbp && !isVMFrame(rbp, sim)) {
      tca = reinterpret_cast<TCA>(rbp->m_savedRip);
      rbp = rbp->m_sfp;
    }

    if (!rbp) continue;

    auto* ent = m_fixups.find(tca);
    if (!ent) {
      continue;
    }

    if (ent->isIndirect()) {
      not_implemented();
    }

    VMRegs regs;
    regsFromActRec(tca, rbp, ent->fixup, &regs);
    TRACE(2, "fixup(end): func %s fp %p sp %p pc %p\n",
          regs.m_fp->m_func->name()->data(),
          regs.m_fp, regs.m_sp, regs.m_pc);
    vmfp() = const_cast<ActRec*>(regs.m_fp);
    vmpc() = reinterpret_cast<PC>(regs.m_pc);
    vmsp() = regs.m_sp;
    return;
  }

  // This shouldn't be reached.
  always_assert(false);
}

void
FixupMap::fixup(ExecutionContext* ec) const {
  if (RuntimeOption::EvalSimulateARM) {
    // Walking the C++ stack doesn't work in simulation mode. Fortunately, the
    // execution context has a stack of simulators, which we consult instead.
    fixupWorkSimulated(ec);
  } else {
    // Start looking for fixup entries at the current (C++) frame. This
    // will walk the frames upward until we find a TC frame.
    DECLARE_FRAME_POINTER(framePtr);
    fixupWork(ec, framePtr);
  }
}
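
// Typical call context (illustration only): a runtime helper that needs the
// VM registers to be accurate establishes a sync point (the VMRegAnchor
// mechanism referred to in the comments above) before touching
// vmfp()/vmsp()/vmpc(); fixup() is what ultimately repairs those registers
// by walking back to the nearest TC frame.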

/* This is somewhat hacky. It decides which helpers/builtins should
 * use eager vmreganchor based on profile information. Using eager
 * vmreganchor for all helper calls is a perf regression. */
bool
FixupMap::eagerRecord(const Func* func) {
  const char* list[] = {
    "func_get_args",
    "__SystemLib\\func_get_args_sl",
    "get_called_class",
    "func_num_args",
    "__SystemLib\\func_num_arg_",
    "array_filter",
    "array_map",
    "__SystemLib\\func_slice_args",
  };

  for (size_t i = 0; i < sizeof(list)/sizeof(list[0]); i++) {
    if (!strcmp(func->name()->data(), list[i])) {
      return true;
    }
  }
  if (func->cls() && !strcmp(func->cls()->name()->data(), "WaitHandle")
      && !strcmp(func->name()->data(), "join")) {
    return true;
  }
  return false;
}

} // HPHP::JIT

} // HPHP