/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com)  |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#include "hphp/runtime/vm/jit/srcdb.h"

#include "hphp/runtime/vm/debug/debug.h"
#include "hphp/runtime/vm/treadmill.h"

#include "hphp/runtime/vm/jit/cg-meta.h"
#include "hphp/runtime/vm/jit/relocation.h"
#include "hphp/runtime/vm/jit/service-requests.h"
#include "hphp/runtime/vm/jit/smashable-instr.h"
#include "hphp/runtime/vm/jit/tc.h"

#include "hphp/util/trace.h"

#include <cstdarg>
#include <cstdint>
#include <string>

namespace HPHP { namespace jit {

TRACE_SET_MOD(trans)

void IncomingBranch::relocate(RelocationInfo& rel) {
  // compute adjustedTarget before altering the smash address,
  // because it might be a 5-byte nop
  TCA adjustedTarget = rel.adjustedAddressAfter(target());

  if (TCA adjusted = rel.adjustedAddressAfter(toSmash())) {
    m_ptr.set(m_ptr.tag(), adjusted);
  }

  if (adjustedTarget) {
    FTRACE_MOD(Trace::mcg, 1, "Patching: 0x{:08x} from 0x{:08x} to 0x{:08x}\n",
               (uintptr_t)toSmash(), (uintptr_t)target(),
               (uintptr_t)adjustedTarget);

    patch(adjustedTarget);
  }
}

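/*
 * Redirect this incoming branch to `dest`, either by smashing the jmp/jcc
 * instruction it describes or, for Tag::ADDR, by storing the new address
 * through the recorded pointer slot.
 */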
void IncomingBranch::patch(TCA dest) {
  switch (type()) {
    case Tag::JMP:
      smashJmp(toSmash(), dest);
      Debug::DebugInfo::Get()->recordRelocMap(toSmash(), dest, "Arc-2");
      break;

    case Tag::JCC:
      smashJcc(toSmash(), dest);
      Debug::DebugInfo::Get()->recordRelocMap(toSmash(), dest, "Arc-1");
      break;

    case Tag::ADDR: {
      // Store the new destination through the raw pointer slot.  The slot
      // must be atomically accessible so concurrent readers see either the
      // old or the new target, never a torn value.
      TCA* addr = reinterpret_cast<TCA*>(toSmash());
      assert_address_is_atomically_accessible(addr);
      *addr = dest;
      break;
    }
  }
}

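/*
 * Read this branch's current destination back out of the code: decode the
 * smashable jmp/jcc, or load the pointer slot for Tag::ADDR.
 */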
TCA IncomingBranch::target() const {
  switch (type()) {
    case Tag::JMP:
      return smashableJmpTarget(toSmash());

    case Tag::JCC:
      return smashableJccTarget(toSmash());

    case Tag::ADDR:
      return *reinterpret_cast<TCA*>(toSmash());
  }
  always_assert(false);
}

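// TransLoc stores its main/cold/frozen code addresses as compact offsets
// into the translation cache; these helpers convert between offsets and
// absolute addresses.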
TCA TransLoc::mainStart()   const { return tc::offsetToAddr(m_mainOff); }
TCA TransLoc::coldStart()   const { return tc::offsetToAddr(m_coldOff); }
TCA TransLoc::frozenStart() const { return tc::offsetToAddr(m_frozenOff); }

void TransLoc::setMainStart(TCA newStart) {
  assert(tc::isValidCodeAddress(newStart));
  m_mainOff = tc::addrToOffset(newStart);
}

void TransLoc::setColdStart(TCA newStart) {
  assert(tc::isValidCodeAddress(newStart));
  m_coldOff = tc::addrToOffset(newStart);
}

void TransLoc::setFrozenStart(TCA newStart) {
  assert(tc::isValidCodeAddress(newStart));
  m_frozenOff = tc::addrToOffset(newStart);
}

/*
 * The fallback translation is where to jump to if the
 * currently-translating translation's checks fail.
 *
 * The current heuristic we use for translation chaining is to assume
 * the most common cases are probably translated first, so we chain
 * new translations on the end.  This means that if we have to fall
 * back from the currently-translating translation, we jump to the
 * "anchor" translation (which is just a REQ_RETRANSLATE).
 */
TCA SrcRec::getFallbackTranslation() const {
  assertx(m_anchorTranslation);
  return m_anchorTranslation;
}

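/*
 * Rough shape of the resulting chain (an illustrative sketch, not code
 * emitted by this file):
 *
 *   incoming branches --> translation 1 --guards fail--> translation 2
 *                                     ...
 *                         tail translation --guards fail--> anchor
 *                                       (REQ_RETRANSLATE: retranslate, or
 *                                        interpret if the lease is taken)
 */
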
FPInvOffset SrcRec::nonResumedSPOff() const {
  return svcreq::extract_spoff(getFallbackTranslation());
}

void SrcRec::chainFrom(IncomingBranch br) {
  assertx(br.type() == IncomingBranch::Tag::ADDR ||
          tc::isValidCodeAddress(br.toSmash()));
  TCA destAddr = getTopTranslation();
  m_incomingBranches.push_back(br);
  TRACE(1, "SrcRec(%p)::chainFrom %p -> %p (type %d); %zd incoming branches\n",
        this,
        br.toSmash(), destAddr, static_cast<int>(br.type()),
        m_incomingBranches.size());
  br.patch(destAddr);

  if (RuntimeOption::EvalEnableReusableTC) {
    tc::recordJump(br.toSmash(), this);
  }
}

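/*
 * Typical use (an illustrative sketch only: `jmpAddr` is a hypothetical
 * address and jmpFrom() is assumed to be the smashable-jmp factory declared
 * in srcdb.h).  Code that has just emitted a smashable jump to this SrcKey
 * registers it so it gets re-pointed whenever a newer translation is
 * published:
 *
 *   sr->chainFrom(IncomingBranch::jmpFrom(jmpAddr));
 */
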
void SrcRec::newTranslation(TransLoc loc,
                            GrowableVector<IncomingBranch>& tailBranches) {
  auto srLock = writelock();
  // When a translation punts because we've hit the translation limit, we
  // still generate one more translation, which calls into the interpreter.
  assertx(m_translations.size() <=
          std::max(RuntimeOption::EvalJitMaxProfileTranslations,
                   RuntimeOption::EvalJitMaxTranslations));

  TRACE(1, "SrcRec(%p)::newTranslation @%p, ", this, loc.mainStart());

  m_translations.push_back(loc);
  if (!m_topTranslation.get()) {
    m_topTranslation = loc.mainStart();
    patchIncomingBranches(loc.mainStart());
  }

  /*
   * Link all the jumps from the current tail translation to this new
   * one.
   *
   * It's (mostly) ok if someone is running in this code while we do
   * this: we hold the write lease, so they'll jump to the anchor
   * instead, hit REQ_RETRANSLATE, fail to get the write lease, and
   * interpret.  FIXME: Unfortunately, right now, in an unlikely race
   * another thread could create another translation with the same
   * type specialization that we just created in this case.  (If we
   * happen to release the write lease after they jump but before they
   * get into REQ_RETRANSLATE, they'll acquire it and generate a
   * translation, possibly for this same situation.)
   */
  for (auto& br : m_tailFallbackJumps) {
    br.patch(loc.mainStart());
  }

  // This is the new tail translation, so store the fallback jump list
  // in case we translate this again.
  m_tailFallbackJumps.swap(tailBranches);
}

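/*
 * After the code for this SrcRec has been moved by the relocator, rewrite
 * every address we cache (anchor, top translation, per-translation
 * main/cold/frozen starts) and every recorded branch.  The caller must hold
 * the global code lock, which is asserted below.
 */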
void SrcRec::relocate(RelocationInfo& rel) {
  tc::assertOwnsCodeLock();

  auto srLock = writelock();
  if (auto adjusted = rel.adjustedAddressAfter(m_anchorTranslation)) {
    m_anchorTranslation = adjusted;
  }

  if (auto adjusted = rel.adjustedAddressAfter(m_topTranslation.get())) {
    m_topTranslation = adjusted;
  }

  for (auto &t : m_translations) {
    if (TCA adjusted = rel.adjustedAddressAfter(t.mainStart())) {
      t.setMainStart(adjusted);
    }
    if (TCA adjusted = rel.adjustedAddressAfter(t.coldStart())) {
      t.setColdStart(adjusted);
    }
    if (TCA adjusted = rel.adjustedAddressAfter(t.frozenStart())) {
      t.setFrozenStart(adjusted);
    }
  }

  for (auto &ib : m_tailFallbackJumps) {
    ib.relocate(rel);
  }

  for (auto &ib : m_incomingBranches) {
    ib.relocate(rel);
  }
}

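/*
 * Install a debugger guard in front of this SrcRec's translations: all
 * existing incoming branches are re-pointed at dbgGuard, and from then on
 * rechaining happens by smashing the jump at dbgBranchGuardSrc rather than
 * patching each branch individually.
 */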
void SrcRec::addDebuggerGuard(TCA dbgGuard, TCA dbgBranchGuardSrc) {
  assertx(!m_dbgBranchGuardSrc);

  TRACE(1, "SrcRec(%p)::addDebuggerGuard @%p, "
        "%zd incoming branches to rechain\n",
        this, dbgGuard, m_incomingBranches.size());

  patchIncomingBranches(dbgGuard);

  // Set m_dbgBranchGuardSrc after patching, so we don't try to patch
  // the debug guard.
  m_dbgBranchGuardSrc = dbgBranchGuardSrc;
  m_topTranslation = dbgGuard;
}

void SrcRec::patchIncomingBranches(TCA newStart) {
  if (hasDebuggerGuard()) {
    // We have a debugger guard, so all jumps to us funnel through
    // this.  Just smash m_dbgBranchGuardSrc.
    TRACE(1, "smashing m_dbgBranchGuardSrc @%p\n", m_dbgBranchGuardSrc.get());
    smashJmp(m_dbgBranchGuardSrc, newStart);
    return;
  }

  TRACE(1, "%zd incoming branches to rechain\n", m_incomingBranches.size());

  for (auto &br : m_incomingBranches) {
    TRACE(1, "SrcRec(%p)::newTranslation rechaining @%p -> %p\n",
          this, br.toSmash(), newStart);
    br.patch(newStart);
  }
}

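// Forget the incoming branch that was smashed at `toSmash`, so later
// rechains of this SrcRec no longer touch it.  The branch must currently
// be recorded here.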
void SrcRec::removeIncomingBranch(TCA toSmash) {
  auto srLock = writelock();

  auto end = std::remove_if(
    m_incomingBranches.begin(),
    m_incomingBranches.end(),
    [toSmash] (const IncomingBranch& ib) { return ib.toSmash() == toSmash; }
  );
  assertx(end != m_incomingBranches.end());
  m_incomingBranches.setEnd(end);
}

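/*
 * Throw away every translation for this SrcKey and send all callers back
 * to the anchor, so the next request retranslates from scratch.  As the
 * comment below notes, this path is only used outside RepoAuthoritative
 * mode, i.e. when units can be invalidated at runtime.
 */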
void SrcRec::replaceOldTranslations() {
  auto srLock = writelock();

  // Everyone needs to give up on old translations; send them to the anchor,
  // which is a REQ_RETRANSLATE.
  auto translations = std::move(m_translations);
  m_tailFallbackJumps.clear();
  m_topTranslation = nullptr;

  /*
   * It may seem a little weird that we're about to point every
   * incoming branch at the anchor, since that's going to just
   * unconditionally retranslate this SrcKey and never patch the
   * incoming branch to do something else.
   *
   * The reason this is ok is that this mechanism is only used in
   * non-RepoAuthoritative mode, and the granularity of code
   * invalidation there is such that we'll only have incoming branches
   * like this basically within the same file, since we don't have
   * whole-program analysis.
   *
   * This means all these incoming branches are about to go away
   * anyway ...
   *
   * If we ever change that, we'll have to change this to patch to
   * some sort of rebind request.
   */
  assertx(!RuntimeOption::RepoAuthoritative || RuntimeOption::EvalJitPGO);
  patchIncomingBranches(m_anchorTranslation);

  // Now that we've smashed all the IBs for these translations, they should
  // be unreachable.  To prevent a race, we treadmill here and then reclaim
  // their associated TC space.
  if (RuntimeOption::EvalEnableReusableTC) {
    tc::reclaimTranslations(std::move(translations));
    return;
  }

  translations.clear();
}

} } // HPHP::jit