// From commit "Rename RefcountProfile to IncRefProfile"
// hphp/runtime/vm/jit/vasm-internal.cpp
// blob 315eba06713e4732aeebb67aad40a8b577aa24e3
1 /*
2 +----------------------------------------------------------------------+
3 | HipHop for PHP |
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
   +----------------------------------------------------------------------+
*/
17 #include "hphp/runtime/vm/jit/vasm-internal.h"
19 #include "hphp/runtime/vm/jit/asm-info.h"
20 #include "hphp/runtime/vm/jit/cg-meta.h"
21 #include "hphp/runtime/vm/jit/containers.h"
22 #include "hphp/runtime/vm/jit/func-guard.h"
23 #include "hphp/runtime/vm/jit/ir-opcode.h"
24 #include "hphp/runtime/vm/jit/service-requests.h"
25 #include "hphp/runtime/vm/jit/smashable-instr.h"
26 #include "hphp/runtime/vm/jit/srcdb.h"
27 #include "hphp/runtime/vm/jit/tc.h"
28 #include "hphp/runtime/vm/jit/trans-db.h"
29 #include "hphp/runtime/vm/jit/trans-rec.h"
30 #include "hphp/runtime/vm/jit/unique-stubs.h"
31 #include "hphp/runtime/vm/jit/vasm-instr.h"
32 #include "hphp/runtime/vm/jit/vasm-text.h"
33 #include "hphp/runtime/vm/jit/vasm-unit.h"
34 #include "hphp/runtime/vm/jit/vasm.h"
36 #include "hphp/util/data-block.h"
38 #include <vector>
40 namespace HPHP { namespace jit { namespace vasm_detail {
42 ///////////////////////////////////////////////////////////////////////////////
/*
 * Set up metadata collection for the unit being emitted.
 *
 * AsmInfo bookkeeping (a Snippet list per (area, block) pair) is only
 * allocated when an AsmInfo sink was supplied.  The HHBC map is only
 * collected when the TransDB is enabled or VTune integration is on.
 */
IRMetadataUpdater::IRMetadataUpdater(const Venv& env, AsmInfo* asm_info)
  : m_env(env)
  , m_asm_info(asm_info)
{
  if (m_asm_info) {
    // One vector of block-infos per code area, each sized to the number of
    // Vblocks in the unit.
    m_area_to_blockinfos.resize(m_env.text.areas().size());
    for (auto& r : m_area_to_blockinfos) r.resize(m_env.unit.blocks.size());
  }
  if (transdb::enabled() || RuntimeOption::EvalJitUseVtuneAPI) {
    m_bcmap = &env.meta.bcMap;
  }
}
/*
 * Record code-position metadata for a vasm instruction about to be emitted.
 *
 * For AsmInfo: close the previously open snippet at the current frontier and
 * open a new snippet tagged with the instruction's HHIR origin.
 *
 * For the TransDB bytecode map: append a new TransBCMapping whenever the
 * bytecode source (unit md5 + offset) differs from the previous entry.
 */
void IRMetadataUpdater::register_inst(const Vinstr& inst) {
  // Update HHIR mappings for AsmInfo.
  if (m_asm_info) {
    auto& snippets = block_info();
    auto const frontier = m_env.cb->frontier();

    if (!snippets.empty()) {
      // Terminate the still-open previous snippet at the current frontier.
      auto& snip = snippets.back();
      snip.range = TcaRange { snip.range.start(), frontier };
    }
    // Open a new snippet; its end stays null until the next instruction or
    // the end of the block fills it in.
    snippets.push_back(
      Snippet { inst.origin, TcaRange { frontier, nullptr } }
    );
  }
  m_origin = inst.origin;

  // Update HHBC mappings for the TransDB.
  if (m_bcmap && m_origin) {
    auto const sk = inst.origin->marker().sk();
    // Only push a new entry when the bytecode source location changed.
    if (m_bcmap->empty() ||
        m_bcmap->back().md5 != sk.unit()->md5() ||
        m_bcmap->back().bcStart != sk.offset()) {
      m_bcmap->push_back(TransBCMapping{
        sk.unit()->md5(),
        sk.offset(),
        m_env.text.main().code.frontier(),
        m_env.text.cold().code.frontier(),
        m_env.text.frozen().code.frontier()
      });
    }
  }
}
90 void IRMetadataUpdater::register_block_end() {
91 if (!m_asm_info) return;
92 auto& snippets = block_info();
94 if (!snippets.empty()) {
95 auto& snip = snippets.back();
96 snip.range = TcaRange { snip.range.start(), m_env.cb->frontier() };
100 void IRMetadataUpdater::finish(const jit::vector<Vlabel>& labels) {
101 if (!m_asm_info) return;
103 auto const& areas = m_env.text.areas();
105 for (auto i = 0; i < areas.size(); ++i) {
106 auto& block_infos = m_area_to_blockinfos[i];
108 for (auto const b : labels) {
109 auto const& snippets = block_infos[b];
110 if (snippets.empty()) continue;
112 const IRInstruction* origin = nullptr;
114 for (auto const& snip : snippets) {
115 if (origin != snip.origin && snip.origin) {
116 origin = snip.origin;
118 m_asm_info->updateForInstruction(
119 origin,
120 static_cast<AreaIndex>(i),
121 snip.range.start(),
122 snip.range.end()
129 jit::vector<IRMetadataUpdater::Snippet>&
130 IRMetadataUpdater::block_info() {
131 auto const b = m_env.current;
132 auto const& block = m_env.unit.blocks[b];
134 return m_area_to_blockinfos[size_t(block.area_idx)][b];
137 ///////////////////////////////////////////////////////////////////////////////
139 bool is_empty_catch(const Vblock& block) {
140 return block.code.size() == 2 &&
141 block.code[0].op == Vinstr::landingpad &&
142 block.code[1].op == Vinstr::jmpi &&
143 block.code[1].jmpi_.target == tc::ustubs().endCatchHelper;
146 void register_catch_block(const Venv& env, const Venv::LabelPatch& p) {
147 // If the catch block is empty, we can just let tc_unwind_resume() and
148 // tc_unwind_personality() skip over our frame.
149 if (is_empty_catch(env.unit.blocks[p.target])) {
150 return;
153 auto const catch_target = env.addrs[p.target];
154 assertx(catch_target);
155 env.meta.catches.emplace_back(p.instr, catch_target);
158 ///////////////////////////////////////////////////////////////////////////////
160 namespace {
163 * Record in ProfData that the control-transfer instruction `jmp' is associated
164 * with the current translation being emitted.
166 void setJmpTransID(Venv& env, TCA jmp) {
167 if (!env.unit.context) return;
169 env.meta.setJmpTransID(
170 jmp, env.unit.context->transID, env.unit.context->kind
174 void registerFallbackJump(Venv& env, TCA jmp, ConditionCode cc) {
175 auto const incoming = cc == CC_None ? IncomingBranch::jmpFrom(jmp)
176 : IncomingBranch::jccFrom(jmp);
178 env.meta.inProgressTailJumps.push_back(incoming);
183 bool emit(Venv& env, const callphp& i) {
184 const auto call = emitSmashableCall(*env.cb, env.meta, i.stub);
185 setJmpTransID(env, call);
186 // If the callee is known, keep metadata to be able to eagerly smash the call.
187 if (i.func != nullptr) {
188 env.meta.smashableCallData[call] = PrologueID(i.func, i.nargs);
190 return true;
193 bool emit(Venv& env, const bindjmp& i) {
194 auto const jmp = emitSmashableJmp(*env.cb, env.meta, env.cb->frontier());
195 env.stubs.push_back({jmp, nullptr, i});
196 setJmpTransID(env, jmp);
197 env.meta.smashableJumpData[jmp] = {i.target, CGMeta::JumpKind::Bindjmp};
198 return true;
201 bool emit(Venv& env, const bindjcc& i) {
202 auto const jcc =
203 emitSmashableJcc(*env.cb, env.meta, env.cb->frontier(), i.cc);
204 env.stubs.push_back({nullptr, jcc, i});
205 setJmpTransID(env, jcc);
206 env.meta.smashableJumpData[jcc] = {i.target, CGMeta::JumpKind::Bindjcc};
207 return true;
210 bool emit(Venv& env, const bindaddr& i) {
211 env.stubs.push_back({nullptr, nullptr, i});
212 setJmpTransID(env, TCA(i.addr.get()));
213 env.meta.codePointers.emplace(i.addr.get());
214 return true;
217 bool emit(Venv& env, const fallback& i) {
218 auto const jmp = emitSmashableJmp(*env.cb, env.meta, env.cb->frontier());
219 env.stubs.push_back({jmp, nullptr, i});
220 registerFallbackJump(env, jmp, CC_None);
221 env.meta.smashableJumpData[jmp] = {i.target, CGMeta::JumpKind::Fallback};
222 return true;
225 bool emit(Venv& env, const fallbackcc& i) {
226 auto const jcc =
227 emitSmashableJcc(*env.cb, env.meta, env.cb->frontier(), i.cc);
228 env.stubs.push_back({nullptr, jcc, i});
229 registerFallbackJump(env, jcc, i.cc);
230 env.meta.smashableJumpData[jcc] = {i.target, CGMeta::JumpKind::Fallbackcc};
231 return true;
234 bool emit(Venv& env, const retransopt& i) {
235 svcreq::emit_retranslate_opt_stub(*env.cb, env.text.data(), env.meta,
236 i.spOff, i.sk);
237 return true;
240 bool emit(Venv& env, const funcguard& i) {
241 emitFuncGuard(i.func, *env.cb, env.meta, i.watch);
242 return true;
245 bool emit(Venv& env, const debugguardjmp& i) {
246 auto const jmp = emitSmashableJmp(*env.cb, env.meta, i.realCode);
247 if (i.watch) {
248 *i.watch = jmp;
249 env.meta.watchpoints.push_back(i.watch);
251 return true;
254 ///////////////////////////////////////////////////////////////////////////////
/*
 * Emit (in frozen code) the service-request stub for the pending patch `p',
 * then register the jmp/jcc recorded in `p' to be pointed at the stub.
 */
void emit_svcreq_stub(Venv& env, const Venv::SvcReqPatch& p) {
  auto& frozen = env.text.frozen().code;

  TCA stub = nullptr;

  switch (p.svcreq.op) {
    case Vinstr::bindjmp:
      { auto const& i = p.svcreq.bindjmp_;
        assertx(p.jmp && !p.jcc);
        stub = svcreq::emit_bindjmp_stub(frozen, env.text.data(),
                                         env.meta, i.spOff, p.jmp,
                                         i.target, i.trflags);
      } break;

    case Vinstr::bindjcc:
      // A jcc binds through the same bindjmp stub, keyed on the jcc address.
      { auto const& i = p.svcreq.bindjcc_;
        assertx(!p.jmp && p.jcc);
        stub = svcreq::emit_bindjmp_stub(frozen, env.text.data(),
                                         env.meta, i.spOff, p.jcc,
                                         i.target, i.trflags);
      } break;

    case Vinstr::bindaddr:
      { auto const& i = p.svcreq.bindaddr_;
        assertx(!p.jmp && !p.jcc);
        stub = svcreq::emit_bindaddr_stub(frozen, env.text.data(),
                                          env.meta, i.spOff, i.addr.get(),
                                          i.target, TransFlags{});
        // The bound pointer may not belong to the data segment, as is the case
        // with SSwitchMap (see #10347945)
        auto realAddr = env.text.data().contains((TCA)i.addr.get())
          ? (TCA*)env.text.data().toDestAddress((TCA)i.addr.get())
          : (TCA*)i.addr.get();
        *realAddr = stub;
      } break;

    case Vinstr::fallback:
      { auto const& i = p.svcreq.fallback_;
        assertx(p.jmp && !p.jcc);

        auto const srcrec = tc::findSrcRec(i.target);
        always_assert(srcrec);
        // With trflags set, a dedicated retranslate stub is emitted;
        // otherwise the SrcRec's shared fallback translation is reused.
        stub = i.trflags.packed
          ? svcreq::emit_retranslate_stub(frozen, env.text.data(), env.meta,
                                          i.spOff, i.target, i.trflags)
          : srcrec->getFallbackTranslation();
      } break;

    case Vinstr::fallbackcc:
      // Same stub selection as the fallback case above.
      { auto const& i = p.svcreq.fallbackcc_;
        assertx(!p.jmp && p.jcc);

        auto const srcrec = tc::findSrcRec(i.target);
        always_assert(srcrec);
        stub = i.trflags.packed
          ? svcreq::emit_retranslate_stub(frozen, env.text.data(), env.meta,
                                          i.spOff, i.target, i.trflags)
          : srcrec->getFallbackTranslation();
      } break;

    default: always_assert(false);
  }
  assertx(stub != nullptr);

  // Register any necessary patches by creating fake labels for the stubs.
  if (p.jmp) {
    env.jmps.push_back({p.jmp, Vlabel { env.addrs.size() }});
    env.addrs.push_back(stub);
  }
  if (p.jcc) {
    env.jccs.push_back({p.jcc, Vlabel { env.addrs.size() }});
    env.addrs.push_back(stub);
  }
}
331 ///////////////////////////////////////////////////////////////////////////////
334 * Computes inline frames for each block in unit. Inline frames are dominated
335 * by an inlinestart instruction and post-dominated by an inlineend instruction.
336 * This function annotates Vblocks with their associated frame, and populates
337 * the frame vector. Additionally, inlinestart and inlineend instructions are
338 * replaced by jmp instructions.
void computeFrames(Vunit& unit) {
  auto const topFunc = unit.context ? unit.context->func : nullptr;

  auto const rpo = sortBlocks(unit);

  // Frame 0 is the outermost (non-inlined) frame, rooted at the entry block.
  unit.frames.emplace_back(
    topFunc, 0, Vframe::Top, 0, unit.blocks[rpo[0]].weight
  );
  unit.blocks[rpo[0]].frame = 0;
  unit.blocks[rpo[0]].pending_frames = 0;
  for (auto const b : rpo) {
    auto& block = unit.blocks[b];
    int pending = block.pending_frames;
    assert_flog(block.frame != -1, "Block frames cannot be uninitialized.");

    if (block.code.empty()) continue;

    // Walk the block's instructions, tracking which inline frame (and how
    // many pending frames) are active at the block's end.
    auto const next_frame = [&] () -> int {
      auto frame = block.frame;
      for (auto& inst : block.code) {
        auto origin = inst.origin;
        switch (inst.op) {
          case Vinstr::inlinestart:
            // Each inlined frame will have a single start but may have multiple
            // ends, and so we need to propagate this state here so that it only
            // happens once per frame.
            for (auto f = frame; f != Vframe::Top; f = unit.frames[f].parent) {
              unit.frames[f].inclusive_cost += inst.inlinestart_.cost;
              unit.frames[f].num_inner_frames++;
            }

            unit.frames.emplace_back(
              inst.inlinestart_.func,
              origin->marker().bcOff() - origin->marker().func()->base(),
              frame,
              inst.inlinestart_.cost,
              block.weight
            );
            frame = inst.inlinestart_.id = unit.frames.size() - 1;
            pending++;
            break;
          case Vinstr::inlineend:
            frame = unit.frames[frame].parent;
            pending--;
            break;
          // NOTE(review): pushframe decrements and popframe increments
          // `pending' -- confirm the intended semantics against vasm-instr.h.
          case Vinstr::pushframe:
            pending--;
            break;
          case Vinstr::popframe:
            pending++;
            break;
          default: break;
        }
      }
      return frame;
    }();

    // Propagate the end-of-block frame state to every successor, verifying
    // consistency when a successor was already seeded by another predecessor.
    for (auto const s : succs(block)) {
      auto& sblock = unit.blocks[s];
      assert_flog(
        (sblock.frame == -1 || sblock.frame == next_frame) &&
        (sblock.pending_frames == -1 || sblock.pending_frames == pending),
        "Blocks must be dominated by a single inline frame at the same depth,"
        "{} cannot have frames {} ({}) and {} ({}) at depths {} and {}.",
        s,
        sblock.frame,
        unit.frames[sblock.frame].func
          ? unit.frames[sblock.frame].func->fullName()->data()
          : "(null)",
        next_frame,
        unit.frames[next_frame].func
          ? unit.frames[next_frame].func->fullName()->data()
          : "(null)",
        sblock.pending_frames,
        pending
      );
      sblock.frame = next_frame;
      sblock.pending_frames = pending;
    }
  }
}
423 ///////////////////////////////////////////////////////////////////////////////
427 const uint64_t* alloc_literal(Venv& env, uint64_t val) {
428 // TreadHashMap doesn't support 0 as a key, and we have far more efficient
429 // ways of getting 0 in a register anyway.
430 always_assert(val != 0);
432 if (auto addr = addrForLiteral(val)) return addr;
434 auto& pending = env.meta.literalAddrs;
435 auto it = pending.find(val);
436 if (it != pending.end()) {
437 DEBUG_ONLY auto realAddr =
438 (uint64_t*)env.text.data().toDestAddress((TCA)it->second);
439 assertx(*realAddr == val);
440 return it->second;
443 auto addr = env.text.data().alloc<uint64_t>(alignof(uint64_t));
444 auto realAddr = (uint64_t*)env.text.data().toDestAddress((TCA)addr);
445 *realAddr = val;
447 pending.emplace(val, addr);
448 return addr;
451 ///////////////////////////////////////////////////////////////////////////////