2 +----------------------------------------------------------------------+
4 +----------------------------------------------------------------------+
5 | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
6 +----------------------------------------------------------------------+
7 | This source file is subject to version 3.01 of the PHP license, |
8 | that is bundled with this package in the file LICENSE, and is |
9 | available through the world-wide-web at the following url: |
10 | http://www.php.net/license/3_01.txt |
11 | If you did not receive a copy of the PHP license and are unable to |
12 | obtain it through the world-wide-web, please send a note to |
13 | license@php.net so we can mail you a copy immediately. |
14 +----------------------------------------------------------------------+
17 #include "hphp/runtime/vm/jit/vasm-internal.h"
19 #include "hphp/runtime/vm/jit/asm-info.h"
20 #include "hphp/runtime/vm/jit/cg-meta.h"
21 #include "hphp/runtime/vm/jit/containers.h"
22 #include "hphp/runtime/vm/jit/ir-opcode.h"
23 #include "hphp/runtime/vm/jit/service-requests.h"
24 #include "hphp/runtime/vm/jit/smashable-instr.h"
25 #include "hphp/runtime/vm/jit/srcdb.h"
26 #include "hphp/runtime/vm/jit/tc.h"
27 #include "hphp/runtime/vm/jit/trans-db.h"
28 #include "hphp/runtime/vm/jit/trans-rec.h"
29 #include "hphp/runtime/vm/jit/unique-stubs.h"
30 #include "hphp/runtime/vm/jit/vasm-instr.h"
31 #include "hphp/runtime/vm/jit/vasm-text.h"
32 #include "hphp/runtime/vm/jit/vasm-unit.h"
33 #include "hphp/runtime/vm/jit/vasm.h"
35 #include "hphp/util/data-block.h"
39 namespace HPHP
{ namespace jit
{ namespace vasm_detail
{
41 ///////////////////////////////////////////////////////////////////////////////
43 IRMetadataUpdater::IRMetadataUpdater(const Venv
& env
, AsmInfo
* asm_info
)
45 , m_asm_info(asm_info
)
48 m_area_to_blockinfos
.resize(m_env
.text
.areas().size());
49 for (auto& r
: m_area_to_blockinfos
) r
.resize(m_env
.unit
.blocks
.size());
51 if (transdb::enabled() || RuntimeOption::EvalJitUseVtuneAPI
) {
52 m_bcmap
= &env
.meta
.bcMap
;
56 void IRMetadataUpdater::register_inst(const Vinstr
& inst
) {
57 // Update HHIR mappings for AsmInfo.
59 auto& snippets
= block_info();
60 auto const frontier
= m_env
.cb
->frontier();
62 if (!snippets
.empty()) {
63 auto& snip
= snippets
.back();
64 snip
.range
= TcaRange
{ snip
.range
.start(), frontier
};
67 Snippet
{ inst
.origin
, TcaRange
{ frontier
, nullptr } }
70 m_origin
= inst
.origin
;
72 // Update HHBC mappings for the TransDB.
73 if (m_bcmap
&& m_origin
) {
74 auto const sk
= inst
.origin
->marker().sk();
75 if (m_bcmap
->empty() ||
76 m_bcmap
->back().sha1
!= sk
.unit()->sha1() ||
77 m_bcmap
->back().bcStart
!= sk
.offset()) {
78 m_bcmap
->push_back(TransBCMapping
{
81 m_env
.text
.main().code
.frontier(),
82 m_env
.text
.cold().code
.frontier(),
83 m_env
.text
.frozen().code
.frontier()
89 void IRMetadataUpdater::register_block_end() {
90 if (!m_asm_info
) return;
91 auto& snippets
= block_info();
93 if (!snippets
.empty()) {
94 auto& snip
= snippets
.back();
95 snip
.range
= TcaRange
{ snip
.range
.start(), m_env
.cb
->frontier() };
99 void IRMetadataUpdater::finish(const jit::vector
<Vlabel
>& labels
) {
100 if (!m_asm_info
) return;
102 auto const& areas
= m_env
.text
.areas();
104 for (auto i
= 0; i
< areas
.size(); ++i
) {
105 auto& block_infos
= m_area_to_blockinfos
[i
];
107 for (auto const b
: labels
) {
108 auto const& snippets
= block_infos
[b
];
109 if (snippets
.empty()) continue;
111 const IRInstruction
* origin
= nullptr;
113 for (auto const& snip
: snippets
) {
114 if (origin
!= snip
.origin
&& snip
.origin
) {
115 origin
= snip
.origin
;
117 m_asm_info
->updateForInstruction(
119 static_cast<AreaIndex
>(i
),
128 jit::vector
<IRMetadataUpdater::Snippet
>&
129 IRMetadataUpdater::block_info() {
130 auto const b
= m_env
.current
;
131 auto const& block
= m_env
.unit
.blocks
[b
];
133 return m_area_to_blockinfos
[size_t(block
.area_idx
)][b
];
136 ///////////////////////////////////////////////////////////////////////////////
138 bool is_empty_catch(const Vblock
& block
) {
139 return block
.code
.size() == 2 &&
140 block
.code
[0].op
== Vinstr::landingpad
&&
141 block
.code
[1].op
== Vinstr::jmpi
&&
142 block
.code
[1].jmpi_
.target
== tc::ustubs().endCatchHelper
;
145 void register_catch_block(const Venv
& env
, const Venv::LabelPatch
& p
) {
146 // If the catch block is empty, we can just let tc_unwind_resume() and
147 // tc_unwind_personality() skip over our frame.
148 if (is_empty_catch(env
.unit
.blocks
[p
.target
])) {
152 auto const catch_target
= env
.addrs
[p
.target
];
153 assertx(catch_target
);
154 env
.meta
.catches
.emplace_back(p
.instr
, catch_target
);
157 ///////////////////////////////////////////////////////////////////////////////
162 * Record in ProfData that the control-transfer instruction `jmp' is associated
163 * with the current translation being emitted.
165 void setJmpTransID(Venv
& env
, TCA jmp
) {
166 if (!env
.unit
.context
) return;
168 env
.meta
.setJmpTransID(
169 jmp
, env
.unit
.context
->transID
, env
.unit
.context
->kind
173 void registerFallbackJump(Venv
& env
, TCA jmp
, ConditionCode cc
) {
174 auto const incoming
= cc
== CC_None
? IncomingBranch::jmpFrom(jmp
)
175 : IncomingBranch::jccFrom(jmp
);
177 env
.meta
.inProgressTailJumps
.push_back(incoming
);
182 bool emit(Venv
& env
, const callphp
& i
) {
183 const auto call
= emitSmashableCall(*env
.cb
, env
.meta
, i
.stub
);
184 setJmpTransID(env
, call
);
185 // If the callee is known, keep metadata to be able to eagerly smash the call.
186 if (i
.func
!= nullptr) {
187 env
.meta
.smashableCallData
[call
] = PrologueID(i
.func
, i
.nargs
);
192 bool emit(Venv
& env
, const bindjmp
& i
) {
193 auto const jmp
= emitSmashableJmp(*env
.cb
, env
.meta
, env
.cb
->frontier());
194 env
.stubs
.push_back({jmp
, nullptr, i
});
195 setJmpTransID(env
, jmp
);
196 env
.meta
.smashableJumpData
[jmp
] = {i
.target
, CGMeta::JumpKind::Bindjmp
};
200 bool emit(Venv
& env
, const bindjcc
& i
) {
202 emitSmashableJcc(*env
.cb
, env
.meta
, env
.cb
->frontier(), i
.cc
);
203 env
.stubs
.push_back({nullptr, jcc
, i
});
204 setJmpTransID(env
, jcc
);
205 env
.meta
.smashableJumpData
[jcc
] = {i
.target
, CGMeta::JumpKind::Bindjcc
};
209 bool emit(Venv
& env
, const bindaddr
& i
) {
210 env
.stubs
.push_back({nullptr, nullptr, i
});
211 setJmpTransID(env
, TCA(i
.addr
.get()));
212 env
.meta
.codePointers
.emplace(i
.addr
.get());
216 bool emit(Venv
& env
, const fallback
& i
) {
217 auto const jmp
= emitSmashableJmp(*env
.cb
, env
.meta
, env
.cb
->frontier());
218 env
.stubs
.push_back({jmp
, nullptr, i
});
219 registerFallbackJump(env
, jmp
, CC_None
);
220 env
.meta
.smashableJumpData
[jmp
] = {i
.target
, CGMeta::JumpKind::Fallback
};
224 bool emit(Venv
& env
, const fallbackcc
& i
) {
226 emitSmashableJcc(*env
.cb
, env
.meta
, env
.cb
->frontier(), i
.cc
);
227 env
.stubs
.push_back({nullptr, jcc
, i
});
228 registerFallbackJump(env
, jcc
, i
.cc
);
229 env
.meta
.smashableJumpData
[jcc
] = {i
.target
, CGMeta::JumpKind::Fallbackcc
};
// Emit a retranslate-opt service-request stub for this instruction.
// NOTE(review): this definition is truncated in this chunk — the trailing
// arguments to emit_retranslate_opt_stub, the `return true;`, and the
// closing brace were dropped by the extraction; restore them from the
// upstream file before building.
233 bool emit(Venv
& env
, const retransopt
& i
) {
234 svcreq::emit_retranslate_opt_stub(*env
.cb
, env
.text
.data(), env
.meta
,
239 bool emit(Venv
& env
, const movqs
& i
) {
240 auto const mov
= emitSmashableMovq(*env
.cb
, env
.meta
, i
.s
.q(), r64(i
.d
));
241 if (i
.addr
.isValid()) {
242 env
.vaddrs
[i
.addr
] = mov
;
247 bool emit(Venv
& env
, const debugguardjmp
& i
) {
248 auto const jmp
= emitSmashableJmp(*env
.cb
, env
.meta
, i
.realCode
);
251 env
.meta
.watchpoints
.push_back(i
.watch
);
256 bool emit(Venv
& env
, const jmps
& i
) {
257 auto const jmp
= emitSmashableJmp(*env
.cb
, env
.meta
, env
.cb
->frontier());
258 env
.jmps
.push_back({jmp
, i
.targets
[0]});
259 if (i
.jmp_addr
.isValid()) {
260 env
.vaddrs
[i
.jmp_addr
] = jmp
;
262 if (i
.taken_addr
.isValid()) {
263 env
.pending_vaddrs
.push_back({i
.taken_addr
, i
.targets
[1]});
268 ///////////////////////////////////////////////////////////////////////////////
270 void emit_svcreq_stub(Venv
& env
, const Venv::SvcReqPatch
& p
) {
271 auto& frozen
= env
.text
.frozen().code
;
275 switch (p
.svcreq
.op
) {
276 case Vinstr::bindjmp
:
277 { auto const& i
= p
.svcreq
.bindjmp_
;
278 assertx(p
.jmp
&& !p
.jcc
);
279 stub
= svcreq::emit_bindjmp_stub(frozen
, env
.text
.data(),
280 env
.meta
, i
.spOff
, p
.jmp
,
281 i
.target
, i
.trflags
);
284 case Vinstr::bindjcc
:
285 { auto const& i
= p
.svcreq
.bindjcc_
;
286 assertx(!p
.jmp
&& p
.jcc
);
287 stub
= svcreq::emit_bindjmp_stub(frozen
, env
.text
.data(),
288 env
.meta
, i
.spOff
, p
.jcc
,
289 i
.target
, i
.trflags
);
292 case Vinstr::bindaddr
:
293 { auto const& i
= p
.svcreq
.bindaddr_
;
294 assertx(!p
.jmp
&& !p
.jcc
);
295 stub
= svcreq::emit_bindaddr_stub(frozen
, env
.text
.data(),
296 env
.meta
, i
.spOff
, i
.addr
.get(),
297 i
.target
, TransFlags
{});
298 // The bound pointer may not belong to the data segment, as is the case
299 // with SSwitchMap (see #10347945)
300 auto realAddr
= env
.text
.data().contains((TCA
)i
.addr
.get())
301 ? (TCA
*)env
.text
.data().toDestAddress((TCA
)i
.addr
.get())
302 : (TCA
*)i
.addr
.get();
306 case Vinstr::fallback
:
307 { auto const& i
= p
.svcreq
.fallback_
;
308 assertx(p
.jmp
&& !p
.jcc
);
310 auto const srcrec
= tc::findSrcRec(i
.target
);
311 always_assert(srcrec
);
312 stub
= i
.trflags
.packed
313 ? svcreq::emit_retranslate_stub(frozen
, env
.text
.data(), env
.meta
,
314 i
.spOff
, i
.target
, i
.trflags
)
315 : srcrec
->getFallbackTranslation();
318 case Vinstr::fallbackcc
:
319 { auto const& i
= p
.svcreq
.fallbackcc_
;
320 assertx(!p
.jmp
&& p
.jcc
);
322 auto const srcrec
= tc::findSrcRec(i
.target
);
323 always_assert(srcrec
);
324 stub
= i
.trflags
.packed
325 ? svcreq::emit_retranslate_stub(frozen
, env
.text
.data(), env
.meta
,
326 i
.spOff
, i
.target
, i
.trflags
)
327 : srcrec
->getFallbackTranslation();
330 default: always_assert(false);
332 assertx(stub
!= nullptr);
334 // Register any necessary patches by creating fake labels for the stubs.
336 env
.jmps
.push_back({p
.jmp
, Vlabel
{ env
.addrs
.size() }});
337 env
.addrs
.push_back(stub
);
340 env
.jccs
.push_back({p
.jcc
, Vlabel
{ env
.addrs
.size() }});
341 env
.addrs
.push_back(stub
);
345 ///////////////////////////////////////////////////////////////////////////////
348 * Computes inline frames for each block in unit. Inline frames are dominated
349 * by an inlinestart instruction and post-dominated by an inlineend instruction.
350 * This function annotates Vblocks with their associated frame, and populates
351 * the frame vector. Additionally, inlinestart and inlineend instructions are
352 * replaced by jmp instructions.
354 void computeFrames(Vunit
& unit
) {
355 auto const topFunc
= unit
.context
? unit
.context
->initSrcKey
.func() : nullptr;
357 auto const rpo
= sortBlocks(unit
);
359 unit
.frames
.emplace_back(
360 topFunc
, 0, Vframe::Top
, 0, unit
.blocks
[rpo
[0]].weight
362 unit
.blocks
[rpo
[0]].frame
= 0;
363 unit
.blocks
[rpo
[0]].pending_frames
= 0;
364 for (auto const b
: rpo
) {
365 auto& block
= unit
.blocks
[b
];
366 int pending
= block
.pending_frames
;
367 assert_flog(block
.frame
!= -1, "Block frames cannot be uninitialized.");
369 if (block
.code
.empty()) continue;
371 auto const next_frame
= [&] () -> int {
372 auto frame
= block
.frame
;
373 for (auto& inst
: block
.code
) {
374 auto origin
= inst
.origin
;
376 case Vinstr::inlinestart
:
377 // Each inlined frame will have a single start but may have multiple
378 // ends, and so we need to propagate this state here so that it only
379 // happens once per frame.
380 for (auto f
= frame
; f
!= Vframe::Top
; f
= unit
.frames
[f
].parent
) {
381 unit
.frames
[f
].inclusive_cost
+= inst
.inlinestart_
.cost
;
382 unit
.frames
[f
].num_inner_frames
++;
385 unit
.frames
.emplace_back(
386 inst
.inlinestart_
.func
,
387 origin
->marker().bcOff() - origin
->marker().func()->base(),
389 inst
.inlinestart_
.cost
,
392 frame
= inst
.inlinestart_
.id
= unit
.frames
.size() - 1;
395 case Vinstr::inlineend
:
396 frame
= unit
.frames
[frame
].parent
;
399 case Vinstr::pushframe
:
402 case Vinstr::popframe
:
411 for (auto const s
: succs(block
)) {
412 auto& sblock
= unit
.blocks
[s
];
414 (sblock
.frame
== -1 || sblock
.frame
== next_frame
) &&
415 (sblock
.pending_frames
== -1 || sblock
.pending_frames
== pending
),
416 "Blocks must be dominated by a single inline frame at the same depth,"
417 "{} cannot have frames {} ({}) and {} ({}) at depths {} and {}.",
420 unit
.frames
[sblock
.frame
].func
421 ? unit
.frames
[sblock
.frame
].func
->fullName()->data()
424 unit
.frames
[next_frame
].func
425 ? unit
.frames
[next_frame
].func
->fullName()->data()
427 sblock
.pending_frames
,
430 sblock
.frame
= next_frame
;
431 sblock
.pending_frames
= pending
;
437 ///////////////////////////////////////////////////////////////////////////////
441 const uint64_t* alloc_literal(Venv
& env
, uint64_t val
) {
442 // TreadHashMap doesn't support 0 as a key, and we have far more efficient
443 // ways of getting 0 in a register anyway.
444 always_assert(val
!= 0);
446 if (auto addr
= addrForLiteral(val
)) return addr
;
448 auto& pending
= env
.meta
.literalAddrs
;
449 auto it
= pending
.find(val
);
450 if (it
!= pending
.end()) {
451 DEBUG_ONLY
auto realAddr
=
452 (uint64_t*)env
.text
.data().toDestAddress((TCA
)it
->second
);
453 assertx(*realAddr
== val
);
457 auto addr
= env
.text
.data().alloc
<uint64_t>(alignof(uint64_t));
458 auto realAddr
= (uint64_t*)env
.text
.data().toDestAddress((TCA
)addr
);
461 pending
.emplace(val
, addr
);
465 ///////////////////////////////////////////////////////////////////////////////