1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/BytecodeAnalysis.h"
9 #include "jit/JitSpewer.h"
10 #include "jit/WarpBuilder.h"
11 #include "vm/BytecodeIterator.h"
12 #include "vm/BytecodeLocation.h"
13 #include "vm/BytecodeUtil.h"
14 #include "vm/Opcodes.h"
16 #include "vm/BytecodeIterator-inl.h"
17 #include "vm/BytecodeLocation-inl.h"
18 #include "vm/JSScript-inl.h"
21 using namespace js::jit
;
// While Warp can compile generators and async functions, it may not always be
// profitable to do so due to the incomplete support that we have (See bug 1681338 for
27 // As an example, in Bug 1839078 the overhead of constantly OSR'ing back into a
28 // Warp body eats any benefit that might have been obtained via warp.
30 // This class implements the heuristic that yield can only be allowed in a Warp
31 // body under two circumstances:
33 // - There is an inner loop, which is presumed to do work that will provide
34 // enough work to avoid pathological cases
35 // - There is sufficient bytecode around the yield that we expect Warp
36 // compilation to drive enough benefit that we will still let yield occur.
38 // This is of course a heuristic, and can of course be defeated.
41 bool hasInnerLoop
= false;
42 bool sawYield
= false;
43 size_t bytecodeOps
= 0;
46 // The minimum amount of bytecode to allow in a yielding loop.
48 // This number is extremely arbitrary, and may be too low by an order of
50 static const size_t BYTECODE_MINIUM
= 40;
52 Vector
<LoopInfo
, 0, JitAllocPolicy
> loopInfos
;
56 explicit YieldAnalyzer(TempAllocator
& alloc
) : loopInfos(alloc
) {}
58 [[nodiscard
]] bool init() {
59 // a pretend outer loop for the function body.
60 return loopInfos
.emplaceBack();
63 void analyzeBackedgeForIon() {
64 const LoopInfo
& loopInfo
= loopInfos
.back();
65 if (loopInfo
.sawYield
) {
66 if (!loopInfo
.hasInnerLoop
&& loopInfo
.bytecodeOps
< BYTECODE_MINIUM
) {
75 // Analyze the host function as if it were a loop;
77 // This should help us avoid ion compiling a tiny function which just
79 analyzeBackedgeForIon();
81 MOZ_ASSERT(loopInfos
.empty());
86 [[nodiscard
]] bool handleBytecode(BytecodeLocation loc
) {
87 LoopInfo
& loopInfo
= loopInfos
.back();
89 loopInfo
.bytecodeOps
++;
91 if (loc
.is(JSOp::LoopHead
)) {
92 loopInfo
.hasInnerLoop
= true;
94 // Bail out here because the below two cases won't be hit.
95 return loopInfos
.emplaceBack();
98 if (loc
.is(JSOp::Yield
) || loc
.is(JSOp::FinalYieldRval
)) {
99 loopInfo
.sawYield
= true;
102 if (loc
.isBackedge()) {
103 analyzeBackedgeForIon();
110 BytecodeAnalysis::BytecodeAnalysis(TempAllocator
& alloc
, JSScript
* script
)
111 : script_(script
), infos_(alloc
) {}
113 bool BytecodeAnalysis::init(TempAllocator
& alloc
) {
114 if (!infos_
.growByUninitialized(script_
->length())) {
118 // Clear all BytecodeInfo.
119 mozilla::PodZero(infos_
.begin(), infos_
.length());
120 infos_
[0].init(/*stackDepth=*/0);
122 // WarpBuilder can compile try blocks, but doesn't support handling
123 // exceptions. If exception unwinding would resume in a catch or finally
124 // block, we instead bail out to the baseline interpreter. Finally blocks can
125 // still be reached by normal means, but the catch block is unreachable and is
126 // not compiled. We therefore need some special machinery to prevent OSR into
127 // Warp code in the following cases:
129 // (1) Loops in catch blocks:
134 // while (..) {} // Can't OSR here.
137 // (2) Loops only reachable via a catch block:
146 // while (..) {} // Loop is only reachable via the catch-block.
148 // To deal with both of these cases, we track whether the current op is
149 // 'normally reachable' (reachable without exception handling).
150 // Forward jumps propagate this flag to their jump targets (see
151 // BytecodeInfo::jumpTargetNormallyReachable) and when the analysis reaches a
152 // jump target it updates its normallyReachable flag based on the target's
155 // Inlining a function without a normally reachable return can cause similar
156 // problems. To avoid this, we mark such functions as uninlineable.
157 bool normallyReachable
= true;
158 bool normallyReachableReturn
= false;
160 YieldAnalyzer
analyzer(alloc
);
161 if (!analyzer
.init()) {
165 for (const BytecodeLocation
& it
: AllBytecodesIterable(script_
)) {
166 JSOp op
= it
.getOp();
167 if (!analyzer
.handleBytecode(it
)) {
171 uint32_t offset
= it
.bytecodeToOffset(script_
);
173 JitSpew(JitSpew_BaselineOp
, "Analyzing op @ %u (end=%u): %s",
174 unsigned(offset
), unsigned(script_
->length()), CodeName(op
));
176 checkWarpSupport(op
);
178 // If this bytecode info has not yet been initialized, it's not reachable.
179 if (!infos_
[offset
].initialized
) {
183 uint32_t stackDepth
= infos_
[offset
].stackDepth
;
185 if (infos_
[offset
].jumpTarget
) {
186 normallyReachable
= infos_
[offset
].jumpTargetNormallyReachable
;
190 size_t endOffset
= offset
+ it
.length();
191 for (size_t checkOffset
= offset
+ 1; checkOffset
< endOffset
;
193 MOZ_ASSERT(!infos_
[checkOffset
].initialized
);
196 uint32_t nuses
= it
.useCount();
197 uint32_t ndefs
= it
.defCount();
199 MOZ_ASSERT(stackDepth
>= nuses
);
203 // If stack depth exceeds max allowed by analysis, fail fast.
204 MOZ_ASSERT(stackDepth
<= BytecodeInfo::MAX_STACK_DEPTH
);
207 case JSOp::TableSwitch
: {
208 uint32_t defaultOffset
= it
.getTableSwitchDefaultOffset(script_
);
209 int32_t low
= it
.getTableSwitchLow();
210 int32_t high
= it
.getTableSwitchHigh();
212 infos_
[defaultOffset
].init(stackDepth
);
213 infos_
[defaultOffset
].setJumpTarget(normallyReachable
);
215 uint32_t ncases
= high
- low
+ 1;
217 for (uint32_t i
= 0; i
< ncases
; i
++) {
218 uint32_t targetOffset
= it
.tableSwitchCaseOffset(script_
, i
);
219 if (targetOffset
!= defaultOffset
) {
220 infos_
[targetOffset
].init(stackDepth
);
221 infos_
[targetOffset
].setJumpTarget(normallyReachable
);
228 for (const TryNote
& tn
: script_
->trynotes()) {
229 if (tn
.start
== offset
+ JSOpLength_Try
&&
230 (tn
.kind() == TryNoteKind::Catch
||
231 tn
.kind() == TryNoteKind::Finally
)) {
232 uint32_t catchOrFinallyOffset
= tn
.start
+ tn
.length
;
233 uint32_t targetDepth
=
234 tn
.kind() == TryNoteKind::Finally
? stackDepth
+ 3 : stackDepth
;
235 BytecodeInfo
& targetInfo
= infos_
[catchOrFinallyOffset
];
236 targetInfo
.init(targetDepth
);
237 targetInfo
.setJumpTarget(/* normallyReachable = */ false);
244 infos_
[offset
].loopHeadCanOsr
= normallyReachable
;
248 case JSOp::Exception
:
249 case JSOp::ExceptionAndStack
:
250 // Sanity check: ops only emitted in catch blocks are never
251 // normally reachable.
252 MOZ_ASSERT(!normallyReachable
);
258 if (normallyReachable
) {
259 normallyReachableReturn
= true;
267 bool jump
= it
.isJump();
269 // Case instructions do not push the lvalue back when branching.
270 uint32_t newStackDepth
= stackDepth
;
271 if (it
.is(JSOp::Case
)) {
275 uint32_t targetOffset
= it
.getJumpTargetOffset(script_
);
278 // If this is a backedge, the target JSOp::LoopHead must have been
279 // analyzed already. Furthermore, if the backedge is normally reachable,
280 // the loop head must be normally reachable too (loopHeadCanOsr can be
281 // used to check this since it's equivalent).
282 if (targetOffset
< offset
) {
283 MOZ_ASSERT(infos_
[targetOffset
].initialized
);
284 MOZ_ASSERT_IF(normallyReachable
, infos_
[targetOffset
].loopHeadCanOsr
);
288 infos_
[targetOffset
].init(newStackDepth
);
289 infos_
[targetOffset
].setJumpTarget(normallyReachable
);
292 // Handle any fallthrough from this opcode.
293 if (it
.fallsThrough()) {
294 BytecodeLocation fallthroughLoc
= it
.next();
295 MOZ_ASSERT(fallthroughLoc
.isInBounds(script_
));
296 uint32_t fallthroughOffset
= fallthroughLoc
.bytecodeToOffset(script_
);
298 infos_
[fallthroughOffset
].init(stackDepth
);
300 // Treat the fallthrough of a branch instruction as a jump target.
302 infos_
[fallthroughOffset
].setJumpTarget(normallyReachable
);
307 // Flag (reachable) resume offset instructions.
308 for (uint32_t offset
: script_
->resumeOffsets()) {
309 BytecodeInfo
& info
= infos_
[offset
];
310 if (info
.initialized
) {
311 info
.hasResumeOffset
= true;
315 if (!normallyReachableReturn
) {
316 script_
->setUninlineable();
319 if (!analyzer
.canIon()) {
320 if (script_
->canIonCompile()) {
323 "Disabling Warp support for %s:%d:%d due to Yield being in a loop",
324 script_
->filename(), script_
->lineno(),
325 script_
->column().oneOriginValue());
326 script_
->disableIon();
333 void BytecodeAnalysis::checkWarpSupport(JSOp op
) {
335 #define DEF_CASE(OP) case JSOp::OP:
336 WARP_UNSUPPORTED_OPCODE_LIST(DEF_CASE
)
338 if (script_
->canIonCompile()) {
339 JitSpew(JitSpew_IonAbort
, "Disabling Warp support for %s:%d:%d due to %s",
340 script_
->filename(), script_
->lineno(),
341 script_
->column().oneOriginValue(), CodeName(op
));
342 script_
->disableIon();
350 bool js::jit::ScriptUsesEnvironmentChain(JSScript
* script
) {
351 if (script
->isModule() || script
->initialEnvironmentShape() ||
352 (script
->function() &&
353 script
->function()->needsSomeEnvironmentObject())) {
357 AllBytecodesIterable
iterator(script
);
359 for (const BytecodeLocation
& location
: iterator
) {
360 if (OpUsesEnvironmentChain(location
.getOp())) {