/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonAnalysis.h"

#include <utility>  // for ::std::pair

#include "jit/AliasAnalysis.h"
#include "jit/CompileInfo.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "util/CheckedArithmetic.h"

#include "vm/BytecodeUtil-inl.h"

using namespace js::jit;

using mozilla::DebugOnly;
// Stack used by FlagPhiInputsAsImplicitlyUsed. It stores the Phi instruction
// pointer and the MUseIterator which should be visited next.
using MPhiUseIteratorStack =
    Vector<std::pair<MPhi*, MUseIterator>, 16, SystemAllocPolicy>;
// Look for Phi uses with a depth-first search. If any uses are found the stack
// of MPhi instructions is returned in the |worklist| argument.
static bool DepthFirstSearchUse(MIRGenerator* mir,
                                MPhiUseIteratorStack& worklist, MPhi* phi) {
  // Push a Phi and the next use to iterate over in the worklist.
  auto push = [&worklist](MPhi* phi, MUseIterator use) -> bool {
    phi->setInWorklist();
    return worklist.append(std::make_pair(phi, use));
  };

  // Used to assert that when we have no uses, we at least visited all the
  // transitive uses.
  size_t refUseCount = phi->useCount();
  size_t useCount = 0;

  MOZ_ASSERT(worklist.empty());
  if (!push(phi, phi->usesBegin())) {
    return false;
  }

  while (!worklist.empty()) {
    // Resume iterating over the last phi-use pair added by the next loop.
    auto pair = worklist.popCopy();
    MPhi* producer = pair.first;
    MUseIterator use = pair.second;
    MUseIterator end(producer->usesEnd());
    producer->setNotInWorklist();

    // Keep going down the tree of uses, skipping (continue)
    // non-observable/unused cases and Phi which are already listed in the
    // worklist. Stop (return) as soon as one use is found.
    while (use != end) {
      MNode* consumer = (*use)->consumer();
      MUseIterator it = use;
      use++;
      useCount++;

      if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed inner loop")) {
        return false;
      }

      if (consumer->isResumePoint()) {
        MResumePoint* rp = consumer->toResumePoint();
        // Observable operands are similar to potential uses.
        if (rp->isObservableOperand(*it)) {
          return push(producer, use);
        }
        continue;
      }

      MDefinition* cdef = consumer->toDefinition();
      if (!cdef->isPhi()) {
        // The producer is explicitly used by a definition.
        return push(producer, use);
      }

      MPhi* cphi = cdef->toPhi();
      if (cphi->getUsageAnalysis() == PhiUsage::Used ||
          cphi->isImplicitlyUsed()) {
        // The information got cached on the Phi the last time it
        // got visited, or when flagging operands of implicitly used
        // instructions.
        return push(producer, use);
      }

      if (cphi->isInWorklist() || cphi == producer) {
        // We are already iterating over the uses of this Phi instruction which
        // are part of a loop; instead of trying to handle loops, conservatively
        // mark them as used.
        return push(producer, use);
      }

      if (cphi->getUsageAnalysis() == PhiUsage::Unused) {
        // The instruction already got visited and is known to have
        // no uses.
        continue;
      }

      // We found another Phi instruction, move the use iterator to
      // the next use and push it to the worklist stack. Then, continue
      // with a depth search.
      if (!push(producer, use)) {
        return false;
      }
      producer = cphi;
      use = producer->usesBegin();
      end = producer->usesEnd();
      refUseCount += producer->useCount();
    }

    // When unused, we cannot bubble up this information without iterating
    // over the rest of the previous Phi instruction consumers.
    MOZ_ASSERT(use == end);
    producer->setUsageAnalysis(PhiUsage::Unused);
  }

  MOZ_ASSERT(useCount == refUseCount);
  return true;
}
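
// Illustrative sketch (not from the original sources): given two phis that
// only consume each other,
//
//   B3: 20 phi(10, 21)   ; only used by instruction 21
//   B4: 21 phi(11, 20)   ; only used by instruction 20
//
// the search starting at 20 descends into 21, finds that 21's consumer 20 is
// already in the worklist, and conservatively reports a use rather than
// attempting to prove the cycle dead.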
static bool FlagPhiInputsAsImplicitlyUsed(MIRGenerator* mir,
                                          MBasicBlock* block,
                                          MBasicBlock* succ,
                                          MPhiUseIteratorStack& worklist) {
  // When removing an edge between 2 blocks, we might remove the ability of
  // later phases to figure out that the uses of a Phi should be considered as
  // a use of all its inputs. Thus we need to mark the Phi inputs as being
  // implicitly used iff the phi has any uses.
  //
  // Example (simplified MIR; two predecessors feed a phi consumed by a
  // return):
  //
  //   +--------------------+     +---------------------+
  //   |12 MFoo 6           |     |32 MBar 5            |
  //   |25 MGoto Block 4    |     |43 MGoto Block 4     |
  //   +--------------------+     +---------------------+
  //              \                      /
  //               X  <- edge removed   /
  //              +------------v-----------+
  //              |50 MPhi 12 32           |
  //              |70 MReturn 50           |
  //              +------------------------+
  //
  // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  //
  // After removing the left edge, the left block bails out instead of
  // flowing into the phi:
  //
  //   +--------------------+     +---------------------+
  //   |12 MConst opt-out   |     |32 MBar 5            |
  //   |80 MUnreachable     |     |43 MGoto Block 4     |
  //   +--------------------+     +---------------------+
  //                                     |
  //                        +------------v-----------+
  //                        |50 MPhi 32              |
  //                        |70 MReturn 50           |
  //                        +------------------------+
  //
  // If the inputs of the Phi are not flagged as implicitly used, then
  // later compilation phases might optimize them out. The problem is that a
  // bailout will use this value and give it back to baseline, which will then
  // use the OptimizedOut magic value in a computation.
  //
  // Unfortunately, we cannot be too conservative about flagging Phi inputs as
  // having implicit uses, as this would prevent many optimizations from being
  // used. Thus, the following code is in charge of flagging Phi instructions
  // as Unused or Used, and setting ImplicitlyUsed accordingly.
  size_t predIndex = succ->getPredecessorIndex(block);
  MPhiIterator end = succ->phisEnd();
  MPhiIterator it = succ->phisBegin();
  for (; it != end; it++) {
    MPhi* phi = *it;

    if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed outer loop")) {
      return false;
    }

    // We are looking to mark the Phi inputs which are used across the edge
    // between the |block| and its successor |succ|.
    MDefinition* def = phi->getOperand(predIndex);
    if (def->isImplicitlyUsed()) {
      continue;
    }

    // If the Phi is either Used or Unused, set the ImplicitlyUsed flag
    // accordingly.
    if (phi->getUsageAnalysis() == PhiUsage::Used || phi->isImplicitlyUsed()) {
      def->setImplicitlyUsedUnchecked();
      continue;
    } else if (phi->getUsageAnalysis() == PhiUsage::Unused) {
      continue;
    }

    // We do not know if the Phi was Used or Unused, iterate over all uses
    // with a depth-search of uses. Returns the matching stack in the
    // worklist as soon as one use is found.
    MOZ_ASSERT(worklist.empty());
    if (!DepthFirstSearchUse(mir, worklist, phi)) {
      return false;
    }

    MOZ_ASSERT_IF(worklist.empty(),
                  phi->getUsageAnalysis() == PhiUsage::Unused);
    if (!worklist.empty()) {
      // One of the Phis is used, set Used flags on all the Phis which are
      // in the use chain.
      def->setImplicitlyUsedUnchecked();
      do {
        auto pair = worklist.popCopy();
        MPhi* producer = pair.first;
        producer->setUsageAnalysis(PhiUsage::Used);
        producer->setNotInWorklist();
      } while (!worklist.empty());
    }
    MOZ_ASSERT(phi->getUsageAnalysis() != PhiUsage::Unknown);
  }

  return true;
}
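
// Illustrative sketch (not from the original sources): in a script like
//
//   var x = foo();      // 12 MFoo in the diagram above
//   if (cond) { ... }   // edge into the join that later gets pruned
//   return x;           // consumes a phi of x across the join
//
// if the pruned edge's phi input |12 MFoo| is not flagged as implicitly used,
// DCE may delete MFoo; a bailout on the pruned path would then resume in
// baseline with the OptimizedOut magic value flowing into |return x|.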
static MInstructionIterator FindFirstInstructionAfterBail(MBasicBlock* block) {
  MOZ_ASSERT(block->alwaysBails());
  for (MInstructionIterator it = block->begin(); it != block->end(); it++) {
    MInstruction* ins = *it;
    if (ins->isBail()) {
      it++;
      return it;
    }
  }
  MOZ_CRASH("Expected MBail in alwaysBails block");
}
// Given an iterator pointing to the first removed instruction, mark
// the operands of each removed instruction as having implicit uses.
static bool FlagOperandsAsImplicitlyUsedAfter(
    MIRGenerator* mir, MBasicBlock* block, MInstructionIterator firstRemoved) {
  MOZ_ASSERT(firstRemoved->block() == block);

  const CompileInfo& info = block->info();

  // Flag operands of removed instructions as having implicit uses.
  MInstructionIterator end = block->end();
  for (MInstructionIterator it = firstRemoved; it != end; it++) {
    if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 1)")) {
      return false;
    }

    MInstruction* ins = *it;
    for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
      ins->getOperand(i)->setImplicitlyUsedUnchecked();
    }

    // Flag observable resume point operands as having implicit uses.
    if (MResumePoint* rp = ins->resumePoint()) {
      // Note: no need to iterate over the callers of the resume point as
      // this is the same as the entry resume point.
      MOZ_ASSERT(&rp->block()->info() == &info);
      for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
        if (info.isObservableSlot(i)) {
          rp->getOperand(i)->setImplicitlyUsedUnchecked();
        }
      }
    }
  }

  // Flag Phi inputs of the successors as having implicit uses.
  MPhiUseIteratorStack worklist;
  for (size_t i = 0, e = block->numSuccessors(); i < e; i++) {
    if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 2)")) {
      return false;
    }

    if (!FlagPhiInputsAsImplicitlyUsed(mir, block, block->getSuccessor(i),
                                       worklist)) {
      return false;
    }
  }

  return true;
}
static bool FlagEntryResumePointOperands(MIRGenerator* mir,
                                         MBasicBlock* block) {
  // Flag observable operands of the entry resume point as having implicit
  // uses.
  MResumePoint* rp = block->entryResumePoint();
  while (rp) {
    if (mir->shouldCancel("FlagEntryResumePointOperands")) {
      return false;
    }

    const CompileInfo& info = rp->block()->info();
    for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
      if (info.isObservableSlot(i)) {
        rp->getOperand(i)->setImplicitlyUsedUnchecked();
      }
    }

    rp = rp->caller();
  }

  return true;
}
static bool FlagAllOperandsAsImplicitlyUsed(MIRGenerator* mir,
                                            MBasicBlock* block) {
  return FlagEntryResumePointOperands(mir, block) &&
         FlagOperandsAsImplicitlyUsedAfter(mir, block, block->begin());
}
// WarpBuilder sets the alwaysBails flag on blocks that contain an
// unconditional bailout. We trim any instructions in those blocks
// after the first unconditional bailout, and remove any blocks that
// are only reachable through bailing blocks.
bool jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph) {
  JitSpew(JitSpew_Prune, "Begin");

  // Pruning is guided by unconditional bailouts. Wasm does not have bailouts.
  MOZ_ASSERT(!mir->compilingWasm());

  Vector<MBasicBlock*, 16, SystemAllocPolicy> worklist;
  uint32_t numMarked = 0;
  bool needsTrim = false;

  auto markReachable = [&](MBasicBlock* block) -> bool {
    block->mark();
    numMarked++;
    if (block->alwaysBails()) {
      needsTrim = true;
    }
    return worklist.append(block);
  };

  // The entry block is always reachable.
  if (!markReachable(graph.entryBlock())) {
    return false;
  }

  // The OSR entry block is always reachable if it exists.
  if (graph.osrBlock() && !markReachable(graph.osrBlock())) {
    return false;
  }

  // Iteratively mark all reachable blocks.
  while (!worklist.empty()) {
    if (mir->shouldCancel("Prune unused branches (marking reachable)")) {
      return false;
    }
    MBasicBlock* block = worklist.popCopy();

    JitSpew(JitSpew_Prune, "Visit block %u:", block->id());
    JitSpewIndent indent(JitSpew_Prune);

    // If this block always bails, then it does not reach its successors.
    if (block->alwaysBails()) {
      continue;
    }

    for (size_t i = 0; i < block->numSuccessors(); i++) {
      MBasicBlock* succ = block->getSuccessor(i);
      if (succ->isMarked()) {
        continue;
      }
      JitSpew(JitSpew_Prune, "Reaches block %u", succ->id());
      if (!markReachable(succ)) {
        return false;
      }
    }
  }

  if (!needsTrim && numMarked == graph.numBlocks()) {
    // There is nothing to prune.
    graph.unmarkBlocks();
    return true;
  }

  JitSpew(JitSpew_Prune, "Remove unreachable instructions and blocks:");
  JitSpewIndent indent(JitSpew_Prune);

  // The operands of removed instructions may be needed in baseline
  // after bailing out.
  for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
    if (mir->shouldCancel("Prune unused branches (marking operands)")) {
      return false;
    }

    MBasicBlock* block = *it++;
    if (!block->isMarked()) {
      // If we are removing the block entirely, mark the operands of every
      // instruction as being implicitly used.
      FlagAllOperandsAsImplicitlyUsed(mir, block);
    } else if (block->alwaysBails()) {
      // If we are only trimming instructions after a bail, only mark operands
      // of removed instructions.
      MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
      FlagOperandsAsImplicitlyUsedAfter(mir, block, firstRemoved);
    }
  }

  // Remove the blocks in post-order such that consumers are visited before
  // the predecessors, the only exception being the Phi nodes of loop headers.
  for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
    if (mir->shouldCancel("Prune unused branches (removal loop)")) {
      return false;
    }
    if (!graph.alloc().ensureBallast()) {
      return false;
    }

    MBasicBlock* block = *it++;
    if (block->isMarked() && !block->alwaysBails()) {
      continue;
    }

    // As we are going to replace/remove the last instruction, we first have
    // to remove this block from the predecessor list of its successors.
    size_t numSucc = block->numSuccessors();
    for (uint32_t i = 0; i < numSucc; i++) {
      MBasicBlock* succ = block->getSuccessor(i);
      if (succ->isDead()) {
        continue;
      }

      // Our dominators code expects all loop headers to have two predecessors.
      // If we are removing the normal entry to a loop, but can still reach
      // the loop header via OSR, we create a fake unreachable predecessor.
      if (succ->isLoopHeader() && block != succ->backedge()) {
        MOZ_ASSERT(graph.osrBlock());
        if (!graph.alloc().ensureBallast()) {
          return false;
        }

        MBasicBlock* fake = MBasicBlock::NewFakeLoopPredecessor(graph, succ);
        if (!fake) {
          return false;
        }
        // Mark the block to avoid removing it as unreachable.
        fake->mark();

        JitSpew(JitSpew_Prune,
                "Header %u only reachable by OSR. Add fake predecessor %u",
                succ->id(), fake->id());
      }

      JitSpew(JitSpew_Prune, "Remove block edge %u -> %u.", block->id(),
              succ->id());
      succ->removePredecessor(block);
    }

    if (!block->isMarked()) {
      // Remove unreachable blocks from the CFG.
      JitSpew(JitSpew_Prune, "Remove block %u.", block->id());
      graph.removeBlock(block);
    } else {
      // Remove unreachable instructions after unconditional bailouts.
      JitSpew(JitSpew_Prune, "Trim block %u.", block->id());

      // Discard all instructions after the first MBail.
      MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
      block->discardAllInstructionsStartingAt(firstRemoved);

      if (block->outerResumePoint()) {
        block->clearOuterResumePoint();
      }

      block->end(MUnreachable::New(graph.alloc()));
    }
  }
  graph.unmarkBlocks();

  return true;
}
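
// Illustrative sketch (not from the original sources): Warp may speculate
// that |x| is an Int32, so the string branch below compiles to an
// unconditional bailout. Its block is trimmed after the MBail, and any blocks
// reachable only through it are pruned:
//
//   function f(x) {
//     if (typeof x === "string") {  // alwaysBails under Int32 speculation
//       return expensiveStringPath(x);
//     }
//     return x + 1;
//   }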
static bool SplitCriticalEdgesForBlock(MIRGraph& graph, MBasicBlock* block) {
  if (block->numSuccessors() < 2) {
    return true;
  }
  for (size_t i = 0; i < block->numSuccessors(); i++) {
    MBasicBlock* target = block->getSuccessor(i);
    if (target->numPredecessors() < 2) {
      continue;
    }

    // Create a simple new block which contains a goto and which splits the
    // edge between block and target.
    MBasicBlock* split = MBasicBlock::NewSplitEdge(graph, block, i, target);
    if (!split) {
      return false;
    }
  }
  return true;
}
// A critical edge is an edge which is neither its successor's only predecessor
// nor its predecessor's only successor. Critical edges must be split to
// prevent copy-insertion and code motion from affecting other edges.
bool jit::SplitCriticalEdges(MIRGraph& graph) {
  for (MBasicBlockIterator iter(graph.begin()); iter != graph.end(); iter++) {
    MBasicBlock* block = *iter;
    if (!SplitCriticalEdgesForBlock(graph, block)) {
      return false;
    }
  }
  return true;
}
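
// Illustrative sketch (not from the original sources): the edge A->C below is
// critical because A has two successors and C has two predecessors. Splitting
// inserts a goto-only block S so that code inserted or moved onto the edge
// cannot affect the A->B->C path:
//
//   Before:  A ─┬─> B ─> C        After:  A ─┬─> B ─> C
//               └────────> C                 └─> S ──> C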
bool jit::IsUint32Type(const MDefinition* def) {
  if (def->isBeta()) {
    def = def->getOperand(0);
  }

  if (def->type() != MIRType::Int32) {
    return false;
  }

  return def->isUrsh() && def->getOperand(1)->isConstant() &&
         def->getOperand(1)->toConstant()->type() == MIRType::Int32 &&
         def->getOperand(1)->toConstant()->toInt32() == 0;
}
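
// For example (illustrative, not from the original sources): the |x >>> 0|
// idiom compiles to an MUrsh whose right-hand side is the Int32 constant 0,
// which is exactly what this predicate recognizes:
//
//   function toUint32(x) {
//     return x >>> 0;  // MUrsh(x, constant 0) with an Int32 result
//   }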
// Determine whether phiBlock/testBlock simply compute a phi and perform a
// test on it.
static bool BlockIsSingleTest(MBasicBlock* phiBlock, MBasicBlock* testBlock,
                              MPhi** pphi, MTest** ptest) {
  *pphi = nullptr;
  *ptest = nullptr;

  if (phiBlock != testBlock) {
    MOZ_ASSERT(phiBlock->numSuccessors() == 1 &&
               phiBlock->getSuccessor(0) == testBlock);
    if (!phiBlock->begin()->isGoto()) {
      return false;
    }
  }

  auto iter = testBlock->rbegin();
  if (!iter->isTest()) {
    return false;
  }
  MTest* test = iter->toTest();

  // Unwrap boolean conversion performed through the '!!' idiom.
  MInstruction* testOrNot = test;
  bool hasOddNumberOfNots = false;
  while (++iter != testBlock->rend()) {
    if (iter->isNot()) {
      // The MNot must only be used by |testOrNot|.
      auto* notIns = iter->toNot();
      if (testOrNot->getOperand(0) != notIns) {
        return false;
      }
      if (!notIns->hasOneUse()) {
        return false;
      }

      testOrNot = notIns;
      hasOddNumberOfNots = !hasOddNumberOfNots;
    } else {
      // Fail if there are any other instructions than MNot.
      return false;
    }
  }

  // There's an odd number of MNot, so this can't be the '!!' idiom.
  if (hasOddNumberOfNots) {
    return false;
  }

  MOZ_ASSERT(testOrNot->isTest() || testOrNot->isNot());

  MDefinition* testInput = testOrNot->getOperand(0);
  if (!testInput->isPhi()) {
    return false;
  }
  MPhi* phi = testInput->toPhi();
  if (phi->block() != phiBlock) {
    return false;
  }

  for (MUseIterator iter = phi->usesBegin(); iter != phi->usesEnd(); ++iter) {
    MUse* use = *iter;
    if (use->consumer() == testOrNot) {
      continue;
    }
    if (use->consumer()->isResumePoint()) {
      MBasicBlock* useBlock = use->consumer()->block();
      if (useBlock == phiBlock || useBlock == testBlock) {
        continue;
      }
    }
    return false;
  }

  for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd();
       ++iter) {
    if (*iter != phi) {
      return false;
    }
  }

  if (phiBlock != testBlock && !testBlock->phisEmpty()) {
    return false;
  }

  *pphi = phi;
  *ptest = test;

  return true;
}
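
// Illustrative sketch (not from the original sources): the '!!' idiom
// accepted above comes from source like
//
//   if (!!(a ? b : c)) { ... }
//
// where the test block ends with MTest fed by a chain of MNots over the phi;
// an even number of MNots preserves the truthiness of the phi value.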
// Determine if value is directly or indirectly the test input.
static bool IsTestInputMaybeToBool(MTest* test, MDefinition* value) {
  auto* input = test->input();
  bool hasEvenNumberOfNots = true;
  while (true) {
    // Only accept if there's an even number of MNot.
    if (input == value && hasEvenNumberOfNots) {
      return true;
    }

    // Unwrap boolean conversion performed through the '!!' idiom.
    if (input->isNot()) {
      input = input->toNot()->input();
      hasEvenNumberOfNots = !hasEvenNumberOfNots;
      continue;
    }

    return false;
  }
}
// Change block so that it ends in a goto to the specific target block.
// existingPred is an existing predecessor of the block.
[[nodiscard]] static bool UpdateGotoSuccessor(TempAllocator& alloc,
                                              MBasicBlock* block,
                                              MBasicBlock* target,
                                              MBasicBlock* existingPred) {
  MInstruction* ins = block->lastIns();
  MOZ_ASSERT(ins->isGoto());
  ins->toGoto()->target()->removePredecessor(block);
  block->discardLastIns();

  MGoto* newGoto = MGoto::New(alloc, target);
  block->end(newGoto);

  return target->addPredecessorSameInputsAs(block, existingPred);
}
// Change block so that it ends in a test of the specified value, going to
// either ifTrue or ifFalse. existingPred is an existing predecessor of ifTrue
// or ifFalse with the same values incoming to ifTrue/ifFalse as block.
// existingPred is not required to be a predecessor of ifTrue/ifFalse if block
// already ends in a test going to that block on a true/false result.
[[nodiscard]] static bool UpdateTestSuccessors(
    TempAllocator& alloc, MBasicBlock* block, MDefinition* value,
    MBasicBlock* ifTrue, MBasicBlock* ifFalse, MBasicBlock* existingPred) {
  MInstruction* ins = block->lastIns();
  if (ins->isTest()) {
    MTest* test = ins->toTest();
    MOZ_ASSERT(test->input() == value);

    if (ifTrue != test->ifTrue()) {
      test->ifTrue()->removePredecessor(block);
      if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
        return false;
      }
      MOZ_ASSERT(test->ifTrue() == test->getSuccessor(0));
      test->replaceSuccessor(0, ifTrue);
    }

    if (ifFalse != test->ifFalse()) {
      test->ifFalse()->removePredecessor(block);
      if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
        return false;
      }
      MOZ_ASSERT(test->ifFalse() == test->getSuccessor(1));
      test->replaceSuccessor(1, ifFalse);
    }

    return true;
  }

  MOZ_ASSERT(ins->isGoto());
  ins->toGoto()->target()->removePredecessor(block);
  block->discardLastIns();

  MTest* test = MTest::New(alloc, value, ifTrue, ifFalse);
  block->end(test);

  if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
    return false;
  }
  if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
    return false;
  }
  return true;
}
/*
 * Look for a diamond pattern:
 *
 *        initialBlock
 *          /       \
 *  trueBranch  falseBranch
 *          \       /
 *          phiBlock
 *              |
 *          testBlock
 */
static bool IsDiamondPattern(MBasicBlock* initialBlock) {
  MInstruction* ins = initialBlock->lastIns();
  if (!ins->isTest()) {
    return false;
  }
  MTest* initialTest = ins->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  if (trueBranch->numPredecessors() != 1 || !trueBranch->lastIns()->isGoto()) {
    return false;
  }

  MBasicBlock* falseBranch = initialTest->ifFalse();
  if (falseBranch->numPredecessors() != 1 ||
      !falseBranch->lastIns()->isGoto()) {
    return false;
  }

  MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
  if (phiBlock != falseBranch->getSuccessor(0)) {
    return false;
  }
  if (phiBlock->numPredecessors() != 2) {
    return false;
  }
  return true;
}
static bool MaybeFoldDiamondConditionBlock(MIRGraph& graph,
                                           MBasicBlock* initialBlock) {
  MOZ_ASSERT(IsDiamondPattern(initialBlock));

  // Optimize the MIR graph to improve the code generated for conditional
  // operations. A test like 'if (a ? b : c)' normally requires four blocks,
  // with a phi for the intermediate value. This can be improved to use three
  // blocks with no phi value.

  /*
   * Look for a diamond pattern:
   *
   *        initialBlock
   *          /       \
   *  trueBranch  falseBranch
   *          \       /
   *          phiBlock
   *              |
   *          testBlock
   *
   * Where phiBlock contains a single phi combining values pushed onto the
   * stack by trueBranch and falseBranch, and testBlock contains a test on
   * that phi. phiBlock and testBlock may be the same block; generated code
   * will use different blocks if the (?:) op is in an inlined function.
   */

  MTest* initialTest = initialBlock->lastIns()->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();
  if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
      falseBranch->isLoopBackedge()) {
    return true;
  }

  MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    if (testBlock->isLoopBackedge()) {
      return true;
    }
    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MPhi* phi;
  MTest* finalTest;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  MOZ_ASSERT(phi->numOperands() == 2);

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  MDefinition* trueResult =
      phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
  MDefinition* falseResult =
      phi->getOperand(phiBlock->indexForPredecessor(falseBranch));

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Change the end of the block to a test that jumps directly to successors of
  // testBlock, rather than to testBlock itself.
  if (IsTestInputMaybeToBool(initialTest, trueResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(),
                             testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  if (IsTestInputMaybeToBool(initialTest, falseResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(),
                             testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}
/*
 * Look for a triangle pattern:
 *
 *        initialBlock
 *          /       \
 *  trueBranch       |
 *          \       /
 *     phiBlock+falseBranch
 *              |
 *          testBlock
 *
 * Or:
 *
 *        initialBlock
 *          /       \
 *         |    falseBranch
 *          \       /
 *     phiBlock+trueBranch
 *              |
 *          testBlock
 */
static bool IsTrianglePattern(MBasicBlock* initialBlock) {
  MInstruction* ins = initialBlock->lastIns();
  if (!ins->isTest()) {
    return false;
  }
  MTest* initialTest = ins->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();

  if (trueBranch->numSuccessors() == 1 &&
      trueBranch->getSuccessor(0) == falseBranch) {
    if (trueBranch->numPredecessors() != 1) {
      return false;
    }
    if (falseBranch->numPredecessors() != 2) {
      return false;
    }
    return true;
  }

  if (falseBranch->numSuccessors() == 1 &&
      falseBranch->getSuccessor(0) == trueBranch) {
    if (trueBranch->numPredecessors() != 2) {
      return false;
    }
    if (falseBranch->numPredecessors() != 1) {
      return false;
    }
    return true;
  }

  return false;
}
static bool MaybeFoldTriangleConditionBlock(MIRGraph& graph,
                                            MBasicBlock* initialBlock) {
  MOZ_ASSERT(IsTrianglePattern(initialBlock));

  // Optimize the MIR graph to improve the code generated for boolean
  // operations. A test like 'if (a && b)' normally requires three blocks, with
  // a phi for the intermediate value. This can be improved to use no phi
  // value.

  /*
   * Look for a triangle pattern:
   *
   *        initialBlock
   *          /       \
   *  trueBranch       |
   *          \       /
   *     phiBlock+falseBranch
   *              |
   *          testBlock
   *
   * Or:
   *
   *        initialBlock
   *          /       \
   *         |    falseBranch
   *          \       /
   *     phiBlock+trueBranch
   *              |
   *          testBlock
   *
   * Where phiBlock contains a single phi combining values pushed onto the
   * stack by trueBranch and falseBranch, and testBlock contains a test on
   * that phi. phiBlock and testBlock may be the same block; generated code
   * will use different blocks if the (&&) op is in an inlined function.
   */

  MTest* initialTest = initialBlock->lastIns()->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();
  if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
      falseBranch->isLoopBackedge()) {
    return true;
  }

  MBasicBlock* phiBlock;
  if (trueBranch->numSuccessors() == 1 &&
      trueBranch->getSuccessor(0) == falseBranch) {
    phiBlock = falseBranch;
  } else {
    MOZ_ASSERT(falseBranch->getSuccessor(0) == trueBranch);
    phiBlock = trueBranch;
  }

  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    MOZ_ASSERT(!testBlock->isLoopBackedge());
    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MPhi* phi;
  MTest* finalTest;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  MOZ_ASSERT(phi->numOperands() == 2);

  // If the phi-operand doesn't match the initial input, we can't fold the
  // test.
  auto* phiInputForInitialBlock =
      phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  if (!IsTestInputMaybeToBool(initialTest, phiInputForInitialBlock)) {
    return true;
  }

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  MDefinition* trueResult;
  MDefinition* falseResult;
  if (phiBlock == trueBranch) {
    trueResult = phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
    falseResult = phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
  } else {
    trueResult = phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
    falseResult = phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  }

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Change the end of the block to a test that jumps directly to successors of
  // testBlock, rather than to testBlock itself.
  if (phiBlock == trueBranch) {
    if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
                              finalTest->ifTrue(), initialTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  } else if (IsTestInputMaybeToBool(initialTest, trueResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(),
                             testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  if (phiBlock == falseBranch) {
    if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
                              initialTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  } else if (IsTestInputMaybeToBool(initialTest, falseResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(),
                             testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}
static bool MaybeFoldConditionBlock(MIRGraph& graph,
                                    MBasicBlock* initialBlock) {
  if (IsDiamondPattern(initialBlock)) {
    return MaybeFoldDiamondConditionBlock(graph, initialBlock);
  }
  if (IsTrianglePattern(initialBlock)) {
    return MaybeFoldTriangleConditionBlock(graph, initialBlock);
  }
  return true;
}
static bool MaybeFoldTestBlock(MIRGraph& graph, MBasicBlock* initialBlock) {
  // Handle test expressions on more than two inputs. For example
  // |if ((x > 10) && (y > 20) && (z > 30)) { ... }|, which results in the
  // below pattern.
  //
  // Look for the pattern (block contents abbreviated):
  //
  //   initialBlock: 1 compare; 2 test compare1
  //                   one edge continues the chain, the other goes to phiBlock
  //   next block:   3 compare; 4 test compare3
  //                   likewise
  //   last block:   5 compare; 6 goto phiBlock   (the sole goto predecessor)
  //
  //   phiBlock:     9 phi compare1 compare3 compare5
  //   testBlock:   10 test phi  --> TrueBranch / FalseBranch
  //
  // And transform it to:
  //
  //   initialBlock: 1 compare; 2 test compare1  --> next block / FalseBranch
  //   next block:   3 compare; 4 test compare3  --> last block / FalseBranch
  //   last block:   5 compare; 6 test compare5  --> TrueBranch / FalseBranch

  auto* ins = initialBlock->lastIns();
  if (!ins->isTest()) {
    return true;
  }
  auto* initialTest = ins->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();

  // MaybeFoldConditionBlock handles the case for two operands.
  MBasicBlock* phiBlock;
  if (trueBranch->numPredecessors() > 2) {
    phiBlock = trueBranch;
  } else if (falseBranch->numPredecessors() > 2) {
    phiBlock = falseBranch;
  } else {
    return true;
  }

  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    if (testBlock->isLoopBackedge()) {
      return true;
    }
    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MOZ_ASSERT(!phiBlock->isLoopBackedge());

  MPhi* phi = nullptr;
  MTest* finalTest = nullptr;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  MOZ_ASSERT(phiBlock->numPredecessors() == phi->numOperands());

  // If the phi-operand doesn't match the initial input, we can't fold the
  // test.
  auto* phiInputForInitialBlock =
      phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  if (!IsTestInputMaybeToBool(initialTest, phiInputForInitialBlock)) {
    return true;
  }

  MBasicBlock* newTestBlock = nullptr;
  MDefinition* newTestInput = nullptr;

  // The block of each phi operand must either end with a test instruction on
  // that phi operand, or be the sole block which ends with a goto instruction.
  for (size_t i = 0; i < phiBlock->numPredecessors(); i++) {
    auto* pred = phiBlock->getPredecessor(i);
    auto* operand = phi->getOperand(i);

    // Each predecessor must end with either a test or goto instruction.
    auto* lastIns = pred->lastIns();
    if (lastIns->isGoto() && !newTestBlock) {
      newTestBlock = pred;
      newTestInput = operand;
    } else if (lastIns->isTest()) {
      if (!IsTestInputMaybeToBool(lastIns->toTest(), operand)) {
        return true;
      }
    } else {
      return true;
    }

    MOZ_ASSERT(!pred->isLoopBackedge());
  }

  // Ensure we found the single goto block.
  if (!newTestBlock) {
    return true;
  }

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Create the new test instruction.
  if (!UpdateTestSuccessors(graph.alloc(), newTestBlock, newTestInput,
                            finalTest->ifTrue(), finalTest->ifFalse(),
                            testBlock)) {
    return false;
  }

  // Update all test instructions to point to the final target.
  while (phiBlock->numPredecessors()) {
    mozilla::DebugOnly<size_t> oldNumPred = phiBlock->numPredecessors();

    auto* pred = phiBlock->getPredecessor(0);
    auto* test = pred->lastIns()->toTest();
    if (test->ifTrue() == phiBlock) {
      if (!UpdateTestSuccessors(graph.alloc(), pred, test->input(),
                                finalTest->ifTrue(), test->ifFalse(),
                                testBlock)) {
        return false;
      }
    } else {
      MOZ_ASSERT(test->ifFalse() == phiBlock);
      if (!UpdateTestSuccessors(graph.alloc(), pred, test->input(),
                                test->ifTrue(), finalTest->ifFalse(),
                                testBlock)) {
        return false;
      }
    }

    // Ensure we've made progress.
    MOZ_ASSERT(phiBlock->numPredecessors() + 1 == oldNumPred);
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}
bool jit::FoldTests(MIRGraph& graph) {
  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       block++) {
    if (!MaybeFoldConditionBlock(graph, *block)) {
      return false;
    }
    if (!MaybeFoldTestBlock(graph, *block)) {
      return false;
    }
  }
  return true;
}
bool jit::FoldEmptyBlocks(MIRGraph& graph) {
  for (MBasicBlockIterator iter(graph.begin()); iter != graph.end();) {
    MBasicBlock* block = *iter;
    iter++;

    if (block->numPredecessors() != 1 || block->numSuccessors() != 1) {
      continue;
    }

    if (!block->phisEmpty()) {
      continue;
    }

    if (block->outerResumePoint()) {
      continue;
    }

    if (*block->begin() != *block->rbegin()) {
      continue;
    }

    MBasicBlock* succ = block->getSuccessor(0);
    MBasicBlock* pred = block->getPredecessor(0);

    if (succ->numPredecessors() != 1) {
      continue;
    }

    size_t pos = pred->getSuccessorIndex(block);
    pred->lastIns()->replaceSuccessor(pos, succ);

    graph.removeBlock(block);

    if (!succ->addPredecessorSameInputsAs(pred, block)) {
      return false;
    }
    succ->removePredecessor(block);
  }
  return true;
}
static void EliminateTriviallyDeadResumePointOperands(MIRGraph& graph,
                                                      MResumePoint* rp) {
  // If we will pop the top of the stack immediately after resuming,
  // then don't preserve the top value in the resume point.
  if (rp->mode() != ResumeMode::ResumeAt) {
    return;
  }

  jsbytecode* pc = rp->pc();
  if (JSOp(*pc) == JSOp::JumpTarget) {
    pc += JSOpLength_JumpTarget;
  }
  if (JSOp(*pc) != JSOp::Pop) {
    return;
  }

  size_t top = rp->stackDepth() - 1;
  MOZ_ASSERT(!rp->isObservableOperand(top));

  MDefinition* def = rp->getOperand(top);
  if (def->isConstant()) {
    return;
  }

  MConstant* constant = rp->block()->optimizedOutConstant(graph.alloc());
  rp->replaceOperand(top, constant);
}
// Operands to a resume point which are dead at the point of the resume can be
// replaced with a magic value. This pass only replaces resume points which are
// trivially dead.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool jit::EliminateTriviallyDeadResumePointOperands(MIRGenerator* mir,
                                                    MIRGraph& graph) {
  for (auto* block : graph) {
    if (MResumePoint* rp = block->entryResumePoint()) {
      if (!graph.alloc().ensureBallast()) {
        return false;
      }
      ::EliminateTriviallyDeadResumePointOperands(graph, rp);
    }
  }
  return true;
}
// Operands to a resume point which are dead at the point of the resume can be
// replaced with a magic value. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool jit::EliminateDeadResumePointOperands(MIRGenerator* mir, MIRGraph& graph) {
  // If we are compiling try blocks, locals and arguments may be observable
  // from catch or finally blocks (which Ion does not compile). For now just
  // disable the pass in this case.
  if (graph.hasTryBlock()) {
    return true;
  }

  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)")) {
      return false;
    }

    if (MResumePoint* rp = block->entryResumePoint()) {
      if (!graph.alloc().ensureBallast()) {
        return false;
      }
      ::EliminateTriviallyDeadResumePointOperands(graph, rp);
    }

    // The logic below can get confused on infinite loops.
    if (block->isLoopHeader() && block->backedge() == *block) {
      continue;
    }

    for (MInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      if (MResumePoint* rp = ins->resumePoint()) {
        if (!graph.alloc().ensureBallast()) {
          return false;
        }
        ::EliminateTriviallyDeadResumePointOperands(graph, rp);
      }

      // No benefit to replacing constant operands with other constants.
      if (ins->isConstant()) {
        continue;
      }

      // Scanning uses does not give us sufficient information to tell
      // where instructions that are involved in box/unbox operations or
      // parameter passing might be live. Rewriting uses of these terms
      // in resume points may affect the interpreter's behavior. Rather
      // than doing a more sophisticated analysis, just ignore these.
      if (ins->isUnbox() || ins->isParameter() || ins->isBoxNonStrictThis()) {
        continue;
      }

      // Early intermediate values captured by resume points, such as
      // ArrayState and its allocation, may be legitimately dead in Ion code,
      // but are still needed if we bail out. They can recover on bailout.
      if (ins->isRecoveredOnBailout()) {
        MOZ_ASSERT(ins->canRecoverOnBailout());
        continue;
      }

      // If the instruction's behavior has been constant folded into a
      // separate instruction, we can't determine precisely where the
      // instruction becomes dead and can't eliminate its uses.
      if (ins->isImplicitlyUsed()) {
        continue;
      }

      // Check if this instruction's result is only used within the
      // current block, and keep track of its last use in a definition
      // (not resume point). This requires the instructions in the block
      // to be numbered, ensured by running this immediately after alias
      // analysis.
      uint32_t maxDefinition = 0;
      for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();
           uses++) {
        MNode* consumer = uses->consumer();
        if (consumer->isResumePoint()) {
          // If the instruction is captured by one of the resume points, then
          // it might be observed indirectly while the frame is live on the
          // stack, so it has to be computed.
          MResumePoint* resume = consumer->toResumePoint();
          if (resume->isObservableOperand(*uses)) {
            maxDefinition = UINT32_MAX;
            break;
          }
          continue;
        }

        MDefinition* def = consumer->toDefinition();
        if (def->block() != *block || def->isBox() || def->isPhi()) {
          maxDefinition = UINT32_MAX;
          break;
        }
        maxDefinition = std::max(maxDefinition, def->id());
      }
      if (maxDefinition == UINT32_MAX) {
        continue;
      }

      // Walk the uses a second time, removing any in resume points after
      // the last use in a definition.
      for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();) {
        MUse* use = *uses++;
        if (use->consumer()->isDefinition()) {
          continue;
        }
        MResumePoint* mrp = use->consumer()->toResumePoint();
        if (mrp->block() != *block || !mrp->instruction() ||
            mrp->instruction() == *ins ||
            mrp->instruction()->id() <= maxDefinition) {
          continue;
        }

        if (!graph.alloc().ensureBallast()) {
          return false;
        }

        // Store an optimized out magic value in place of all dead
        // resume point operands. Making any such substitution can in
        // general alter the interpreter's behavior, even though the
        // code is dead, as the interpreter will still execute opcodes
        // whose effects cannot be observed. If the magic value
        // were to flow to, say, a dead property access the
        // interpreter could throw an exception; we avoid this problem
        // by removing dead operands before removing dead code.
        MConstant* constant =
            MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
        block->insertBefore(*(block->begin()), constant);
        use->replaceProducer(constant);
      }
    }
  }

  return true;
}
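
// Illustrative sketch (not from the original sources; the exact observability
// rules live in CompileInfo::isObservableSlot): in
//
//   var t = x + y;   // instruction 20
//   f(t);            // last real use of t, instruction 25
//   g();             // resume point here still captures t
//
// the resume point attached to |g()| lies after t's last definition use in
// the block, so its slot for t is rewritten to JS_OPTIMIZED_OUT instead of
// keeping the add alive.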
// Test whether |def| would be needed if it had no uses.
bool js::jit::DeadIfUnused(const MDefinition* def) {
  // Effectful instructions of course cannot be removed.
  if (def->isEffectful()) {
    return false;
  }

  // Never eliminate guard instructions.
  if (def->isGuard()) {
    return false;
  }

  // Required to be preserved, as the type guard related to this instruction
  // is part of the semantics of a transformation.
  if (def->isGuardRangeBailouts()) {
    return false;
  }

  // Control instructions have no uses, but also shouldn't be optimized out.
  if (def->isControlInstruction()) {
    return false;
  }

  // Used when lowering to generate the corresponding snapshots and aggregate
  // the list of recover instructions to be repeated.
  if (def->isInstruction() && def->toInstruction()->resumePoint()) {
    return false;
  }

  return true;
}
// Similar to DeadIfUnused(), but additionally allows effectful instructions.
bool js::jit::DeadIfUnusedAllowEffectful(const MDefinition* def) {
  // Never eliminate guard instructions.
  if (def->isGuard()) {
    return false;
  }

  // Required to be preserved, as the type guard related to this instruction
  // is part of the semantics of a transformation.
  if (def->isGuardRangeBailouts()) {
    return false;
  }

  // Control instructions have no uses, but also shouldn't be optimized out.
  if (def->isControlInstruction()) {
    return false;
  }

  // Used when lowering to generate the corresponding snapshots and aggregate
  // the list of recover instructions to be repeated.
  if (def->isInstruction() && def->toInstruction()->resumePoint()) {
    // All effectful instructions must have a resume point attached. We're
    // allowing effectful instructions here, so we have to ignore any resume
    // points if we want to consider effectful instructions as dead.
    if (!def->isEffectful()) {
      return false;
    }
  }

  return true;
}
// Test whether |def| may be safely discarded, due to being dead or due to
// being located in a basic block which has itself been marked for discarding.
bool js::jit::IsDiscardable(const MDefinition* def) {
  return !def->hasUses() && (DeadIfUnused(def) || def->block()->isMarked());
}

// Similar to IsDiscardable(), but additionally allows effectful instructions.
bool js::jit::IsDiscardableAllowEffectful(const MDefinition* def) {
  return !def->hasUses() &&
         (DeadIfUnusedAllowEffectful(def) || def->block()->isMarked());
}
// Instructions are useless if they are unused and have no side effects.
// This pass eliminates useless instructions.
// The graph itself is unchanged.
bool jit::EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph) {
  // Traverse in postorder so that we hit uses before definitions.
  // Traverse instruction list backwards for the same reason.
  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Eliminate Dead Code (main loop)")) {
      return false;
    }

    // Remove unused instructions.
    for (MInstructionReverseIterator iter = block->rbegin();
         iter != block->rend();) {
      MInstruction* inst = *iter++;
      if (js::jit::IsDiscardable(inst)) {
        block->discard(inst);
      }
    }
  }

  return true;
}
static inline bool IsPhiObservable(MPhi* phi, Observability observe) {
  // If the phi has uses which are not reflected in SSA, then behavior in the
  // interpreter may be affected by removing the phi.
  if (phi->isImplicitlyUsed()) {
    return true;
  }

  // Check for uses of this phi node outside of other phi nodes.
  // Note that, initially, we skip reading resume points, which we
  // don't count as actual uses. If the only uses are resume points,
  // then the SSA name is never consumed by the program. However,
  // after optimizations have been performed, it's possible that the
  // actual uses in the program have been (incorrectly) optimized
  // away, so we must be more conservative and consider resume
  // points as well.
  for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
    MNode* consumer = iter->consumer();
    if (consumer->isResumePoint()) {
      MResumePoint* resume = consumer->toResumePoint();
      if (observe == ConservativeObservability) {
        return true;
      }
      if (resume->isObservableOperand(*iter)) {
        return true;
      }
    } else {
      MDefinition* def = consumer->toDefinition();
      if (!def->isPhi()) {
        return true;
      }
    }
  }

  return false;
}
// Handles cases like:
//   x is phi(a, x) --> a
//   x is phi(a, a) --> a
static inline MDefinition* IsPhiRedundant(MPhi* phi) {
  MDefinition* first = phi->operandIfRedundant();
  if (first == nullptr) {
    return nullptr;
  }

  // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
  if (phi->isImplicitlyUsed()) {
    first->setImplicitlyUsedUnchecked();
  }

  return first;
}
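
// Illustrative sketch (not from the original sources): a loop that never
// rewrites |x| produces x1 = phi(x0, x1) at the header, which
// operandIfRedundant() collapses to x0:
//
//   var x = init();
//   while (c) { use(x); }   // the header phi only merges x0 with itself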
bool jit::EliminatePhis(MIRGenerator* mir, MIRGraph& graph,
                        Observability observe) {
  // Eliminates redundant or unobservable phis from the graph. A
  // redundant phi is something like b = phi(a, a) or b = phi(a, b),
  // both of which can be replaced with a. An unobservable phi is
  // one whose value is never used in the program.
  //
  // Note that we must be careful not to eliminate phis representing
  // values that the interpreter will require later. When the graph
  // is first constructed, we can be more aggressive, because there
  // is a greater correspondence between the CFG and the bytecode.
  // After optimizations such as GVN have been performed, however,
  // the bytecode and CFG may not correspond as closely to one
  // another. In that case, we must be more conservative. The flag
  // |conservativeObservability| is used to indicate that eliminate
  // phis is being run after some optimizations have been performed,
  // and thus we should use more conservative rules about
  // observability. The particular danger is that we can optimize
  // away uses of a phi because we think they are not executable,
  // but the foundation for that assumption is false TI information
  // that will eventually be invalidated. Therefore, if
  // |conservativeObservability| is set, we will consider any use
  // from a resume point to be observable. Otherwise, we demand a
  // use from an actual instruction.

  Vector<MPhi*, 16, SystemAllocPolicy> worklist;

  // Add all observable phis to a worklist. We use the "in worklist" bit to
  // mean "this phi is live".
  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    MPhiIterator iter = block->phisBegin();
    while (iter != block->phisEnd()) {
      MPhi* phi = *iter++;

      if (mir->shouldCancel("Eliminate Phis (populate loop)")) {
        return false;
      }

      // Flag all as unused, only observable phis would be marked as used
      // when processed by the work list.
      phi->setUnusedUnchecked();

      // If the phi is redundant, remove it here.
      if (MDefinition* redundant = IsPhiRedundant(phi)) {
        phi->justReplaceAllUsesWith(redundant);
        block->discardPhi(phi);
        continue;
      }

      // Enqueue observable Phis.
      if (IsPhiObservable(phi, observe)) {
        phi->setInWorklist();
        if (!worklist.append(phi)) {
          return false;
        }
      }
    }
  }

  // Iteratively mark all phis reachable from live phis.
  while (!worklist.empty()) {
    if (mir->shouldCancel("Eliminate Phis (worklist)")) {
      return false;
    }

    MPhi* phi = worklist.popCopy();
    MOZ_ASSERT(phi->isUnused());
    phi->setNotInWorklist();

    // The removal of Phis can produce newly redundant phis.
    if (MDefinition* redundant = IsPhiRedundant(phi)) {
      // Add to the worklist the used phis which are impacted.
      for (MUseDefIterator it(phi); it; it++) {
        if (it.def()->isPhi()) {
          MPhi* use = it.def()->toPhi();
          if (!use->isUnused()) {
            use->setUnusedUnchecked();
            use->setInWorklist();
            if (!worklist.append(use)) {
              return false;
            }
          }
        }
      }
      phi->justReplaceAllUsesWith(redundant);
    } else {
      // Otherwise flag them as used.
      phi->setNotUnused();
    }

    // The current phi is/was used, so all its operands are used.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
      MDefinition* in = phi->getOperand(i);
      if (!in->isPhi() || !in->isUnused() || in->isInWorklist()) {
        continue;
      }
      in->setInWorklist();
      if (!worklist.append(in->toPhi())) {
        return false;
      }
    }
  }

  // Sweep the remaining unused phis.
  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    MPhiIterator iter = block->phisBegin();
    while (iter != block->phisEnd()) {
      MPhi* phi = *iter++;
      if (phi->isUnused()) {
        if (!phi->optimizeOutAllUses(graph.alloc())) {
          return false;
        }
        block->discardPhi(phi);
      }
    }
  }

  return true;
}
// The type analysis algorithm inserts conversions and box/unbox instructions
// to make the IR graph well-typed for future passes.
//
// Phi adjustment: If a phi's inputs are all the same type, the phi is
// specialized to return that type.
//
// Input adjustment: Each input is asked to apply conversion operations to its
// inputs. This may include Box, Unbox, or other instruction-specific type
// conversion operations.
//
namespace {

class TypeAnalyzer {
  MIRGenerator* mir;
  MIRGraph& graph;
  Vector<MPhi*, 0, SystemAllocPolicy> phiWorklist_;

  TempAllocator& alloc() const { return graph.alloc(); }

  bool addPhiToWorklist(MPhi* phi) {
    if (phi->isInWorklist()) {
      return true;
    }
    if (!phiWorklist_.append(phi)) {
      return false;
    }
    phi->setInWorklist();
    return true;
  }
  MPhi* popPhi() {
    MPhi* phi = phiWorklist_.popCopy();
    phi->setNotInWorklist();
    return phi;
  }

  [[nodiscard]] bool propagateAllPhiSpecializations();

  bool respecialize(MPhi* phi, MIRType type);
  bool propagateSpecialization(MPhi* phi);
  bool specializePhis();
  bool specializeOsrOnlyPhis();
  void replaceRedundantPhi(MPhi* phi);
  bool adjustPhiInputs(MPhi* phi);
  bool adjustInputs(MDefinition* def);
  bool insertConversions();

  bool checkFloatCoherency();
  bool graphContainsFloat32();
  bool markPhiConsumers();
  bool markPhiProducers();
  bool specializeValidFloatOps();
  bool tryEmitFloatOperations();
  bool propagateUnbox();

  bool shouldSpecializeOsrPhis() const;
  MIRType guessPhiType(MPhi* phi) const;

 public:
  TypeAnalyzer(MIRGenerator* mir, MIRGraph& graph) : mir(mir), graph(graph) {}

  bool analyze();
};

} /* anonymous namespace */
bool TypeAnalyzer::shouldSpecializeOsrPhis() const {
  // [SMDOC] OSR Phi Type Specialization
  //
  // Without special handling for OSR phis, we end up with unspecialized phis
  // (MIRType::Value) in the loop (pre)header and other blocks, resulting in
  // unnecessary boxing and unboxing in the loop body.
  //
  // To fix this, phi type specialization needs special code to deal with the
  // OSR entry block. Recall that OSR results in the following basic block
  // structure:
  //
  //  +------------------+                 +-----------------+
  //  | Code before loop |                 | OSR entry block |
  //  +------------------+                 +-----------------+
  //          |                                     |
  //          |        +---------------+            |
  //          +------->| OSR preheader |<-----------+
  //                   +---------------+
  //                           |
  //                   +---------------+
  //                   | Loop header   |<-----+
  //                   +---------------+      |
  //                           |              |
  //                          ...             |
  //                           |              |
  //                   +---------------+      |
  //                   | Loop backedge |------+
  //                   +---------------+
  //
  // OSR phi specialization happens in three steps:
  //
  // (1) Specialize phis but ignore MOsrValue phi inputs. In other words,
  //     pretend the OSR entry block doesn't exist. See guessPhiType.
  //
  // (2) Once phi specialization is done, look at the types of loop header phis
  //     and add these types to the corresponding preheader phis. This way, the
  //     types of the preheader phis are based on the code before the loop and
  //     the code in the loop body. These are exactly the types we expect for
  //     the OSR Values. See the last part of TypeAnalyzer::specializePhis.
  //
  // (3) For type-specialized preheader phis, add guard/unbox instructions to
  //     the OSR entry block to guard the incoming Value indeed has this type.
  //     So either:
  //
  //     * TypeAnalyzer::adjustPhiInputs: adds a fallible unbox for values that
  //       can be unboxed.
  //
  //     * TypeAnalyzer::replaceRedundantPhi: adds a type guard for values that
  //       can't be unboxed (null/undefined/magic Values).
  if (!mir->graph().osrBlock()) {
    return false;
  }

  return !mir->outerInfo().hadSpeculativePhiBailout();
}
// Try to specialize this phi based on its non-cyclic inputs.
MIRType TypeAnalyzer::guessPhiType(MPhi* phi) const {
#ifdef DEBUG
  // Check that different magic constants aren't flowing together. Ignore
  // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
  // away.
  MIRType magicType = MIRType::None;
  for (size_t i = 0; i < phi->numOperands(); i++) {
    MDefinition* in = phi->getOperand(i);
    if (in->type() == MIRType::MagicHole ||
        in->type() == MIRType::MagicIsConstructing) {
      if (magicType == MIRType::None) {
        magicType = in->type();
      }
      MOZ_ASSERT(magicType == in->type());
    }
  }
#endif

  MIRType type = MIRType::None;
  bool convertibleToFloat32 = false;
  bool hasOSRValueInput = false;
  DebugOnly<bool> hasSpecializableInput = false;
  for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
    MDefinition* in = phi->getOperand(i);
    if (in->isPhi()) {
      hasSpecializableInput = true;
      if (!in->toPhi()->triedToSpecialize()) {
        continue;
      }
      if (in->type() == MIRType::None) {
        // The operand is a phi we tried to specialize, but we were
        // unable to guess its type. propagateSpecialization will
        // propagate the type to this phi when it becomes known.
        continue;
      }
    }

    // See shouldSpecializeOsrPhis comment. This is the first step mentioned
    // there.
    if (shouldSpecializeOsrPhis() && in->isOsrValue()) {
      hasOSRValueInput = true;
      hasSpecializableInput = true;
      continue;
    }

    if (type == MIRType::None) {
      type = in->type();
      if (in->canProduceFloat32() &&
          !mir->outerInfo().hadSpeculativePhiBailout()) {
        convertibleToFloat32 = true;
      }
      continue;
    }

    if (type == in->type()) {
      convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
    } else {
      if (convertibleToFloat32 && in->type() == MIRType::Float32) {
        // If we only saw definitions that can be converted into Float32 before
        // and encounter a Float32 value, promote previous values to Float32.
        type = MIRType::Float32;
      } else if (IsTypeRepresentableAsDouble(type) &&
                 IsTypeRepresentableAsDouble(in->type())) {
        // Specialize phis with int32 and double operands as double.
        type = MIRType::Double;
        convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
      } else {
        return MIRType::Value;
      }
    }
  }

  if (hasOSRValueInput && type == MIRType::Float32) {
    // TODO(post-Warp): simplify float32 handling in this function or (better)
    // make the float32 analysis a stand-alone optimization pass instead of
    // complicating type analysis. See bug 1655773.
    type = MIRType::Double;
  }

  MOZ_ASSERT_IF(type == MIRType::None, hasSpecializableInput);
  return type;
}
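
// Illustrative sketch (not from the original sources): merging an Int32 with
// a Double guesses Double, since both are representable as double:
//
//   var v = cond ? 1 : 0.5;   // phi(Int32, Double) -> MIRType::Double
//
// whereas merging an Int32 with a String falls back to MIRType::Value.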
bool TypeAnalyzer::respecialize(MPhi* phi, MIRType type) {
  if (phi->type() == type) {
    return true;
  }
  phi->specialize(type);
  return addPhiToWorklist(phi);
}

bool TypeAnalyzer::propagateSpecialization(MPhi* phi) {
  MOZ_ASSERT(phi->type() != MIRType::None);

  // Verify that this specialization matches any phis depending on it.
  for (MUseDefIterator iter(phi); iter; iter++) {
    if (!iter.def()->isPhi()) {
      continue;
    }
    MPhi* use = iter.def()->toPhi();
    if (!use->triedToSpecialize()) {
      continue;
    }
    if (use->type() == MIRType::None) {
      // We tried to specialize this phi, but were unable to guess its
      // type. Now that we know the type of one of its operands, we can
      // specialize it. If it can't be specialized as float32, specialize
      // it as double.
      MIRType type = phi->type();
      if (type == MIRType::Float32 && !use->canProduceFloat32()) {
        type = MIRType::Double;
      }
      if (!respecialize(use, type)) {
        return false;
      }
      continue;
    }
    if (use->type() != phi->type()) {
      // Specialize phis with int32 that can be converted to float and float
      // operands as floats.
      if ((use->type() == MIRType::Int32 && use->canProduceFloat32() &&
           phi->type() == MIRType::Float32) ||
          (phi->type() == MIRType::Int32 && phi->canProduceFloat32() &&
           use->type() == MIRType::Float32)) {
        if (!respecialize(use, MIRType::Float32)) {
          return false;
        }
        continue;
      }

      // Specialize phis with int32 and double operands as double.
      if (IsTypeRepresentableAsDouble(use->type()) &&
          IsTypeRepresentableAsDouble(phi->type())) {
        if (!respecialize(use, MIRType::Double)) {
          return false;
        }
        continue;
      }

      // This phi in our use chain can now no longer be specialized.
      if (!respecialize(use, MIRType::Value)) {
        return false;
      }
    }
  }

  return true;
}
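
// [Illustrative example, added for exposition; not part of the original
// source.] Respecialization ripples through chains of dependent phis:
//
//   function g(b, c) {
//     var a = b ? 1 : 2;     // phi A specialized as Int32
//     var d = c ? a : 0.5;   // phi B merges A (Int32) with a Double, so B
//     return d;              // is specialized as Double; had |a| been an
//   }                        // object, B would fall back to Value.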

bool TypeAnalyzer::propagateAllPhiSpecializations() {
  while (!phiWorklist_.empty()) {
    if (mir->shouldCancel("Specialize Phis (worklist)")) {
      return false;
    }

    MPhi* phi = popPhi();
    if (!propagateSpecialization(phi)) {
      return false;
    }
  }

  return true;
}

// If branch pruning removes the path from the entry block to the OSR
// preheader, we may have phis (or chains of phis) with no operands
// other than OsrValues. These phis will still have MIRType::None.
// Since we don't have any information about them, we specialize them
// as MIRType::Value.
bool TypeAnalyzer::specializeOsrOnlyPhis() {
  MOZ_ASSERT(graph.osrBlock());
  MOZ_ASSERT(graph.osrPreHeaderBlock()->numPredecessors() == 1);

  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Specialize osr-only phis (main loop)")) {
      return false;
    }

    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();
         phi++) {
      if (mir->shouldCancel("Specialize osr-only phis (inner loop)")) {
        return false;
      }

      if (phi->type() == MIRType::None) {
        phi->specialize(MIRType::Value);
      }
    }
  }
  return true;
}

bool TypeAnalyzer::specializePhis() {
  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Specialize Phis (main loop)")) {
      return false;
    }

    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();
         phi++) {
      if (mir->shouldCancel("Specialize Phis (inner loop)")) {
        return false;
      }

      MIRType type = guessPhiType(*phi);
      phi->specialize(type);
      if (type == MIRType::None) {
        // We tried to guess the type but failed because all operands are
        // phis we still have to visit. Set the triedToSpecialize flag but
        // don't propagate the type to other phis, propagateSpecialization
        // will do that once we know the type of one of the operands.
        continue;
      }
      if (!propagateSpecialization(*phi)) {
        return false;
      }
    }
  }

  if (!propagateAllPhiSpecializations()) {
    return false;
  }

  if (shouldSpecializeOsrPhis()) {
    // See shouldSpecializeOsrPhis comment. This is the second step,
    // propagating loop header phi types to preheader phis.
    MBasicBlock* preHeader = graph.osrPreHeaderBlock();
    MBasicBlock* header = preHeader->getSingleSuccessor();

    if (preHeader->numPredecessors() == 1) {
      MOZ_ASSERT(preHeader->getPredecessor(0) == graph.osrBlock());
      // Branch pruning has removed the path from the entry block
      // to the preheader. Specialize any phis with no non-osr inputs.
      if (!specializeOsrOnlyPhis()) {
        return false;
      }
    } else if (header->isLoopHeader()) {
      for (MPhiIterator phi(header->phisBegin()); phi != header->phisEnd();
           phi++) {
        MPhi* preHeaderPhi = phi->getOperand(0)->toPhi();
        MOZ_ASSERT(preHeaderPhi->block() == preHeader);

        if (preHeaderPhi->type() == MIRType::Value) {
          // Already includes everything.
          continue;
        }

        MIRType loopType = phi->type();
        if (!respecialize(preHeaderPhi, loopType)) {
          return false;
        }
      }
      if (!propagateAllPhiSpecializations()) {
        return false;
      }
    } else {
      // Edge case: there is no backedge in this loop. This can happen
      // if the header is a 'pending' loop header when control flow in
      // the loop body is terminated unconditionally, or if a block
      // that dominates the backedge unconditionally bails out. In
      // this case the header only has the preheader as predecessor
      // and we don't need to do anything.
      MOZ_ASSERT(header->numPredecessors() == 1);
    }
  }

  MOZ_ASSERT(phiWorklist_.empty());
  return true;
}

bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
  MIRType phiType = phi->type();
  MOZ_ASSERT(phiType != MIRType::None);

  // If we specialized a type that's not Value, there are 3 cases:
  // 1. Every input is of that type.
  // 2. Every observed input is of that type (i.e., some inputs haven't been
  //    executed yet).
  // 3. Inputs were doubles and int32s, and the phi was specialized as double.
  if (phiType != MIRType::Value) {
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
      MDefinition* in = phi->getOperand(i);
      if (in->type() == phiType) {
        continue;
      }

      if (!alloc().ensureBallast()) {
        return false;
      }

      if (in->isBox() && in->toBox()->input()->type() == phiType) {
        phi->replaceOperand(i, in->toBox()->input());
      } else {
        MInstruction* replacement;
        MBasicBlock* predecessor = phi->block()->getPredecessor(i);

        if (phiType == MIRType::Double && IsFloatType(in->type())) {
          // Convert int32 operands to double.
          replacement = MToDouble::New(alloc(), in);
        } else if (phiType == MIRType::Float32) {
          if (in->type() == MIRType::Int32 || in->type() == MIRType::Double) {
            replacement = MToFloat32::New(alloc(), in);
          } else {
            // See comment below.
            if (in->type() != MIRType::Value) {
              MBox* box = MBox::New(alloc(), in);
              predecessor->insertAtEnd(box);
              in = box;
            }

            MUnbox* unbox =
                MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
            unbox->setBailoutKind(BailoutKind::SpeculativePhi);
            predecessor->insertAtEnd(unbox);
            replacement = MToFloat32::New(alloc(), in);
          }
        } else {
          // If we know this branch will fail to convert to phiType,
          // insert a box that'll immediately fail in the fallible unbox
          // below.
          if (in->type() != MIRType::Value) {
            MBox* box = MBox::New(alloc(), in);
            predecessor->insertAtEnd(box);
            in = box;
          }

          // Be optimistic and insert unboxes when the operand is a
          // value.
          replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
        }

        replacement->setBailoutKind(BailoutKind::SpeculativePhi);
        predecessor->insertAtEnd(replacement);
        phi->replaceOperand(i, replacement);
      }
    }

    return true;
  }

  // Box every typed input.
  for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
    MDefinition* in = phi->getOperand(i);
    if (in->type() == MIRType::Value) {
      continue;
    }

    // The input is being explicitly unboxed, so sneak past and grab the
    // original box. Don't bother optimizing if magic values are involved.
    if (in->isUnbox()) {
      MDefinition* unboxInput = in->toUnbox()->input();
      if (!IsMagicType(unboxInput->type()) && phi->typeIncludes(unboxInput)) {
        in = in->toUnbox()->input();
      }
    }

    if (in->type() != MIRType::Value) {
      if (!alloc().ensureBallast()) {
        return false;
      }

      MBasicBlock* pred = phi->block()->getPredecessor(i);
      in = AlwaysBoxAt(alloc(), pred->lastIns(), in);
    }

    phi->replaceOperand(i, in);
  }

  return true;
}
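
// [Illustrative example, added for exposition; not part of the original
// source.] For a phi specialized as MIRType::Double with an Int32 operand,
// a MToDouble conversion is inserted at the end of the corresponding
// predecessor block; for a phi left as MIRType::Value, each typed operand
// is boxed instead:
//
//   function h(b) {
//     var x = b ? 1 : {};   // phi(Int32, Object) stays MIRType::Value;
//     return x;             // both operands get boxed in their
//   }                       // predecessor blocks.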

bool TypeAnalyzer::adjustInputs(MDefinition* def) {
  // Definitions such as MPhi have no type policy.
  if (!def->isInstruction()) {
    return true;
  }

  MInstruction* ins = def->toInstruction();
  const TypePolicy* policy = ins->typePolicy();
  if (policy && !policy->adjustInputs(alloc(), ins)) {
    return false;
  }
  return true;
}

void TypeAnalyzer::replaceRedundantPhi(MPhi* phi) {
  MBasicBlock* block = phi->block();
  Value v;
  switch (phi->type()) {
    case MIRType::Undefined:
      v = UndefinedValue();
      break;
    case MIRType::Null:
      v = NullValue();
      break;
    case MIRType::MagicOptimizedOut:
      v = MagicValue(JS_OPTIMIZED_OUT);
      break;
    case MIRType::MagicUninitializedLexical:
      v = MagicValue(JS_UNINITIALIZED_LEXICAL);
      break;
    case MIRType::MagicIsConstructing:
      v = MagicValue(JS_IS_CONSTRUCTING);
      break;
    case MIRType::MagicHole:
    default:
      MOZ_CRASH("unexpected type");
  }
  MConstant* c = MConstant::New(alloc(), v);
  // The instruction pass will insert the box.
  block->insertBefore(*(block->begin()), c);
  phi->justReplaceAllUsesWith(c);

  if (shouldSpecializeOsrPhis()) {
    // See shouldSpecializeOsrPhis comment. This is part of the third step,
    // guard the incoming MOsrValue is of this type.
    for (uint32_t i = 0; i < phi->numOperands(); i++) {
      MDefinition* def = phi->getOperand(i);
      if (def->type() != phi->type()) {
        MOZ_ASSERT(def->isOsrValue() || def->isPhi());
        MOZ_ASSERT(def->type() == MIRType::Value);
        MGuardValue* guard = MGuardValue::New(alloc(), def, v);
        guard->setBailoutKind(BailoutKind::SpeculativePhi);
        def->block()->insertBefore(def->block()->lastIns(), guard);
      }
    }
  }
}
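
// [Illustrative example, added for exposition; not part of the original
// source.] A phi whose type pins down its value, e.g. one merging
// |undefined| from every predecessor, carries no information and is
// replaced by a single MConstant at the top of its block:
//
//   function k(b) {
//     var x;
//     if (b) { x = undefined; } else { x = undefined; }
//     return x;   // phi(Undefined, Undefined) -> constant undefined
//   }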

bool TypeAnalyzer::insertConversions() {
  // Instructions are processed in reverse postorder: all defs are
  // seen before uses. This ensures that output adjustment (which may rewrite
  // inputs of uses) does not conflict with input adjustment.
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); block++) {
    if (mir->shouldCancel("Insert Conversions")) {
      return false;
    }

    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
         iter != end;) {
      MPhi* phi = *iter++;
      if (IsNullOrUndefined(phi->type()) || IsMagicType(phi->type())) {
        // We can replace this phi with a constant.
        if (!alloc().ensureBallast()) {
          return false;
        }
        replaceRedundantPhi(phi);
        block->discardPhi(phi);
      } else {
        if (!adjustPhiInputs(phi)) {
          return false;
        }
      }
    }

    // AdjustInputs can add/remove/mutate instructions before and after the
    // current instruction. Only increment the iterator after it is finished.
    for (MInstructionIterator iter(block->begin()); iter != block->end();
         iter++) {
      if (!alloc().ensureBallast()) {
        return false;
      }

      if (!adjustInputs(*iter)) {
        return false;
      }
    }
  }
  return true;
}

/* clang-format off */
//
// This function tries to emit Float32 specialized operations whenever it's possible.
// MIR nodes are flagged as:
// - Producers, when they can create Float32 that might need to be coerced into a Double.
//   Loads in Float32 arrays and conversions to Float32 are producers.
// - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
//   Stores in Float32 arrays and conversions to Float32 are consumers.
// - Float32 commutative, when using the Float32 instruction instead of the Double instruction
//   does not result in a compound loss of precision. This is the case for +, -, /, * with 2
//   operands, for instance. However, an addition with 3 operands is not commutative anymore,
//   so an intermediate coercion is needed.
// Except for phis, all these flags are known after Ion building, so they cannot change during
// the process.
//
// The idea behind the algorithm is easy: whenever we can prove that a commutative operation
// has only producers as inputs and consumers as uses, we can specialize the operation as a
// float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
// if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
//
// Phis have a special status. Phis need to be flagged as producers or consumers as they can
// be inputs or outputs of commutative instructions. Fortunately, the producer and consumer
// properties are such that we can deduce each property using all non-phi inputs first (which
// form an initial phi graph) and then propagate the properties from one phi to another using
// a fixed-point algorithm. The algorithm is guaranteed to terminate as each iteration has the
// same number of flagged phis as the previous iteration or fewer (so the worst steady-state
// case is all phis being flagged as false).
//
// In a nutshell, the algorithm applies three passes:
// 1 - Determine which phis are consumers. Each phi gets an initial value by making a global
//     AND on all its non-phi inputs. Then each phi propagates its value to other phis. If,
//     after propagation, the flag value changed, we have to reapply the algorithm on all phi
//     operands, as a phi is a consumer if all of its uses are consumers.
// 2 - Determine which phis are producers. It's the same algorithm, except that we have to
//     reapply the algorithm on all phi uses, as a phi is a producer if all of its operands
//     are producers.
// 3 - Go through all commutative operations and ensure their inputs are all producers and
//     their uses are all consumers.
//
/* clang-format on */
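
// [Illustrative example, added for exposition; not part of the original
// source.] In the function below, the Float32Array loads are producers,
// the store is a consumer, and the two-operand addition is Float32
// commutative, so the addition can be specialized as a Float32 operation
// with no intermediate double rounding:
//
//   function scale(f32) {        // assume |f32| is a Float32Array
//     f32[0] = f32[1] + f32[2];  // specialized as a float32 add
//   }
//
// If the sum also flowed into a non-consumer use, the inputs would be
// converted to Double and the addition would stay a double operation.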

bool TypeAnalyzer::markPhiConsumers() {
  MOZ_ASSERT(phiWorklist_.empty());

  // Iterate in postorder so worklist is initialized to RPO.
  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       ++block) {
    if (mir->shouldCancel(
            "Ensure Float32 commutativity - Consumer Phis - Initial state")) {
      return false;
    }

    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();
         ++phi) {
      MOZ_ASSERT(!phi->isInWorklist());
      bool canConsumeFloat32 = !phi->isImplicitlyUsed();
      for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
        MDefinition* usedef = use.def();
        canConsumeFloat32 &=
            usedef->isPhi() || usedef->canConsumeFloat32(use.use());
      }
      phi->setCanConsumeFloat32(canConsumeFloat32);
      if (canConsumeFloat32 && !addPhiToWorklist(*phi)) {
        return false;
      }
    }
  }

  while (!phiWorklist_.empty()) {
    if (mir->shouldCancel(
            "Ensure Float32 commutativity - Consumer Phis - Fixed point")) {
      return false;
    }

    MPhi* phi = popPhi();
    MOZ_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));

    bool validConsumer = true;
    for (MUseDefIterator use(phi); use; use++) {
      MDefinition* def = use.def();
      if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
        validConsumer = false;
        break;
      }
    }

    if (validConsumer) {
      continue;
    }

    // Propagate invalidated phis
    phi->setCanConsumeFloat32(false);
    for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
      MDefinition* input = phi->getOperand(i);
      if (input->isPhi() && !input->isInWorklist() &&
          input->canConsumeFloat32(nullptr /* unused */)) {
        if (!addPhiToWorklist(input->toPhi())) {
          return false;
        }
      }
    }
  }
  return true;
}

bool TypeAnalyzer::markPhiProducers() {
  MOZ_ASSERT(phiWorklist_.empty());

  // Iterate in reverse postorder so worklist is initialized to PO.
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); ++block) {
    if (mir->shouldCancel(
            "Ensure Float32 commutativity - Producer Phis - initial state")) {
      return false;
    }

    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();
         ++phi) {
      MOZ_ASSERT(!phi->isInWorklist());
      bool canProduceFloat32 = true;
      for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e;
           ++i) {
        MDefinition* input = phi->getOperand(i);
        canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
      }
      phi->setCanProduceFloat32(canProduceFloat32);
      if (canProduceFloat32 && !addPhiToWorklist(*phi)) {
        return false;
      }
    }
  }

  while (!phiWorklist_.empty()) {
    if (mir->shouldCancel(
            "Ensure Float32 commutativity - Producer Phis - Fixed point")) {
      return false;
    }

    MPhi* phi = popPhi();
    MOZ_ASSERT(phi->canProduceFloat32());

    bool validProducer = true;
    for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
      MDefinition* input = phi->getOperand(i);
      if (input->isPhi() && !input->canProduceFloat32()) {
        validProducer = false;
        break;
      }
    }

    if (validProducer) {
      continue;
    }

    // Propagate invalidated phis
    phi->setCanProduceFloat32(false);
    for (MUseDefIterator use(phi); use; use++) {
      MDefinition* def = use.def();
      if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32()) {
        if (!addPhiToWorklist(def->toPhi())) {
          return false;
        }
      }
    }
  }
  return true;
}

bool TypeAnalyzer::specializeValidFloatOps() {
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); ++block) {
    if (mir->shouldCancel("Ensure Float32 commutativity - Instructions")) {
      return false;
    }

    for (MInstructionIterator ins(block->begin()); ins != block->end();
         ++ins) {
      if (!ins->isFloat32Commutative()) {
        continue;
      }

      if (ins->type() == MIRType::Float32) {
        continue;
      }

      if (!alloc().ensureBallast()) {
        return false;
      }

      // This call will try to specialize the instruction iff all uses are
      // consumers and all inputs are producers.
      ins->trySpecializeFloat32(alloc());
    }
  }
  return true;
}

bool TypeAnalyzer::graphContainsFloat32() {
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); ++block) {
    for (MDefinitionIterator def(*block); def; def++) {
      if (mir->shouldCancel(
              "Ensure Float32 commutativity - Graph contains Float32")) {
        return false;
      }

      if (def->type() == MIRType::Float32) {
        return true;
      }
    }
  }
  return false;
}

bool TypeAnalyzer::tryEmitFloatOperations() {
  // Asm.js uses the ahead-of-time type checks to specialize operations, no
  // need to check them again at this point.
  if (mir->compilingWasm()) {
    return true;
  }

  // Check ahead of time that there is at least one definition typed as
  // Float32, otherwise we don't need this pass.
  if (!graphContainsFloat32()) {
    return true;
  }

  // WarpBuilder skips over code that can't be reached except through
  // a catch block. Locals and arguments may be observable in such
  // code after bailing out, so we can't rely on seeing all uses.
  if (graph.hasTryBlock()) {
    return true;
  }

  if (!markPhiConsumers()) {
    return false;
  }
  if (!markPhiProducers()) {
    return false;
  }
  if (!specializeValidFloatOps()) {
    return false;
  }
  return true;
}

bool TypeAnalyzer::checkFloatCoherency() {
#ifdef DEBUG
  // Asserts that all Float32 instructions are flowing into Float32 consumers
  // or specialized operations.
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); ++block) {
    if (mir->shouldCancel("Check Float32 coherency")) {
      return false;
    }

    for (MDefinitionIterator def(*block); def; def++) {
      if (def->type() != MIRType::Float32) {
        continue;
      }

      for (MUseDefIterator use(*def); use; use++) {
        MDefinition* consumer = use.def();
        MOZ_ASSERT(consumer->isConsistentFloat32Use(use.use()));
      }
    }
  }
#endif
  return true;
}

static bool HappensBefore(const MDefinition* earlier,
                          const MDefinition* later) {
  MOZ_ASSERT(earlier->block() == later->block());

  for (auto* ins : *earlier->block()) {
    if (ins == earlier) {
      return true;
    }
    if (ins == later) {
      return false;
    }
  }
  MOZ_CRASH("earlier and later are instructions in the block");
}

// Propagate type information from dominating unbox instructions.
//
// This optimization applies for example for self-hosted String.prototype
// functions:
//
// Example:
//
//   String.prototype.id = function() {
//     // Strict mode to avoid ToObject on primitive this-values.
//     "use strict";
//
//     // Template string to apply ToString on the this-value.
//     return `${this}`;
//   };
//
//   function f(s) {
//     // Assume |s| is a string value.
//     return s.id();
//   }
//
// Compiles into: (Graph after Scalar Replacement)
//
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ block0                                                                     │
// │ resumepoint 1 0 2 2                                                        │
// │ 0 parameter THIS_SLOT Value                                                │
// │ 1 parameter 0 Value                                                        │
// │ 2 constant undefined Undefined                                             │
// │ 3 start                                                                    │
// │ 4 checkoverrecursed                                                        │
// │ 5 unbox parameter1 to String (fallible) String                             │
// │ 6 constant object 1d908053e088 (String) Object                             │
// │ 7 guardshape constant6:Object Object                                       │
// │ 8 slots guardshape7:Object Slots                                           │
// │ 9 loaddynamicslot slots8:Slots (slot 53) Value                             │
// │ 10 constant 0x0 Int32                                                      │
// │ 11 unbox loaddynamicslot9 to Object (fallible) Object                      │
// │ 12 nurseryobject Object                                                    │
// │ 13 guardspecificfunction unbox11:Object nurseryobject12:Object Object      │
// │ 14 goto block1                                                             │
// └──────────────────────────────────┬────────────────────────────────────────┘
//                                    │
// ┌──────────────────────────────────▼────────────────────────────────────────┐
// │ block1                                                                     │
// │ ((0)) resumepoint 15 1 15 15 | 1 13 1 0 2 2                                │
// │ 15 constant undefined Undefined                                            │
// │ 16 tostring parameter1:Value String                                        │
// │ 18 goto block2                                                             │
// └──────────────────────────────────┬────────────────────────────────────────┘
//                                    │
//                     ┌──────────────▼──────────────┐
//                     │ block2                      │
//                     │ resumepoint 16 1 0 2 2      │
//                     │ 19 return tostring16:String │
//                     └─────────────────────────────┘
//
// The Unbox instruction is used as a type guard. The ToString instruction
// doesn't use the type information from the preceding Unbox instruction and
// therefore has to assume its operand can be any value.
//
// When instead propagating the type information from the preceding Unbox
// instruction, this graph is constructed after the "Apply types" phase:
//
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ block0                                                                     │
// │ resumepoint 1 0 2 2                                                        │
// │ 0 parameter THIS_SLOT Value                                                │
// │ 1 parameter 0 Value                                                        │
// │ 2 constant undefined Undefined                                             │
// │ 3 start                                                                    │
// │ 4 checkoverrecursed                                                        │
// │ 5 unbox parameter1 to String (fallible) String                             │
// │ 6 constant object 1d908053e088 (String) Object                             │
// │ 7 guardshape constant6:Object Object                                       │
// │ 8 slots guardshape7:Object Slots                                           │
// │ 9 loaddynamicslot slots8:Slots (slot 53) Value                             │
// │ 10 constant 0x0 Int32                                                      │
// │ 11 unbox loaddynamicslot9 to Object (fallible) Object                      │
// │ 12 nurseryobject Object                                                    │
// │ 13 guardspecificfunction unbox11:Object nurseryobject12:Object Object      │
// │ 14 goto block1                                                             │
// └──────────────────────────────────┬────────────────────────────────────────┘
//                                    │
// ┌──────────────────────────────────▼────────────────────────────────────────┐
// │ block1                                                                     │
// │ ((0)) resumepoint 15 1 15 15 | 1 13 1 0 2 2                                │
// │ 15 constant undefined Undefined                                            │
// │ 20 unbox parameter1 to String (fallible) String                            │
// │ 16 tostring parameter1:Value String                                        │
// │ 18 goto block2                                                             │
// └──────────────────────────────────┬────────────────────────────────────────┘
//                                    │
//                     ┌──────────────▼─────────────────────┐
//                     │ block2                             │
//                     │ resumepoint 16 1 0 2 2             │
//                     │ 21 box tostring16:String Value     │
//                     │ 19 return box21:Value              │
//                     └────────────────────────────────────┘
//
// GVN will later merge both Unbox instructions and fold away the ToString
// instruction, so we get this final graph:
//
// ┌───────────────────────────────────────────────────────────────────────────┐
// │ block0                                                                     │
// │ resumepoint 1 0 2 2                                                        │
// │ 0 parameter THIS_SLOT Value                                                │
// │ 1 parameter 0 Value                                                        │
// │ 2 constant undefined Undefined                                             │
// │ 3 start                                                                    │
// │ 4 checkoverrecursed                                                        │
// │ 5 unbox parameter1 to String (fallible) String                             │
// │ 6 constant object 1d908053e088 (String) Object                             │
// │ 7 guardshape constant6:Object Object                                       │
// │ 8 slots guardshape7:Object Slots                                           │
// │ 22 loaddynamicslotandunbox slots8:Slots (slot 53) Object                   │
// │ 11 nurseryobject Object                                                    │
// │ 12 guardspecificfunction load22:Object nurseryobject11:Object Object       │
// │ 13 goto block1                                                             │
// └──────────────────────────────────┬────────────────────────────────────────┘
//                                    │
// ┌──────────────────────────────────▼────────────────────────────────────────┐
// │ block1                                                                     │
// │ ((0)) resumepoint 2 1 2 2 | 1 12 1 0 2 2                                   │
// │ 14 goto block2                                                             │
// └──────────────────────────────────┬────────────────────────────────────────┘
//                                    │
//                     ┌──────────────▼─────────────────────┐
//                     │ block2                             │
//                     │ resumepoint 5 1 0 2 2              │
//                     │ 15 return parameter1:Value         │
//                     └────────────────────────────────────┘
//
bool TypeAnalyzer::propagateUnbox() {
  // Visit the blocks in post-order, so that the type information of the
  // closest unbox operation is used.
  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Propagate Unbox")) {
      return false;
    }

    // Iterate over all instructions to look for unbox instructions.
    for (MInstructionIterator iter(block->begin()); iter != block->end();
         iter++) {
      if (!iter->isUnbox()) {
        continue;
      }

      auto* unbox = iter->toUnbox();
      auto* input = unbox->input();

      // Ignore unbox operations on typed values.
      if (input->type() != MIRType::Value) {
        continue;
      }

      // Ignore unbox to floating point types, because propagating boxed Int32
      // values as Double can lead to repeated bailouts when later instructions
      // expect Int32 inputs.
      if (IsFloatingPointType(unbox->type())) {
        continue;
      }

      // Inspect other uses of |input| to propagate the unboxed type
      // information from this unbox operation.
      for (auto uses = input->usesBegin(); uses != input->usesEnd();) {
        auto* use = *uses++;

        // Ignore resume points.
        if (!use->consumer()->isDefinition()) {
          continue;
        }

        auto* def = use->consumer()->toDefinition();

        // Ignore any unbox operations, including the current |unbox|.
        if (def->isUnbox()) {
          continue;
        }

        // Ignore phi nodes, because we don't yet support them.
        if (def->isPhi()) {
          continue;
        }

        // The unbox operation needs to happen before the other use, otherwise
        // we can't propagate the type information.
        if (unbox->block() == def->block()) {
          if (!HappensBefore(unbox, def)) {
            continue;
          }
        } else {
          if (!unbox->block()->dominates(def->block())) {
            continue;
          }
        }

        // Replace the use with |unbox|, so that GVN knows about the actual
        // value type and can more easily fold unnecessary operations. If the
        // instruction actually needs a boxed input, the BoxPolicy type policy
        // will simply unwrap the unbox instruction.
        use->replaceProducer(unbox);

        // The uses in the MIR graph no longer reflect the uses in the
        // bytecode, so we have to mark |input| as implicitly used.
        input->setImplicitlyUsedUnchecked();
      }
    }
  }
  return true;
}

bool TypeAnalyzer::analyze() {
  if (!tryEmitFloatOperations()) {
    return false;
  }
  if (!specializePhis()) {
    return false;
  }
  if (!propagateUnbox()) {
    return false;
  }
  if (!insertConversions()) {
    return false;
  }
  if (!checkFloatCoherency()) {
    return false;
  }
  return true;
}

bool jit::ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph) {
  TypeAnalyzer analyzer(mir, graph);

  if (!analyzer.analyze()) {
    return false;
  }

  return true;
}

void jit::RenumberBlocks(MIRGraph& graph) {
  size_t id = 0;
  for (ReversePostorderIterator block(graph.rpoBegin());
       block != graph.rpoEnd(); block++) {
    block->setId(id++);
  }
}

// A utility for code which adds/deletes blocks. Renumber the remaining blocks,
// recompute dominators, and optionally recompute AliasAnalysis dependencies.
bool jit::AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph,
                               bool updateAliasAnalysis,
                               bool underValueNumberer) {
  // Renumber the blocks and clear out the old dominator info.
  size_t id = 0;
  for (ReversePostorderIterator i(graph.rpoBegin()), e(graph.rpoEnd()); i != e;
       ++i) {
    i->clearDominatorInfo();
    i->setId(id++);
  }

  // Recompute dominator info.
  if (!BuildDominatorTree(graph)) {
    return false;
  }

  // If needed, update alias analysis dependencies.
  if (updateAliasAnalysis) {
    if (!AliasAnalysis(mir, graph).analyze()) {
      return false;
    }
  }

  AssertExtendedGraphCoherency(graph, underValueNumberer);
  return true;
}

// Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
// Alias analysis dependencies may be invalid after calling this function.
bool jit::RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph,
                               uint32_t numMarkedBlocks) {
  if (numMarkedBlocks == graph.numBlocks()) {
    // If all blocks are marked, no blocks need removal. Just clear the
    // marks. We'll still need to update the dominator tree below though,
    // since we may have removed edges even if we didn't remove any blocks.
    graph.unmarkBlocks();
  } else {
    // As we are going to remove edges and basic blocks, we have to mark
    // instructions which would be needed by baseline if we were to
    // bail out.
    for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
      MBasicBlock* block = *it++;
      if (block->isMarked()) {
        continue;
      }

      FlagAllOperandsAsImplicitlyUsed(mir, block);
    }

    // Find unmarked blocks and remove them.
    for (ReversePostorderIterator iter(graph.rpoBegin());
         iter != graph.rpoEnd();) {
      MBasicBlock* block = *iter++;

      if (block->isMarked()) {
        block->unmark();
        continue;
      }

      // The block is unreachable. Clear out the loop header flag, as
      // we're doing the sweep of a mark-and-sweep here, so we no longer
      // need to worry about whether an unmarked block is a loop or not.
      if (block->isLoopHeader()) {
        block->clearLoopHeader();
      }

      for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
        block->getSuccessor(i)->removePredecessor(block);
      }
      graph.removeBlock(block);
    }
  }

  // Renumber the blocks and update the dominator tree.
  return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
}

// A Simple, Fast Dominance Algorithm by Cooper et al.
// Modified to support empty intersections for OSR, and in RPO.
static MBasicBlock* IntersectDominators(MBasicBlock* block1,
                                        MBasicBlock* block2) {
  MBasicBlock* finger1 = block1;
  MBasicBlock* finger2 = block2;

  MOZ_ASSERT(finger1);
  MOZ_ASSERT(finger2);

  // In the original paper, the block ID comparisons are on the postorder
  // index. This implementation iterates in RPO, so the comparisons are
  // reversed.

  // For this function to be called, the block must have multiple
  // predecessors. If a finger is then found to be self-dominating, it must
  // therefore be reachable from multiple roots through non-intersecting
  // control flow. nullptr is returned in this case, to denote an empty
  // intersection.

  while (finger1->id() != finger2->id()) {
    while (finger1->id() > finger2->id()) {
      MBasicBlock* idom = finger1->immediateDominator();
      if (idom == finger1) {
        return nullptr;  // Empty intersection.
      }
      finger1 = idom;
    }
    while (finger2->id() > finger1->id()) {
      MBasicBlock* idom = finger2->immediateDominator();
      if (idom == finger2) {
        return nullptr;  // Empty intersection.
      }
      finger2 = idom;
    }
  }
  return finger1;
}
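
// [Illustrative example, added for exposition; not part of the original
// source.] For a diamond CFG 0 -> {1, 2} -> 3 with RPO ids as numbered and
// idom(1) == idom(2) == 0, intersecting the predecessors of block 3 walks
// the finger with the greater id up its immediate-dominator chain: finger 2
// steps to 0, then finger 1 steps to 0, and the fingers meet at block 0,
// which becomes idom(3).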

void jit::ClearDominatorTree(MIRGraph& graph) {
  for (MBasicBlockIterator iter = graph.begin(); iter != graph.end();
       iter++) {
    iter->clearDominatorInfo();
  }
}

static void ComputeImmediateDominators(MIRGraph& graph) {
  // The default start block is a root and therefore only self-dominates.
  MBasicBlock* startBlock = graph.entryBlock();
  startBlock->setImmediateDominator(startBlock);

  // Any OSR block is a root and therefore only self-dominates.
  MBasicBlock* osrBlock = graph.osrBlock();
  if (osrBlock) {
    osrBlock->setImmediateDominator(osrBlock);
  }

  bool changed = true;

  while (changed) {
    changed = false;

    ReversePostorderIterator block = graph.rpoBegin();

    // For each block in RPO, intersect all dominators.
    for (; block != graph.rpoEnd(); block++) {
      // If a node has once been found to have no exclusive dominator,
      // it will never have an exclusive dominator, so it may be skipped.
      if (block->immediateDominator() == *block) {
        continue;
      }

      // A block with no predecessors is not reachable from any entry, so
      // it self-dominates.
      if (MOZ_UNLIKELY(block->numPredecessors() == 0)) {
        block->setImmediateDominator(*block);
        continue;
      }

      MBasicBlock* newIdom = block->getPredecessor(0);

      // Find the first common dominator.
      for (size_t i = 1; i < block->numPredecessors(); i++) {
        MBasicBlock* pred = block->getPredecessor(i);
        if (pred->immediateDominator() == nullptr) {
          continue;
        }

        newIdom = IntersectDominators(pred, newIdom);

        // If there is no common dominator, the block self-dominates.
        if (newIdom == nullptr) {
          block->setImmediateDominator(*block);
          changed = true;
          break;
        }
      }

      if (newIdom && block->immediateDominator() != newIdom) {
        block->setImmediateDominator(newIdom);
        changed = true;
      }
    }
  }

#ifdef DEBUG
  // Assert that all blocks have dominator information.
  for (MBasicBlockIterator block(graph.begin()); block != graph.end();
       block++) {
    MOZ_ASSERT(block->immediateDominator() != nullptr);
  }
#endif
}

bool jit::BuildDominatorTree(MIRGraph& graph) {
  MOZ_ASSERT(graph.canBuildDominators());

  ComputeImmediateDominators(graph);

  Vector<MBasicBlock*, 4, JitAllocPolicy> worklist(graph.alloc());

  // Traversing through the graph in post-order means that every non-phi use
  // of a definition is visited before the def itself. Since a def
  // dominates its uses, by the time we reach a particular
  // block, we have processed all of its dominated children, so
  // block->numDominated() is accurate.
  for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
    MBasicBlock* child = *i;
    MBasicBlock* parent = child->immediateDominator();

    // Dominance is defined such that blocks always dominate themselves.
    child->addNumDominated(1);

    // If the block only self-dominates, it has no definite parent.
    // Add it to the worklist as a root for pre-order traversal.
    // This includes all roots. Order does not matter.
    if (child == parent) {
      if (!worklist.append(child)) {
        return false;
      }
      continue;
    }

    if (!parent->addImmediatelyDominatedBlock(child)) {
      return false;
    }

    parent->addNumDominated(child->numDominated());
  }

#ifdef DEBUG
  // If compiling with OSR, many blocks will self-dominate.
  // Without OSR, there is only one root block which dominates all.
  if (!graph.osrBlock()) {
    MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
  }
#endif
  // Now, iterate through the dominator tree in pre-order and annotate every
  // block with its index in the traversal.
  size_t index = 0;
  while (!worklist.empty()) {
    MBasicBlock* block = worklist.popCopy();
    block->setDomIndex(index);

    if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                         block->immediatelyDominatedBlocksEnd())) {
      return false;
    }
    index++;
  }

  return true;
}
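
// [Illustrative example, added for exposition; not part of the original
// source.] The pre-order annotation gives every dominator-tree node a
// domIndex such that the blocks dominated by B occupy the contiguous range
// [domIndex(B), domIndex(B) + numDominated(B)). FindDominatingBoundsCheck
// below exploits exactly this property to test the visibility of a bounds
// check with a single comparison.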

bool jit::BuildPhiReverseMapping(MIRGraph& graph) {
  // Build a mapping such that given a basic block, whose successor has one or
  // more phis, we can find our specific input to that phi. To make this fast
  // mapping work we rely on a specific property of our structured control
  // flow graph: For a block with phis, its predecessors each have only one
  // successor with phis. Consider each case:
  // * Blocks with less than two predecessors cannot have phis.
  // * Breaks. A break always has exactly one successor, and the break
  //   catch block has exactly one predecessor for each break, as
  //   well as a final predecessor for the actual loop exit.
  // * Continues. A continue always has exactly one successor, and the
  //   continue catch block has exactly one predecessor for each
  //   continue, as well as a final predecessor for the actual
  //   loop continuation. The continue itself has exactly one
  //   successor.
  // * An if. Each branch has exactly one predecessor.
  // * A switch. Each branch has exactly one predecessor.
  // * Loop tail. A new block is always created for the exit, and if a
  //   break statement is present, the exit block will forward
  //   directly to the break block.
  for (MBasicBlockIterator block(graph.begin()); block != graph.end();
       block++) {
    if (block->phisEmpty()) {
      continue;
    }

    // Assert on the above.
    for (size_t j = 0; j < block->numPredecessors(); j++) {
      MBasicBlock* pred = block->getPredecessor(j);

#ifdef DEBUG
      size_t numSuccessorsWithPhis = 0;
      for (size_t k = 0; k < pred->numSuccessors(); k++) {
        MBasicBlock* successor = pred->getSuccessor(k);
        if (!successor->phisEmpty()) {
          numSuccessorsWithPhis++;
        }
      }
      MOZ_ASSERT(numSuccessorsWithPhis <= 1);
#endif

      pred->setSuccessorWithPhis(*block, j);
    }
  }

  return true;
}

#ifdef DEBUG
static bool CheckSuccessorImpliesPredecessor(MBasicBlock* A, MBasicBlock* B) {
  // Assuming B = succ(A), verify A = pred(B).
  for (size_t i = 0; i < B->numPredecessors(); i++) {
    if (A == B->getPredecessor(i)) {
      return true;
    }
  }
  return false;
}

static bool CheckPredecessorImpliesSuccessor(MBasicBlock* A, MBasicBlock* B) {
  // Assuming B = pred(A), verify A = succ(B).
  for (size_t i = 0; i < B->numSuccessors(); i++) {
    if (A == B->getSuccessor(i)) {
      return true;
    }
  }
  return false;
}

// If you have issues with the usesBalance assertions, then define the macro
// _DEBUG_CHECK_OPERANDS_USES_BALANCE to spew information on the error output.
// This output can then be processed with the following awk script to filter and
// highlight which checks are missing or if there is an unexpected operand /
// use pair.
//
// define _DEBUG_CHECK_OPERANDS_USES_BALANCE 1
/*

$ ./js 2>stderr.log
$ gawk '
    /^==Check/ { context = ""; state = $2; }
    /^[a-z]/ { context = context "\n\t" $0; }
    /^==End/ {
      if (state == "Operand") {
        list[context] = list[context] - 1;
      } else if (state == "Use") {
        list[context] = list[context] + 1;
      }
    }
    END {
      for (ctx in list) {
        if (list[ctx] > 0) {
          print "Missing operand check", ctx, "\n"
        }
        if (list[ctx] < 0) {
          print "Missing use check", ctx, "\n"
        }
      }
    }' < stderr.log
*/

static void CheckOperand(const MNode* consumer, const MUse* use,
                         int32_t* usesBalance) {
  MOZ_ASSERT(use->hasProducer());
  MDefinition* producer = use->producer();
  MOZ_ASSERT(!producer->isDiscarded());
  MOZ_ASSERT(producer->block() != nullptr);
  MOZ_ASSERT(use->consumer() == consumer);
#  ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
  Fprinter print(stderr);
  print.printf("==Check Operand\n");
  use->producer()->dump(print);
  print.printf("  index: %zu\n", use->consumer()->indexOf(use));
  use->consumer()->dump(print);
  print.printf("==End\n");
#  endif

  --*usesBalance;
}

static void CheckUse(const MDefinition* producer, const MUse* use,
                     int32_t* usesBalance) {
  MOZ_ASSERT(!use->consumer()->block()->isDead());
  MOZ_ASSERT_IF(use->consumer()->isDefinition(),
                !use->consumer()->toDefinition()->isDiscarded());
  MOZ_ASSERT(use->consumer()->block() != nullptr);
  MOZ_ASSERT(use->consumer()->getOperand(use->index()) == producer);
#  ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
  Fprinter print(stderr);
  print.printf("==Check Use\n");
  use->producer()->dump(print);
  print.printf("  index: %zu\n", use->consumer()->indexOf(use));
  use->consumer()->dump(print);
  print.printf("==End\n");
#  endif

  ++*usesBalance;
}

// To properly encode entry resume points, we have to ensure that all the
// operands of the entry resume point are located before the safeInsertTop
// location.
static void AssertOperandsBeforeSafeInsertTop(MResumePoint* resume) {
  MBasicBlock* block = resume->block();
  if (block == block->graph().osrBlock()) {
    return;
  }
  MInstruction* stop = block->safeInsertTop();
  for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
    MDefinition* def = resume->getOperand(i);
    if (def->block() != block) {
      continue;
    }
    if (def->isPhi()) {
      continue;
    }

    for (MInstructionIterator ins = block->begin(); true; ins++) {
      if (*ins == def) {
        break;
      }
      MOZ_ASSERT(
          *ins != stop,
          "Resume point operand located after the safeInsertTop location");
    }
  }
}
#endif

void jit::AssertBasicGraphCoherency(MIRGraph& graph, bool force) {
#ifdef DEBUG
  if (!JitOptions.fullDebugChecks && !force) {
    return;
  }

  MOZ_ASSERT(graph.entryBlock()->numPredecessors() == 0);
  MOZ_ASSERT(graph.entryBlock()->phisEmpty());
  MOZ_ASSERT(!graph.entryBlock()->unreachable());

  if (MBasicBlock* osrBlock = graph.osrBlock()) {
    MOZ_ASSERT(osrBlock->numPredecessors() == 0);
    MOZ_ASSERT(osrBlock->phisEmpty());
    MOZ_ASSERT(osrBlock != graph.entryBlock());
    MOZ_ASSERT(!osrBlock->unreachable());
  }

  if (MResumePoint* resumePoint = graph.entryResumePoint()) {
    MOZ_ASSERT(resumePoint->block() == graph.entryBlock());
  }

  // Assert successor and predecessor list coherency.
  uint32_t count = 0;
  int32_t usesBalance = 0;
  for (MBasicBlockIterator block(graph.begin()); block != graph.end();
       block++) {
    count++;

    MOZ_ASSERT(&block->graph() == &graph);
    MOZ_ASSERT(!block->isDead());
    MOZ_ASSERT_IF(block->outerResumePoint() != nullptr,
                  block->entryResumePoint() != nullptr);

    for (size_t i = 0; i < block->numSuccessors(); i++) {
      MOZ_ASSERT(
          CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));
    }

    for (size_t i = 0; i < block->numPredecessors(); i++) {
      MOZ_ASSERT(
          CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));
    }

    if (MResumePoint* resume = block->entryResumePoint()) {
      MOZ_ASSERT(!resume->instruction());
      MOZ_ASSERT(resume->block() == *block);
      AssertOperandsBeforeSafeInsertTop(resume);
    }
    if (MResumePoint* resume = block->outerResumePoint()) {
      MOZ_ASSERT(!resume->instruction());
      MOZ_ASSERT(resume->block() == *block);
    }
    for (MResumePointIterator iter(block->resumePointsBegin());
         iter != block->resumePointsEnd(); iter++) {
      // We cannot yet assert that if there is no instruction then this is
      // the entry resume point, because we are still storing resume points
      // in the InlinePropertyTable.
      MOZ_ASSERT_IF(iter->instruction(),
                    iter->instruction()->block() == *block);
      for (uint32_t i = 0, e = iter->numOperands(); i < e; i++) {
        CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
      }
    }
    for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd();
         phi++) {
      MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
      MOZ_ASSERT(!phi->isRecoveredOnBailout());
      MOZ_ASSERT(phi->type() != MIRType::None);
      MOZ_ASSERT(phi->dependency() == nullptr);
    }
    for (MDefinitionIterator iter(*block); iter; iter++) {
      MOZ_ASSERT(iter->block() == *block);
      MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType::None);
      MOZ_ASSERT(!iter->isDiscarded());
      MOZ_ASSERT_IF(iter->isStart(),
                    *block == graph.entryBlock() || *block == graph.osrBlock());
      MOZ_ASSERT_IF(iter->isParameter(),
                    *block == graph.entryBlock() || *block == graph.osrBlock());
      MOZ_ASSERT_IF(iter->isOsrEntry(), *block == graph.osrBlock());
      MOZ_ASSERT_IF(iter->isOsrValue(), *block == graph.osrBlock());

      // Assert that use chains are valid for this instruction.
      for (uint32_t i = 0, end = iter->numOperands(); i < end; i++) {
        CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
      }
      for (MUseIterator use(iter->usesBegin()); use != iter->usesEnd();
           use++) {
        CheckUse(*iter, *use, &usesBalance);
      }

      if (iter->isInstruction()) {
        if (MResumePoint* resume = iter->toInstruction()->resumePoint()) {
          MOZ_ASSERT(resume->instruction() == *iter);
          MOZ_ASSERT(resume->block() == *block);
          MOZ_ASSERT(resume->block()->entryResumePoint() != nullptr);
        }
      }

      if (iter->isRecoveredOnBailout()) {
        MOZ_ASSERT(!iter->hasLiveDefUses());
      }
    }

    // The control instruction is not visited by the MDefinitionIterator.
    MControlInstruction* control = block->lastIns();
    MOZ_ASSERT(control->block() == *block);
    MOZ_ASSERT(!control->hasUses());
    MOZ_ASSERT(control->type() == MIRType::None);
    MOZ_ASSERT(!control->isDiscarded());
    MOZ_ASSERT(!control->isRecoveredOnBailout());
    MOZ_ASSERT(control->resumePoint() == nullptr);
    for (uint32_t i = 0, end = control->numOperands(); i < end; i++) {
      CheckOperand(control, control->getUseFor(i), &usesBalance);
    }
    for (size_t i = 0; i < control->numSuccessors(); i++) {
      MOZ_ASSERT(control->getSuccessor(i));
    }
  }

  // In case of issues, see the _DEBUG_CHECK_OPERANDS_USES_BALANCE macro above.
  MOZ_ASSERT(usesBalance <= 0, "More use checks than operand checks");
  MOZ_ASSERT(usesBalance >= 0, "More operand checks than use checks");
  MOZ_ASSERT(graph.numBlocks() == count);
#endif
}

#ifdef DEBUG
static void AssertReversePostorder(MIRGraph& graph) {
  // Check that every block is visited after all its predecessors (except
  // backedges).
  for (ReversePostorderIterator iter(graph.rpoBegin());
       iter != graph.rpoEnd(); ++iter) {
    MBasicBlock* block = *iter;
    MOZ_ASSERT(!block->isMarked());

    for (size_t i = 0; i < block->numPredecessors(); i++) {
      MBasicBlock* pred = block->getPredecessor(i);
      if (!pred->isMarked()) {
        MOZ_ASSERT(pred->isLoopBackedge());
        MOZ_ASSERT(block->backedge() == pred);
      }
    }

    block->mark();
  }

  graph.unmarkBlocks();
}

static void AssertDominatorTree(MIRGraph& graph) {
  // Check dominators.

  MOZ_ASSERT(graph.entryBlock()->immediateDominator() == graph.entryBlock());
  if (MBasicBlock* osrBlock = graph.osrBlock()) {
    MOZ_ASSERT(osrBlock->immediateDominator() == osrBlock);
  } else {
    MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
  }

  size_t i = graph.numBlocks();
  size_t totalNumDominated = 0;
  for (MBasicBlockIterator block(graph.begin()); block != graph.end();
       block++) {
    MOZ_ASSERT(block->dominates(*block));

    MBasicBlock* idom = block->immediateDominator();
    MOZ_ASSERT(idom->dominates(*block));
    MOZ_ASSERT(idom == *block || idom->id() < block->id());

    if (idom == *block) {
      totalNumDominated += block->numDominated();
    } else {
      bool foundInParent = false;
      for (size_t j = 0; j < idom->numImmediatelyDominatedBlocks(); j++) {
        if (idom->getImmediatelyDominatedBlock(j) == *block) {
          foundInParent = true;
          break;
        }
      }
      MOZ_ASSERT(foundInParent);
    }

    size_t numDominated = 1;
    for (size_t j = 0; j < block->numImmediatelyDominatedBlocks(); j++) {
      MBasicBlock* dom = block->getImmediatelyDominatedBlock(j);
      MOZ_ASSERT(block->dominates(dom));
      MOZ_ASSERT(dom->id() > block->id());
      MOZ_ASSERT(dom->immediateDominator() == *block);

      numDominated += dom->numDominated();
    }
    MOZ_ASSERT(block->numDominated() == numDominated);
    MOZ_ASSERT(block->numDominated() <= i);
    MOZ_ASSERT(block->numSuccessors() != 0 || block->numDominated() == 1);
    i--;
  }
  MOZ_ASSERT(totalNumDominated == graph.numBlocks());
}
#endif

void jit::AssertGraphCoherency(MIRGraph& graph, bool force) {
#ifdef DEBUG
  if (!JitOptions.checkGraphConsistency) {
    return;
  }
  if (!JitOptions.fullDebugChecks && !force) {
    return;
  }
  AssertBasicGraphCoherency(graph, force);
  AssertReversePostorder(graph);
#endif
}

#ifdef DEBUG
static bool IsResumableMIRType(MIRType type) {
  // see CodeGeneratorShared::encodeAllocation
  switch (type) {
    case MIRType::Undefined:
    case MIRType::Null:
    case MIRType::Boolean:
    case MIRType::Int32:
    case MIRType::Double:
    case MIRType::Float32:
    case MIRType::String:
    case MIRType::Symbol:
    case MIRType::BigInt:
    case MIRType::Object:
    case MIRType::Shape:
    case MIRType::MagicOptimizedOut:
    case MIRType::MagicUninitializedLexical:
    case MIRType::MagicIsConstructing:
    case MIRType::Value:
    case MIRType::Simd128:
      return true;

    case MIRType::MagicHole:
    case MIRType::None:
    case MIRType::Slots:
    case MIRType::Elements:
    case MIRType::Pointer:
    case MIRType::Int64:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
    case MIRType::StackResults:
    case MIRType::IntPtr:
      return false;
  }
  MOZ_CRASH("Unknown MIRType.");
}

static void AssertResumableOperands(MNode* node) {
  for (size_t i = 0, e = node->numOperands(); i < e; ++i) {
    MDefinition* op = node->getOperand(i);
    if (op->isRecoveredOnBailout()) {
      continue;
    }
    MOZ_ASSERT(IsResumableMIRType(op->type()),
               "Resume point cannot encode its operands");
  }
}

static void AssertIfResumableInstruction(MDefinition* def) {
  if (!def->isRecoveredOnBailout()) {
    return;
  }
  AssertResumableOperands(def);
}

static void AssertResumePointDominatedByOperands(MResumePoint* resume) {
  for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
    MDefinition* op = resume->getOperand(i);
    MOZ_ASSERT(op->block()->dominates(resume->block()),
               "Resume point is not dominated by its operands");
  }
}
#endif

void jit::AssertExtendedGraphCoherency(MIRGraph& graph,
                                       bool underValueNumberer, bool force) {
  // Checks the basic GraphCoherency but also other conditions that
  // do not hold immediately (such as the fact that critical edges
  // are split).

#ifdef DEBUG
  if (!JitOptions.checkGraphConsistency) {
    return;
  }
  if (!JitOptions.fullDebugChecks && !force) {
    return;
  }

  AssertGraphCoherency(graph, force);

  AssertDominatorTree(graph);

  DebugOnly<uint32_t> idx = 0;
  for (MBasicBlockIterator block(graph.begin()); block != graph.end();
       block++) {
    MOZ_ASSERT(block->id() == idx);
    idx++;

    // No critical edges:
    if (block->numSuccessors() > 1) {
      for (size_t i = 0; i < block->numSuccessors(); i++) {
        MOZ_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
      }
    }

    if (block->isLoopHeader()) {
      if (underValueNumberer && block->numPredecessors() == 3) {
        // Fixup block.
        MOZ_ASSERT(block->getPredecessor(1)->numPredecessors() == 0);
        MOZ_ASSERT(graph.osrBlock(),
                   "Fixup blocks should only exists if we have an osr block.");
      } else {
        MOZ_ASSERT(block->numPredecessors() == 2);
      }
      MBasicBlock* backedge = block->backedge();
      MOZ_ASSERT(backedge->id() >= block->id());
      MOZ_ASSERT(backedge->numSuccessors() == 1);
      MOZ_ASSERT(backedge->getSuccessor(0) == *block);
    }

    if (!block->phisEmpty()) {
      for (size_t i = 0; i < block->numPredecessors(); i++) {
        MBasicBlock* pred = block->getPredecessor(i);
        MOZ_ASSERT(pred->successorWithPhis() == *block);
        MOZ_ASSERT(pred->positionInPhiSuccessor() == i);
      }
    }

    uint32_t successorWithPhis = 0;
    for (size_t i = 0; i < block->numSuccessors(); i++) {
      if (!block->getSuccessor(i)->phisEmpty()) {
        successorWithPhis++;
      }
    }

    MOZ_ASSERT(successorWithPhis <= 1);
    MOZ_ASSERT((successorWithPhis != 0) ==
               (block->successorWithPhis() != nullptr));

    // Verify that phi operands dominate the corresponding CFG predecessor
    // edges.
    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
         iter != end; ++iter) {
      MPhi* phi = *iter;
      for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
        MOZ_ASSERT(
            phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
            "Phi input is not dominated by its operand");
      }
    }

    // Verify that instructions are dominated by their operands.
    for (MInstructionIterator iter(block->begin()), end(block->end());
         iter != end; ++iter) {
      MInstruction* ins = *iter;
      for (size_t i = 0, e = ins->numOperands(); i < e; ++i) {
        MDefinition* op = ins->getOperand(i);
        MBasicBlock* opBlock = op->block();
        MOZ_ASSERT(opBlock->dominates(*block),
                   "Instruction is not dominated by its operands");

        // If the operand is an instruction in the same block, check
        // that it comes first.
        if (opBlock == *block && !op->isPhi()) {
          MInstructionIterator opIter = block->begin(op->toInstruction());
          do {
            ++opIter;
            MOZ_ASSERT(opIter != block->end(),
                       "Operand in same block as instruction does not precede");
          } while (*opIter != ins);
        }
      }
      AssertIfResumableInstruction(ins);
      if (MResumePoint* resume = ins->resumePoint()) {
        AssertResumePointDominatedByOperands(resume);
        AssertResumableOperands(resume);
      }
    }

    // Verify that the block resume points are dominated by their operands.
    if (MResumePoint* resume = block->entryResumePoint()) {
      AssertResumePointDominatedByOperands(resume);
      AssertResumableOperands(resume);
    }
    if (MResumePoint* resume = block->outerResumePoint()) {
      AssertResumePointDominatedByOperands(resume);
      AssertResumableOperands(resume);
    }
  }
#endif
}

struct BoundsCheckInfo {
  MBoundsCheck* check;
  uint32_t validEnd;
};

typedef HashMap<uint32_t, BoundsCheckInfo, DefaultHasher<uint32_t>,
                JitAllocPolicy>
    BoundsCheckMap;

// Compute a hash for bounds checks which ignores constant offsets in the
// index.
static HashNumber BoundsCheckHashIgnoreOffset(MBoundsCheck* check) {
  SimpleLinearSum indexSum = ExtractLinearSum(check->index());
  uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
  uintptr_t length = uintptr_t(check->length());
  return index ^ length;
}
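
// [Illustrative example, added for exposition; not part of the original
// source.] The checks guarding |a[i + 1]| and |a[i + 2]| both extract to a
// linear sum whose term is |i|, so they hash identically (the constants 1
// and 2 are ignored); the later check can then find the earlier one in the
// map and be folded into it.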

static MBoundsCheck* FindDominatingBoundsCheck(BoundsCheckMap& checks,
                                               MBoundsCheck* check,
                                               size_t index) {
  // Since we are traversing the dominator tree in pre-order, when we
  // are looking at the |index|-th block, the next numDominated() blocks
  // we traverse are precisely the set of blocks that are dominated.
  //
  // So, this value is visible in all blocks if:
  // index <= index + ins->block->numDominated()
  // and becomes invalid after that.
  HashNumber hash = BoundsCheckHashIgnoreOffset(check);
  BoundsCheckMap::Ptr p = checks.lookup(hash);
  if (!p || index >= p->value().validEnd) {
    // We didn't find a dominating bounds check.
    BoundsCheckInfo info;
    info.check = check;
    info.validEnd = index + check->block()->numDominated();

    if (!checks.put(hash, info)) return nullptr;

    return check;
  }

  return p->value().check;
}

static MathSpace ExtractMathSpace(MDefinition* ins) {
  MOZ_ASSERT(ins->isAdd() || ins->isSub());
  MBinaryArithInstruction* arith = nullptr;
  if (ins->isAdd()) {
    arith = ins->toAdd();
  } else {
    arith = ins->toSub();
  }
  switch (arith->truncateKind()) {
    case TruncateKind::NoTruncate:
    case TruncateKind::TruncateAfterBailouts:
      // TruncateAfterBailouts is considered as infinite space because the
      // LinearSum will effectively remove the bailout check.
      return MathSpace::Infinite;
    case TruncateKind::IndirectTruncate:
    case TruncateKind::Truncate:
      return MathSpace::Modulo;
  }
  MOZ_CRASH("Unknown TruncateKind");
}

static bool MonotoneAdd(int32_t lhs, int32_t rhs) {
  return (lhs >= 0 && rhs >= 0) || (lhs <= 0 && rhs <= 0);
}

static bool MonotoneSub(int32_t lhs, int32_t rhs) {
  return (lhs >= 0 && rhs <= 0) || (lhs <= 0 && rhs >= 0);
}

// Extract a linear sum from ins, if possible (otherwise giving the
// sum 'ins + 0').
SimpleLinearSum jit::ExtractLinearSum(MDefinition* ins, MathSpace space,
                                      int32_t recursionDepth) {
  const int32_t SAFE_RECURSION_LIMIT = 100;
  if (recursionDepth > SAFE_RECURSION_LIMIT) {
    return SimpleLinearSum(ins, 0);
  }

  // Unwrap Int32ToIntPtr. This instruction only changes the representation
  // (int32_t to intptr_t) without affecting the value.
  if (ins->isInt32ToIntPtr()) {
    ins = ins->toInt32ToIntPtr()->input();
  }

  if (ins->isBeta()) {
    ins = ins->getOperand(0);
  }

  MOZ_ASSERT(!ins->isInt32ToIntPtr());

  if (ins->type() != MIRType::Int32) {
    return SimpleLinearSum(ins, 0);
  }

  if (ins->isConstant()) {
    return SimpleLinearSum(nullptr, ins->toConstant()->toInt32());
  }

  if (!ins->isAdd() && !ins->isSub()) {
    return SimpleLinearSum(ins, 0);
  }

  // Only allow math which are in the same space.
  MathSpace insSpace = ExtractMathSpace(ins);
  if (space == MathSpace::Unknown) {
    space = insSpace;
  } else if (space != insSpace) {
    return SimpleLinearSum(ins, 0);
  }
  MOZ_ASSERT(space == MathSpace::Modulo || space == MathSpace::Infinite);

  MDefinition* lhs = ins->getOperand(0);
  MDefinition* rhs = ins->getOperand(1);
  if (lhs->type() != MIRType::Int32 || rhs->type() != MIRType::Int32) {
    return SimpleLinearSum(ins, 0);
  }

  // Extract linear sums of each operand.
  SimpleLinearSum lsum = ExtractLinearSum(lhs, space, recursionDepth + 1);
  SimpleLinearSum rsum = ExtractLinearSum(rhs, space, recursionDepth + 1);

  // LinearSum only considers a single term operand, if both sides have
  // terms, then ignore extracted linear sums.
  if (lsum.term && rsum.term) {
    return SimpleLinearSum(ins, 0);
  }

  // Check if this is of the form <SUM> + n or n + <SUM>.
  if (ins->isAdd()) {
    int32_t constant;
    if (space == MathSpace::Modulo) {
      constant = uint32_t(lsum.constant) + uint32_t(rsum.constant);
    } else if (!SafeAdd(lsum.constant, rsum.constant, &constant) ||
               !MonotoneAdd(lsum.constant, rsum.constant)) {
      return SimpleLinearSum(ins, 0);
    }
    return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
  }

  MOZ_ASSERT(ins->isSub());
  // Check if this is of the form <SUM> - n.
  if (lsum.term) {
    int32_t constant;
    if (space == MathSpace::Modulo) {
      constant = uint32_t(lsum.constant) - uint32_t(rsum.constant);
    } else if (!SafeSub(lsum.constant, rsum.constant, &constant) ||
               !MonotoneSub(lsum.constant, rsum.constant)) {
      return SimpleLinearSum(ins, 0);
    }
    return SimpleLinearSum(lsum.term, constant);
  }

  // Ignore any of the form n - <SUM>.
  return SimpleLinearSum(ins, 0);
}
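
// [Illustrative example, added for exposition; not part of the original
// source.] For the expression (i + 2) + 3 the recursion yields
// {term: i, constant: 2} and {term: nullptr, constant: 3}, which fold into
// {term: i, constant: 5}. An expression with two non-constant terms, such
// as i + j, is left alone and returned as {term: i + j, constant: 0}.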

// Extract a linear inequality holding when a boolean test goes in the
// specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
bool jit::ExtractLinearInequality(MTest* test, BranchDirection direction,
                                  SimpleLinearSum* plhs, MDefinition** prhs,
                                  bool* plessEqual) {
  if (!test->getOperand(0)->isCompare()) {
    return false;
  }

  MCompare* compare = test->getOperand(0)->toCompare();

  MDefinition* lhs = compare->getOperand(0);
  MDefinition* rhs = compare->getOperand(1);

  // TODO: optimize Compare_UInt32
  if (!compare->isInt32Comparison()) {
    return false;
  }

  MOZ_ASSERT(lhs->type() == MIRType::Int32);
  MOZ_ASSERT(rhs->type() == MIRType::Int32);

  JSOp jsop = compare->jsop();
  if (direction == FALSE_BRANCH) {
    jsop = NegateCompareOp(jsop);
  }

  SimpleLinearSum lsum = ExtractLinearSum(lhs);
  SimpleLinearSum rsum = ExtractLinearSum(rhs);

  if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant)) {
    return false;
  }

  // Normalize operations to use <= or >=.
  switch (jsop) {
    case JSOp::Le:
      *plessEqual = true;
      break;
    case JSOp::Lt:
      /* x < y ==> x + 1 <= y */
      if (!SafeAdd(lsum.constant, 1, &lsum.constant)) {
        return false;
      }
      *plessEqual = true;
      break;
    case JSOp::Ge:
      *plessEqual = false;
      break;
    case JSOp::Gt:
      /* x > y ==> x - 1 >= y */
      if (!SafeSub(lsum.constant, 1, &lsum.constant)) {
        return false;
      }
      *plessEqual = false;
      break;
    default:
      return false;
  }

  *plhs = lsum;
  *prhs = rsum.term;

  return true;
}
static bool TryEliminateBoundsCheck(BoundsCheckMap& checks, size_t blockIndex,
                                    MBoundsCheck* dominated, bool* eliminated) {
  MOZ_ASSERT(!*eliminated);

  // Replace all uses of the bounds check with the actual index.
  // This is (a) necessary, because we can coalesce two different
  // bounds checks and would otherwise use the wrong index and
  // (b) helps register allocation. Note that this is safe since
  // no other pass after bounds check elimination moves instructions.
  dominated->replaceAllUsesWith(dominated->index());

  if (!dominated->isMovable()) {
    return true;
  }

  if (!dominated->fallible()) {
    return true;
  }

  MBoundsCheck* dominating =
      FindDominatingBoundsCheck(checks, dominated, blockIndex);
  if (!dominating) {
    return false;
  }

  if (dominating == dominated) {
    // We didn't find a dominating bounds check.
    return true;
  }

  // We found two bounds checks with the same hash number, but we still have
  // to make sure the lengths and index terms are equal.
  if (dominating->length() != dominated->length()) {
    return true;
  }

  SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
  SimpleLinearSum sumB = ExtractLinearSum(dominated->index());

  // Both terms should be nullptr or the same definition.
  if (sumA.term != sumB.term) {
    return true;
  }

  // This bounds check is redundant.
  *eliminated = true;

  // Normalize the ranges according to the constant offsets in the two indexes.
  int32_t minimumA, maximumA, minimumB, maximumB;
  if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
      !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
      !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
      !SafeAdd(sumB.constant, dominated->maximum(), &maximumB)) {
    return false;
  }

  // Update the dominating check to cover both ranges, denormalizing the
  // result per the constant offset in the index.
  int32_t newMinimum, newMaximum;
  if (!SafeSub(std::min(minimumA, minimumB), sumA.constant, &newMinimum) ||
      !SafeSub(std::max(maximumA, maximumB), sumA.constant, &newMaximum)) {
    return false;
  }

  dominating->setMinimum(newMinimum);
  dominating->setMaximum(newMaximum);
  dominating->setBailoutKind(BailoutKind::HoistBoundsCheck);

  return true;
}

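// For illustration: if a check of index 'i + 1' dominates a check of 'i - 2'
// against the same length, the dominated check is removed and the dominating
// one is widened to minimum() == -3 and maximum() == 0, covering accesses
// from 'i - 2' up to 'i + 1'.
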
// Eliminate checks which are redundant given each other or other instructions.
//
// A bounds check is considered redundant if it's dominated by another bounds
// check with the same length and the indexes differ by only a constant amount.
// In this case we eliminate the redundant bounds check and update the other one
// to cover the ranges of both checks.
//
// Bounds checks are added to a hash map and since the hash function ignores
// differences in constant offset, this offers a fast way to find redundant
// checks.
bool jit::EliminateRedundantChecks(MIRGraph& graph) {
  BoundsCheckMap checks(graph.alloc());

  // Stack for pre-order CFG traversal.
  Vector<MBasicBlock*, 1, JitAllocPolicy> worklist(graph.alloc());

  // The index of the current block in the CFG traversal.
  size_t index = 0;

  // Add all self-dominating blocks to the worklist.
  // This includes all roots. Order does not matter.
  for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
    MBasicBlock* block = *i;
    if (block->immediateDominator() == block) {
      if (!worklist.append(block)) {
        return false;
      }
    }
  }

  // Starting from each self-dominating block, traverse the CFG in pre-order.
  while (!worklist.empty()) {
    MBasicBlock* block = worklist.popCopy();

    // Add all immediate dominators to the front of the worklist.
    if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                         block->immediatelyDominatedBlocksEnd())) {
      return false;
    }

    for (MDefinitionIterator iter(block); iter;) {
      MDefinition* def = *iter++;

      if (!def->isBoundsCheck()) {
        continue;
      }
      auto* boundsCheck = def->toBoundsCheck();

      bool eliminated = false;
      if (!TryEliminateBoundsCheck(checks, index, boundsCheck, &eliminated)) {
        return false;
      }

      if (eliminated) {
        block->discard(boundsCheck);
      }
    }
    index++;
  }

  MOZ_ASSERT(index == graph.numBlocks());
  return true;
}

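// For illustration: in code like 'arr[i] + arr[i + 1]', both bounds checks
// share the same length and their indexes differ only by a constant, so the
// dominated check is discarded and the dominating one widened.
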
static bool ShapeGuardIsRedundant(MGuardShape* guard,
                                  const MDefinition* storeObject,
                                  const Shape* storeShape) {
  const MDefinition* guardObject = guard->object()->skipObjectGuards();
  if (guardObject != storeObject) {
    JitSpew(JitSpew_RedundantShapeGuards, "SKIP: different objects (%d vs %d)",
            guardObject->id(), storeObject->id());
    return false;
  }

  const Shape* guardShape = guard->shape();
  if (guardShape != storeShape) {
    JitSpew(JitSpew_RedundantShapeGuards, "SKIP: different shapes");
    return false;
  }

  return true;
}

// Eliminate shape guards which are redundant given other instructions.
//
// A shape guard is redundant if we can prove that the object being
// guarded already has the correct shape. The conditions for doing so
// are as follows:
//
// 1. We can see the most recent change to the shape of this object.
//    (This can be an AddAndStoreSlot, an AllocateAndStoreSlot, or the
//    creation of the object itself.)
// 2. That mutation dominates the shape guard.
// 3. The shape that was assigned at that point matches the shape
//    we expect.
//
// If all of these conditions hold, then we can remove the shape guard.
// In debug, we replace it with an AssertShape to help verify correctness.
bool jit::EliminateRedundantShapeGuards(MIRGraph& graph) {
  JitSpew(JitSpew_RedundantShapeGuards, "Begin");

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      MInstruction* ins = *insIter;
      insIter++;

      // Skip instructions that aren't shape guards.
      if (!ins->isGuardShape()) {
        continue;
      }
      MGuardShape* guard = ins->toGuardShape();
      MDefinition* lastStore = guard->dependency();

      JitSpew(JitSpew_RedundantShapeGuards, "Visit shape guard %d",
              guard->id());
      JitSpewIndent spewIndent(JitSpew_RedundantShapeGuards);

      if (lastStore->isDiscarded() || lastStore->block()->isDead() ||
          !lastStore->block()->dominates(guard->block())) {
        JitSpew(JitSpew_RedundantShapeGuards,
                "SKIP: ins %d does not dominate block %d", lastStore->id(),
                guard->block()->id());
        continue;
      }

      if (lastStore->isAddAndStoreSlot()) {
        auto* add = lastStore->toAddAndStoreSlot();
        auto* addObject = add->object()->skipObjectGuards();
        if (!ShapeGuardIsRedundant(guard, addObject, add->shape())) {
          continue;
        }
      } else if (lastStore->isAllocateAndStoreSlot()) {
        auto* allocate = lastStore->toAllocateAndStoreSlot();
        auto* allocateObject = allocate->object()->skipObjectGuards();
        if (!ShapeGuardIsRedundant(guard, allocateObject, allocate->shape())) {
          continue;
        }
      } else if (lastStore->isStart()) {
        // The guard doesn't depend on any other instruction that is modifying
        // the object operand, so we check the object operand directly.
        auto* obj = guard->object()->skipObjectGuards();

        const Shape* initialShape = nullptr;
        if (obj->isNewObject()) {
          auto* templateObject = obj->toNewObject()->templateObject();
          if (!templateObject) {
            JitSpew(JitSpew_RedundantShapeGuards, "SKIP: no template");
            continue;
          }
          initialShape = templateObject->shape();
        } else if (obj->isNewPlainObject()) {
          initialShape = obj->toNewPlainObject()->shape();
        } else {
          JitSpew(JitSpew_RedundantShapeGuards,
                  "SKIP: not NewObject or NewPlainObject (%d)", obj->id());
          continue;
        }
        if (initialShape != guard->shape()) {
          JitSpew(JitSpew_RedundantShapeGuards, "SKIP: shapes don't match");
          continue;
        }
      } else {
        JitSpew(JitSpew_RedundantShapeGuards,
                "SKIP: Last store not supported (%d)", lastStore->id());
        continue;
      }

#ifdef DEBUG
      if (!graph.alloc().ensureBallast()) {
        return false;
      }
      auto* assert = MAssertShape::New(graph.alloc(), guard->object(),
                                       const_cast<Shape*>(guard->shape()));
      guard->block()->insertBefore(guard, assert);
#endif

      JitSpew(JitSpew_RedundantShapeGuards, "SUCCESS: Removing shape guard %d",
              guard->id());
      guard->replaceAllUsesWith(guard->input());
      guard->block()->discard(guard);
    }
  }

  return true;
}

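// For illustration: in script like 'var o = {}; o.x = 1; use(o.x)', the
// AddAndStoreSlot that defines 'x' dominates a later GuardShape on 'o' and
// installs exactly the guarded shape, so that guard can be removed.
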
static bool TryEliminateGCBarriersForAllocation(TempAllocator& alloc,
                                                MInstruction* allocation) {
  MOZ_ASSERT(allocation->type() == MIRType::Object);

  JitSpew(JitSpew_RedundantGCBarriers, "Analyzing allocation %s",
          allocation->opName());

  MBasicBlock* block = allocation->block();
  MInstructionIterator insIter(block->begin(allocation));

  // Skip `allocation`.
  MOZ_ASSERT(*insIter == allocation);
  insIter++;

  // Try to optimize the other instructions in the block.
  while (insIter != block->end()) {
    MInstruction* ins = *insIter;
    insIter++;
    switch (ins->op()) {
      case MDefinition::Opcode::Constant:
      case MDefinition::Opcode::Box:
      case MDefinition::Opcode::Unbox:
      case MDefinition::Opcode::AssertCanElidePostWriteBarrier:
        // These instructions can't trigger GC or affect this analysis in
        // other ways.
        break;
      case MDefinition::Opcode::StoreFixedSlot: {
        auto* store = ins->toStoreFixedSlot();
        if (store->object() != allocation) {
          JitSpew(JitSpew_RedundantGCBarriers,
                  "Stopped at StoreFixedSlot for other object");
          return true;
        }
        store->setNeedsBarrier(false);
        JitSpew(JitSpew_RedundantGCBarriers, "Elided StoreFixedSlot barrier");
        break;
      }
      case MDefinition::Opcode::PostWriteBarrier: {
        auto* barrier = ins->toPostWriteBarrier();
        if (barrier->object() != allocation) {
          JitSpew(JitSpew_RedundantGCBarriers,
                  "Stopped at PostWriteBarrier for other object");
          return true;
        }
#ifdef DEBUG
        if (!alloc.ensureBallast()) {
          return false;
        }
        MDefinition* value = barrier->value();
        if (value->type() != MIRType::Value) {
          value = MBox::New(alloc, value);
          block->insertBefore(barrier, value->toInstruction());
        }
        auto* assert =
            MAssertCanElidePostWriteBarrier::New(alloc, allocation, value);
        block->insertBefore(barrier, assert);
#endif
        block->discard(barrier);
        JitSpew(JitSpew_RedundantGCBarriers, "Elided PostWriteBarrier");
        break;
      }
      default:
        JitSpew(JitSpew_RedundantGCBarriers,
                "Stopped at unsupported instruction %s", ins->opName());
        return true;
    }
  }

  return true;
}

bool jit::EliminateRedundantGCBarriers(MIRGraph& graph) {
  // Peephole optimization for the following pattern:
  //
  //   0: MNewCallObject
  //   1: MStoreFixedSlot(0, ...)
  //   2: MStoreFixedSlot(0, ...)
  //   3: MPostWriteBarrier(0, ...)
  //
  // If the instructions immediately following the allocation instruction can't
  // trigger GC and we are storing to the new object's slots, we can elide the
  // pre-barrier.
  //
  // We also eliminate the post barrier and (in debug builds) replace it with an
  // assertion.
  //
  // See also the similar optimizations in WarpBuilder::buildCallObject.

  JitSpew(JitSpew_RedundantGCBarriers, "Begin");

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      MInstruction* ins = *insIter;
      insIter++;

      if (ins->isNewCallObject()) {
        if (!TryEliminateGCBarriersForAllocation(graph.alloc(), ins)) {
          return false;
        }
      }
    }
  }

  return true;
}

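// For illustration: a script like 'function f(a) { return () => a; }'
// allocates a call object and immediately stores 'a' into one of its slots,
// producing exactly the store/post-barrier pattern described above.
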
bool jit::MarkLoadsUsedAsPropertyKeys(MIRGraph& graph) {
  // When a string is used as a property key, or as the key for a Map or Set, we
  // require it to be atomized. To avoid repeatedly atomizing the same string,
  // this analysis looks for cases where we are loading a value from the slot of
  // an object (which includes access to global variables and global lexicals)
  // and using it as a property key, and marks those loads. During codegen,
  // marked loads will check whether the value loaded is a non-atomized string.
  // If it is, we will atomize the string and update the stored value, ensuring
  // that future loads from the same slot will not have to atomize again.
  JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys, "Begin");

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      MInstruction* ins = *insIter;
      insIter++;

      MDefinition* idVal = nullptr;
      if (ins->isGetPropertyCache()) {
        idVal = ins->toGetPropertyCache()->idval();
      } else if (ins->isHasOwnCache()) {
        idVal = ins->toHasOwnCache()->idval();
      } else if (ins->isSetPropertyCache()) {
        idVal = ins->toSetPropertyCache()->idval();
      } else if (ins->isGetPropSuperCache()) {
        idVal = ins->toGetPropSuperCache()->idval();
      } else if (ins->isMegamorphicLoadSlotByValue()) {
        idVal = ins->toMegamorphicLoadSlotByValue()->idVal();
      } else if (ins->isMegamorphicHasProp()) {
        idVal = ins->toMegamorphicHasProp()->idVal();
      } else if (ins->isMegamorphicSetElement()) {
        idVal = ins->toMegamorphicSetElement()->index();
      } else if (ins->isProxyGetByValue()) {
        idVal = ins->toProxyGetByValue()->idVal();
      } else if (ins->isProxyHasProp()) {
        idVal = ins->toProxyHasProp()->idVal();
      } else if (ins->isProxySetByValue()) {
        idVal = ins->toProxySetByValue()->idVal();
      } else if (ins->isIdToStringOrSymbol()) {
        idVal = ins->toIdToStringOrSymbol()->idVal();
      } else if (ins->isGuardSpecificAtom()) {
        idVal = ins->toGuardSpecificAtom()->input();
      } else if (ins->isToHashableString()) {
        idVal = ins->toToHashableString()->input();
      } else if (ins->isToHashableValue()) {
        idVal = ins->toToHashableValue()->input();
      } else if (ins->isMapObjectHasValueVMCall()) {
        idVal = ins->toMapObjectHasValueVMCall()->value();
      } else if (ins->isMapObjectGetValueVMCall()) {
        idVal = ins->toMapObjectGetValueVMCall()->value();
      } else if (ins->isSetObjectHasValueVMCall()) {
        idVal = ins->toSetObjectHasValueVMCall()->value();
      } else {
        continue;
      }

      JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
              "Analyzing property access %s%d with idVal %s%d", ins->opName(),
              ins->id(), idVal->opName(), idVal->id());

      // Skip intermediate nodes.
      while (true) {
        if (idVal->isLexicalCheck()) {
          idVal = idVal->toLexicalCheck()->input();
          JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
                  "- Skipping lexical check. idVal is now %s%d",
                  idVal->opName(), idVal->id());
          continue;
        }
        if (idVal->isUnbox() && idVal->type() == MIRType::String) {
          idVal = idVal->toUnbox()->input();
          JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
                  "- Skipping unbox. idVal is now %s%d", idVal->opName(),
                  idVal->id());
          continue;
        }
        break;
      }

      if (idVal->isLoadFixedSlot()) {
        JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
                "- SUCCESS: Marking fixed slot");
        idVal->toLoadFixedSlot()->setUsedAsPropertyKey();
      } else if (idVal->isLoadDynamicSlot()) {
        JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
                "- SUCCESS: Marking dynamic slot");
        idVal->toLoadDynamicSlot()->setUsedAsPropertyKey();
      } else {
        JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys, "- SKIP: %s not supported",
                idVal->opName());
      }
    }
  }

  return true;
}

static bool NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use) {
  MOZ_ASSERT(slotsOrElements->type() == MIRType::Elements ||
             slotsOrElements->type() == MIRType::Slots);

  if (slotsOrElements->block() != use->block()) {
    return true;
  }

  // Allocating a BigInt can GC, so we have to keep the object alive.
  if (use->type() == MIRType::BigInt) {
    return true;
  }
  if (use->isLoadTypedArrayElementHole() &&
      Scalar::isBigIntType(use->toLoadTypedArrayElementHole()->arrayType())) {
    return true;
  }

  MBasicBlock* block = use->block();
  MInstructionIterator iter(block->begin(slotsOrElements));
  MOZ_ASSERT(*iter == slotsOrElements);
  ++iter;

  while (true) {
    if (*iter == use) {
      return false;
    }

    switch (iter->op()) {
      case MDefinition::Opcode::Nop:
      case MDefinition::Opcode::Constant:
      case MDefinition::Opcode::KeepAliveObject:
      case MDefinition::Opcode::Unbox:
      case MDefinition::Opcode::LoadDynamicSlot:
      case MDefinition::Opcode::StoreDynamicSlot:
      case MDefinition::Opcode::LoadFixedSlot:
      case MDefinition::Opcode::StoreFixedSlot:
      case MDefinition::Opcode::LoadElement:
      case MDefinition::Opcode::LoadElementAndUnbox:
      case MDefinition::Opcode::LoadElementHole:
      case MDefinition::Opcode::StoreElement:
      case MDefinition::Opcode::StoreHoleValueElement:
      case MDefinition::Opcode::InitializedLength:
      case MDefinition::Opcode::ArrayLength:
      case MDefinition::Opcode::BoundsCheck:
      case MDefinition::Opcode::GuardElementNotHole:
      case MDefinition::Opcode::InArray:
      case MDefinition::Opcode::SpectreMaskIndex:
      case MDefinition::Opcode::DebugEnterGCUnsafeRegion:
      case MDefinition::Opcode::DebugLeaveGCUnsafeRegion:
        iter++;
        break;
      default:
        return true;
    }
  }

  MOZ_CRASH("Unreachable");
}

bool jit::AddKeepAliveInstructions(MIRGraph& graph) {
  for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
    MBasicBlock* block = *i;

    for (MInstructionIterator insIter(block->begin()); insIter != block->end();
         insIter++) {
      MInstruction* ins = *insIter;
      if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots) {
        continue;
      }

      MDefinition* ownerObject;
      switch (ins->op()) {
        case MDefinition::Opcode::Elements:
        case MDefinition::Opcode::ArrayBufferViewElements:
          MOZ_ASSERT(ins->numOperands() == 1);
          ownerObject = ins->getOperand(0);
          break;
        case MDefinition::Opcode::Slots:
          ownerObject = ins->toSlots()->object();
          break;
        default:
          MOZ_CRASH("Unexpected op");
      }

      MOZ_ASSERT(ownerObject->type() == MIRType::Object);

      if (ownerObject->isConstant()) {
        // Constants are kept alive by other pointers, for instance
        // ImmGCPtr in JIT code.
        continue;
      }

      for (MUseDefIterator uses(ins); uses; uses++) {
        MInstruction* use = uses.def()->toInstruction();

        if (use->isStoreElementHole()) {
          // StoreElementHole has an explicit object operand. If GVN
          // is disabled, we can get different unbox instructions with
          // the same object as input, so we check for that case.
          MOZ_ASSERT_IF(!use->toStoreElementHole()->object()->isUnbox() &&
                            !ownerObject->isUnbox(),
                        use->toStoreElementHole()->object() == ownerObject);
          continue;
        }

        if (!NeedsKeepAlive(ins, use)) {
#ifdef DEBUG
          // These two instructions don't start a GC unsafe region, because
          // they overwrite their elements register at the very start. This
          // ensures there's no invalidated elements value kept on the stack.
          if (use->isApplyArray() || use->isConstructArray()) {
            continue;
          }

          if (!graph.alloc().ensureBallast()) {
            return false;
          }

          // Enter a GC unsafe region while the elements/slots are on the
          // stack.
          auto* enter = MDebugEnterGCUnsafeRegion::New(graph.alloc());
          use->block()->insertAfter(ins, enter);

          // Leave the region after the use.
          auto* leave = MDebugLeaveGCUnsafeRegion::New(graph.alloc());
          use->block()->insertAfter(use, leave);
#endif
          continue;
        }

        if (!graph.alloc().ensureBallast()) {
          return false;
        }
        MKeepAliveObject* keepAlive =
            MKeepAliveObject::New(graph.alloc(), ownerObject);
        use->block()->insertAfter(use, keepAlive);
      }
    }
  }

  return true;
}

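// For illustration: if an MElements of 'obj' is used by a load and a
// GC-triggering instruction sits between them, an MKeepAliveObject for 'obj'
// is inserted after the load so the object (and its elements) stays alive
// across that region.
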
bool LinearSum::multiply(int32_t scale) {
  for (size_t i = 0; i < terms_.length(); i++) {
    if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale)) {
      return false;
    }
  }
  return SafeMul(scale, constant_, &constant_);
}

bool LinearSum::divide(uint32_t scale) {
  MOZ_ASSERT(scale > 0);

  for (size_t i = 0; i < terms_.length(); i++) {
    if (terms_[i].scale % scale != 0) {
      return false;
    }
  }
  if (constant_ % scale != 0) {
    return false;
  }

  for (size_t i = 0; i < terms_.length(); i++) {
    terms_[i].scale /= scale;
  }
  constant_ /= scale;

  return true;
}

bool LinearSum::add(const LinearSum& other, int32_t scale /* = 1 */) {
  for (size_t i = 0; i < other.terms_.length(); i++) {
    int32_t newScale = scale;
    if (!SafeMul(scale, other.terms_[i].scale, &newScale)) {
      return false;
    }
    if (!add(other.terms_[i].term, newScale)) {
      return false;
    }
  }
  int32_t newConstant = scale;
  if (!SafeMul(scale, other.constant_, &newConstant)) {
    return false;
  }
  return add(newConstant);
}

bool LinearSum::add(SimpleLinearSum other, int32_t scale) {
  if (other.term && !add(other.term, scale)) {
    return false;
  }

  int32_t constant;
  if (!SafeMul(other.constant, scale, &constant)) {
    return false;
  }

  return add(constant);
}

bool LinearSum::add(MDefinition* term, int32_t scale) {
  MOZ_ASSERT(term);

  if (scale == 0) {
    return true;
  }

  if (MConstant* termConst = term->maybeConstantValue()) {
    int32_t constant = termConst->toInt32();
    if (!SafeMul(constant, scale, &constant)) {
      return false;
    }
    return add(constant);
  }

  for (size_t i = 0; i < terms_.length(); i++) {
    if (term == terms_[i].term) {
      if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale)) {
        return false;
      }
      if (terms_[i].scale == 0) {
        terms_[i] = terms_.back();
        terms_.popBack();
      }
      return true;
    }
  }

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!terms_.append(LinearTerm(term, scale))) {
    oomUnsafe.crash("LinearSum::add");
  }

  return true;
}

bool LinearSum::add(int32_t constant) {
  return SafeAdd(constant, constant_, &constant_);
}

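// For illustration: starting from the sum 'x + 2*y + 3', calling add(y, -2)
// cancels the 'y' term entirely (2 + -2 == 0), leaving just 'x + 3'.
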
void LinearSum::dump(GenericPrinter& out) const {
  for (size_t i = 0; i < terms_.length(); i++) {
    int32_t scale = terms_[i].scale;
    int32_t id = terms_[i].term->id();
    MOZ_ASSERT(scale);
    if (scale > 0) {
      if (i) {
        out.printf("+");
      }
      if (scale == 1) {
        out.printf("#%d", id);
      } else {
        out.printf("%d*#%d", scale, id);
      }
    } else if (scale == -1) {
      out.printf("-#%d", id);
    } else {
      out.printf("%d*#%d", scale, id);
    }
  }
  if (constant_ > 0) {
    out.printf("+%d", constant_);
  } else if (constant_ < 0) {
    out.printf("%d", constant_);
  }
}

void LinearSum::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}

MDefinition* jit::ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block,
                                   const LinearSum& sum,
                                   BailoutKind bailoutKind) {
  MDefinition* def = nullptr;

  for (size_t i = 0; i < sum.numTerms(); i++) {
    LinearTerm term = sum.term(i);
    MOZ_ASSERT(!term.term->isConstant());
    if (term.scale == 1) {
      if (def) {
        def = MAdd::New(alloc, def, term.term, MIRType::Int32);
        def->setBailoutKind(bailoutKind);
        block->insertAtEnd(def->toInstruction());
        def->computeRange(alloc);
      } else {
        def = term.term;
      }
    } else if (term.scale == -1) {
      if (!def) {
        def = MConstant::New(alloc, Int32Value(0));
        block->insertAtEnd(def->toInstruction());
        def->computeRange(alloc);
      }
      def = MSub::New(alloc, def, term.term, MIRType::Int32);
      def->setBailoutKind(bailoutKind);
      block->insertAtEnd(def->toInstruction());
      def->computeRange(alloc);
    } else {
      MOZ_ASSERT(term.scale != 0);
      MConstant* factor = MConstant::New(alloc, Int32Value(term.scale));
      block->insertAtEnd(factor);
      MMul* mul = MMul::New(alloc, term.term, factor, MIRType::Int32);
      mul->setBailoutKind(bailoutKind);
      block->insertAtEnd(mul);
      mul->computeRange(alloc);
      if (def) {
        def = MAdd::New(alloc, def, mul, MIRType::Int32);
        def->setBailoutKind(bailoutKind);
        block->insertAtEnd(def->toInstruction());
        def->computeRange(alloc);
      } else {
        def = mul;
      }
    }
  }

  if (!def) {
    def = MConstant::New(alloc, Int32Value(0));
    block->insertAtEnd(def->toInstruction());
    def->computeRange(alloc);
  }

  return def;
}

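// For illustration: the sum 'a - b' lowers to 'MSub(a, b)', while a scaled
// term such as '3*c' becomes 'MMul(c, MConstant(3))' and is then chained into
// the running result with MAdd.
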
// Mark all the blocks that are in the loop with the given header.
// Returns the number of blocks marked. Set *canOsr to true if the loop is
// reachable from both the normal entry and the OSR entry.
size_t jit::MarkLoopBlocks(MIRGraph& graph, MBasicBlock* header, bool* canOsr) {
#ifdef DEBUG
  for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
       i != e; ++i) {
    MOZ_ASSERT(!i->isMarked(), "Some blocks already marked");
  }
#endif

  MBasicBlock* osrBlock = graph.osrBlock();
  *canOsr = false;

  // The blocks are in RPO; start at the loop backedge, which marks the bottom
  // of the loop, and walk up until we get to the header. Loops may be
  // discontiguous, so we trace predecessors to determine which blocks are
  // actually part of the loop. The backedge is always part of the loop, and
  // so are its predecessors, transitively, up to the loop header or an OSR
  // entry.
  MBasicBlock* backedge = header->backedge();
  backedge->mark();
  size_t numMarked = 1;
  for (PostorderIterator i = graph.poBegin(backedge);; ++i) {
    MOZ_ASSERT(
        i != graph.poEnd(),
        "Reached the end of the graph while searching for the loop header");
    MBasicBlock* block = *i;
    // If we've reached the loop header, we're done.
    if (block == header) {
      break;
    }
    // A block not marked by the time we reach it is not in the loop.
    if (!block->isMarked()) {
      continue;
    }

    // This block is in the loop; trace to its predecessors.
    for (size_t p = 0, e = block->numPredecessors(); p != e; ++p) {
      MBasicBlock* pred = block->getPredecessor(p);
      if (pred->isMarked()) {
        continue;
      }

      // Blocks dominated by the OSR entry are not part of the loop
      // (unless they aren't reachable from the normal entry).
      if (osrBlock && pred != header && osrBlock->dominates(pred) &&
          !osrBlock->dominates(header)) {
        *canOsr = true;
        continue;
      }

      MOZ_ASSERT(pred->id() >= header->id() && pred->id() <= backedge->id(),
                 "Loop block not between loop header and loop backedge");

      pred->mark();
      ++numMarked;

      // A nested loop may not exit back to the enclosing loop at its
      // bottom. If we just marked its header, then the whole nested loop
      // is part of the enclosing loop.
      if (pred->isLoopHeader()) {
        MBasicBlock* innerBackedge = pred->backedge();
        if (!innerBackedge->isMarked()) {
          // Mark its backedge so that we add all of its blocks to the
          // outer loop as we walk upwards.
          innerBackedge->mark();
          ++numMarked;

          // If the nested loop is not contiguous, we may have already
          // passed its backedge. If this happens, back up.
          if (innerBackedge->id() > block->id()) {
            i = graph.poBegin(innerBackedge);
            --i;
          }
        }
      }
    }
  }

  // If there's no path connecting the header to the backedge, then this isn't
  // actually a loop. This can happen when the code starts with a loop but GVN
  // folds some branches away.
  if (!header->isMarked()) {
    jit::UnmarkLoopBlocks(graph, header);
    return 0;
  }

  return numMarked;
}

// Unmark all the blocks that are in the loop with the given header.
void jit::UnmarkLoopBlocks(MIRGraph& graph, MBasicBlock* header) {
  MBasicBlock* backedge = header->backedge();
  for (ReversePostorderIterator i = graph.rpoBegin(header);; ++i) {
    MOZ_ASSERT(i != graph.rpoEnd(),
               "Reached the end of the graph while searching for the backedge");
    MBasicBlock* block = *i;
    if (block->isMarked()) {
      block->unmark();
      if (block == backedge) {
        break;
      }
    }
  }

#ifdef DEBUG
  for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
       i != e; ++i) {
    MOZ_ASSERT(!i->isMarked(), "Not all blocks got unmarked");
  }
#endif
}

bool jit::FoldLoadsWithUnbox(MIRGenerator* mir, MIRGraph& graph) {
  // This pass folds MLoadFixedSlot, MLoadDynamicSlot, MLoadElement instructions
  // followed by MUnbox into a single instruction. For LoadElement this allows
  // us to fuse the hole check with the type check for the unbox.

  for (MBasicBlockIterator block(graph.begin()); block != graph.end();
       block++) {
    if (mir->shouldCancel("FoldLoadsWithUnbox")) {
      return false;
    }

    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      MInstruction* ins = *insIter;
      insIter++;

      // We're only interested in loads producing a Value.
      if (!ins->isLoadFixedSlot() && !ins->isLoadDynamicSlot() &&
          !ins->isLoadElement()) {
        continue;
      }
      if (ins->type() != MIRType::Value) {
        continue;
      }

      MInstruction* load = ins;

      // Ensure there's a single def-use (ignoring resume points) and it's an
      // unbox. Unwrap MLexicalCheck because it's redundant if we have a
      // fallible unbox (checked below).
      MDefinition* defUse = load->maybeSingleDefUse();
      if (!defUse) {
        continue;
      }
      MLexicalCheck* lexicalCheck = nullptr;
      if (defUse->isLexicalCheck()) {
        lexicalCheck = defUse->toLexicalCheck();
        defUse = lexicalCheck->maybeSingleDefUse();
        if (!defUse) {
          continue;
        }
      }
      if (!defUse->isUnbox()) {
        continue;
      }

      // For now require the load and unbox to be in the same block. This isn't
      // strictly necessary but it's the common case and could prevent bailouts
      // when moving the unbox before a loop.
      MUnbox* unbox = defUse->toUnbox();
      if (unbox->block() != *block) {
        continue;
      }
      MOZ_ASSERT_IF(lexicalCheck, lexicalCheck->block() == *block);

      MOZ_ASSERT(!IsMagicType(unbox->type()));

      // If this is a LoadElement or if we have a lexical check between the load
      // and unbox, we only support folding the load with a fallible unbox so
      // that we can eliminate the MagicValue check.
      if ((load->isLoadElement() || lexicalCheck) && !unbox->fallible()) {
        continue;
      }

      // Combine the load and unbox into a single MIR instruction.
      if (!graph.alloc().ensureBallast()) {
        return false;
      }

      MIRType type = unbox->type();
      MUnbox::Mode mode = unbox->mode();

      MInstruction* replacement;
      switch (load->op()) {
        case MDefinition::Opcode::LoadFixedSlot: {
          auto* loadIns = load->toLoadFixedSlot();
          replacement = MLoadFixedSlotAndUnbox::New(
              graph.alloc(), loadIns->object(), loadIns->slot(), mode, type,
              loadIns->usedAsPropertyKey());
          break;
        }
        case MDefinition::Opcode::LoadDynamicSlot: {
          auto* loadIns = load->toLoadDynamicSlot();
          replacement = MLoadDynamicSlotAndUnbox::New(
              graph.alloc(), loadIns->slots(), loadIns->slot(), mode, type,
              loadIns->usedAsPropertyKey());
          break;
        }
        case MDefinition::Opcode::LoadElement: {
          auto* loadIns = load->toLoadElement();
          MOZ_ASSERT(unbox->fallible());
          replacement = MLoadElementAndUnbox::New(
              graph.alloc(), loadIns->elements(), loadIns->index(), mode, type);
          break;
        }
        default:
          MOZ_CRASH("Unexpected instruction");
      }
      replacement->setBailoutKind(BailoutKind::UnboxFolding);

      block->insertBefore(load, replacement);
      unbox->replaceAllUsesWith(replacement);
      if (lexicalCheck) {
        lexicalCheck->replaceAllUsesWith(replacement);
      }
      load->replaceAllUsesWith(replacement);

      if (lexicalCheck && *insIter == lexicalCheck) {
        insIter++;
      }
      if (*insIter == unbox) {
        insIter++;
      }
      block->discard(unbox);
      if (lexicalCheck) {
        block->discard(lexicalCheck);
      }
      block->discard(load);
    }
  }

  return true;
}

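// For illustration: the pair 'v = MLoadFixedSlot(obj); u = MUnbox(v)' becomes
// a single MLoadFixedSlotAndUnbox instruction, with all uses of both the load
// and the unbox redirected to the fused instruction.
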
// Reorder the blocks in the loop starting at the given header to be contiguous.
static void MakeLoopContiguous(MIRGraph& graph, MBasicBlock* header,
                               size_t numMarked) {
  MBasicBlock* backedge = header->backedge();

  MOZ_ASSERT(header->isMarked(), "Loop header is not part of loop");
  MOZ_ASSERT(backedge->isMarked(), "Loop backedge is not part of loop");

  // If there are any blocks between the loop header and the loop backedge
  // that are not part of the loop, prepare to move them to the end. We keep
  // them in order, which preserves RPO.
  ReversePostorderIterator insertIter = graph.rpoBegin(backedge);
  insertIter++;
  MBasicBlock* insertPt = *insertIter;

  // Visit all the blocks from the loop header to the loop backedge.
  size_t headerId = header->id();
  size_t inLoopId = headerId;
  size_t notInLoopId = inLoopId + numMarked;
  ReversePostorderIterator i = graph.rpoBegin(header);
  for (;;) {
    MBasicBlock* block = *i++;
    MOZ_ASSERT(block->id() >= header->id() && block->id() <= backedge->id(),
               "Loop backedge should be last block in loop");

    if (block->isMarked()) {
      // This block is in the loop.
      block->unmark();
      block->setId(inLoopId++);
      // If we've reached the loop backedge, we're done!
      if (block == backedge) {
        break;
      }
    } else {
      // This block is not in the loop. Move it to the end.
      graph.moveBlockBefore(insertPt, block);
      block->setId(notInLoopId++);
    }
  }

  MOZ_ASSERT(header->id() == headerId, "Loop header id changed");
  MOZ_ASSERT(inLoopId == headerId + numMarked,
             "Wrong number of blocks kept in loop");
  MOZ_ASSERT(notInLoopId == (insertIter != graph.rpoEnd() ? insertPt->id()
                                                          : graph.numBlocks()),
             "Wrong number of blocks moved out of loop");
}

// Reorder the blocks in the graph so that loops are contiguous.
bool jit::MakeLoopsContiguous(MIRGraph& graph) {
  // Visit all loop headers (in any order).
  for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
    MBasicBlock* header = *i;
    if (!header->isLoopHeader()) {
      continue;
    }

    // Mark all blocks that are actually part of the loop.
    bool canOsr;
    size_t numMarked = MarkLoopBlocks(graph, header, &canOsr);

    // If the loop isn't a loop, don't try to optimize it.
    if (numMarked == 0) {
      continue;
    }

    // If there's an OSR block entering the loop in the middle, it's tricky,
    // so don't try to handle it, for now.
    if (canOsr) {
      UnmarkLoopBlocks(graph, header);
      continue;
    }

    // Move all blocks between header and backedge that aren't marked to
    // the end of the loop, making the loop itself contiguous.
    MakeLoopContiguous(graph, header, numMarked);
  }

  return true;
}

static MDefinition* SkipUnbox(MDefinition* ins) {
  if (ins->isUnbox()) {
    return ins->toUnbox()->input();
  }
  return ins;
}

bool jit::OptimizeIteratorIndices(MIRGenerator* mir, MIRGraph& graph) {
  bool changed = false;

  for (ReversePostorderIterator blockIter = graph.rpoBegin();
       blockIter != graph.rpoEnd();) {
    MBasicBlock* block = *blockIter++;
    for (MInstructionIterator insIter(block->begin());
         insIter != block->end();) {
      MInstruction* ins = *insIter;
      insIter++;
      if (!graph.alloc().ensureBallast()) {
        return false;
      }

      MDefinition* receiver = nullptr;
      MDefinition* idVal = nullptr;
      MDefinition* setValue = nullptr;
      if (ins->isMegamorphicHasProp() &&
          ins->toMegamorphicHasProp()->hasOwn()) {
        receiver = ins->toMegamorphicHasProp()->object();
        idVal = ins->toMegamorphicHasProp()->idVal();
      } else if (ins->isHasOwnCache()) {
        receiver = ins->toHasOwnCache()->value();
        idVal = ins->toHasOwnCache()->idval();
      } else if (ins->isMegamorphicLoadSlotByValue()) {
        receiver = ins->toMegamorphicLoadSlotByValue()->object();
        idVal = ins->toMegamorphicLoadSlotByValue()->idVal();
      } else if (ins->isGetPropertyCache()) {
        receiver = ins->toGetPropertyCache()->value();
        idVal = ins->toGetPropertyCache()->idval();
      } else if (ins->isMegamorphicSetElement()) {
        receiver = ins->toMegamorphicSetElement()->object();
        idVal = ins->toMegamorphicSetElement()->index();
        setValue = ins->toMegamorphicSetElement()->value();
      } else if (ins->isSetPropertyCache()) {
        receiver = ins->toSetPropertyCache()->object();
        idVal = ins->toSetPropertyCache()->idval();
        setValue = ins->toSetPropertyCache()->value();
      }

      if (!receiver) {
        continue;
      }

      // Given the following structure (that occurs inside for-in loops):
      //   iter: ObjectToIterator <obj>
      //   iterNext: IteratorMore <iter>
      //   access: HasProp/GetElem <obj> <iterNext>
      // If the iterator object has an indices array, we can speed up the
      // property access:
      // 1. If the property access is a HasProp looking for own properties,
      //    then the result will always be true if the iterator has indices,
      //    because we only populate the indices array for objects with no
      //    enumerable properties on the prototype.
      // 2. If the property access is a GetProp, then we can use the contents
      //    of the indices array to find the correct property faster than
      //    the megamorphic cache.
      if (!idVal->isIteratorMore()) {
        continue;
      }
      auto* iterNext = idVal->toIteratorMore();

      if (!iterNext->iterator()->isObjectToIterator()) {
        continue;
      }

      MObjectToIterator* iter = iterNext->iterator()->toObjectToIterator();
      if (SkipUnbox(iter->object()) != SkipUnbox(receiver)) {
        continue;
      }

      MInstruction* indicesCheck =
          MIteratorHasIndices::New(graph.alloc(), iter->object(), iter);
      MInstruction* replacement;
      if (ins->isHasOwnCache() || ins->isMegamorphicHasProp()) {
        MOZ_ASSERT(!setValue);
        replacement = MConstant::New(graph.alloc(), BooleanValue(true));
      } else if (ins->isMegamorphicLoadSlotByValue() ||
                 ins->isGetPropertyCache()) {
        MOZ_ASSERT(!setValue);
        replacement =
            MLoadSlotByIteratorIndex::New(graph.alloc(), receiver, iter);
      } else {
        MOZ_ASSERT(ins->isMegamorphicSetElement() || ins->isSetPropertyCache());
        MOZ_ASSERT(setValue);
        replacement = MStoreSlotByIteratorIndex::New(graph.alloc(), receiver,
                                                     iter, setValue);
      }

      if (!block->wrapInstructionInFastpath(ins, replacement, indicesCheck)) {
        return false;
      }

      iter->setWantsIndices(true);
      changed = true;

      // Advance to join block.
      blockIter = graph.rpoBegin(block->getSuccessor(0)->getSuccessor(0));
      break;
    }
  }

  if (changed && !AccountForCFGChanges(mir, graph,
                                       /*updateAliasAnalysis=*/false)) {
    return false;
  }

  return true;
}

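// For illustration: in 'for (var key in obj) { total += obj[key]; }', the
// GetElem on 'obj[key]' consumes the IteratorMore value directly, so it can
// be rewritten to read the slot through the iterator's indices array, guarded
// by MIteratorHasIndices.
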
void jit::DumpMIRDefinition(GenericPrinter& out, MDefinition* def) {
#ifdef JS_JITSPEW
  out.printf("%u = %s.", def->id(), StringFromMIRType(def->type()));
  if (def->isConstant()) {
    def->printOpcode(out);
  } else {
    MDefinition::PrintOpcodeName(out, def->op());
  }

  // Get any extra bits of text that the MIR node wants to show us. Both the
  // vector and the strings added to it belong to this function, so both will
  // be automatically freed at exit.
  ExtrasCollector extras;
  def->getExtras(&extras);
  for (size_t i = 0; i < extras.count(); i++) {
    out.printf(" %s", extras.get(i).get());
  }

  for (size_t i = 0; i < def->numOperands(); i++) {
    out.printf(" %u", def->getOperand(i)->id());
  }
#endif
}

& out
, MIRGraph
& graph
,
5233 const CompileInfo
& info
, const char* phase
) {
5235 if (!JitSpewEnabled(JitSpew_MIRExpressions
)) {
5239 out
.printf("===== %s =====\n", phase
);
5241 for (ReversePostorderIterator
block(graph
.rpoBegin());
5242 block
!= graph
.rpoEnd(); block
++) {
5243 out
.printf(" Block%u:\n", block
->id());
5244 for (MPhiIterator
iter(block
->phisBegin()), end(block
->phisEnd());
5245 iter
!= end
; iter
++) {
5247 jit::DumpMIRDefinition(out
, *iter
);
5250 for (MInstructionIterator
iter(block
->begin()), end(block
->end());
5251 iter
!= end
; iter
++) {
5253 DumpMIRDefinition(out
, *iter
);
5258 if (info
.compilingWasm()) {
5259 out
.printf("===== end wasm MIR dump =====\n");
5261 out
.printf("===== %s:%u =====\n", info
.filename(), info
.lineno());