/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/IonAnalysis.h"

#include <algorithm>
#include <utility>  // for ::std::pair

#include "jit/AliasAnalysis.h"
#include "jit/CompileInfo.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "util/CheckedArithmetic.h"

#include "vm/BytecodeUtil-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::DebugOnly;

// Stack used by FlagPhiInputsAsImplicitlyUsed. It stores the Phi instruction
// pointer and the MUseIterator which should be visited next.
using MPhiUseIteratorStack =
    Vector<std::pair<MPhi*, MUseIterator>, 16, SystemAllocPolicy>;

// Look for Phi uses with a depth-first search. If any uses are found the stack
// of MPhi instructions is returned in the |worklist| argument.
static bool DepthFirstSearchUse(MIRGenerator* mir,
                                MPhiUseIteratorStack& worklist, MPhi* phi) {
  // Push a Phi and the next use to iterate over in the worklist.
  auto push = [&worklist](MPhi* phi, MUseIterator use) -> bool {
    phi->setInWorklist();
    return worklist.append(std::make_pair(phi, use));
  };

#ifdef DEBUG
  // Used to assert that when we have no uses, we at least visited all the
  // transitive uses.
  size_t refUseCount = phi->useCount();
  size_t useCount = 0;
#endif
  MOZ_ASSERT(worklist.empty());
  if (!push(phi, phi->usesBegin())) {
    return false;
  }

  while (!worklist.empty()) {
    // Resume iterating over the last phi-use pair added by the next loop.
    auto pair = worklist.popCopy();
    MPhi* producer = pair.first;
    MUseIterator use = pair.second;
    MUseIterator end(producer->usesEnd());
    producer->setNotInWorklist();

    // Keep going down the tree of uses, skipping (continue)
    // non-observable/unused cases and Phis which are already listed in the
    // worklist. Stop (return) as soon as one use is found.
    while (use != end) {
      MNode* consumer = (*use)->consumer();
      MUseIterator it = use;
      use++;
#ifdef DEBUG
      useCount++;
#endif
      if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed inner loop")) {
        return false;
      }

      if (consumer->isResumePoint()) {
        MResumePoint* rp = consumer->toResumePoint();
        // Observable operands are similar to potential uses.
        if (rp->isObservableOperand(*it)) {
          return push(producer, use);
        }
        continue;
      }

      MDefinition* cdef = consumer->toDefinition();
      if (!cdef->isPhi()) {
        // The producer is explicitly used by a definition.
        return push(producer, use);
      }

      MPhi* cphi = cdef->toPhi();
      if (cphi->getUsageAnalysis() == PhiUsage::Used ||
          cphi->isImplicitlyUsed()) {
        // The information got cached on the Phi the last time it
        // got visited, or when flagging operands of implicitly used
        // instructions.
        return push(producer, use);
      }

      if (cphi->isInWorklist() || cphi == producer) {
        // We are already iterating over the uses of this Phi instruction,
        // which are part of a loop; instead of trying to handle loops,
        // conservatively mark them as used.
        return push(producer, use);
      }

      if (cphi->getUsageAnalysis() == PhiUsage::Unused) {
        // The instruction already got visited and is known to have
        // no uses. Skip it.
        continue;
      }

      // We found another Phi instruction, so move the use iterator to
      // the next use and push it to the worklist stack. Then, continue
      // with a depth-first search.
      if (!push(producer, use)) {
        return false;
      }
      producer = cphi;
      use = producer->usesBegin();
      end = producer->usesEnd();
#ifdef DEBUG
      refUseCount += producer->useCount();
#endif
    }

    // When unused, we cannot bubble up this information without iterating
    // over the rest of the previous Phi instruction consumers.
    MOZ_ASSERT(use == end);
    producer->setUsageAnalysis(PhiUsage::Unused);
  }

  MOZ_ASSERT(useCount == refUseCount);
  return true;
}

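// For example, if |phi0| is consumed only by |phi1|, and |phi1| by a non-phi
// definition such as an MAdd, the search above walks phi0 -> phi1 -> MAdd and
// stops at the MAdd: the pairs left in |worklist| let the caller flag both
// phis as PhiUsage::Used.
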
static bool FlagPhiInputsAsImplicitlyUsed(MIRGenerator* mir, MBasicBlock* block,
                                          MBasicBlock* succ,
                                          MPhiUseIteratorStack& worklist) {
  // When removing an edge between 2 blocks, we might remove the ability of
  // later phases to figure out that the uses of a Phi should be considered as
  // a use of all its inputs. Thus we need to mark the Phi inputs as being
  // implicitly used iff the phi has any uses.
  //
  //
  //             +--------------------+         +---------------------+
  //             |12 MFoo 6           |         |32 MBar 5            |
  //             |                    |         |                     |
  //             |   ...              |         |   ...               |
  //             |                    |         |                     |
  //             |25 MGoto Block 4    |         |43 MGoto Block 4     |
  //             +--------------------+         +---------------------+
  //                        |                              |
  //              |         |                              |
  //              |         +-----X------------------------+
  //              |             Edge                       |
  //              |            Removed                     |
  //              |                                        |
  //              |              +------------v-----------+
  //              |              |50 MPhi 12 32           |
  //              |              |                        |
  //              |              |   ...                  |
  //              |              |                        |
  //              |              |70 MReturn 50           |
  //              |              +------------------------+
  //              |
  //  - - - - - - | - - - - - - - - - - - - - - - - - - - - - - - - - - -
  //              |
  //              v
  //
  //    ^        +--------------------+         +---------------------+
  //   /!\       |12 MConst opt-out   |         |32 MBar 5            |
  //  '---'      |                    |         |                     |
  //             |   ...              |         |   ...               |
  //             |78 MBail            |         |                     |
  //             |80 MUnreachable     |         |43 MGoto Block 4     |
  //             +--------------------+         +---------------------+
  //                                                       |
  //                                                       |
  //                                                       |
  //                                       +---------------+
  //                                       |
  //                                       |
  //                                       |
  //                          +------------v-----------+
  //                          |50 MPhi 32              |
  //                          |                        |
  //                          |   ...                  |
  //                          |                        |
  //                          |70 MReturn 50           |
  //                          +------------------------+
  //
  // If the inputs of the Phi are not flagged as implicitly used, then
  // later compilation phases might optimize them out. The problem is that a
  // bailout will use this value and give it back to baseline, which will then
  // use the OptimizedOut magic value in a computation.
  //
  // Unfortunately, we cannot be too conservative about flagging Phi inputs as
  // having implicit uses, as this would prevent many optimizations from being
  // used. Thus, the following code is in charge of flagging Phi instructions
  // as Unused or Used, and setting ImplicitlyUsed accordingly.
  size_t predIndex = succ->getPredecessorIndex(block);
  MPhiIterator end = succ->phisEnd();
  MPhiIterator it = succ->phisBegin();
  for (; it != end; it++) {
    MPhi* phi = *it;

    if (mir->shouldCancel("FlagPhiInputsAsImplicitlyUsed outer loop")) {
      return false;
    }

    // We are looking to mark the Phi inputs which are used across the edge
    // between the |block| and its successor |succ|.
    MDefinition* def = phi->getOperand(predIndex);
    if (def->isImplicitlyUsed()) {
      continue;
    }

    // If the Phi is either Used or Unused, set the ImplicitlyUsed flag
    // accordingly.
    if (phi->getUsageAnalysis() == PhiUsage::Used || phi->isImplicitlyUsed()) {
      def->setImplicitlyUsedUnchecked();
      continue;
    } else if (phi->getUsageAnalysis() == PhiUsage::Unused) {
      continue;
    }

    // We do not know whether the Phi is Used or Unused, so iterate over all
    // uses with a depth-first search. The matching stack is returned in the
    // worklist as soon as one use is found.
    MOZ_ASSERT(worklist.empty());
    if (!DepthFirstSearchUse(mir, worklist, phi)) {
      return false;
    }

    MOZ_ASSERT_IF(worklist.empty(),
                  phi->getUsageAnalysis() == PhiUsage::Unused);
    if (!worklist.empty()) {
      // One of the Phis is used, set Used flags on all the Phis which are
      // in the use chain.
      def->setImplicitlyUsedUnchecked();
      do {
        auto pair = worklist.popCopy();
        MPhi* producer = pair.first;
        producer->setUsageAnalysis(PhiUsage::Used);
        producer->setNotInWorklist();
      } while (!worklist.empty());
    }
    MOZ_ASSERT(phi->getUsageAnalysis() != PhiUsage::Unknown);
  }

  return true;
}

static MInstructionIterator FindFirstInstructionAfterBail(MBasicBlock* block) {
  MOZ_ASSERT(block->alwaysBails());
  for (MInstructionIterator it = block->begin(); it != block->end(); it++) {
    MInstruction* ins = *it;
    if (ins->isBail()) {
      it++;
      return it;
    }
  }
  MOZ_CRASH("Expected MBail in alwaysBails block");
}

// Given an iterator pointing to the first removed instruction, mark
// the operands of each removed instruction as having implicit uses.
static bool FlagOperandsAsImplicitlyUsedAfter(
    MIRGenerator* mir, MBasicBlock* block, MInstructionIterator firstRemoved) {
  MOZ_ASSERT(firstRemoved->block() == block);

  const CompileInfo& info = block->info();

  // Flag operands of removed instructions as having implicit uses.
  MInstructionIterator end = block->end();
  for (MInstructionIterator it = firstRemoved; it != end; it++) {
    if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 1)")) {
      return false;
    }

    MInstruction* ins = *it;
    for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
      ins->getOperand(i)->setImplicitlyUsedUnchecked();
    }

    // Flag observable resume point operands as having implicit uses.
    if (MResumePoint* rp = ins->resumePoint()) {
      // Note: no need to iterate over the callers of the resume point as
      // this is the same as the entry resume point.
      MOZ_ASSERT(&rp->block()->info() == &info);
      for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
        if (info.isObservableSlot(i)) {
          rp->getOperand(i)->setImplicitlyUsedUnchecked();
        }
      }
    }
  }

  // Flag Phi inputs of the successors as having implicit uses.
  MPhiUseIteratorStack worklist;
  for (size_t i = 0, e = block->numSuccessors(); i < e; i++) {
    if (mir->shouldCancel("FlagOperandsAsImplicitlyUsedAfter (loop 2)")) {
      return false;
    }

    if (!FlagPhiInputsAsImplicitlyUsed(mir, block, block->getSuccessor(i),
                                       worklist)) {
      return false;
    }
  }

  return true;
}

static bool FlagEntryResumePointOperands(MIRGenerator* mir,
                                         MBasicBlock* block) {
  // Flag observable operands of the entry resume point as having implicit
  // uses.
  MResumePoint* rp = block->entryResumePoint();
  while (rp) {
    if (mir->shouldCancel("FlagEntryResumePointOperands")) {
      return false;
    }

    const CompileInfo& info = rp->block()->info();
    for (size_t i = 0, e = rp->numOperands(); i < e; i++) {
      if (info.isObservableSlot(i)) {
        rp->getOperand(i)->setImplicitlyUsedUnchecked();
      }
    }

    rp = rp->caller();
  }

  return true;
}

static bool FlagAllOperandsAsImplicitlyUsed(MIRGenerator* mir,
                                            MBasicBlock* block) {
  return FlagEntryResumePointOperands(mir, block) &&
         FlagOperandsAsImplicitlyUsedAfter(mir, block, block->begin());
}

// WarpBuilder sets the alwaysBails flag on blocks that contain an
// unconditional bailout. We trim any instructions in those blocks
// after the first unconditional bailout, and remove any blocks that
// are only reachable through bailing blocks.
bool jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph) {
  JitSpew(JitSpew_Prune, "Begin");

  // Pruning is guided by unconditional bailouts. Wasm does not have bailouts.
  MOZ_ASSERT(!mir->compilingWasm());

  Vector<MBasicBlock*, 16, SystemAllocPolicy> worklist;
  uint32_t numMarked = 0;
  bool needsTrim = false;

  auto markReachable = [&](MBasicBlock* block) -> bool {
    block->mark();
    numMarked++;
    if (block->alwaysBails()) {
      needsTrim = true;
    }
    return worklist.append(block);
  };

  // The entry block is always reachable.
  if (!markReachable(graph.entryBlock())) {
    return false;
  }

  // The OSR entry block is always reachable if it exists.
  if (graph.osrBlock() && !markReachable(graph.osrBlock())) {
    return false;
  }

  // Iteratively mark all reachable blocks.
  while (!worklist.empty()) {
    if (mir->shouldCancel("Prune unused branches (marking reachable)")) {
      return false;
    }
    MBasicBlock* block = worklist.popCopy();

    JitSpew(JitSpew_Prune, "Visit block %u:", block->id());
    JitSpewIndent indent(JitSpew_Prune);

    // If this block always bails, then it does not reach its successors.
    if (block->alwaysBails()) {
      continue;
    }

    for (size_t i = 0; i < block->numSuccessors(); i++) {
      MBasicBlock* succ = block->getSuccessor(i);
      if (succ->isMarked()) {
        continue;
      }
      JitSpew(JitSpew_Prune, "Reaches block %u", succ->id());
      if (!markReachable(succ)) {
        return false;
      }
    }
  }

  if (!needsTrim && numMarked == graph.numBlocks()) {
    // There is nothing to prune.
    graph.unmarkBlocks();
    return true;
  }

  JitSpew(JitSpew_Prune, "Remove unreachable instructions and blocks:");
  JitSpewIndent indent(JitSpew_Prune);

  // The operands of removed instructions may be needed in baseline
  // after bailing out.
  for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
    if (mir->shouldCancel("Prune unused branches (marking operands)")) {
      return false;
    }

    MBasicBlock* block = *it++;
    if (!block->isMarked()) {
      // If we are removing the block entirely, mark the operands of every
      // instruction as being implicitly used.
      FlagAllOperandsAsImplicitlyUsed(mir, block);
    } else if (block->alwaysBails()) {
      // If we are only trimming instructions after a bail, only mark operands
      // of removed instructions.
      MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
      FlagOperandsAsImplicitlyUsedAfter(mir, block, firstRemoved);
    }
  }

  // Remove the blocks in post-order such that consumers are visited before
  // the predecessors, the only exception being the Phi nodes of loop headers.
  for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
    if (mir->shouldCancel("Prune unused branches (removal loop)")) {
      return false;
    }
    if (!graph.alloc().ensureBallast()) {
      return false;
    }

    MBasicBlock* block = *it++;
    if (block->isMarked() && !block->alwaysBails()) {
      continue;
    }

    // As we are going to replace/remove the last instruction, we first have
    // to remove this block from the predecessor list of its successors.
    size_t numSucc = block->numSuccessors();
    for (uint32_t i = 0; i < numSucc; i++) {
      MBasicBlock* succ = block->getSuccessor(i);
      if (succ->isDead()) {
        continue;
      }

      // Our dominators code expects all loop headers to have two predecessors.
      // If we are removing the normal entry to a loop, but can still reach
      // the loop header via OSR, we create a fake unreachable predecessor.
      if (succ->isLoopHeader() && block != succ->backedge()) {
        MOZ_ASSERT(graph.osrBlock());
        if (!graph.alloc().ensureBallast()) {
          return false;
        }

        MBasicBlock* fake = MBasicBlock::NewFakeLoopPredecessor(graph, succ);
        if (!fake) {
          return false;
        }
        // Mark the block to avoid removing it as unreachable.
        fake->mark();

        JitSpew(JitSpew_Prune,
                "Header %u only reachable by OSR. Add fake predecessor %u",
                succ->id(), fake->id());
      }

      JitSpew(JitSpew_Prune, "Remove block edge %u -> %u.", block->id(),
              succ->id());
      succ->removePredecessor(block);
    }

    if (!block->isMarked()) {
      // Remove unreachable blocks from the CFG.
      JitSpew(JitSpew_Prune, "Remove block %u.", block->id());
      graph.removeBlock(block);
    } else {
      // Remove unreachable instructions after unconditional bailouts.
      JitSpew(JitSpew_Prune, "Trim block %u.", block->id());

      // Discard all instructions after the first MBail.
      MInstructionIterator firstRemoved = FindFirstInstructionAfterBail(block);
      block->discardAllInstructionsStartingAt(firstRemoved);

      if (block->outerResumePoint()) {
        block->clearOuterResumePoint();
      }

      block->end(MUnreachable::New(graph.alloc()));
    }
  }
  graph.unmarkBlocks();

  return true;
}

static bool SplitCriticalEdgesForBlock(MIRGraph& graph, MBasicBlock* block) {
  if (block->numSuccessors() < 2) {
    return true;
  }
  for (size_t i = 0; i < block->numSuccessors(); i++) {
    MBasicBlock* target = block->getSuccessor(i);
    if (target->numPredecessors() < 2) {
      continue;
    }

    // Create a simple new block which contains a goto and which splits the
    // edge between block and target.
    MBasicBlock* split = MBasicBlock::NewSplitEdge(graph, block, i, target);
    if (!split) {
      return false;
    }
  }
  return true;
}

// A critical edge is an edge which is neither its successor's only predecessor
// nor its predecessor's only successor. Critical edges must be split to
// prevent copy-insertion and code motion from affecting other edges.
bool jit::SplitCriticalEdges(MIRGraph& graph) {
  for (MBasicBlockIterator iter(graph.begin()); iter != graph.end(); iter++) {
    MBasicBlock* block = *iter;
    if (!SplitCriticalEdgesForBlock(graph, block)) {
      return false;
    }
  }
  return true;
}

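// For example, given the edges |A -> C|, |A -> D| and |B -> C|, the edge
// A -> C is critical: A has two successors and C has two predecessors.
// Splitting inserts a new block S containing only a goto, so that the path
// becomes A -> S -> C and neither resulting edge is critical.
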
bool jit::IsUint32Type(const MDefinition* def) {
  if (def->isBeta()) {
    def = def->getOperand(0);
  }

  if (def->type() != MIRType::Int32) {
    return false;
  }

  return def->isUrsh() && def->getOperand(1)->isConstant() &&
         def->getOperand(1)->toConstant()->type() == MIRType::Int32 &&
         def->getOperand(1)->toConstant()->toInt32() == 0;
}

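// For example, the usual JS idiom for coercing a value to uint32,
//
//   var u = x >>> 0;
//
// compiles to an MUrsh whose right-hand operand is the Int32 constant 0,
// which is exactly the shape this predicate matches.
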
// Determine whether phiBlock/testBlock simply compute a phi and perform a
// test on it.
static bool BlockIsSingleTest(MBasicBlock* phiBlock, MBasicBlock* testBlock,
                              MPhi** pphi, MTest** ptest) {
  *pphi = nullptr;
  *ptest = nullptr;

  if (phiBlock != testBlock) {
    MOZ_ASSERT(phiBlock->numSuccessors() == 1 &&
               phiBlock->getSuccessor(0) == testBlock);
    if (!phiBlock->begin()->isGoto()) {
      return false;
    }
  }

  auto iter = testBlock->rbegin();
  if (!iter->isTest()) {
    return false;
  }
  MTest* test = iter->toTest();

  // Unwrap boolean conversion performed through the '!!' idiom.
  MInstruction* testOrNot = test;
  bool hasOddNumberOfNots = false;
  while (++iter != testBlock->rend()) {
    if (iter->isNot()) {
      // The MNot must only be used by |testOrNot|.
      auto* notIns = iter->toNot();
      if (testOrNot->getOperand(0) != notIns) {
        return false;
      }
      if (!notIns->hasOneUse()) {
        return false;
      }

      testOrNot = notIns;
      hasOddNumberOfNots = !hasOddNumberOfNots;
    } else {
      // Fail if there are any instructions other than MNot.
      return false;
    }
  }

  // There's an odd number of MNot, so this can't be the '!!' idiom.
  if (hasOddNumberOfNots) {
    return false;
  }

  MOZ_ASSERT(testOrNot->isTest() || testOrNot->isNot());

  MDefinition* testInput = testOrNot->getOperand(0);
  if (!testInput->isPhi()) {
    return false;
  }
  MPhi* phi = testInput->toPhi();
  if (phi->block() != phiBlock) {
    return false;
  }

  for (MUseIterator iter = phi->usesBegin(); iter != phi->usesEnd(); ++iter) {
    MUse* use = *iter;
    if (use->consumer() == testOrNot) {
      continue;
    }
    if (use->consumer()->isResumePoint()) {
      MBasicBlock* useBlock = use->consumer()->block();
      if (useBlock == phiBlock || useBlock == testBlock) {
        continue;
      }
    }
    return false;
  }

  for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd();
       ++iter) {
    if (*iter != phi) {
      return false;
    }
  }

  if (phiBlock != testBlock && !testBlock->phisEmpty()) {
    return false;
  }

  *pphi = phi;
  *ptest = test;

  return true;
}

// Determine if value is directly or indirectly the test input.
static bool IsTestInputMaybeToBool(MTest* test, MDefinition* value) {
  auto* input = test->input();
  bool hasEvenNumberOfNots = true;
  while (true) {
    // Only accept if there's an even number of MNot.
    if (input == value && hasEvenNumberOfNots) {
      return true;
    }

    // Unwrap boolean conversion performed through the '!!' idiom.
    if (input->isNot()) {
      input = input->toNot()->input();
      hasEvenNumberOfNots = !hasEvenNumberOfNots;
      continue;
    }

    return false;
  }
}

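// For example, for |if (!!x)| the test input is MNot(MNot(x)); unwrapping the
// even number of MNot instructions shows that |x| is (indirectly) the test
// input, so IsTestInputMaybeToBool(test, x) returns true.
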
// Change block so that it ends in a goto to the specific target block.
// existingPred is an existing predecessor of the block.
[[nodiscard]] static bool UpdateGotoSuccessor(TempAllocator& alloc,
                                              MBasicBlock* block,
                                              MBasicBlock* target,
                                              MBasicBlock* existingPred) {
  MInstruction* ins = block->lastIns();
  MOZ_ASSERT(ins->isGoto());
  ins->toGoto()->target()->removePredecessor(block);
  block->discardLastIns();

  MGoto* newGoto = MGoto::New(alloc, target);
  block->end(newGoto);

  return target->addPredecessorSameInputsAs(block, existingPred);
}

// Change block so that it ends in a test of the specified value, going to
// either ifTrue or ifFalse. existingPred is an existing predecessor of ifTrue
// or ifFalse with the same values incoming to ifTrue/ifFalse as block.
// existingPred is not required to be a predecessor of ifTrue/ifFalse if block
// already ends in a test going to that block on a true/false result.
[[nodiscard]] static bool UpdateTestSuccessors(
    TempAllocator& alloc, MBasicBlock* block, MDefinition* value,
    MBasicBlock* ifTrue, MBasicBlock* ifFalse, MBasicBlock* existingPred) {
  MInstruction* ins = block->lastIns();
  if (ins->isTest()) {
    MTest* test = ins->toTest();
    MOZ_ASSERT(test->input() == value);

    if (ifTrue != test->ifTrue()) {
      test->ifTrue()->removePredecessor(block);
      if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
        return false;
      }
      MOZ_ASSERT(test->ifTrue() == test->getSuccessor(0));
      test->replaceSuccessor(0, ifTrue);
    }

    if (ifFalse != test->ifFalse()) {
      test->ifFalse()->removePredecessor(block);
      if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
        return false;
      }
      MOZ_ASSERT(test->ifFalse() == test->getSuccessor(1));
      test->replaceSuccessor(1, ifFalse);
    }

    return true;
  }

  MOZ_ASSERT(ins->isGoto());
  ins->toGoto()->target()->removePredecessor(block);
  block->discardLastIns();

  MTest* test = MTest::New(alloc, value, ifTrue, ifFalse);
  block->end(test);

  if (!ifTrue->addPredecessorSameInputsAs(block, existingPred)) {
    return false;
  }
  if (!ifFalse->addPredecessorSameInputsAs(block, existingPred)) {
    return false;
  }
  return true;
}

/*
 * Look for a diamond pattern:
 *
 *        initialBlock
 *          /     \
 *  trueBranch  falseBranch
 *          \     /
 *         phiBlock
 *
 *         testBlock
 */
static bool IsDiamondPattern(MBasicBlock* initialBlock) {
  MInstruction* ins = initialBlock->lastIns();
  if (!ins->isTest()) {
    return false;
  }
  MTest* initialTest = ins->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  if (trueBranch->numPredecessors() != 1 || trueBranch->numSuccessors() != 1) {
    return false;
  }

  MBasicBlock* falseBranch = initialTest->ifFalse();
  if (falseBranch->numPredecessors() != 1 ||
      falseBranch->numSuccessors() != 1) {
    return false;
  }

  MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
  if (phiBlock != falseBranch->getSuccessor(0)) {
    return false;
  }
  if (phiBlock->numPredecessors() != 2) {
    return false;
  }
  return true;
}

static bool MaybeFoldDiamondConditionBlock(MIRGraph& graph,
                                           MBasicBlock* initialBlock) {
  MOZ_ASSERT(IsDiamondPattern(initialBlock));

  // Optimize the MIR graph to improve the code generated for conditional
  // operations. A test like 'if (a ? b : c)' normally requires four blocks,
  // with a phi for the intermediate value. This can be improved to use three
  // blocks with no phi value.

  /*
   * Look for a diamond pattern:
   *
   *        initialBlock
   *          /     \
   *  trueBranch  falseBranch
   *          \     /
   *         phiBlock
   *
   *         testBlock
   *
   * Where phiBlock contains a single phi combining values pushed onto the
   * stack by trueBranch and falseBranch, and testBlock contains a test on
   * that phi. phiBlock and testBlock may be the same block; generated code
   * will use different blocks if the (?:) op is in an inlined function.
   */

  MTest* initialTest = initialBlock->lastIns()->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();
  if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
      falseBranch->isLoopBackedge()) {
    return true;
  }

  MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    if (testBlock->isLoopBackedge()) {
      return true;
    }
    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MPhi* phi;
  MTest* finalTest;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  MOZ_ASSERT(phi->numOperands() == 2);

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  MDefinition* trueResult =
      phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
  MDefinition* falseResult =
      phi->getOperand(phiBlock->indexForPredecessor(falseBranch));

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Change the end of the block to a test that jumps directly to successors of
  // testBlock, rather than to testBlock itself.

  if (IsTestInputMaybeToBool(initialTest, trueResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(),
                             testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  if (IsTestInputMaybeToBool(initialTest, falseResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(),
                             testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}

/*
 * Look for a triangle pattern:
 *
 *        initialBlock
 *          /     \
 *  trueBranch     |
 *          \     /
 *   phiBlock+falseBranch
 *
 *         testBlock
 *
 * Or:
 *
 *        initialBlock
 *          /     \
 *         |   falseBranch
 *          \     /
 *   phiBlock+trueBranch
 *
 *         testBlock
 */
static bool IsTrianglePattern(MBasicBlock* initialBlock) {
  MInstruction* ins = initialBlock->lastIns();
  if (!ins->isTest()) {
    return false;
  }
  MTest* initialTest = ins->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();

  if (trueBranch->numSuccessors() == 1 &&
      trueBranch->getSuccessor(0) == falseBranch) {
    if (trueBranch->numPredecessors() != 1) {
      return false;
    }
    if (falseBranch->numPredecessors() != 2) {
      return false;
    }
    return true;
  }

  if (falseBranch->numSuccessors() == 1 &&
      falseBranch->getSuccessor(0) == trueBranch) {
    if (trueBranch->numPredecessors() != 2) {
      return false;
    }
    if (falseBranch->numPredecessors() != 1) {
      return false;
    }
    return true;
  }

  return false;
}

static bool MaybeFoldTriangleConditionBlock(MIRGraph& graph,
                                            MBasicBlock* initialBlock) {
  MOZ_ASSERT(IsTrianglePattern(initialBlock));

  // Optimize the MIR graph to improve the code generated for boolean
  // operations. A test like 'if (a && b)' normally requires three blocks, with
  // a phi for the intermediate value. This can be improved to use no phi
  // value.

  /*
   * Look for a triangle pattern:
   *
   *        initialBlock
   *          /     \
   *  trueBranch     |
   *          \     /
   *   phiBlock+falseBranch
   *
   *         testBlock
   *
   * Or:
   *
   *        initialBlock
   *          /     \
   *         |   falseBranch
   *          \     /
   *   phiBlock+trueBranch
   *
   *         testBlock
   *
   * Where phiBlock contains a single phi combining values pushed onto the
   * stack by trueBranch and falseBranch, and testBlock contains a test on
   * that phi. phiBlock and testBlock may be the same block; generated code
   * will use different blocks if the (&&) op is in an inlined function.
   */

  MTest* initialTest = initialBlock->lastIns()->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();
  if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() ||
      falseBranch->isLoopBackedge()) {
    return true;
  }

  MBasicBlock* phiBlock;
  if (trueBranch->numSuccessors() == 1 &&
      trueBranch->getSuccessor(0) == falseBranch) {
    phiBlock = falseBranch;
  } else {
    MOZ_ASSERT(falseBranch->getSuccessor(0) == trueBranch);
    phiBlock = trueBranch;
  }

  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    MOZ_ASSERT(!testBlock->isLoopBackedge());

    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MPhi* phi;
  MTest* finalTest;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  MOZ_ASSERT(phi->numOperands() == 2);

  // If the phi-operand doesn't match the initial input, we can't fold the
  // test.
  auto* phiInputForInitialBlock =
      phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  if (!IsTestInputMaybeToBool(initialTest, phiInputForInitialBlock)) {
    return true;
  }

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  MDefinition* trueResult;
  MDefinition* falseResult;
  if (phiBlock == trueBranch) {
    trueResult = phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
    falseResult = phi->getOperand(phiBlock->indexForPredecessor(falseBranch));
  } else {
    trueResult = phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
    falseResult = phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  }

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Change the end of the block to a test that jumps directly to successors of
  // testBlock, rather than to testBlock itself.

  if (phiBlock == trueBranch) {
    if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
                              finalTest->ifTrue(), initialTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  } else if (IsTestInputMaybeToBool(initialTest, trueResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(),
                             testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  if (phiBlock == falseBranch) {
    if (!UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
                              initialTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  } else if (IsTestInputMaybeToBool(initialTest, falseResult)) {
    if (!UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(),
                             testBlock)) {
      return false;
    }
  } else {
    if (!UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
                              finalTest->ifTrue(), finalTest->ifFalse(),
                              testBlock)) {
      return false;
    }
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}

static bool MaybeFoldConditionBlock(MIRGraph& graph,
                                    MBasicBlock* initialBlock) {
  if (IsDiamondPattern(initialBlock)) {
    return MaybeFoldDiamondConditionBlock(graph, initialBlock);
  }
  if (IsTrianglePattern(initialBlock)) {
    return MaybeFoldTriangleConditionBlock(graph, initialBlock);
  }
  return true;
}

static bool MaybeFoldTestBlock(MIRGraph& graph, MBasicBlock* initialBlock) {
  // Handle test expressions on more than two inputs. For example
  // |if ((x > 10) && (y > 20) && (z > 30)) { ... }|, which results in the
  // pattern below.
  //
  // Look for the pattern:
  //
  //                       ┌─────────────────┐
  //                    1  │ 1 compare       │
  //                 ┌─────┤ 2 test compare1 │
  //                 │     └──────┬──────────┘
  //                 │            │0
  //         ┌───────▼─────────┐  │
  //         │ 3 compare       │  │
  //         │ 4 test compare3 │  └──────────┐
  //         └──┬──────────┬───┘             │
  //           1│          │0                │
  //  ┌─────────▼───────┐  │                 │
  //  │ 5 compare       │  └─────────┐       │
  //  │ 6 goto          │            │       │
  //  └────────┬────────┘            │       │
  //           │                     │       │
  //           │  ┌──────────────────▼───────▼──────┐
  //           └─►│ 9 phi compare1 compare3 compare5│
  //              │10 goto                          │
  //              └────────────────┬────────────────┘
  //                               │
  //                      ┌────────▼────────┐
  //                      │11 test phi9     │
  //                      └─────┬─────┬─────┘
  //                           1│     │0
  //     ┌────────────┐         │     │     ┌─────────────┐
  //     │ TrueBranch │◄────────┘     └────►│ FalseBranch │
  //     └────────────┘                     └─────────────┘
  //
  // And transform it to:
  //
  //                       ┌─────────────────┐
  //                    1  │ 1 compare       │
  //                 ┌─────┤ 2 test compare1 │
  //                 │     └──────────┬──────┘
  //                 │                │0
  //         ┌───────▼─────────┐      │
  //         │ 3 compare       │      │
  //         │ 4 test compare3 │      │
  //         └──┬─────────┬────┘      │
  //           1│         │0          │
  //  ┌─────────▼───────┐ │           │
  //  │ 5 compare       │ └─────┐     │
  //  │ 6 test compare5 │       │     │
  //  └────┬────────┬───┘       │     │
  //      1│        │0          │     │
  //  ┌────▼───────┐ │      ┌───▼─────▼───┐
  //  │ TrueBranch │ └─────►│ FalseBranch │
  //  └────────────┘        └─────────────┘

  auto* ins = initialBlock->lastIns();
  if (!ins->isTest()) {
    return true;
  }
  auto* initialTest = ins->toTest();

  MBasicBlock* trueBranch = initialTest->ifTrue();
  MBasicBlock* falseBranch = initialTest->ifFalse();

  // MaybeFoldConditionBlock handles the case for two operands.
  MBasicBlock* phiBlock;
  if (trueBranch->numPredecessors() > 2) {
    phiBlock = trueBranch;
  } else if (falseBranch->numPredecessors() > 2) {
    phiBlock = falseBranch;
  } else {
    return true;
  }

  MBasicBlock* testBlock = phiBlock;
  if (testBlock->numSuccessors() == 1) {
    if (testBlock->isLoopBackedge()) {
      return true;
    }
    testBlock = testBlock->getSuccessor(0);
    if (testBlock->numPredecessors() != 1) {
      return true;
    }
  }

  MOZ_ASSERT(!phiBlock->isLoopBackedge());

  MPhi* phi = nullptr;
  MTest* finalTest = nullptr;
  if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest)) {
    return true;
  }

  MOZ_ASSERT(phiBlock->numPredecessors() == phi->numOperands());

  // If the phi-operand doesn't match the initial input, we can't fold the
  // test.
  auto* phiInputForInitialBlock =
      phi->getOperand(phiBlock->indexForPredecessor(initialBlock));
  if (!IsTestInputMaybeToBool(initialTest, phiInputForInitialBlock)) {
    return true;
  }

  MBasicBlock* newTestBlock = nullptr;
  MDefinition* newTestInput = nullptr;

  // The block of each phi operand must either end with a test instruction on
  // that phi operand, or it is the sole block which ends with a goto
  // instruction.
  for (size_t i = 0; i < phiBlock->numPredecessors(); i++) {
    auto* pred = phiBlock->getPredecessor(i);
    auto* operand = phi->getOperand(i);

    // Each predecessor must end with either a test or goto instruction.
    auto* lastIns = pred->lastIns();
    if (lastIns->isGoto() && !newTestBlock) {
      newTestBlock = pred;
      newTestInput = operand;
    } else if (lastIns->isTest()) {
      if (!IsTestInputMaybeToBool(lastIns->toTest(), operand)) {
        return true;
      }
    } else {
      return true;
    }

    MOZ_ASSERT(!pred->isLoopBackedge());
  }

  // Ensure we found the single goto block.
  if (!newTestBlock) {
    return true;
  }

  // Make sure the test block does not have any outgoing loop backedges.
  if (!SplitCriticalEdgesForBlock(graph, testBlock)) {
    return false;
  }

  // OK, we found the desired pattern, now transform the graph.

  // Remove the phi from phiBlock.
  phiBlock->discardPhi(*phiBlock->phisBegin());

  // Create the new test instruction.
  if (!UpdateTestSuccessors(graph.alloc(), newTestBlock, newTestInput,
                            finalTest->ifTrue(), finalTest->ifFalse(),
                            testBlock)) {
    return false;
  }

  // Update all test instructions to point to the final target.
  while (phiBlock->numPredecessors()) {
    mozilla::DebugOnly<size_t> oldNumPred = phiBlock->numPredecessors();

    auto* pred = phiBlock->getPredecessor(0);
    auto* test = pred->lastIns()->toTest();
    if (test->ifTrue() == phiBlock) {
      if (!UpdateTestSuccessors(graph.alloc(), pred, test->input(),
                                finalTest->ifTrue(), test->ifFalse(),
                                testBlock)) {
        return false;
      }
    } else {
      MOZ_ASSERT(test->ifFalse() == phiBlock);
      if (!UpdateTestSuccessors(graph.alloc(), pred, test->input(),
                                test->ifTrue(), finalTest->ifFalse(),
                                testBlock)) {
        return false;
      }
    }

    // Ensure we've made progress.
    MOZ_ASSERT(phiBlock->numPredecessors() + 1 == oldNumPred);
  }

  // Remove phiBlock, if different from testBlock.
  if (phiBlock != testBlock) {
    testBlock->removePredecessor(phiBlock);
    graph.removeBlock(phiBlock);
  }

  // Remove testBlock itself.
  finalTest->ifTrue()->removePredecessor(testBlock);
  finalTest->ifFalse()->removePredecessor(testBlock);
  graph.removeBlock(testBlock);

  return true;
}

bool jit::FoldTests(MIRGraph& graph) {
  for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
       block++) {
    if (!MaybeFoldConditionBlock(graph, *block)) {
      return false;
    }
    if (!MaybeFoldTestBlock(graph, *block)) {
      return false;
    }
  }
  return true;
}

bool jit::FoldEmptyBlocks(MIRGraph& graph) {
  for (MBasicBlockIterator iter(graph.begin()); iter != graph.end();) {
    MBasicBlock* block = *iter;
    iter++;

    if (block->numPredecessors() != 1 || block->numSuccessors() != 1) {
      continue;
    }

    if (!block->phisEmpty()) {
      continue;
    }

    if (block->outerResumePoint()) {
      continue;
    }

    if (*block->begin() != *block->rbegin()) {
      continue;
    }

    MBasicBlock* succ = block->getSuccessor(0);
    MBasicBlock* pred = block->getPredecessor(0);

    if (succ->numPredecessors() != 1) {
      continue;
    }

    size_t pos = pred->getSuccessorIndex(block);
    pred->lastIns()->replaceSuccessor(pos, succ);

    graph.removeBlock(block);

    if (!succ->addPredecessorSameInputsAs(pred, block)) {
      return false;
    }
    succ->removePredecessor(block);
  }
  return true;
}

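// For example, a chain |pred -> empty -> succ|, where |empty| contains only a
// goto and |succ| has no other predecessors, collapses to |pred -> succ|; the
// single-predecessor requirement on |succ| keeps its phi inputs trivial to
// preserve.
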
static void EliminateTriviallyDeadResumePointOperands(MIRGraph& graph,
                                                      MResumePoint* rp) {
  // If we will pop the top of the stack immediately after resuming,
  // then don't preserve the top value in the resume point.
  if (rp->mode() != ResumeMode::ResumeAt) {
    return;
  }

  jsbytecode* pc = rp->pc();
  if (JSOp(*pc) == JSOp::JumpTarget) {
    pc += JSOpLength_JumpTarget;
  }
  if (JSOp(*pc) != JSOp::Pop) {
    return;
  }

  size_t top = rp->stackDepth() - 1;
  MOZ_ASSERT(!rp->isObservableOperand(top));

  MDefinition* def = rp->getOperand(top);
  if (def->isConstant()) {
    return;
  }

  MConstant* constant = rp->block()->optimizedOutConstant(graph.alloc());
  rp->replaceOperand(top, constant);
}

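// For example, in an expression statement such as |f();| the bytecode pops the
// call's result immediately, so a resume point resuming at that JSOp::Pop
// (possibly preceded by a JSOp::JumpTarget) does not need to preserve the
// top-of-stack value.
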
// Operands to a resume point which are dead at the point of the resume can be
// replaced with a magic value. This pass only replaces resume points which are
// trivially dead.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool jit::EliminateTriviallyDeadResumePointOperands(MIRGenerator* mir,
                                                    MIRGraph& graph) {
  for (auto* block : graph) {
    if (MResumePoint* rp = block->entryResumePoint()) {
      if (!graph.alloc().ensureBallast()) {
        return false;
      }
      ::EliminateTriviallyDeadResumePointOperands(graph, rp);
    }
  }
  return true;
}

// Operands to a resume point which are dead at the point of the resume can be
// replaced with a magic value. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool jit::EliminateDeadResumePointOperands(MIRGenerator* mir, MIRGraph& graph) {
  // If we are compiling try blocks, locals and arguments may be observable
  // from catch or finally blocks (which Ion does not compile). For now just
  // disable the pass in this case.
  if (graph.hasTryBlock()) {
    return true;
  }

  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)")) {
      return false;
    }

    if (MResumePoint* rp = block->entryResumePoint()) {
      if (!graph.alloc().ensureBallast()) {
        return false;
      }
      ::EliminateTriviallyDeadResumePointOperands(graph, rp);
    }

    // The logic below can get confused on infinite loops.
    if (block->isLoopHeader() && block->backedge() == *block) {
      continue;
    }

    for (MInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      if (MResumePoint* rp = ins->resumePoint()) {
        if (!graph.alloc().ensureBallast()) {
          return false;
        }
        ::EliminateTriviallyDeadResumePointOperands(graph, rp);
      }

      // No benefit to replacing constant operands with other constants.
      if (ins->isConstant()) {
        continue;
      }

      // Scanning uses does not give us sufficient information to tell
      // where instructions that are involved in box/unbox operations or
      // parameter passing might be live. Rewriting uses of these terms
      // in resume points may affect the interpreter's behavior. Rather
      // than doing a more sophisticated analysis, just ignore these.
      if (ins->isUnbox() || ins->isParameter() || ins->isBoxNonStrictThis() ||
          ins->isOsrValue()) {
        continue;
      }

      // Early intermediate values captured by resume points, such as
      // ArrayState and its allocation, may be legitimately dead in Ion code,
      // but are still needed if we bail out. They can recover on bailout.
      if (ins->isRecoveredOnBailout()) {
        MOZ_ASSERT(ins->canRecoverOnBailout());
        continue;
      }

      // If an instruction can be recovered on bailout, but it has a resume
      // point, then do not attempt to remove its uses. The reason being that
      // this is likely to be an allocation which would be flagged as recovered
      // on bailout by the Sink phase, as long as it is captured by resume
      // points, but it would not be removed by DCE as it has a resume point.
      if (ins->canRecoverOnBailout() && ins->resumePoint()) {
        continue;
      }

      // If the instruction's behavior has been constant folded into a
      // separate instruction, we can't determine precisely where the
      // instruction becomes dead and can't eliminate its uses.
      if (ins->isImplicitlyUsed()) {
        continue;
      }

      // Check if this instruction's result is only used within the
      // current block, and keep track of its last use in a definition
      // (not resume point). This requires the instructions in the block
      // to be numbered, ensured by running this immediately after alias
      // analysis.
      //
      // The fact that the consumers are in the same block is a cheap way to
      // ensure that there is no block-domination question to answer, nor any
      // Phi at a loop edge.
      uint32_t lastConsumerId = 0;
      for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();
           uses++) {
        MNode* consumer = uses->consumer();
        if (consumer->isResumePoint()) {
          // If the instruction is captured by one of the resume points, then
          // it might be observed indirectly while the frame is live on the
          // stack, so it has to be computed.
          MResumePoint* resume = consumer->toResumePoint();
          if (resume->isObservableOperand(*uses)) {
            lastConsumerId = UINT32_MAX;
            break;
          }
          continue;
        }

        MDefinition* def = consumer->toDefinition();
        if (def->block() != *block || def->isBox() || def->isPhi()) {
          lastConsumerId = UINT32_MAX;
          break;
        }
        lastConsumerId = std::max(lastConsumerId, def->id());
      }
      if (lastConsumerId == UINT32_MAX) {
        continue;
      }

      // Walk the uses a second time, removing any in resume points after
      // the last use in a definition.
      //
      // Given that all consumers are in the same block as the definition, and
      // there is no observable operand, we can replace this operand in any
      // resume point which is after the last consumer. This means, in a
      // different block or located after the last consumer.
      for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd();) {
        MUse* use = *uses++;
        if (use->consumer()->isDefinition()) {
          MOZ_ASSERT(*block == use->consumer()->block());
          continue;
        }

        MResumePoint* mrp = use->consumer()->toResumePoint();

        // The transformation below would remove the instruction from the
        // operand of the resume point. This condition is used to preserve any
        // resume point whose value might flow into one of the live consumers.
        if (mrp->block() == *block &&
            (!mrp->instruction() ||
             mrp->instruction()->id() <= lastConsumerId)) {
          continue;
        }

        if (!graph.alloc().ensureBallast()) {
          return false;
        }

        // Store an optimized out magic value in place of all dead
        // resume point operands. Making any such substitution can in
        // general alter the interpreter's behavior, even though the
        // code is dead, as the interpreter will still execute opcodes
        // whose effects cannot be observed. If the magic value were
        // to flow to, say, a dead property access the interpreter
        // could throw an exception; we avoid this problem by removing
        // dead operands before removing dead code.
        MConstant* constant =
            MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
        block->insertBefore(*(block->begin()), constant);
        use->replaceProducer(constant);
      }
    }
  }

  return true;
}

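// For example, if |t = a + b| is consumed only by definitions that precede a
// later resume point in the same block, that resume point's slot for |t| is
// rewritten to the JS_OPTIMIZED_OUT constant above, so capturing |t| does not
// artificially extend its live range.
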
// Test whether |def| would be needed if it had no uses.
bool js::jit::DeadIfUnused(const MDefinition* def) {
  // Effectful instructions of course cannot be removed.
  if (def->isEffectful()) {
    return false;
  }

  // Never eliminate guard instructions.
  if (def->isGuard()) {
    return false;
  }

  // Required to be preserved, as the type guard related to this instruction
  // is part of the semantics of a transformation.
  if (def->isGuardRangeBailouts()) {
    return false;
  }

  // Control instructions have no uses, but also shouldn't be optimized out.
  if (def->isControlInstruction()) {
    return false;
  }

  // Used when lowering to generate the corresponding snapshots and aggregate
  // the list of recover instructions to be repeated.
  if (def->isInstruction() && def->toInstruction()->resumePoint()) {
    return false;
  }

  return true;
}

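// For example, an MAdd whose result has no uses, which is not a guard and has
// no resume point attached, satisfies DeadIfUnused and can be discarded by
// EliminateDeadCode below.
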
// Similar to DeadIfUnused(), but additionally allows effectful instructions.
bool js::jit::DeadIfUnusedAllowEffectful(const MDefinition* def) {
  // Never eliminate guard instructions.
  if (def->isGuard()) {
    return false;
  }

  // Required to be preserved, as the type guard related to this instruction
  // is part of the semantics of a transformation.
  if (def->isGuardRangeBailouts()) {
    return false;
  }

  // Control instructions have no uses, but also shouldn't be optimized out.
  if (def->isControlInstruction()) {
    return false;
  }

  // Used when lowering to generate the corresponding snapshots and aggregate
  // the list of recover instructions to be repeated.
  if (def->isInstruction() && def->toInstruction()->resumePoint()) {
    // All effectful instructions must have a resume point attached. We're
    // allowing effectful instructions here, so we have to ignore any resume
    // points if we want to consider effectful instructions as dead.
    if (!def->isEffectful()) {
      return false;
    }
  }

  return true;
}

// Test whether |def| may be safely discarded, due to being dead or due to
// being located in a basic block which has itself been marked for discarding.
bool js::jit::IsDiscardable(const MDefinition* def) {
  return !def->hasUses() && (DeadIfUnused(def) || def->block()->isMarked());
}

// Similar to IsDiscardable(), but additionally allows effectful instructions.
bool js::jit::IsDiscardableAllowEffectful(const MDefinition* def) {
  return !def->hasUses() &&
         (DeadIfUnusedAllowEffectful(def) || def->block()->isMarked());
}

// Instructions are useless if they are unused and have no side effects.
// This pass eliminates useless instructions.
// The graph itself is unchanged.
bool jit::EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph) {
  // Traverse in postorder so that we hit uses before definitions.
  // Traverse instruction list backwards for the same reason.
  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    if (mir->shouldCancel("Eliminate Dead Code (main loop)")) {
      return false;
    }

    // Remove unused instructions.
    for (MInstructionReverseIterator iter = block->rbegin();
         iter != block->rend();) {
      MInstruction* inst = *iter++;
      if (js::jit::IsDiscardable(inst)) {
        block->discard(inst);
      }
    }
  }

  return true;
}

static inline bool IsPhiObservable(MPhi* phi, Observability observe) {
  // If the phi has uses which are not reflected in SSA, then behavior in the
  // interpreter may be affected by removing the phi.
  if (phi->isImplicitlyUsed()) {
    return true;
  }

  // Check for uses of this phi node outside of other phi nodes.
  // Note that, initially, we skip reading resume points, which we
  // don't count as actual uses. If the only uses are resume points,
  // then the SSA name is never consumed by the program. However,
  // after optimizations have been performed, it's possible that the
  // actual uses in the program have been (incorrectly) optimized
  // away, so we must be more conservative and consider resume
  // points as well.
  for (MUseIterator iter(phi->usesBegin()); iter != phi->usesEnd(); iter++) {
    MNode* consumer = iter->consumer();
    if (consumer->isResumePoint()) {
      MResumePoint* resume = consumer->toResumePoint();
      if (observe == ConservativeObservability) {
        return true;
      }
      if (resume->isObservableOperand(*iter)) {
        return true;
      }
    } else {
      MDefinition* def = consumer->toDefinition();
      if (!def->isPhi()) {
        return true;
      }
    }
  }

  return false;
}

// Handles cases like:
//    x is phi(a, x) --> a
//    x is phi(a, a) --> a
static inline MDefinition* IsPhiRedundant(MPhi* phi) {
  MDefinition* first = phi->operandIfRedundant();
  if (first == nullptr) {
    return nullptr;
  }

  // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
  if (phi->isImplicitlyUsed()) {
    first->setImplicitlyUsedUnchecked();
  }

  return first;
}

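// For example, a variable that is never reassigned inside a loop still gets a
// loop-header phi |x1 = phi(x0, x1)|; operandIfRedundant() returns |x0|, and
// every use of |x1| can then be replaced with |x0|.
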
bool jit::EliminatePhis(MIRGenerator* mir, MIRGraph& graph,
                        Observability observe) {
  // Eliminates redundant or unobservable phis from the graph. A
  // redundant phi is something like b = phi(a, a) or b = phi(a, b),
  // both of which can be replaced with a. An unobservable phi is
  // one whose value is never used in the program.
  //
  // Note that we must be careful not to eliminate phis representing
  // values that the interpreter will require later. When the graph
  // is first constructed, we can be more aggressive, because there
  // is a greater correspondence between the CFG and the bytecode.
  // After optimizations such as GVN have been performed, however,
  // the bytecode and CFG may not correspond as closely to one
  // another. In that case, we must be more conservative. The flag
  // |conservativeObservability| is used to indicate that eliminate
  // phis is being run after some optimizations have been performed,
  // and thus we should use more conservative rules about
  // observability. The particular danger is that we can optimize
  // away uses of a phi because we think they are not executable,
  // but the foundation for that assumption is false TI information
  // that will eventually be invalidated. Therefore, if
  // |conservativeObservability| is set, we will consider any use
  // from a resume point to be observable. Otherwise, we demand a
  // use from an actual instruction.

  Vector<MPhi*, 16, SystemAllocPolicy> worklist;

  // Add all observable phis to a worklist. We use the "in worklist" bit to
  // mean "this phi is live".
  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    MPhiIterator iter = block->phisBegin();
    while (iter != block->phisEnd()) {
      MPhi* phi = *iter++;

      if (mir->shouldCancel("Eliminate Phis (populate loop)")) {
        return false;
      }

      // Flag all as unused, only observable phis would be marked as used
      // when processed by the work list.
      phi->setUnused();

      // If the phi is redundant, remove it here.
      if (MDefinition* redundant = IsPhiRedundant(phi)) {
        phi->justReplaceAllUsesWith(redundant);
        block->discardPhi(phi);
        continue;
      }

      // Enqueue observable Phis.
      if (IsPhiObservable(phi, observe)) {
        phi->setInWorklist();
        if (!worklist.append(phi)) {
          return false;
        }
      }
    }
  }

  // Iteratively mark all phis reachable from live phis.
  while (!worklist.empty()) {
    if (mir->shouldCancel("Eliminate Phis (worklist)")) {
      return false;
    }

    MPhi* phi = worklist.popCopy();
    MOZ_ASSERT(phi->isUnused());
    phi->setNotInWorklist();

    // The removal of Phis can produce newly redundant phis.
    if (MDefinition* redundant = IsPhiRedundant(phi)) {
      // Add to the worklist the used phis which are impacted.
      for (MUseDefIterator it(phi); it; it++) {
        if (it.def()->isPhi()) {
          MPhi* use = it.def()->toPhi();
          if (!use->isUnused()) {
            use->setUnusedUnchecked();
            use->setInWorklist();
            if (!worklist.append(use)) {
              return false;
            }
          }
        }
      }
      phi->justReplaceAllUsesWith(redundant);
    } else {
      // Otherwise flag them as used.
      phi->setNotUnused();
    }

    // The current phi is/was used, so all its operands are used.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
      MDefinition* in = phi->getOperand(i);
      if (!in->isPhi() || !in->isUnused() || in->isInWorklist()) {
        continue;
      }
      in->setInWorklist();
      if (!worklist.append(in->toPhi())) {
        return false;
      }
    }
  }

  // Sweep dead phis.
  for (PostorderIterator block = graph.poBegin(); block != graph.poEnd();
       block++) {
    MPhiIterator iter = block->phisBegin();
    while (iter != block->phisEnd()) {
      MPhi* phi = *iter++;
      if (phi->isUnused()) {
        if (!phi->optimizeOutAllUses(graph.alloc())) {
          return false;
        }
        block->discardPhi(phi);
      }
    }
  }

  return true;
}

namespace {

// The type analysis algorithm inserts conversions and box/unbox instructions
// to make the IR graph well-typed for future passes.
//
// Phi adjustment: If a phi's inputs are all the same type, the phi is
// specialized to return that type.
//
// Input adjustment: Each input is asked to apply conversion operations to its
// inputs. This may include Box, Unbox, or other instruction-specific type
// conversion operations.
//
class TypeAnalyzer {
  MIRGenerator* mir;
  MIRGraph& graph;
  Vector<MPhi*, 0, SystemAllocPolicy> phiWorklist_;

  TempAllocator& alloc() const { return graph.alloc(); }

  bool addPhiToWorklist(MPhi* phi) {
    if (phi->isInWorklist()) {
      return true;
    }
    if (!phiWorklist_.append(phi)) {
      return false;
    }
    phi->setInWorklist();
    return true;
  }
  MPhi* popPhi() {
    MPhi* phi = phiWorklist_.popCopy();
    phi->setNotInWorklist();
    return phi;
  }

  [[nodiscard]] bool propagateAllPhiSpecializations();

  bool respecialize(MPhi* phi, MIRType type);
  bool propagateSpecialization(MPhi* phi);
  bool specializePhis();
  bool specializeOsrOnlyPhis();
  void replaceRedundantPhi(MPhi* phi);
  bool adjustPhiInputs(MPhi* phi);
  bool adjustInputs(MDefinition* def);
  bool insertConversions();

  bool checkFloatCoherency();
  bool graphContainsFloat32();
  bool markPhiConsumers();
  bool markPhiProducers();
  bool specializeValidFloatOps();
  bool tryEmitFloatOperations();
  bool propagateUnbox();

  bool shouldSpecializeOsrPhis() const;
  MIRType guessPhiType(MPhi* phi) const;

 public:
  TypeAnalyzer(MIRGenerator* mir, MIRGraph& graph) : mir(mir), graph(graph) {}

  bool analyze();
};

} /* anonymous namespace */

1902 bool TypeAnalyzer::shouldSpecializeOsrPhis() const {
1903 // [SMDOC] OSR Phi Type Specialization
1905 // Without special handling for OSR phis, we end up with unspecialized phis
1906 // (MIRType::Value) in the loop (pre)header and other blocks, resulting in
1907 // unnecessary boxing and unboxing in the loop body.
1909 // To fix this, phi type specialization needs special code to deal with the
1910 // OSR entry block. Recall that OSR results in the following basic block
1911 // structure:
1913 // +------------------+ +-----------------+
1914 // | Code before loop | | OSR entry block |
1915 // +------------------+ +-----------------+
1916 // | |
1917 // | |
1918 // | +---------------+ |
1919 // +---------> | OSR preheader | <---------+
1920 // +---------------+
1921 // |
1922 // V
1923 // +---------------+
1924 // | Loop header |<-----+
1925 // +---------------+ |
1926 // | |
1927 // ... |
1928 // | |
1929 // +---------------+ |
1930 // | Loop backedge |------+
1931 // +---------------+
1933 // OSR phi specialization happens in three steps:
1935 // (1) Specialize phis but ignore MOsrValue phi inputs. In other words,
1936 // pretend the OSR entry block doesn't exist. See guessPhiType.
1938 // (2) Once phi specialization is done, look at the types of loop header phis
1939 // and add these types to the corresponding preheader phis. This way, the
1940 // types of the preheader phis are based on the code before the loop and
1941 // the code in the loop body. These are exactly the types we expect for
1942 // the OSR Values. See the last part of TypeAnalyzer::specializePhis.
1944 // (3) For type-specialized preheader phis, add guard/unbox instructions to
1945 // the OSR entry block to guard the incoming Value indeed has this type.
1946 // This happens in:
1948 // * TypeAnalyzer::adjustPhiInputs: adds a fallible unbox for values that
1949 // can be unboxed.
1951 // * TypeAnalyzer::replaceRedundantPhi: adds a type guard for values that
1952 // can't be unboxed (null/undefined/magic Values).
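  //
  // A small sketch (hypothetical types): if the loop header phi for a local
  // |i| is specialized to Int32, step (2) respecializes the corresponding
  // preheader phi to Int32, and step (3) inserts into the OSR entry block a
  //
  // ```
  // unbox osrvalue to Int32 (fallible)
  // ```
  //
  // that bails out if the incoming interpreter value for |i| is not an Int32.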
1953 if (!mir->graph().osrBlock()) {
1954 return false;
1957 return !mir->outerInfo().hadSpeculativePhiBailout();
1960 // Try to specialize this phi based on its non-cyclic inputs.
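// For instance (assumed operand types, for illustration): a phi with inputs
// (Int32, Int32) is guessed as Int32, (Int32, Double) as Double, and
// (Int32, String) cannot be usefully specialized, so it is guessed as Value.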
1961 MIRType TypeAnalyzer::guessPhiType(MPhi* phi) const {
1962 #ifdef DEBUG
1963 // Check that different magic constants aren't flowing together. Ignore
1964 // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
1965 // away.
1966 MIRType magicType = MIRType::None;
1967 for (size_t i = 0; i < phi->numOperands(); i++) {
1968 MDefinition* in = phi->getOperand(i);
1969 if (in->type() == MIRType::MagicHole ||
1970 in->type() == MIRType::MagicIsConstructing) {
1971 if (magicType == MIRType::None) {
1972 magicType = in->type();
1974 MOZ_ASSERT(magicType == in->type());
1977 #endif
1979 MIRType type = MIRType::None;
1980 bool convertibleToFloat32 = false;
1981 bool hasOSRValueInput = false;
1982 DebugOnly<bool> hasSpecializableInput = false;
1983 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
1984 MDefinition* in = phi->getOperand(i);
1985 if (in->isPhi()) {
1986 hasSpecializableInput = true;
1987 if (!in->toPhi()->triedToSpecialize()) {
1988 continue;
1990 if (in->type() == MIRType::None) {
1991 // The operand is a phi we tried to specialize, but we were
1992 // unable to guess its type. propagateSpecialization will
1993 // propagate the type to this phi when it becomes known.
1994 continue;
1998 // See shouldSpecializeOsrPhis comment. This is the first step mentioned
1999 // there.
2000 if (shouldSpecializeOsrPhis() && in->isOsrValue()) {
2001 hasOSRValueInput = true;
2002 hasSpecializableInput = true;
2003 continue;
2006 if (type == MIRType::None) {
2007 type = in->type();
2008 if (in->canProduceFloat32() &&
2009 !mir->outerInfo().hadSpeculativePhiBailout()) {
2010 convertibleToFloat32 = true;
2012 continue;
2015 if (type == in->type()) {
2016 convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
2017 } else {
2018 if (convertibleToFloat32 && in->type() == MIRType::Float32) {
2019 // If we have only seen definitions convertible into Float32 so far
2020 // and now encounter a Float32 value, promote the previous values to Float32.
2021 type = MIRType::Float32;
2022 } else if (IsTypeRepresentableAsDouble(type) &&
2023 IsTypeRepresentableAsDouble(in->type())) {
2024 // Specialize phis with int32 and double operands as double.
2025 type = MIRType::Double;
2026 convertibleToFloat32 = convertibleToFloat32 && in->canProduceFloat32();
2027 } else {
2028 return MIRType::Value;
2033 if (hasOSRValueInput && type == MIRType::Float32) {
2034 // TODO(post-Warp): simplify float32 handling in this function or (better)
2035 // make the float32 analysis a stand-alone optimization pass instead of
2036 // complicating type analysis. See bug 1655773.
2037 type = MIRType::Double;
2040 MOZ_ASSERT_IF(type == MIRType::None, hasSpecializableInput);
2041 return type;
2044 bool TypeAnalyzer::respecialize(MPhi* phi, MIRType type) {
2045 if (phi->type() == type) {
2046 return true;
2048 phi->specialize(type);
2049 return addPhiToWorklist(phi);
2052 bool TypeAnalyzer::propagateSpecialization(MPhi* phi) {
2053 MOZ_ASSERT(phi->type() != MIRType::None);
2055 // Verify that this specialization matches any phis depending on it.
2056 for (MUseDefIterator iter(phi); iter; iter++) {
2057 if (!iter.def()->isPhi()) {
2058 continue;
2060 MPhi* use = iter.def()->toPhi();
2061 if (!use->triedToSpecialize()) {
2062 continue;
2064 if (use->type() == MIRType::None) {
2065 // We tried to specialize this phi, but were unable to guess its
2066 // type. Now that we know the type of one of its operands, we can
2067 // specialize it. If it can't be specialized as float32, specialize
2068 // as double.
2069 MIRType type = phi->type();
2070 if (type == MIRType::Float32 && !use->canProduceFloat32()) {
2071 type = MIRType::Double;
2073 if (!respecialize(use, type)) {
2074 return false;
2076 continue;
2078 if (use->type() != phi->type()) {
2079 // If one phi is Int32 but can produce Float32 and the other is
2080 // Float32, specialize both as Float32.
2081 if ((use->type() == MIRType::Int32 && use->canProduceFloat32() &&
2082 phi->type() == MIRType::Float32) ||
2083 (phi->type() == MIRType::Int32 && phi->canProduceFloat32() &&
2084 use->type() == MIRType::Float32)) {
2085 if (!respecialize(use, MIRType::Float32)) {
2086 return false;
2088 continue;
2091 // Specialize phis with int32 and double operands as double.
2092 if (IsTypeRepresentableAsDouble(use->type()) &&
2093 IsTypeRepresentableAsDouble(phi->type())) {
2094 if (!respecialize(use, MIRType::Double)) {
2095 return false;
2097 continue;
2100 // This phi in our use chain can no longer be specialized.
2101 if (!respecialize(use, MIRType::Value)) {
2102 return false;
2107 return true;
2110 bool TypeAnalyzer::propagateAllPhiSpecializations() {
2111 while (!phiWorklist_.empty()) {
2112 if (mir->shouldCancel("Specialize Phis (worklist)")) {
2113 return false;
2116 MPhi* phi = popPhi();
2117 if (!propagateSpecialization(phi)) {
2118 return false;
2122 return true;
2125 // If branch pruning removes the path from the entry block to the OSR
2126 // preheader, we may have phis (or chains of phis) with no operands
2127 // other than OsrValues. These phis will still have MIRType::None.
2128 // Since we don't have any information about them, we specialize them
2129 // as MIRType::Value.
2130 bool TypeAnalyzer::specializeOsrOnlyPhis() {
2131 MOZ_ASSERT(graph.osrBlock());
2132 MOZ_ASSERT(graph.osrPreHeaderBlock()->numPredecessors() == 1);
2134 for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
2135 block++) {
2136 if (mir->shouldCancel("Specialize osr-only phis (main loop)")) {
2137 return false;
2140 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
2141 if (mir->shouldCancel("Specialize osr-only phis (inner loop)")) {
2142 return false;
2145 if (phi->type() == MIRType::None) {
2146 phi->specialize(MIRType::Value);
2150 return true;
2153 bool TypeAnalyzer::specializePhis() {
2154 for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
2155 block++) {
2156 if (mir->shouldCancel("Specialize Phis (main loop)")) {
2157 return false;
2160 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
2161 if (mir->shouldCancel("Specialize Phis (inner loop)")) {
2162 return false;
2165 MIRType type = guessPhiType(*phi);
2166 phi->specialize(type);
2167 if (type == MIRType::None) {
2168 // We tried to guess the type but failed because all operands are
2169 // phis we still have to visit. Set the triedToSpecialize flag but
2170 // don't propagate the type to other phis, propagateSpecialization
2171 // will do that once we know the type of one of the operands.
2172 continue;
2174 if (!propagateSpecialization(*phi)) {
2175 return false;
2180 if (!propagateAllPhiSpecializations()) {
2181 return false;
2184 if (shouldSpecializeOsrPhis()) {
2185 // See shouldSpecializeOsrPhis comment. This is the second step, propagating
2186 // loop header phi types to preheader phis.
2187 MBasicBlock* preHeader = graph.osrPreHeaderBlock();
2188 MBasicBlock* header = preHeader->getSingleSuccessor();
2190 if (preHeader->numPredecessors() == 1) {
2191 MOZ_ASSERT(preHeader->getPredecessor(0) == graph.osrBlock());
2192 // Branch pruning has removed the path from the entry block
2193 // to the preheader. Specialize any phis with no non-osr inputs.
2194 if (!specializeOsrOnlyPhis()) {
2195 return false;
2197 } else if (header->isLoopHeader()) {
2198 for (MPhiIterator phi(header->phisBegin()); phi != header->phisEnd();
2199 phi++) {
2200 MPhi* preHeaderPhi = phi->getOperand(0)->toPhi();
2201 MOZ_ASSERT(preHeaderPhi->block() == preHeader);
2203 if (preHeaderPhi->type() == MIRType::Value) {
2204 // Already includes everything.
2205 continue;
2208 MIRType loopType = phi->type();
2209 if (!respecialize(preHeaderPhi, loopType)) {
2210 return false;
2213 if (!propagateAllPhiSpecializations()) {
2214 return false;
2216 } else {
2217 // Edge case: there is no backedge in this loop. This can happen
2218 // if the header is a 'pending' loop header when control flow in
2219 // the loop body is terminated unconditionally, or if a block
2220 // that dominates the backedge unconditionally bails out. In
2221 // this case the header only has the preheader as predecessor
2222 // and we don't need to do anything.
2223 MOZ_ASSERT(header->numPredecessors() == 1);
2227 MOZ_ASSERT(phiWorklist_.empty());
2228 return true;
2231 bool TypeAnalyzer::adjustPhiInputs(MPhi* phi) {
2232 MIRType phiType = phi->type();
2233 MOZ_ASSERT(phiType != MIRType::None);
2235 // If we specialized a type that's not Value, there are 3 cases:
2236 // 1. Every input is of that type.
2237 // 2. Every observed input is of that type (i.e., some inputs haven't been
2238 // executed yet).
2239 // 3. Inputs were doubles and int32s, and the phi was specialized to double.
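  //
  // A sketch of case 3, mirroring the code below (hypothetical operand |in|
  // at index |i|): for a Double phi with an Int32 operand, we insert
  //
  // ```
  // MInstruction* replacement = MToDouble::New(alloc(), in);
  // in->block()->insertBefore(in->block()->lastIns(), replacement);
  // phi->replaceOperand(i, replacement);
  // ```
  //
  // so the conversion happens at the end of |in|'s defining block.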
2240 if (phiType != MIRType::Value) {
2241 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
2242 MDefinition* in = phi->getOperand(i);
2243 if (in->type() == phiType) {
2244 continue;
2247 if (!alloc().ensureBallast()) {
2248 return false;
2251 if (in->isBox() && in->toBox()->input()->type() == phiType) {
2252 phi->replaceOperand(i, in->toBox()->input());
2253 } else {
2254 MInstruction* replacement;
2256 if (phiType == MIRType::Double && IsFloatType(in->type())) {
2257 // Convert int32 and float32 operands to double.
2258 replacement = MToDouble::New(alloc(), in);
2259 } else if (phiType == MIRType::Float32) {
2260 if (in->type() == MIRType::Int32 || in->type() == MIRType::Double) {
2261 replacement = MToFloat32::New(alloc(), in);
2262 } else {
2263 // See comment below
2264 if (in->type() != MIRType::Value) {
2265 MBox* box = MBox::New(alloc(), in);
2266 in->block()->insertBefore(in->block()->lastIns(), box);
2267 in = box;
2270 MUnbox* unbox =
2271 MUnbox::New(alloc(), in, MIRType::Double, MUnbox::Fallible);
2272 unbox->setBailoutKind(BailoutKind::SpeculativePhi);
2273 in->block()->insertBefore(in->block()->lastIns(), unbox);
2274 replacement = MToFloat32::New(alloc(), unbox);
2276 } else {
2277 // If we know this branch will fail to convert to phiType,
2278 // insert a box that'll immediately fail in the fallible unbox
2279 // below.
2280 if (in->type() != MIRType::Value) {
2281 MBox* box = MBox::New(alloc(), in);
2282 in->block()->insertBefore(in->block()->lastIns(), box);
2283 in = box;
2286 // Be optimistic and insert unboxes when the operand is a
2287 // value.
2288 replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
2291 replacement->setBailoutKind(BailoutKind::SpeculativePhi);
2292 in->block()->insertBefore(in->block()->lastIns(), replacement);
2293 phi->replaceOperand(i, replacement);
2297 return true;
2300 // Box every typed input.
2301 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
2302 MDefinition* in = phi->getOperand(i);
2303 if (in->type() == MIRType::Value) {
2304 continue;
2307 // The input is being explicitly unboxed, so sneak past and grab the
2308 // original box. Don't bother optimizing if magic values are involved.
2309 if (in->isUnbox()) {
2310 MDefinition* unboxInput = in->toUnbox()->input();
2311 if (!IsMagicType(unboxInput->type()) && phi->typeIncludes(unboxInput)) {
2312 in = in->toUnbox()->input();
2316 if (in->type() != MIRType::Value) {
2317 if (!alloc().ensureBallast()) {
2318 return false;
2321 MBasicBlock* pred = phi->block()->getPredecessor(i);
2322 in = AlwaysBoxAt(alloc(), pred->lastIns(), in);
2325 phi->replaceOperand(i, in);
2328 return true;
2331 bool TypeAnalyzer::adjustInputs(MDefinition* def) {
2332 // Definitions such as MPhi have no type policy.
2333 if (!def->isInstruction()) {
2334 return true;
2337 MInstruction* ins = def->toInstruction();
2338 const TypePolicy* policy = ins->typePolicy();
2339 if (policy && !policy->adjustInputs(alloc(), ins)) {
2340 return false;
2342 return true;
2345 void TypeAnalyzer::replaceRedundantPhi(MPhi* phi) {
2346 MBasicBlock* block = phi->block();
2347 js::Value v;
2348 switch (phi->type()) {
2349 case MIRType::Undefined:
2350 v = UndefinedValue();
2351 break;
2352 case MIRType::Null:
2353 v = NullValue();
2354 break;
2355 case MIRType::MagicOptimizedOut:
2356 v = MagicValue(JS_OPTIMIZED_OUT);
2357 break;
2358 case MIRType::MagicUninitializedLexical:
2359 v = MagicValue(JS_UNINITIALIZED_LEXICAL);
2360 break;
2361 case MIRType::MagicIsConstructing:
2362 v = MagicValue(JS_IS_CONSTRUCTING);
2363 break;
2364 case MIRType::MagicHole:
2365 default:
2366 MOZ_CRASH("unexpected type");
2368 MConstant* c = MConstant::New(alloc(), v);
2369 // The instruction pass will insert the box
2370 block->insertBefore(*(block->begin()), c);
2371 phi->justReplaceAllUsesWith(c);
2373 if (shouldSpecializeOsrPhis()) {
2374 // See shouldSpecializeOsrPhis comment. This is part of the third step,
2375 // guarding that the incoming MOsrValue is of this type.
2376 for (uint32_t i = 0; i < phi->numOperands(); i++) {
2377 MDefinition* def = phi->getOperand(i);
2378 if (def->type() != phi->type()) {
2379 MOZ_ASSERT(def->isOsrValue() || def->isPhi());
2380 MOZ_ASSERT(def->type() == MIRType::Value);
2381 MGuardValue* guard = MGuardValue::New(alloc(), def, v);
2382 guard->setBailoutKind(BailoutKind::SpeculativePhi);
2383 def->block()->insertBefore(def->block()->lastIns(), guard);
2389 bool TypeAnalyzer::insertConversions() {
2390 // Instructions are processed in reverse postorder: all defs are seen
2391 // before their uses. This ensures that output adjustment (which may
2392 // rewrite inputs of uses) does not conflict with input adjustment.
2393 for (ReversePostorderIterator block(graph.rpoBegin());
2394 block != graph.rpoEnd(); block++) {
2395 if (mir->shouldCancel("Insert Conversions")) {
2396 return false;
2399 for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
2400 iter != end;) {
2401 MPhi* phi = *iter++;
2402 if (IsNullOrUndefined(phi->type()) || IsMagicType(phi->type())) {
2403 // We can replace this phi with a constant.
2404 if (!alloc().ensureBallast()) {
2405 return false;
2407 replaceRedundantPhi(phi);
2408 block->discardPhi(phi);
2409 } else {
2410 if (!adjustPhiInputs(phi)) {
2411 return false;
2416 // AdjustInputs can add/remove/mutate instructions before and after the
2417 // current instruction. Only increment the iterator after it is finished.
2418 for (MInstructionIterator iter(block->begin()); iter != block->end();
2419 iter++) {
2420 if (!alloc().ensureBallast()) {
2421 return false;
2424 if (!adjustInputs(*iter)) {
2425 return false;
2429 return true;
2432 /* clang-format off */
2434 // This function tries to emit Float32 specialized operations whenever it's possible.
2435 // MIR nodes are flagged as:
2436 // - Producers, when they can create Float32 that might need to be coerced into a Double.
2437 // Loads from Float32 arrays and conversions to Float32 are producers.
2438 // - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
2439 // Stores to Float32 arrays and conversions to Float32 are consumers.
2440 // - Float32 commutative, when using the Float32 instruction instead of the Double instruction
2441 // does not result in a compound loss of precision. This is the case for +, -, /, * with 2
2442 // operands, for instance. However, an addition with 3 operands is not commutative anymore,
2443 // so an intermediate coercion is needed.
2444 // Except for phis, all these flags are known after Ion building, so they cannot change during
2445 // the process.
2447 // The idea behind the algorithm is easy: whenever we can prove that a commutative operation
2448 // has only producers as inputs and consumers as uses, we can specialize the operation as a
2449 // float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
2450 // if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
2452 // Phis have a special status. Phis need to be flagged as producers or consumers as they can
2453 // be inputs or outputs of commutative instructions. Fortunately, the producer and consumer
2454 // properties are such that we can deduce each property using all non-phi inputs first (which form
2455 // an initial phi graph) and then propagate the properties from one phi to another using a
2456 // fixed-point algorithm. The algorithm is guaranteed to terminate, as each iteration has fewer
2457 // than or as many flagged phis as the previous iteration (so the worst steady-state case is all
2458 // phis being flagged as false).
2460 // In a nutshell, the algorithm applies three passes:
2461 // 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
2462 // all its non-phi uses. Then each phi propagates its value to other phis. If after propagation,
2463 // the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
2464 // consumer if all of its uses are consumers.
2465 // 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
2466 // the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
2467 // 3 - Go through all commutative operations and ensure their inputs are all producers and their
2468 // uses are all consumers.
2470 /* clang-format on */
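// An illustrative input (hypothetical JS) on which all three passes agree:
//
// ```
// var x = Math.fround(a) + Math.fround(b);
// return Math.fround(x);
// ```
//
// The two Math.fround calls on the inputs are producers, the final
// Math.fround is a consumer, and the addition is Float32 commutative, so the
// add can be specialized as a Float32 add without changing the result.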
2471 bool TypeAnalyzer::markPhiConsumers() {
2472 MOZ_ASSERT(phiWorklist_.empty());
2474 // Iterate in postorder so worklist is initialized to RPO.
2475 for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
2476 ++block) {
2477 if (mir->shouldCancel(
2478 "Ensure Float32 commutativity - Consumer Phis - Initial state")) {
2479 return false;
2482 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
2483 MOZ_ASSERT(!phi->isInWorklist());
2484 bool canConsumeFloat32 = !phi->isImplicitlyUsed();
2485 for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
2486 MDefinition* usedef = use.def();
2487 canConsumeFloat32 &=
2488 usedef->isPhi() || usedef->canConsumeFloat32(use.use());
2490 phi->setCanConsumeFloat32(canConsumeFloat32);
2491 if (canConsumeFloat32 && !addPhiToWorklist(*phi)) {
2492 return false;
2497 while (!phiWorklist_.empty()) {
2498 if (mir->shouldCancel(
2499 "Ensure Float32 commutativity - Consumer Phis - Fixed point")) {
2500 return false;
2503 MPhi* phi = popPhi();
2504 MOZ_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));
2506 bool validConsumer = true;
2507 for (MUseDefIterator use(phi); use; use++) {
2508 MDefinition* def = use.def();
2509 if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
2510 validConsumer = false;
2511 break;
2515 if (validConsumer) {
2516 continue;
2519 // Propagate invalidated phis
2520 phi->setCanConsumeFloat32(false);
2521 for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
2522 MDefinition* input = phi->getOperand(i);
2523 if (input->isPhi() && !input->isInWorklist() &&
2524 input->canConsumeFloat32(nullptr /* unused */)) {
2525 if (!addPhiToWorklist(input->toPhi())) {
2526 return false;
2531 return true;
2534 bool TypeAnalyzer::markPhiProducers() {
2535 MOZ_ASSERT(phiWorklist_.empty());
2537 // Iterate in reverse postorder so worklist is initialized to PO.
2538 for (ReversePostorderIterator block(graph.rpoBegin());
2539 block != graph.rpoEnd(); ++block) {
2540 if (mir->shouldCancel(
2541 "Ensure Float32 commutativity - Producer Phis - initial state")) {
2542 return false;
2545 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
2546 MOZ_ASSERT(!phi->isInWorklist());
2547 bool canProduceFloat32 = true;
2548 for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e;
2549 ++i) {
2550 MDefinition* input = phi->getOperand(i);
2551 canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
2553 phi->setCanProduceFloat32(canProduceFloat32);
2554 if (canProduceFloat32 && !addPhiToWorklist(*phi)) {
2555 return false;
2560 while (!phiWorklist_.empty()) {
2561 if (mir->shouldCancel(
2562 "Ensure Float32 commutativity - Producer Phis - Fixed point")) {
2563 return false;
2566 MPhi* phi = popPhi();
2567 MOZ_ASSERT(phi->canProduceFloat32());
2569 bool validProducer = true;
2570 for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
2571 MDefinition* input = phi->getOperand(i);
2572 if (input->isPhi() && !input->canProduceFloat32()) {
2573 validProducer = false;
2574 break;
2578 if (validProducer) {
2579 continue;
2582 // Propagate invalidated phis
2583 phi->setCanProduceFloat32(false);
2584 for (MUseDefIterator use(phi); use; use++) {
2585 MDefinition* def = use.def();
2586 if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32()) {
2587 if (!addPhiToWorklist(def->toPhi())) {
2588 return false;
2593 return true;
2596 bool TypeAnalyzer::specializeValidFloatOps() {
2597 for (ReversePostorderIterator block(graph.rpoBegin());
2598 block != graph.rpoEnd(); ++block) {
2599 if (mir->shouldCancel("Ensure Float32 commutativity - Instructions")) {
2600 return false;
2603 for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
2604 if (!ins->isFloat32Commutative()) {
2605 continue;
2608 if (ins->type() == MIRType::Float32) {
2609 continue;
2612 if (!alloc().ensureBallast()) {
2613 return false;
2616 // This call will try to specialize the instruction iff all uses are
2617 // consumers and all inputs are producers.
2618 ins->trySpecializeFloat32(alloc());
2621 return true;
2624 bool TypeAnalyzer::graphContainsFloat32() {
2625 for (ReversePostorderIterator block(graph.rpoBegin());
2626 block != graph.rpoEnd(); ++block) {
2627 for (MDefinitionIterator def(*block); def; def++) {
2628 if (mir->shouldCancel(
2629 "Ensure Float32 commutativity - Graph contains Float32")) {
2630 return false;
2633 if (def->type() == MIRType::Float32) {
2634 return true;
2638 return false;
2641 bool TypeAnalyzer::tryEmitFloatOperations() {
2642 // Asm.js relies on ahead-of-time type checks to specialize operations; there
2643 // is no need to check them again at this point.
2644 if (mir->compilingWasm()) {
2645 return true;
2648 // Check ahead of time that there is at least one definition typed as Float32;
2649 // otherwise we don't need this pass.
2650 if (!graphContainsFloat32()) {
2651 return true;
2654 // WarpBuilder skips over code that can't be reached except through
2655 // a catch block. Locals and arguments may be observable in such
2656 // code after bailing out, so we can't rely on seeing all uses.
2657 if (graph.hasTryBlock()) {
2658 return true;
2661 if (!markPhiConsumers()) {
2662 return false;
2664 if (!markPhiProducers()) {
2665 return false;
2667 if (!specializeValidFloatOps()) {
2668 return false;
2670 return true;
2673 bool TypeAnalyzer::checkFloatCoherency() {
2674 #ifdef DEBUG
2675 // Assert that all Float32 instructions flow into Float32 consumers or
2676 // specialized operations.
2677 for (ReversePostorderIterator block(graph.rpoBegin());
2678 block != graph.rpoEnd(); ++block) {
2679 if (mir->shouldCancel("Check Float32 coherency")) {
2680 return false;
2683 for (MDefinitionIterator def(*block); def; def++) {
2684 if (def->type() != MIRType::Float32) {
2685 continue;
2688 for (MUseDefIterator use(*def); use; use++) {
2689 MDefinition* consumer = use.def();
2690 MOZ_ASSERT(consumer->isConsistentFloat32Use(use.use()));
2694 #endif
2695 return true;
2698 static bool HappensBefore(const MDefinition* earlier,
2699 const MDefinition* later) {
2700 MOZ_ASSERT(earlier->block() == later->block());
2702 for (auto* ins : *earlier->block()) {
2703 if (ins == earlier) {
2704 return true;
2706 if (ins == later) {
2707 return false;
2710 MOZ_CRASH("earlier and later are instructions in the block");
2713 // Propagate type information from dominating unbox instructions.
2715 // This optimization applies, for example, to self-hosted String.prototype
2716 // functions.
2718 // Example:
2719 // ```
2720 // String.prototype.id = function() {
2721 // // Strict mode to avoid ToObject on primitive this-values.
2722 // "use strict";
2724 // // Template string to apply ToString on the this-value.
2725 // return `${this}`;
2726 // };
2728 // function f(s) {
2729 // // Assume |s| is a string value.
2730 // return s.id();
2731 // }
2732 // ```
2734 // Compiles into: (Graph after Scalar Replacement)
2736 // ┌───────────────────────────────────────────────────────────────────────────┐
2737 // │ Block 0 │
2738 // │ resumepoint 1 0 2 2 │
2739 // │ 0 parameter THIS_SLOT Value │
2740 // │ 1 parameter 0 Value │
2741 // │ 2 constant undefined Undefined │
2742 // │ 3 start │
2743 // │ 4 checkoverrecursed │
2744 // │ 5 unbox parameter1 to String (fallible) String │
2745 // │ 6 constant object 1d908053e088 (String) Object │
2746 // │ 7 guardshape constant6:Object Object │
2747 // │ 8 slots guardshape7:Object Slots │
2748 // │ 9 loaddynamicslot slots8:Slots (slot 53) Value │
2749 // │ 10 constant 0x0 Int32 │
2750 // │ 11 unbox loaddynamicslot9 to Object (fallible) Object │
2751 // │ 12 nurseryobject Object │
2752 // │ 13 guardspecificfunction unbox11:Object nurseryobject12:Object Object │
2753 // │ 14 goto block1 │
2754 // └──────────────────────────────────┬────────────────────────────────────────┘
2755 // │
2756 // ┌──────────────────────────────────▼────────────────────────────────────────┐
2757 // │ Block 1 │
2758 // │ ((0)) resumepoint 15 1 15 15 | 1 13 1 0 2 2 │
2759 // │ 15 constant undefined Undefined │
2760 // │ 16 tostring parameter1:Value String │
2761 // │ 18 goto block2 │
2762 // └──────────────────────────────────┬────────────────────────────────────────┘
2763 // │
2764 // ┌──────────────▼──────────────┐
2765 // │ Block 2 │
2766 // │ resumepoint 16 1 0 2 2 │
2767 // │ 19 return tostring16:String │
2768 // └─────────────────────────────┘
2770 // The Unbox instruction is used as a type guard. The ToString instruction
2771 // doesn't use the type information from the preceding Unbox instruction and
2772 // therefore has to assume its operand can be any value.
2774 // When instead propagating the type information from the preceding Unbox
2775 // instruction, this graph is constructed after the "Apply types" phase:
2777 // ┌───────────────────────────────────────────────────────────────────────────┐
2778 // │ Block 0 │
2779 // │ resumepoint 1 0 2 2 │
2780 // │ 0 parameter THIS_SLOT Value │
2781 // │ 1 parameter 0 Value │
2782 // │ 2 constant undefined Undefined │
2783 // │ 3 start │
2784 // │ 4 checkoverrecursed │
2785 // │ 5 unbox parameter1 to String (fallible) String │
2786 // │ 6 constant object 1d908053e088 (String) Object │
2787 // │ 7 guardshape constant6:Object Object │
2788 // │ 8 slots guardshape7:Object Slots │
2789 // │ 9 loaddynamicslot slots8:Slots (slot 53) Value │
2790 // │ 10 constant 0x0 Int32 │
2791 // │ 11 unbox loaddynamicslot9 to Object (fallible) Object │
2792 // │ 12 nurseryobject Object │
2793 // │ 13 guardspecificfunction unbox11:Object nurseryobject12:Object Object │
2794 // │ 14 goto block1 │
2795 // └──────────────────────────────────┬────────────────────────────────────────┘
2796 // │
2797 // ┌──────────────────────────────────▼────────────────────────────────────────┐
2798 // │ Block 1 │
2799 // │ ((0)) resumepoint 15 1 15 15 | 1 13 1 0 2 2 │
2800 // │ 15 constant undefined Undefined │
2801 // │ 20 unbox parameter1 to String (fallible) String │
2802 // │ 16 tostring parameter1:Value String │
2803 // │ 18 goto block2 │
2804 // └──────────────────────────────────┬────────────────────────────────────────┘
2805 // │
2806 // ┌──────────────▼─────────────────────┐
2807 // │ Block 2 │
2808 // │ resumepoint 16 1 0 2 2 │
2809 // │ 21 box tostring16:String Value │
2810 // │ 19 return box21:Value │
2811 // └────────────────────────────────────┘
2813 // GVN will later merge both Unbox instructions and fold away the ToString
2814 // instruction, so we get this final graph:
2816 // ┌───────────────────────────────────────────────────────────────────────────┐
2817 // │ Block 0 │
2818 // │ resumepoint 1 0 2 2 │
2819 // │ 0 parameter THIS_SLOT Value │
2820 // │ 1 parameter 0 Value │
2821 // │ 2 constant undefined Undefined │
2822 // │ 3 start │
2823 // │ 4 checkoverrecursed │
2824 // │ 5 unbox parameter1 to String (fallible) String │
2825 // │ 6 constant object 1d908053e088 (String) Object │
2826 // │ 7 guardshape constant6:Object Object │
2827 // │ 8 slots guardshape7:Object Slots │
2828 // │ 22 loaddynamicslotandunbox slots8:Slots (slot 53) Object │
2829 // │ 11 nurseryobject Object │
2830 // │ 12 guardspecificfunction load22:Object nurseryobject11:Object Object │
2831 // │ 13 goto block1 │
2832 // └──────────────────────────────────┬────────────────────────────────────────┘
2833 // │
2834 // ┌──────────────────────────────────▼────────────────────────────────────────┐
2835 // │ Block 1 │
2836 // │ ((0)) resumepoint 2 1 2 2 | 1 12 1 0 2 2 │
2837 // │ 14 goto block2 │
2838 // └──────────────────────────────────┬────────────────────────────────────────┘
2839 // │
2840 // ┌──────────────▼─────────────────────┐
2841 // │ Block 2 │
2842 // │ resumepoint 5 1 0 2 2 │
2843 // │ 15 return parameter1:Value │
2844 // └────────────────────────────────────┘
2846 bool TypeAnalyzer::propagateUnbox() {
2847 // Visit the blocks in post-order, so that the type information of the closest
2848 // unbox operation is used.
2849 for (PostorderIterator block(graph.poBegin()); block != graph.poEnd();
2850 block++) {
2851 if (mir->shouldCancel("Propagate Unbox")) {
2852 return false;
2855 // Iterate over all instructions to look for unbox instructions.
2856 for (MInstructionIterator iter(block->begin()); iter != block->end();
2857 iter++) {
2858 if (!iter->isUnbox()) {
2859 continue;
2862 auto* unbox = iter->toUnbox();
2863 auto* input = unbox->input();
2865 // Ignore unbox operations on typed values.
2866 if (input->type() != MIRType::Value) {
2867 continue;
2870 // Ignore unbox to floating point types, because propagating boxed Int32
2871 // values as Double can lead to repeated bailouts when later instructions
2872 // expect Int32 inputs.
2873 if (IsFloatingPointType(unbox->type())) {
2874 continue;
2877 // Inspect other uses of |input| to propagate the unboxed type information
2878 // from |unbox|.
2879 for (auto uses = input->usesBegin(); uses != input->usesEnd();) {
2880 auto* use = *uses++;
2882 // Ignore resume points.
2883 if (!use->consumer()->isDefinition()) {
2884 continue;
2886 auto* def = use->consumer()->toDefinition();
2888 // Ignore any unbox operations, including the current |unbox|.
2889 if (def->isUnbox()) {
2890 continue;
2893 // Ignore phi nodes, because we don't yet support them.
2894 if (def->isPhi()) {
2895 continue;
2898 // The unbox operation needs to happen before the other use; otherwise
2899 // we can't propagate the type information.
2900 if (unbox->block() == def->block()) {
2901 if (!HappensBefore(unbox, def)) {
2902 continue;
2904 } else {
2905 if (!unbox->block()->dominates(def->block())) {
2906 continue;
2910 // Replace the use with |unbox|, so that GVN knows about the actual
2911 // value type and can more easily fold unnecessary operations. If the
2912 // instruction actually needs a boxed input, the BoxPolicy type policy
2913 // will simply unwrap the unbox instruction.
2914 use->replaceProducer(unbox);
2916 // The uses in the MIR graph no longer reflect the uses in the bytecode,
2917 // so we have to mark |input| as implicitly used.
2918 input->setImplicitlyUsedUnchecked();
2922 return true;
2925 bool TypeAnalyzer::analyze() {
2926 if (!tryEmitFloatOperations()) {
2927 return false;
2929 if (!specializePhis()) {
2930 return false;
2932 if (!propagateUnbox()) {
2933 return false;
2935 if (!insertConversions()) {
2936 return false;
2938 if (!checkFloatCoherency()) {
2939 return false;
2941 return true;
2944 bool jit::ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph) {
2945 TypeAnalyzer analyzer(mir, graph);
2947 if (!analyzer.analyze()) {
2948 return false;
2951 return true;
2954 void jit::RenumberBlocks(MIRGraph& graph) {
2955 size_t id = 0;
2956 for (ReversePostorderIterator block(graph.rpoBegin());
2957 block != graph.rpoEnd(); block++) {
2958 block->setId(id++);
2962 // A utility for code which adds/deletes blocks. Renumber the remaining blocks,
2963 // recompute dominators, and optionally recompute AliasAnalysis dependencies.
2964 bool jit::AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph,
2965 bool updateAliasAnalysis,
2966 bool underValueNumberer) {
2967 // Renumber the blocks and clear out the old dominator info.
2968 size_t id = 0;
2969 for (ReversePostorderIterator i(graph.rpoBegin()), e(graph.rpoEnd()); i != e;
2970 ++i) {
2971 i->clearDominatorInfo();
2972 i->setId(id++);
2975 // Recompute dominator info.
2976 if (!BuildDominatorTree(graph)) {
2977 return false;
2980 // If needed, update alias analysis dependencies.
2981 if (updateAliasAnalysis) {
2982 if (!AliasAnalysis(mir, graph).analyze()) {
2983 return false;
2987 AssertExtendedGraphCoherency(graph, underValueNumberer);
2988 return true;
2991 // Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
2992 // Alias analysis dependencies may be invalid after calling this function.
2993 bool jit::RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph,
2994 uint32_t numMarkedBlocks) {
2995 if (numMarkedBlocks == graph.numBlocks()) {
2996 // If all blocks are marked, no blocks need removal. Just clear the
2997 // marks. We'll still need to update the dominator tree below though,
2998 // since we may have removed edges even if we didn't remove any blocks.
2999 graph.unmarkBlocks();
3000 } else {
3001 // As we are going to remove edges and basic blocks, we have to mark
3002 // instructions which would be needed by baseline if we were to
3003 // bail out.
3004 for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
3005 MBasicBlock* block = *it++;
3006 if (block->isMarked()) {
3007 continue;
3010 FlagAllOperandsAsImplicitlyUsed(mir, block);
3013 // Find unmarked blocks and remove them.
3014 for (ReversePostorderIterator iter(graph.rpoBegin());
3015 iter != graph.rpoEnd();) {
3016 MBasicBlock* block = *iter++;
3018 if (block->isMarked()) {
3019 block->unmark();
3020 continue;
3023 // The block is unreachable. Clear out the loop header flag, as
3024 // we're doing the sweep of a mark-and-sweep here, so we no longer
3025 // need to worry about whether an unmarked block is a loop or not.
3026 if (block->isLoopHeader()) {
3027 block->clearLoopHeader();
3030 for (size_t i = 0, e = block->numSuccessors(); i != e; ++i) {
3031 block->getSuccessor(i)->removePredecessor(block);
3033 graph.removeBlock(block);
3037 // Renumber the blocks and update the dominator tree.
3038 return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
3041 // A Simple, Fast Dominance Algorithm by Cooper et al.
3042 // Modified to support empty intersections for OSR, and in RPO.
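// A small worked example (hypothetical RPO ids): intersecting block 7 (idom
// chain 7 -> 4 -> 1) with block 6 (idom chain 6 -> 5 -> 1) repeatedly walks
// the higher-id finger up its idom chain (7 -> 4, then 6 -> 5 -> 1, then
// 4 -> 1) until both fingers meet at block 1, the common dominator.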
3043 static MBasicBlock* IntersectDominators(MBasicBlock* block1,
3044 MBasicBlock* block2) {
3045 MBasicBlock* finger1 = block1;
3046 MBasicBlock* finger2 = block2;
3048 MOZ_ASSERT(finger1);
3049 MOZ_ASSERT(finger2);
3051 // In the original paper, the block ID comparisons are on the postorder index.
3052 // This implementation iterates in RPO, so the comparisons are reversed.
3054 // For this function to be called, the block must have multiple predecessors.
3055 // If a finger is then found to be self-dominating, it must therefore be
3056 // reachable from multiple roots through non-intersecting control flow.
3057 // nullptr is returned in this case, to denote an empty intersection.
3059 while (finger1->id() != finger2->id()) {
3060 while (finger1->id() > finger2->id()) {
3061 MBasicBlock* idom = finger1->immediateDominator();
3062 if (idom == finger1) {
3063 return nullptr; // Empty intersection.
3065 finger1 = idom;
3068 while (finger2->id() > finger1->id()) {
3069 MBasicBlock* idom = finger2->immediateDominator();
3070 if (idom == finger2) {
3071 return nullptr; // Empty intersection.
3073 finger2 = idom;
3076 return finger1;
3079 void jit::ClearDominatorTree(MIRGraph& graph) {
3080 for (MBasicBlockIterator iter = graph.begin(); iter != graph.end(); iter++) {
3081 iter->clearDominatorInfo();
3085 static void ComputeImmediateDominators(MIRGraph& graph) {
3086 // The default start block is a root and therefore only self-dominates.
3087 MBasicBlock* startBlock = graph.entryBlock();
3088 startBlock->setImmediateDominator(startBlock);
3090 // Any OSR block is a root and therefore only self-dominates.
3091 MBasicBlock* osrBlock = graph.osrBlock();
3092 if (osrBlock) {
3093 osrBlock->setImmediateDominator(osrBlock);
3096 bool changed = true;
3098 while (changed) {
3099 changed = false;
3101 ReversePostorderIterator block = graph.rpoBegin();
3103 // For each block in RPO, intersect all dominators.
3104 for (; block != graph.rpoEnd(); block++) {
3105 // If a node has once been found to have no exclusive dominator,
3106 // it will never have an exclusive dominator, so it may be skipped.
3107 if (block->immediateDominator() == *block) {
3108 continue;
3111 // A block with no predecessors is not reachable from any entry, so
3112 // it self-dominates.
3113 if (MOZ_UNLIKELY(block->numPredecessors() == 0)) {
3114 block->setImmediateDominator(*block);
3115 continue;
3118 MBasicBlock* newIdom = block->getPredecessor(0);
3120 // Find the first common dominator.
3121 for (size_t i = 1; i < block->numPredecessors(); i++) {
3122 MBasicBlock* pred = block->getPredecessor(i);
3123 if (pred->immediateDominator() == nullptr) {
3124 continue;
3127 newIdom = IntersectDominators(pred, newIdom);
3129 // If there is no common dominator, the block self-dominates.
3130 if (newIdom == nullptr) {
3131 block->setImmediateDominator(*block);
3132 changed = true;
3133 break;
3137 if (newIdom && block->immediateDominator() != newIdom) {
3138 block->setImmediateDominator(newIdom);
3139 changed = true;
3144 #ifdef DEBUG
3145 // Assert that all blocks have dominator information.
3146 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
3147 block++) {
3148 MOZ_ASSERT(block->immediateDominator() != nullptr);
3150 #endif
3153 bool jit::BuildDominatorTree(MIRGraph& graph) {
3154 MOZ_ASSERT(graph.canBuildDominators());
3156 ComputeImmediateDominators(graph);
3158 Vector<MBasicBlock*, 4, JitAllocPolicy> worklist(graph.alloc());
3160 // Traversing through the graph in post-order means that every non-phi use
3161 // of a definition is visited before the def itself. Since a def
3162 // dominates its uses, by the time we reach a particular
3163 // block, we have processed all of its dominated children, so
3164 // block->numDominated() is accurate.
3165 for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
3166 MBasicBlock* child = *i;
3167 MBasicBlock* parent = child->immediateDominator();
3169 // Dominance is defined such that blocks always dominate themselves.
3170 child->addNumDominated(1);
3172 // If the block only self-dominates, it has no definite parent.
3173 // Add it to the worklist as a root for pre-order traversal.
3174 // This includes all roots. Order does not matter.
3175 if (child == parent) {
3176 if (!worklist.append(child)) {
3177 return false;
3179 continue;
3182 if (!parent->addImmediatelyDominatedBlock(child)) {
3183 return false;
3186 parent->addNumDominated(child->numDominated());
3189 #ifdef DEBUG
3190 // If compiling with OSR, many blocks will self-dominate.
3191 // Without OSR, there is only one root block which dominates all.
3192 if (!graph.osrBlock()) {
3193 MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
3195 #endif
3196 // Now, iterate through the dominator tree in pre-order and annotate every
3197 // block with its index in the traversal.
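  // A sketch of why this index is useful (assuming the usual interval
  // encoding): a dominator-tree subtree occupies a contiguous range of
  // pre-order indexes, so a dominance query reduces to a range check:
  //
  //   a dominates b  iff  a->domIndex() <= b->domIndex() &&
  //                       b->domIndex() - a->domIndex() < a->numDominated()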
3198 size_t index = 0;
3199 while (!worklist.empty()) {
3200 MBasicBlock* block = worklist.popCopy();
3201 block->setDomIndex(index);
3203 if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
3204 block->immediatelyDominatedBlocksEnd())) {
3205 return false;
3207 index++;
3210 return true;
3213 bool jit::BuildPhiReverseMapping(MIRGraph& graph) {
3214 // Build a mapping such that, given a basic block whose successor has one or
3215 // more phis, we can find our specific input to each phi. To make this fast
3216 // mapping work we rely on a specific property of our structured control
3217 // flow graph: For a block with phis, its predecessors each have only one
3218 // successor with phis. Consider each case:
3219 // * Blocks with fewer than two predecessors cannot have phis.
3220 // * Breaks. A break always has exactly one successor, and the break
3221 // catch block has exactly one predecessor for each break, as
3222 // well as a final predecessor for the actual loop exit.
3223 // * Continues. A continue always has exactly one successor, and the
3224 // continue catch block has exactly one predecessor for each
3225 // continue, as well as a final predecessor for the actual
3226 // loop continuation. The continue itself has exactly one
3227 // successor.
3228 // * An if. Each branch has exactly one predecessor.
3229 // * A switch. Each branch has exactly one predecessor.
3230 // * Loop tail. A new block is always created for the exit, and if a
3231 // break statement is present, the exit block will forward
3232 // directly to the break block.
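  // A sketch of how the mapping is consumed (hypothetical |pred| and |succ|,
  // with succ == pred->successorWithPhis()):
  //
  //   size_t j = pred->positionInPhiSuccessor();
  //   for (MPhiIterator phi(succ->phisBegin()); phi != succ->phisEnd();
  //        phi++) {
  //     MDefinition* fromPred = phi->getOperand(j);  // value flowing in
  //   }                                              // from |pred|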
3233 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
3234 block++) {
3235 if (block->phisEmpty()) {
3236 continue;
3239 // Assert on the above.
3240 for (size_t j = 0; j < block->numPredecessors(); j++) {
3241 MBasicBlock* pred = block->getPredecessor(j);
3243 #ifdef DEBUG
3244 size_t numSuccessorsWithPhis = 0;
3245 for (size_t k = 0; k < pred->numSuccessors(); k++) {
3246 MBasicBlock* successor = pred->getSuccessor(k);
3247 if (!successor->phisEmpty()) {
3248 numSuccessorsWithPhis++;
3251 MOZ_ASSERT(numSuccessorsWithPhis <= 1);
3252 #endif
3254 pred->setSuccessorWithPhis(*block, j);
3258 return true;
3261 #ifdef DEBUG
3262 static bool CheckSuccessorImpliesPredecessor(MBasicBlock* A, MBasicBlock* B) {
3263 // Assuming B = succ(A), verify A = pred(B).
3264 for (size_t i = 0; i < B->numPredecessors(); i++) {
3265 if (A == B->getPredecessor(i)) {
3266 return true;
3269 return false;
3272 static bool CheckPredecessorImpliesSuccessor(MBasicBlock* A, MBasicBlock* B) {
3273 // Assuming B = pred(A), verify A = succ(B).
3274 for (size_t i = 0; i < B->numSuccessors(); i++) {
3275 if (A == B->getSuccessor(i)) {
3276 return true;
3279 return false;
3282 // If you have issues with the usesBalance assertions, then define the macro
3283 // _DEBUG_CHECK_OPERANDS_USES_BALANCE to spew information on the error output.
3284 // This output can then be processed with the following awk script to filter and
3285 // highlight which checks are missing or if there is an unexpected operand /
3286 // use.
3288 // define _DEBUG_CHECK_OPERANDS_USES_BALANCE 1
3291 $ ./js 2>stderr.log
3292 $ gawk '
3293 /^==Check/ { context = ""; state = $2; }
3294 /^[a-z]/ { context = context "\n\t" $0; }
3295 /^==End/ {
3296 if (state == "Operand") {
3297 list[context] = list[context] - 1;
3298 } else if (state == "Use") {
3299 list[context] = list[context] + 1;
3302 END {
3303 for (ctx in list) {
3304 if (list[ctx] > 0) {
3305 print "Missing operand check", ctx, "\n"
3307 if (list[ctx] < 0) {
3308 print "Missing use check", ctx, "\n"
3311 }' < stderr.log
3315 static void CheckOperand(const MNode* consumer, const MUse* use,
3316 int32_t* usesBalance) {
3317 MOZ_ASSERT(use->hasProducer());
3318 MDefinition* producer = use->producer();
3319 MOZ_ASSERT(!producer->isDiscarded());
3320 MOZ_ASSERT(producer->block() != nullptr);
3321 MOZ_ASSERT(use->consumer() == consumer);
3322 # ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
3323 Fprinter print(stderr);
3324 print.printf("==Check Operand\n");
3325 use->producer()->dump(print);
3326 print.printf(" index: %zu\n", use->consumer()->indexOf(use));
3327 use->consumer()->dump(print);
3328 print.printf("==End\n");
3329 # endif
3330 --*usesBalance;
3333 static void CheckUse(const MDefinition* producer, const MUse* use,
3334 int32_t* usesBalance) {
3335 MOZ_ASSERT(!use->consumer()->block()->isDead());
3336 MOZ_ASSERT_IF(use->consumer()->isDefinition(),
3337 !use->consumer()->toDefinition()->isDiscarded());
3338 MOZ_ASSERT(use->consumer()->block() != nullptr);
3339 MOZ_ASSERT(use->consumer()->getOperand(use->index()) == producer);
3340 # ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
3341 Fprinter print(stderr);
3342 print.printf("==Check Use\n");
3343 use->producer()->dump(print);
3344 print.printf(" index: %zu\n", use->consumer()->indexOf(use));
3345 use->consumer()->dump(print);
3346 print.printf("==End\n");
3347 # endif
3348 ++*usesBalance;
3351 // To properly encode entry resume points, we have to ensure that all the
3352 // operands of the entry resume point are located before the safeInsertTop
3353 // location.
3354 static void AssertOperandsBeforeSafeInsertTop(MResumePoint* resume) {
3355 MBasicBlock* block = resume->block();
3356 if (block == block->graph().osrBlock()) {
3357 return;
3359 MInstruction* stop = block->safeInsertTop();
3360 for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
3361 MDefinition* def = resume->getOperand(i);
3362 if (def->block() != block) {
3363 continue;
3365 if (def->isPhi()) {
3366 continue;
3369 for (MInstructionIterator ins = block->begin(); true; ins++) {
3370 if (*ins == def) {
3371 break;
3373 MOZ_ASSERT(
3374 *ins != stop,
3375 "Resume point operand located after the safeInsertTop location");
3379 #endif // DEBUG
3381 void jit::AssertBasicGraphCoherency(MIRGraph& graph, bool force) {
3382 #ifdef DEBUG
3383 if (!JitOptions.fullDebugChecks && !force) {
3384 return;
3387 MOZ_ASSERT(graph.entryBlock()->numPredecessors() == 0);
3388 MOZ_ASSERT(graph.entryBlock()->phisEmpty());
3389 MOZ_ASSERT(!graph.entryBlock()->unreachable());
3391 if (MBasicBlock* osrBlock = graph.osrBlock()) {
3392 MOZ_ASSERT(osrBlock->numPredecessors() == 0);
3393 MOZ_ASSERT(osrBlock->phisEmpty());
3394 MOZ_ASSERT(osrBlock != graph.entryBlock());
3395 MOZ_ASSERT(!osrBlock->unreachable());
3398 if (MResumePoint* resumePoint = graph.entryResumePoint()) {
3399 MOZ_ASSERT(resumePoint->block() == graph.entryBlock());
3402 // Assert successor and predecessor list coherency.
3403 uint32_t count = 0;
3404 int32_t usesBalance = 0;
3405 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
3406 block++) {
3407 count++;
3409 MOZ_ASSERT(&block->graph() == &graph);
3410 MOZ_ASSERT(!block->isDead());
3411 MOZ_ASSERT_IF(block->outerResumePoint() != nullptr,
3412 block->entryResumePoint() != nullptr);
3414 for (size_t i = 0; i < block->numSuccessors(); i++) {
3415 MOZ_ASSERT(
3416 CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));
3419 for (size_t i = 0; i < block->numPredecessors(); i++) {
3420 MOZ_ASSERT(
3421 CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));
3424 if (MResumePoint* resume = block->entryResumePoint()) {
3425 MOZ_ASSERT(!resume->instruction());
3426 MOZ_ASSERT(resume->block() == *block);
3427 AssertOperandsBeforeSafeInsertTop(resume);
3429 if (MResumePoint* resume = block->outerResumePoint()) {
3430 MOZ_ASSERT(!resume->instruction());
3431 MOZ_ASSERT(resume->block() == *block);
3433 for (MResumePointIterator iter(block->resumePointsBegin());
3434 iter != block->resumePointsEnd(); iter++) {
3435 // We cannot yet assert that if there is no instruction then this is
3436 // the entry resume point, because we are still storing resume points
3437 // in the InlinePropertyTable.
3438 MOZ_ASSERT_IF(iter->instruction(),
3439 iter->instruction()->block() == *block);
3440 for (uint32_t i = 0, e = iter->numOperands(); i < e; i++) {
3441 CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
3444 for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
3445 MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
3446 MOZ_ASSERT(!phi->isRecoveredOnBailout());
3447 MOZ_ASSERT(phi->type() != MIRType::None);
3448 MOZ_ASSERT(phi->dependency() == nullptr);
3450 for (MDefinitionIterator iter(*block); iter; iter++) {
3451 MOZ_ASSERT(iter->block() == *block);
3452 MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType::None);
3453 MOZ_ASSERT(!iter->isDiscarded());
3454 MOZ_ASSERT_IF(iter->isStart(),
3455 *block == graph.entryBlock() || *block == graph.osrBlock());
3456 MOZ_ASSERT_IF(iter->isParameter(),
3457 *block == graph.entryBlock() || *block == graph.osrBlock());
3458 MOZ_ASSERT_IF(iter->isOsrEntry(), *block == graph.osrBlock());
3459 MOZ_ASSERT_IF(iter->isOsrValue(), *block == graph.osrBlock());
3461 // Assert that use chains are valid for this instruction.
3462 for (uint32_t i = 0, end = iter->numOperands(); i < end; i++) {
3463 CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
3465 for (MUseIterator use(iter->usesBegin()); use != iter->usesEnd(); use++) {
3466 CheckUse(*iter, *use, &usesBalance);
3469 if (iter->isInstruction()) {
3470 if (MResumePoint* resume = iter->toInstruction()->resumePoint()) {
3471 MOZ_ASSERT(resume->instruction() == *iter);
3472 MOZ_ASSERT(resume->block() == *block);
3473 MOZ_ASSERT(resume->block()->entryResumePoint() != nullptr);
3477 if (iter->isRecoveredOnBailout()) {
3478 MOZ_ASSERT(!iter->hasLiveDefUses());
3482 // The control instruction is not visited by the MDefinitionIterator.
3483 MControlInstruction* control = block->lastIns();
3484 MOZ_ASSERT(control->block() == *block);
3485 MOZ_ASSERT(!control->hasUses());
3486 MOZ_ASSERT(control->type() == MIRType::None);
3487 MOZ_ASSERT(!control->isDiscarded());
3488 MOZ_ASSERT(!control->isRecoveredOnBailout());
3489 MOZ_ASSERT(control->resumePoint() == nullptr);
3490 for (uint32_t i = 0, end = control->numOperands(); i < end; i++) {
3491 CheckOperand(control, control->getUseFor(i), &usesBalance);
3493 for (size_t i = 0; i < control->numSuccessors(); i++) {
3494 MOZ_ASSERT(control->getSuccessor(i));
3498 // In case of issues, see the _DEBUG_CHECK_OPERANDS_USES_BALANCE macro above.
3499 MOZ_ASSERT(usesBalance <= 0, "More use checks than operand checks");
3500 MOZ_ASSERT(usesBalance >= 0, "More operand checks than use checks");
3501 MOZ_ASSERT(graph.numBlocks() == count);
3502 #endif
3505 #ifdef DEBUG
3506 static void AssertReversePostorder(MIRGraph& graph) {
3507 // Check that every block is visited after all its predecessors (except
3508 // backedges).
3509 for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd();
3510 ++iter) {
3511 MBasicBlock* block = *iter;
3512 MOZ_ASSERT(!block->isMarked());
3514 for (size_t i = 0; i < block->numPredecessors(); i++) {
3515 MBasicBlock* pred = block->getPredecessor(i);
3516 if (!pred->isMarked()) {
3517 MOZ_ASSERT(pred->isLoopBackedge());
3518 MOZ_ASSERT(block->backedge() == pred);
3522 block->mark();
3525 graph.unmarkBlocks();
3527 #endif
3529 #ifdef DEBUG
3530 static void AssertDominatorTree(MIRGraph& graph) {
3531 // Check dominators.
3533 MOZ_ASSERT(graph.entryBlock()->immediateDominator() == graph.entryBlock());
3534 if (MBasicBlock* osrBlock = graph.osrBlock()) {
3535 MOZ_ASSERT(osrBlock->immediateDominator() == osrBlock);
3536 } else {
3537 MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
3540 size_t i = graph.numBlocks();
3541 size_t totalNumDominated = 0;
3542 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
3543 block++) {
3544 MOZ_ASSERT(block->dominates(*block));
3546 MBasicBlock* idom = block->immediateDominator();
3547 MOZ_ASSERT(idom->dominates(*block));
3548 MOZ_ASSERT(idom == *block || idom->id() < block->id());
3550 if (idom == *block) {
3551 totalNumDominated += block->numDominated();
3552 } else {
3553 bool foundInParent = false;
3554 for (size_t j = 0; j < idom->numImmediatelyDominatedBlocks(); j++) {
3555 if (idom->getImmediatelyDominatedBlock(j) == *block) {
3556 foundInParent = true;
3557 break;
3560 MOZ_ASSERT(foundInParent);
3563 size_t numDominated = 1;
3564 for (size_t j = 0; j < block->numImmediatelyDominatedBlocks(); j++) {
3565 MBasicBlock* dom = block->getImmediatelyDominatedBlock(j);
3566 MOZ_ASSERT(block->dominates(dom));
3567 MOZ_ASSERT(dom->id() > block->id());
3568 MOZ_ASSERT(dom->immediateDominator() == *block);
3570 numDominated += dom->numDominated();
3572 MOZ_ASSERT(block->numDominated() == numDominated);
3573 MOZ_ASSERT(block->numDominated() <= i);
3574 MOZ_ASSERT(block->numSuccessors() != 0 || block->numDominated() == 1);
3575 i--;
3577 MOZ_ASSERT(i == 0);
3578 MOZ_ASSERT(totalNumDominated == graph.numBlocks());
3580 #endif
3582 void jit::AssertGraphCoherency(MIRGraph& graph, bool force) {
3583 #ifdef DEBUG
3584 if (!JitOptions.checkGraphConsistency) {
3585 return;
3587 if (!JitOptions.fullDebugChecks && !force) {
3588 return;
3590 AssertBasicGraphCoherency(graph, force);
3591 AssertReversePostorder(graph);
3592 #endif
3595 #ifdef DEBUG
3596 static bool IsResumableMIRType(MIRType type) {
3597 // see CodeGeneratorShared::encodeAllocation
3598 switch (type) {
3599 case MIRType::Undefined:
3600 case MIRType::Null:
3601 case MIRType::Boolean:
3602 case MIRType::Int32:
3603 case MIRType::Double:
3604 case MIRType::Float32:
3605 case MIRType::String:
3606 case MIRType::Symbol:
3607 case MIRType::BigInt:
3608 case MIRType::Object:
3609 case MIRType::Shape:
3610 case MIRType::MagicOptimizedOut:
3611 case MIRType::MagicUninitializedLexical:
3612 case MIRType::MagicIsConstructing:
3613 case MIRType::Value:
3614 case MIRType::Simd128:
3615 return true;
3617 case MIRType::MagicHole:
3618 case MIRType::None:
3619 case MIRType::Slots:
3620 case MIRType::Elements:
3621 case MIRType::Pointer:
3622 case MIRType::Int64:
3623 case MIRType::WasmAnyRef:
3624 case MIRType::StackResults:
3625 case MIRType::IntPtr:
3626 return false;
3628 MOZ_CRASH("Unknown MIRType.");
3631 static void AssertResumableOperands(MNode* node) {
3632 for (size_t i = 0, e = node->numOperands(); i < e; ++i) {
3633 MDefinition* op = node->getOperand(i);
3634 if (op->isRecoveredOnBailout()) {
3635 continue;
3637 MOZ_ASSERT(IsResumableMIRType(op->type()),
3638 "Resume point cannot encode its operands");
3642 static void AssertIfResumableInstruction(MDefinition* def) {
3643 if (!def->isRecoveredOnBailout()) {
3644 return;
3646 AssertResumableOperands(def);
3649 static void AssertResumePointDominatedByOperands(MResumePoint* resume) {
3650 for (size_t i = 0, e = resume->numOperands(); i < e; ++i) {
3651 MDefinition* op = resume->getOperand(i);
3652 MOZ_ASSERT(op->block()->dominates(resume->block()),
3653 "Resume point is not dominated by its operands");
3656 #endif // DEBUG
3658 void jit::AssertExtendedGraphCoherency(MIRGraph& graph, bool underValueNumberer,
3659 bool force) {
3660   // Checks basic graph coherency, plus conditions that do not hold
3661   // immediately after graph construction (such as the fact that critical
3662   // edges have been split).
3664 #ifdef DEBUG
3665 if (!JitOptions.checkGraphConsistency) {
3666 return;
3668 if (!JitOptions.fullDebugChecks && !force) {
3669 return;
3672 AssertGraphCoherency(graph, force);
3674 AssertDominatorTree(graph);
3676 DebugOnly<uint32_t> idx = 0;
3677 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
3678 block++) {
3679 MOZ_ASSERT(block->id() == idx);
3680 ++idx;
3682 // No critical edges:
3683 if (block->numSuccessors() > 1) {
3684 for (size_t i = 0; i < block->numSuccessors(); i++) {
3685 MOZ_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
3689 if (block->isLoopHeader()) {
3690 if (underValueNumberer && block->numPredecessors() == 3) {
3691 // Fixup block.
3692 MOZ_ASSERT(block->getPredecessor(1)->numPredecessors() == 0);
3693 MOZ_ASSERT(graph.osrBlock(),
3694 "Fixup blocks should only exists if we have an osr block.");
3695 } else {
3696 MOZ_ASSERT(block->numPredecessors() == 2);
3698 MBasicBlock* backedge = block->backedge();
3699 MOZ_ASSERT(backedge->id() >= block->id());
3700 MOZ_ASSERT(backedge->numSuccessors() == 1);
3701 MOZ_ASSERT(backedge->getSuccessor(0) == *block);
3704 if (!block->phisEmpty()) {
3705 for (size_t i = 0; i < block->numPredecessors(); i++) {
3706 MBasicBlock* pred = block->getPredecessor(i);
3707 MOZ_ASSERT(pred->successorWithPhis() == *block);
3708 MOZ_ASSERT(pred->positionInPhiSuccessor() == i);
3712 uint32_t successorWithPhis = 0;
3713 for (size_t i = 0; i < block->numSuccessors(); i++) {
3714 if (!block->getSuccessor(i)->phisEmpty()) {
3715 successorWithPhis++;
3719 MOZ_ASSERT(successorWithPhis <= 1);
3720 MOZ_ASSERT((successorWithPhis != 0) ==
3721 (block->successorWithPhis() != nullptr));
3723 // Verify that phi operands dominate the corresponding CFG predecessor
3724 // edges.
3725 for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
3726 iter != end; ++iter) {
3727 MPhi* phi = *iter;
3728 for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
3729 MOZ_ASSERT(
3730 phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
3731 "Phi input is not dominated by its operand");
3735 // Verify that instructions are dominated by their operands.
3736 for (MInstructionIterator iter(block->begin()), end(block->end());
3737 iter != end; ++iter) {
3738 MInstruction* ins = *iter;
3739 for (size_t i = 0, e = ins->numOperands(); i < e; ++i) {
3740 MDefinition* op = ins->getOperand(i);
3741 MBasicBlock* opBlock = op->block();
3742 MOZ_ASSERT(opBlock->dominates(*block),
3743 "Instruction is not dominated by its operands");
3745 // If the operand is an instruction in the same block, check
3746 // that it comes first.
3747 if (opBlock == *block && !op->isPhi()) {
3748 MInstructionIterator opIter = block->begin(op->toInstruction());
3749 do {
3750 ++opIter;
3751 MOZ_ASSERT(opIter != block->end(),
3752 "Operand in same block as instruction does not precede");
3753 } while (*opIter != ins);
3756 AssertIfResumableInstruction(ins);
3757 if (MResumePoint* resume = ins->resumePoint()) {
3758 AssertResumePointDominatedByOperands(resume);
3759 AssertResumableOperands(resume);
3763 // Verify that the block resume points are dominated by their operands.
3764 if (MResumePoint* resume = block->entryResumePoint()) {
3765 AssertResumePointDominatedByOperands(resume);
3766 AssertResumableOperands(resume);
3768 if (MResumePoint* resume = block->outerResumePoint()) {
3769 AssertResumePointDominatedByOperands(resume);
3770 AssertResumableOperands(resume);
3773 #endif
3776 struct BoundsCheckInfo {
3777 MBoundsCheck* check;
3778 uint32_t validEnd;
3781 typedef HashMap<uint32_t, BoundsCheckInfo, DefaultHasher<uint32_t>,
3782 JitAllocPolicy>
3783 BoundsCheckMap;
3785 // Compute a hash for bounds checks which ignores constant offsets in the index.
3786 static HashNumber BoundsCheckHashIgnoreOffset(MBoundsCheck* check) {
3787 SimpleLinearSum indexSum = ExtractLinearSum(check->index());
3788 uintptr_t index = indexSum.term ? uintptr_t(indexSum.term) : 0;
3789 uintptr_t length = uintptr_t(check->length());
3790 return index ^ length;
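// Illustrative sketch (added; not a comment from the original source): two
// checks whose indexes differ only by a constant offset hash identically:
//   MBoundsCheck(i + 1, len)  -> ExtractLinearSum gives {term: i, constant: 1}
//   MBoundsCheck(i + 5, len)  -> ExtractLinearSum gives {term: i, constant: 5}
// Both hash to uintptr_t(i) ^ uintptr_t(len), so a lookup for the second
// check finds the first one, making them candidates for coalescing below.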
3793 static MBoundsCheck* FindDominatingBoundsCheck(BoundsCheckMap& checks,
3794 MBoundsCheck* check,
3795 size_t index) {
3796   // Since we are traversing the dominator tree in pre-order, when we
3797   // are looking at the |index|-th block, the next numDominated() blocks
3798   // we traverse are precisely the set of blocks that it dominates.
3800   // So a check recorded while visiting the |index|-th block is visible in
3801   // every block whose traversal index lies in
3802   // [index, index + check->block()->numDominated()), and is invalid after.
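  // Worked example (added, with assumed numbering): if the pre-order
  // traversal visits B0 B1 B2 B3 B4 and B1 dominates B1, B2 and B3
  // (numDominated() == 3, counting itself), a check recorded at index 1 gets
  // validEnd == 1 + 3 == 4: it is usable while visiting B1..B3 and treated
  // as stale once we reach B4.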
3803 HashNumber hash = BoundsCheckHashIgnoreOffset(check);
3804 BoundsCheckMap::Ptr p = checks.lookup(hash);
3805 if (!p || index >= p->value().validEnd) {
3806 // We didn't find a dominating bounds check.
3807 BoundsCheckInfo info;
3808 info.check = check;
3809 info.validEnd = index + check->block()->numDominated();
3811 if (!checks.put(hash, info)) return nullptr;
3813 return check;
3816 return p->value().check;
3819 static MathSpace ExtractMathSpace(MDefinition* ins) {
3820 MOZ_ASSERT(ins->isAdd() || ins->isSub());
3821 MBinaryArithInstruction* arith = nullptr;
3822 if (ins->isAdd()) {
3823 arith = ins->toAdd();
3824 } else {
3825 arith = ins->toSub();
3827 switch (arith->truncateKind()) {
3828 case TruncateKind::NoTruncate:
3829 case TruncateKind::TruncateAfterBailouts:
3830       // TruncateAfterBailouts is treated as infinite space because the
3831       // LinearSum will effectively remove the bailout check.
3832 return MathSpace::Infinite;
3833 case TruncateKind::IndirectTruncate:
3834 case TruncateKind::Truncate:
3835 return MathSpace::Modulo;
3837 MOZ_CRASH("Unknown TruncateKind");
3840 static bool MonotoneAdd(int32_t lhs, int32_t rhs) {
3841 return (lhs >= 0 && rhs >= 0) || (lhs <= 0 && rhs <= 0);
3844 static bool MonotoneSub(int32_t lhs, int32_t rhs) {
3845 return (lhs >= 0 && rhs <= 0) || (lhs <= 0 && rhs >= 0);
3848 // Extract a linear sum from ins, if possible (otherwise giving the
3849 // sum 'ins + 0').
3850 SimpleLinearSum jit::ExtractLinearSum(MDefinition* ins, MathSpace space,
3851 int32_t recursionDepth) {
3852 const int32_t SAFE_RECURSION_LIMIT = 100;
3853 if (recursionDepth > SAFE_RECURSION_LIMIT) {
3854 return SimpleLinearSum(ins, 0);
3857 // Unwrap Int32ToIntPtr. This instruction only changes the representation
3858 // (int32_t to intptr_t) without affecting the value.
3859 if (ins->isInt32ToIntPtr()) {
3860 ins = ins->toInt32ToIntPtr()->input();
3863 if (ins->isBeta()) {
3864 ins = ins->getOperand(0);
3867 MOZ_ASSERT(!ins->isInt32ToIntPtr());
3869 if (ins->type() != MIRType::Int32) {
3870 return SimpleLinearSum(ins, 0);
3873 if (ins->isConstant()) {
3874 return SimpleLinearSum(nullptr, ins->toConstant()->toInt32());
3877 if (!ins->isAdd() && !ins->isSub()) {
3878 return SimpleLinearSum(ins, 0);
3881   // Only allow math which is in the same space.
3882 MathSpace insSpace = ExtractMathSpace(ins);
3883 if (space == MathSpace::Unknown) {
3884 space = insSpace;
3885 } else if (space != insSpace) {
3886 return SimpleLinearSum(ins, 0);
3888 MOZ_ASSERT(space == MathSpace::Modulo || space == MathSpace::Infinite);
3890 MDefinition* lhs = ins->getOperand(0);
3891 MDefinition* rhs = ins->getOperand(1);
3892 if (lhs->type() != MIRType::Int32 || rhs->type() != MIRType::Int32) {
3893 return SimpleLinearSum(ins, 0);
3896 // Extract linear sums of each operand.
3897 SimpleLinearSum lsum = ExtractLinearSum(lhs, space, recursionDepth + 1);
3898 SimpleLinearSum rsum = ExtractLinearSum(rhs, space, recursionDepth + 1);
3900   // LinearSum only considers a single term operand; if both sides have
3901   // terms, ignore the extracted linear sums.
3902 if (lsum.term && rsum.term) {
3903 return SimpleLinearSum(ins, 0);
3906 // Check if this is of the form <SUM> + n or n + <SUM>.
3907 if (ins->isAdd()) {
3908 int32_t constant;
3909 if (space == MathSpace::Modulo) {
3910 constant = uint32_t(lsum.constant) + uint32_t(rsum.constant);
3911 } else if (!SafeAdd(lsum.constant, rsum.constant, &constant) ||
3912 !MonotoneAdd(lsum.constant, rsum.constant)) {
3913 return SimpleLinearSum(ins, 0);
3915 return SimpleLinearSum(lsum.term ? lsum.term : rsum.term, constant);
3918 MOZ_ASSERT(ins->isSub());
3919 // Check if this is of the form <SUM> - n.
3920 if (lsum.term) {
3921 int32_t constant;
3922 if (space == MathSpace::Modulo) {
3923 constant = uint32_t(lsum.constant) - uint32_t(rsum.constant);
3924 } else if (!SafeSub(lsum.constant, rsum.constant, &constant) ||
3925 !MonotoneSub(lsum.constant, rsum.constant)) {
3926 return SimpleLinearSum(ins, 0);
3928 return SimpleLinearSum(lsum.term, constant);
3931   // Ignore anything of the form n - <SUM>.
3932 return SimpleLinearSum(ins, 0);
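// Illustrative walkthrough (added; not from the original source): for MIR
// computing (x + 3) - 1 within a single MathSpace,
//   ExtractLinearSum(x + 3) == {term: x, constant: 3}
//   ExtractLinearSum(1)     == {term: nullptr, constant: 1}
// and the MSub case above combines them into {term: x, constant: 2}, i.e.
// the whole expression is recognized as x + 2.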
3935 // Extract a linear inequality holding when a boolean test goes in the
3936 // specified direction, of the form 'lhs + lhsN <= rhs' (or >=).
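// Illustrative example (added; not from the original source): for a test on
// |i + 1 < n| taken in the TRUE_BRANCH direction, the extracted sums are
// {i, 1} and {n, 0}; JSOp::Lt is normalized via x < y ==> x + 1 <= y, so the
// result is the inequality i + 2 <= n with *plessEqual == true.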
3937 bool jit::ExtractLinearInequality(MTest* test, BranchDirection direction,
3938 SimpleLinearSum* plhs, MDefinition** prhs,
3939 bool* plessEqual) {
3940 if (!test->getOperand(0)->isCompare()) {
3941 return false;
3944 MCompare* compare = test->getOperand(0)->toCompare();
3946 MDefinition* lhs = compare->getOperand(0);
3947 MDefinition* rhs = compare->getOperand(1);
3949 // TODO: optimize Compare_UInt32
3950 if (!compare->isInt32Comparison()) {
3951 return false;
3954 MOZ_ASSERT(lhs->type() == MIRType::Int32);
3955 MOZ_ASSERT(rhs->type() == MIRType::Int32);
3957 JSOp jsop = compare->jsop();
3958 if (direction == FALSE_BRANCH) {
3959 jsop = NegateCompareOp(jsop);
3962 SimpleLinearSum lsum = ExtractLinearSum(lhs);
3963 SimpleLinearSum rsum = ExtractLinearSum(rhs);
3965 if (!SafeSub(lsum.constant, rsum.constant, &lsum.constant)) {
3966 return false;
3969 // Normalize operations to use <= or >=.
3970 switch (jsop) {
3971 case JSOp::Le:
3972 *plessEqual = true;
3973 break;
3974 case JSOp::Lt:
3975 /* x < y ==> x + 1 <= y */
3976 if (!SafeAdd(lsum.constant, 1, &lsum.constant)) {
3977 return false;
3979 *plessEqual = true;
3980 break;
3981 case JSOp::Ge:
3982 *plessEqual = false;
3983 break;
3984 case JSOp::Gt:
3985 /* x > y ==> x - 1 >= y */
3986 if (!SafeSub(lsum.constant, 1, &lsum.constant)) {
3987 return false;
3989 *plessEqual = false;
3990 break;
3991 default:
3992 return false;
3995 *plhs = lsum;
3996 *prhs = rsum.term;
3998 return true;
4001 static bool TryEliminateBoundsCheck(BoundsCheckMap& checks, size_t blockIndex,
4002 MBoundsCheck* dominated, bool* eliminated) {
4003 MOZ_ASSERT(!*eliminated);
4005 // Replace all uses of the bounds check with the actual index.
4006 // This is (a) necessary, because we can coalesce two different
4007   // bounds checks and would otherwise use the wrong index, and
4008   // (b) helpful for register allocation. Note that this is safe since
4009 // no other pass after bounds check elimination moves instructions.
4010 dominated->replaceAllUsesWith(dominated->index());
4012 if (!dominated->isMovable()) {
4013 return true;
4016 if (!dominated->fallible()) {
4017 return true;
4020 MBoundsCheck* dominating =
4021 FindDominatingBoundsCheck(checks, dominated, blockIndex);
4022 if (!dominating) {
4023 return false;
4026 if (dominating == dominated) {
4027 // We didn't find a dominating bounds check.
4028 return true;
4031 // We found two bounds checks with the same hash number, but we still have
4032 // to make sure the lengths and index terms are equal.
4033 if (dominating->length() != dominated->length()) {
4034 return true;
4037 SimpleLinearSum sumA = ExtractLinearSum(dominating->index());
4038 SimpleLinearSum sumB = ExtractLinearSum(dominated->index());
4040 // Both terms should be nullptr or the same definition.
4041 if (sumA.term != sumB.term) {
4042 return true;
4045 // This bounds check is redundant.
4046 *eliminated = true;
4048 // Normalize the ranges according to the constant offsets in the two indexes.
4049 int32_t minimumA, maximumA, minimumB, maximumB;
4050 if (!SafeAdd(sumA.constant, dominating->minimum(), &minimumA) ||
4051 !SafeAdd(sumA.constant, dominating->maximum(), &maximumA) ||
4052 !SafeAdd(sumB.constant, dominated->minimum(), &minimumB) ||
4053 !SafeAdd(sumB.constant, dominated->maximum(), &maximumB)) {
4054 return false;
4057 // Update the dominating check to cover both ranges, denormalizing the
4058 // result per the constant offset in the index.
4059 int32_t newMinimum, newMaximum;
4060 if (!SafeSub(std::min(minimumA, minimumB), sumA.constant, &newMinimum) ||
4061 !SafeSub(std::max(maximumA, maximumB), sumA.constant, &newMaximum)) {
4062 return false;
4065 dominating->setMinimum(newMinimum);
4066 dominating->setMaximum(newMaximum);
4067 dominating->setBailoutKind(BailoutKind::HoistBoundsCheck);
4069 return true;
4072 // Eliminate checks which are redundant given each other or other instructions.
4074 // A bounds check is considered redundant if it's dominated by another bounds
4075 // check with the same length and the indexes differ by only a constant amount.
4076 // In this case we eliminate the redundant bounds check and update the other one
4077 // to cover the ranges of both checks.
4079 // Bounds checks are added to a hash map and since the hash function ignores
4080 // differences in constant offset, this offers a fast way to find redundant
4081 // checks.
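// Illustrative sketch (added; not from the original source): given
//   1: MBoundsCheck(i + 1, len)   (dominates instruction 2)
//   2: MBoundsCheck(i + 3, len)
// check 2 is discarded and check 1 is widened so that its [minimum, maximum]
// offset range grows from [0, 0] to [0, 2] relative to its own index i + 1,
// covering the accesses of both checks.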
4082 bool jit::EliminateRedundantChecks(MIRGraph& graph) {
4083 BoundsCheckMap checks(graph.alloc());
4085 // Stack for pre-order CFG traversal.
4086 Vector<MBasicBlock*, 1, JitAllocPolicy> worklist(graph.alloc());
4088 // The index of the current block in the CFG traversal.
4089 size_t index = 0;
4091 // Add all self-dominating blocks to the worklist.
4092 // This includes all roots. Order does not matter.
4093 for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
4094 MBasicBlock* block = *i;
4095 if (block->immediateDominator() == block) {
4096 if (!worklist.append(block)) {
4097 return false;
4102 // Starting from each self-dominating block, traverse the CFG in pre-order.
4103 while (!worklist.empty()) {
4104 MBasicBlock* block = worklist.popCopy();
4107     // Push all immediately dominated blocks onto the worklist (LIFO order).
4107 if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
4108 block->immediatelyDominatedBlocksEnd())) {
4109 return false;
4112 for (MDefinitionIterator iter(block); iter;) {
4113 MDefinition* def = *iter++;
4115 if (!def->isBoundsCheck()) {
4116 continue;
4118 auto* boundsCheck = def->toBoundsCheck();
4120 bool eliminated = false;
4121 if (!TryEliminateBoundsCheck(checks, index, boundsCheck, &eliminated)) {
4122 return false;
4125 if (eliminated) {
4126 block->discard(boundsCheck);
4129 index++;
4132 MOZ_ASSERT(index == graph.numBlocks());
4134 return true;
4137 static bool ShapeGuardIsRedundant(MGuardShape* guard,
4138 const MDefinition* storeObject,
4139 const Shape* storeShape) {
4140 const MDefinition* guardObject = guard->object()->skipObjectGuards();
4141 if (guardObject != storeObject) {
4142 JitSpew(JitSpew_RedundantShapeGuards, "SKIP: different objects (%d vs %d)",
4143 guardObject->id(), storeObject->id());
4144 return false;
4147 const Shape* guardShape = guard->shape();
4148 if (guardShape != storeShape) {
4149 JitSpew(JitSpew_RedundantShapeGuards, "SKIP: different shapes");
4150 return false;
4153 return true;
4156 // Eliminate shape guards which are redundant given other instructions.
4158 // A shape guard is redundant if we can prove that the object being
4159 // guarded already has the correct shape. The conditions for doing so
4160 // are as follows:
4162 // 1. We can see the most recent change to the shape of this object.
4163 // (This can be an AddAndStoreSlot, an AllocateAndStoreSlot, or the
4164 //    creation of the object itself.)
4165 // 2. That mutation dominates the shape guard.
4166 // 3. The shape that was assigned at that point matches the shape
4167 // we expect.
4169 // If all of these conditions hold, then we can remove the shape guard.
4170 // In debug, we replace it with an AssertShape to help verify correctness.
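// Illustrative pattern (added; not from the original source), in the MIR
// notation used elsewhere in this file:
//   0: MNewPlainObject   (creates an object with shape S)
//   1: MGuardShape(0, S)
// Instruction 0 is the most recent shape mutation, it dominates the guard,
// and the shapes match, so guard 1 can be removed.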
4171 bool jit::EliminateRedundantShapeGuards(MIRGraph& graph) {
4172 JitSpew(JitSpew_RedundantShapeGuards, "Begin");
4174 for (ReversePostorderIterator block = graph.rpoBegin();
4175 block != graph.rpoEnd(); block++) {
4176 for (MInstructionIterator insIter(block->begin());
4177 insIter != block->end();) {
4178 MInstruction* ins = *insIter;
4179 insIter++;
4181 // Skip instructions that aren't shape guards.
4182 if (!ins->isGuardShape()) {
4183 continue;
4185 MGuardShape* guard = ins->toGuardShape();
4186 MDefinition* lastStore = guard->dependency();
4188 JitSpew(JitSpew_RedundantShapeGuards, "Visit shape guard %d",
4189 guard->id());
4190 JitSpewIndent spewIndent(JitSpew_RedundantShapeGuards);
4192 if (lastStore->isDiscarded() || lastStore->block()->isDead() ||
4193 !lastStore->block()->dominates(guard->block())) {
4194 JitSpew(JitSpew_RedundantShapeGuards,
4195 "SKIP: ins %d does not dominate block %d", lastStore->id(),
4196 guard->block()->id());
4197 continue;
4200 if (lastStore->isAddAndStoreSlot()) {
4201 auto* add = lastStore->toAddAndStoreSlot();
4202 auto* addObject = add->object()->skipObjectGuards();
4203 if (!ShapeGuardIsRedundant(guard, addObject, add->shape())) {
4204 continue;
4206 } else if (lastStore->isAllocateAndStoreSlot()) {
4207 auto* allocate = lastStore->toAllocateAndStoreSlot();
4208 auto* allocateObject = allocate->object()->skipObjectGuards();
4209 if (!ShapeGuardIsRedundant(guard, allocateObject, allocate->shape())) {
4210 continue;
4212 } else if (lastStore->isStart()) {
4213 // The guard doesn't depend on any other instruction that is modifying
4214 // the object operand, so we check the object operand directly.
4215 auto* obj = guard->object()->skipObjectGuards();
4217 const Shape* initialShape = nullptr;
4218 if (obj->isNewObject()) {
4219 auto* templateObject = obj->toNewObject()->templateObject();
4220 if (!templateObject) {
4221 JitSpew(JitSpew_RedundantShapeGuards, "SKIP: no template");
4222 continue;
4224 initialShape = templateObject->shape();
4225 } else if (obj->isNewPlainObject()) {
4226 initialShape = obj->toNewPlainObject()->shape();
4227 } else {
4228 JitSpew(JitSpew_RedundantShapeGuards,
4229 "SKIP: not NewObject or NewPlainObject (%d)", obj->id());
4230 continue;
4232 if (initialShape != guard->shape()) {
4233 JitSpew(JitSpew_RedundantShapeGuards, "SKIP: shapes don't match");
4234 continue;
4236 } else {
4237 JitSpew(JitSpew_RedundantShapeGuards,
4238 "SKIP: Last store not supported (%d)", lastStore->id());
4239 continue;
4242 #ifdef DEBUG
4243 if (!graph.alloc().ensureBallast()) {
4244 return false;
4246 auto* assert = MAssertShape::New(graph.alloc(), guard->object(),
4247 const_cast<Shape*>(guard->shape()));
4248 guard->block()->insertBefore(guard, assert);
4249 #endif
4251 JitSpew(JitSpew_RedundantShapeGuards, "SUCCESS: Removing shape guard %d",
4252 guard->id());
4253 guard->replaceAllUsesWith(guard->input());
4254 guard->block()->discard(guard);
4258 return true;
4261 static bool TryEliminateGCBarriersForAllocation(TempAllocator& alloc,
4262 MInstruction* allocation) {
4263 MOZ_ASSERT(allocation->type() == MIRType::Object);
4265 JitSpew(JitSpew_RedundantGCBarriers, "Analyzing allocation %s",
4266 allocation->opName());
4268 MBasicBlock* block = allocation->block();
4269 MInstructionIterator insIter(block->begin(allocation));
4271 // Skip `allocation`.
4272 MOZ_ASSERT(*insIter == allocation);
4273 insIter++;
4275 // Try to optimize the other instructions in the block.
4276 while (insIter != block->end()) {
4277 MInstruction* ins = *insIter;
4278 insIter++;
4279 switch (ins->op()) {
4280 case MDefinition::Opcode::Constant:
4281 case MDefinition::Opcode::Box:
4282 case MDefinition::Opcode::Unbox:
4283 case MDefinition::Opcode::AssertCanElidePostWriteBarrier:
4284 // These instructions can't trigger GC or affect this analysis in other
4285 // ways.
4286 break;
4287 case MDefinition::Opcode::StoreFixedSlot: {
4288 auto* store = ins->toStoreFixedSlot();
4289 if (store->object() != allocation) {
4290 JitSpew(JitSpew_RedundantGCBarriers,
4291 "Stopped at StoreFixedSlot for other object");
4292 return true;
4294 store->setNeedsBarrier(false);
4295 JitSpew(JitSpew_RedundantGCBarriers, "Elided StoreFixedSlot barrier");
4296 break;
4298 case MDefinition::Opcode::PostWriteBarrier: {
4299 auto* barrier = ins->toPostWriteBarrier();
4300 if (barrier->object() != allocation) {
4301 JitSpew(JitSpew_RedundantGCBarriers,
4302 "Stopped at PostWriteBarrier for other object");
4303 return true;
4305 #ifdef DEBUG
4306 if (!alloc.ensureBallast()) {
4307 return false;
4309 MDefinition* value = barrier->value();
4310 if (value->type() != MIRType::Value) {
4311 value = MBox::New(alloc, value);
4312 block->insertBefore(barrier, value->toInstruction());
4314 auto* assert =
4315 MAssertCanElidePostWriteBarrier::New(alloc, allocation, value);
4316 block->insertBefore(barrier, assert);
4317 #endif
4318 block->discard(barrier);
4319 JitSpew(JitSpew_RedundantGCBarriers, "Elided PostWriteBarrier");
4320 break;
4322 default:
4323 JitSpew(JitSpew_RedundantGCBarriers,
4324 "Stopped at unsupported instruction %s", ins->opName());
4325 return true;
4329 return true;
4332 bool jit::EliminateRedundantGCBarriers(MIRGraph& graph) {
4333 // Peephole optimization for the following pattern:
4335 // 0: MNewCallObject
4336 // 1: MStoreFixedSlot(0, ...)
4337 // 2: MStoreFixedSlot(0, ...)
4338 // 3: MPostWriteBarrier(0, ...)
4340 // If the instructions immediately following the allocation instruction can't
4341 // trigger GC and we are storing to the new object's slots, we can elide the
4342 // pre-barrier.
4344 // We also eliminate the post barrier and (in debug builds) replace it with an
4345 // assertion.
4347 // See also the similar optimizations in WarpBuilder::buildCallObject.
4349 JitSpew(JitSpew_RedundantGCBarriers, "Begin");
4351 for (ReversePostorderIterator block = graph.rpoBegin();
4352 block != graph.rpoEnd(); block++) {
4353 for (MInstructionIterator insIter(block->begin());
4354 insIter != block->end();) {
4355 MInstruction* ins = *insIter;
4356 insIter++;
4358 if (ins->isNewCallObject()) {
4359 if (!TryEliminateGCBarriersForAllocation(graph.alloc(), ins)) {
4360 return false;
4366 return true;
4369 bool jit::MarkLoadsUsedAsPropertyKeys(MIRGraph& graph) {
4370 // When a string is used as a property key, or as the key for a Map or Set, we
4371 // require it to be atomized. To avoid repeatedly atomizing the same string,
4372 // this analysis looks for cases where we are loading a value from the slot of
4373 // an object (which includes access to global variables and global lexicals)
4374 // and using it as a property key, and marks those loads. During codegen,
4375 // marked loads will check whether the value loaded is a non-atomized string.
4376 // If it is, we will atomize the string and update the stored value, ensuring
4377 // that future loads from the same slot will not have to atomize again.
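  // Illustrative example (added, with an assumed snippet):
  //   for (var i = 0; i < n; i++) { sum += data[key]; }
  // where |key| is a global holding a string: the slot load of |key| is
  // marked, so the string is atomized once and the atom is written back to
  // the slot, instead of being re-atomized on every property access.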
4378 JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys, "Begin");
4380 for (ReversePostorderIterator block = graph.rpoBegin();
4381 block != graph.rpoEnd(); block++) {
4382 for (MInstructionIterator insIter(block->begin());
4383 insIter != block->end();) {
4384 MInstruction* ins = *insIter;
4385 insIter++;
4387 MDefinition* idVal = nullptr;
4388 if (ins->isGetPropertyCache()) {
4389 idVal = ins->toGetPropertyCache()->idval();
4390 } else if (ins->isHasOwnCache()) {
4391 idVal = ins->toHasOwnCache()->idval();
4392 } else if (ins->isSetPropertyCache()) {
4393 idVal = ins->toSetPropertyCache()->idval();
4394 } else if (ins->isGetPropSuperCache()) {
4395 idVal = ins->toGetPropSuperCache()->idval();
4396 } else if (ins->isMegamorphicLoadSlotByValue()) {
4397 idVal = ins->toMegamorphicLoadSlotByValue()->idVal();
4398 } else if (ins->isMegamorphicHasProp()) {
4399 idVal = ins->toMegamorphicHasProp()->idVal();
4400 } else if (ins->isMegamorphicSetElement()) {
4401 idVal = ins->toMegamorphicSetElement()->index();
4402 } else if (ins->isProxyGetByValue()) {
4403 idVal = ins->toProxyGetByValue()->idVal();
4404 } else if (ins->isProxyHasProp()) {
4405 idVal = ins->toProxyHasProp()->idVal();
4406 } else if (ins->isProxySetByValue()) {
4407 idVal = ins->toProxySetByValue()->idVal();
4408 } else if (ins->isIdToStringOrSymbol()) {
4409 idVal = ins->toIdToStringOrSymbol()->idVal();
4410 } else if (ins->isGuardSpecificAtom()) {
4411 idVal = ins->toGuardSpecificAtom()->input();
4412 } else if (ins->isToHashableString()) {
4413 idVal = ins->toToHashableString()->input();
4414 } else if (ins->isToHashableValue()) {
4415 idVal = ins->toToHashableValue()->input();
4416 } else if (ins->isMapObjectHasValueVMCall()) {
4417 idVal = ins->toMapObjectHasValueVMCall()->value();
4418 } else if (ins->isMapObjectGetValueVMCall()) {
4419 idVal = ins->toMapObjectGetValueVMCall()->value();
4420 } else if (ins->isSetObjectHasValueVMCall()) {
4421 idVal = ins->toSetObjectHasValueVMCall()->value();
4422 } else {
4423 continue;
4425 JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
4426 "Analyzing property access %s%d with idVal %s%d", ins->opName(),
4427 ins->id(), idVal->opName(), idVal->id());
4429 // Skip intermediate nodes.
4430 do {
4431 if (idVal->isLexicalCheck()) {
4432 idVal = idVal->toLexicalCheck()->input();
4433 JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
4434 "- Skipping lexical check. idVal is now %s%d",
4435 idVal->opName(), idVal->id());
4436 continue;
4438 if (idVal->isUnbox() && idVal->type() == MIRType::String) {
4439 idVal = idVal->toUnbox()->input();
4440 JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
4441 "- Skipping unbox. idVal is now %s%d", idVal->opName(),
4442 idVal->id());
4443 continue;
4445 break;
4446 } while (true);
4448 if (idVal->isLoadFixedSlot()) {
4449 JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
4450 "- SUCCESS: Marking fixed slot");
4451 idVal->toLoadFixedSlot()->setUsedAsPropertyKey();
4452 } else if (idVal->isLoadDynamicSlot()) {
4453 JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys,
4454 "- SUCCESS: Marking dynamic slot");
4455 idVal->toLoadDynamicSlot()->setUsedAsPropertyKey();
4456 } else {
4457 JitSpew(JitSpew_MarkLoadsUsedAsPropertyKeys, "- SKIP: %s not supported",
4458 idVal->opName());
4463 return true;
4466 static bool NeedsKeepAlive(MInstruction* slotsOrElements, MInstruction* use) {
4467 MOZ_ASSERT(slotsOrElements->type() == MIRType::Elements ||
4468 slotsOrElements->type() == MIRType::Slots);
4470 if (slotsOrElements->block() != use->block()) {
4471 return true;
4474 // Allocating a BigInt can GC, so we have to keep the object alive.
4475 if (use->type() == MIRType::BigInt) {
4476 return true;
4479 MBasicBlock* block = use->block();
4480 MInstructionIterator iter(block->begin(slotsOrElements));
4481 MOZ_ASSERT(*iter == slotsOrElements);
4482 ++iter;
4484 while (true) {
4485 if (*iter == use) {
4486 return false;
4489 switch (iter->op()) {
4490 case MDefinition::Opcode::Nop:
4491 case MDefinition::Opcode::Constant:
4492 case MDefinition::Opcode::KeepAliveObject:
4493 case MDefinition::Opcode::Unbox:
4494 case MDefinition::Opcode::LoadDynamicSlot:
4495 case MDefinition::Opcode::StoreDynamicSlot:
4496 case MDefinition::Opcode::LoadFixedSlot:
4497 case MDefinition::Opcode::StoreFixedSlot:
4498 case MDefinition::Opcode::LoadElement:
4499 case MDefinition::Opcode::LoadElementAndUnbox:
4500 case MDefinition::Opcode::LoadElementHole:
4501 case MDefinition::Opcode::StoreElement:
4502 case MDefinition::Opcode::StoreHoleValueElement:
4503 case MDefinition::Opcode::InitializedLength:
4504 case MDefinition::Opcode::ArrayLength:
4505 case MDefinition::Opcode::BoundsCheck:
4506 case MDefinition::Opcode::GuardElementNotHole:
4507 case MDefinition::Opcode::InArray:
4508 case MDefinition::Opcode::SpectreMaskIndex:
4509 case MDefinition::Opcode::DebugEnterGCUnsafeRegion:
4510 case MDefinition::Opcode::DebugLeaveGCUnsafeRegion:
4511 iter++;
4512 break;
4513 default:
4514 return true;
4518 MOZ_CRASH("Unreachable");
4521 bool jit::AddKeepAliveInstructions(MIRGraph& graph) {
4522 for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
4523 MBasicBlock* block = *i;
4525 for (MInstructionIterator insIter(block->begin()); insIter != block->end();
4526 insIter++) {
4527 MInstruction* ins = *insIter;
4528 if (ins->type() != MIRType::Elements && ins->type() != MIRType::Slots) {
4529 continue;
4532 MDefinition* ownerObject;
4533 switch (ins->op()) {
4534 case MDefinition::Opcode::Elements:
4535 case MDefinition::Opcode::ArrayBufferViewElements:
4536 MOZ_ASSERT(ins->numOperands() == 1);
4537 ownerObject = ins->getOperand(0);
4538 break;
4539 case MDefinition::Opcode::Slots:
4540 ownerObject = ins->toSlots()->object();
4541 break;
4542 default:
4543 MOZ_CRASH("Unexpected op");
4546 MOZ_ASSERT(ownerObject->type() == MIRType::Object);
4548 if (ownerObject->isConstant()) {
4549 // Constants are kept alive by other pointers, for instance
4550 // ImmGCPtr in JIT code.
4551 continue;
4554 for (MUseDefIterator uses(ins); uses; uses++) {
4555 MInstruction* use = uses.def()->toInstruction();
4557 if (use->isStoreElementHole()) {
4558 // StoreElementHole has an explicit object operand. If GVN
4559 // is disabled, we can get different unbox instructions with
4560 // the same object as input, so we check for that case.
4561 MOZ_ASSERT_IF(!use->toStoreElementHole()->object()->isUnbox() &&
4562 !ownerObject->isUnbox(),
4563 use->toStoreElementHole()->object() == ownerObject);
4564 continue;
4567 if (!NeedsKeepAlive(ins, use)) {
4568 #ifdef DEBUG
4569 // These two instructions don't start a GC unsafe region, because they
4570 // overwrite their elements register at the very start. This ensures
4571 // there's no invalidated elements value kept on the stack.
4572 if (use->isApplyArray() || use->isConstructArray()) {
4573 continue;
4576 if (!graph.alloc().ensureBallast()) {
4577 return false;
4580 // Enter a GC unsafe region while the elements/slots are on the stack.
4581 auto* enter = MDebugEnterGCUnsafeRegion::New(graph.alloc());
4582 use->block()->insertAfter(ins, enter);
4584 // Leave the region after the use.
4585 auto* leave = MDebugLeaveGCUnsafeRegion::New(graph.alloc());
4586 use->block()->insertAfter(use, leave);
4587 #endif
4588 continue;
4591 if (!graph.alloc().ensureBallast()) {
4592 return false;
4594 MKeepAliveObject* keepAlive =
4595 MKeepAliveObject::New(graph.alloc(), ownerObject);
4596 use->block()->insertAfter(use, keepAlive);
4601 return true;
4604 bool LinearSum::multiply(int32_t scale) {
4605 for (size_t i = 0; i < terms_.length(); i++) {
4606 if (!SafeMul(scale, terms_[i].scale, &terms_[i].scale)) {
4607 return false;
4610 return SafeMul(scale, constant_, &constant_);
4613 bool LinearSum::divide(uint32_t scale) {
4614 MOZ_ASSERT(scale > 0);
4616 for (size_t i = 0; i < terms_.length(); i++) {
4617 if (terms_[i].scale % scale != 0) {
4618 return false;
4621 if (constant_ % scale != 0) {
4622 return false;
4625 for (size_t i = 0; i < terms_.length(); i++) {
4626 terms_[i].scale /= scale;
4628 constant_ /= scale;
4630 return true;
4633 bool LinearSum::add(const LinearSum& other, int32_t scale /* = 1 */) {
4634 for (size_t i = 0; i < other.terms_.length(); i++) {
4635 int32_t newScale = scale;
4636 if (!SafeMul(scale, other.terms_[i].scale, &newScale)) {
4637 return false;
4639 if (!add(other.terms_[i].term, newScale)) {
4640 return false;
4643 int32_t newConstant = scale;
4644 if (!SafeMul(scale, other.constant_, &newConstant)) {
4645 return false;
4647 return add(newConstant);
4650 bool LinearSum::add(SimpleLinearSum other, int32_t scale) {
4651 if (other.term && !add(other.term, scale)) {
4652 return false;
4655 int32_t constant;
4656 if (!SafeMul(other.constant, scale, &constant)) {
4657 return false;
4660 return add(constant);
4663 bool LinearSum::add(MDefinition* term, int32_t scale) {
4664 MOZ_ASSERT(term);
4666 if (scale == 0) {
4667 return true;
4670 if (MConstant* termConst = term->maybeConstantValue()) {
4671 int32_t constant = termConst->toInt32();
4672 if (!SafeMul(constant, scale, &constant)) {
4673 return false;
4675 return add(constant);
4678 for (size_t i = 0; i < terms_.length(); i++) {
4679 if (term == terms_[i].term) {
4680 if (!SafeAdd(scale, terms_[i].scale, &terms_[i].scale)) {
4681 return false;
4683 if (terms_[i].scale == 0) {
4684 terms_[i] = terms_.back();
4685 terms_.popBack();
4687 return true;
4691 AutoEnterOOMUnsafeRegion oomUnsafe;
4692 if (!terms_.append(LinearTerm(term, scale))) {
4693 oomUnsafe.crash("LinearSum::add");
4696 return true;
4699 bool LinearSum::add(int32_t constant) {
4700 return SafeAdd(constant, constant_, &constant_);
4703 void LinearSum::dump(GenericPrinter& out) const {
4704 for (size_t i = 0; i < terms_.length(); i++) {
4705 int32_t scale = terms_[i].scale;
4706 int32_t id = terms_[i].term->id();
4707 MOZ_ASSERT(scale);
4708 if (scale > 0) {
4709 if (i) {
4710 out.printf("+");
4712 if (scale == 1) {
4713 out.printf("#%d", id);
4714 } else {
4715 out.printf("%d*#%d", scale, id);
4717 } else if (scale == -1) {
4718 out.printf("-#%d", id);
4719 } else {
4720 out.printf("%d*#%d", scale, id);
4723 if (constant_ > 0) {
4724 out.printf("+%d", constant_);
4725 } else if (constant_ < 0) {
4726 out.printf("%d", constant_);
4730 void LinearSum::dump() const {
4731 Fprinter out(stderr);
4732 dump(out);
4733 out.finish();
4736 MDefinition* jit::ConvertLinearSum(TempAllocator& alloc, MBasicBlock* block,
4737 const LinearSum& sum,
4738 BailoutKind bailoutKind) {
4739 MDefinition* def = nullptr;
4741 for (size_t i = 0; i < sum.numTerms(); i++) {
4742 LinearTerm term = sum.term(i);
4743 MOZ_ASSERT(!term.term->isConstant());
4744 if (term.scale == 1) {
4745 if (def) {
4746 def = MAdd::New(alloc, def, term.term, MIRType::Int32);
4747 def->setBailoutKind(bailoutKind);
4748 block->insertAtEnd(def->toInstruction());
4749 def->computeRange(alloc);
4750 } else {
4751 def = term.term;
4753 } else if (term.scale == -1) {
4754 if (!def) {
4755 def = MConstant::New(alloc, Int32Value(0));
4756 block->insertAtEnd(def->toInstruction());
4757 def->computeRange(alloc);
4759 def = MSub::New(alloc, def, term.term, MIRType::Int32);
4760 def->setBailoutKind(bailoutKind);
4761 block->insertAtEnd(def->toInstruction());
4762 def->computeRange(alloc);
4763 } else {
4764 MOZ_ASSERT(term.scale != 0);
4765 MConstant* factor = MConstant::New(alloc, Int32Value(term.scale));
4766 block->insertAtEnd(factor);
4767 MMul* mul = MMul::New(alloc, term.term, factor, MIRType::Int32);
4768 mul->setBailoutKind(bailoutKind);
4769 block->insertAtEnd(mul);
4770 mul->computeRange(alloc);
4771 if (def) {
4772 def = MAdd::New(alloc, def, mul, MIRType::Int32);
4773 def->setBailoutKind(bailoutKind);
4774 block->insertAtEnd(def->toInstruction());
4775 def->computeRange(alloc);
4776 } else {
4777 def = mul;
4782 if (!def) {
4783 def = MConstant::New(alloc, Int32Value(0));
4784 block->insertAtEnd(def->toInstruction());
4785 def->computeRange(alloc);
4788 return def;
4791 // Mark all the blocks that are in the loop with the given header.
4792 // Returns the number of blocks marked. Sets *canOsr to true if the loop is
4793 // reachable from both the normal entry and the OSR entry.
4794 size_t jit::MarkLoopBlocks(MIRGraph& graph, MBasicBlock* header, bool* canOsr) {
4795 #ifdef DEBUG
4796 for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
4797 i != e; ++i) {
4798 MOZ_ASSERT(!i->isMarked(), "Some blocks already marked");
4800 #endif
4802 MBasicBlock* osrBlock = graph.osrBlock();
4803 *canOsr = false;
4805 // The blocks are in RPO; start at the loop backedge, which marks the bottom
4806 // of the loop, and walk up until we get to the header. Loops may be
4807 // discontiguous, so we trace predecessors to determine which blocks are
4808 // actually part of the loop. The backedge is always part of the loop, and
4809 // so are its predecessors, transitively, up to the loop header or an OSR
4810 // entry.
4811 MBasicBlock* backedge = header->backedge();
4812 backedge->mark();
4813 size_t numMarked = 1;
4814 for (PostorderIterator i = graph.poBegin(backedge);; ++i) {
4815 MOZ_ASSERT(
4816 i != graph.poEnd(),
4817 "Reached the end of the graph while searching for the loop header");
4818 MBasicBlock* block = *i;
4819 // If we've reached the loop header, we're done.
4820 if (block == header) {
4821 break;
4823 // A block not marked by the time we reach it is not in the loop.
4824 if (!block->isMarked()) {
4825 continue;
4828 // This block is in the loop; trace to its predecessors.
4829 for (size_t p = 0, e = block->numPredecessors(); p != e; ++p) {
4830 MBasicBlock* pred = block->getPredecessor(p);
4831 if (pred->isMarked()) {
4832 continue;
4835 // Blocks dominated by the OSR entry are not part of the loop
4836 // (unless they aren't reachable from the normal entry).
4837 if (osrBlock && pred != header && osrBlock->dominates(pred) &&
4838 !osrBlock->dominates(header)) {
4839 *canOsr = true;
4840 continue;
4843 MOZ_ASSERT(pred->id() >= header->id() && pred->id() <= backedge->id(),
4844 "Loop block not between loop header and loop backedge");
4846 pred->mark();
4847 ++numMarked;
4849 // A nested loop may not exit back to the enclosing loop at its
4850 // bottom. If we just marked its header, then the whole nested loop
4851 // is part of the enclosing loop.
4852 if (pred->isLoopHeader()) {
4853 MBasicBlock* innerBackedge = pred->backedge();
4854 if (!innerBackedge->isMarked()) {
4855 // Mark its backedge so that we add all of its blocks to the
4856 // outer loop as we walk upwards.
4857 innerBackedge->mark();
4858 ++numMarked;
4860 // If the nested loop is not contiguous, we may have already
4861 // passed its backedge. If this happens, back up.
4862 if (innerBackedge->id() > block->id()) {
4863 i = graph.poBegin(innerBackedge);
4864 --i;
4871 // If there's no path connecting the header to the backedge, then this isn't
4872 // actually a loop. This can happen when the code starts with a loop but GVN
4873 // folds some branches away.
4874 if (!header->isMarked()) {
4875 jit::UnmarkLoopBlocks(graph, header);
4876 return 0;
4879 return numMarked;
4882 // Unmark all the blocks that are in the loop with the given header.
4883 void jit::UnmarkLoopBlocks(MIRGraph& graph, MBasicBlock* header) {
4884 MBasicBlock* backedge = header->backedge();
4885 for (ReversePostorderIterator i = graph.rpoBegin(header);; ++i) {
4886 MOZ_ASSERT(i != graph.rpoEnd(),
4887 "Reached the end of the graph while searching for the backedge");
4888 MBasicBlock* block = *i;
4889 if (block->isMarked()) {
4890 block->unmark();
4891 if (block == backedge) {
4892 break;
4897 #ifdef DEBUG
4898 for (ReversePostorderIterator i = graph.rpoBegin(), e = graph.rpoEnd();
4899 i != e; ++i) {
4900 MOZ_ASSERT(!i->isMarked(), "Not all blocks got unmarked");
4902 #endif
4905 bool jit::FoldLoadsWithUnbox(MIRGenerator* mir, MIRGraph& graph) {
4906 // This pass folds MLoadFixedSlot, MLoadDynamicSlot, MLoadElement instructions
4907 // followed by MUnbox into a single instruction. For LoadElement this allows
4908 // us to fuse the hole check with the type check for the unbox.
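  // Illustrative pattern (added; not from the original source):
  //   1: MLoadFixedSlot(obj, slot)    (produces a Value)
  //   2: MUnbox(1, Int32, Fallible)
  // is folded into
  //   1: MLoadFixedSlotAndUnbox(obj, slot, Fallible, Int32)
  // with bailout kind UnboxFolding, as implemented below.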
4910 for (MBasicBlockIterator block(graph.begin()); block != graph.end();
4911 block++) {
4912 if (mir->shouldCancel("FoldLoadsWithUnbox")) {
4913 return false;
4916 for (MInstructionIterator insIter(block->begin());
4917 insIter != block->end();) {
4918 MInstruction* ins = *insIter;
4919 insIter++;
4921 // We're only interested in loads producing a Value.
4922 if (!ins->isLoadFixedSlot() && !ins->isLoadDynamicSlot() &&
4923 !ins->isLoadElement()) {
4924 continue;
4926 if (ins->type() != MIRType::Value) {
4927 continue;
4930 MInstruction* load = ins;
4932 // Ensure there's a single def-use (ignoring resume points) and it's an
4933 // unbox. Unwrap MLexicalCheck because it's redundant if we have a
4934 // fallible unbox (checked below).
4935 MDefinition* defUse = load->maybeSingleDefUse();
4936 if (!defUse) {
4937 continue;
4939 MLexicalCheck* lexicalCheck = nullptr;
4940 if (defUse->isLexicalCheck()) {
4941 lexicalCheck = defUse->toLexicalCheck();
4942 defUse = lexicalCheck->maybeSingleDefUse();
4943 if (!defUse) {
4944 continue;
4947 if (!defUse->isUnbox()) {
4948 continue;
4951 // For now require the load and unbox to be in the same block. This isn't
4952 // strictly necessary but it's the common case and could prevent bailouts
4953 // when moving the unbox before a loop.
4954 MUnbox* unbox = defUse->toUnbox();
4955 if (unbox->block() != *block) {
4956 continue;
4958 MOZ_ASSERT_IF(lexicalCheck, lexicalCheck->block() == *block);
4960 MOZ_ASSERT(!IsMagicType(unbox->type()));
4962 // If this is a LoadElement or if we have a lexical check between the load
4963 // and unbox, we only support folding the load with a fallible unbox so
4964 // that we can eliminate the MagicValue check.
4965 if ((load->isLoadElement() || lexicalCheck) && !unbox->fallible()) {
4966 continue;
4969 // Combine the load and unbox into a single MIR instruction.
4970 if (!graph.alloc().ensureBallast()) {
4971 return false;
4974 MIRType type = unbox->type();
4975 MUnbox::Mode mode = unbox->mode();
4977 MInstruction* replacement;
4978 switch (load->op()) {
4979 case MDefinition::Opcode::LoadFixedSlot: {
4980 auto* loadIns = load->toLoadFixedSlot();
4981 replacement = MLoadFixedSlotAndUnbox::New(
4982 graph.alloc(), loadIns->object(), loadIns->slot(), mode, type,
4983 loadIns->usedAsPropertyKey());
4984 break;
4986 case MDefinition::Opcode::LoadDynamicSlot: {
4987 auto* loadIns = load->toLoadDynamicSlot();
4988 replacement = MLoadDynamicSlotAndUnbox::New(
4989 graph.alloc(), loadIns->slots(), loadIns->slot(), mode, type,
4990 loadIns->usedAsPropertyKey());
4991 break;
4993 case MDefinition::Opcode::LoadElement: {
4994 auto* loadIns = load->toLoadElement();
4995 MOZ_ASSERT(unbox->fallible());
4996 replacement = MLoadElementAndUnbox::New(
4997 graph.alloc(), loadIns->elements(), loadIns->index(), mode, type);
4998 break;
5000 default:
5001 MOZ_CRASH("Unexpected instruction");
5003 replacement->setBailoutKind(BailoutKind::UnboxFolding);
5005 block->insertBefore(load, replacement);
5006 unbox->replaceAllUsesWith(replacement);
5007 if (lexicalCheck) {
5008 lexicalCheck->replaceAllUsesWith(replacement);
5010 load->replaceAllUsesWith(replacement);
5012 if (lexicalCheck && *insIter == lexicalCheck) {
5013 insIter++;
5015 if (*insIter == unbox) {
5016 insIter++;
5018 block->discard(unbox);
5019 if (lexicalCheck) {
5020 block->discard(lexicalCheck);
5022 block->discard(load);
5026 return true;
5029 // Reorder the blocks in the loop starting at the given header to be contiguous.
5030 static void MakeLoopContiguous(MIRGraph& graph, MBasicBlock* header,
5031 size_t numMarked) {
5032 MBasicBlock* backedge = header->backedge();
5034 MOZ_ASSERT(header->isMarked(), "Loop header is not part of loop");
5035 MOZ_ASSERT(backedge->isMarked(), "Loop backedge is not part of loop");
5037 // If there are any blocks between the loop header and the loop backedge
5038 // that are not part of the loop, prepare to move them to the end. We keep
5039 // them in order, which preserves RPO.
5040 ReversePostorderIterator insertIter = graph.rpoBegin(backedge);
5041 insertIter++;
5042 MBasicBlock* insertPt = *insertIter;
5044 // Visit all the blocks from the loop header to the loop backedge.
5045 size_t headerId = header->id();
5046 size_t inLoopId = headerId;
5047 size_t notInLoopId = inLoopId + numMarked;
5048 ReversePostorderIterator i = graph.rpoBegin(header);
5049 for (;;) {
5050 MBasicBlock* block = *i++;
5051 MOZ_ASSERT(block->id() >= header->id() && block->id() <= backedge->id(),
5052 "Loop backedge should be last block in loop");
5054 if (block->isMarked()) {
5055 // This block is in the loop.
5056 block->unmark();
5057 block->setId(inLoopId++);
5058 // If we've reached the loop backedge, we're done!
5059 if (block == backedge) {
5060 break;
5062 } else {
5063 // This block is not in the loop. Move it to the end.
5064 graph.moveBlockBefore(insertPt, block);
5065 block->setId(notInLoopId++);
5068 MOZ_ASSERT(header->id() == headerId, "Loop header id changed");
5069 MOZ_ASSERT(inLoopId == headerId + numMarked,
5070 "Wrong number of blocks kept in loop");
5071 MOZ_ASSERT(notInLoopId == (insertIter != graph.rpoEnd() ? insertPt->id()
5072 : graph.numBlocks()),
5073 "Wrong number of blocks moved out of loop");
5076 // Reorder the blocks in the graph so that loops are contiguous.
5077 bool jit::MakeLoopsContiguous(MIRGraph& graph) {
5078 // Visit all loop headers (in any order).
5079 for (MBasicBlockIterator i(graph.begin()); i != graph.end(); i++) {
5080 MBasicBlock* header = *i;
5081 if (!header->isLoopHeader()) {
5082 continue;
5085 // Mark all blocks that are actually part of the loop.
5086 bool canOsr;
5087 size_t numMarked = MarkLoopBlocks(graph, header, &canOsr);
5089     // If this turned out not to be a real loop, don't try to optimize it.
5090 if (numMarked == 0) {
5091 continue;
5094     // If there's an OSR block entering the loop in the middle, it's tricky,
5095     // so don't try to handle it for now.
5096 if (canOsr) {
5097 UnmarkLoopBlocks(graph, header);
5098 continue;
5101 // Move all blocks between header and backedge that aren't marked to
5102 // the end of the loop, making the loop itself contiguous.
5103 MakeLoopContiguous(graph, header, numMarked);
5106 return true;
5109 static MDefinition* SkipUnbox(MDefinition* ins) {
5110 if (ins->isUnbox()) {
5111 return ins->toUnbox()->input();
5113 return ins;
5116 bool jit::OptimizeIteratorIndices(MIRGenerator* mir, MIRGraph& graph) {
5117 bool changed = false;
5119 for (ReversePostorderIterator blockIter = graph.rpoBegin();
5120 blockIter != graph.rpoEnd();) {
5121 MBasicBlock* block = *blockIter++;
5122 for (MInstructionIterator insIter(block->begin());
5123 insIter != block->end();) {
5124 MInstruction* ins = *insIter;
5125 insIter++;
5126 if (!graph.alloc().ensureBallast()) {
5127 return false;
5130 MDefinition* receiver = nullptr;
5131 MDefinition* idVal = nullptr;
5132 MDefinition* setValue = nullptr;
5133 if (ins->isMegamorphicHasProp() &&
5134 ins->toMegamorphicHasProp()->hasOwn()) {
5135 receiver = ins->toMegamorphicHasProp()->object();
5136 idVal = ins->toMegamorphicHasProp()->idVal();
5137 } else if (ins->isHasOwnCache()) {
5138 receiver = ins->toHasOwnCache()->value();
5139 idVal = ins->toHasOwnCache()->idval();
5140 } else if (ins->isMegamorphicLoadSlotByValue()) {
5141 receiver = ins->toMegamorphicLoadSlotByValue()->object();
5142 idVal = ins->toMegamorphicLoadSlotByValue()->idVal();
5143 } else if (ins->isGetPropertyCache()) {
5144 receiver = ins->toGetPropertyCache()->value();
5145 idVal = ins->toGetPropertyCache()->idval();
5146 } else if (ins->isMegamorphicSetElement()) {
5147 receiver = ins->toMegamorphicSetElement()->object();
5148 idVal = ins->toMegamorphicSetElement()->index();
5149 setValue = ins->toMegamorphicSetElement()->value();
5150 } else if (ins->isSetPropertyCache()) {
5151 receiver = ins->toSetPropertyCache()->object();
5152 idVal = ins->toSetPropertyCache()->idval();
5153 setValue = ins->toSetPropertyCache()->value();
5156 if (!receiver) {
5157 continue;
5160 // Given the following structure (that occurs inside for-in loops):
5161 // obj: some object
5162 // iter: ObjectToIterator <obj>
5163 // iterNext: IteratorMore <iter>
5164 // access: HasProp/GetElem <obj> <iterNext>
5165 // If the iterator object has an indices array, we can speed up the
5166 // property access:
5167 // 1. If the property access is a HasProp looking for own properties,
5168 // then the result will always be true if the iterator has indices,
5169 // because we only populate the indices array for objects with no
5170 // enumerable properties on the prototype.
5171 // 2. If the property access is a GetProp, then we can use the contents
5172 // of the indices array to find the correct property faster than
5173 // the megamorphic cache.
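      // Sketch of the rewrite performed below (added; assumed notation):
      //   if MIteratorHasIndices(obj, iter):
      //     fast path: MConstant(true) / MLoadSlotByIteratorIndex /
      //                MStoreSlotByIteratorIndex
      //   else:
      //     slow path: the original megamorphic access
      // wrapInstructionInFastpath builds this diamond around |ins|.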
5174 if (!idVal->isIteratorMore()) {
5175 continue;
5177 auto* iterNext = idVal->toIteratorMore();
5179 if (!iterNext->iterator()->isObjectToIterator()) {
5180 continue;
5183 MObjectToIterator* iter = iterNext->iterator()->toObjectToIterator();
5184 if (SkipUnbox(iter->object()) != SkipUnbox(receiver)) {
5185 continue;
5188 MInstruction* indicesCheck =
5189 MIteratorHasIndices::New(graph.alloc(), iter->object(), iter);
5190 MInstruction* replacement;
5191 if (ins->isHasOwnCache() || ins->isMegamorphicHasProp()) {
5192 MOZ_ASSERT(!setValue);
5193 replacement = MConstant::New(graph.alloc(), BooleanValue(true));
5194 } else if (ins->isMegamorphicLoadSlotByValue() ||
5195 ins->isGetPropertyCache()) {
5196 MOZ_ASSERT(!setValue);
5197 replacement =
5198 MLoadSlotByIteratorIndex::New(graph.alloc(), receiver, iter);
5199 } else {
5200 MOZ_ASSERT(ins->isMegamorphicSetElement() || ins->isSetPropertyCache());
5201 MOZ_ASSERT(setValue);
5202 replacement = MStoreSlotByIteratorIndex::New(graph.alloc(), receiver,
5203 iter, setValue);
5206 if (!block->wrapInstructionInFastpath(ins, replacement, indicesCheck)) {
5207 return false;
5210 iter->setWantsIndices(true);
5211 changed = true;
5213 // Advance to join block.
5214 blockIter = graph.rpoBegin(block->getSuccessor(0)->getSuccessor(0));
5215 break;
5218 if (changed && !AccountForCFGChanges(mir, graph,
5219 /*updateAliasAnalysis=*/false)) {
5220 return false;
5223 return true;
5226 void jit::DumpMIRDefinition(GenericPrinter& out, MDefinition* def) {
5227 #ifdef JS_JITSPEW
5228 out.printf("%u = %s.", def->id(), StringFromMIRType(def->type()));
5229 if (def->isConstant()) {
5230 def->printOpcode(out);
5231 } else {
5232 MDefinition::PrintOpcodeName(out, def->op());
5235 // Get any extra bits of text that the MIR node wants to show us. Both the
5236 // vector and the strings added to it belong to this function, so both will
5237 // be automatically freed at exit.
5238 ExtrasCollector extras;
5239 def->getExtras(&extras);
5240 for (size_t i = 0; i < extras.count(); i++) {
5241 out.printf(" %s", extras.get(i).get());
5244 for (size_t i = 0; i < def->numOperands(); i++) {
5245 out.printf(" %u", def->getOperand(i)->id());
5247 #endif
5250 void jit::DumpMIRExpressions(GenericPrinter& out, MIRGraph& graph,
5251 const CompileInfo& info, const char* phase) {
5252 #ifdef JS_JITSPEW
5253 if (!JitSpewEnabled(JitSpew_MIRExpressions)) {
5254 return;
5257 out.printf("===== %s =====\n", phase);
5259 for (ReversePostorderIterator block(graph.rpoBegin());
5260 block != graph.rpoEnd(); block++) {
5261 out.printf(" Block%u:\n", block->id());
5262 for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
5263 iter != end; iter++) {
5264 out.printf(" ");
5265 jit::DumpMIRDefinition(out, *iter);
5266 out.printf("\n");
5268 for (MInstructionIterator iter(block->begin()), end(block->end());
5269 iter != end; iter++) {
5270 out.printf(" ");
5271 DumpMIRDefinition(out, *iter);
5272 out.printf("\n");
5276 if (info.compilingWasm()) {
5277 out.printf("===== end wasm MIR dump =====\n");
5278 } else {
5279 out.printf("===== %s:%u =====\n", info.filename(), info.lineno());
5281 #endif