/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/MIRGraph.h"

#include "jit/CompileInfo.h"
#include "jit/InlineScriptTree.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"

using namespace js;
using namespace js::jit;

MIRGenerator::MIRGenerator(CompileRealm* realm,
                           const JitCompileOptions& options,
                           TempAllocator* alloc, MIRGraph* graph,
                           const CompileInfo* info,
                           const OptimizationInfo* optimizationInfo)
    : realm(realm),
      runtime(realm ? realm->runtime() : nullptr),
      outerInfo_(info),
      optimizationInfo_(optimizationInfo),
      alloc_(alloc),
      graph_(graph),
      offThreadStatus_(Ok()),
      cancelBuild_(false),
      wasmMaxStackArgBytes_(0),
      needsOverrecursedCheck_(false),
      needsStaticStackAlignment_(false),
      instrumentedProfiling_(false),
      instrumentedProfilingIsCached_(false),
      stringsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateStrings()
                                   : false),
      bigIntsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateBigInts()
                                   : false),
      minWasmMemory0Length_(0),
      options(options),
      gs_(alloc) {}

bool MIRGenerator::licmEnabled() const {
  return optimizationInfo().licmEnabled() && !disableLICM_ &&
         !outerInfo().hadLICMInvalidation();
}

bool MIRGenerator::branchHintingEnabled() const {
  return outerInfo().branchHintingEnabled();
}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(AbortReason r) {
  if (JitSpewEnabled(JitSpew_IonAbort)) {
    switch (r) {
      case AbortReason::Alloc:
        JitSpew(JitSpew_IonAbort, "AbortReason::Alloc");
        break;
      case AbortReason::Disable:
        JitSpew(JitSpew_IonAbort, "AbortReason::Disable");
        break;
      case AbortReason::Error:
        JitSpew(JitSpew_IonAbort, "AbortReason::Error");
        break;
      case AbortReason::NoAbort:
        MOZ_CRASH("Abort with AbortReason::NoAbort");
        break;
    }
  }
  return Err(std::move(r));
}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abortFmt(
    AbortReason r, const char* message, va_list ap) {
  JitSpewVA(JitSpew_IonAbort, message, ap);
  return Err(std::move(r));
}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(
    AbortReason r, const char* message, ...) {
  va_list ap;
  va_start(ap, message);
  auto forward = abortFmt(r, message, ap);
  va_end(ap);
  return forward;
}

void MIRGraph::addBlock(MBasicBlock* block) {
  MOZ_ASSERT(block);
  block->setId(blockIdGen_++);
  blocks_.pushBack(block);
  numBlocks_++;
}

void MIRGraph::insertBlockAfter(MBasicBlock* at, MBasicBlock* block) {
  block->setId(blockIdGen_++);
  blocks_.insertAfter(at, block);
  numBlocks_++;
}

void MIRGraph::insertBlockBefore(MBasicBlock* at, MBasicBlock* block) {
  block->setId(blockIdGen_++);
  blocks_.insertBefore(at, block);
  numBlocks_++;
}

void MIRGraph::removeBlock(MBasicBlock* block) {
  // Remove a block from the graph. It will also cleanup the block.

  if (block == osrBlock_) {
    osrBlock_ = nullptr;
  }

  if (returnAccumulator_) {
    size_t i = 0;
    while (i < returnAccumulator_->length()) {
      if ((*returnAccumulator_)[i] == block) {
        returnAccumulator_->erase(returnAccumulator_->begin() + i);
      } else {
        i++;
      }
    }
  }

  block->clear();
  block->markAsDead();

  if (block->isInList()) {
    blocks_.remove(block);
    numBlocks_--;
  }
}

void MIRGraph::unmarkBlocks() {
  for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++) {
    i->unmark();
  }
}

MBasicBlock* MBasicBlock::New(MIRGraph& graph, size_t stackDepth,
                              const CompileInfo& info, MBasicBlock* maybePred,
                              BytecodeSite* site, Kind kind) {
  MOZ_ASSERT(site->pc() != nullptr);

  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), stackDepth, maybePred, 0)) {
    return nullptr;
  }

  return block;
}

MBasicBlock* MBasicBlock::NewPopN(MIRGraph& graph, const CompileInfo& info,
                                  MBasicBlock* pred, BytecodeSite* site,
                                  Kind kind, uint32_t popped) {
  MOZ_ASSERT(site->pc() != nullptr);

  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, popped)) {
    return nullptr;
  }

  return block;
}

MBasicBlock* MBasicBlock::NewPendingLoopHeader(MIRGraph& graph,
                                               const CompileInfo& info,
                                               MBasicBlock* pred,
                                               BytecodeSite* site) {
  MOZ_ASSERT(site->pc() != nullptr);

  MBasicBlock* block =
      new (graph.alloc()) MBasicBlock(graph, info, site, PENDING_LOOP_HEADER);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, 0)) {
    return nullptr;
  }

  return block;
}

MBasicBlock* MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred,
                                       size_t predEdgeIdx, MBasicBlock* succ) {
  MBasicBlock* split = nullptr;
  if (!succ->pc()) {
    // The successor does not have a PC; this is a Wasm compilation.
    split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
    if (!split) {
      return nullptr;
    }

    // Insert the split edge block in-between.
    split->end(MGoto::New(graph.alloc(), succ));
  } else {
    // The successor has a PC; this is a Warp compilation.
    MResumePoint* succEntry = succ->entryResumePoint();

    BytecodeSite* site =
        new (graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
    split =
        new (graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);

    if (!split->init()) {
      return nullptr;
    }

    // A split edge is used to simplify the graph to avoid having a
    // predecessor with multiple successors as well as a successor with
    // multiple predecessors. As instructions can be moved in this
    // split-edge block, we need to give this block a resume point. To do
    // so, we copy the entry resume points of the successor and filter the
    // phis to keep inputs from the current edge.

    // Propagate the caller resume point from the inherited block.
    split->callerResumePoint_ = succ->callerResumePoint();

    // Split-edge blocks are created after the interpreter stack emulation.
    // Thus, there is no need for creating slots.
    split->stackPosition_ = succEntry->stackDepth();

    // Create a resume point using our initial stack position.
    MResumePoint* splitEntry = new (graph.alloc())
        MResumePoint(split, succEntry->pc(), ResumeMode::ResumeAt);
    if (!splitEntry->init(graph.alloc())) {
      return nullptr;
    }
    split->entryResumePoint_ = splitEntry;

    // Insert the split edge block in-between.
    split->end(MGoto::New(graph.alloc(), succ));

    // The target entry resume point might have phi operands, keep the
    // operands of the phi coming from our edge.
    size_t succEdgeIdx = succ->indexForPredecessor(pred);

    for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
      MDefinition* def = succEntry->getOperand(i);
      // This early in the pipeline, we have no recover instructions in
      // any entry resume point.
      if (def->block() == succ) {
        if (def->isPhi()) {
          def = def->toPhi()->getOperand(succEdgeIdx);
        } else {
          // The phi operand may already have been optimized out.
          MOZ_ASSERT(def->isConstant());
          MOZ_ASSERT(def->type() == MIRType::MagicOptimizedOut);

          def = split->optimizedOutConstant(graph.alloc());
        }
      }

      splitEntry->initOperand(i, def);
    }

    // This is done in the New variant for wasm, so we cannot keep this
    // line below, where the rest of the graph is modified.
    if (!split->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  split->setLoopDepth(succ->loopDepth());

  graph.insertBlockAfter(pred, split);

  pred->replaceSuccessor(predEdgeIdx, split);
  succ->replacePredecessor(pred, split);
  return split;
}

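// Move |ins| from this block to the end of |dst|, transferring its resume
// point (if any) along with it.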
void MBasicBlock::moveToNewBlock(MInstruction* ins, MBasicBlock* dst) {
  MOZ_ASSERT(ins->block() == this);
  MOZ_ASSERT(!dst->hasLastIns());
  instructions_.remove(ins);
  ins->setInstructionBlock(dst, dst->trackedSite());
  if (MResumePoint* rp = ins->resumePoint()) {
    removeResumePoint(rp);
    dst->addResumePoint(rp);
    rp->setBlock(dst);
  }
  dst->instructions_.pushBack(ins);
}

void MBasicBlock::moveOuterResumePointTo(MBasicBlock* dest) {
  if (MResumePoint* outer = outerResumePoint()) {
    removeResumePoint(outer);
    outerResumePoint_ = nullptr;
    dest->setOuterResumePoint(outer);
    dest->addResumePoint(outer);
    outer->setBlock(dest);
  }
}

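// Split this block around |ins|: emit |condition| and branch to a fast path
// block containing |fastpath| and a slow path block containing |ins|, then
// merge both paths in a join block (with a phi for the result if |ins| has
// uses).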
bool MBasicBlock::wrapInstructionInFastpath(MInstruction* ins,
                                            MInstruction* fastpath,
                                            MInstruction* condition) {
  MOZ_ASSERT(ins->block() == this);
  MOZ_ASSERT(!ins->isControlInstruction());

  MInstructionIterator rest(begin(ins));
  rest++;

  MResumePoint* resumeBeforeIns = activeResumePoint(ins);
  MResumePoint* resumeAfterIns = activeResumePoint(*rest);

  // Create the join block.
  MBasicBlock* join = MBasicBlock::NewInternal(graph_, this, resumeAfterIns);
  if (!join) {
    return false;
  }

  // Update the successors of the current block.
  for (uint32_t i = 0; i < numSuccessors(); i++) {
    getSuccessor(i)->replacePredecessor(this, join);
  }
  if (successorWithPhis()) {
    join->setSuccessorWithPhis(successorWithPhis(), positionInPhiSuccessor());
    clearSuccessorWithPhis();
  }

  // Move all instructions after |ins| into the join block.
  while (rest != end()) {
    MInstruction* ins = *rest++;
    moveToNewBlock(ins, join);
  }
  MOZ_ASSERT(!hasLastIns());
  MOZ_ASSERT(join->hasLastIns());

  graph_.insertBlockAfter(this, join);

  // Create the fast path block.
  MBasicBlock* fastpathBlock =
      MBasicBlock::NewInternal(graph_, this, resumeBeforeIns);
  if (!fastpathBlock) {
    return false;
  }
  graph_.insertBlockAfter(this, fastpathBlock);
  fastpathBlock->add(fastpath);
  fastpathBlock->end(MGoto::New(graph_.alloc(), join));

  // Create the slow path block.
  MBasicBlock* slowpathBlock =
      MBasicBlock::NewInternal(graph_, this, resumeBeforeIns);
  if (!slowpathBlock) {
    return false;
  }
  graph_.insertBlockAfter(fastpathBlock, slowpathBlock);
  moveToNewBlock(ins, slowpathBlock);
  slowpathBlock->end(MGoto::New(graph_.alloc(), join));

  // Connect current block to fastpath and slowpath.
  add(condition);
  end(MTest::New(graph_.alloc(), condition, fastpathBlock, slowpathBlock));

  // Update predecessors.
  if (!fastpathBlock->addPredecessorWithoutPhis(this) ||
      !slowpathBlock->addPredecessorWithoutPhis(this) ||
      !join->addPredecessorWithoutPhis(fastpathBlock) ||
      !join->addPredecessorWithoutPhis(slowpathBlock)) {
    return false;
  }

  if (ins->hasUses()) {
    // Insert phi.
    MPhi* phi = MPhi::New(graph_.alloc());
    if (!phi->reserveLength(2)) {
      return false;
    }
    phi->addInput(fastpath);
    fastpathBlock->setSuccessorWithPhis(join, 0);
    phi->addInput(ins);
    slowpathBlock->setSuccessorWithPhis(join, 1);
    join->addPhi(phi);

    for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e;) {
      MUse* use = *i++;
      if (use->consumer() != phi && use->consumer() != ins->resumePoint()) {
        use->replaceProducer(phi);
      }
    }
  }

  moveOuterResumePointTo(join);

  return true;
}

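// Create an INTERNAL block whose entry resume point and stack state are
// copied from |resumePoint|, inheriting the caller resume point and loop
// depth of |orig|.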
MBasicBlock* MBasicBlock::NewInternal(MIRGraph& graph, MBasicBlock* orig,
                                      MResumePoint* resumePoint) {
  jsbytecode* pc = IsResumeAfter(resumePoint->mode())
                       ? GetNextPc(resumePoint->pc())
                       : resumePoint->pc();

  BytecodeSite* site =
      new (graph.alloc()) BytecodeSite(orig->trackedTree(), pc);
  MBasicBlock* block =
      new (graph.alloc()) MBasicBlock(graph, orig->info(), site, INTERNAL);
  if (!block->init()) {
    return nullptr;
  }

  // Propagate the caller resume point from the original block.
  block->callerResumePoint_ = orig->callerResumePoint();

  // Copy the resume point.
  block->stackPosition_ = resumePoint->stackDepth();
  MResumePoint* entryResumePoint =
      new (graph.alloc()) MResumePoint(block, pc, ResumeMode::ResumeAt);
  if (!entryResumePoint->init(graph.alloc())) {
    return nullptr;
  }
  for (size_t i = 0; i < resumePoint->stackDepth(); i++) {
    entryResumePoint->initOperand(i, resumePoint->getOperand(i));
  }
  block->entryResumePoint_ = entryResumePoint;

  block->setLoopDepth(orig->loopDepth());

  return block;
}

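// Variant of New without a bytecode site; used when there is no bytecode pc,
// e.g. for wasm compilation (see NewSplitEdge). No entry resume point is
// created: slots are copied from |pred|, or seeded with phis for a pending
// loop header.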
MBasicBlock* MBasicBlock::New(MIRGraph& graph, const CompileInfo& info,
                              MBasicBlock* pred, Kind kind) {
  BytecodeSite* site = new (graph.alloc()) BytecodeSite();
  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (pred) {
    block->stackPosition_ = pred->stackPosition_;

    if (block->kind_ == PENDING_LOOP_HEADER) {
      size_t nphis = block->stackPosition_;

      size_t nfree = graph.phiFreeListLength();

      TempAllocator& alloc = graph.alloc();
      MPhi* phis = nullptr;
      if (nphis > nfree) {
        phis = alloc.allocateArray<MPhi>(nphis - nfree);
        if (!phis) {
          return nullptr;
        }
      }

      // Note: Phis are inserted in the same order as the slots.
      for (size_t i = 0; i < nphis; i++) {
        MDefinition* predSlot = pred->getSlot(i);

        MOZ_ASSERT(predSlot->type() != MIRType::Value);

        MPhi* phi;
        if (i < nfree) {
          phi = graph.takePhiFromFreeList();
        } else {
          phi = phis + (i - nfree);
        }
        new (phi) MPhi(alloc, predSlot->type());

        phi->addInlineInput(predSlot);

        // Append the Phi to the block.
        block->addPhi(phi);
        block->setSlot(i, phi);
      }
    } else {
      if (!block->ensureHasSlots(0)) {
        return nullptr;
      }
      block->copySlots(pred);
    }

    if (!block->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  return block;
}

// Create an empty and unreachable block which jumps to |header|. Used
// when the normal entry into a loop is removed (but the loop is still
// reachable due to OSR) to preserve the invariant that every loop
// header has two predecessors, which is needed for building the
// dominator tree. The new block is inserted immediately before the
// header, which preserves the graph ordering (post-order/RPO). These
// blocks will all be removed before lowering.
MBasicBlock* MBasicBlock::NewFakeLoopPredecessor(MIRGraph& graph,
                                                 MBasicBlock* header) {
  MOZ_ASSERT(graph.osrBlock());

  MBasicBlock* backedge = header->backedge();
  MBasicBlock* fake = MBasicBlock::New(graph, header->info(), nullptr,
                                       MBasicBlock::FAKE_LOOP_PRED);
  if (!fake) {
    return nullptr;
  }

  graph.insertBlockBefore(header, fake);
  fake->setUnreachable();

  // Create fake defs to use as inputs for any phis in |header|.
  for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd());
       iter != end; ++iter) {
    if (!graph.alloc().ensureBallast()) {
      return nullptr;
    }
    MPhi* phi = *iter;
    auto* fakeDef = MUnreachableResult::New(graph.alloc(), phi->type());
    fake->add(fakeDef);
    if (!phi->addInputSlow(fakeDef)) {
      return nullptr;
    }
  }

  fake->end(MGoto::New(graph.alloc(), header));

  if (!header->addPredecessorWithoutPhis(fake)) {
    return nullptr;
  }

  // The backedge is always the last predecessor, but we have added a
  // new pred. Restore |backedge| as |header|'s loop backedge.
  header->clearLoopHeader();
  header->setLoopHeader(backedge);

  return fake;
}

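// Remove all blocks created by NewFakeLoopPredecessor and renumber the
// remaining blocks in reverse postorder.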
void MIRGraph::removeFakeLoopPredecessors() {
  MOZ_ASSERT(osrBlock());
  size_t id = 0;
  for (ReversePostorderIterator it = rpoBegin(); it != rpoEnd();) {
    MBasicBlock* block = *it++;
    if (block->isFakeLoopPred()) {
      MOZ_ASSERT(block->unreachable());
      MBasicBlock* succ = block->getSingleSuccessor();
      succ->removePredecessor(block);
      removeBlock(block);
    } else {
      block->setId(id++);
    }
  }
#ifdef DEBUG
  canBuildDominators_ = false;
#endif
}

MBasicBlock::MBasicBlock(MIRGraph& graph, const CompileInfo& info,
                         BytecodeSite* site, Kind kind)
    : graph_(graph),
      info_(info),
      predecessors_(graph.alloc()),
      stackPosition_(info_.firstStackSlot()),
      id_(0),
      domIndex_(0),
      numDominated_(0),
      lir_(nullptr),
      callerResumePoint_(nullptr),
      entryResumePoint_(nullptr),
      outerResumePoint_(nullptr),
      successorWithPhis_(nullptr),
      positionInPhiSuccessor_(0),
      loopDepth_(0),
      kind_(kind),
      mark_(false),
      immediatelyDominated_(graph.alloc()),
      immediateDominator_(nullptr),
      trackedSite_(site) {
  MOZ_ASSERT(trackedSite_, "trackedSite_ is non-nullptr");
}

bool MBasicBlock::init() { return slots_.init(graph_.alloc(), info_.nslots()); }

bool MBasicBlock::increaseSlots(size_t num) {
  return slots_.growBy(graph_.alloc(), num);
}

bool MBasicBlock::ensureHasSlots(size_t num) {
  size_t depth = stackDepth() + num;
  if (depth > nslots()) {
    if (!increaseSlots(depth - nslots())) {
      return false;
    }
  }
  return true;
}

void MBasicBlock::copySlots(MBasicBlock* from) {
  MOZ_ASSERT(stackPosition_ <= from->stackPosition_);
  MOZ_ASSERT(stackPosition_ <= nslots());

  MDefinition** thisSlots = slots_.begin();
  MDefinition** fromSlots = from->slots_.begin();
  for (size_t i = 0, e = stackPosition_; i < e; ++i) {
    thisSlots[i] = fromSlots[i];
  }
}

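// Initialize this block's stack state from |maybePred| (minus |popped|
// values) and create its entry resume point.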
bool MBasicBlock::inherit(TempAllocator& alloc, size_t stackDepth,
                          MBasicBlock* maybePred, uint32_t popped) {
  MOZ_ASSERT_IF(maybePred, maybePred->stackDepth() == stackDepth);

  MOZ_ASSERT(stackDepth >= popped);
  stackDepth -= popped;
  stackPosition_ = stackDepth;

  if (maybePred && kind_ != PENDING_LOOP_HEADER) {
    copySlots(maybePred);
  }

  MOZ_ASSERT(info_.nslots() >= stackPosition_);
  MOZ_ASSERT(!entryResumePoint_);

  // Propagate the caller resume point from the inherited block.
  callerResumePoint_ = maybePred ? maybePred->callerResumePoint() : nullptr;

  // Create a resume point using our initial stack state.
  entryResumePoint_ =
      new (alloc) MResumePoint(this, pc(), ResumeMode::ResumeAt);
  if (!entryResumePoint_->init(alloc)) {
    return false;
  }

  if (maybePred) {
    if (!predecessors_.append(maybePred)) {
      return false;
    }

    if (kind_ == PENDING_LOOP_HEADER) {
      for (size_t i = 0; i < stackDepth; i++) {
        MPhi* phi = MPhi::New(alloc.fallible());
        if (!phi) {
          return false;
        }
        phi->addInlineInput(maybePred->getSlot(i));
        addPhi(phi);
        setSlot(i, phi);
        entryResumePoint()->initOperand(i, phi);
      }
    } else {
      for (size_t i = 0; i < stackDepth; i++) {
        entryResumePoint()->initOperand(i, getSlot(i));
      }
    }
  } else {
    /*
     * Don't leave the operands uninitialized for the caller, as it may not
     * initialize them later on.
     */
    for (size_t i = 0; i < stackDepth; i++) {
      entryResumePoint()->clearOperand(i);
    }
  }

  return true;
}

void MBasicBlock::inheritSlots(MBasicBlock* parent) {
  stackPosition_ = parent->stackPosition_;
  copySlots(parent);
}

bool MBasicBlock::initEntrySlots(TempAllocator& alloc) {
  // Remove the previous resume point.
  discardResumePoint(entryResumePoint_);

  // Create a resume point using our initial stack state.
  entryResumePoint_ =
      MResumePoint::New(alloc, this, pc(), ResumeMode::ResumeAt);
  if (!entryResumePoint_) {
    return false;
  }
  return true;
}

MDefinition* MBasicBlock::environmentChain() {
  return getSlot(info().environmentChainSlot());
}

MDefinition* MBasicBlock::argumentsObject() {
  return getSlot(info().argsObjSlot());
}

void MBasicBlock::setEnvironmentChain(MDefinition* scopeObj) {
  setSlot(info().environmentChainSlot(), scopeObj);
}

void MBasicBlock::setArgumentsObject(MDefinition* argsObj) {
  setSlot(info().argsObjSlot(), argsObj);
}

void MBasicBlock::pick(int32_t depth) {
  // pick takes a value and moves it to the top.
  // pick(-2):
  //   A B C D E
  //   A B D C E [ swapAt(-2) ]
  //   A B D E C [ swapAt(-1) ]
  for (; depth < 0; depth++) {
    swapAt(depth);
  }
}

void MBasicBlock::unpick(int32_t depth) {
  // unpick takes the value on top of the stack and moves it under the
  // depth-th element;
  // unpick(-2):
  //   A B C D E
  //   A B C E D [ swapAt(-1) ]
  //   A B E C D [ swapAt(-2) ]
  for (int32_t n = -1; n >= depth; n--) {
    swapAt(n);
  }
}

void MBasicBlock::swapAt(int32_t depth) {
  uint32_t lhsDepth = stackPosition_ + depth - 1;
  uint32_t rhsDepth = stackPosition_ + depth;

  MDefinition* temp = slots_[lhsDepth];
  slots_[lhsDepth] = slots_[rhsDepth];
  slots_[rhsDepth] = temp;
}

void MBasicBlock::discardLastIns() { discard(lastIns()); }

MConstant* MBasicBlock::optimizedOutConstant(TempAllocator& alloc) {
  // If the first instruction is a MConstant(MagicValue(JS_OPTIMIZED_OUT))
  // then reuse it.
  MInstruction* ins = *begin();
  if (ins->type() == MIRType::MagicOptimizedOut) {
    return ins->toConstant();
  }

  MConstant* constant = MConstant::New(alloc, MagicValue(JS_OPTIMIZED_OUT));
  insertBefore(ins, constant);
  return constant;
}

void MBasicBlock::moveBefore(MInstruction* at, MInstruction* ins) {
  // Remove |ins| from the current block.
  MOZ_ASSERT(ins->block() == this);
  instructions_.remove(ins);

  // Insert into new block, which may be distinct.
  // Uses and operands are untouched.
  ins->setInstructionBlock(at->block(), at->trackedSite());
  at->block()->instructions_.insertBefore(at, ins);
}

MInstruction* MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore) {
  MOZ_ASSERT(graph().osrBlock() != this,
             "We are not supposed to add any instruction in OSR blocks.");

  // Beta nodes and interrupt checks are required to be located at the
  // beginnings of basic blocks, so we must insert new instructions after any
  // such instructions.
  MInstructionIterator insertIter =
      !ins || ins->isPhi() ? begin() : begin(ins->toInstruction());
  while (insertIter->isBeta() || insertIter->isInterruptCheck() ||
         insertIter->isConstant() || insertIter->isParameter() ||
         (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout())) {
    insertIter++;
  }

  return *insertIter;
}

void MBasicBlock::discardResumePoint(
    MResumePoint* rp, ReferencesType refType /* = RefType_Default */) {
  if (refType & RefType_DiscardOperands) {
    rp->releaseUses();
  }
  rp->setDiscarded();
  removeResumePoint(rp);
}

void MBasicBlock::removeResumePoint(MResumePoint* rp) {
#ifdef DEBUG
  MResumePointIterator iter = resumePointsBegin();
  while (*iter != rp) {
    // We should reach it before reaching the end.
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
  }
  resumePoints_.removeAt(iter);
#endif
}

void MBasicBlock::prepareForDiscard(
    MInstruction* ins, ReferencesType refType /* = RefType_Default */) {
  // Only remove instructions from the same basic block. This is needed for
  // correctly removing the resume point if any.
  MOZ_ASSERT(ins->block() == this);

  MResumePoint* rp = ins->resumePoint();
  if ((refType & RefType_DiscardResumePoint) && rp) {
    discardResumePoint(rp, refType);
  }

  // We need to assert that instructions have no uses after removing their
  // resume point operands, as they could be captured by their own resume
  // point.
  MOZ_ASSERT_IF(refType & RefType_AssertNoUses, !ins->hasUses());

  const uint32_t InstructionOperands =
      RefType_DiscardOperands | RefType_DiscardInstruction;
  if ((refType & InstructionOperands) == InstructionOperands) {
    for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
      ins->releaseOperand(i);
    }
  }

  ins->setDiscarded();
}

void MBasicBlock::discard(MInstruction* ins) {
  prepareForDiscard(ins);
  instructions_.remove(ins);
}

void MBasicBlock::discardIgnoreOperands(MInstruction* ins) {
#ifdef DEBUG
  for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
    MOZ_ASSERT(!ins->hasOperand(i));
  }
#endif

  prepareForDiscard(ins, RefType_IgnoreOperands);
  instructions_.remove(ins);
}

void MBasicBlock::discardAllInstructions() {
  MInstructionIterator iter = begin();
  discardAllInstructionsStartingAt(iter);
}

void MBasicBlock::discardAllInstructionsStartingAt(MInstructionIterator iter) {
  while (iter != end()) {
    // Discard operands and resume point operands and flag the instruction
    // as discarded. Also we do not assert that we have no uses as blocks
    // might be removed in reverse post order.
    MInstruction* ins = *iter++;
    prepareForDiscard(ins, RefType_DefaultNoAssert);
    instructions_.remove(ins);
  }
}

void MBasicBlock::discardAllPhis() {
  for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
    iter->removeAllOperands();
  }

  for (MBasicBlock** pred = predecessors_.begin(); pred != predecessors_.end();
       pred++) {
    (*pred)->clearSuccessorWithPhis();
  }

  phis_.clear();
}

void MBasicBlock::discardAllResumePoints(bool discardEntry) {
  if (outerResumePoint_) {
    clearOuterResumePoint();
  }

  if (discardEntry && entryResumePoint_) {
    clearEntryResumePoint();
  }

#ifdef DEBUG
  if (!entryResumePoint()) {
    MOZ_ASSERT(resumePointsEmpty());
  } else {
    MResumePointIterator iter(resumePointsBegin());
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
    MOZ_ASSERT(iter == resumePointsEnd());
  }
#endif
}

void MBasicBlock::clear() {
  discardAllInstructions();
  discardAllResumePoints();
  discardAllPhis();
}

void MBasicBlock::insertBefore(MInstruction* at, MInstruction* ins) {
  MOZ_ASSERT(at->block() == this);
  ins->setInstructionBlock(this, at->trackedSite());
  graph().allocDefinitionId(ins);
  instructions_.insertBefore(at, ins);
}

void MBasicBlock::insertAfter(MInstruction* at, MInstruction* ins) {
  MOZ_ASSERT(at->block() == this);
  ins->setInstructionBlock(this, at->trackedSite());
  graph().allocDefinitionId(ins);
  instructions_.insertAfter(at, ins);
}

void MBasicBlock::insertAtEnd(MInstruction* ins) {
  if (hasLastIns()) {
    insertBefore(lastIns(), ins);
  } else {
    add(ins);
  }
}

void MBasicBlock::addPhi(MPhi* phi) {
  phis_.pushBack(phi);
  phi->setPhiBlock(this);
  graph().allocDefinitionId(phi);
}

void MBasicBlock::discardPhi(MPhi* phi) {
  MOZ_ASSERT(!phis_.empty());

  phi->removeAllOperands();
  phi->setDiscarded();

  phis_.remove(phi);

  if (phis_.empty()) {
    for (MBasicBlock* pred : predecessors_) {
      pred->clearSuccessorWithPhis();
    }
  }
}

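// Return the resume point which is live at |ins|: the resume point of the
// closest preceding instruction that has one, or the entry resume point if
// there is none.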
MResumePoint* MBasicBlock::activeResumePoint(MInstruction* ins) {
  for (MInstructionReverseIterator iter = rbegin(ins); iter != rend(); iter++) {
    if (iter->resumePoint() && *iter != ins) {
      return iter->resumePoint();
    }
  }

  // If none, take the entry resume point.
  return entryResumePoint();
}

void MBasicBlock::flagOperandsOfPrunedBranches(MInstruction* ins) {
  MResumePoint* rp = activeResumePoint(ins);

  // The only blocks which do not have any entryResumePoint in Ion are the
  // SplitEdge blocks. SplitEdge blocks only have a Goto instruction before
  // the Range Analysis phase. In adjustInputs, we are manipulating
  // instructions which have a TypePolicy. So, as a Goto has no operand and
  // no type policy, the entry resume point should exist.
  MOZ_ASSERT(rp);

  // Flag all operands as being potentially used.
  while (rp) {
    for (size_t i = 0, end = rp->numOperands(); i < end; i++) {
      rp->getOperand(i)->setImplicitlyUsedUnchecked();
    }
    rp = rp->caller();
  }
}

bool MBasicBlock::addPredecessor(TempAllocator& alloc, MBasicBlock* pred) {
  return addPredecessorPopN(alloc, pred, 0);
}

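// Add |pred| as a predecessor whose stack is |popped| values deeper than this
// block's. Where slot values differ, introduce (or extend) phis so each slot
// merges the values from every predecessor.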
bool MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred,
                                     uint32_t popped) {
  MOZ_ASSERT(pred);
  MOZ_ASSERT(predecessors_.length() > 0);

  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

  for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
    MDefinition* mine = getSlot(i);
    MDefinition* other = pred->getSlot(i);

    if (mine != other) {
      MIRType phiType = mine->type();
      if (phiType != other->type()) {
        phiType = MIRType::Value;
      }

      // If the current instruction is a phi, and it was created in this
      // basic block, then we have already placed this phi and should
      // instead append to its operands.
      if (mine->isPhi() && mine->block() == this) {
        MOZ_ASSERT(predecessors_.length());
        MOZ_ASSERT(!mine->hasDefUses(),
                   "should only change type of newly created phis");
        mine->setResultType(phiType);
        if (!mine->toPhi()->addInputSlow(other)) {
          return false;
        }
      } else {
        // Otherwise, create a new phi node.
        MPhi* phi = MPhi::New(alloc.fallible(), phiType);
        if (!phi) {
          return false;
        }
        addPhi(phi);

        // Prime the phi for each predecessor, so input(x) comes from
        // predecessor(x).
        if (!phi->reserveLength(predecessors_.length() + 1)) {
          return false;
        }

        for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds;
             ++j) {
          MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
          phi->addInput(mine);
        }
        phi->addInput(other);

        setSlot(i, phi);
        if (entryResumePoint()) {
          entryResumePoint()->replaceOperand(i, phi);
        }
      }
    }
  }

  return predecessors_.append(pred);
}

bool MBasicBlock::addPredecessorSameInputsAs(MBasicBlock* pred,
                                             MBasicBlock* existingPred) {
  MOZ_ASSERT(pred);
  MOZ_ASSERT(predecessors_.length() > 0);

  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(!pred->successorWithPhis());

  if (!phisEmpty()) {
    size_t existingPosition = indexForPredecessor(existingPred);
    for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
      if (!iter->addInputSlow(iter->getOperand(existingPosition))) {
        return false;
      }
    }
  }

  if (!predecessors_.append(pred)) {
    return false;
  }
  return true;
}

bool MBasicBlock::addPredecessorWithoutPhis(MBasicBlock* pred) {
  // Predecessors must be finished.
  MOZ_ASSERT(pred && pred->hasLastIns());
  return predecessors_.append(pred);
}

bool MBasicBlock::addImmediatelyDominatedBlock(MBasicBlock* child) {
  return immediatelyDominated_.append(child);
}

void MBasicBlock::removeImmediatelyDominatedBlock(MBasicBlock* child) {
  for (size_t i = 0;; ++i) {
    MOZ_ASSERT(i < immediatelyDominated_.length(),
               "Dominated block to remove not present");
    if (immediatelyDominated_[i] == child) {
      immediatelyDominated_[i] = immediatelyDominated_.back();
      immediatelyDominated_.popBack();
      return;
    }
  }
}

bool MBasicBlock::setBackedge(MBasicBlock* pred) {
  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(hasLastIns());
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  // Add exit definitions to each corresponding phi at the entry.
  if (!inheritPhisFromBackedge(pred)) {
    return false;
  }

  // We are now a loop header proper
  kind_ = LOOP_HEADER;

  return predecessors_.append(pred);
}

bool MBasicBlock::setBackedgeWasm(MBasicBlock* pred, size_t paramCount) {
  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(hasLastIns());
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(stackDepth() + paramCount == pred->stackDepth());

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  // Add exit definitions to each corresponding phi at the entry.
  // Note: Phis are inserted in the same order as the slots. (see
  // MBasicBlock::New)
  size_t slot = 0;
  for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
    MPhi* entryDef = *phi;
    MDefinition* exitDef = pred->getSlot(slot);

    // Assert that we already placed phis for each slot.
    MOZ_ASSERT(entryDef->block() == this);

    // Assert that the phi already has the correct type.
    MOZ_ASSERT(entryDef->type() == exitDef->type());
    MOZ_ASSERT(entryDef->type() != MIRType::Value);

    if (entryDef == exitDef) {
      // If the exit def is the same as the entry def, make a redundant
      // phi. Since loop headers have exactly two incoming edges, we
      // know that that's just the first input.
      //
      // Note that we eliminate later rather than now, to avoid any
      // weirdness around pending continue edges which might still hold
      // onto phis.
      exitDef = entryDef->getOperand(0);
    }

    // Phis always have room for 2 operands, so this can't fail.
    MOZ_ASSERT(phi->numOperands() == 1);
    entryDef->addInlineInput(exitDef);

    // Two cases here: phis that correspond to locals, and phis that correspond
    // to loop parameters. Only phis for locals go in slots.
    if (slot < stackDepth()) {
      setSlot(slot, entryDef);
    }
  }

  // We are now a loop header proper
  kind_ = LOOP_HEADER;

  return predecessors_.append(pred);
}

void MBasicBlock::clearLoopHeader() {
  MOZ_ASSERT(isLoopHeader());
  kind_ = NORMAL;
}

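// Mark this block as a loop header with |newBackedge| as its backedge, moving
// |newBackedge| into the last predecessor slot and reordering phi operands to
// match.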
void MBasicBlock::setLoopHeader(MBasicBlock* newBackedge) {
  MOZ_ASSERT(!isLoopHeader());
  kind_ = LOOP_HEADER;

  size_t numPreds = numPredecessors();
  MOZ_ASSERT(numPreds != 0);

  size_t lastIndex = numPreds - 1;
  size_t oldIndex = 0;
  for (;; ++oldIndex) {
    MOZ_ASSERT(oldIndex < numPreds);
    MBasicBlock* pred = getPredecessor(oldIndex);
    if (pred == newBackedge) {
      break;
    }
  }

  // Set the loop backedge to be the last element in predecessors_.
  std::swap(predecessors_[oldIndex], predecessors_[lastIndex]);

  // If we have phis, reorder their operands accordingly.
  if (!phisEmpty()) {
    getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
    getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
    for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
      MPhi* phi = *iter;
      MDefinition* last = phi->getOperand(oldIndex);
      MDefinition* old = phi->getOperand(lastIndex);
      phi->replaceOperand(oldIndex, old);
      phi->replaceOperand(lastIndex, last);
    }
  }

  MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
  MOZ_ASSERT(backedge() == newBackedge);
}

size_t MBasicBlock::getSuccessorIndex(MBasicBlock* block) const {
  MOZ_ASSERT(lastIns());
  for (size_t i = 0; i < numSuccessors(); i++) {
    if (getSuccessor(i) == block) {
      return i;
    }
  }
  MOZ_CRASH("Invalid successor");
}

size_t MBasicBlock::getPredecessorIndex(MBasicBlock* block) const {
  for (size_t i = 0, e = numPredecessors(); i < e; ++i) {
    if (getPredecessor(i) == block) {
      return i;
    }
  }
  MOZ_CRASH("Invalid predecessor");
}

void MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock* split) {
  MOZ_ASSERT(lastIns());

  // Note, during split-critical-edges, successors-with-phis is not yet set.
  // During PAA, this case is handled before we enter.
  MOZ_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));

  lastIns()->replaceSuccessor(pos, split);
}

void MBasicBlock::replacePredecessor(MBasicBlock* old, MBasicBlock* split) {
  for (size_t i = 0; i < numPredecessors(); i++) {
    if (getPredecessor(i) == old) {
      predecessors_[i] = split;

#ifdef DEBUG
      // The same block should not appear twice in the predecessor list.
      for (size_t j = i; j < numPredecessors(); j++) {
        MOZ_ASSERT(predecessors_[j] != old);
      }
#endif

      return;
    }
  }

  MOZ_CRASH("predecessor was not found");
}

void MBasicBlock::clearDominatorInfo() {
  setImmediateDominator(nullptr);
  immediatelyDominated_.clear();
  numDominated_ = 0;
}

void MBasicBlock::removePredecessorWithoutPhiOperands(MBasicBlock* pred,
                                                      size_t predIndex) {
  // If we're removing the last backedge, this is no longer a loop.
  if (isLoopHeader() && hasUniqueBackedge() && backedge() == pred) {
    clearLoopHeader();
  }

  // Adjust phis. Note that this can leave redundant phis behind.
  // Don't adjust successorWithPhis() if we haven't constructed this
  // information yet.
  if (pred->successorWithPhis()) {
    MOZ_ASSERT(pred->positionInPhiSuccessor() == predIndex);
    pred->clearSuccessorWithPhis();
    for (size_t j = predIndex + 1; j < numPredecessors(); j++) {
      getPredecessor(j)->setSuccessorWithPhis(this, j - 1);
    }
  }

  // Remove from pred list.
  predecessors_.erase(predecessors_.begin() + predIndex);
}

void MBasicBlock::removePredecessor(MBasicBlock* pred) {
  size_t predIndex = getPredecessorIndex(pred);

  // Remove the phi operands.
  for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
    iter->removeOperand(predIndex);
  }

  // Now we can call the underlying function, which expects that phi
  // operands have been removed.
  removePredecessorWithoutPhiOperands(pred, predIndex);
}

bool MBasicBlock::inheritPhisFromBackedge(MBasicBlock* backedge) {
  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  size_t stackDepth = entryResumePoint()->stackDepth();
  for (size_t slot = 0; slot < stackDepth; slot++) {
    // Get the value stack-slot of the back edge.
    MDefinition* exitDef = backedge->getSlot(slot);

    // Get the value of the loop header.
    MDefinition* loopDef = entryResumePoint()->getOperand(slot);
    if (loopDef->block() != this) {
      // If we are finishing a pending loop header, then we need to ensure
      // that all operands are phis. This is usually the case, except for
      // objects/arrays built with generators, in which case we share the
      // same allocations across all blocks.
      MOZ_ASSERT(loopDef->block()->id() < id());
      MOZ_ASSERT(loopDef == exitDef);
      continue;
    }

    // Phis are allocated by NewPendingLoopHeader.
    MPhi* entryDef = loopDef->toPhi();
    MOZ_ASSERT(entryDef->block() == this);

    if (entryDef == exitDef) {
      // If the exit def is the same as the entry def, make a redundant
      // phi. Since loop headers have exactly two incoming edges, we
      // know that that's just the first input.
      //
      // Note that we eliminate later rather than now, to avoid any
      // weirdness around pending continue edges which might still hold
      // onto phis.
      exitDef = entryDef->getOperand(0);
    }

    if (!entryDef->addInputSlow(exitDef)) {
      return false;
    }
  }

  return true;
}

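// If this block's only predecessor is also its immediate dominator and ends
// in an MTest which branches here on exactly one edge, return that test and
// report the taken direction through |pdirection|; otherwise return nullptr.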
MTest* MBasicBlock::immediateDominatorBranch(BranchDirection* pdirection) {
  *pdirection = FALSE_BRANCH;

  if (numPredecessors() != 1) {
    return nullptr;
  }

  MBasicBlock* dom = immediateDominator();
  if (dom != getPredecessor(0)) {
    return nullptr;
  }

  // Look for a trailing MTest branching to this block.
  MInstruction* ins = dom->lastIns();
  if (ins->isTest()) {
    MTest* test = ins->toTest();

    MOZ_ASSERT(test->ifTrue() == this || test->ifFalse() == this);
    if (test->ifTrue() == this && test->ifFalse() == this) {
      return nullptr;
    }

    *pdirection = (test->ifTrue() == this) ? TRUE_BRANCH : FALSE_BRANCH;
    return test;
  }

  return nullptr;
}

void MBasicBlock::dumpStack(GenericPrinter& out) {
#ifdef DEBUG
  out.printf(" %-3s %-16s %-6s %-10s\n", "#", "name", "copyOf", "first/next");
  out.printf("-------------------------------------------\n");
  for (uint32_t i = 0; i < stackPosition_; i++) {
    out.printf(" %-3u", i);
    out.printf(" %-16p\n", (void*)slots_[i]);
  }
#endif
}

void MBasicBlock::dumpStack() {
  Fprinter out(stderr);
  dumpStack(out);
  out.finish();
}

void MIRGraph::dump(GenericPrinter& out) {
#ifdef JS_JITSPEW
  for (MBasicBlockIterator iter(begin()); iter != end(); iter++) {
    iter->dump(out);
    out.printf("\n");
  }
#endif
}

void MIRGraph::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}

void MBasicBlock::dump(GenericPrinter& out) {
#ifdef JS_JITSPEW
  out.printf("block%u:%s%s%s\n", id(), isLoopHeader() ? " (loop header)" : "",
             unreachable() ? " (unreachable)" : "",
             isMarked() ? " (marked)" : "");
  if (MResumePoint* resume = entryResumePoint()) {
    resume->dump(out);
  }
  for (MPhiIterator iter(phisBegin()); iter != phisEnd(); iter++) {
    iter->dump(out);
  }
  for (MInstructionIterator iter(begin()); iter != end(); iter++) {
    iter->dump(out);
  }
#endif
}

void MBasicBlock::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}