/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/MIRGraph.h"

#include "jit/CompileInfo.h"
#include "jit/InlineScriptTree.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"

using namespace js;
using namespace js::jit;

MIRGenerator::MIRGenerator(CompileRealm* realm,
                           const JitCompileOptions& options,
                           TempAllocator* alloc, MIRGraph* graph,
                           const CompileInfo* info,
                           const OptimizationInfo* optimizationInfo)
    : realm(realm),
      runtime(realm ? realm->runtime() : nullptr),
      outerInfo_(info),
      optimizationInfo_(optimizationInfo),
      alloc_(alloc),
      graph_(graph),
      offThreadStatus_(Ok()),
      cancelBuild_(false),
      wasmMaxStackArgBytes_(0),
      needsOverrecursedCheck_(false),
      needsStaticStackAlignment_(false),
      instrumentedProfiling_(false),
      instrumentedProfilingIsCached_(false),
      stringsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateStrings()
                                   : false),
      bigIntsCanBeInNursery_(realm ? realm->zone()->canNurseryAllocateBigInts()
                                   : false),
      minWasmMemory0Length_(0),
      options(options),
      gs_(alloc) {}

bool MIRGenerator::licmEnabled() const {
  return optimizationInfo().licmEnabled() && !disableLICM_ &&
         !outerInfo().hadLICMInvalidation();
}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(AbortReason r) {
  if (JitSpewEnabled(JitSpew_IonAbort)) {
    switch (r) {
      case AbortReason::Alloc:
        JitSpew(JitSpew_IonAbort, "AbortReason::Alloc");
        break;
      case AbortReason::Disable:
        JitSpew(JitSpew_IonAbort, "AbortReason::Disable");
        break;
      case AbortReason::Error:
        JitSpew(JitSpew_IonAbort, "AbortReason::Error");
        break;
      case AbortReason::NoAbort:
        MOZ_CRASH("Abort with AbortReason::NoAbort");
        break;
    }
  }
  return Err(std::move(r));
}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abortFmt(
    AbortReason r, const char* message, va_list ap) {
  JitSpewVA(JitSpew_IonAbort, message, ap);
  return Err(std::move(r));
}

mozilla::GenericErrorResult<AbortReason> MIRGenerator::abort(
    AbortReason r, const char* message, ...) {
  va_list ap;
  va_start(ap, message);
  auto forward = abortFmt(r, message, ap);
  va_end(ap);
  return forward;
}

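// Illustrative only: a hypothetical call site for the formatted overload
// might look like
//
//   return abort(AbortReason::Disable, "unsupported case: %s", name);
//
// which spews the message under JitSpew_IonAbort via abortFmt() and
// propagates the reason as an error result.
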
void MIRGraph::addBlock(MBasicBlock* block) {
  MOZ_ASSERT(block);
  block->setId(blockIdGen_++);
  blocks_.pushBack(block);
  numBlocks_++;
}

void MIRGraph::insertBlockAfter(MBasicBlock* at, MBasicBlock* block) {
  block->setId(blockIdGen_++);
  blocks_.insertAfter(at, block);
  numBlocks_++;
}

void MIRGraph::insertBlockBefore(MBasicBlock* at, MBasicBlock* block) {
  block->setId(blockIdGen_++);
  blocks_.insertBefore(at, block);
  numBlocks_++;
}

void MIRGraph::removeBlock(MBasicBlock* block) {
  // Remove a block from the graph. This also cleans up the block.

  if (block == osrBlock_) {
    osrBlock_ = nullptr;
  }

  if (returnAccumulator_) {
    size_t i = 0;
    while (i < returnAccumulator_->length()) {
      if ((*returnAccumulator_)[i] == block) {
        returnAccumulator_->erase(returnAccumulator_->begin() + i);
      } else {
        i++;
      }
    }
  }

  block->clear();
  block->markAsDead();

  if (block->isInList()) {
    blocks_.remove(block);
    numBlocks_--;
  }
}

void MIRGraph::unmarkBlocks() {
  for (MBasicBlockIterator i(blocks_.begin()); i != blocks_.end(); i++) {
    i->unmark();
  }
}

MBasicBlock* MBasicBlock::New(MIRGraph& graph, size_t stackDepth,
                              const CompileInfo& info, MBasicBlock* maybePred,
                              BytecodeSite* site, Kind kind) {
  MOZ_ASSERT(site->pc() != nullptr);

  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), stackDepth, maybePred, 0)) {
    return nullptr;
  }

  return block;
}

MBasicBlock* MBasicBlock::NewPopN(MIRGraph& graph, const CompileInfo& info,
                                  MBasicBlock* pred, BytecodeSite* site,
                                  Kind kind, uint32_t popped) {
  MOZ_ASSERT(site->pc() != nullptr);

  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, popped)) {
    return nullptr;
  }

  return block;
}

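// Pending loop headers are created before their backedge is known:
// inherit() gives each stack slot a one-input phi (the input coming from
// the normal entry edge), and setBackedge() appends the second input once
// the backedge block has been built.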
MBasicBlock* MBasicBlock::NewPendingLoopHeader(MIRGraph& graph,
                                               const CompileInfo& info,
                                               MBasicBlock* pred,
                                               BytecodeSite* site) {
  MOZ_ASSERT(site->pc() != nullptr);

  MBasicBlock* block =
      new (graph.alloc()) MBasicBlock(graph, info, site, PENDING_LOOP_HEADER);
  if (!block->init()) {
    return nullptr;
  }

  if (!block->inherit(graph.alloc(), pred->stackDepth(), pred, 0)) {
    return nullptr;
  }

  return block;
}

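// NewSplitEdge() rewires a critical edge pred -> succ as
//
//   pred -> split -> succ
//
// where |split| ends in a lone MGoto. For Warp compilations the split
// block also gets a resume point copied from |succ|'s entry resume point,
// with phi operands narrowed to the values flowing along this edge.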
MBasicBlock* MBasicBlock::NewSplitEdge(MIRGraph& graph, MBasicBlock* pred,
                                       size_t predEdgeIdx, MBasicBlock* succ) {
  MBasicBlock* split = nullptr;
  if (!succ->pc()) {
    // The successor does not have a PC, this is a Wasm compilation.
    split = MBasicBlock::New(graph, succ->info(), pred, SPLIT_EDGE);
    if (!split) {
      return nullptr;
    }

    // Insert the split edge block in-between.
    split->end(MGoto::New(graph.alloc(), succ));
  } else {
    // The successor has a PC, this is a Warp compilation.
    MResumePoint* succEntry = succ->entryResumePoint();

    BytecodeSite* site =
        new (graph.alloc()) BytecodeSite(succ->trackedTree(), succEntry->pc());
    split =
        new (graph.alloc()) MBasicBlock(graph, succ->info(), site, SPLIT_EDGE);

    if (!split->init()) {
      return nullptr;
    }

    // A split edge is used to simplify the graph to avoid having a
    // predecessor with multiple successors as well as a successor with
    // multiple predecessors. As instructions can be moved in this
    // split-edge block, we need to give this block a resume point. To do
    // so, we copy the entry resume point of the successor and filter the
    // phis to keep inputs from the current edge.

    // Propagate the caller resume point from the inherited block.
    split->callerResumePoint_ = succ->callerResumePoint();

    // Split-edge blocks are created after the interpreter stack emulation.
    // Thus, there is no need for creating slots.
    split->stackPosition_ = succEntry->stackDepth();

    // Create a resume point using our initial stack position.
    MResumePoint* splitEntry = new (graph.alloc())
        MResumePoint(split, succEntry->pc(), ResumeMode::ResumeAt);
    if (!splitEntry->init(graph.alloc())) {
      return nullptr;
    }
    split->entryResumePoint_ = splitEntry;

    // Insert the split edge block in-between.
    split->end(MGoto::New(graph.alloc(), succ));

    // The target entry resume point might have phi operands, keep the
    // operands of the phi coming from our edge.
    size_t succEdgeIdx = succ->indexForPredecessor(pred);

    for (size_t i = 0, e = splitEntry->numOperands(); i < e; i++) {
      MDefinition* def = succEntry->getOperand(i);
      // This early in the pipeline, we have no recover instructions in
      // any entry resume point.
      if (def->block() == succ) {
        if (def->isPhi()) {
          def = def->toPhi()->getOperand(succEdgeIdx);
        } else {
          // The phi-operand may already have been optimized out.
          MOZ_ASSERT(def->isConstant());
          MOZ_ASSERT(def->type() == MIRType::MagicOptimizedOut);

          def = split->optimizedOutConstant(graph.alloc());
        }
      }

      splitEntry->initOperand(i, def);
    }

    // This is done by the New variant for wasm, so we cannot move this
    // line below, where the rest of the graph is modified.
    if (!split->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  split->setLoopDepth(succ->loopDepth());

  graph.insertBlockAfter(pred, split);

  pred->replaceSuccessor(predEdgeIdx, split);
  succ->replacePredecessor(pred, split);
  return split;
}

void MBasicBlock::moveToNewBlock(MInstruction* ins, MBasicBlock* dst) {
  MOZ_ASSERT(ins->block() == this);
  MOZ_ASSERT(!dst->hasLastIns());
  instructions_.remove(ins);
  ins->setInstructionBlock(dst, dst->trackedSite());
  if (MResumePoint* rp = ins->resumePoint()) {
    removeResumePoint(rp);
    dst->addResumePoint(rp);
    rp->setBlock(dst);
  }
  dst->instructions_.pushBack(ins);
}

void MBasicBlock::moveOuterResumePointTo(MBasicBlock* dest) {
  if (MResumePoint* outer = outerResumePoint()) {
    removeResumePoint(outer);
    outerResumePoint_ = nullptr;
    dest->setOuterResumePoint(outer);
    dest->addResumePoint(outer);
    outer->setBlock(dest);
  }
}

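// wrapInstructionInFastpath() carves the current block into a diamond
// around |ins|:
//
//            this (ends with MTest on |condition|)
//            /                       \
//     fastpathBlock          slowpathBlock (receives |ins|)
//            \                       /
//             join (phi of |fastpath| and |ins|, followed by all
//                   instructions that used to come after |ins|)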
bool MBasicBlock::wrapInstructionInFastpath(MInstruction* ins,
                                            MInstruction* fastpath,
                                            MInstruction* condition) {
  MOZ_ASSERT(ins->block() == this);
  MOZ_ASSERT(!ins->isControlInstruction());

  MInstructionIterator rest(begin(ins));
  rest++;

  MResumePoint* resumeBeforeIns = activeResumePoint(ins);
  MResumePoint* resumeAfterIns = activeResumePoint(*rest);

  // Create the join block.
  MBasicBlock* join = MBasicBlock::NewInternal(graph_, this, resumeAfterIns);
  if (!join) {
    return false;
  }

  // Update the successors of the current block.
  for (uint32_t i = 0; i < numSuccessors(); i++) {
    getSuccessor(i)->replacePredecessor(this, join);
  }
  if (successorWithPhis()) {
    join->setSuccessorWithPhis(successorWithPhis(), positionInPhiSuccessor());
    clearSuccessorWithPhis();
  }

  // Move all instructions after |ins| into the join block.
  while (rest != end()) {
    MInstruction* ins = *rest++;
    moveToNewBlock(ins, join);
  }
  MOZ_ASSERT(!hasLastIns());
  MOZ_ASSERT(join->hasLastIns());

  graph_.insertBlockAfter(this, join);

  // Create the fast path block.
  MBasicBlock* fastpathBlock =
      MBasicBlock::NewInternal(graph_, this, resumeBeforeIns);
  if (!fastpathBlock) {
    return false;
  }
  graph_.insertBlockAfter(this, fastpathBlock);
  fastpathBlock->add(fastpath);
  fastpathBlock->end(MGoto::New(graph_.alloc(), join));

  // Create the slowpath block.
  MBasicBlock* slowpathBlock =
      MBasicBlock::NewInternal(graph_, this, resumeBeforeIns);
  if (!slowpathBlock) {
    return false;
  }
  graph_.insertBlockAfter(fastpathBlock, slowpathBlock);
  moveToNewBlock(ins, slowpathBlock);
  slowpathBlock->end(MGoto::New(graph_.alloc(), join));

  // Connect current block to fastpath and slowpath.
  add(condition);
  end(MTest::New(graph_.alloc(), condition, fastpathBlock, slowpathBlock));

  // Update predecessors.
  if (!fastpathBlock->addPredecessorWithoutPhis(this) ||
      !slowpathBlock->addPredecessorWithoutPhis(this) ||
      !join->addPredecessorWithoutPhis(fastpathBlock) ||
      !join->addPredecessorWithoutPhis(slowpathBlock)) {
    return false;
  }

  if (ins->hasUses()) {
    // Insert phi.
    MPhi* phi = MPhi::New(graph_.alloc());
    if (!phi->reserveLength(2)) {
      return false;
    }
    phi->addInput(fastpath);
    fastpathBlock->setSuccessorWithPhis(join, 0);
    phi->addInput(ins);
    slowpathBlock->setSuccessorWithPhis(join, 1);
    join->addPhi(phi);

    for (MUseIterator i(ins->usesBegin()), e(ins->usesEnd()); i != e;) {
      MUse* use = *i++;
      if (use->consumer() != phi && use->consumer() != ins->resumePoint()) {
        use->replaceProducer(phi);
      }
    }
  }

  moveOuterResumePointTo(join);

  return true;
}

MBasicBlock* MBasicBlock::NewInternal(MIRGraph& graph, MBasicBlock* orig,
                                      MResumePoint* resumePoint) {
  jsbytecode* pc = IsResumeAfter(resumePoint->mode())
                       ? GetNextPc(resumePoint->pc())
                       : resumePoint->pc();

  BytecodeSite* site =
      new (graph.alloc()) BytecodeSite(orig->trackedTree(), pc);
  MBasicBlock* block =
      new (graph.alloc()) MBasicBlock(graph, orig->info(), site, INTERNAL);
  if (!block->init()) {
    return nullptr;
  }

  // Propagate the caller resume point from the original block.
  block->callerResumePoint_ = orig->callerResumePoint();

  // Copy the resume point.
  block->stackPosition_ = resumePoint->stackDepth();
  MResumePoint* entryResumePoint =
      new (graph.alloc()) MResumePoint(block, pc, ResumeMode::ResumeAt);
  if (!entryResumePoint->init(graph.alloc())) {
    return nullptr;
  }
  for (size_t i = 0; i < resumePoint->stackDepth(); i++) {
    entryResumePoint->initOperand(i, resumePoint->getOperand(i));
  }
  block->entryResumePoint_ = entryResumePoint;

  block->setLoopDepth(orig->loopDepth());

  return block;
}

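// The New() overload below builds blocks without a bytecode pc (a
// default-constructed BytecodeSite), which is how NewSplitEdge() above
// tells wasm blocks apart from Warp blocks. For pending loop headers it
// recycles MPhi storage from the graph's phi free list before allocating
// a fresh array.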
MBasicBlock* MBasicBlock::New(MIRGraph& graph, const CompileInfo& info,
                              MBasicBlock* pred, Kind kind) {
  BytecodeSite* site = new (graph.alloc()) BytecodeSite();
  MBasicBlock* block = new (graph.alloc()) MBasicBlock(graph, info, site, kind);
  if (!block->init()) {
    return nullptr;
  }

  if (pred) {
    block->stackPosition_ = pred->stackPosition_;

    if (block->kind_ == PENDING_LOOP_HEADER) {
      size_t nphis = block->stackPosition_;

      size_t nfree = graph.phiFreeListLength();

      TempAllocator& alloc = graph.alloc();
      MPhi* phis = nullptr;
      if (nphis > nfree) {
        phis = alloc.allocateArray<MPhi>(nphis - nfree);
        if (!phis) {
          return nullptr;
        }
      }

      // Note: Phis are inserted in the same order as the slots.
      for (size_t i = 0; i < nphis; i++) {
        MDefinition* predSlot = pred->getSlot(i);

        MOZ_ASSERT(predSlot->type() != MIRType::Value);

        MPhi* phi;
        if (i < nfree) {
          phi = graph.takePhiFromFreeList();
        } else {
          phi = phis + (i - nfree);
        }
        new (phi) MPhi(alloc, predSlot->type());

        phi->addInlineInput(predSlot);

        // Append the phi to the block.
        block->addPhi(phi);
        block->setSlot(i, phi);
      }
    } else {
      if (!block->ensureHasSlots(0)) {
        return nullptr;
      }
      block->copySlots(pred);
    }

    if (!block->predecessors_.append(pred)) {
      return nullptr;
    }
  }

  return block;
}

// Create an empty and unreachable block which jumps to |header|. Used
// when the normal entry into a loop is removed (but the loop is still
// reachable due to OSR) to preserve the invariant that every loop
// header has two predecessors, which is needed for building the
// dominator tree. The new block is inserted immediately before the
// header, which preserves the graph ordering (post-order/RPO). These
// blocks will all be removed before lowering.
MBasicBlock* MBasicBlock::NewFakeLoopPredecessor(MIRGraph& graph,
                                                 MBasicBlock* header) {
  MOZ_ASSERT(graph.osrBlock());

  MBasicBlock* backedge = header->backedge();
  MBasicBlock* fake = MBasicBlock::New(graph, header->info(), nullptr,
                                       MBasicBlock::FAKE_LOOP_PRED);
  if (!fake) {
    return nullptr;
  }

  graph.insertBlockBefore(header, fake);
  fake->setUnreachable();

  // Create fake defs to use as inputs for any phis in |header|.
  for (MPhiIterator iter(header->phisBegin()), end(header->phisEnd());
       iter != end; ++iter) {
    if (!graph.alloc().ensureBallast()) {
      return nullptr;
    }
    MPhi* phi = *iter;
    auto* fakeDef = MUnreachableResult::New(graph.alloc(), phi->type());
    fake->add(fakeDef);
    if (!phi->addInputSlow(fakeDef)) {
      return nullptr;
    }
  }

  fake->end(MGoto::New(graph.alloc(), header));

  if (!header->addPredecessorWithoutPhis(fake)) {
    return nullptr;
  }

  // The backedge is always the last predecessor, but we have added a
  // new pred. Restore |backedge| as |header|'s loop backedge.
  header->clearLoopHeader();
  header->setLoopHeader(backedge);

  return fake;
}

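// removeFakeLoopPredecessors() is the cleanup pass paired with
// NewFakeLoopPredecessor() above: it unlinks every FAKE_LOOP_PRED block
// and renumbers the surviving blocks in RPO, and must run before anything
// that assumes all predecessors are real (e.g. lowering).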
void MIRGraph::removeFakeLoopPredecessors() {
  MOZ_ASSERT(osrBlock());
  size_t id = 0;
  for (ReversePostorderIterator it = rpoBegin(); it != rpoEnd();) {
    MBasicBlock* block = *it++;
    if (block->isFakeLoopPred()) {
      MOZ_ASSERT(block->unreachable());
      MBasicBlock* succ = block->getSingleSuccessor();
      succ->removePredecessor(block);
      removeBlock(block);
    } else {
      block->setId(id++);
    }
  }
#ifdef DEBUG
  canBuildDominators_ = false;
#endif
}

MBasicBlock::MBasicBlock(MIRGraph& graph, const CompileInfo& info,
                         BytecodeSite* site, Kind kind)
    : graph_(graph),
      info_(info),
      predecessors_(graph.alloc()),
      stackPosition_(info_.firstStackSlot()),
      id_(0),
      domIndex_(0),
      numDominated_(0),
      lir_(nullptr),
      callerResumePoint_(nullptr),
      entryResumePoint_(nullptr),
      outerResumePoint_(nullptr),
      successorWithPhis_(nullptr),
      positionInPhiSuccessor_(0),
      loopDepth_(0),
      kind_(kind),
      mark_(false),
      immediatelyDominated_(graph.alloc()),
      immediateDominator_(nullptr),
      trackedSite_(site) {
  MOZ_ASSERT(trackedSite_, "trackedSite_ is non-nullptr");
}

bool MBasicBlock::init() { return slots_.init(graph_.alloc(), info_.nslots()); }

bool MBasicBlock::increaseSlots(size_t num) {
  return slots_.growBy(graph_.alloc(), num);
}

bool MBasicBlock::ensureHasSlots(size_t num) {
  size_t depth = stackDepth() + num;
  if (depth > nslots()) {
    if (!increaseSlots(depth - nslots())) {
      return false;
    }
  }
  return true;
}

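// Worked example: with stackDepth() == 10 and nslots() == 12,
// ensureHasSlots(5) needs a depth of 15 and grows the slot vector by 3,
// while ensureHasSlots(2) already fits and allocates nothing.
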
void MBasicBlock::copySlots(MBasicBlock* from) {
  MOZ_ASSERT(stackPosition_ <= from->stackPosition_);
  MOZ_ASSERT(stackPosition_ <= nslots());

  MDefinition** thisSlots = slots_.begin();
  MDefinition** fromSlots = from->slots_.begin();
  for (size_t i = 0, e = stackPosition_; i < e; ++i) {
    thisSlots[i] = fromSlots[i];
  }
}

bool MBasicBlock::inherit(TempAllocator& alloc, size_t stackDepth,
                          MBasicBlock* maybePred, uint32_t popped) {
  MOZ_ASSERT_IF(maybePred, maybePred->stackDepth() == stackDepth);

  MOZ_ASSERT(stackDepth >= popped);
  stackDepth -= popped;
  stackPosition_ = stackDepth;

  if (maybePred && kind_ != PENDING_LOOP_HEADER) {
    copySlots(maybePred);
  }

  MOZ_ASSERT(info_.nslots() >= stackPosition_);
  MOZ_ASSERT(!entryResumePoint_);

  // Propagate the caller resume point from the inherited block.
  callerResumePoint_ = maybePred ? maybePred->callerResumePoint() : nullptr;

  // Create a resume point using our initial stack state.
  entryResumePoint_ =
      new (alloc) MResumePoint(this, pc(), ResumeMode::ResumeAt);
  if (!entryResumePoint_->init(alloc)) {
    return false;
  }

  if (maybePred) {
    if (!predecessors_.append(maybePred)) {
      return false;
    }

    if (kind_ == PENDING_LOOP_HEADER) {
      for (size_t i = 0; i < stackDepth; i++) {
        MPhi* phi = MPhi::New(alloc.fallible());
        if (!phi) {
          return false;
        }
        phi->addInlineInput(maybePred->getSlot(i));
        addPhi(phi);
        setSlot(i, phi);
        entryResumePoint()->initOperand(i, phi);
      }
    } else {
      for (size_t i = 0; i < stackDepth; i++) {
        entryResumePoint()->initOperand(i, getSlot(i));
      }
    }
  } else {
    /*
     * Don't leave the operands uninitialized for the caller, as it may not
     * initialize them later on.
     */
    for (size_t i = 0; i < stackDepth; i++) {
      entryResumePoint()->clearOperand(i);
    }
  }

  return true;
}

void MBasicBlock::inheritSlots(MBasicBlock* parent) {
  stackPosition_ = parent->stackPosition_;
  copySlots(parent);
}

bool MBasicBlock::initEntrySlots(TempAllocator& alloc) {
  // Remove the previous resume point.
  discardResumePoint(entryResumePoint_);

  // Create a resume point using our initial stack state.
  entryResumePoint_ =
      MResumePoint::New(alloc, this, pc(), ResumeMode::ResumeAt);
  if (!entryResumePoint_) {
    return false;
  }
  return true;
}

MDefinition* MBasicBlock::environmentChain() {
  return getSlot(info().environmentChainSlot());
}

MDefinition* MBasicBlock::argumentsObject() {
  return getSlot(info().argsObjSlot());
}

void MBasicBlock::setEnvironmentChain(MDefinition* scopeObj) {
  setSlot(info().environmentChainSlot(), scopeObj);
}

void MBasicBlock::setArgumentsObject(MDefinition* argsObj) {
  setSlot(info().argsObjSlot(), argsObj);
}

void MBasicBlock::pick(int32_t depth) {
  // pick takes a value and moves it to the top.
  // pick(-2):
  //   A B C D E
  //   A B D C E [ swapAt(-2) ]
  //   A B D E C [ swapAt(-1) ]
  for (; depth < 0; depth++) {
    swapAt(depth);
  }
}

void MBasicBlock::unpick(int32_t depth) {
  // unpick takes the value on top of the stack and moves it under the
  // depth-th element;
  // unpick(-2):
  //   A B C D E
  //   A B C E D [ swapAt(-1) ]
  //   A B E C D [ swapAt(-2) ]
  for (int32_t n = -1; n >= depth; n--) {
    swapAt(n);
  }
}

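// swapAt() exchanges the two slots ending at the given (negative) depth:
// with stackPosition_ == 5, swapAt(-1) swaps slots_[3] and slots_[4]
// (the two topmost values), and swapAt(-2) swaps slots_[2] and slots_[3].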
void MBasicBlock::swapAt(int32_t depth) {
  uint32_t lhsDepth = stackPosition_ + depth - 1;
  uint32_t rhsDepth = stackPosition_ + depth;

  MDefinition* temp = slots_[lhsDepth];
  slots_[lhsDepth] = slots_[rhsDepth];
  slots_[rhsDepth] = temp;
}

void MBasicBlock::discardLastIns() { discard(lastIns()); }

MConstant* MBasicBlock::optimizedOutConstant(TempAllocator& alloc) {
  // If the first instruction is a MConstant(MagicValue(JS_OPTIMIZED_OUT))
  // then reuse it.
  MInstruction* ins = *begin();
  if (ins->type() == MIRType::MagicOptimizedOut) {
    return ins->toConstant();
  }

  MConstant* constant = MConstant::New(alloc, MagicValue(JS_OPTIMIZED_OUT));
  insertBefore(ins, constant);
  return constant;
}

void MBasicBlock::moveBefore(MInstruction* at, MInstruction* ins) {
  // Remove |ins| from the current block.
  MOZ_ASSERT(ins->block() == this);
  instructions_.remove(ins);

  // Insert into new block, which may be distinct.
  // Uses and operands are untouched.
  ins->setInstructionBlock(at->block(), at->trackedSite());
  at->block()->instructions_.insertBefore(at, ins);
}

MInstruction* MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore) {
  MOZ_ASSERT(graph().osrBlock() != this,
             "We are not supposed to add any instruction in OSR blocks.");

  // Beta nodes and interrupt checks are required to be located at the
  // beginnings of basic blocks, so we must insert new instructions after any
  // such instructions.
  MInstructionIterator insertIter =
      !ins || ins->isPhi() ? begin() : begin(ins->toInstruction());
  while (insertIter->isBeta() || insertIter->isInterruptCheck() ||
         insertIter->isConstant() || insertIter->isParameter() ||
         (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout())) {
    insertIter++;
  }

  return *insertIter;
}

void MBasicBlock::discardResumePoint(
    MResumePoint* rp, ReferencesType refType /* = RefType_Default */) {
  if (refType & RefType_DiscardOperands) {
    rp->releaseUses();
  }
  rp->setDiscarded();
  removeResumePoint(rp);
}

void MBasicBlock::removeResumePoint(MResumePoint* rp) {
#ifdef DEBUG
  MResumePointIterator iter = resumePointsBegin();
  while (*iter != rp) {
    // We should reach it before reaching the end.
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
  }
  resumePoints_.removeAt(iter);
#endif
}

void MBasicBlock::prepareForDiscard(
    MInstruction* ins, ReferencesType refType /* = RefType_Default */) {
  // Only remove instructions from the same basic block. This is needed for
  // correctly removing the resume point if any.
  MOZ_ASSERT(ins->block() == this);

  MResumePoint* rp = ins->resumePoint();
  if ((refType & RefType_DiscardResumePoint) && rp) {
    discardResumePoint(rp, refType);
  }

  // We need to assert that instructions have no uses after removing their
  // resume point operands, as they could be captured by their own resume
  // point.
  MOZ_ASSERT_IF(refType & RefType_AssertNoUses, !ins->hasUses());

  const uint32_t InstructionOperands =
      RefType_DiscardOperands | RefType_DiscardInstruction;
  if ((refType & InstructionOperands) == InstructionOperands) {
    for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
      ins->releaseOperand(i);
    }
  }

  ins->setDiscarded();
}

void MBasicBlock::discard(MInstruction* ins) {
  prepareForDiscard(ins);
  instructions_.remove(ins);
}

void MBasicBlock::discardIgnoreOperands(MInstruction* ins) {
#ifdef DEBUG
  for (size_t i = 0, e = ins->numOperands(); i < e; i++) {
    MOZ_ASSERT(!ins->hasOperand(i));
  }
#endif

  prepareForDiscard(ins, RefType_IgnoreOperands);
  instructions_.remove(ins);
}

void MBasicBlock::discardAllInstructions() {
  MInstructionIterator iter = begin();
  discardAllInstructionsStartingAt(iter);
}

void MBasicBlock::discardAllInstructionsStartingAt(MInstructionIterator iter) {
  while (iter != end()) {
    // Discard operands and resume point operands and flag the instruction
    // as discarded. Also we do not assert that we have no uses as blocks
    // might be removed in reverse post order.
    MInstruction* ins = *iter++;
    prepareForDiscard(ins, RefType_DefaultNoAssert);
    instructions_.remove(ins);
  }
}

void MBasicBlock::discardAllPhis() {
  for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
    iter->removeAllOperands();
  }

  for (MBasicBlock** pred = predecessors_.begin(); pred != predecessors_.end();
       pred++) {
    (*pred)->clearSuccessorWithPhis();
  }

  phis_.clear();
}

void MBasicBlock::discardAllResumePoints(bool discardEntry) {
  if (outerResumePoint_) {
    clearOuterResumePoint();
  }

  if (discardEntry && entryResumePoint_) {
    clearEntryResumePoint();
  }

#ifdef DEBUG
  if (!entryResumePoint()) {
    MOZ_ASSERT(resumePointsEmpty());
  } else {
    MResumePointIterator iter(resumePointsBegin());
    MOZ_ASSERT(iter != resumePointsEnd());
    iter++;
    MOZ_ASSERT(iter == resumePointsEnd());
  }
#endif
}

void MBasicBlock::clear() {
  discardAllInstructions();
  discardAllResumePoints();
  discardAllPhis();
}

void MBasicBlock::insertBefore(MInstruction* at, MInstruction* ins) {
  MOZ_ASSERT(at->block() == this);
  ins->setInstructionBlock(this, at->trackedSite());
  graph().allocDefinitionId(ins);
  instructions_.insertBefore(at, ins);
}

void MBasicBlock::insertAfter(MInstruction* at, MInstruction* ins) {
  MOZ_ASSERT(at->block() == this);
  ins->setInstructionBlock(this, at->trackedSite());
  graph().allocDefinitionId(ins);
  instructions_.insertAfter(at, ins);
}

void MBasicBlock::insertAtEnd(MInstruction* ins) {
  if (hasLastIns()) {
    insertBefore(lastIns(), ins);
  } else {
    add(ins);
  }
}

void MBasicBlock::addPhi(MPhi* phi) {
  phis_.pushBack(phi);
  phi->setPhiBlock(this);
  graph().allocDefinitionId(phi);
}

void MBasicBlock::discardPhi(MPhi* phi) {
  MOZ_ASSERT(!phis_.empty());

  phi->removeAllOperands();
  phi->setDiscarded();

  phis_.remove(phi);

  if (phis_.empty()) {
    for (MBasicBlock* pred : predecessors_) {
      pred->clearSuccessorWithPhis();
    }
  }
}

MResumePoint* MBasicBlock::activeResumePoint(MInstruction* ins) {
  for (MInstructionReverseIterator iter = rbegin(ins); iter != rend(); iter++) {
    if (iter->resumePoint() && *iter != ins) {
      return iter->resumePoint();
    }
  }

  // If none, take the entry resume point.
  return entryResumePoint();
}

void MBasicBlock::flagOperandsOfPrunedBranches(MInstruction* ins) {
  MResumePoint* rp = activeResumePoint(ins);

  // The only blocks which do not have any entryResumePoint in Ion are the
  // SplitEdge blocks. SplitEdge blocks only have a Goto instruction before
  // the Range Analysis phase. In adjustInputs, we are manipulating
  // instructions which have a TypePolicy. So, as a Goto has no operands and
  // no type policy, the entry resume point should exist.
  MOZ_ASSERT(rp);

  // Flag all operands as being potentially used.
  while (rp) {
    for (size_t i = 0, end = rp->numOperands(); i < end; i++) {
      rp->getOperand(i)->setImplicitlyUsedUnchecked();
    }
    rp = rp->caller();
  }
}

bool MBasicBlock::addPredecessor(TempAllocator& alloc, MBasicBlock* pred) {
  return addPredecessorPopN(alloc, pred, 0);
}

bool MBasicBlock::addPredecessorPopN(TempAllocator& alloc, MBasicBlock* pred,
                                     uint32_t popped) {
  MOZ_ASSERT(pred);
  MOZ_ASSERT(predecessors_.length() > 0);

  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(pred->stackPosition_ == stackPosition_ + popped);

  for (uint32_t i = 0, e = stackPosition_; i < e; ++i) {
    MDefinition* mine = getSlot(i);
    MDefinition* other = pred->getSlot(i);

    if (mine != other) {
      MIRType phiType = mine->type();
      if (phiType != other->type()) {
        phiType = MIRType::Value;
      }

      // If the current instruction is a phi, and it was created in this
      // basic block, then we have already placed this phi and should
      // instead append to its operands.
      if (mine->isPhi() && mine->block() == this) {
        MOZ_ASSERT(predecessors_.length());
        MOZ_ASSERT(!mine->hasDefUses(),
                   "should only change type of newly created phis");
        mine->setResultType(phiType);
        if (!mine->toPhi()->addInputSlow(other)) {
          return false;
        }
      } else {
        // Otherwise, create a new phi node.
        MPhi* phi = MPhi::New(alloc.fallible(), phiType);
        if (!phi) {
          return false;
        }
        addPhi(phi);

        // Prime the phi for each predecessor, so input(x) comes from
        // predecessor(x).
        if (!phi->reserveLength(predecessors_.length() + 1)) {
          return false;
        }

        for (size_t j = 0, numPreds = predecessors_.length(); j < numPreds;
             ++j) {
          MOZ_ASSERT(predecessors_[j]->getSlot(i) == mine);
          phi->addInput(mine);
        }
        phi->addInput(other);

        setSlot(i, phi);
        if (entryResumePoint()) {
          entryResumePoint()->replaceOperand(i, phi);
        }
      }
    }
  }

  return predecessors_.append(pred);
}

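// Worked example for addPredecessorPopN(): if this block has a single
// predecessor P0 and we add P1, and slot i holds A here (from P0) but B
// in P1, a fresh phi gets input(0) = A and input(1) = B, keeping
// input(x) aligned with predecessor(x); the entry resume point's operand
// i is then redirected to that phi.
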
bool MBasicBlock::addPredecessorSameInputsAs(MBasicBlock* pred,
                                             MBasicBlock* existingPred) {
  MOZ_ASSERT(pred);
  MOZ_ASSERT(predecessors_.length() > 0);

  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(!pred->successorWithPhis());

  if (!phisEmpty()) {
    size_t existingPosition = indexForPredecessor(existingPred);
    for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
      if (!iter->addInputSlow(iter->getOperand(existingPosition))) {
        return false;
      }
    }
  }

  if (!predecessors_.append(pred)) {
    return false;
  }
  return true;
}

bool MBasicBlock::addPredecessorWithoutPhis(MBasicBlock* pred) {
  // Predecessors must be finished.
  MOZ_ASSERT(pred && pred->hasLastIns());
  return predecessors_.append(pred);
}

bool MBasicBlock::addImmediatelyDominatedBlock(MBasicBlock* child) {
  return immediatelyDominated_.append(child);
}

void MBasicBlock::removeImmediatelyDominatedBlock(MBasicBlock* child) {
  for (size_t i = 0;; ++i) {
    MOZ_ASSERT(i < immediatelyDominated_.length(),
               "Dominated block to remove not present");
    if (immediatelyDominated_[i] == child) {
      immediatelyDominated_[i] = immediatelyDominated_.back();
      immediatelyDominated_.popBack();
      return;
    }
  }
}

bool MBasicBlock::setBackedge(MBasicBlock* pred) {
  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(hasLastIns());
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(pred->stackDepth() == entryResumePoint()->stackDepth());

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  // Add exit definitions to each corresponding phi at the entry.
  if (!inheritPhisFromBackedge(pred)) {
    return false;
  }

  // We are now a loop header proper
  kind_ = LOOP_HEADER;

  return predecessors_.append(pred);
}

bool MBasicBlock::setBackedgeWasm(MBasicBlock* pred, size_t paramCount) {
  // Predecessors must be finished, and at the correct stack depth.
  MOZ_ASSERT(hasLastIns());
  MOZ_ASSERT(pred->hasLastIns());
  MOZ_ASSERT(stackDepth() + paramCount == pred->stackDepth());

  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  // Add exit definitions to each corresponding phi at the entry.
  // Note: Phis are inserted in the same order as the slots. (see
  // MBasicBlock::New)
  size_t slot = 0;
  for (MPhiIterator phi = phisBegin(); phi != phisEnd(); phi++, slot++) {
    MPhi* entryDef = *phi;
    MDefinition* exitDef = pred->getSlot(slot);

    // Assert that we already placed phis for each slot.
    MOZ_ASSERT(entryDef->block() == this);

    // Assert that the phi already has the correct type.
    MOZ_ASSERT(entryDef->type() == exitDef->type());
    MOZ_ASSERT(entryDef->type() != MIRType::Value);

    if (entryDef == exitDef) {
      // If the exit def is the same as the entry def, make a redundant
      // phi. Since loop headers have exactly two incoming edges, we
      // know that that's just the first input.
      //
      // Note that we eliminate later rather than now, to avoid any
      // weirdness around pending continue edges which might still hold
      // onto phis.
      exitDef = entryDef->getOperand(0);
    }

    // Phis always have room for 2 operands, so this can't fail.
    MOZ_ASSERT(phi->numOperands() == 1);
    entryDef->addInlineInput(exitDef);

    // Two cases here: phis that correspond to locals, and phis that
    // correspond to loop parameters. Only phis for locals go in slots.
    if (slot < stackDepth()) {
      setSlot(slot, entryDef);
    }
  }

  // We are now a loop header proper
  kind_ = LOOP_HEADER;

  return predecessors_.append(pred);
}

void MBasicBlock::clearLoopHeader() {
  MOZ_ASSERT(isLoopHeader());
  kind_ = NORMAL;
}

void MBasicBlock::setLoopHeader(MBasicBlock* newBackedge) {
  MOZ_ASSERT(!isLoopHeader());
  kind_ = LOOP_HEADER;

  size_t numPreds = numPredecessors();
  MOZ_ASSERT(numPreds != 0);

  size_t lastIndex = numPreds - 1;
  size_t oldIndex = 0;
  for (;; ++oldIndex) {
    MOZ_ASSERT(oldIndex < numPreds);
    MBasicBlock* pred = getPredecessor(oldIndex);
    if (pred == newBackedge) {
      break;
    }
  }

  // Set the loop backedge to be the last element in predecessors_.
  std::swap(predecessors_[oldIndex], predecessors_[lastIndex]);

  // If we have phis, reorder their operands accordingly.
  if (!phisEmpty()) {
    getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
    getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
    for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
      MPhi* phi = *iter;
      MDefinition* last = phi->getOperand(oldIndex);
      MDefinition* old = phi->getOperand(lastIndex);
      phi->replaceOperand(oldIndex, old);
      phi->replaceOperand(lastIndex, last);
    }
  }

  MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
  MOZ_ASSERT(backedge() == newBackedge);
}

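// Worked example for setLoopHeader(): with predecessors [A, B, C] and B
// the new backedge, B is swapped into the last position, giving
// [A, C, B], and every phi swaps its operands at those two indices so
// that operand order keeps matching predecessor order.
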
size_t MBasicBlock::getSuccessorIndex(MBasicBlock* block) const {
  MOZ_ASSERT(lastIns());
  for (size_t i = 0; i < numSuccessors(); i++) {
    if (getSuccessor(i) == block) {
      return i;
    }
  }
  MOZ_CRASH("Invalid successor");
}

size_t MBasicBlock::getPredecessorIndex(MBasicBlock* block) const {
  for (size_t i = 0, e = numPredecessors(); i < e; ++i) {
    if (getPredecessor(i) == block) {
      return i;
    }
  }
  MOZ_CRASH("Invalid predecessor");
}

void MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock* split) {
  MOZ_ASSERT(lastIns());

  // Note, during split-critical-edges, successors-with-phis is not yet set.
  // During PAA, this case is handled before we enter.
  MOZ_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));

  lastIns()->replaceSuccessor(pos, split);
}

void MBasicBlock::replacePredecessor(MBasicBlock* old, MBasicBlock* split) {
  for (size_t i = 0; i < numPredecessors(); i++) {
    if (getPredecessor(i) == old) {
      predecessors_[i] = split;

#ifdef DEBUG
      // The same block should not appear twice in the predecessor list.
      for (size_t j = i; j < numPredecessors(); j++) {
        MOZ_ASSERT(predecessors_[j] != old);
      }
#endif

      return;
    }
  }

  MOZ_CRASH("predecessor was not found");
}

void MBasicBlock::clearDominatorInfo() {
  setImmediateDominator(nullptr);
  immediatelyDominated_.clear();
  numDominated_ = 0;
}

void MBasicBlock::removePredecessorWithoutPhiOperands(MBasicBlock* pred,
                                                      size_t predIndex) {
  // If we're removing the last backedge, this is no longer a loop.
  if (isLoopHeader() && hasUniqueBackedge() && backedge() == pred) {
    clearLoopHeader();
  }

  // Adjust phis. Note that this can leave redundant phis behind.
  // Don't adjust successorWithPhis() if we haven't constructed this
  // information yet.
  if (pred->successorWithPhis()) {
    MOZ_ASSERT(pred->positionInPhiSuccessor() == predIndex);
    pred->clearSuccessorWithPhis();
    for (size_t j = predIndex + 1; j < numPredecessors(); j++) {
      getPredecessor(j)->setSuccessorWithPhis(this, j - 1);
    }
  }

  // Remove from pred list.
  predecessors_.erase(predecessors_.begin() + predIndex);
}

void MBasicBlock::removePredecessor(MBasicBlock* pred) {
  size_t predIndex = getPredecessorIndex(pred);

  // Remove the phi operands.
  for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
    iter->removeOperand(predIndex);
  }

  // Now we can call the underlying function, which expects that phi
  // operands have been removed.
  removePredecessorWithoutPhiOperands(pred, predIndex);
}

bool MBasicBlock::inheritPhisFromBackedge(MBasicBlock* backedge) {
  // We must be a pending loop header
  MOZ_ASSERT(kind_ == PENDING_LOOP_HEADER);

  size_t stackDepth = entryResumePoint()->stackDepth();
  for (size_t slot = 0; slot < stackDepth; slot++) {
    // Get the value stack-slot of the back edge.
    MDefinition* exitDef = backedge->getSlot(slot);

    // Get the value of the loop header.
    MDefinition* loopDef = entryResumePoint()->getOperand(slot);
    if (loopDef->block() != this) {
      // If we are finishing a pending loop header, then we need to ensure
      // that all operands are phis. This is usually the case, except for
      // objects/arrays built with generators, in which case we share the
      // same allocations across all blocks.
      MOZ_ASSERT(loopDef->block()->id() < id());
      MOZ_ASSERT(loopDef == exitDef);
      continue;
    }

    // Phis are allocated by NewPendingLoopHeader.
    MPhi* entryDef = loopDef->toPhi();
    MOZ_ASSERT(entryDef->block() == this);

    if (entryDef == exitDef) {
      // If the exit def is the same as the entry def, make a redundant
      // phi. Since loop headers have exactly two incoming edges, we
      // know that that's just the first input.
      //
      // Note that we eliminate later rather than now, to avoid any
      // weirdness around pending continue edges which might still hold
      // onto phis.
      exitDef = entryDef->getOperand(0);
    }

    if (!entryDef->addInputSlow(exitDef)) {
      return false;
    }
  }

  return true;
}

MTest* MBasicBlock::immediateDominatorBranch(BranchDirection* pdirection) {
  *pdirection = FALSE_BRANCH;

  if (numPredecessors() != 1) {
    return nullptr;
  }

  MBasicBlock* dom = immediateDominator();
  if (dom != getPredecessor(0)) {
    return nullptr;
  }

  // Look for a trailing MTest branching to this block.
  MInstruction* ins = dom->lastIns();
  if (ins->isTest()) {
    MTest* test = ins->toTest();

    MOZ_ASSERT(test->ifTrue() == this || test->ifFalse() == this);
    if (test->ifTrue() == this && test->ifFalse() == this) {
      return nullptr;
    }

    *pdirection = (test->ifTrue() == this) ? TRUE_BRANCH : FALSE_BRANCH;
    return test;
  }

  return nullptr;
}

void MBasicBlock::dumpStack(GenericPrinter& out) {
#ifdef DEBUG
  out.printf(" %-3s %-16s %-6s %-10s\n", "#", "name", "copyOf", "first/next");
  out.printf("-------------------------------------------\n");
  for (uint32_t i = 0; i < stackPosition_; i++) {
    out.printf(" %-3u", i);
    out.printf(" %-16p\n", (void*)slots_[i]);
  }
#endif
}

void MBasicBlock::dumpStack() {
  Fprinter out(stderr);
  dumpStack(out);
  out.finish();
}

void MIRGraph::dump(GenericPrinter& out) {
#ifdef JS_JITSPEW
  for (MBasicBlockIterator iter(begin()); iter != end(); iter++) {
    iter->dump(out);
    out.printf("\n");
  }
#endif
}

void MIRGraph::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}

void MBasicBlock::dump(GenericPrinter& out) {
#ifdef JS_JITSPEW
  out.printf("block%u:%s%s%s\n", id(), isLoopHeader() ? " (loop header)" : "",
             unreachable() ? " (unreachable)" : "",
             isMarked() ? " (marked)" : "");
  if (MResumePoint* resume = entryResumePoint()) {
    resume->dump(out);
  }
  for (MPhiIterator iter(phisBegin()); iter != phisEnd(); iter++) {
    iter->dump(out);
  }
  for (MInstructionIterator iter(begin()); iter != end(); iter++) {
    iter->dump(out);
  }
#endif
}

void MBasicBlock::dump() {
  Fprinter out(stderr);
  dump(out);
  out.finish();
}