Bug 1885489 - Part 5: Add SnapshotIterator::readInt32(). r=iain
[gecko.git] / js / src / jit / ScalarReplacement.cpp
blobbfbb0be6ee76adcd8c455bef42725c9773ca3294
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/ScalarReplacement.h"
9 #include "jit/IonAnalysis.h"
10 #include "jit/JitSpewer.h"
11 #include "jit/MIR.h"
12 #include "jit/MIRGenerator.h"
13 #include "jit/MIRGraph.h"
14 #include "jit/WarpBuilderShared.h"
15 #include "js/Vector.h"
16 #include "vm/ArgumentsObject.h"
18 #include "gc/ObjectKind-inl.h"
20 namespace js {
21 namespace jit {
23 template <typename MemoryView>
24 class EmulateStateOf {
25 private:
26 using BlockState = typename MemoryView::BlockState;
28 MIRGenerator* mir_;
29 MIRGraph& graph_;
31 // Block state at the entrance of all basic blocks.
32 Vector<BlockState*, 8, SystemAllocPolicy> states_;
34 public:
35 EmulateStateOf(MIRGenerator* mir, MIRGraph& graph)
36 : mir_(mir), graph_(graph) {}
38 bool run(MemoryView& view);
41 template <typename MemoryView>
42 bool EmulateStateOf<MemoryView>::run(MemoryView& view) {
43 // Initialize the current block state of each block to an unknown state.
44 if (!states_.appendN(nullptr, graph_.numBlocks())) {
45 return false;
48 // Initialize the first block which needs to be traversed in RPO.
49 MBasicBlock* startBlock = view.startingBlock();
50 if (!view.initStartingState(&states_[startBlock->id()])) {
51 return false;
54 // Iterate over each basic block which has a valid entry state, and merge
55 // the state in the successor blocks.
56 for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
57 block != graph_.rpoEnd(); block++) {
58 if (mir_->shouldCancel(MemoryView::phaseName)) {
59 return false;
62 // Get the block state as the result of the merge of all predecessors
63 // which have already been visited in RPO. This means that backedges
64 // are not yet merged into the loop.
65 BlockState* state = states_[block->id()];
66 if (!state) {
67 continue;
69 view.setEntryBlockState(state);
71 // Iterates over resume points, phi and instructions.
72 for (MNodeIterator iter(*block); iter;) {
73 // Increment the iterator before visiting the instruction, as the
74 // visit function might discard itself from the basic block.
75 MNode* ins = *iter++;
76 if (ins->isDefinition()) {
77 MDefinition* def = ins->toDefinition();
78 switch (def->op()) {
79 #define MIR_OP(op) \
80 case MDefinition::Opcode::op: \
81 view.visit##op(def->to##op()); \
82 break;
83 MIR_OPCODE_LIST(MIR_OP)
84 #undef MIR_OP
86 } else {
87 view.visitResumePoint(ins->toResumePoint());
89 if (!graph_.alloc().ensureBallast()) {
90 return false;
92 if (view.oom()) {
93 return false;
97 // For each successor, merge the current state into the state of the
98 // successors.
99 for (size_t s = 0; s < block->numSuccessors(); s++) {
100 MBasicBlock* succ = block->getSuccessor(s);
101 if (!view.mergeIntoSuccessorState(*block, succ, &states_[succ->id()])) {
102 return false;
107 states_.clear();
108 return true;
111 static inline bool IsOptimizableObjectInstruction(MInstruction* ins) {
112 return ins->isNewObject() || ins->isNewPlainObject() ||
113 ins->isNewCallObject() || ins->isNewIterator();
116 static bool PhiOperandEqualTo(MDefinition* operand, MInstruction* newObject) {
117 if (operand == newObject) {
118 return true;
121 switch (operand->op()) {
122 case MDefinition::Opcode::GuardShape:
123 return PhiOperandEqualTo(operand->toGuardShape()->input(), newObject);
125 case MDefinition::Opcode::GuardToClass:
126 return PhiOperandEqualTo(operand->toGuardToClass()->input(), newObject);
128 case MDefinition::Opcode::CheckIsObj:
129 return PhiOperandEqualTo(operand->toCheckIsObj()->input(), newObject);
131 case MDefinition::Opcode::Unbox:
132 return PhiOperandEqualTo(operand->toUnbox()->input(), newObject);
134 default:
135 return false;
139 // Return true if all phi operands are equal to |newObject|.
140 static bool PhiOperandsEqualTo(MPhi* phi, MInstruction* newObject) {
141 MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
143 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
144 if (!PhiOperandEqualTo(phi->getOperand(i), newObject)) {
145 return false;
148 return true;
151 static bool IsObjectEscaped(MDefinition* ins, MInstruction* newObject,
152 const Shape* shapeDefault = nullptr);
154 // Returns False if the lambda is not escaped and if it is optimizable by
155 // ScalarReplacementOfObject.
156 static bool IsLambdaEscaped(MInstruction* ins, MInstruction* lambda,
157 MInstruction* newObject, const Shape* shape) {
158 MOZ_ASSERT(lambda->isLambda() || lambda->isFunctionWithProto());
159 MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
160 JitSpewDef(JitSpew_Escape, "Check lambda\n", ins);
161 JitSpewIndent spewIndent(JitSpew_Escape);
163 // The scope chain is not escaped if none of the Lambdas which are
164 // capturing it are escaped.
165 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
166 MNode* consumer = (*i)->consumer();
167 if (!consumer->isDefinition()) {
168 // Cannot optimize if it is observable from fun.arguments or others.
169 if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
170 JitSpew(JitSpew_Escape, "Observable lambda cannot be recovered");
171 return true;
173 continue;
176 MDefinition* def = consumer->toDefinition();
177 switch (def->op()) {
178 case MDefinition::Opcode::GuardToFunction: {
179 auto* guard = def->toGuardToFunction();
180 if (IsLambdaEscaped(guard, lambda, newObject, shape)) {
181 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
182 return true;
184 break;
187 case MDefinition::Opcode::GuardFunctionScript: {
188 auto* guard = def->toGuardFunctionScript();
189 BaseScript* actual;
190 if (lambda->isLambda()) {
191 actual = lambda->toLambda()->templateFunction()->baseScript();
192 } else {
193 actual = lambda->toFunctionWithProto()->function()->baseScript();
195 if (actual != guard->expected()) {
196 JitSpewDef(JitSpew_Escape, "has a non-matching script guard\n",
197 guard);
198 return true;
200 if (IsLambdaEscaped(guard, lambda, newObject, shape)) {
201 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
202 return true;
204 break;
207 case MDefinition::Opcode::FunctionEnvironment: {
208 if (IsObjectEscaped(def->toFunctionEnvironment(), newObject, shape)) {
209 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
210 return true;
212 break;
215 default:
216 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
217 return true;
220 JitSpew(JitSpew_Escape, "Lambda is not escaped");
221 return false;
224 static bool IsLambdaEscaped(MInstruction* lambda, MInstruction* newObject,
225 const Shape* shape) {
226 return IsLambdaEscaped(lambda, lambda, newObject, shape);
229 // Returns False if the object is not escaped and if it is optimizable by
230 // ScalarReplacementOfObject.
232 // For the moment, this code is dumb as it only supports objects which are not
233 // changing shape.
234 static bool IsObjectEscaped(MDefinition* ins, MInstruction* newObject,
235 const Shape* shapeDefault) {
236 MOZ_ASSERT(ins->type() == MIRType::Object || ins->isPhi());
237 MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
239 JitSpewDef(JitSpew_Escape, "Check object\n", ins);
240 JitSpewIndent spewIndent(JitSpew_Escape);
242 const Shape* shape = shapeDefault;
243 if (!shape) {
244 if (ins->isNewPlainObject()) {
245 shape = ins->toNewPlainObject()->shape();
246 } else if (JSObject* templateObj = MObjectState::templateObjectOf(ins)) {
247 shape = templateObj->shape();
251 if (!shape) {
252 JitSpew(JitSpew_Escape, "No shape defined.");
253 return true;
256 // Check if the object is escaped. If the object is not the first argument
257 // of either a known Store / Load, then we consider it as escaped. This is a
258 // cheap and conservative escape analysis.
259 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
260 MNode* consumer = (*i)->consumer();
261 if (!consumer->isDefinition()) {
262 // Cannot optimize if it is observable from fun.arguments or others.
263 if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
264 JitSpew(JitSpew_Escape, "Observable object cannot be recovered");
265 return true;
267 continue;
270 MDefinition* def = consumer->toDefinition();
271 switch (def->op()) {
272 case MDefinition::Opcode::StoreFixedSlot:
273 case MDefinition::Opcode::LoadFixedSlot:
274 // Not escaped if it is the first argument.
275 if (def->indexOf(*i) == 0) {
276 break;
279 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
280 return true;
282 case MDefinition::Opcode::PostWriteBarrier:
283 break;
285 case MDefinition::Opcode::Slots: {
286 #ifdef DEBUG
287 // Assert that MSlots are only used by MStoreDynamicSlot and
288 // MLoadDynamicSlot.
289 MSlots* ins = def->toSlots();
290 MOZ_ASSERT(ins->object() != 0);
291 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
292 // toDefinition should normally never fail, since they don't get
293 // captured by resume points.
294 MDefinition* def = (*i)->consumer()->toDefinition();
295 MOZ_ASSERT(def->op() == MDefinition::Opcode::StoreDynamicSlot ||
296 def->op() == MDefinition::Opcode::LoadDynamicSlot);
298 #endif
299 break;
302 case MDefinition::Opcode::GuardShape: {
303 MGuardShape* guard = def->toGuardShape();
304 if (shape != guard->shape()) {
305 JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
306 return true;
308 if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
309 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
310 return true;
312 break;
315 case MDefinition::Opcode::GuardToClass: {
316 MGuardToClass* guard = def->toGuardToClass();
317 if (!shape || shape->getObjectClass() != guard->getClass()) {
318 JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
319 return true;
321 if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
322 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
323 return true;
325 break;
328 case MDefinition::Opcode::CheckIsObj: {
329 if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
330 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
331 return true;
333 break;
336 case MDefinition::Opcode::Unbox: {
337 if (def->type() != MIRType::Object) {
338 JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
339 return true;
341 if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
342 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
343 return true;
345 break;
348 case MDefinition::Opcode::Lambda:
349 case MDefinition::Opcode::FunctionWithProto: {
350 if (IsLambdaEscaped(def->toInstruction(), newObject, shape)) {
351 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
352 return true;
354 break;
357 case MDefinition::Opcode::Phi: {
358 auto* phi = def->toPhi();
359 if (!PhiOperandsEqualTo(phi, newObject)) {
360 JitSpewDef(JitSpew_Escape, "has different phi operands\n", def);
361 return true;
363 if (IsObjectEscaped(phi, newObject, shape)) {
364 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
365 return true;
367 break;
370 case MDefinition::Opcode::Compare: {
371 bool canFold;
372 if (!def->toCompare()->tryFold(&canFold)) {
373 JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
374 return true;
376 break;
379 // Doesn't escape the object.
380 case MDefinition::Opcode::IsObject:
381 break;
383 // This instruction is a no-op used to verify that scalar replacement
384 // is working as expected in jit-test.
385 case MDefinition::Opcode::AssertRecoveredOnBailout:
386 break;
388 // This is just a special flavor of constant which lets us optimize
389 // out some guards in certain circumstances. We'll turn this into a
390 // regular constant later.
391 case MDefinition::Opcode::ConstantProto:
392 break;
394 // We definitely don't need barriers for objects that don't exist.
395 case MDefinition::Opcode::AssertCanElidePostWriteBarrier:
396 break;
398 default:
399 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
400 return true;
404 JitSpew(JitSpew_Escape, "Object is not escaped");
405 return false;
408 class ObjectMemoryView : public MDefinitionVisitorDefaultNoop {
409 public:
410 using BlockState = MObjectState;
411 static const char phaseName[];
413 private:
414 TempAllocator& alloc_;
415 MConstant* undefinedVal_;
416 MInstruction* obj_;
417 MBasicBlock* startBlock_;
418 BlockState* state_;
420 // Used to improve the memory usage by sharing common modification.
421 const MResumePoint* lastResumePoint_;
423 bool oom_;
425 public:
426 ObjectMemoryView(TempAllocator& alloc, MInstruction* obj);
428 MBasicBlock* startingBlock();
429 bool initStartingState(BlockState** pState);
431 void setEntryBlockState(BlockState* state);
432 bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
433 BlockState** pSuccState);
435 #ifdef DEBUG
436 void assertSuccess();
437 #else
438 void assertSuccess() {}
439 #endif
441 bool oom() const { return oom_; }
443 private:
444 MDefinition* functionForCallObject(MDefinition* ins);
446 public:
447 void visitResumePoint(MResumePoint* rp);
448 void visitObjectState(MObjectState* ins);
449 void visitStoreFixedSlot(MStoreFixedSlot* ins);
450 void visitLoadFixedSlot(MLoadFixedSlot* ins);
451 void visitPostWriteBarrier(MPostWriteBarrier* ins);
452 void visitStoreDynamicSlot(MStoreDynamicSlot* ins);
453 void visitLoadDynamicSlot(MLoadDynamicSlot* ins);
454 void visitGuardShape(MGuardShape* ins);
455 void visitGuardToClass(MGuardToClass* ins);
456 void visitCheckIsObj(MCheckIsObj* ins);
457 void visitUnbox(MUnbox* ins);
458 void visitFunctionEnvironment(MFunctionEnvironment* ins);
459 void visitGuardToFunction(MGuardToFunction* ins);
460 void visitGuardFunctionScript(MGuardFunctionScript* ins);
461 void visitLambda(MLambda* ins);
462 void visitFunctionWithProto(MFunctionWithProto* ins);
463 void visitPhi(MPhi* ins);
464 void visitCompare(MCompare* ins);
465 void visitConstantProto(MConstantProto* ins);
466 void visitIsObject(MIsObject* ins);
467 void visitAssertCanElidePostWriteBarrier(
468 MAssertCanElidePostWriteBarrier* ins);
471 /* static */ const char ObjectMemoryView::phaseName[] =
472 "Scalar Replacement of Object";
474 ObjectMemoryView::ObjectMemoryView(TempAllocator& alloc, MInstruction* obj)
475 : alloc_(alloc),
476 undefinedVal_(nullptr),
477 obj_(obj),
478 startBlock_(obj->block()),
479 state_(nullptr),
480 lastResumePoint_(nullptr),
481 oom_(false) {
482 // Annotate snapshots RValue such that we recover the store first.
483 obj_->setIncompleteObject();
485 // Annotate the instruction such that we do not replace it by a
486 // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
487 obj_->setImplicitlyUsedUnchecked();
490 MBasicBlock* ObjectMemoryView::startingBlock() { return startBlock_; }
492 bool ObjectMemoryView::initStartingState(BlockState** pState) {
493 // Uninitialized slots have an "undefined" value.
494 undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
495 startBlock_->insertBefore(obj_, undefinedVal_);
497 // Create a new block state and insert at it at the location of the new
498 // object.
499 BlockState* state = BlockState::New(alloc_, obj_);
500 if (!state) {
501 return false;
504 startBlock_->insertAfter(obj_, state);
506 // Initialize the properties of the object state.
507 state->initFromTemplateObject(alloc_, undefinedVal_);
509 // Hold out of resume point until it is visited.
510 state->setInWorklist();
512 *pState = state;
513 return true;
516 void ObjectMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
518 bool ObjectMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
519 MBasicBlock* succ,
520 BlockState** pSuccState) {
521 BlockState* succState = *pSuccState;
523 // When a block has no state yet, create an empty one for the
524 // successor.
525 if (!succState) {
526 // If the successor is not dominated then the object cannot flow
527 // in this basic block without a Phi. We know that no Phi exist
528 // in non-dominated successors as the conservative escaped
529 // analysis fails otherwise. Such condition can succeed if the
530 // successor is a join at the end of a if-block and the object
531 // only exists within the branch.
532 if (!startBlock_->dominates(succ)) {
533 return true;
536 // If there is only one predecessor, carry over the last state of the
537 // block to the successor. As the block state is immutable, if the
538 // current block has multiple successors, they will share the same entry
539 // state.
540 if (succ->numPredecessors() <= 1 || !state_->numSlots()) {
541 *pSuccState = state_;
542 return true;
545 // If we have multiple predecessors, then we allocate one Phi node for
546 // each predecessor, and create a new block state which only has phi
547 // nodes. These would later be removed by the removal of redundant phi
548 // nodes.
549 succState = BlockState::Copy(alloc_, state_);
550 if (!succState) {
551 return false;
554 size_t numPreds = succ->numPredecessors();
555 for (size_t slot = 0; slot < state_->numSlots(); slot++) {
556 MPhi* phi = MPhi::New(alloc_.fallible());
557 if (!phi || !phi->reserveLength(numPreds)) {
558 return false;
561 // Fill the input of the successors Phi with undefined
562 // values, and each block later fills the Phi inputs.
563 for (size_t p = 0; p < numPreds; p++) {
564 phi->addInput(undefinedVal_);
567 // Add Phi in the list of Phis of the basic block.
568 succ->addPhi(phi);
569 succState->setSlot(slot, phi);
572 // Insert the newly created block state instruction at the beginning
573 // of the successor block, after all the phi nodes. Note that it
574 // would be captured by the entry resume point of the successor
575 // block.
576 succ->insertBefore(succ->safeInsertTop(), succState);
577 *pSuccState = succState;
580 MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
581 if (succ->numPredecessors() > 1 && succState->numSlots() &&
582 succ != startBlock_) {
583 // We need to re-compute successorWithPhis as the previous EliminatePhis
584 // phase might have removed all the Phis from the successor block.
585 size_t currIndex;
586 MOZ_ASSERT(!succ->phisEmpty());
587 if (curr->successorWithPhis()) {
588 MOZ_ASSERT(curr->successorWithPhis() == succ);
589 currIndex = curr->positionInPhiSuccessor();
590 } else {
591 currIndex = succ->indexForPredecessor(curr);
592 curr->setSuccessorWithPhis(succ, currIndex);
594 MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);
596 // Copy the current slot states to the index of current block in all the
597 // Phi created during the first visit of the successor.
598 for (size_t slot = 0; slot < state_->numSlots(); slot++) {
599 MPhi* phi = succState->getSlot(slot)->toPhi();
600 phi->replaceOperand(currIndex, state_->getSlot(slot));
604 return true;
#ifdef DEBUG
// Debug-only sanity check: after replacement every remaining use of the
// object must be recoverable or removable by DCE.
void ObjectMemoryView::assertSuccess() {
  for (MUseIterator i(obj_->usesBegin()); i != obj_->usesEnd(); i++) {
    MNode* ins = (*i)->consumer();
    MDefinition* def = nullptr;

    // Resume points have been replaced by the object state.
    if (ins->isResumePoint() ||
        (def = ins->toDefinition())->isRecoveredOnBailout()) {
      MOZ_ASSERT(obj_->isIncompleteObject());
      continue;
    }

    // The only remaining uses would be removed by DCE, which will also
    // recover the object on bailouts.
    MOZ_ASSERT(def->isSlots() || def->isLambda() || def->isFunctionWithProto());
    MOZ_ASSERT(!def->hasDefUses());
  }
}
#endif
628 void ObjectMemoryView::visitResumePoint(MResumePoint* rp) {
629 // As long as the MObjectState is not yet seen next to the allocation, we do
630 // not patch the resume point to recover the side effects.
631 if (!state_->isInWorklist()) {
632 rp->addStore(alloc_, state_, lastResumePoint_);
633 lastResumePoint_ = rp;
637 void ObjectMemoryView::visitObjectState(MObjectState* ins) {
638 if (ins->isInWorklist()) {
639 ins->setNotInWorklist();
643 void ObjectMemoryView::visitStoreFixedSlot(MStoreFixedSlot* ins) {
644 // Skip stores made on other objects.
645 if (ins->object() != obj_) {
646 return;
649 // Clone the state and update the slot value.
650 if (state_->hasFixedSlot(ins->slot())) {
651 state_ = BlockState::Copy(alloc_, state_);
652 if (!state_) {
653 oom_ = true;
654 return;
657 state_->setFixedSlot(ins->slot(), ins->value());
658 ins->block()->insertBefore(ins->toInstruction(), state_);
659 } else {
660 // UnsafeSetReserveSlot can access baked-in slots which are guarded by
661 // conditions, which are not seen by the escape analysis.
662 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
663 ins->block()->insertBefore(ins, bailout);
666 // Remove original instruction.
667 ins->block()->discard(ins);
670 void ObjectMemoryView::visitLoadFixedSlot(MLoadFixedSlot* ins) {
671 // Skip loads made on other objects.
672 if (ins->object() != obj_) {
673 return;
676 // Replace load by the slot value.
677 if (state_->hasFixedSlot(ins->slot())) {
678 ins->replaceAllUsesWith(state_->getFixedSlot(ins->slot()));
679 } else {
680 // UnsafeGetReserveSlot can access baked-in slots which are guarded by
681 // conditions, which are not seen by the escape analysis.
682 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
683 ins->block()->insertBefore(ins, bailout);
684 ins->replaceAllUsesWith(undefinedVal_);
687 // Remove original instruction.
688 ins->block()->discard(ins);
691 void ObjectMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
692 // Skip loads made on other objects.
693 if (ins->object() != obj_) {
694 return;
697 // Remove original instruction.
698 ins->block()->discard(ins);
701 void ObjectMemoryView::visitStoreDynamicSlot(MStoreDynamicSlot* ins) {
702 // Skip stores made on other objects.
703 MSlots* slots = ins->slots()->toSlots();
704 if (slots->object() != obj_) {
705 // Guard objects are replaced when they are visited.
706 MOZ_ASSERT(!slots->object()->isGuardShape() ||
707 slots->object()->toGuardShape()->object() != obj_);
708 return;
711 // Clone the state and update the slot value.
712 if (state_->hasDynamicSlot(ins->slot())) {
713 state_ = BlockState::Copy(alloc_, state_);
714 if (!state_) {
715 oom_ = true;
716 return;
719 state_->setDynamicSlot(ins->slot(), ins->value());
720 ins->block()->insertBefore(ins->toInstruction(), state_);
721 } else {
722 // UnsafeSetReserveSlot can access baked-in slots which are guarded by
723 // conditions, which are not seen by the escape analysis.
724 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
725 ins->block()->insertBefore(ins, bailout);
728 // Remove original instruction.
729 ins->block()->discard(ins);
732 void ObjectMemoryView::visitLoadDynamicSlot(MLoadDynamicSlot* ins) {
733 // Skip loads made on other objects.
734 MSlots* slots = ins->slots()->toSlots();
735 if (slots->object() != obj_) {
736 // Guard objects are replaced when they are visited.
737 MOZ_ASSERT(!slots->object()->isGuardShape() ||
738 slots->object()->toGuardShape()->object() != obj_);
739 return;
742 // Replace load by the slot value.
743 if (state_->hasDynamicSlot(ins->slot())) {
744 ins->replaceAllUsesWith(state_->getDynamicSlot(ins->slot()));
745 } else {
746 // UnsafeGetReserveSlot can access baked-in slots which are guarded by
747 // conditions, which are not seen by the escape analysis.
748 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
749 ins->block()->insertBefore(ins, bailout);
750 ins->replaceAllUsesWith(undefinedVal_);
753 // Remove original instruction.
754 ins->block()->discard(ins);
757 void ObjectMemoryView::visitGuardShape(MGuardShape* ins) {
758 // Skip guards on other objects.
759 if (ins->object() != obj_) {
760 return;
763 // Replace the guard by its object.
764 ins->replaceAllUsesWith(obj_);
766 // Remove original instruction.
767 ins->block()->discard(ins);
770 void ObjectMemoryView::visitGuardToClass(MGuardToClass* ins) {
771 // Skip guards on other objects.
772 if (ins->object() != obj_) {
773 return;
776 // Replace the guard by its object.
777 ins->replaceAllUsesWith(obj_);
779 // Remove original instruction.
780 ins->block()->discard(ins);
783 void ObjectMemoryView::visitCheckIsObj(MCheckIsObj* ins) {
784 // Skip checks on other objects.
785 if (ins->input() != obj_) {
786 return;
789 // Replace the check by its object.
790 ins->replaceAllUsesWith(obj_);
792 // Remove original instruction.
793 ins->block()->discard(ins);
796 void ObjectMemoryView::visitUnbox(MUnbox* ins) {
797 // Skip unrelated unboxes.
798 if (ins->input() != obj_) {
799 return;
801 MOZ_ASSERT(ins->type() == MIRType::Object);
803 // Replace the unbox with the object.
804 ins->replaceAllUsesWith(obj_);
806 // Remove the unbox.
807 ins->block()->discard(ins);
810 MDefinition* ObjectMemoryView::functionForCallObject(MDefinition* ins) {
811 // Return early when we don't replace MNewCallObject.
812 if (!obj_->isNewCallObject()) {
813 return nullptr;
816 // Unwrap instructions until we found either MLambda or MFunctionWithProto.
817 // Return the function instruction if their environment chain matches the
818 // MNewCallObject we're about to replace.
819 while (true) {
820 switch (ins->op()) {
821 case MDefinition::Opcode::Lambda: {
822 if (ins->toLambda()->environmentChain() == obj_) {
823 return ins;
825 return nullptr;
827 case MDefinition::Opcode::FunctionWithProto: {
828 if (ins->toFunctionWithProto()->environmentChain() == obj_) {
829 return ins;
831 return nullptr;
833 case MDefinition::Opcode::FunctionEnvironment:
834 ins = ins->toFunctionEnvironment()->function();
835 break;
836 case MDefinition::Opcode::GuardToFunction:
837 ins = ins->toGuardToFunction()->object();
838 break;
839 case MDefinition::Opcode::GuardFunctionScript:
840 ins = ins->toGuardFunctionScript()->function();
841 break;
842 default:
843 return nullptr;
848 void ObjectMemoryView::visitFunctionEnvironment(MFunctionEnvironment* ins) {
849 // Skip function environment which are not aliases of the NewCallObject.
850 if (!functionForCallObject(ins)) {
851 return;
854 // Replace the function environment by the scope chain of the lambda.
855 ins->replaceAllUsesWith(obj_);
857 // Remove original instruction.
858 ins->block()->discard(ins);
861 void ObjectMemoryView::visitGuardToFunction(MGuardToFunction* ins) {
862 // Skip guards on other objects.
863 auto* function = functionForCallObject(ins);
864 if (!function) {
865 return;
868 // Replace the guard by its object.
869 ins->replaceAllUsesWith(function);
871 // Remove original instruction.
872 ins->block()->discard(ins);
875 void ObjectMemoryView::visitGuardFunctionScript(MGuardFunctionScript* ins) {
876 // Skip guards on other objects.
877 auto* function = functionForCallObject(ins);
878 if (!function) {
879 return;
882 // Replace the guard by its object.
883 ins->replaceAllUsesWith(function);
885 // Remove original instruction.
886 ins->block()->discard(ins);
889 void ObjectMemoryView::visitLambda(MLambda* ins) {
890 if (ins->environmentChain() != obj_) {
891 return;
894 // In order to recover the lambda we need to recover the scope chain, as the
895 // lambda is holding it.
896 ins->setIncompleteObject();
899 void ObjectMemoryView::visitFunctionWithProto(MFunctionWithProto* ins) {
900 if (ins->environmentChain() != obj_) {
901 return;
904 ins->setIncompleteObject();
907 void ObjectMemoryView::visitPhi(MPhi* ins) {
908 // Skip phis on other objects.
909 if (!PhiOperandsEqualTo(ins, obj_)) {
910 return;
913 // Replace the phi by its object.
914 ins->replaceAllUsesWith(obj_);
916 // Remove original instruction.
917 ins->block()->discardPhi(ins);
920 void ObjectMemoryView::visitCompare(MCompare* ins) {
921 // Skip unrelated comparisons.
922 if (ins->lhs() != obj_ && ins->rhs() != obj_) {
923 return;
926 bool folded;
927 MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
929 auto* cst = MConstant::New(alloc_, BooleanValue(folded));
930 ins->block()->insertBefore(ins, cst);
932 // Replace the comparison with a constant.
933 ins->replaceAllUsesWith(cst);
935 // Remove original instruction.
936 ins->block()->discard(ins);
939 void ObjectMemoryView::visitConstantProto(MConstantProto* ins) {
940 if (ins->getReceiverObject() != obj_) {
941 return;
944 auto* cst = ins->protoObject();
945 ins->replaceAllUsesWith(cst);
946 ins->block()->discard(ins);
949 void ObjectMemoryView::visitIsObject(MIsObject* ins) {
950 // Skip unrelated tests.
951 if (ins->input() != obj_) {
952 return;
955 auto* cst = MConstant::New(alloc_, BooleanValue(true));
956 ins->block()->insertBefore(ins, cst);
958 // Replace the test with a constant.
959 ins->replaceAllUsesWith(cst);
961 // Remove original instruction.
962 ins->block()->discard(ins);
965 void ObjectMemoryView::visitAssertCanElidePostWriteBarrier(
966 MAssertCanElidePostWriteBarrier* ins) {
967 if (ins->object() != obj_) {
968 return;
971 ins->block()->discard(ins);
974 static bool IndexOf(MDefinition* ins, int32_t* res) {
975 MOZ_ASSERT(ins->isLoadElement() || ins->isStoreElement());
976 MDefinition* indexDef = ins->getOperand(1); // ins->index();
977 if (indexDef->isSpectreMaskIndex()) {
978 indexDef = indexDef->toSpectreMaskIndex()->index();
980 if (indexDef->isBoundsCheck()) {
981 indexDef = indexDef->toBoundsCheck()->index();
983 if (indexDef->isToNumberInt32()) {
984 indexDef = indexDef->toToNumberInt32()->getOperand(0);
986 MConstant* indexDefConst = indexDef->maybeConstantValue();
987 if (!indexDefConst || indexDefConst->type() != MIRType::Int32) {
988 return false;
990 *res = indexDefConst->toInt32();
991 return true;
994 static inline bool IsOptimizableArrayInstruction(MInstruction* ins) {
995 return ins->isNewArray() || ins->isNewArrayObject();
998 // We don't support storing holes when doing scalar replacement, so any
999 // optimizable MNewArrayObject instruction is guaranteed to be packed.
1000 static inline bool IsPackedArray(MInstruction* ins) {
1001 return ins->isNewArrayObject();
// Returns False if the elements is not escaped and if it is optimizable by
// ScalarReplacementOfArray.
//
// Walks every use of the MElements |def| belonging to |newArray| and accepts
// only the access patterns the array view knows how to emulate.
static bool IsElementEscaped(MDefinition* def, MInstruction* newArray,
                             uint32_t arraySize) {
  MOZ_ASSERT(def->isElements());
  MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));

  JitSpewDef(JitSpew_Escape, "Check elements\n", def);
  JitSpewIndent spewIndent(JitSpew_Escape);

  for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) {
    // The MIRType::Elements cannot be captured in a resume point as
    // it does not represent a value allocation.
    MDefinition* access = (*i)->consumer()->toDefinition();

    switch (access->op()) {
      case MDefinition::Opcode::LoadElement: {
        MOZ_ASSERT(access->toLoadElement()->elements() == def);

        // If the index is not a constant then this index can alias
        // all others. We do not handle this case.
        int32_t index;
        if (!IndexOf(access, &index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a load element with a non-trivial index\n", access);
          return true;
        }
        if (index < 0 || arraySize <= uint32_t(index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a load element with an out-of-bound index\n", access);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::StoreElement: {
        MStoreElement* storeElem = access->toStoreElement();
        MOZ_ASSERT(storeElem->elements() == def);

        // StoreElement must bail out if it stores to a hole, in case
        // there is a setter on the prototype chain. If this StoreElement
        // might store to a hole, we can't scalar-replace it.
        if (storeElem->needsHoleCheck()) {
          JitSpewDef(JitSpew_Escape, "has a store element with a hole check\n",
                     storeElem);
          return true;
        }

        // If the index is not a constant then this index can alias
        // all others. We do not handle this case.
        int32_t index;
        if (!IndexOf(storeElem, &index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with a non-trivial index\n",
                     storeElem);
          return true;
        }
        if (index < 0 || arraySize <= uint32_t(index)) {
          JitSpewDef(JitSpew_Escape,
                     "has a store element with an out-of-bound index\n",
                     storeElem);
          return true;
        }

        // Dense element holes are written using MStoreHoleValueElement instead
        // of MStoreElement.
        MOZ_ASSERT(storeElem->value()->type() != MIRType::MagicHole);
        break;
      }

      case MDefinition::Opcode::SetInitializedLength:
        MOZ_ASSERT(access->toSetInitializedLength()->elements() == def);
        break;

      case MDefinition::Opcode::InitializedLength:
        MOZ_ASSERT(access->toInitializedLength()->elements() == def);
        break;

      case MDefinition::Opcode::ArrayLength:
        MOZ_ASSERT(access->toArrayLength()->elements() == def);
        break;

      case MDefinition::Opcode::ApplyArray:
        MOZ_ASSERT(access->toApplyArray()->getElements() == def);
        // fun.apply reads every element, so the array must have no holes.
        if (!IsPackedArray(newArray)) {
          JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
                     access);
          return true;
        }
        break;

      case MDefinition::Opcode::ConstructArray:
        MOZ_ASSERT(access->toConstructArray()->getElements() == def);
        if (!IsPackedArray(newArray)) {
          JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
                     access);
          return true;
        }
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", access);
        return true;
    }
  }
  JitSpew(JitSpew_Escape, "Elements is not escaped");
  return false;
}
// Returns False if the array is not escaped and if it is optimizable by
// ScalarReplacementOfArray.
//
// For the moment, this code is dumb as it only supports arrays which are not
// changing length, with only access with known constants.
static bool IsArrayEscaped(MInstruction* ins, MInstruction* newArray) {
  MOZ_ASSERT(ins->type() == MIRType::Object);
  MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));

  JitSpewDef(JitSpew_Escape, "Check array\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // Extract the length and shape from whichever allocation instruction
  // created the array.
  const Shape* shape;
  uint32_t length;
  if (newArray->isNewArrayObject()) {
    length = newArray->toNewArrayObject()->length();
    shape = newArray->toNewArrayObject()->shape();
  } else {
    length = newArray->toNewArray()->length();
    JSObject* templateObject = newArray->toNewArray()->templateObject();
    if (!templateObject) {
      JitSpew(JitSpew_Escape, "No template object defined.");
      return true;
    }
    shape = templateObject->shape();
  }

  // Arbitrary cap: emulating large arrays as individual MIR values is not
  // worthwhile.
  if (length >= 16) {
    JitSpew(JitSpew_Escape, "Array has too many elements");
    return true;
  }

  // Check if the object is escaped. If the object is not the first argument
  // of either a known Store / Load, then we consider it as escaped. This is a
  // cheap and conservative escape analysis.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();
    if (!consumer->isDefinition()) {
      // Cannot optimize if it is observable from fun.arguments or others.
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable array cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::Elements: {
        MElements* elem = def->toElements();
        MOZ_ASSERT(elem->object() == ins);
        if (IsElementEscaped(elem, newArray, length)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", elem);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardShape: {
        MGuardShape* guard = def->toGuardShape();
        if (shape != guard->shape()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
          return true;
        }
        // The guard's result aliases the array; recurse into its uses.
        if (IsArrayEscaped(guard, newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardToClass: {
        MGuardToClass* guard = def->toGuardToClass();
        if (shape->getObjectClass() != guard->getClass()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
          return true;
        }
        if (IsArrayEscaped(guard, newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardArrayIsPacked: {
        auto* guard = def->toGuardArrayIsPacked();
        if (!IsPackedArray(newArray)) {
          JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n", def);
          return true;
        }
        if (IsArrayEscaped(guard, newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (IsArrayEscaped(def->toInstruction(), newArray)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      // This instruction is supported for |JSOp::OptimizeSpreadCall|.
      case MDefinition::Opcode::Compare: {
        bool canFold;
        if (!def->toCompare()->tryFold(&canFold)) {
          JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::PostWriteBarrier:
      case MDefinition::Opcode::PostWriteElementBarrier:
        break;

      // This instruction is a no-op used to verify that scalar replacement
      // is working as expected in jit-test.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Array is not escaped");
  return false;
}
// This class replaces every MStoreElement and MSetInitializedLength by an
// MArrayState which emulates the content of the array. All MLoadElement,
// MInitializedLength and MArrayLength are replaced by the corresponding value.
//
// In order to restore the value of the array correctly in case of bailouts, we
// replace all reference of the allocation by the MArrayState definition.
//
// Driven by EmulateStateOf<ArrayMemoryView> (see top of file), which walks the
// graph in RPO and calls the visit* methods below.
class ArrayMemoryView : public MDefinitionVisitorDefaultNoop {
 public:
  using BlockState = MArrayState;
  static const char* phaseName;

 private:
  TempAllocator& alloc_;
  // Shared "undefined" constant used for uninitialized elements.
  MConstant* undefinedVal_;
  // Lazily-created constant holding the array length (see visitArrayLength).
  MConstant* length_;
  // The array allocation being replaced.
  MInstruction* arr_;
  // Block containing the allocation; emulation starts here.
  MBasicBlock* startBlock_;
  // State of the array at the instruction currently being visited.
  BlockState* state_;

  // Used to improve the memory usage by sharing common modification.
  const MResumePoint* lastResumePoint_;

  bool oom_;

 public:
  ArrayMemoryView(TempAllocator& alloc, MInstruction* arr);

  MBasicBlock* startingBlock();
  bool initStartingState(BlockState** pState);

  void setEntryBlockState(BlockState* state);
  bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
                               BlockState** pSuccState);

#ifdef DEBUG
  void assertSuccess();
#else
  void assertSuccess() {}
#endif

  bool oom() const { return oom_; }

 private:
  bool isArrayStateElements(MDefinition* elements);
  void discardInstruction(MInstruction* ins, MDefinition* elements);

 public:
  void visitResumePoint(MResumePoint* rp);
  void visitArrayState(MArrayState* ins);
  void visitStoreElement(MStoreElement* ins);
  void visitLoadElement(MLoadElement* ins);
  void visitSetInitializedLength(MSetInitializedLength* ins);
  void visitInitializedLength(MInitializedLength* ins);
  void visitArrayLength(MArrayLength* ins);
  void visitPostWriteBarrier(MPostWriteBarrier* ins);
  void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins);
  void visitGuardShape(MGuardShape* ins);
  void visitGuardToClass(MGuardToClass* ins);
  void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
  void visitUnbox(MUnbox* ins);
  void visitCompare(MCompare* ins);
  void visitApplyArray(MApplyArray* ins);
  void visitConstructArray(MConstructArray* ins);
};
1319 const char* ArrayMemoryView::phaseName = "Scalar Replacement of Array";
ArrayMemoryView::ArrayMemoryView(TempAllocator& alloc, MInstruction* arr)
    : alloc_(alloc),
      undefinedVal_(nullptr),
      length_(nullptr),
      arr_(arr),
      startBlock_(arr->block()),
      state_(nullptr),
      lastResumePoint_(nullptr),
      oom_(false) {
  // Annotate snapshots RValue such that we recover the store first.
  arr_->setIncompleteObject();

  // Annotate the instruction such that we do not replace it by a
  // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
  arr_->setImplicitlyUsedUnchecked();
}
1338 MBasicBlock* ArrayMemoryView::startingBlock() { return startBlock_; }
// Create the initial MArrayState right after the allocation: every element is
// "undefined" and the initialized length is 0. Returns false on OOM.
bool ArrayMemoryView::initStartingState(BlockState** pState) {
  // Uninitialized elements have an "undefined" value.
  undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
  MConstant* initLength = MConstant::New(alloc_, Int32Value(0));
  arr_->block()->insertBefore(arr_, undefinedVal_);
  arr_->block()->insertBefore(arr_, initLength);

  // Create a new block state and insert at it at the location of the new array.
  BlockState* state = BlockState::New(alloc_, arr_, initLength);
  if (!state) {
    return false;
  }
  startBlock_->insertAfter(arr_, state);

  // Initialize the elements of the array state.
  state->initFromTemplateObject(alloc_, undefinedVal_);

  // Hold out of resume point until it is visited.
  state->setInWorklist();

  *pState = state;
  return true;
}
1365 void ArrayMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
// Propagate the current block's exit state into |succ|. On the first visit of
// a join block this creates per-element phis; on later visits it fills in the
// phi operand for |curr|. Returns false on OOM.
bool ArrayMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
                                              MBasicBlock* succ,
                                              BlockState** pSuccState) {
  BlockState* succState = *pSuccState;

  // When a block has no state yet, create an empty one for the
  // successor.
  if (!succState) {
    // If the successor is not dominated then the array cannot flow
    // in this basic block without a Phi. We know that no Phi exist
    // in non-dominated successors as the conservative escaped
    // analysis fails otherwise. Such condition can succeed if the
    // successor is a join at the end of a if-block and the array
    // only exists within the branch.
    if (!startBlock_->dominates(succ)) {
      return true;
    }

    // If there is only one predecessor, carry over the last state of the
    // block to the successor. As the block state is immutable, if the
    // current block has multiple successors, they will share the same entry
    // state.
    if (succ->numPredecessors() <= 1 || !state_->numElements()) {
      *pSuccState = state_;
      return true;
    }

    // If we have multiple predecessors, then we allocate one Phi node for
    // each predecessor, and create a new block state which only has phi
    // nodes. These would later be removed by the removal of redundant phi
    // nodes.
    succState = BlockState::Copy(alloc_, state_);
    if (!succState) {
      return false;
    }

    size_t numPreds = succ->numPredecessors();
    for (size_t index = 0; index < state_->numElements(); index++) {
      MPhi* phi = MPhi::New(alloc_.fallible());
      if (!phi || !phi->reserveLength(numPreds)) {
        return false;
      }

      // Fill the input of the successors Phi with undefined
      // values, and each block later fills the Phi inputs.
      for (size_t p = 0; p < numPreds; p++) {
        phi->addInput(undefinedVal_);
      }

      // Add Phi in the list of Phis of the basic block.
      succ->addPhi(phi);
      succState->setElement(index, phi);
    }

    // Insert the newly created block state instruction at the beginning
    // of the successor block, after all the phi nodes. Note that it
    // would be captured by the entry resume point of the successor
    // block.
    succ->insertBefore(succ->safeInsertTop(), succState);
    *pSuccState = succState;
  }

  MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
  if (succ->numPredecessors() > 1 && succState->numElements() &&
      succ != startBlock_) {
    // We need to re-compute successorWithPhis as the previous EliminatePhis
    // phase might have removed all the Phis from the successor block.
    size_t currIndex;
    MOZ_ASSERT(!succ->phisEmpty());
    if (curr->successorWithPhis()) {
      MOZ_ASSERT(curr->successorWithPhis() == succ);
      currIndex = curr->positionInPhiSuccessor();
    } else {
      currIndex = succ->indexForPredecessor(curr);
      curr->setSuccessorWithPhis(succ, currIndex);
    }
    MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);

    // Copy the current element states to the index of current block in all
    // the Phi created during the first visit of the successor.
    for (size_t index = 0; index < state_->numElements(); index++) {
      MPhi* phi = succState->getElement(index)->toPhi();
      phi->replaceOperand(currIndex, state_->getElement(index));
    }
  }

  return true;
}
#ifdef DEBUG
// After replacement, the array allocation must have no live definition uses
// left in the graph.
void ArrayMemoryView::assertSuccess() { MOZ_ASSERT(!arr_->hasLiveDefUses()); }
#endif
void ArrayMemoryView::visitResumePoint(MResumePoint* rp) {
  // As long as the MArrayState is not yet seen next to the allocation, we do
  // not patch the resume point to recover the side effects.
  if (!state_->isInWorklist()) {
    // Record the state as a store on the resume point; lastResumePoint_ lets
    // addStore share the stored-value list with the previous resume point.
    rp->addStore(alloc_, state_, lastResumePoint_);
    lastResumePoint_ = rp;
  }
}
1469 void ArrayMemoryView::visitArrayState(MArrayState* ins) {
1470 if (ins->isInWorklist()) {
1471 ins->setNotInWorklist();
// True if |elements| is the MElements of the array being scalar-replaced.
bool ArrayMemoryView::isArrayStateElements(MDefinition* elements) {
  return elements->isElements() && elements->toElements()->object() == arr_;
}
// Discard |ins|, and also its MElements operand if |ins| was its last
// remaining use.
void ArrayMemoryView::discardInstruction(MInstruction* ins,
                                         MDefinition* elements) {
  MOZ_ASSERT(elements->isElements());
  // Discard ins first so the use of |elements| it held is released before the
  // hasLiveDefUses check below.
  ins->block()->discard(ins);
  if (!elements->hasLiveDefUses()) {
    elements->block()->discard(elements->toInstruction());
  }
}
// Replace a store to the tracked array by a new MArrayState holding the
// stored value at the constant index.
void ArrayMemoryView::visitStoreElement(MStoreElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Register value of the setter in the state.
  int32_t index;
  // IsElementEscaped already rejected non-constant indexes.
  MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
  state_ = BlockState::Copy(alloc_, state_);
  if (!state_) {
    oom_ = true;
    return;
  }
  state_->setElement(index, ins->value());
  ins->block()->insertBefore(ins, state_);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
// Replace a load from the tracked array by the value recorded in the current
// block state.
void ArrayMemoryView::visitLoadElement(MLoadElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value contained at the index.
  int32_t index;
  MOZ_ALWAYS_TRUE(IndexOf(ins, &index));

  // The only way to store a hole value in a new array is with
  // StoreHoleValueElement, which IsElementEscaped does not allow.
  // Therefore, we do not have to do a hole check.
  MDefinition* element = state_->getElement(index);
  MOZ_ASSERT(element->type() != MIRType::MagicHole);

  ins->replaceAllUsesWith(element);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
// Fold a MSetInitializedLength into the emulated state by recording a new
// initialized-length constant.
void ArrayMemoryView::visitSetInitializedLength(MSetInitializedLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the new initialized length. Note that the argument of
  // MSetInitializedLength is the last index and not the initialized length.
  // To obtain the length, we need to add 1 to it, and thus we need to create
  // a new constant that we register in the ArrayState.
  state_ = BlockState::Copy(alloc_, state_);
  if (!state_) {
    oom_ = true;
    return;
  }

  int32_t initLengthValue = ins->index()->maybeConstantValue()->toInt32() + 1;
  MConstant* initLength = MConstant::New(alloc_, Int32Value(initLengthValue));
  ins->block()->insertBefore(ins, initLength);
  ins->block()->insertBefore(ins, state_);
  state_->setInitializedLength(initLength);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1561 void ArrayMemoryView::visitInitializedLength(MInitializedLength* ins) {
1562 // Skip other array objects.
1563 MDefinition* elements = ins->elements();
1564 if (!isArrayStateElements(elements)) {
1565 return;
1568 // Replace by the value of the length.
1569 ins->replaceAllUsesWith(state_->initializedLength());
1571 // Remove original instruction.
1572 discardInstruction(ins, elements);
// Replace reads of the array length by a constant; the escape analysis only
// accepts arrays whose length never changes.
void ArrayMemoryView::visitArrayLength(MArrayLength* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  // Replace by the value of the length.
  if (!length_) {
    // Lazily create the shared length constant next to the allocation.
    length_ = MConstant::New(alloc_, Int32Value(state_->numElements()));
    arr_->block()->insertBefore(arr_, length_);
  }
  ins->replaceAllUsesWith(length_);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
1593 void ArrayMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
1594 // Skip barriers on other objects.
1595 if (ins->object() != arr_) {
1596 return;
1599 // Remove original instruction.
1600 ins->block()->discard(ins);
1603 void ArrayMemoryView::visitPostWriteElementBarrier(
1604 MPostWriteElementBarrier* ins) {
1605 // Skip barriers on other objects.
1606 if (ins->object() != arr_) {
1607 return;
1610 // Remove original instruction.
1611 ins->block()->discard(ins);
1614 void ArrayMemoryView::visitGuardShape(MGuardShape* ins) {
1615 // Skip guards on other objects.
1616 if (ins->object() != arr_) {
1617 return;
1620 // Replace the guard by its object.
1621 ins->replaceAllUsesWith(arr_);
1623 // Remove original instruction.
1624 ins->block()->discard(ins);
1627 void ArrayMemoryView::visitGuardToClass(MGuardToClass* ins) {
1628 // Skip guards on other objects.
1629 if (ins->object() != arr_) {
1630 return;
1633 // Replace the guard by its object.
1634 ins->replaceAllUsesWith(arr_);
1636 // Remove original instruction.
1637 ins->block()->discard(ins);
1640 void ArrayMemoryView::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
1641 // Skip guards on other objects.
1642 if (ins->array() != arr_) {
1643 return;
1646 // Replace the guard by its object.
1647 ins->replaceAllUsesWith(arr_);
1649 // Remove original instruction.
1650 ins->block()->discard(ins);
1653 void ArrayMemoryView::visitUnbox(MUnbox* ins) {
1654 // Skip unrelated unboxes.
1655 if (ins->getOperand(0) != arr_) {
1656 return;
1658 MOZ_ASSERT(ins->type() == MIRType::Object);
1660 // Replace the unbox with the array object.
1661 ins->replaceAllUsesWith(arr_);
1663 // Remove the unbox.
1664 ins->block()->discard(ins);
// Fold comparisons involving the array to a boolean constant; IsArrayEscaped
// only accepted compares for which tryFold succeeds.
void ArrayMemoryView::visitCompare(MCompare* ins) {
  // Skip unrelated comparisons.
  if (ins->lhs() != arr_ && ins->rhs() != arr_) {
    return;
  }

  bool folded;
  MOZ_ALWAYS_TRUE(ins->tryFold(&folded));

  auto* cst = MConstant::New(alloc_, BooleanValue(folded));
  ins->block()->insertBefore(ins, cst);

  // Replace the comparison with a constant.
  ins->replaceAllUsesWith(cst);

  // Remove original instruction.
  ins->block()->discard(ins);
}
// Replace fun.apply(thisArg, array) over the tracked array by a direct call
// whose arguments are the emulated element values.
void ArrayMemoryView::visitApplyArray(MApplyArray* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->getElements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  uint32_t numElements = state_->numElements();

  CallInfo callInfo(alloc_, /*constructing=*/false, ins->ignoresReturnValue());
  if (!callInfo.initForApplyArray(ins->getFunction(), ins->getThis(),
                                  numElements)) {
    oom_ = true;
    return;
  }

  // Pass each tracked element as an explicit call argument. No hole values
  // can occur (see IsElementEscaped).
  for (uint32_t i = 0; i < numElements; i++) {
    auto* element = state_->getElement(i);
    MOZ_ASSERT(element->type() != MIRType::MagicHole);

    callInfo.initArg(i, element);
  }

  auto addUndefined = [this]() { return undefinedVal_; };

  bool needsThisCheck = false;
  bool isDOMCall = false;
  auto* call = MakeCall(alloc_, addUndefined, callInfo, needsThisCheck,
                        ins->getSingleTarget(), isDOMCall);
  if (!call) {
    oom_ = true;
    return;
  }
  if (!ins->maybeCrossRealm()) {
    call->setNotCrossRealm();
  }

  ins->block()->insertBefore(ins, call);
  ins->replaceAllUsesWith(call);

  // The new call takes over the apply's resume point.
  call->stealResumePoint(ins);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
// Same as visitApplyArray, but for constructing calls spreading the tracked
// array (new Target(...array)).
void ArrayMemoryView::visitConstructArray(MConstructArray* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->getElements();
  if (!isArrayStateElements(elements)) {
    return;
  }

  uint32_t numElements = state_->numElements();

  CallInfo callInfo(alloc_, /*constructing=*/true, ins->ignoresReturnValue());
  if (!callInfo.initForConstructArray(ins->getFunction(), ins->getThis(),
                                      ins->getNewTarget(), numElements)) {
    oom_ = true;
    return;
  }

  // Pass each tracked element as an explicit call argument. No hole values
  // can occur (see IsElementEscaped).
  for (uint32_t i = 0; i < numElements; i++) {
    auto* element = state_->getElement(i);
    MOZ_ASSERT(element->type() != MIRType::MagicHole);

    callInfo.initArg(i, element);
  }

  auto addUndefined = [this]() { return undefinedVal_; };

  bool needsThisCheck = ins->needsThisCheck();
  bool isDOMCall = false;
  auto* call = MakeCall(alloc_, addUndefined, callInfo, needsThisCheck,
                        ins->getSingleTarget(), isDOMCall);
  if (!call) {
    oom_ = true;
    return;
  }
  if (!ins->maybeCrossRealm()) {
    call->setNotCrossRealm();
  }

  ins->block()->insertBefore(ins, call);
  ins->replaceAllUsesWith(call);

  // The new call takes over the construct's resume point.
  call->stealResumePoint(ins);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
// An arguments-object allocation is a candidate for scalar replacement if it
// comes from either of these creation instructions.
static inline bool IsOptimizableArgumentsInstruction(MInstruction* ins) {
  return ins->isCreateArgumentsObject() ||
         ins->isCreateInlinedArgumentsObject();
}
// Visitor which removes an arguments-object allocation by rewriting every
// supported consumer to read the underlying (possibly inlined) frame
// arguments directly.
class ArgumentsReplacer : public MDefinitionVisitorDefaultNoop {
 private:
  MIRGenerator* mir_;
  MIRGraph& graph_;
  // The CreateArgumentsObject / CreateInlinedArgumentsObject being replaced.
  MInstruction* args_;

  bool oom_ = false;

  TempAllocator& alloc() { return graph_.alloc(); }

  // True when args_ comes from an inlined frame, giving direct access to the
  // actual argument definitions.
  bool isInlinedArguments() const {
    return args_->isCreateInlinedArgumentsObject();
  }

  MNewArrayObject* inlineArgsArray(MInstruction* ins, Shape* shape,
                                   uint32_t begin, uint32_t count);

  void visitGuardToClass(MGuardToClass* ins);
  void visitGuardProto(MGuardProto* ins);
  void visitGuardArgumentsObjectFlags(MGuardArgumentsObjectFlags* ins);
  void visitUnbox(MUnbox* ins);
  void visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins);
  void visitLoadArgumentsObjectArg(MLoadArgumentsObjectArg* ins);
  void visitLoadArgumentsObjectArgHole(MLoadArgumentsObjectArgHole* ins);
  void visitInArgumentsObjectArg(MInArgumentsObjectArg* ins);
  void visitArgumentsObjectLength(MArgumentsObjectLength* ins);
  void visitApplyArgsObj(MApplyArgsObj* ins);
  void visitArrayFromArgumentsObject(MArrayFromArgumentsObject* ins);
  void visitArgumentsSlice(MArgumentsSlice* ins);
  void visitLoadFixedSlot(MLoadFixedSlot* ins);

  bool oom() const { return oom_; }

 public:
  ArgumentsReplacer(MIRGenerator* mir, MIRGraph& graph, MInstruction* args)
      : mir_(mir), graph_(graph), args_(args) {
    MOZ_ASSERT(IsOptimizableArgumentsInstruction(args_));
  }

  bool escapes(MInstruction* ins, bool guardedForMapped = false);
  bool run();
  void assertSuccess();
};
// Returns false if the arguments object does not escape.
//
// |guardedForMapped| records that a dominating GuardToClass proved the object
// is a mapped arguments object, which is required for arguments.callee.
bool ArgumentsReplacer::escapes(MInstruction* ins, bool guardedForMapped) {
  MOZ_ASSERT(ins->type() == MIRType::Object);

  JitSpewDef(JitSpew_Escape, "Check arguments object\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // We can replace inlined arguments in scripts with OSR entries, but
  // the outermost arguments object has already been allocated before
  // we enter via OSR and can't be replaced.
  if (ins->isCreateArgumentsObject() && graph_.osrBlock()) {
    JitSpew(JitSpew_Escape, "Can't replace outermost OSR arguments");
    return true;
  }

  // Check all uses to see whether they can be supported without
  // allocating an ArgumentsObject.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();

    // If a resume point can observe this instruction, we can only optimize
    // if it is recoverable.
    if (consumer->isResumePoint()) {
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable args object cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::GuardToClass: {
        MGuardToClass* guard = def->toGuardToClass();
        if (!guard->isArgumentsObjectClass()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
          return true;
        }
        // The guard's result aliases args_; recurse into its uses, noting
        // whether the mapped class was established.
        bool isMapped = guard->getClass() == &MappedArgumentsObject::class_;
        if (escapes(guard, isMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardProto: {
        if (escapes(def->toInstruction(), guardedForMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardArgumentsObjectFlags: {
        if (escapes(def->toInstruction(), guardedForMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (escapes(def->toInstruction())) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::LoadFixedSlot: {
        MLoadFixedSlot* load = def->toLoadFixedSlot();

        // We can replace arguments.callee.
        if (load->slot() == ArgumentsObject::CALLEE_SLOT) {
          MOZ_ASSERT(guardedForMapped);
          continue;
        }
        JitSpew(JitSpew_Escape, "is escaped by unsupported LoadFixedSlot\n");
        return true;
      }

      case MDefinition::Opcode::ApplyArgsObj: {
        if (ins == def->toApplyArgsObj()->getThis()) {
          JitSpew(JitSpew_Escape, "is escaped as |this| arg of ApplyArgsObj\n");
          return true;
        }
        MOZ_ASSERT(ins == def->toApplyArgsObj()->getArgsObj());
        break;
      }

      // This is a replaceable consumer.
      case MDefinition::Opcode::ArgumentsObjectLength:
      case MDefinition::Opcode::GetArgumentsObjectArg:
      case MDefinition::Opcode::LoadArgumentsObjectArg:
      case MDefinition::Opcode::LoadArgumentsObjectArgHole:
      case MDefinition::Opcode::InArgumentsObjectArg:
      case MDefinition::Opcode::ArrayFromArgumentsObject:
      case MDefinition::Opcode::ArgumentsSlice:
        break;

      // This instruction is a no-op used to test that scalar replacement
      // is working as expected.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "ArgumentsObject is not escaped");
  return false;
}
// Replacing the arguments object is simpler than replacing an object
// or array, because the arguments object does not change state.
bool ArgumentsReplacer::run() {
  MBasicBlock* startBlock = args_->block();

  // Iterate over each basic block.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel("Scalar replacement of Arguments Object")) {
      return false;
    }

    // Iterates over phis and instructions.
    // We do not have to visit resume points. Any resume points that capture
    // the argument object will be handled by the Sink pass.
    for (MDefinitionIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the
      // visit function might discard itself from the basic block.
      MDefinition* def = *iter++;
      // Dispatch to the matching visit* method; opcodes without an override
      // fall through to the no-op default of MDefinitionVisitorDefaultNoop.
      switch (def->op()) {
#define MIR_OP(op)              \
  case MDefinition::Opcode::op: \
    visit##op(def->to##op());   \
    break;
        MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
      }
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }
      if (oom()) {
        return false;
      }
    }
  }

  assertSuccess();
  return true;
}
// After replacement the allocation must be recoverable from resume points and
// have no live definition uses left in the graph.
void ArgumentsReplacer::assertSuccess() {
  MOZ_ASSERT(args_->canRecoverOnBailout());
  MOZ_ASSERT(!args_->hasLiveDefUses());
}
1992 void ArgumentsReplacer::visitGuardToClass(MGuardToClass* ins) {
1993 // Skip guards on other objects.
1994 if (ins->object() != args_) {
1995 return;
1997 MOZ_ASSERT(ins->isArgumentsObjectClass());
1999 // Replace the guard with the args object.
2000 ins->replaceAllUsesWith(args_);
2002 // Remove the guard.
2003 ins->block()->discard(ins);
2006 void ArgumentsReplacer::visitGuardProto(MGuardProto* ins) {
2007 // Skip guards on other objects.
2008 if (ins->object() != args_) {
2009 return;
2012 // The prototype can only be changed through explicit operations, for example
2013 // by calling |Reflect.setPrototype|. We have already determined that the args
2014 // object doesn't escape, so its prototype can't be mutated.
2016 // Replace the guard with the args object.
2017 ins->replaceAllUsesWith(args_);
2019 // Remove the guard.
2020 ins->block()->discard(ins);
// Flag guards on the replaced arguments object are always satisfied (see the
// DEBUG reasoning below), so they can be removed.
void ArgumentsReplacer::visitGuardArgumentsObjectFlags(
    MGuardArgumentsObjectFlags* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

#ifdef DEBUG
  // Each *_OVERRIDDEN_BIT can only be set by setting or deleting a
  // property of the args object. We have already determined that the
  // args object doesn't escape, so its properties can't be mutated.
  //
  // FORWARDED_ARGUMENTS_BIT is set if any mapped argument is closed
  // over, which is an immutable property of the script. Because we
  // are replacing the args object for a known script, we can check
  // the flag once, which is done when we first attach the CacheIR,
  // and rely on it. (Note that this wouldn't be true if we didn't
  // know the origin of args_, because it could be passed in from
  // another function.)
  uint32_t supportedBits = ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
                           ArgumentsObject::ITERATOR_OVERRIDDEN_BIT |
                           ArgumentsObject::ELEMENT_OVERRIDDEN_BIT |
                           ArgumentsObject::CALLEE_OVERRIDDEN_BIT |
                           ArgumentsObject::FORWARDED_ARGUMENTS_BIT;

  MOZ_ASSERT((ins->flags() & ~supportedBits) == 0);
  MOZ_ASSERT_IF(ins->flags() & ArgumentsObject::FORWARDED_ARGUMENTS_BIT,
                !args_->block()->info().anyFormalIsForwarded());
#endif

  // Replace the guard with the args object.
  ins->replaceAllUsesWith(args_);

  // Remove the guard.
  ins->block()->discard(ins);
}
2060 void ArgumentsReplacer::visitUnbox(MUnbox* ins) {
2061 // Skip unrelated unboxes.
2062 if (ins->getOperand(0) != args_) {
2063 return;
2065 MOZ_ASSERT(ins->type() == MIRType::Object);
2067 // Replace the unbox with the args object.
2068 ins->replaceAllUsesWith(args_);
2070 // Remove the unbox.
2071 ins->block()->discard(ins);
// Replace a read of a known argument index by the argument definition itself
// (inlined frames) or by a frame load (outermost frame).
void ArgumentsReplacer::visitGetArgumentsObjectArg(
    MGetArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  // We don't support setting arguments in ArgumentsReplacer::escapes,
  // so we can load the initial value of the argument without worrying
  // about it being stale.
  MDefinition* getArg;
  if (isInlinedArguments()) {
    // Inlined frames have direct access to the actual arguments.
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    if (ins->argno() < actualArgs->numActuals()) {
      getArg = actualArgs->getArg(ins->argno());
    } else {
      // Omitted arguments are not mapped to the arguments object, and
      // will always be undefined.
      auto* undef = MConstant::New(alloc(), UndefinedValue());
      ins->block()->insertBefore(ins, undef);
      getArg = undef;
    }
  } else {
    // Load the argument from the frame.
    auto* index = MConstant::New(alloc(), Int32Value(ins->argno()));
    ins->block()->insertBefore(ins, index);

    auto* loadArg = MGetFrameArgument::New(alloc(), index);
    ins->block()->insertBefore(ins, loadArg);
    getArg = loadArg;
  }
  ins->replaceAllUsesWith(getArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}
2112 void ArgumentsReplacer::visitLoadArgumentsObjectArg(
2113 MLoadArgumentsObjectArg* ins) {
2114 // Skip other arguments objects.
2115 if (ins->argsObject() != args_) {
2116 return;
2119 MDefinition* index = ins->index();
2121 MInstruction* loadArg;
2122 if (isInlinedArguments()) {
2123 auto* actualArgs = args_->toCreateInlinedArgumentsObject();
2125 // Insert bounds check.
2126 auto* length =
2127 MConstant::New(alloc(), Int32Value(actualArgs->numActuals()));
2128 ins->block()->insertBefore(ins, length);
2130 MInstruction* check = MBoundsCheck::New(alloc(), index, length);
2131 check->setBailoutKind(ins->bailoutKind());
2132 ins->block()->insertBefore(ins, check);
2134 if (mir_->outerInfo().hadBoundsCheckBailout()) {
2135 check->setNotMovable();
2138 loadArg = MGetInlinedArgument::New(alloc(), check, actualArgs);
2139 if (!loadArg) {
2140 oom_ = true;
2141 return;
2143 } else {
2144 // Insert bounds check.
2145 auto* length = MArgumentsLength::New(alloc());
2146 ins->block()->insertBefore(ins, length);
2148 MInstruction* check = MBoundsCheck::New(alloc(), index, length);
2149 check->setBailoutKind(ins->bailoutKind());
2150 ins->block()->insertBefore(ins, check);
2152 if (mir_->outerInfo().hadBoundsCheckBailout()) {
2153 check->setNotMovable();
2156 if (JitOptions.spectreIndexMasking) {
2157 check = MSpectreMaskIndex::New(alloc(), check, length);
2158 ins->block()->insertBefore(ins, check);
2161 loadArg = MGetFrameArgument::New(alloc(), check);
2163 ins->block()->insertBefore(ins, loadArg);
2164 ins->replaceAllUsesWith(loadArg);
2166 // Remove original instruction.
2167 ins->block()->discard(ins);
2170 void ArgumentsReplacer::visitLoadArgumentsObjectArgHole(
2171 MLoadArgumentsObjectArgHole* ins) {
2172 // Skip other arguments objects.
2173 if (ins->argsObject() != args_) {
2174 return;
2177 MDefinition* index = ins->index();
2179 MInstruction* loadArg;
2180 if (isInlinedArguments()) {
2181 auto* actualArgs = args_->toCreateInlinedArgumentsObject();
2183 loadArg = MGetInlinedArgumentHole::New(alloc(), index, actualArgs);
2184 if (!loadArg) {
2185 oom_ = true;
2186 return;
2188 } else {
2189 auto* length = MArgumentsLength::New(alloc());
2190 ins->block()->insertBefore(ins, length);
2192 loadArg = MGetFrameArgumentHole::New(alloc(), index, length);
2194 loadArg->setBailoutKind(ins->bailoutKind());
2195 ins->block()->insertBefore(ins, loadArg);
2196 ins->replaceAllUsesWith(loadArg);
2198 // Remove original instruction.
2199 ins->block()->discard(ins);
2202 void ArgumentsReplacer::visitInArgumentsObjectArg(MInArgumentsObjectArg* ins) {
2203 // Skip other arguments objects.
2204 if (ins->argsObject() != args_) {
2205 return;
2208 MDefinition* index = ins->index();
2210 // Ensure the index is non-negative.
2211 auto* guardedIndex = MGuardInt32IsNonNegative::New(alloc(), index);
2212 guardedIndex->setBailoutKind(ins->bailoutKind());
2213 ins->block()->insertBefore(ins, guardedIndex);
2215 MInstruction* length;
2216 if (isInlinedArguments()) {
2217 uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
2218 length = MConstant::New(alloc(), Int32Value(argc));
2219 } else {
2220 length = MArgumentsLength::New(alloc());
2222 ins->block()->insertBefore(ins, length);
2224 auto* compare = MCompare::New(alloc(), guardedIndex, length, JSOp::Lt,
2225 MCompare::Compare_Int32);
2226 ins->block()->insertBefore(ins, compare);
2227 ins->replaceAllUsesWith(compare);
2229 // Remove original instruction.
2230 ins->block()->discard(ins);
2233 void ArgumentsReplacer::visitArgumentsObjectLength(
2234 MArgumentsObjectLength* ins) {
2235 // Skip other arguments objects.
2236 if (ins->argsObject() != args_) {
2237 return;
2240 MInstruction* length;
2241 if (isInlinedArguments()) {
2242 uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
2243 length = MConstant::New(alloc(), Int32Value(argc));
2244 } else {
2245 length = MArgumentsLength::New(alloc());
2247 ins->block()->insertBefore(ins, length);
2248 ins->replaceAllUsesWith(length);
2250 // Remove original instruction.
2251 ins->block()->discard(ins);
2254 void ArgumentsReplacer::visitApplyArgsObj(MApplyArgsObj* ins) {
2255 // Skip other arguments objects.
2256 if (ins->getArgsObj() != args_) {
2257 return;
2260 MInstruction* newIns;
2261 if (isInlinedArguments()) {
2262 auto* actualArgs = args_->toCreateInlinedArgumentsObject();
2263 CallInfo callInfo(alloc(), /*constructing=*/false,
2264 ins->ignoresReturnValue());
2266 callInfo.initForApplyInlinedArgs(ins->getFunction(), ins->getThis(),
2267 actualArgs->numActuals());
2268 for (uint32_t i = 0; i < actualArgs->numActuals(); i++) {
2269 callInfo.initArg(i, actualArgs->getArg(i));
2272 auto addUndefined = [this, &ins]() -> MConstant* {
2273 MConstant* undef = MConstant::New(alloc(), UndefinedValue());
2274 ins->block()->insertBefore(ins, undef);
2275 return undef;
2278 bool needsThisCheck = false;
2279 bool isDOMCall = false;
2280 auto* call = MakeCall(alloc(), addUndefined, callInfo, needsThisCheck,
2281 ins->getSingleTarget(), isDOMCall);
2282 if (!call) {
2283 oom_ = true;
2284 return;
2286 if (!ins->maybeCrossRealm()) {
2287 call->setNotCrossRealm();
2289 newIns = call;
2290 } else {
2291 auto* numArgs = MArgumentsLength::New(alloc());
2292 ins->block()->insertBefore(ins, numArgs);
2294 // TODO: Should we rename MApplyArgs?
2295 auto* apply = MApplyArgs::New(alloc(), ins->getSingleTarget(),
2296 ins->getFunction(), numArgs, ins->getThis());
2297 apply->setBailoutKind(ins->bailoutKind());
2298 if (!ins->maybeCrossRealm()) {
2299 apply->setNotCrossRealm();
2301 if (ins->ignoresReturnValue()) {
2302 apply->setIgnoresReturnValue();
2304 newIns = apply;
2307 ins->block()->insertBefore(ins, newIns);
2308 ins->replaceAllUsesWith(newIns);
2310 newIns->stealResumePoint(ins);
2311 ins->block()->discard(ins);
2314 MNewArrayObject* ArgumentsReplacer::inlineArgsArray(MInstruction* ins,
2315 Shape* shape,
2316 uint32_t begin,
2317 uint32_t count) {
2318 auto* actualArgs = args_->toCreateInlinedArgumentsObject();
2320 // Contrary to |WarpBuilder::build_Rest()|, we can always create
2321 // MNewArrayObject, because we're guaranteed to have a shape and all
2322 // arguments can be stored into fixed elements.
2323 static_assert(
2324 gc::CanUseFixedElementsForArray(ArgumentsObject::MaxInlinedArgs));
2326 gc::Heap heap = gc::Heap::Default;
2328 // Allocate an array of the correct size.
2329 auto* shapeConstant = MConstant::NewShape(alloc(), shape);
2330 ins->block()->insertBefore(ins, shapeConstant);
2332 auto* newArray = MNewArrayObject::New(alloc(), shapeConstant, count, heap);
2333 ins->block()->insertBefore(ins, newArray);
2335 if (count) {
2336 auto* elements = MElements::New(alloc(), newArray);
2337 ins->block()->insertBefore(ins, elements);
2339 MConstant* index = nullptr;
2340 for (uint32_t i = 0; i < count; i++) {
2341 index = MConstant::New(alloc(), Int32Value(i));
2342 ins->block()->insertBefore(ins, index);
2344 MDefinition* arg = actualArgs->getArg(begin + i);
2345 auto* store = MStoreElement::NewUnbarriered(alloc(), elements, index, arg,
2346 /* needsHoleCheck = */ false);
2347 ins->block()->insertBefore(ins, store);
2349 auto* barrier = MPostWriteBarrier::New(alloc(), newArray, arg);
2350 ins->block()->insertBefore(ins, barrier);
2353 auto* initLength = MSetInitializedLength::New(alloc(), elements, index);
2354 ins->block()->insertBefore(ins, initLength);
2357 return newArray;
2360 void ArgumentsReplacer::visitArrayFromArgumentsObject(
2361 MArrayFromArgumentsObject* ins) {
2362 // Skip other arguments objects.
2363 if (ins->argsObject() != args_) {
2364 return;
2367 // We can only replace `arguments` because we've verified that the `arguments`
2368 // object hasn't been modified in any way. This implies that the arguments
2369 // stored in the stack frame haven't been changed either.
2371 // The idea to replace `arguments` in spread calls `f(...arguments)` is now as
2372 // follows:
2373 // We replace |MArrayFromArgumentsObject| with the identical instructions we
2374 // emit when building a rest-array object, cf. |WarpBuilder::build_Rest()|. In
2375 // a next step, scalar replacement will then replace these new instructions
2376 // themselves.
2378 Shape* shape = ins->shape();
2379 MOZ_ASSERT(shape);
2381 MDefinition* replacement;
2382 if (isInlinedArguments()) {
2383 auto* actualArgs = args_->toCreateInlinedArgumentsObject();
2384 uint32_t numActuals = actualArgs->numActuals();
2385 MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
2387 replacement = inlineArgsArray(ins, shape, 0, numActuals);
2388 } else {
2389 // We can use |MRest| to read all arguments, because we've guaranteed that
2390 // the arguments stored in the stack frame haven't changed; see the comment
2391 // at the start of this method.
2393 auto* numActuals = MArgumentsLength::New(alloc());
2394 ins->block()->insertBefore(ins, numActuals);
2396 // Set |numFormals| to zero to read all arguments, including any formals.
2397 uint32_t numFormals = 0;
2399 auto* rest = MRest::New(alloc(), numActuals, numFormals, shape);
2400 ins->block()->insertBefore(ins, rest);
2402 replacement = rest;
2405 ins->replaceAllUsesWith(replacement);
2407 // Remove original instruction.
2408 ins->block()->discard(ins);
2411 static uint32_t NormalizeSlice(MDefinition* def, uint32_t length) {
2412 int32_t value = def->toConstant()->toInt32();
2413 if (value < 0) {
2414 return std::max(int32_t(uint32_t(value) + length), 0);
2416 return std::min(uint32_t(value), length);
2419 void ArgumentsReplacer::visitArgumentsSlice(MArgumentsSlice* ins) {
2420 // Skip other arguments objects.
2421 if (ins->object() != args_) {
2422 return;
2425 // Optimise the common pattern |Array.prototype.slice.call(arguments, begin)|,
2426 // where |begin| is a non-negative, constant int32.
2428 // An absent end-index is replaced by |arguments.length|, so we try to match
2429 // |Array.prototype.slice.call(arguments, begin, arguments.length)|.
2430 if (isInlinedArguments()) {
2431 // When this is an inlined arguments, |arguments.length| has been replaced
2432 // by a constant.
2433 if (ins->begin()->isConstant() && ins->end()->isConstant()) {
2434 auto* actualArgs = args_->toCreateInlinedArgumentsObject();
2435 uint32_t numActuals = actualArgs->numActuals();
2436 MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);
2438 uint32_t begin = NormalizeSlice(ins->begin(), numActuals);
2439 uint32_t end = NormalizeSlice(ins->end(), numActuals);
2440 uint32_t count = end > begin ? end - begin : 0;
2441 MOZ_ASSERT(count <= numActuals);
2443 Shape* shape = ins->templateObj()->shape();
2444 auto* newArray = inlineArgsArray(ins, shape, begin, count);
2446 ins->replaceAllUsesWith(newArray);
2448 // Remove original instruction.
2449 ins->block()->discard(ins);
2450 return;
2452 } else {
2453 // Otherwise |arguments.length| is emitted as MArgumentsLength.
2454 if (ins->begin()->isConstant() && ins->end()->isArgumentsLength()) {
2455 int32_t begin = ins->begin()->toConstant()->toInt32();
2456 if (begin >= 0) {
2457 auto* numActuals = MArgumentsLength::New(alloc());
2458 ins->block()->insertBefore(ins, numActuals);
2460 // Set |numFormals| to read all arguments starting at |begin|.
2461 uint32_t numFormals = begin;
2463 Shape* shape = ins->templateObj()->shape();
2465 // Use MRest because it can be scalar replaced, which enables further
2466 // optimizations.
2467 auto* rest = MRest::New(alloc(), numActuals, numFormals, shape);
2468 ins->block()->insertBefore(ins, rest);
2470 ins->replaceAllUsesWith(rest);
2472 // Remove original instruction.
2473 ins->block()->discard(ins);
2474 return;
2479 MInstruction* numArgs;
2480 if (isInlinedArguments()) {
2481 uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
2482 numArgs = MConstant::New(alloc(), Int32Value(argc));
2483 } else {
2484 numArgs = MArgumentsLength::New(alloc());
2486 ins->block()->insertBefore(ins, numArgs);
2488 auto* begin = MNormalizeSliceTerm::New(alloc(), ins->begin(), numArgs);
2489 ins->block()->insertBefore(ins, begin);
2491 auto* end = MNormalizeSliceTerm::New(alloc(), ins->end(), numArgs);
2492 ins->block()->insertBefore(ins, end);
2494 bool isMax = false;
2495 auto* beginMin = MMinMax::New(alloc(), begin, end, MIRType::Int32, isMax);
2496 ins->block()->insertBefore(ins, beginMin);
2498 // Safe to truncate because both operands are positive and end >= beginMin.
2499 auto* count = MSub::New(alloc(), end, beginMin, MIRType::Int32);
2500 count->setTruncateKind(TruncateKind::Truncate);
2501 ins->block()->insertBefore(ins, count);
2503 MInstruction* replacement;
2504 if (isInlinedArguments()) {
2505 auto* actualArgs = args_->toCreateInlinedArgumentsObject();
2506 replacement =
2507 MInlineArgumentsSlice::New(alloc(), beginMin, count, actualArgs,
2508 ins->templateObj(), ins->initialHeap());
2509 if (!replacement) {
2510 oom_ = true;
2511 return;
2513 } else {
2514 replacement = MFrameArgumentsSlice::New(
2515 alloc(), beginMin, count, ins->templateObj(), ins->initialHeap());
2517 ins->block()->insertBefore(ins, replacement);
2519 ins->replaceAllUsesWith(replacement);
2521 // Remove original instruction.
2522 ins->block()->discard(ins);
2525 void ArgumentsReplacer::visitLoadFixedSlot(MLoadFixedSlot* ins) {
2526 // Skip other arguments objects.
2527 if (ins->object() != args_) {
2528 return;
2531 MOZ_ASSERT(ins->slot() == ArgumentsObject::CALLEE_SLOT);
2533 MDefinition* replacement;
2534 if (isInlinedArguments()) {
2535 replacement = args_->toCreateInlinedArgumentsObject()->getCallee();
2536 } else {
2537 auto* callee = MCallee::New(alloc());
2538 ins->block()->insertBefore(ins, callee);
2539 replacement = callee;
2541 ins->replaceAllUsesWith(replacement);
2543 // Remove original instruction.
2544 ins->block()->discard(ins);
2547 static inline bool IsOptimizableRestInstruction(MInstruction* ins) {
2548 return ins->isRest();
2551 class RestReplacer : public MDefinitionVisitorDefaultNoop {
2552 private:
2553 MIRGenerator* mir_;
2554 MIRGraph& graph_;
2555 MInstruction* rest_;
2557 TempAllocator& alloc() { return graph_.alloc(); }
2558 MRest* rest() const { return rest_->toRest(); }
2560 bool isRestElements(MDefinition* elements);
2561 void discardInstruction(MInstruction* ins, MDefinition* elements);
2562 MDefinition* restLength(MInstruction* ins);
2563 void visitLength(MInstruction* ins, MDefinition* elements);
2565 void visitGuardToClass(MGuardToClass* ins);
2566 void visitGuardShape(MGuardShape* ins);
2567 void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
2568 void visitUnbox(MUnbox* ins);
2569 void visitCompare(MCompare* ins);
2570 void visitLoadElement(MLoadElement* ins);
2571 void visitArrayLength(MArrayLength* ins);
2572 void visitInitializedLength(MInitializedLength* ins);
2573 void visitApplyArray(MApplyArray* ins);
2574 void visitConstructArray(MConstructArray* ins);
2576 bool escapes(MElements* ins);
2578 public:
2579 RestReplacer(MIRGenerator* mir, MIRGraph& graph, MInstruction* rest)
2580 : mir_(mir), graph_(graph), rest_(rest) {
2581 MOZ_ASSERT(IsOptimizableRestInstruction(rest_));
2584 bool escapes(MInstruction* ins);
2585 bool run();
2586 void assertSuccess();
2589 // Returns false if the rest array object does not escape.
2590 bool RestReplacer::escapes(MInstruction* ins) {
2591 MOZ_ASSERT(ins->type() == MIRType::Object);
2593 JitSpewDef(JitSpew_Escape, "Check rest array\n", ins);
2594 JitSpewIndent spewIndent(JitSpew_Escape);
2596 // We can replace rest arrays in scripts with OSR entries, but the outermost
2597 // rest object has already been allocated before we enter via OSR and can't be
2598 // replaced.
2599 // See also the same restriction when replacing |arguments|.
2600 if (graph_.osrBlock()) {
2601 JitSpew(JitSpew_Escape, "Can't replace outermost OSR rest array");
2602 return true;
2605 // Check all uses to see whether they can be supported without allocating an
2606 // ArrayObject for the rest parameter.
2607 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
2608 MNode* consumer = (*i)->consumer();
2610 // If a resume point can observe this instruction, we can only optimize
2611 // if it is recoverable.
2612 if (consumer->isResumePoint()) {
2613 if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
2614 JitSpew(JitSpew_Escape, "Observable rest array cannot be recovered");
2615 return true;
2617 continue;
2620 MDefinition* def = consumer->toDefinition();
2621 switch (def->op()) {
2622 case MDefinition::Opcode::Elements: {
2623 auto* elem = def->toElements();
2624 MOZ_ASSERT(elem->object() == ins);
2625 if (escapes(elem)) {
2626 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
2627 return true;
2629 break;
2632 case MDefinition::Opcode::GuardShape: {
2633 const Shape* shape = rest()->shape();
2634 if (!shape) {
2635 JitSpew(JitSpew_Escape, "No shape defined.");
2636 return true;
2639 auto* guard = def->toGuardShape();
2640 if (shape != guard->shape()) {
2641 JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", def);
2642 return true;
2644 if (escapes(guard)) {
2645 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
2646 return true;
2648 break;
2651 case MDefinition::Opcode::GuardToClass: {
2652 auto* guard = def->toGuardToClass();
2653 if (guard->getClass() != &ArrayObject::class_) {
2654 JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", def);
2655 return true;
2657 if (escapes(guard)) {
2658 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
2659 return true;
2661 break;
2664 case MDefinition::Opcode::GuardArrayIsPacked: {
2665 // Rest arrays are always packed as long as they aren't modified.
2666 auto* guard = def->toGuardArrayIsPacked();
2667 if (escapes(guard)) {
2668 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
2669 return true;
2671 break;
2674 case MDefinition::Opcode::Unbox: {
2675 if (def->type() != MIRType::Object) {
2676 JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
2677 return true;
2679 if (escapes(def->toInstruction())) {
2680 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
2681 return true;
2683 break;
2686 // This instruction is supported for |JSOp::OptimizeSpreadCall|.
2687 case MDefinition::Opcode::Compare: {
2688 bool canFold;
2689 if (!def->toCompare()->tryFold(&canFold)) {
2690 JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
2691 return true;
2693 break;
2696 // This instruction is a no-op used to test that scalar replacement is
2697 // working as expected.
2698 case MDefinition::Opcode::AssertRecoveredOnBailout:
2699 break;
2701 default:
2702 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
2703 return true;
2707 JitSpew(JitSpew_Escape, "Rest array object is not escaped");
2708 return false;
2711 bool RestReplacer::escapes(MElements* ins) {
2712 JitSpewDef(JitSpew_Escape, "Check rest array elements\n", ins);
2713 JitSpewIndent spewIndent(JitSpew_Escape);
2715 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
2716 // The MIRType::Elements cannot be captured in a resume point as it does not
2717 // represent a value allocation.
2718 MDefinition* def = (*i)->consumer()->toDefinition();
2720 switch (def->op()) {
2721 case MDefinition::Opcode::LoadElement:
2722 MOZ_ASSERT(def->toLoadElement()->elements() == ins);
2723 break;
2725 case MDefinition::Opcode::ArrayLength:
2726 MOZ_ASSERT(def->toArrayLength()->elements() == ins);
2727 break;
2729 case MDefinition::Opcode::InitializedLength:
2730 MOZ_ASSERT(def->toInitializedLength()->elements() == ins);
2731 break;
2733 case MDefinition::Opcode::ApplyArray:
2734 MOZ_ASSERT(def->toApplyArray()->getElements() == ins);
2735 break;
2737 case MDefinition::Opcode::ConstructArray:
2738 MOZ_ASSERT(def->toConstructArray()->getElements() == ins);
2739 break;
2741 default:
2742 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
2743 return true;
2747 JitSpew(JitSpew_Escape, "Rest array object is not escaped");
2748 return false;
2751 // Replacing the rest array object is simpler than replacing an object or array,
2752 // because the rest array object does not change state.
2753 bool RestReplacer::run() {
2754 MBasicBlock* startBlock = rest_->block();
2756 // Iterate over each basic block.
2757 for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
2758 block != graph_.rpoEnd(); block++) {
2759 if (mir_->shouldCancel("Scalar replacement of rest array object")) {
2760 return false;
2763 // Iterates over phis and instructions.
2764 // We do not have to visit resume points. Any resume points that capture the
2765 // rest array object will be handled by the Sink pass.
2766 for (MDefinitionIterator iter(*block); iter;) {
2767 // Increment the iterator before visiting the instruction, as the visit
2768 // function might discard itself from the basic block.
2769 MDefinition* def = *iter++;
2770 switch (def->op()) {
2771 #define MIR_OP(op) \
2772 case MDefinition::Opcode::op: \
2773 visit##op(def->to##op()); \
2774 break;
2775 MIR_OPCODE_LIST(MIR_OP)
2776 #undef MIR_OP
2778 if (!graph_.alloc().ensureBallast()) {
2779 return false;
2784 assertSuccess();
2785 return true;
2788 void RestReplacer::assertSuccess() {
2789 MOZ_ASSERT(rest_->canRecoverOnBailout());
2790 MOZ_ASSERT(!rest_->hasLiveDefUses());
2793 bool RestReplacer::isRestElements(MDefinition* elements) {
2794 return elements->isElements() && elements->toElements()->object() == rest_;
2797 void RestReplacer::discardInstruction(MInstruction* ins,
2798 MDefinition* elements) {
2799 MOZ_ASSERT(elements->isElements());
2800 ins->block()->discard(ins);
2801 if (!elements->hasLiveDefUses()) {
2802 elements->block()->discard(elements->toInstruction());
2806 void RestReplacer::visitGuardToClass(MGuardToClass* ins) {
2807 // Skip guards on other objects.
2808 if (ins->object() != rest_) {
2809 return;
2811 MOZ_ASSERT(ins->getClass() == &ArrayObject::class_);
2813 // Replace the guard with the array object.
2814 ins->replaceAllUsesWith(rest_);
2816 // Remove the guard.
2817 ins->block()->discard(ins);
2820 void RestReplacer::visitGuardShape(MGuardShape* ins) {
2821 // Skip guards on other objects.
2822 if (ins->object() != rest_) {
2823 return;
2826 // Replace the guard with the array object.
2827 ins->replaceAllUsesWith(rest_);
2829 // Remove the guard.
2830 ins->block()->discard(ins);
2833 void RestReplacer::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
2834 // Skip guards on other objects.
2835 if (ins->array() != rest_) {
2836 return;
2839 // Replace the guard by its object.
2840 ins->replaceAllUsesWith(rest_);
2842 // Remove original instruction.
2843 ins->block()->discard(ins);
2846 void RestReplacer::visitUnbox(MUnbox* ins) {
2847 // Skip unrelated unboxes.
2848 if (ins->input() != rest_) {
2849 return;
2851 MOZ_ASSERT(ins->type() == MIRType::Object);
2853 // Replace the unbox with the array object.
2854 ins->replaceAllUsesWith(rest_);
2856 // Remove the unbox.
2857 ins->block()->discard(ins);
2860 void RestReplacer::visitCompare(MCompare* ins) {
2861 // Skip unrelated comparisons.
2862 if (ins->lhs() != rest_ && ins->rhs() != rest_) {
2863 return;
2866 bool folded;
2867 MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
2869 auto* cst = MConstant::New(alloc(), BooleanValue(folded));
2870 ins->block()->insertBefore(ins, cst);
2872 // Replace the comparison with a constant.
2873 ins->replaceAllUsesWith(cst);
2875 // Remove original instruction.
2876 ins->block()->discard(ins);
2879 void RestReplacer::visitLoadElement(MLoadElement* ins) {
2880 // Skip other array objects.
2881 MDefinition* elements = ins->elements();
2882 if (!isRestElements(elements)) {
2883 return;
2886 MDefinition* index = ins->index();
2888 // Adjust the index to skip any extra formals.
2889 if (uint32_t formals = rest()->numFormals()) {
2890 auto* numFormals = MConstant::New(alloc(), Int32Value(formals));
2891 ins->block()->insertBefore(ins, numFormals);
2893 auto* add = MAdd::New(alloc(), index, numFormals, TruncateKind::Truncate);
2894 ins->block()->insertBefore(ins, add);
2896 index = add;
2899 auto* loadArg = MGetFrameArgument::New(alloc(), index);
2901 ins->block()->insertBefore(ins, loadArg);
2902 ins->replaceAllUsesWith(loadArg);
2904 // Remove original instruction.
2905 discardInstruction(ins, elements);
2908 MDefinition* RestReplacer::restLength(MInstruction* ins) {
2909 // Compute |Math.max(numActuals - numFormals, 0)| for the rest array length.
2911 auto* numActuals = rest()->numActuals();
2913 if (uint32_t formals = rest()->numFormals()) {
2914 auto* numFormals = MConstant::New(alloc(), Int32Value(formals));
2915 ins->block()->insertBefore(ins, numFormals);
2917 auto* length = MSub::New(alloc(), numActuals, numFormals, MIRType::Int32);
2918 length->setTruncateKind(TruncateKind::Truncate);
2919 ins->block()->insertBefore(ins, length);
2921 auto* zero = MConstant::New(alloc(), Int32Value(0));
2922 ins->block()->insertBefore(ins, zero);
2924 bool isMax = true;
2925 auto* minmax = MMinMax::New(alloc(), length, zero, MIRType::Int32, isMax);
2926 ins->block()->insertBefore(ins, minmax);
2928 return minmax;
2931 return numActuals;
2934 void RestReplacer::visitLength(MInstruction* ins, MDefinition* elements) {
2935 MOZ_ASSERT(ins->isArrayLength() || ins->isInitializedLength());
2937 // Skip other array objects.
2938 if (!isRestElements(elements)) {
2939 return;
2942 MDefinition* replacement = restLength(ins);
2944 ins->replaceAllUsesWith(replacement);
2946 // Remove original instruction.
2947 discardInstruction(ins, elements);
2950 void RestReplacer::visitArrayLength(MArrayLength* ins) {
2951 visitLength(ins, ins->elements());
2954 void RestReplacer::visitInitializedLength(MInitializedLength* ins) {
2955 // The initialized length of a rest array is equal to its length.
2956 visitLength(ins, ins->elements());
2959 void RestReplacer::visitApplyArray(MApplyArray* ins) {
2960 // Skip other array objects.
2961 MDefinition* elements = ins->getElements();
2962 if (!isRestElements(elements)) {
2963 return;
2966 auto* numActuals = restLength(ins);
2968 auto* apply =
2969 MApplyArgs::New(alloc(), ins->getSingleTarget(), ins->getFunction(),
2970 numActuals, ins->getThis(), rest()->numFormals());
2971 apply->setBailoutKind(ins->bailoutKind());
2972 if (!ins->maybeCrossRealm()) {
2973 apply->setNotCrossRealm();
2975 if (ins->ignoresReturnValue()) {
2976 apply->setIgnoresReturnValue();
2978 ins->block()->insertBefore(ins, apply);
2980 ins->replaceAllUsesWith(apply);
2982 apply->stealResumePoint(ins);
2984 // Remove original instruction.
2985 discardInstruction(ins, elements);
2988 void RestReplacer::visitConstructArray(MConstructArray* ins) {
2989 // Skip other array objects.
2990 MDefinition* elements = ins->getElements();
2991 if (!isRestElements(elements)) {
2992 return;
2995 auto* numActuals = restLength(ins);
2997 auto* construct = MConstructArgs::New(
2998 alloc(), ins->getSingleTarget(), ins->getFunction(), numActuals,
2999 ins->getThis(), ins->getNewTarget(), rest()->numFormals());
3000 construct->setBailoutKind(ins->bailoutKind());
3001 if (!ins->maybeCrossRealm()) {
3002 construct->setNotCrossRealm();
3005 ins->block()->insertBefore(ins, construct);
3006 ins->replaceAllUsesWith(construct);
3008 construct->stealResumePoint(ins);
3010 // Remove original instruction.
3011 discardInstruction(ins, elements);
3014 bool ScalarReplacement(MIRGenerator* mir, MIRGraph& graph) {
3015 JitSpew(JitSpew_Escape, "Begin (ScalarReplacement)");
3017 EmulateStateOf<ObjectMemoryView> replaceObject(mir, graph);
3018 EmulateStateOf<ArrayMemoryView> replaceArray(mir, graph);
3019 bool addedPhi = false;
3021 for (ReversePostorderIterator block = graph.rpoBegin();
3022 block != graph.rpoEnd(); block++) {
3023 if (mir->shouldCancel("Scalar Replacement (main loop)")) {
3024 return false;
3027 for (MInstructionIterator ins = block->begin(); ins != block->end();
3028 ins++) {
3029 if (IsOptimizableObjectInstruction(*ins) &&
3030 !IsObjectEscaped(*ins, *ins)) {
3031 ObjectMemoryView view(graph.alloc(), *ins);
3032 if (!replaceObject.run(view)) {
3033 return false;
3035 view.assertSuccess();
3036 addedPhi = true;
3037 continue;
3040 if (IsOptimizableArrayInstruction(*ins) && !IsArrayEscaped(*ins, *ins)) {
3041 ArrayMemoryView view(graph.alloc(), *ins);
3042 if (!replaceArray.run(view)) {
3043 return false;
3045 view.assertSuccess();
3046 addedPhi = true;
3047 continue;
3050 if (IsOptimizableArgumentsInstruction(*ins)) {
3051 ArgumentsReplacer replacer(mir, graph, *ins);
3052 if (replacer.escapes(*ins)) {
3053 continue;
3055 if (!replacer.run()) {
3056 return false;
3058 continue;
3061 if (IsOptimizableRestInstruction(*ins)) {
3062 RestReplacer replacer(mir, graph, *ins);
3063 if (replacer.escapes(*ins)) {
3064 continue;
3066 if (!replacer.run()) {
3067 return false;
3069 continue;
3074 if (addedPhi) {
3075 // Phis added by Scalar Replacement are only redundant Phis which are
3076 // not directly captured by any resume point but only by the MDefinition
3077 // state. The conservative observability only focuses on Phis which are
3078 // not used as resume points operands.
3079 AssertExtendedGraphCoherency(graph);
3080 if (!EliminatePhis(mir, graph, ConservativeObservability)) {
3081 return false;
3085 return true;
3088 } /* namespace jit */
3089 } /* namespace js */