1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 * vim: set ts=8 sts=2 et sw=2 tw=80:
3 * This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "jit/ScalarReplacement.h"
9 #include "jit/IonAnalysis.h"
10 #include "jit/JitSpewer.h"
11 #include "jit/MIR-wasm.h"
12 #include "jit/MIR.h"
13 #include "jit/MIRGenerator.h"
14 #include "jit/MIRGraph.h"
15 #include "jit/WarpBuilderShared.h"
16 #include "js/Vector.h"
17 #include "vm/ArgumentsObject.h"
19 #include "gc/ObjectKind-inl.h"
21 namespace js {
22 namespace jit {
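// Scalar replacement looks for allocations (plain objects, call objects,
// iterators, arrays, and arguments objects) which never escape. For each such
// allocation, a MemoryView (see ObjectMemoryView, ArrayMemoryView, and
// ArgumentsReplacer below) rewrites loads and stores on the allocation into
// uses of the stored values and records recover information so the allocation
// can be rebuilt on bailout. EmulateStateOf drives a MemoryView over the graph
// in reverse postorder, keeping one block state per basic block and merging
// states at join points.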
24 template <typename MemoryView>
25 class EmulateStateOf {
26 private:
27 using BlockState = typename MemoryView::BlockState;
29 MIRGenerator* mir_;
30 MIRGraph& graph_;
32 // Block state at the entrance of all basic blocks.
33 Vector<BlockState*, 8, SystemAllocPolicy> states_;
35 public:
36 EmulateStateOf(MIRGenerator* mir, MIRGraph& graph)
37 : mir_(mir), graph_(graph) {}
39 bool run(MemoryView& view);
42 template <typename MemoryView>
43 bool EmulateStateOf<MemoryView>::run(MemoryView& view) {
44 // Initialize the current block state of each block to an unknown state.
45 if (!states_.appendN(nullptr, graph_.numBlocks())) {
46 return false;
49 // Initialize the first block which needs to be traversed in RPO.
50 MBasicBlock* startBlock = view.startingBlock();
51 if (!view.initStartingState(&states_[startBlock->id()])) {
52 return false;
55 // Iterate over each basic block which has a valid entry state, and merge
56 // the state in the successor blocks.
57 for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
58 block != graph_.rpoEnd(); block++) {
59 if (mir_->shouldCancel(MemoryView::phaseName)) {
60 return false;
63 // Get the block state as the result of the merge of all predecessors
64 // which have already been visited in RPO. This means that backedges
65 // are not yet merged into the loop.
66 BlockState* state = states_[block->id()];
67 if (!state) {
68 continue;
70 view.setEntryBlockState(state);
72 // Iterate over resume points, phis, and instructions.
73 for (MNodeIterator iter(*block); iter;) {
74 // Increment the iterator before visiting the instruction, as the
75 // visit function might discard itself from the basic block.
76 MNode* ins = *iter++;
77 if (ins->isDefinition()) {
78 MDefinition* def = ins->toDefinition();
79 switch (def->op()) {
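// Dispatch on the opcode to the MemoryView's visit##op method. Views derive
// from MDefinitionVisitorDefaultNoop, so opcodes without an overridden visit
// method are simply ignored.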
80 #define MIR_OP(op) \
81 case MDefinition::Opcode::op: \
82 view.visit##op(def->to##op()); \
83 break;
84 MIR_OPCODE_LIST(MIR_OP)
85 #undef MIR_OP
87 } else {
88 view.visitResumePoint(ins->toResumePoint());
90 if (!graph_.alloc().ensureBallast()) {
91 return false;
93 if (view.oom()) {
94 return false;
98 // For each successor, merge the current state into the state of the
99 // successors.
100 for (size_t s = 0; s < block->numSuccessors(); s++) {
101 MBasicBlock* succ = block->getSuccessor(s);
102 if (!view.mergeIntoSuccessorState(*block, succ, &states_[succ->id()])) {
103 return false;
108 states_.clear();
109 return true;
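// Object allocations whose slots can be replaced by the stored values when the
// object does not escape. For example (illustrative only), in
//   function f() { var p = {x: 1, y: 2}; return p.x + p.y; }
// the object allocation can be optimized away and its slot loads replaced by
// the stored constants, provided the escape analysis below proves |p| never
// escapes.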
112 static inline bool IsOptimizableObjectInstruction(MInstruction* ins) {
113 return ins->isNewObject() || ins->isNewPlainObject() ||
114 ins->isNewCallObject() || ins->isNewIterator();
117 static bool PhiOperandEqualTo(MDefinition* operand, MInstruction* newObject) {
118 if (operand == newObject) {
119 return true;
122 switch (operand->op()) {
123 case MDefinition::Opcode::GuardShape:
124 return PhiOperandEqualTo(operand->toGuardShape()->input(), newObject);
126 case MDefinition::Opcode::GuardToClass:
127 return PhiOperandEqualTo(operand->toGuardToClass()->input(), newObject);
129 case MDefinition::Opcode::CheckIsObj:
130 return PhiOperandEqualTo(operand->toCheckIsObj()->input(), newObject);
132 case MDefinition::Opcode::Unbox:
133 return PhiOperandEqualTo(operand->toUnbox()->input(), newObject);
135 default:
136 return false;
140 // Return true if all phi operands are equal to |newObject|.
141 static bool PhiOperandsEqualTo(MPhi* phi, MInstruction* newObject) {
142 MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
144 for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
145 if (!PhiOperandEqualTo(phi->getOperand(i), newObject)) {
146 return false;
149 return true;
152 static bool IsObjectEscaped(MDefinition* ins, MInstruction* newObject,
153 const Shape* shapeDefault = nullptr);
155 // Returns false if the lambda is not escaped and is optimizable by
156 // ScalarReplacementOfObject.
157 static bool IsLambdaEscaped(MInstruction* ins, MInstruction* lambda,
158 MInstruction* newObject, const Shape* shape) {
159 MOZ_ASSERT(lambda->isLambda() || lambda->isFunctionWithProto());
160 MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
161 JitSpewDef(JitSpew_Escape, "Check lambda\n", ins);
162 JitSpewIndent spewIndent(JitSpew_Escape);
164 // The scope chain is not escaped if none of the Lambdas which are
165 // capturing it are escaped.
166 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
167 MNode* consumer = (*i)->consumer();
168 if (!consumer->isDefinition()) {
169 // Cannot optimize if it is observable from fun.arguments or others.
170 if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
171 JitSpew(JitSpew_Escape, "Observable lambda cannot be recovered");
172 return true;
174 continue;
177 MDefinition* def = consumer->toDefinition();
178 switch (def->op()) {
179 case MDefinition::Opcode::GuardToFunction: {
180 auto* guard = def->toGuardToFunction();
181 if (IsLambdaEscaped(guard, lambda, newObject, shape)) {
182 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
183 return true;
185 break;
188 case MDefinition::Opcode::GuardFunctionScript: {
189 auto* guard = def->toGuardFunctionScript();
190 BaseScript* actual;
191 if (lambda->isLambda()) {
192 actual = lambda->toLambda()->templateFunction()->baseScript();
193 } else {
194 actual = lambda->toFunctionWithProto()->function()->baseScript();
196 if (actual != guard->expected()) {
197 JitSpewDef(JitSpew_Escape, "has a non-matching script guard\n",
198 guard);
199 return true;
201 if (IsLambdaEscaped(guard, lambda, newObject, shape)) {
202 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
203 return true;
205 break;
208 case MDefinition::Opcode::FunctionEnvironment: {
209 if (IsObjectEscaped(def->toFunctionEnvironment(), newObject, shape)) {
210 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
211 return true;
213 break;
216 default:
217 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
218 return true;
221 JitSpew(JitSpew_Escape, "Lambda is not escaped");
222 return false;
225 static bool IsLambdaEscaped(MInstruction* lambda, MInstruction* newObject,
226 const Shape* shape) {
227 return IsLambdaEscaped(lambda, lambda, newObject, shape);
230 // Returns false if the object is not escaped and is optimizable by
231 // ScalarReplacementOfObject.
233 // For the moment, this code is dumb as it only supports objects whose shape
234 // does not change.
235 static bool IsObjectEscaped(MDefinition* ins, MInstruction* newObject,
236 const Shape* shapeDefault) {
237 MOZ_ASSERT(ins->type() == MIRType::Object || ins->isPhi());
238 MOZ_ASSERT(IsOptimizableObjectInstruction(newObject));
240 JitSpewDef(JitSpew_Escape, "Check object\n", ins);
241 JitSpewIndent spewIndent(JitSpew_Escape);
243 const Shape* shape = shapeDefault;
244 if (!shape) {
245 if (ins->isNewPlainObject()) {
246 shape = ins->toNewPlainObject()->shape();
247 } else if (JSObject* templateObj = MObjectState::templateObjectOf(ins)) {
248 shape = templateObj->shape();
252 if (!shape) {
253 JitSpew(JitSpew_Escape, "No shape defined.");
254 return true;
257 // Check if the object is escaped. If the object is not the first argument
258 // of either a known Store / Load, then we consider it as escaped. This is a
259 // cheap and conservative escape analysis.
260 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
261 MNode* consumer = (*i)->consumer();
262 if (!consumer->isDefinition()) {
263 // Cannot optimize if it is observable from fun.arguments or others.
264 if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
265 JitSpew(JitSpew_Escape, "Observable object cannot be recovered");
266 return true;
268 continue;
271 MDefinition* def = consumer->toDefinition();
272 switch (def->op()) {
273 case MDefinition::Opcode::StoreFixedSlot:
274 case MDefinition::Opcode::LoadFixedSlot:
275 // Not escaped if it is the first argument.
276 if (def->indexOf(*i) == 0) {
277 break;
280 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
281 return true;
283 case MDefinition::Opcode::PostWriteBarrier:
284 break;
286 case MDefinition::Opcode::Slots: {
287 #ifdef DEBUG
288 // Assert that MSlots are only used by MStoreDynamicSlot and
289 // MLoadDynamicSlot.
290 MSlots* ins = def->toSlots();
291 MOZ_ASSERT(ins->object() != 0);
292 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
293 // toDefinition should normally never fail, since they don't get
294 // captured by resume points.
295 MDefinition* def = (*i)->consumer()->toDefinition();
296 MOZ_ASSERT(def->op() == MDefinition::Opcode::StoreDynamicSlot ||
297 def->op() == MDefinition::Opcode::LoadDynamicSlot);
299 #endif
300 break;
303 case MDefinition::Opcode::GuardShape: {
304 MGuardShape* guard = def->toGuardShape();
305 if (shape != guard->shape()) {
306 JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
307 return true;
309 if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
310 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
311 return true;
313 break;
316 case MDefinition::Opcode::GuardToClass: {
317 MGuardToClass* guard = def->toGuardToClass();
318 if (!shape || shape->getObjectClass() != guard->getClass()) {
319 JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
320 return true;
322 if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
323 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
324 return true;
326 break;
329 case MDefinition::Opcode::CheckIsObj: {
330 if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
331 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
332 return true;
334 break;
337 case MDefinition::Opcode::Unbox: {
338 if (def->type() != MIRType::Object) {
339 JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
340 return true;
342 if (IsObjectEscaped(def->toInstruction(), newObject, shape)) {
343 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
344 return true;
346 break;
349 case MDefinition::Opcode::Lambda:
350 case MDefinition::Opcode::FunctionWithProto: {
351 if (IsLambdaEscaped(def->toInstruction(), newObject, shape)) {
352 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
353 return true;
355 break;
358 case MDefinition::Opcode::Phi: {
359 auto* phi = def->toPhi();
360 if (!PhiOperandsEqualTo(phi, newObject)) {
361 JitSpewDef(JitSpew_Escape, "has different phi operands\n", def);
362 return true;
364 if (IsObjectEscaped(phi, newObject, shape)) {
365 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
366 return true;
368 break;
371 case MDefinition::Opcode::Compare: {
372 bool canFold;
373 if (!def->toCompare()->tryFold(&canFold)) {
374 JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
375 return true;
377 break;
380 // Doesn't escape the object.
381 case MDefinition::Opcode::IsObject:
382 break;
384 // This instruction is a no-op used to verify that scalar replacement
385 // is working as expected in jit-test.
386 case MDefinition::Opcode::AssertRecoveredOnBailout:
387 break;
389 // This is just a special flavor of constant which lets us optimize
390 // out some guards in certain circumstances. We'll turn this into a
391 // regular constant later.
392 case MDefinition::Opcode::ConstantProto:
393 break;
395 // We definitely don't need barriers for objects that don't exist.
396 case MDefinition::Opcode::AssertCanElidePostWriteBarrier:
397 break;
399 default:
400 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
401 return true;
405 JitSpew(JitSpew_Escape, "Object is not escaped");
406 return false;
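// ObjectMemoryView rewrites all operations on a non-escaped object: slot
// stores update an MObjectState which emulates the object's content, slot
// loads are replaced by the last stored value, and guards are replaced by the
// object itself. Resume points capture the MObjectState so the object can be
// recovered if we bail out.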
409 class ObjectMemoryView : public MDefinitionVisitorDefaultNoop {
410 public:
411 using BlockState = MObjectState;
412 static const char phaseName[];
414 private:
415 TempAllocator& alloc_;
416 MConstant* undefinedVal_;
417 MInstruction* obj_;
418 MBasicBlock* startBlock_;
419 BlockState* state_;
421 // Used to improve the memory usage by sharing common modification.
422 const MResumePoint* lastResumePoint_;
424 bool oom_;
426 public:
427 ObjectMemoryView(TempAllocator& alloc, MInstruction* obj);
429 MBasicBlock* startingBlock();
430 bool initStartingState(BlockState** pState);
432 void setEntryBlockState(BlockState* state);
433 bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
434 BlockState** pSuccState);
436 #ifdef DEBUG
437 void assertSuccess();
438 #else
439 void assertSuccess() {}
440 #endif
442 bool oom() const { return oom_; }
444 private:
445 MDefinition* functionForCallObject(MDefinition* ins);
447 public:
448 void visitResumePoint(MResumePoint* rp);
449 void visitObjectState(MObjectState* ins);
450 void visitStoreFixedSlot(MStoreFixedSlot* ins);
451 void visitLoadFixedSlot(MLoadFixedSlot* ins);
452 void visitPostWriteBarrier(MPostWriteBarrier* ins);
453 void visitStoreDynamicSlot(MStoreDynamicSlot* ins);
454 void visitLoadDynamicSlot(MLoadDynamicSlot* ins);
455 void visitGuardShape(MGuardShape* ins);
456 void visitGuardToClass(MGuardToClass* ins);
457 void visitCheckIsObj(MCheckIsObj* ins);
458 void visitUnbox(MUnbox* ins);
459 void visitFunctionEnvironment(MFunctionEnvironment* ins);
460 void visitGuardToFunction(MGuardToFunction* ins);
461 void visitGuardFunctionScript(MGuardFunctionScript* ins);
462 void visitLambda(MLambda* ins);
463 void visitFunctionWithProto(MFunctionWithProto* ins);
464 void visitPhi(MPhi* ins);
465 void visitCompare(MCompare* ins);
466 void visitConstantProto(MConstantProto* ins);
467 void visitIsObject(MIsObject* ins);
468 void visitAssertCanElidePostWriteBarrier(
469 MAssertCanElidePostWriteBarrier* ins);
472 /* static */ const char ObjectMemoryView::phaseName[] =
473 "Scalar Replacement of Object";
475 ObjectMemoryView::ObjectMemoryView(TempAllocator& alloc, MInstruction* obj)
476 : alloc_(alloc),
477 undefinedVal_(nullptr),
478 obj_(obj),
479 startBlock_(obj->block()),
480 state_(nullptr),
481 lastResumePoint_(nullptr),
482 oom_(false) {
483 // Annotate snapshots RValue such that we recover the store first.
484 obj_->setIncompleteObject();
486 // Annotate the instruction such that we do not replace it by a
487 // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
488 obj_->setImplicitlyUsedUnchecked();
491 MBasicBlock* ObjectMemoryView::startingBlock() { return startBlock_; }
493 bool ObjectMemoryView::initStartingState(BlockState** pState) {
494 // Uninitialized slots have an "undefined" value.
495 undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
496 startBlock_->insertBefore(obj_, undefinedVal_);
498 // Create a new block state and insert it at the location of the new
499 // object.
500 BlockState* state = BlockState::New(alloc_, obj_);
501 if (!state) {
502 return false;
505 startBlock_->insertAfter(obj_, state);
507 // Initialize the properties of the object state.
508 state->initFromTemplateObject(alloc_, undefinedVal_);
510 // Hold the state out of resume points until it is visited.
511 state->setInWorklist();
513 *pState = state;
514 return true;
517 void ObjectMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
519 bool ObjectMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
520 MBasicBlock* succ,
521 BlockState** pSuccState) {
522 BlockState* succState = *pSuccState;
524 // When a block has no state yet, create an empty one for the
525 // successor.
526 if (!succState) {
527 // If the successor is not dominated, then the object cannot flow
528 // into this basic block without a Phi. We know that no Phi exists
529 // in non-dominated successors, as the conservative escape
530 // analysis fails otherwise. This can happen when the
531 // successor is a join at the end of an if-block and the object
532 // only exists within the branch.
533 if (!startBlock_->dominates(succ)) {
534 return true;
537 // If there is only one predecessor, carry over the last state of the
538 // block to the successor. As the block state is immutable, if the
539 // current block has multiple successors, they will share the same entry
540 // state.
541 if (succ->numPredecessors() <= 1 || !state_->numSlots()) {
542 *pSuccState = state_;
543 return true;
546 // If we have multiple predecessors, then we allocate one Phi node for
547 // each predecessor, and create a new block state which only has phi
548 // nodes. These would later be removed by the removal of redundant phi
549 // nodes.
550 succState = BlockState::Copy(alloc_, state_);
551 if (!succState) {
552 return false;
555 size_t numPreds = succ->numPredecessors();
556 for (size_t slot = 0; slot < state_->numSlots(); slot++) {
557 MPhi* phi = MPhi::New(alloc_.fallible());
558 if (!phi || !phi->reserveLength(numPreds)) {
559 return false;
562 // Fill the inputs of the successor's Phi with undefined
563 // values; each predecessor block fills in its Phi input later.
564 for (size_t p = 0; p < numPreds; p++) {
565 phi->addInput(undefinedVal_);
568 // Add Phi in the list of Phis of the basic block.
569 succ->addPhi(phi);
570 succState->setSlot(slot, phi);
573 // Insert the newly created block state instruction at the beginning
574 // of the successor block, after all the phi nodes. Note that it
575 // would be captured by the entry resume point of the successor
576 // block.
577 succ->insertBefore(succ->safeInsertTop(), succState);
578 *pSuccState = succState;
581 MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
582 if (succ->numPredecessors() > 1 && succState->numSlots() &&
583 succ != startBlock_) {
584 // We need to re-compute successorWithPhis as the previous EliminatePhis
585 // phase might have removed all the Phis from the successor block.
586 size_t currIndex;
587 MOZ_ASSERT(!succ->phisEmpty());
588 if (curr->successorWithPhis()) {
589 MOZ_ASSERT(curr->successorWithPhis() == succ);
590 currIndex = curr->positionInPhiSuccessor();
591 } else {
592 currIndex = succ->indexForPredecessor(curr);
593 curr->setSuccessorWithPhis(succ, currIndex);
595 MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);
597 // Copy the current slot states to the index of current block in all the
598 // Phi created during the first visit of the successor.
599 for (size_t slot = 0; slot < state_->numSlots(); slot++) {
600 MPhi* phi = succState->getSlot(slot)->toPhi();
601 phi->replaceOperand(currIndex, state_->getSlot(slot));
605 return true;
608 #ifdef DEBUG
609 void ObjectMemoryView::assertSuccess() {
610 for (MUseIterator i(obj_->usesBegin()); i != obj_->usesEnd(); i++) {
611 MNode* ins = (*i)->consumer();
612 MDefinition* def = nullptr;
614 // Resume points have been replaced by the object state.
615 if (ins->isResumePoint() ||
616 (def = ins->toDefinition())->isRecoveredOnBailout()) {
617 MOZ_ASSERT(obj_->isIncompleteObject());
618 continue;
621 // The only remaining uses would be removed by DCE, which will also
622 // recover the object on bailouts.
623 MOZ_ASSERT(def->isSlots() || def->isLambda() || def->isFunctionWithProto());
624 MOZ_ASSERT(!def->hasDefUses());
627 #endif
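// Attach the current object state to resume points so that, on bailout, the
// object can be reconstructed from the emulated slot values. lastResumePoint_
// lets consecutive resume points share the list of recorded stores.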
629 void ObjectMemoryView::visitResumePoint(MResumePoint* rp) {
630 // As long as the MObjectState is not yet seen next to the allocation, we do
631 // not patch the resume point to recover the side effects.
632 if (!state_->isInWorklist()) {
633 rp->addStore(alloc_, state_, lastResumePoint_);
634 lastResumePoint_ = rp;
638 void ObjectMemoryView::visitObjectState(MObjectState* ins) {
639 if (ins->isInWorklist()) {
640 ins->setNotInWorklist();
644 void ObjectMemoryView::visitStoreFixedSlot(MStoreFixedSlot* ins) {
645 // Skip stores made on other objects.
646 if (ins->object() != obj_) {
647 return;
650 // Clone the state and update the slot value.
651 if (state_->hasFixedSlot(ins->slot())) {
652 state_ = BlockState::Copy(alloc_, state_);
653 if (!state_) {
654 oom_ = true;
655 return;
658 state_->setFixedSlot(ins->slot(), ins->value());
659 ins->block()->insertBefore(ins->toInstruction(), state_);
660 } else {
661 // UnsafeSetReserveSlot can access baked-in slots which are guarded by
662 // conditions, which are not seen by the escape analysis.
663 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
664 ins->block()->insertBefore(ins, bailout);
667 // Remove original instruction.
668 ins->block()->discard(ins);
671 void ObjectMemoryView::visitLoadFixedSlot(MLoadFixedSlot* ins) {
672 // Skip loads made on other objects.
673 if (ins->object() != obj_) {
674 return;
677 // Replace load by the slot value.
678 if (state_->hasFixedSlot(ins->slot())) {
679 ins->replaceAllUsesWith(state_->getFixedSlot(ins->slot()));
680 } else {
681 // UnsafeGetReserveSlot can access baked-in slots which are guarded by
682 // conditions, which are not seen by the escape analysis.
683 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
684 ins->block()->insertBefore(ins, bailout);
685 ins->replaceAllUsesWith(undefinedVal_);
688 // Remove original instruction.
689 ins->block()->discard(ins);
692 void ObjectMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
693 // Skip barriers on other objects.
694 if (ins->object() != obj_) {
695 return;
698 // Remove original instruction.
699 ins->block()->discard(ins);
702 void ObjectMemoryView::visitStoreDynamicSlot(MStoreDynamicSlot* ins) {
703 // Skip stores made on other objects.
704 MSlots* slots = ins->slots()->toSlots();
705 if (slots->object() != obj_) {
706 // Guard objects are replaced when they are visited.
707 MOZ_ASSERT(!slots->object()->isGuardShape() ||
708 slots->object()->toGuardShape()->object() != obj_);
709 return;
712 // Clone the state and update the slot value.
713 if (state_->hasDynamicSlot(ins->slot())) {
714 state_ = BlockState::Copy(alloc_, state_);
715 if (!state_) {
716 oom_ = true;
717 return;
720 state_->setDynamicSlot(ins->slot(), ins->value());
721 ins->block()->insertBefore(ins->toInstruction(), state_);
722 } else {
723 // UnsafeSetReserveSlot can access baked-in slots which are guarded by
724 // conditions, which are not seen by the escape analysis.
725 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
726 ins->block()->insertBefore(ins, bailout);
729 // Remove original instruction.
730 ins->block()->discard(ins);
733 void ObjectMemoryView::visitLoadDynamicSlot(MLoadDynamicSlot* ins) {
734 // Skip loads made on other objects.
735 MSlots* slots = ins->slots()->toSlots();
736 if (slots->object() != obj_) {
737 // Guard objects are replaced when they are visited.
738 MOZ_ASSERT(!slots->object()->isGuardShape() ||
739 slots->object()->toGuardShape()->object() != obj_);
740 return;
743 // Replace load by the slot value.
744 if (state_->hasDynamicSlot(ins->slot())) {
745 ins->replaceAllUsesWith(state_->getDynamicSlot(ins->slot()));
746 } else {
747 // UnsafeGetReserveSlot can access baked-in slots which are guarded by
748 // conditions, which are not seen by the escape analysis.
749 MBail* bailout = MBail::New(alloc_, BailoutKind::Inevitable);
750 ins->block()->insertBefore(ins, bailout);
751 ins->replaceAllUsesWith(undefinedVal_);
754 // Remove original instruction.
755 ins->block()->discard(ins);
758 void ObjectMemoryView::visitGuardShape(MGuardShape* ins) {
759 // Skip guards on other objects.
760 if (ins->object() != obj_) {
761 return;
764 // Replace the guard by its object.
765 ins->replaceAllUsesWith(obj_);
767 // Remove original instruction.
768 ins->block()->discard(ins);
771 void ObjectMemoryView::visitGuardToClass(MGuardToClass* ins) {
772 // Skip guards on other objects.
773 if (ins->object() != obj_) {
774 return;
777 // Replace the guard by its object.
778 ins->replaceAllUsesWith(obj_);
780 // Remove original instruction.
781 ins->block()->discard(ins);
784 void ObjectMemoryView::visitCheckIsObj(MCheckIsObj* ins) {
785 // Skip checks on other objects.
786 if (ins->input() != obj_) {
787 return;
790 // Replace the check by its object.
791 ins->replaceAllUsesWith(obj_);
793 // Remove original instruction.
794 ins->block()->discard(ins);
797 void ObjectMemoryView::visitUnbox(MUnbox* ins) {
798 // Skip unrelated unboxes.
799 if (ins->input() != obj_) {
800 return;
802 MOZ_ASSERT(ins->type() == MIRType::Object);
804 // Replace the unbox with the object.
805 ins->replaceAllUsesWith(obj_);
807 // Remove the unbox.
808 ins->block()->discard(ins);
811 MDefinition* ObjectMemoryView::functionForCallObject(MDefinition* ins) {
812 // Return early when we don't replace MNewCallObject.
813 if (!obj_->isNewCallObject()) {
814 return nullptr;
817 // Unwrap instructions until we find either MLambda or MFunctionWithProto.
818 // Return the function instruction if its environment chain matches the
819 // MNewCallObject we're about to replace.
820 while (true) {
821 switch (ins->op()) {
822 case MDefinition::Opcode::Lambda: {
823 if (ins->toLambda()->environmentChain() == obj_) {
824 return ins;
826 return nullptr;
828 case MDefinition::Opcode::FunctionWithProto: {
829 if (ins->toFunctionWithProto()->environmentChain() == obj_) {
830 return ins;
832 return nullptr;
834 case MDefinition::Opcode::FunctionEnvironment:
835 ins = ins->toFunctionEnvironment()->function();
836 break;
837 case MDefinition::Opcode::GuardToFunction:
838 ins = ins->toGuardToFunction()->object();
839 break;
840 case MDefinition::Opcode::GuardFunctionScript:
841 ins = ins->toGuardFunctionScript()->function();
842 break;
843 default:
844 return nullptr;
849 void ObjectMemoryView::visitFunctionEnvironment(MFunctionEnvironment* ins) {
850 // Skip function environments which are not aliases of the NewCallObject.
851 if (!functionForCallObject(ins)) {
852 return;
855 // Replace the function environment by the scope chain of the lambda.
856 ins->replaceAllUsesWith(obj_);
858 // Remove original instruction.
859 ins->block()->discard(ins);
862 void ObjectMemoryView::visitGuardToFunction(MGuardToFunction* ins) {
863 // Skip guards on other objects.
864 auto* function = functionForCallObject(ins);
865 if (!function) {
866 return;
869 // Replace the guard by its object.
870 ins->replaceAllUsesWith(function);
872 // Remove original instruction.
873 ins->block()->discard(ins);
876 void ObjectMemoryView::visitGuardFunctionScript(MGuardFunctionScript* ins) {
877 // Skip guards on other objects.
878 auto* function = functionForCallObject(ins);
879 if (!function) {
880 return;
883 // Replace the guard by its object.
884 ins->replaceAllUsesWith(function);
886 // Remove original instruction.
887 ins->block()->discard(ins);
890 void ObjectMemoryView::visitLambda(MLambda* ins) {
891 if (ins->environmentChain() != obj_) {
892 return;
895 // In order to recover the lambda we need to recover the scope chain, as the
896 // lambda is holding it.
897 ins->setIncompleteObject();
900 void ObjectMemoryView::visitFunctionWithProto(MFunctionWithProto* ins) {
901 if (ins->environmentChain() != obj_) {
902 return;
905 ins->setIncompleteObject();
908 void ObjectMemoryView::visitPhi(MPhi* ins) {
909 // Skip phis on other objects.
910 if (!PhiOperandsEqualTo(ins, obj_)) {
911 return;
914 // Replace the phi by its object.
915 ins->replaceAllUsesWith(obj_);
917 // Remove original instruction.
918 ins->block()->discardPhi(ins);
921 void ObjectMemoryView::visitCompare(MCompare* ins) {
922 // Skip unrelated comparisons.
923 if (ins->lhs() != obj_ && ins->rhs() != obj_) {
924 return;
927 bool folded;
928 MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
930 auto* cst = MConstant::New(alloc_, BooleanValue(folded));
931 ins->block()->insertBefore(ins, cst);
933 // Replace the comparison with a constant.
934 ins->replaceAllUsesWith(cst);
936 // Remove original instruction.
937 ins->block()->discard(ins);
940 void ObjectMemoryView::visitConstantProto(MConstantProto* ins) {
941 if (ins->getReceiverObject() != obj_) {
942 return;
945 auto* cst = ins->protoObject();
946 ins->replaceAllUsesWith(cst);
947 ins->block()->discard(ins);
950 void ObjectMemoryView::visitIsObject(MIsObject* ins) {
951 // Skip unrelated tests.
952 if (ins->input() != obj_) {
953 return;
956 auto* cst = MConstant::New(alloc_, BooleanValue(true));
957 ins->block()->insertBefore(ins, cst);
959 // Replace the test with a constant.
960 ins->replaceAllUsesWith(cst);
962 // Remove original instruction.
963 ins->block()->discard(ins);
966 void ObjectMemoryView::visitAssertCanElidePostWriteBarrier(
967 MAssertCanElidePostWriteBarrier* ins) {
968 if (ins->object() != obj_) {
969 return;
972 ins->block()->discard(ins);
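// Extract the constant index of a load/store element, looking through Spectre
// index masking, bounds checks, and int32 conversions. Returns false if the
// index is not a known int32 constant, in which case the access cannot be
// scalar-replaced.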
975 static bool IndexOf(MDefinition* ins, int32_t* res) {
976 MOZ_ASSERT(ins->isLoadElement() || ins->isStoreElement());
977 MDefinition* indexDef = ins->getOperand(1); // ins->index();
978 if (indexDef->isSpectreMaskIndex()) {
979 indexDef = indexDef->toSpectreMaskIndex()->index();
981 if (indexDef->isBoundsCheck()) {
982 indexDef = indexDef->toBoundsCheck()->index();
984 if (indexDef->isToNumberInt32()) {
985 indexDef = indexDef->toToNumberInt32()->getOperand(0);
987 MConstant* indexDefConst = indexDef->maybeConstantValue();
988 if (!indexDefConst || indexDefConst->type() != MIRType::Int32) {
989 return false;
991 *res = indexDefConst->toInt32();
992 return true;
995 static inline bool IsOptimizableArrayInstruction(MInstruction* ins) {
996 return ins->isNewArray() || ins->isNewArrayObject();
999 // We don't support storing holes when doing scalar replacement, so any
1000 // optimizable MNewArrayObject instruction is guaranteed to be packed.
1001 static inline bool IsPackedArray(MInstruction* ins) {
1002 return ins->isNewArrayObject();
1005 // Returns false if the elements are not escaped and are optimizable by
1006 // ScalarReplacementOfArray.
1007 static bool IsElementEscaped(MDefinition* def, MInstruction* newArray,
1008 uint32_t arraySize) {
1009 MOZ_ASSERT(def->isElements());
1010 MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));
1012 JitSpewDef(JitSpew_Escape, "Check elements\n", def);
1013 JitSpewIndent spewIndent(JitSpew_Escape);
1015 for (MUseIterator i(def->usesBegin()); i != def->usesEnd(); i++) {
1016 // The MIRType::Elements cannot be captured in a resume point as
1017 // it does not represent a value allocation.
1018 MDefinition* access = (*i)->consumer()->toDefinition();
1020 switch (access->op()) {
1021 case MDefinition::Opcode::LoadElement: {
1022 MOZ_ASSERT(access->toLoadElement()->elements() == def);
1024 // If the index is not a constant then this index can alias
1025 // all others. We do not handle this case.
1026 int32_t index;
1027 if (!IndexOf(access, &index)) {
1028 JitSpewDef(JitSpew_Escape,
1029 "has a load element with a non-trivial index\n", access);
1030 return true;
1032 if (index < 0 || arraySize <= uint32_t(index)) {
1033 JitSpewDef(JitSpew_Escape,
1034 "has a load element with an out-of-bound index\n", access);
1035 return true;
1037 break;
1040 case MDefinition::Opcode::StoreElement: {
1041 MStoreElement* storeElem = access->toStoreElement();
1042 MOZ_ASSERT(storeElem->elements() == def);
1044 // StoreElement must bail out if it stores to a hole, in case
1045 // there is a setter on the prototype chain. If this StoreElement
1046 // might store to a hole, we can't scalar-replace it.
1047 if (storeElem->needsHoleCheck()) {
1048 JitSpewDef(JitSpew_Escape, "has a store element with a hole check\n",
1049 storeElem);
1050 return true;
1053 // If the index is not a constant then this index can alias
1054 // all others. We do not handle this case.
1055 int32_t index;
1056 if (!IndexOf(storeElem, &index)) {
1057 JitSpewDef(JitSpew_Escape,
1058 "has a store element with a non-trivial index\n",
1059 storeElem);
1060 return true;
1062 if (index < 0 || arraySize <= uint32_t(index)) {
1063 JitSpewDef(JitSpew_Escape,
1064 "has a store element with an out-of-bound index\n",
1065 storeElem);
1066 return true;
1069 // Dense element holes are written using MStoreHoleValueElement instead
1070 // of MStoreElement.
1071 MOZ_ASSERT(storeElem->value()->type() != MIRType::MagicHole);
1072 break;
1075 case MDefinition::Opcode::SetInitializedLength:
1076 MOZ_ASSERT(access->toSetInitializedLength()->elements() == def);
1077 break;
1079 case MDefinition::Opcode::InitializedLength:
1080 MOZ_ASSERT(access->toInitializedLength()->elements() == def);
1081 break;
1083 case MDefinition::Opcode::ArrayLength:
1084 MOZ_ASSERT(access->toArrayLength()->elements() == def);
1085 break;
1087 case MDefinition::Opcode::ApplyArray:
1088 MOZ_ASSERT(access->toApplyArray()->getElements() == def);
1089 if (!IsPackedArray(newArray)) {
1090 JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
1091 access);
1092 return true;
1094 break;
1096 case MDefinition::Opcode::ConstructArray:
1097 MOZ_ASSERT(access->toConstructArray()->getElements() == def);
1098 if (!IsPackedArray(newArray)) {
1099 JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n",
1100 access);
1101 return true;
1103 break;
1105 default:
1106 JitSpewDef(JitSpew_Escape, "is escaped by\n", access);
1107 return true;
1110 JitSpew(JitSpew_Escape, "Elements is not escaped");
1111 return false;
1114 // Returns false if the array is not escaped and is optimizable by
1115 // ScalarReplacementOfArray.
1117 // For the moment, this code is dumb as it only supports arrays which do not
1118 // change length and are only accessed at known constant indices.
1119 static bool IsArrayEscaped(MInstruction* ins, MInstruction* newArray) {
1120 MOZ_ASSERT(ins->type() == MIRType::Object);
1121 MOZ_ASSERT(IsOptimizableArrayInstruction(newArray));
1123 JitSpewDef(JitSpew_Escape, "Check array\n", ins);
1124 JitSpewIndent spewIndent(JitSpew_Escape);
1126 const Shape* shape;
1127 uint32_t length;
1128 if (newArray->isNewArrayObject()) {
1129 length = newArray->toNewArrayObject()->length();
1130 shape = newArray->toNewArrayObject()->shape();
1131 } else {
1132 length = newArray->toNewArray()->length();
1133 JSObject* templateObject = newArray->toNewArray()->templateObject();
1134 if (!templateObject) {
1135 JitSpew(JitSpew_Escape, "No template object defined.");
1136 return true;
1138 shape = templateObject->shape();
1141 if (length >= 16) {
1142 JitSpew(JitSpew_Escape, "Array has too many elements");
1143 return true;
1146 // Check if the object is escaped. If the object is not the first argument
1147 // of either a known Store / Load, then we consider it as escaped. This is a
1148 // cheap and conservative escape analysis.
1149 for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
1150 MNode* consumer = (*i)->consumer();
1151 if (!consumer->isDefinition()) {
1152 // Cannot optimize if it is observable from fun.arguments or others.
1153 if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
1154 JitSpew(JitSpew_Escape, "Observable array cannot be recovered");
1155 return true;
1157 continue;
1160 MDefinition* def = consumer->toDefinition();
1161 switch (def->op()) {
1162 case MDefinition::Opcode::Elements: {
1163 MElements* elem = def->toElements();
1164 MOZ_ASSERT(elem->object() == ins);
1165 if (IsElementEscaped(elem, newArray, length)) {
1166 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", elem);
1167 return true;
1170 break;
1173 case MDefinition::Opcode::GuardShape: {
1174 MGuardShape* guard = def->toGuardShape();
1175 if (shape != guard->shape()) {
1176 JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", guard);
1177 return true;
1179 if (IsArrayEscaped(guard, newArray)) {
1180 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
1181 return true;
1184 break;
1187 case MDefinition::Opcode::GuardToClass: {
1188 MGuardToClass* guard = def->toGuardToClass();
1189 if (shape->getObjectClass() != guard->getClass()) {
1190 JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
1191 return true;
1193 if (IsArrayEscaped(guard, newArray)) {
1194 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
1195 return true;
1198 break;
1201 case MDefinition::Opcode::GuardArrayIsPacked: {
1202 auto* guard = def->toGuardArrayIsPacked();
1203 if (!IsPackedArray(newArray)) {
1204 JitSpewDef(JitSpew_Escape, "is not guaranteed to be packed\n", def);
1205 return true;
1207 if (IsArrayEscaped(guard, newArray)) {
1208 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
1209 return true;
1211 break;
1214 case MDefinition::Opcode::Unbox: {
1215 if (def->type() != MIRType::Object) {
1216 JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
1217 return true;
1219 if (IsArrayEscaped(def->toInstruction(), newArray)) {
1220 JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
1221 return true;
1223 break;
1226 // This instruction is supported for |JSOp::OptimizeSpreadCall|.
1227 case MDefinition::Opcode::Compare: {
1228 bool canFold;
1229 if (!def->toCompare()->tryFold(&canFold)) {
1230 JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
1231 return true;
1233 break;
1236 case MDefinition::Opcode::PostWriteBarrier:
1237 case MDefinition::Opcode::PostWriteElementBarrier:
1238 break;
1240 // This instruction is a no-op used to verify that scalar replacement
1241 // is working as expected in jit-test.
1242 case MDefinition::Opcode::AssertRecoveredOnBailout:
1243 break;
1245 default:
1246 JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
1247 return true;
1251 JitSpew(JitSpew_Escape, "Array is not escaped");
1252 return false;
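// For example (illustrative only), in
//   function g() { var a = [1, 2, 3]; return a[0] + a.length; }
// the array allocation can be optimized away: the element load and the length
// read are replaced by the known values if |a| never escapes.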
1255 // This class replaces every MStoreElement and MSetInitializedLength by an
1256 // MArrayState which emulates the content of the array. All MLoadElement,
1257 // MInitializedLength and MArrayLength are replaced by the corresponding value.
1259 // In order to restore the value of the array correctly in case of bailouts, we
1260 // replace all references to the allocation by the MArrayState definition.
1261 class ArrayMemoryView : public MDefinitionVisitorDefaultNoop {
1262 public:
1263 using BlockState = MArrayState;
1264 static const char* phaseName;
1266 private:
1267 TempAllocator& alloc_;
1268 MConstant* undefinedVal_;
1269 MConstant* length_;
1270 MInstruction* arr_;
1271 MBasicBlock* startBlock_;
1272 BlockState* state_;
1274 // Used to improve the memory usage by sharing common modification.
1275 const MResumePoint* lastResumePoint_;
1277 bool oom_;
1279 public:
1280 ArrayMemoryView(TempAllocator& alloc, MInstruction* arr);
1282 MBasicBlock* startingBlock();
1283 bool initStartingState(BlockState** pState);
1285 void setEntryBlockState(BlockState* state);
1286 bool mergeIntoSuccessorState(MBasicBlock* curr, MBasicBlock* succ,
1287 BlockState** pSuccState);
1289 #ifdef DEBUG
1290 void assertSuccess();
1291 #else
1292 void assertSuccess() {}
1293 #endif
1295 bool oom() const { return oom_; }
1297 private:
1298 bool isArrayStateElements(MDefinition* elements);
1299 void discardInstruction(MInstruction* ins, MDefinition* elements);
1301 public:
1302 void visitResumePoint(MResumePoint* rp);
1303 void visitArrayState(MArrayState* ins);
1304 void visitStoreElement(MStoreElement* ins);
1305 void visitLoadElement(MLoadElement* ins);
1306 void visitSetInitializedLength(MSetInitializedLength* ins);
1307 void visitInitializedLength(MInitializedLength* ins);
1308 void visitArrayLength(MArrayLength* ins);
1309 void visitPostWriteBarrier(MPostWriteBarrier* ins);
1310 void visitPostWriteElementBarrier(MPostWriteElementBarrier* ins);
1311 void visitGuardShape(MGuardShape* ins);
1312 void visitGuardToClass(MGuardToClass* ins);
1313 void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
1314 void visitUnbox(MUnbox* ins);
1315 void visitCompare(MCompare* ins);
1316 void visitApplyArray(MApplyArray* ins);
1317 void visitConstructArray(MConstructArray* ins);
1320 const char* ArrayMemoryView::phaseName = "Scalar Replacement of Array";
1322 ArrayMemoryView::ArrayMemoryView(TempAllocator& alloc, MInstruction* arr)
1323 : alloc_(alloc),
1324 undefinedVal_(nullptr),
1325 length_(nullptr),
1326 arr_(arr),
1327 startBlock_(arr->block()),
1328 state_(nullptr),
1329 lastResumePoint_(nullptr),
1330 oom_(false) {
1331 // Annotate snapshots RValue such that we recover the store first.
1332 arr_->setIncompleteObject();
1334 // Annotate the instruction such that we do not replace it by a
1335 // Magic(JS_OPTIMIZED_OUT) in case of removed uses.
1336 arr_->setImplicitlyUsedUnchecked();
1339 MBasicBlock* ArrayMemoryView::startingBlock() { return startBlock_; }
1341 bool ArrayMemoryView::initStartingState(BlockState** pState) {
1342 // Uninitialized elements have an "undefined" value.
1343 undefinedVal_ = MConstant::New(alloc_, UndefinedValue());
1344 MConstant* initLength = MConstant::New(alloc_, Int32Value(0));
1345 arr_->block()->insertBefore(arr_, undefinedVal_);
1346 arr_->block()->insertBefore(arr_, initLength);
1348 // Create a new block state and insert it at the location of the new array.
1349 BlockState* state = BlockState::New(alloc_, arr_, initLength);
1350 if (!state) {
1351 return false;
1354 startBlock_->insertAfter(arr_, state);
1356 // Initialize the elements of the array state.
1357 state->initFromTemplateObject(alloc_, undefinedVal_);
1359 // Hold the state out of resume points until it is visited.
1360 state->setInWorklist();
1362 *pState = state;
1363 return true;
1366 void ArrayMemoryView::setEntryBlockState(BlockState* state) { state_ = state; }
1368 bool ArrayMemoryView::mergeIntoSuccessorState(MBasicBlock* curr,
1369 MBasicBlock* succ,
1370 BlockState** pSuccState) {
1371 BlockState* succState = *pSuccState;
1373 // When a block has no state yet, create an empty one for the
1374 // successor.
1375 if (!succState) {
1376 // If the successor is not dominated, then the array cannot flow
1377 // into this basic block without a Phi. We know that no Phi exists
1378 // in non-dominated successors, as the conservative escape
1379 // analysis fails otherwise. This can happen when the
1380 // successor is a join at the end of an if-block and the array
1381 // only exists within the branch.
1382 if (!startBlock_->dominates(succ)) {
1383 return true;
1386 // If there is only one predecessor, carry over the last state of the
1387 // block to the successor. As the block state is immutable, if the
1388 // current block has multiple successors, they will share the same entry
1389 // state.
1390 if (succ->numPredecessors() <= 1 || !state_->numElements()) {
1391 *pSuccState = state_;
1392 return true;
1395 // If we have multiple predecessors, then we allocate one Phi node for
1396 // each predecessor, and create a new block state which only has phi
1397 // nodes. These would later be removed by the removal of redundant phi
1398 // nodes.
1399 succState = BlockState::Copy(alloc_, state_);
1400 if (!succState) {
1401 return false;
1404 size_t numPreds = succ->numPredecessors();
1405 for (size_t index = 0; index < state_->numElements(); index++) {
1406 MPhi* phi = MPhi::New(alloc_.fallible());
1407 if (!phi || !phi->reserveLength(numPreds)) {
1408 return false;
1411 // Fill the inputs of the successor's Phi with undefined
1412 // values; each predecessor block fills in its Phi input later.
1413 for (size_t p = 0; p < numPreds; p++) {
1414 phi->addInput(undefinedVal_);
1417 // Add Phi in the list of Phis of the basic block.
1418 succ->addPhi(phi);
1419 succState->setElement(index, phi);
1422 // Insert the newly created block state instruction at the beginning
1423 // of the successor block, after all the phi nodes. Note that it
1424 // would be captured by the entry resume point of the successor
1425 // block.
1426 succ->insertBefore(succ->safeInsertTop(), succState);
1427 *pSuccState = succState;
1430 MOZ_ASSERT_IF(succ == startBlock_, startBlock_->isLoopHeader());
1431 if (succ->numPredecessors() > 1 && succState->numElements() &&
1432 succ != startBlock_) {
1433 // We need to re-compute successorWithPhis as the previous EliminatePhis
1434 // phase might have removed all the Phis from the successor block.
1435 size_t currIndex;
1436 MOZ_ASSERT(!succ->phisEmpty());
1437 if (curr->successorWithPhis()) {
1438 MOZ_ASSERT(curr->successorWithPhis() == succ);
1439 currIndex = curr->positionInPhiSuccessor();
1440 } else {
1441 currIndex = succ->indexForPredecessor(curr);
1442 curr->setSuccessorWithPhis(succ, currIndex);
1444 MOZ_ASSERT(succ->getPredecessor(currIndex) == curr);
1446 // Copy the current element states to the index of current block in all
1447 // the Phi created during the first visit of the successor.
1448 for (size_t index = 0; index < state_->numElements(); index++) {
1449 MPhi* phi = succState->getElement(index)->toPhi();
1450 phi->replaceOperand(currIndex, state_->getElement(index));
1454 return true;
1457 #ifdef DEBUG
1458 void ArrayMemoryView::assertSuccess() { MOZ_ASSERT(!arr_->hasLiveDefUses()); }
1459 #endif
1461 void ArrayMemoryView::visitResumePoint(MResumePoint* rp) {
1462 // As long as the MArrayState is not yet seen next to the allocation, we do
1463 // not patch the resume point to recover the side effects.
1464 if (!state_->isInWorklist()) {
1465 rp->addStore(alloc_, state_, lastResumePoint_);
1466 lastResumePoint_ = rp;
1470 void ArrayMemoryView::visitArrayState(MArrayState* ins) {
1471 if (ins->isInWorklist()) {
1472 ins->setNotInWorklist();
1476 bool ArrayMemoryView::isArrayStateElements(MDefinition* elements) {
1477 return elements->isElements() && elements->toElements()->object() == arr_;
1480 void ArrayMemoryView::discardInstruction(MInstruction* ins,
1481 MDefinition* elements) {
1482 MOZ_ASSERT(elements->isElements());
1483 ins->block()->discard(ins);
1484 if (!elements->hasLiveDefUses()) {
1485 elements->block()->discard(elements->toInstruction());
1489 void ArrayMemoryView::visitStoreElement(MStoreElement* ins) {
1490 // Skip other array objects.
1491 MDefinition* elements = ins->elements();
1492 if (!isArrayStateElements(elements)) {
1493 return;
1496 // Register the stored value in the state.
1497 int32_t index;
1498 MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
1499 state_ = BlockState::Copy(alloc_, state_);
1500 if (!state_) {
1501 oom_ = true;
1502 return;
1505 state_->setElement(index, ins->value());
1506 ins->block()->insertBefore(ins, state_);
1508 // Remove original instruction.
1509 discardInstruction(ins, elements);
1512 void ArrayMemoryView::visitLoadElement(MLoadElement* ins) {
1513 // Skip other array objects.
1514 MDefinition* elements = ins->elements();
1515 if (!isArrayStateElements(elements)) {
1516 return;
1519 // Replace by the value contained at the index.
1520 int32_t index;
1521 MOZ_ALWAYS_TRUE(IndexOf(ins, &index));
1523 // The only way to store a hole value in a new array is with
1524 // StoreHoleValueElement, which IsElementEscaped does not allow.
1525 // Therefore, we do not have to do a hole check.
1526 MDefinition* element = state_->getElement(index);
1527 MOZ_ASSERT(element->type() != MIRType::MagicHole);
1529 ins->replaceAllUsesWith(element);
1531 // Remove original instruction.
1532 discardInstruction(ins, elements);
1535 void ArrayMemoryView::visitSetInitializedLength(MSetInitializedLength* ins) {
1536 // Skip other array objects.
1537 MDefinition* elements = ins->elements();
1538 if (!isArrayStateElements(elements)) {
1539 return;
1542 // Replace by the new initialized length. Note that the argument of
1543 // MSetInitializedLength is the last index and not the initialized length.
1544 // To obtain the length, we need to add 1 to it, and thus we need to create
1545 // a new constant that we register in the ArrayState.
1546 state_ = BlockState::Copy(alloc_, state_);
1547 if (!state_) {
1548 oom_ = true;
1549 return;
1552 int32_t initLengthValue = ins->index()->maybeConstantValue()->toInt32() + 1;
1553 MConstant* initLength = MConstant::New(alloc_, Int32Value(initLengthValue));
1554 ins->block()->insertBefore(ins, initLength);
1555 ins->block()->insertBefore(ins, state_);
1556 state_->setInitializedLength(initLength);
1558 // Remove original instruction.
1559 discardInstruction(ins, elements);
1562 void ArrayMemoryView::visitInitializedLength(MInitializedLength* ins) {
1563 // Skip other array objects.
1564 MDefinition* elements = ins->elements();
1565 if (!isArrayStateElements(elements)) {
1566 return;
1569 // Replace by the value of the length.
1570 ins->replaceAllUsesWith(state_->initializedLength());
1572 // Remove original instruction.
1573 discardInstruction(ins, elements);
1576 void ArrayMemoryView::visitArrayLength(MArrayLength* ins) {
1577 // Skip other array objects.
1578 MDefinition* elements = ins->elements();
1579 if (!isArrayStateElements(elements)) {
1580 return;
1583 // Replace by the value of the length.
1584 if (!length_) {
1585 length_ = MConstant::New(alloc_, Int32Value(state_->numElements()));
1586 arr_->block()->insertBefore(arr_, length_);
1588 ins->replaceAllUsesWith(length_);
1590 // Remove original instruction.
1591 discardInstruction(ins, elements);
1594 void ArrayMemoryView::visitPostWriteBarrier(MPostWriteBarrier* ins) {
1595 // Skip barriers on other objects.
1596 if (ins->object() != arr_) {
1597 return;
1600 // Remove original instruction.
1601 ins->block()->discard(ins);
1604 void ArrayMemoryView::visitPostWriteElementBarrier(
1605 MPostWriteElementBarrier* ins) {
1606 // Skip barriers on other objects.
1607 if (ins->object() != arr_) {
1608 return;
1611 // Remove original instruction.
1612 ins->block()->discard(ins);
1615 void ArrayMemoryView::visitGuardShape(MGuardShape* ins) {
1616 // Skip guards on other objects.
1617 if (ins->object() != arr_) {
1618 return;
1621 // Replace the guard by its object.
1622 ins->replaceAllUsesWith(arr_);
1624 // Remove original instruction.
1625 ins->block()->discard(ins);
1628 void ArrayMemoryView::visitGuardToClass(MGuardToClass* ins) {
1629 // Skip guards on other objects.
1630 if (ins->object() != arr_) {
1631 return;
1634 // Replace the guard by its object.
1635 ins->replaceAllUsesWith(arr_);
1637 // Remove original instruction.
1638 ins->block()->discard(ins);
1641 void ArrayMemoryView::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
1642 // Skip guards on other objects.
1643 if (ins->array() != arr_) {
1644 return;
1647 // Replace the guard by its object.
1648 ins->replaceAllUsesWith(arr_);
1650 // Remove original instruction.
1651 ins->block()->discard(ins);
1654 void ArrayMemoryView::visitUnbox(MUnbox* ins) {
1655 // Skip unrelated unboxes.
1656 if (ins->getOperand(0) != arr_) {
1657 return;
1659 MOZ_ASSERT(ins->type() == MIRType::Object);
1661 // Replace the unbox with the array object.
1662 ins->replaceAllUsesWith(arr_);
1664 // Remove the unbox.
1665 ins->block()->discard(ins);
1668 void ArrayMemoryView::visitCompare(MCompare* ins) {
1669 // Skip unrelated comparisons.
1670 if (ins->lhs() != arr_ && ins->rhs() != arr_) {
1671 return;
1674 bool folded;
1675 MOZ_ALWAYS_TRUE(ins->tryFold(&folded));
1677 auto* cst = MConstant::New(alloc_, BooleanValue(folded));
1678 ins->block()->insertBefore(ins, cst);
1680 // Replace the comparison with a constant.
1681 ins->replaceAllUsesWith(cst);
1683 // Remove original instruction.
1684 ins->block()->discard(ins);
1687 void ArrayMemoryView::visitApplyArray(MApplyArray* ins) {
1688 // Skip other array objects.
1689 MDefinition* elements = ins->getElements();
1690 if (!isArrayStateElements(elements)) {
1691 return;
1694 uint32_t numElements = state_->numElements();
1696 CallInfo callInfo(alloc_, /*constructing=*/false, ins->ignoresReturnValue());
1697 if (!callInfo.initForApplyArray(ins->getFunction(), ins->getThis(),
1698 numElements)) {
1699 oom_ = true;
1700 return;
1703 for (uint32_t i = 0; i < numElements; i++) {
1704 auto* element = state_->getElement(i);
1705 MOZ_ASSERT(element->type() != MIRType::MagicHole);
1707 callInfo.initArg(i, element);
1710 auto addUndefined = [this]() { return undefinedVal_; };
1712 bool needsThisCheck = false;
1713 bool isDOMCall = false;
1714 auto* call = MakeCall(alloc_, addUndefined, callInfo, needsThisCheck,
1715 ins->getSingleTarget(), isDOMCall);
1716 if (!call) {
1717 oom_ = true;
1718 return;
1720 if (!ins->maybeCrossRealm()) {
1721 call->setNotCrossRealm();
1724 ins->block()->insertBefore(ins, call);
1725 ins->replaceAllUsesWith(call);
1727 call->stealResumePoint(ins);
1729 // Remove original instruction.
1730 discardInstruction(ins, elements);
1733 void ArrayMemoryView::visitConstructArray(MConstructArray* ins) {
1734 // Skip other array objects.
1735 MDefinition* elements = ins->getElements();
1736 if (!isArrayStateElements(elements)) {
1737 return;
1740 uint32_t numElements = state_->numElements();
1742 CallInfo callInfo(alloc_, /*constructing=*/true, ins->ignoresReturnValue());
1743 if (!callInfo.initForConstructArray(ins->getFunction(), ins->getThis(),
1744 ins->getNewTarget(), numElements)) {
1745 oom_ = true;
1746 return;
1749 for (uint32_t i = 0; i < numElements; i++) {
1750 auto* element = state_->getElement(i);
1751 MOZ_ASSERT(element->type() != MIRType::MagicHole);
1753 callInfo.initArg(i, element);
1756 auto addUndefined = [this]() { return undefinedVal_; };
1758 bool needsThisCheck = ins->needsThisCheck();
1759 bool isDOMCall = false;
1760 auto* call = MakeCall(alloc_, addUndefined, callInfo, needsThisCheck,
1761 ins->getSingleTarget(), isDOMCall);
1762 if (!call) {
1763 oom_ = true;
1764 return;
1766 if (!ins->maybeCrossRealm()) {
1767 call->setNotCrossRealm();
1770 ins->block()->insertBefore(ins, call);
1771 ins->replaceAllUsesWith(call);
1773 call->stealResumePoint(ins);
1775 // Remove original instruction.
1776 discardInstruction(ins, elements);
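// Arguments objects (and inlined arguments objects) do not change after they
// are created, so ArgumentsReplacer below can rewrite their uses directly:
// length reads, argument loads, |arguments.callee|, apply calls, spreads, and
// slices are expressed in terms of the call's actual arguments, without
// emulating any per-block state.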
1779 static inline bool IsOptimizableArgumentsInstruction(MInstruction* ins) {
1780 return ins->isCreateArgumentsObject() ||
1781 ins->isCreateInlinedArgumentsObject();
1784 class ArgumentsReplacer : public MDefinitionVisitorDefaultNoop {
1785 private:
1786 MIRGenerator* mir_;
1787 MIRGraph& graph_;
1788 MInstruction* args_;
1790 bool oom_ = false;
1792 TempAllocator& alloc() { return graph_.alloc(); }
1794 bool isInlinedArguments() const {
1795 return args_->isCreateInlinedArgumentsObject();
1798 MNewArrayObject* inlineArgsArray(MInstruction* ins, Shape* shape,
1799 uint32_t begin, uint32_t count);
1801 void visitGuardToClass(MGuardToClass* ins);
1802 void visitGuardProto(MGuardProto* ins);
1803 void visitGuardArgumentsObjectFlags(MGuardArgumentsObjectFlags* ins);
1804 void visitUnbox(MUnbox* ins);
1805 void visitGetArgumentsObjectArg(MGetArgumentsObjectArg* ins);
1806 void visitLoadArgumentsObjectArg(MLoadArgumentsObjectArg* ins);
1807 void visitLoadArgumentsObjectArgHole(MLoadArgumentsObjectArgHole* ins);
1808 void visitInArgumentsObjectArg(MInArgumentsObjectArg* ins);
1809 void visitArgumentsObjectLength(MArgumentsObjectLength* ins);
1810 void visitApplyArgsObj(MApplyArgsObj* ins);
1811 void visitArrayFromArgumentsObject(MArrayFromArgumentsObject* ins);
1812 void visitArgumentsSlice(MArgumentsSlice* ins);
1813 void visitLoadFixedSlot(MLoadFixedSlot* ins);
1815 bool oom() const { return oom_; }
1817 public:
1818 ArgumentsReplacer(MIRGenerator* mir, MIRGraph& graph, MInstruction* args)
1819 : mir_(mir), graph_(graph), args_(args) {
1820 MOZ_ASSERT(IsOptimizableArgumentsInstruction(args_));
1823 bool escapes(MInstruction* ins, bool guardedForMapped = false);
1824 bool run();
1825 void assertSuccess();
// Returns false if the arguments object does not escape.
bool ArgumentsReplacer::escapes(MInstruction* ins, bool guardedForMapped) {
  MOZ_ASSERT(ins->type() == MIRType::Object);

  JitSpewDef(JitSpew_Escape, "Check arguments object\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // We can replace inlined arguments in scripts with OSR entries, but
  // the outermost arguments object has already been allocated before
  // we enter via OSR and can't be replaced.
  if (ins->isCreateArgumentsObject() && graph_.osrBlock()) {
    JitSpew(JitSpew_Escape, "Can't replace outermost OSR arguments");
    return true;
  }

  // Check all uses to see whether they can be supported without
  // allocating an ArgumentsObject.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();

    // If a resume point can observe this instruction, we can only optimize
    // if it is recoverable.
    if (consumer->isResumePoint()) {
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable args object cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::GuardToClass: {
        MGuardToClass* guard = def->toGuardToClass();
        if (!guard->isArgumentsObjectClass()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", guard);
          return true;
        }
        bool isMapped = guard->getClass() == &MappedArgumentsObject::class_;
        if (escapes(guard, isMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardProto: {
        if (escapes(def->toInstruction(), guardedForMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardArgumentsObjectFlags: {
        if (escapes(def->toInstruction(), guardedForMapped)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (escapes(def->toInstruction())) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::LoadFixedSlot: {
        MLoadFixedSlot* load = def->toLoadFixedSlot();

        // We can replace arguments.callee.
        if (load->slot() == ArgumentsObject::CALLEE_SLOT) {
          MOZ_ASSERT(guardedForMapped);
          continue;
        }
        JitSpew(JitSpew_Escape, "is escaped by unsupported LoadFixedSlot\n");
        return true;
      }

      case MDefinition::Opcode::ApplyArgsObj: {
        if (ins == def->toApplyArgsObj()->getThis()) {
          JitSpew(JitSpew_Escape, "is escaped as |this| arg of ApplyArgsObj\n");
          return true;
        }
        MOZ_ASSERT(ins == def->toApplyArgsObj()->getArgsObj());
        break;
      }

      // This is a replaceable consumer.
      case MDefinition::Opcode::ArgumentsObjectLength:
      case MDefinition::Opcode::GetArgumentsObjectArg:
      case MDefinition::Opcode::LoadArgumentsObjectArg:
      case MDefinition::Opcode::LoadArgumentsObjectArgHole:
      case MDefinition::Opcode::InArgumentsObjectArg:
      case MDefinition::Opcode::ArrayFromArgumentsObject:
      case MDefinition::Opcode::ArgumentsSlice:
        break;

      // This instruction is a no-op used to test that scalar replacement
      // is working as expected.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "ArgumentsObject is not escaped");
  return false;
}
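
// Note: |guardedForMapped| is only set once a GuardToClass for the mapped
// arguments class has been seen on the use chain; it gates uses that are only
// valid for mapped arguments objects, such as reading
// ArgumentsObject::CALLEE_SLOT via MLoadFixedSlot (see the assertion in that
// case above).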
// Replacing the arguments object is simpler than replacing an object
// or array, because the arguments object does not change state.
bool ArgumentsReplacer::run() {
  MBasicBlock* startBlock = args_->block();

  // Iterate over each basic block.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel("Scalar replacement of Arguments Object")) {
      return false;
    }

    // Iterates over phis and instructions.
    // We do not have to visit resume points. Any resume points that capture
    // the argument object will be handled by the Sink pass.
    for (MDefinitionIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the
      // visit function might discard itself from the basic block.
      MDefinition* def = *iter++;
      switch (def->op()) {
#define MIR_OP(op)              \
  case MDefinition::Opcode::op: \
    visit##op(def->to##op());   \
    break;
        MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
      }
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }
      if (oom()) {
        return false;
      }
    }
  }

  assertSuccess();
  return true;
}

void ArgumentsReplacer::assertSuccess() {
  MOZ_ASSERT(args_->canRecoverOnBailout());
  MOZ_ASSERT(!args_->hasLiveDefUses());
}
void ArgumentsReplacer::visitGuardToClass(MGuardToClass* ins) {
  // Skip guards on other objects.
  if (ins->object() != args_) {
    return;
  }
  MOZ_ASSERT(ins->isArgumentsObjectClass());

  // Replace the guard with the args object.
  ins->replaceAllUsesWith(args_);

  // Remove the guard.
  ins->block()->discard(ins);
}

void ArgumentsReplacer::visitGuardProto(MGuardProto* ins) {
  // Skip guards on other objects.
  if (ins->object() != args_) {
    return;
  }

  // The prototype can only be changed through explicit operations, for example
  // by calling |Reflect.setPrototype|. We have already determined that the args
  // object doesn't escape, so its prototype can't be mutated.

  // Replace the guard with the args object.
  ins->replaceAllUsesWith(args_);

  // Remove the guard.
  ins->block()->discard(ins);
}

void ArgumentsReplacer::visitGuardArgumentsObjectFlags(
    MGuardArgumentsObjectFlags* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

#ifdef DEBUG
  // Each *_OVERRIDDEN_BIT can only be set by setting or deleting a
  // property of the args object. We have already determined that the
  // args object doesn't escape, so its properties can't be mutated.
  //
  // FORWARDED_ARGUMENTS_BIT is set if any mapped argument is closed
  // over, which is an immutable property of the script. Because we
  // are replacing the args object for a known script, we can check
  // the flag once, which is done when we first attach the CacheIR,
  // and rely on it. (Note that this wouldn't be true if we didn't
  // know the origin of args_, because it could be passed in from
  // another function.)
  uint32_t supportedBits = ArgumentsObject::LENGTH_OVERRIDDEN_BIT |
                           ArgumentsObject::ITERATOR_OVERRIDDEN_BIT |
                           ArgumentsObject::ELEMENT_OVERRIDDEN_BIT |
                           ArgumentsObject::CALLEE_OVERRIDDEN_BIT |
                           ArgumentsObject::FORWARDED_ARGUMENTS_BIT;

  MOZ_ASSERT((ins->flags() & ~supportedBits) == 0);
  MOZ_ASSERT_IF(ins->flags() & ArgumentsObject::FORWARDED_ARGUMENTS_BIT,
                !args_->block()->info().anyFormalIsForwarded());
#endif

  // Replace the guard with the args object.
  ins->replaceAllUsesWith(args_);

  // Remove the guard.
  ins->block()->discard(ins);
}
void ArgumentsReplacer::visitUnbox(MUnbox* ins) {
  // Skip unrelated unboxes.
  if (ins->getOperand(0) != args_) {
    return;
  }
  MOZ_ASSERT(ins->type() == MIRType::Object);

  // Replace the unbox with the args object.
  ins->replaceAllUsesWith(args_);

  // Remove the unbox.
  ins->block()->discard(ins);
}
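
// MGetArgumentsObjectArg reads an argument whose index is statically known
// (|ins->argno()|), so no bounds check is required here; the dynamic-index
// case is handled by visitLoadArgumentsObjectArg below, which does insert one.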
void ArgumentsReplacer::visitGetArgumentsObjectArg(
    MGetArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  // We don't support setting arguments in ArgumentsReplacer::escapes,
  // so we can load the initial value of the argument without worrying
  // about it being stale.
  MDefinition* getArg;
  if (isInlinedArguments()) {
    // Inlined frames have direct access to the actual arguments.
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    if (ins->argno() < actualArgs->numActuals()) {
      getArg = actualArgs->getArg(ins->argno());
    } else {
      // Omitted arguments are not mapped to the arguments object, and
      // will always be undefined.
      auto* undef = MConstant::New(alloc(), UndefinedValue());
      ins->block()->insertBefore(ins, undef);
      getArg = undef;
    }
  } else {
    // Load the argument from the frame.
    auto* index = MConstant::New(alloc(), Int32Value(ins->argno()));
    ins->block()->insertBefore(ins, index);

    auto* loadArg = MGetFrameArgument::New(alloc(), index);
    ins->block()->insertBefore(ins, loadArg);
    getArg = loadArg;
  }
  ins->replaceAllUsesWith(getArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}
void ArgumentsReplacer::visitLoadArgumentsObjectArg(
    MLoadArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MDefinition* index = ins->index();

  MInstruction* loadArg;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();

    // Insert bounds check.
    auto* length =
        MConstant::New(alloc(), Int32Value(actualArgs->numActuals()));
    ins->block()->insertBefore(ins, length);

    MInstruction* check = MBoundsCheck::New(alloc(), index, length);
    check->setBailoutKind(ins->bailoutKind());
    ins->block()->insertBefore(ins, check);

    if (mir_->outerInfo().hadBoundsCheckBailout()) {
      check->setNotMovable();
    }

    loadArg = MGetInlinedArgument::New(alloc(), check, actualArgs);
    if (!loadArg) {
      oom_ = true;
      return;
    }
  } else {
    // Insert bounds check.
    auto* length = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, length);

    MInstruction* check = MBoundsCheck::New(alloc(), index, length);
    check->setBailoutKind(ins->bailoutKind());
    ins->block()->insertBefore(ins, check);

    if (mir_->outerInfo().hadBoundsCheckBailout()) {
      check->setNotMovable();
    }

    if (JitOptions.spectreIndexMasking) {
      check = MSpectreMaskIndex::New(alloc(), check, length);
      ins->block()->insertBefore(ins, check);
    }

    loadArg = MGetFrameArgument::New(alloc(), check);
  }
  ins->block()->insertBefore(ins, loadArg);
  ins->replaceAllUsesWith(loadArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}
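
// Unlike the instruction above, the *Hole variants handle indices that may be
// out of bounds (the load then produces |undefined| rather than bailing), so
// no MBoundsCheck is inserted below.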
void ArgumentsReplacer::visitLoadArgumentsObjectArgHole(
    MLoadArgumentsObjectArgHole* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MDefinition* index = ins->index();

  MInstruction* loadArg;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();

    loadArg = MGetInlinedArgumentHole::New(alloc(), index, actualArgs);
    if (!loadArg) {
      oom_ = true;
      return;
    }
  } else {
    auto* length = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, length);

    loadArg = MGetFrameArgumentHole::New(alloc(), index, length);
  }
  loadArg->setBailoutKind(ins->bailoutKind());
  ins->block()->insertBefore(ins, loadArg);
  ins->replaceAllUsesWith(loadArg);

  // Remove original instruction.
  ins->block()->discard(ins);
}
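
// |index in arguments| is rewritten as the equivalent of
// |index >= 0 && index < numActuals|: a non-negative guard on the index,
// followed by an Int32 less-than compare against the (possibly constant)
// arguments length.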
void ArgumentsReplacer::visitInArgumentsObjectArg(MInArgumentsObjectArg* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MDefinition* index = ins->index();

  // Ensure the index is non-negative.
  auto* guardedIndex = MGuardInt32IsNonNegative::New(alloc(), index);
  guardedIndex->setBailoutKind(ins->bailoutKind());
  ins->block()->insertBefore(ins, guardedIndex);

  MInstruction* length;
  if (isInlinedArguments()) {
    uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
    length = MConstant::New(alloc(), Int32Value(argc));
  } else {
    length = MArgumentsLength::New(alloc());
  }
  ins->block()->insertBefore(ins, length);

  auto* compare = MCompare::New(alloc(), guardedIndex, length, JSOp::Lt,
                                MCompare::Compare_Int32);
  ins->block()->insertBefore(ins, compare);
  ins->replaceAllUsesWith(compare);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void ArgumentsReplacer::visitArgumentsObjectLength(
    MArgumentsObjectLength* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  MInstruction* length;
  if (isInlinedArguments()) {
    uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
    length = MConstant::New(alloc(), Int32Value(argc));
  } else {
    length = MArgumentsLength::New(alloc());
  }
  ins->block()->insertBefore(ins, length);
  ins->replaceAllUsesWith(length);

  // Remove original instruction.
  ins->block()->discard(ins);
}
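
// MApplyArgsObj forwards the arguments object as a call's argument vector,
// e.g. |f.apply(thisArg, arguments)| (as suggested by its getFunction/getThis/
// getArgsObj operands). For inlined arguments we can turn it into a regular
// call with the known actuals; otherwise it becomes an MApplyArgs that reads
// the argument vector directly from the frame.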
void ArgumentsReplacer::visitApplyArgsObj(MApplyArgsObj* ins) {
  // Skip other arguments objects.
  if (ins->getArgsObj() != args_) {
    return;
  }

  MInstruction* newIns;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    CallInfo callInfo(alloc(), /*constructing=*/false,
                      ins->ignoresReturnValue());

    callInfo.initForApplyInlinedArgs(ins->getFunction(), ins->getThis(),
                                     actualArgs->numActuals());
    for (uint32_t i = 0; i < actualArgs->numActuals(); i++) {
      callInfo.initArg(i, actualArgs->getArg(i));
    }

    auto addUndefined = [this, &ins]() -> MConstant* {
      MConstant* undef = MConstant::New(alloc(), UndefinedValue());
      ins->block()->insertBefore(ins, undef);
      return undef;
    };

    bool needsThisCheck = false;
    bool isDOMCall = false;
    auto* call = MakeCall(alloc(), addUndefined, callInfo, needsThisCheck,
                          ins->getSingleTarget(), isDOMCall);
    if (!call) {
      oom_ = true;
      return;
    }
    if (!ins->maybeCrossRealm()) {
      call->setNotCrossRealm();
    }
    newIns = call;
  } else {
    auto* numArgs = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, numArgs);

    // TODO: Should we rename MApplyArgs?
    auto* apply = MApplyArgs::New(alloc(), ins->getSingleTarget(),
                                  ins->getFunction(), numArgs, ins->getThis());
    apply->setBailoutKind(ins->bailoutKind());
    if (!ins->maybeCrossRealm()) {
      apply->setNotCrossRealm();
    }
    if (ins->ignoresReturnValue()) {
      apply->setIgnoresReturnValue();
    }
    newIns = apply;
  }

  ins->block()->insertBefore(ins, newIns);
  ins->replaceAllUsesWith(newIns);

  newIns->stealResumePoint(ins);
  ins->block()->discard(ins);
}
MNewArrayObject* ArgumentsReplacer::inlineArgsArray(MInstruction* ins,
                                                    Shape* shape,
                                                    uint32_t begin,
                                                    uint32_t count) {
  auto* actualArgs = args_->toCreateInlinedArgumentsObject();

  // Contrary to |WarpBuilder::build_Rest()|, we can always create
  // MNewArrayObject, because we're guaranteed to have a shape and all
  // arguments can be stored into fixed elements.
  static_assert(
      gc::CanUseFixedElementsForArray(ArgumentsObject::MaxInlinedArgs));

  gc::Heap heap = gc::Heap::Default;

  // Allocate an array of the correct size.
  auto* shapeConstant = MConstant::NewShape(alloc(), shape);
  ins->block()->insertBefore(ins, shapeConstant);

  auto* newArray = MNewArrayObject::New(alloc(), shapeConstant, count, heap);
  ins->block()->insertBefore(ins, newArray);

  if (count) {
    auto* elements = MElements::New(alloc(), newArray);
    ins->block()->insertBefore(ins, elements);

    MConstant* index = nullptr;
    for (uint32_t i = 0; i < count; i++) {
      index = MConstant::New(alloc(), Int32Value(i));
      ins->block()->insertBefore(ins, index);

      MDefinition* arg = actualArgs->getArg(begin + i);
      auto* store = MStoreElement::NewUnbarriered(alloc(), elements, index, arg,
                                                  /* needsHoleCheck = */ false);
      ins->block()->insertBefore(ins, store);

      auto* barrier = MPostWriteBarrier::New(alloc(), newArray, arg);
      ins->block()->insertBefore(ins, barrier);
    }

    auto* initLength = MSetInitializedLength::New(alloc(), elements, index);
    ins->block()->insertBefore(ins, initLength);
  }

  return newArray;
}
void ArgumentsReplacer::visitArrayFromArgumentsObject(
    MArrayFromArgumentsObject* ins) {
  // Skip other arguments objects.
  if (ins->argsObject() != args_) {
    return;
  }

  // We can only replace `arguments` because we've verified that the `arguments`
  // object hasn't been modified in any way. This implies that the arguments
  // stored in the stack frame haven't been changed either.
  //
  // The idea to replace `arguments` in spread calls `f(...arguments)` is now as
  // follows:
  // We replace |MArrayFromArgumentsObject| with the identical instructions we
  // emit when building a rest-array object, cf. |WarpBuilder::build_Rest()|. In
  // a next step, scalar replacement will then replace these new instructions
  // themselves.

  Shape* shape = ins->shape();
  MOZ_ASSERT(shape);

  MDefinition* replacement;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    uint32_t numActuals = actualArgs->numActuals();
    MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

    replacement = inlineArgsArray(ins, shape, 0, numActuals);
  } else {
    // We can use |MRest| to read all arguments, because we've guaranteed that
    // the arguments stored in the stack frame haven't changed; see the comment
    // at the start of this method.

    auto* numActuals = MArgumentsLength::New(alloc());
    ins->block()->insertBefore(ins, numActuals);

    // Set |numFormals| to zero to read all arguments, including any formals.
    uint32_t numFormals = 0;

    auto* rest = MRest::New(alloc(), numActuals, numFormals, shape);
    ins->block()->insertBefore(ins, rest);

    replacement = rest;
  }

  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}
static uint32_t NormalizeSlice(MDefinition* def, uint32_t length) {
  int32_t value = def->toConstant()->toInt32();
  if (value < 0) {
    return std::max(int32_t(uint32_t(value) + length), 0);
  }
  return std::min(uint32_t(value), length);
}
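
// NormalizeSlice clamps a constant slice index the same way
// |Array.prototype.slice| does. For example, with length == 3:
//   begin == -1  ->  2   (counted from the end)
//   begin == -5  ->  0   (clamped to the start)
//   begin ==  5  ->  3   (clamped to the length)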
void ArgumentsReplacer::visitArgumentsSlice(MArgumentsSlice* ins) {
  // Skip other arguments objects.
  if (ins->object() != args_) {
    return;
  }

  // Optimise the common pattern |Array.prototype.slice.call(arguments, begin)|,
  // where |begin| is a non-negative, constant int32.
  //
  // An absent end-index is replaced by |arguments.length|, so we try to match
  // |Array.prototype.slice.call(arguments, begin, arguments.length)|.
  if (isInlinedArguments()) {
    // When this is an inlined arguments, |arguments.length| has been replaced
    // by a constant.
    if (ins->begin()->isConstant() && ins->end()->isConstant()) {
      auto* actualArgs = args_->toCreateInlinedArgumentsObject();
      uint32_t numActuals = actualArgs->numActuals();
      MOZ_ASSERT(numActuals <= ArgumentsObject::MaxInlinedArgs);

      uint32_t begin = NormalizeSlice(ins->begin(), numActuals);
      uint32_t end = NormalizeSlice(ins->end(), numActuals);
      uint32_t count = end > begin ? end - begin : 0;
      MOZ_ASSERT(count <= numActuals);

      Shape* shape = ins->templateObj()->shape();
      auto* newArray = inlineArgsArray(ins, shape, begin, count);

      ins->replaceAllUsesWith(newArray);

      // Remove original instruction.
      ins->block()->discard(ins);
      return;
    }
  } else {
    // Otherwise |arguments.length| is emitted as MArgumentsLength.
    if (ins->begin()->isConstant() && ins->end()->isArgumentsLength()) {
      int32_t begin = ins->begin()->toConstant()->toInt32();
      if (begin >= 0) {
        auto* numActuals = MArgumentsLength::New(alloc());
        ins->block()->insertBefore(ins, numActuals);

        // Set |numFormals| to read all arguments starting at |begin|.
        uint32_t numFormals = begin;

        Shape* shape = ins->templateObj()->shape();

        // Use MRest because it can be scalar replaced, which enables further
        // optimizations.
        auto* rest = MRest::New(alloc(), numActuals, numFormals, shape);
        ins->block()->insertBefore(ins, rest);

        ins->replaceAllUsesWith(rest);

        // Remove original instruction.
        ins->block()->discard(ins);
        return;
      }
    }
  }

  MInstruction* numArgs;
  if (isInlinedArguments()) {
    uint32_t argc = args_->toCreateInlinedArgumentsObject()->numActuals();
    numArgs = MConstant::New(alloc(), Int32Value(argc));
  } else {
    numArgs = MArgumentsLength::New(alloc());
  }
  ins->block()->insertBefore(ins, numArgs);

  auto* begin = MNormalizeSliceTerm::New(alloc(), ins->begin(), numArgs);
  ins->block()->insertBefore(ins, begin);

  auto* end = MNormalizeSliceTerm::New(alloc(), ins->end(), numArgs);
  ins->block()->insertBefore(ins, end);

  bool isMax = false;
  auto* beginMin = MMinMax::New(alloc(), begin, end, MIRType::Int32, isMax);
  ins->block()->insertBefore(ins, beginMin);

  // Safe to truncate because both operands are positive and end >= beginMin.
  auto* count = MSub::New(alloc(), end, beginMin, MIRType::Int32);
  count->setTruncateKind(TruncateKind::Truncate);
  ins->block()->insertBefore(ins, count);

  MInstruction* replacement;
  if (isInlinedArguments()) {
    auto* actualArgs = args_->toCreateInlinedArgumentsObject();
    replacement =
        MInlineArgumentsSlice::New(alloc(), beginMin, count, actualArgs,
                                   ins->templateObj(), ins->initialHeap());
    if (!replacement) {
      oom_ = true;
      return;
    }
  } else {
    replacement = MFrameArgumentsSlice::New(
        alloc(), beginMin, count, ins->templateObj(), ins->initialHeap());
  }
  ins->block()->insertBefore(ins, replacement);

  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}
void ArgumentsReplacer::visitLoadFixedSlot(MLoadFixedSlot* ins) {
  // Skip other arguments objects.
  if (ins->object() != args_) {
    return;
  }

  MOZ_ASSERT(ins->slot() == ArgumentsObject::CALLEE_SLOT);

  MDefinition* replacement;
  if (isInlinedArguments()) {
    replacement = args_->toCreateInlinedArgumentsObject()->getCallee();
  } else {
    auto* callee = MCallee::New(alloc());
    ins->block()->insertBefore(ins, callee);
    replacement = callee;
  }
  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  ins->block()->discard(ins);
}
static inline bool IsOptimizableRestInstruction(MInstruction* ins) {
  return ins->isRest();
}
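
// The rest-array replacer targets uses of a rest parameter that never let the
// array escape, e.g. (illustrative only):
//
//   function f(...args) {
//     return args.length ? g(...args) : g(0);
//   }
//
// Length reads, element reads, and spread calls on the rest array compile to
// the instructions handled by the visitors below, so the rest array itself
// never needs to be allocated.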
class RestReplacer : public MDefinitionVisitorDefaultNoop {
 private:
  MIRGenerator* mir_;
  MIRGraph& graph_;
  MInstruction* rest_;

  TempAllocator& alloc() { return graph_.alloc(); }
  MRest* rest() const { return rest_->toRest(); }

  bool isRestElements(MDefinition* elements);
  void discardInstruction(MInstruction* ins, MDefinition* elements);
  MDefinition* restLength(MInstruction* ins);
  void visitLength(MInstruction* ins, MDefinition* elements);

  void visitGuardToClass(MGuardToClass* ins);
  void visitGuardShape(MGuardShape* ins);
  void visitGuardArrayIsPacked(MGuardArrayIsPacked* ins);
  void visitUnbox(MUnbox* ins);
  void visitCompare(MCompare* ins);
  void visitLoadElement(MLoadElement* ins);
  void visitArrayLength(MArrayLength* ins);
  void visitInitializedLength(MInitializedLength* ins);
  void visitApplyArray(MApplyArray* ins);
  void visitConstructArray(MConstructArray* ins);

  bool escapes(MElements* ins);

 public:
  RestReplacer(MIRGenerator* mir, MIRGraph& graph, MInstruction* rest)
      : mir_(mir), graph_(graph), rest_(rest) {
    MOZ_ASSERT(IsOptimizableRestInstruction(rest_));
  }

  bool escapes(MInstruction* ins);
  bool run();
  void assertSuccess();
};
// Returns false if the rest array object does not escape.
bool RestReplacer::escapes(MInstruction* ins) {
  MOZ_ASSERT(ins->type() == MIRType::Object);

  JitSpewDef(JitSpew_Escape, "Check rest array\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  // We can replace rest arrays in scripts with OSR entries, but the outermost
  // rest object has already been allocated before we enter via OSR and can't be
  // replaced.
  // See also the same restriction when replacing |arguments|.
  if (graph_.osrBlock()) {
    JitSpew(JitSpew_Escape, "Can't replace outermost OSR rest array");
    return true;
  }

  // Check all uses to see whether they can be supported without allocating an
  // ArrayObject for the rest parameter.
  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    MNode* consumer = (*i)->consumer();

    // If a resume point can observe this instruction, we can only optimize
    // if it is recoverable.
    if (consumer->isResumePoint()) {
      if (!consumer->toResumePoint()->isRecoverableOperand(*i)) {
        JitSpew(JitSpew_Escape, "Observable rest array cannot be recovered");
        return true;
      }
      continue;
    }

    MDefinition* def = consumer->toDefinition();
    switch (def->op()) {
      case MDefinition::Opcode::Elements: {
        auto* elem = def->toElements();
        MOZ_ASSERT(elem->object() == ins);
        if (escapes(elem)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardShape: {
        const Shape* shape = rest()->shape();
        if (!shape) {
          JitSpew(JitSpew_Escape, "No shape defined.");
          return true;
        }

        auto* guard = def->toGuardShape();
        if (shape != guard->shape()) {
          JitSpewDef(JitSpew_Escape, "has a non-matching guard shape\n", def);
          return true;
        }
        if (escapes(guard)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardToClass: {
        auto* guard = def->toGuardToClass();
        if (guard->getClass() != &ArrayObject::class_) {
          JitSpewDef(JitSpew_Escape, "has a non-matching class guard\n", def);
          return true;
        }
        if (escapes(guard)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::GuardArrayIsPacked: {
        // Rest arrays are always packed as long as they aren't modified.
        auto* guard = def->toGuardArrayIsPacked();
        if (escapes(guard)) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      case MDefinition::Opcode::Unbox: {
        if (def->type() != MIRType::Object) {
          JitSpewDef(JitSpew_Escape, "has an invalid unbox\n", def);
          return true;
        }
        if (escapes(def->toInstruction())) {
          JitSpewDef(JitSpew_Escape, "is indirectly escaped by\n", def);
          return true;
        }
        break;
      }

      // This instruction is supported for |JSOp::OptimizeSpreadCall|.
      case MDefinition::Opcode::Compare: {
        bool canFold;
        if (!def->toCompare()->tryFold(&canFold)) {
          JitSpewDef(JitSpew_Escape, "has an unsupported compare\n", def);
          return true;
        }
        break;
      }

      // This instruction is a no-op used to test that scalar replacement is
      // working as expected.
      case MDefinition::Opcode::AssertRecoveredOnBailout:
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Rest array object is not escaped");
  return false;
}
bool RestReplacer::escapes(MElements* ins) {
  JitSpewDef(JitSpew_Escape, "Check rest array elements\n", ins);
  JitSpewIndent spewIndent(JitSpew_Escape);

  for (MUseIterator i(ins->usesBegin()); i != ins->usesEnd(); i++) {
    // The MIRType::Elements cannot be captured in a resume point as it does not
    // represent a value allocation.
    MDefinition* def = (*i)->consumer()->toDefinition();

    switch (def->op()) {
      case MDefinition::Opcode::LoadElement:
        MOZ_ASSERT(def->toLoadElement()->elements() == ins);
        break;

      case MDefinition::Opcode::ArrayLength:
        MOZ_ASSERT(def->toArrayLength()->elements() == ins);
        break;

      case MDefinition::Opcode::InitializedLength:
        MOZ_ASSERT(def->toInitializedLength()->elements() == ins);
        break;

      case MDefinition::Opcode::ApplyArray:
        MOZ_ASSERT(def->toApplyArray()->getElements() == ins);
        break;

      case MDefinition::Opcode::ConstructArray:
        MOZ_ASSERT(def->toConstructArray()->getElements() == ins);
        break;

      default:
        JitSpewDef(JitSpew_Escape, "is escaped by\n", def);
        return true;
    }
  }

  JitSpew(JitSpew_Escape, "Rest array object is not escaped");
  return false;
}
// Replacing the rest array object is simpler than replacing an object or array,
// because the rest array object does not change state.
bool RestReplacer::run() {
  MBasicBlock* startBlock = rest_->block();

  // Iterate over each basic block.
  for (ReversePostorderIterator block = graph_.rpoBegin(startBlock);
       block != graph_.rpoEnd(); block++) {
    if (mir_->shouldCancel("Scalar replacement of rest array object")) {
      return false;
    }

    // Iterates over phis and instructions.
    // We do not have to visit resume points. Any resume points that capture the
    // rest array object will be handled by the Sink pass.
    for (MDefinitionIterator iter(*block); iter;) {
      // Increment the iterator before visiting the instruction, as the visit
      // function might discard itself from the basic block.
      MDefinition* def = *iter++;
      switch (def->op()) {
#define MIR_OP(op)              \
  case MDefinition::Opcode::op: \
    visit##op(def->to##op());   \
    break;
        MIR_OPCODE_LIST(MIR_OP)
#undef MIR_OP
      }
      if (!graph_.alloc().ensureBallast()) {
        return false;
      }
    }
  }

  assertSuccess();
  return true;
}

void RestReplacer::assertSuccess() {
  MOZ_ASSERT(rest_->canRecoverOnBailout());
  MOZ_ASSERT(!rest_->hasLiveDefUses());
}
bool RestReplacer::isRestElements(MDefinition* elements) {
  return elements->isElements() && elements->toElements()->object() == rest_;
}

void RestReplacer::discardInstruction(MInstruction* ins,
                                      MDefinition* elements) {
  MOZ_ASSERT(elements->isElements());
  ins->block()->discard(ins);
  if (!elements->hasLiveDefUses()) {
    elements->block()->discard(elements->toInstruction());
  }
}
void RestReplacer::visitGuardToClass(MGuardToClass* ins) {
  // Skip guards on other objects.
  if (ins->object() != rest_) {
    return;
  }
  MOZ_ASSERT(ins->getClass() == &ArrayObject::class_);

  // Replace the guard with the array object.
  ins->replaceAllUsesWith(rest_);

  // Remove the guard.
  ins->block()->discard(ins);
}

void RestReplacer::visitGuardShape(MGuardShape* ins) {
  // Skip guards on other objects.
  if (ins->object() != rest_) {
    return;
  }

  // Replace the guard with the array object.
  ins->replaceAllUsesWith(rest_);

  // Remove the guard.
  ins->block()->discard(ins);
}

void RestReplacer::visitGuardArrayIsPacked(MGuardArrayIsPacked* ins) {
  // Skip guards on other objects.
  if (ins->array() != rest_) {
    return;
  }

  // Replace the guard by its object.
  ins->replaceAllUsesWith(rest_);

  // Remove original instruction.
  ins->block()->discard(ins);
}

void RestReplacer::visitUnbox(MUnbox* ins) {
  // Skip unrelated unboxes.
  if (ins->input() != rest_) {
    return;
  }
  MOZ_ASSERT(ins->type() == MIRType::Object);

  // Replace the unbox with the array object.
  ins->replaceAllUsesWith(rest_);

  // Remove the unbox.
  ins->block()->discard(ins);
}
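
// The only comparisons that reach this point are the ones escape analysis
// accepted above, i.e. compares that MCompare::tryFold can already decide
// (used for |JSOp::OptimizeSpreadCall| checks), so they can simply be folded
// to a boolean constant here.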
void RestReplacer::visitCompare(MCompare* ins) {
  // Skip unrelated comparisons.
  if (ins->lhs() != rest_ && ins->rhs() != rest_) {
    return;
  }

  bool folded;
  MOZ_ALWAYS_TRUE(ins->tryFold(&folded));

  auto* cst = MConstant::New(alloc(), BooleanValue(folded));
  ins->block()->insertBefore(ins, cst);

  // Replace the comparison with a constant.
  ins->replaceAllUsesWith(cst);

  // Remove original instruction.
  ins->block()->discard(ins);
}
void RestReplacer::visitLoadElement(MLoadElement* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->elements();
  if (!isRestElements(elements)) {
    return;
  }

  MDefinition* index = ins->index();

  // Adjust the index to skip any extra formals.
  if (uint32_t formals = rest()->numFormals()) {
    auto* numFormals = MConstant::New(alloc(), Int32Value(formals));
    ins->block()->insertBefore(ins, numFormals);

    auto* add = MAdd::New(alloc(), index, numFormals, TruncateKind::Truncate);
    ins->block()->insertBefore(ins, add);

    index = add;
  }

  auto* loadArg = MGetFrameArgument::New(alloc(), index);

  ins->block()->insertBefore(ins, loadArg);
  ins->replaceAllUsesWith(loadArg);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
MDefinition* RestReplacer::restLength(MInstruction* ins) {
  // Compute |Math.max(numActuals - numFormals, 0)| for the rest array length.

  auto* numActuals = rest()->numActuals();

  if (uint32_t formals = rest()->numFormals()) {
    auto* numFormals = MConstant::New(alloc(), Int32Value(formals));
    ins->block()->insertBefore(ins, numFormals);

    auto* length = MSub::New(alloc(), numActuals, numFormals, MIRType::Int32);
    length->setTruncateKind(TruncateKind::Truncate);
    ins->block()->insertBefore(ins, length);

    auto* zero = MConstant::New(alloc(), Int32Value(0));
    ins->block()->insertBefore(ins, zero);

    bool isMax = true;
    auto* minmax = MMinMax::New(alloc(), length, zero, MIRType::Int32, isMax);
    ins->block()->insertBefore(ins, minmax);

    return minmax;
  }

  return numActuals;
}
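
// Worked example for restLength: with |function f(a, b, ...rest)| we have
// numFormals == 2, so
//   5 actual arguments  ->  max(5 - 2, 0) == 3 rest elements
//   1 actual argument   ->  max(1 - 2, 0) == 0 rest elements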
void RestReplacer::visitLength(MInstruction* ins, MDefinition* elements) {
  MOZ_ASSERT(ins->isArrayLength() || ins->isInitializedLength());

  // Skip other array objects.
  if (!isRestElements(elements)) {
    return;
  }

  MDefinition* replacement = restLength(ins);

  ins->replaceAllUsesWith(replacement);

  // Remove original instruction.
  discardInstruction(ins, elements);
}

void RestReplacer::visitArrayLength(MArrayLength* ins) {
  visitLength(ins, ins->elements());
}

void RestReplacer::visitInitializedLength(MInitializedLength* ins) {
  // The initialized length of a rest array is equal to its length.
  visitLength(ins, ins->elements());
}
void RestReplacer::visitApplyArray(MApplyArray* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->getElements();
  if (!isRestElements(elements)) {
    return;
  }

  auto* numActuals = restLength(ins);

  auto* apply =
      MApplyArgs::New(alloc(), ins->getSingleTarget(), ins->getFunction(),
                      numActuals, ins->getThis(), rest()->numFormals());
  apply->setBailoutKind(ins->bailoutKind());
  if (!ins->maybeCrossRealm()) {
    apply->setNotCrossRealm();
  }
  if (ins->ignoresReturnValue()) {
    apply->setIgnoresReturnValue();
  }
  ins->block()->insertBefore(ins, apply);

  ins->replaceAllUsesWith(apply);

  apply->stealResumePoint(ins);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
void RestReplacer::visitConstructArray(MConstructArray* ins) {
  // Skip other array objects.
  MDefinition* elements = ins->getElements();
  if (!isRestElements(elements)) {
    return;
  }

  auto* numActuals = restLength(ins);

  auto* construct = MConstructArgs::New(
      alloc(), ins->getSingleTarget(), ins->getFunction(), numActuals,
      ins->getThis(), ins->getNewTarget(), rest()->numFormals());
  construct->setBailoutKind(ins->bailoutKind());
  if (!ins->maybeCrossRealm()) {
    construct->setNotCrossRealm();
  }

  ins->block()->insertBefore(ins, construct);
  ins->replaceAllUsesWith(construct);

  construct->stealResumePoint(ins);

  // Remove original instruction.
  discardInstruction(ins, elements);
}
bool ScalarReplacement(MIRGenerator* mir, MIRGraph& graph) {
  JitSpew(JitSpew_Escape, "Begin (ScalarReplacement)");

  EmulateStateOf<ObjectMemoryView> replaceObject(mir, graph);
  EmulateStateOf<ArrayMemoryView> replaceArray(mir, graph);
  bool addedPhi = false;

  for (ReversePostorderIterator block = graph.rpoBegin();
       block != graph.rpoEnd(); block++) {
    if (mir->shouldCancel("Scalar Replacement (main loop)")) {
      return false;
    }

    for (MInstructionIterator ins = block->begin(); ins != block->end();
         ins++) {
      if (IsOptimizableObjectInstruction(*ins) &&
          !IsObjectEscaped(*ins, *ins)) {
        ObjectMemoryView view(graph.alloc(), *ins);
        if (!replaceObject.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }

      if (IsOptimizableArrayInstruction(*ins) && !IsArrayEscaped(*ins, *ins)) {
        ArrayMemoryView view(graph.alloc(), *ins);
        if (!replaceArray.run(view)) {
          return false;
        }
        view.assertSuccess();
        addedPhi = true;
        continue;
      }

      if (IsOptimizableArgumentsInstruction(*ins)) {
        ArgumentsReplacer replacer(mir, graph, *ins);
        if (replacer.escapes(*ins)) {
          continue;
        }
        if (!replacer.run()) {
          return false;
        }
        continue;
      }

      if (IsOptimizableRestInstruction(*ins)) {
        RestReplacer replacer(mir, graph, *ins);
        if (replacer.escapes(*ins)) {
          continue;
        }
        if (!replacer.run()) {
          return false;
        }
        continue;
      }
    }
  }

  if (addedPhi) {
    // Phis added by Scalar Replacement are only redundant Phis which are
    // not directly captured by any resume point but only by the MDefinition
    // state. The conservative observability only focuses on Phis which are
    // not used as resume points operands.
    AssertExtendedGraphCoherency(graph);
    if (!EliminatePhis(mir, graph, ConservativeObservability)) {
      return false;
    }
  }

  return true;
}
} /* namespace jit */
} /* namespace js */