/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/RangeAnalysis.h"

#include "mozilla/MathAlgorithms.h"
#include "mozilla/TemplateLib.h"

#include "jit/IonAnalysis.h"
#include "jit/JitSpewer.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "util/CheckedArithmetic.h"
#include "vm/ArgumentsObject.h"
#include "vm/TypedArrayObject.h"

#include "vm/BytecodeUtil-inl.h"

using namespace js::jit;

using JS::GenericNaN;

using mozilla::Abs;
using mozilla::CountLeadingZeroes32;
using mozilla::ExponentComponent;
using mozilla::FloorLog2;
using mozilla::IsInfinite;
using mozilla::IsNaN;
using mozilla::IsNegativeZero;
using mozilla::NegativeInfinity;
using mozilla::NumberEqualsInt32;
using mozilla::PositiveInfinity;

// [SMDOC] IonMonkey Range Analysis
//
// This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
//
// We associate a range object with each SSA name, and the ranges are consulted
// in order to determine whether overflow is possible for arithmetic
// computations.
//
// An important source of range information that requires care to take
// advantage of is conditional control flow. Consider the code below:
//
// if (x < 0) {
//   y = x + 2000000000;
// } else {
//   if (x < 1000000000) {
//     ...
//   } else {
//     y = x - 3000000000;
//   }
// }
//
// The arithmetic operations in this code cannot overflow, but it is not
// sufficient to simply associate each name with a range, since the information
// differs between basic blocks. The traditional dataflow approach would be to
// associate ranges with (name, basic block) pairs. This solution is not
// satisfying, since we lose the benefit of SSA form: in SSA form, each
// definition has a unique name, so there is no need to track information about
// the control flow of the program.
//
// The approach used here is to add a new form of pseudo operation called a
// beta node, which associates range information with a value. These beta
// instructions take one argument and additionally have an auxiliary constant
// range associated with them. Operationally, beta nodes are just copies, but
// the invariant expressed by beta node copies is that the output will fall
// inside the range given by the beta node. Gough and Klaeren refer to SSA
// extended with these beta nodes as XSA form. The following shows the example
// code transformed into XSA form:
//
// if (x < 0) {
//   x1 = Beta(x, [INT_MIN, -1]);
//   y1 = x1 + 2000000000;
// } else {
//   x2 = Beta(x, [0, INT_MAX]);
//   if (x2 < 1000000000) {
//     x3 = Beta(x2, [INT_MIN, 999999999]);
//     ...
//   } else {
//     x4 = Beta(x2, [1000000000, INT_MAX]);
//     y3 = x4 - 3000000000;
//   }
// }
//
// We insert beta nodes for the purposes of range analysis (they might also be
// useful for other forms of bounds check elimination) and remove them after
// range analysis is performed. The remaining compiler phases do not ever
// encounter beta nodes.

static bool IsDominatedUse(MBasicBlock* block, MUse* use) {
  MNode* n = use->consumer();
  bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();

  if (isPhi) {
    MPhi* phi = n->toDefinition()->toPhi();
    return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
  }

  return block->dominates(n->block());
}

static inline void SpewRange(MDefinition* def) {
#ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None &&
      def->range()) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    def->printName(out);
    out.printf(" has range ");
    def->range()->dump(out);
    out.printf("\n");
  }
#endif
}

static inline void SpewTruncate(MDefinition* def,
                                MDefinition::TruncateKind kind,
                                bool shouldClone) {
#ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_Range)) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    out.printf("truncating ");
    def->printName(out);
    out.printf(" (kind: %s, clone: %d)\n",
               MDefinition::TruncateKindString(kind), shouldClone);
  }
#endif
}

TempAllocator& RangeAnalysis::alloc() const { return graph_.alloc(); }

void RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig,
                                             MDefinition* dom,
                                             MBasicBlock* block) {
  for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd();) {
    MUse* use = *i++;
    if (use->consumer() != dom && IsDominatedUse(block, use)) {
      use->replaceProducer(dom);
    }
  }
}

bool RangeAnalysis::addBetaNodes() {
  JitSpew(JitSpew_Range, "Adding beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    JitSpew(JitSpew_Range, "Looking at block %u", block->id());

    BranchDirection branch_dir;
    MTest* test = block->immediateDominatorBranch(&branch_dir);

    if (!test || !test->getOperand(0)->isCompare()) {
      continue;
    }

    MCompare* compare = test->getOperand(0)->toCompare();

    if (!compare->isNumericComparison()) {
      continue;
    }

    // TODO: support unsigned comparisons
    if (compare->compareType() == MCompare::Compare_UInt32) {
      continue;
    }

    MDefinition* left = compare->getOperand(0);
    MDefinition* right = compare->getOperand(1);

    double conservativeLower = NegativeInfinity<double>();
    double conservativeUpper = PositiveInfinity<double>();
    MDefinition* val = nullptr;
    double bound = 0;

    JSOp jsop = compare->jsop();

    if (branch_dir == FALSE_BRANCH) {
      jsop = NegateCompareOp(jsop);
      conservativeLower = GenericNaN();
      conservativeUpper = GenericNaN();
    }

    MConstant* leftConst = left->maybeConstantValue();
    MConstant* rightConst = right->maybeConstantValue();
    if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
      bound = leftConst->numberToDouble();
      val = right;
      jsop = ReverseCompareOp(jsop);
    } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
      bound = rightConst->numberToDouble();
      val = left;
    } else if (left->type() == MIRType::Int32 &&
               right->type() == MIRType::Int32) {
      MDefinition* smaller = nullptr;
      MDefinition* greater = nullptr;
      if (jsop == JSOp::Lt) {
        smaller = left;
        greater = right;
      } else if (jsop == JSOp::Gt) {
        smaller = right;
        greater = left;
      }
      if (smaller && greater) {
        if (!alloc().ensureBallast()) {
          return false;
        }

        MBeta* beta;
        beta = MBeta::New(
            alloc(), smaller,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(smaller, beta, block);
        JitSpew(JitSpew_Range, "Adding beta node for smaller %u",
                smaller->id());
        beta = MBeta::New(
            alloc(), greater,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(greater, beta, block);
        JitSpew(JitSpew_Range, "Adding beta node for greater %u",
                greater->id());
      }
      continue;
    } else {
      continue;
    }

    // At this point, one of the operands of the compare is a constant, and
    // val is the other operand.
    MOZ_ASSERT(val);

    Range comp;
    switch (jsop) {
      case JSOp::Le:
        comp.setDouble(conservativeLower, bound);
        break;
      case JSOp::Lt:
        // For integers, if x < c, the upper bound of x is c-1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeSub(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(conservativeLower, bound);

        // Negative zero is not less than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::Ge:
        comp.setDouble(bound, conservativeUpper);
        break;
      case JSOp::Gt:
        // For integers, if x > c, the lower bound of x is c+1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeAdd(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(bound, conservativeUpper);

        // Negative zero is not greater than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::StrictEq:
        // A strict comparison can test for things other than numeric value.
        if (!compare->isNumericComparison()) {
          continue;
        }
        // Otherwise fall through to handle JSOp::StrictEq the same as JSOp::Eq.
        [[fallthrough]];
      case JSOp::Eq:
        comp.setDouble(bound, bound);
        break;
      case JSOp::StrictNe:
        // A strict comparison can test for things other than numeric value.
        if (!compare->isNumericComparison()) {
          continue;
        }
        // Otherwise fall through to handle JSOp::StrictNe the same as JSOp::Ne.
        [[fallthrough]];
      case JSOp::Ne:
        // Negative zero is not not-equal to zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
          break;
        }
        continue;  // well, we could have
                   // [-\inf, bound-1] U [bound+1, \inf] but we only use
                   // contiguous ranges.
      default:
        continue;
    }

    if (JitSpewEnabled(JitSpew_Range)) {
      JitSpewHeader(JitSpew_Range);
      Fprinter& out = JitSpewPrinter();
      out.printf("Adding beta node for %u with range ", val->id());
      comp.dump(out);
    }

    if (!alloc().ensureBallast()) {
      return false;
    }

    MBeta* beta = MBeta::New(alloc(), val, new (alloc()) Range(comp));
    block->insertBefore(*block->begin(), beta);
    replaceDominatedUsesWith(val, beta, block);
  }

  return true;
}

bool RangeAnalysis::removeBetaNodes() {
  JitSpew(JitSpew_Range, "Removing beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    for (MDefinitionIterator iter(*i); iter;) {
      MDefinition* def = *iter++;
      if (def->isBeta()) {
        MDefinition* op = def->getOperand(0);
        JitSpew(JitSpew_Range, "Removing beta node %u for %u", def->id(),
                op->id());
        def->justReplaceAllUsesWith(op);
        block->discardDef(def);
      } else {
        // We only place Beta nodes at the beginning of basic
        // blocks, so if we see something else, we can move on
        // to the next block.
        break;
      }
    }
  }
  return true;
}

void SymbolicBound::dump(GenericPrinter& out) const {
  if (loop) {
    out.printf("[loop] ");
  }
  sum.dump(out);
}

void SymbolicBound::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

// Test whether the given range's exponent tells us anything that its lower
// and upper bound values don't.
static bool IsExponentInteresting(const Range* r) {
  // If it lacks either a lower or upper bound, the exponent is interesting.
  if (!r->hasInt32Bounds()) {
    return true;
  }

  // Otherwise if there's no fractional part, the lower and upper bounds,
  // which are integers, are perfectly precise.
  if (!r->canHaveFractionalPart()) {
    return false;
  }

  // Otherwise, if the bounds are conservatively rounded across a power-of-two
  // boundary, the exponent may imply a tighter range.
  return FloorLog2(std::max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
}

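// Illustrative example of the check above: the fractional range F[0,1.5] is
// stored with rounded integer bounds as F[0,2] and max exponent 0 (see the
// discussion in Range::intersect below). FloorLog2(std::max(Abs(0), Abs(2)))
// is 1, which is greater than the stored exponent 0, so the exponent carries
// information beyond the rounded bounds and this function returns true.
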
void Range::dump(GenericPrinter& out) const {
  // Floating-point or Integer subset.
  if (canHaveFractionalPart_) {
    out.printf("F");
  } else {
    out.printf("I");
  }

  out.printf("[");

  if (!hasInt32LowerBound_) {
    out.printf("?");
  } else {
    out.printf("%d", lower_);
  }
  if (symbolicLower_) {
    out.printf(" [symbolic ");
    symbolicLower_->dump(out);
    out.printf("]");
  }

  out.printf(", ");

  if (!hasInt32UpperBound_) {
    out.printf("?");
  } else {
    out.printf("%d", upper_);
  }
  if (symbolicUpper_) {
    out.printf(" [symbolic ");
    symbolicUpper_->dump(out);
    out.printf("]");
  }

  out.printf("]");

  bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
  bool includesNegativeInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
  bool includesPositiveInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
  bool includesNegativeZero = canBeNegativeZero_;

  if (includesNaN || includesNegativeInfinity || includesPositiveInfinity ||
      includesNegativeZero) {
    out.printf(" (");
    if (includesNaN) {
      out.printf("U NaN");
    }
    if (includesNegativeInfinity) {
      out.printf("U -Infinity");
    }
    if (includesPositiveInfinity) {
      out.printf("U Infinity");
    }
    if (includesNegativeZero) {
      out.printf("U -0");
    }
    out.printf(")");
  }

  if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this)) {
    out.printf(" (< pow(2, %d+1))", max_exponent_);
  }
}

void Range::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

Range* Range::intersect(TempAllocator& alloc, const Range* lhs,
                        const Range* rhs, bool* emptyRange) {
  *emptyRange = false;

  if (!lhs && !rhs) {
    return nullptr;
  }

  if (!lhs) {
    return new (alloc) Range(*rhs);
  }
  if (!rhs) {
    return new (alloc) Range(*lhs);
  }

  int32_t newLower = std::max(lhs->lower_, rhs->lower_);
  int32_t newUpper = std::min(lhs->upper_, rhs->upper_);

  // If upper < lower, then we have conflicting constraints. Consider:
  //
  //   if (x < 0) {
  //     if (x > 0) {
  //       [Some code.]
  //     }
  //   }
  //
  // In this case, the block is unreachable.
  if (newUpper < newLower) {
    // If both ranges can be NaN, the result can still be NaN.
    if (!lhs->canBeNaN() || !rhs->canBeNaN()) {
      *emptyRange = true;
    }
    return nullptr;
  }

  bool newHasInt32LowerBound =
      lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ && rhs->canBeNegativeZero_);

  uint16_t newExponent = std::min(lhs->max_exponent_, rhs->max_exponent_);

  // NaN is a special value which is neither greater than infinity nor less
  // than negative infinity. When we intersect two ranges like [?, 0] and
  // [0, ?], we can end up thinking we have both a lower and upper bound, even
  // though NaN is still possible. In this case, just be conservative, since
  // any case where we can have NaN is not especially interesting.
  if (newHasInt32LowerBound && newHasInt32UpperBound &&
      newExponent == IncludesInfinityAndNaN) {
    return nullptr;
  }

  // If one of the ranges has a fractional part and the other doesn't, it's
  // possible that we will have computed a newExponent that's more precise
  // than our newLower and newUpper. This is unusual, so we handle it here
  // instead of in optimize().
  //
  // For example, consider the range F[0,1.5]. Range analysis represents the
  // lower and upper bound as integers, so we'd actually have
  // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
  // more precise upper bound than the integer upper bound.
  //
  // When intersecting such a range with an integer range, the fractional part
  // of the range is dropped. The max exponent of 0 remains valid, so the
  // upper bound needs to be adjusted to 1.
  //
  // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
  // the naive intersection is I[2,2], but since the max exponent tells us
  // that the value is always less than 2, the intersection is actually empty.
  if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
      (lhs->canHaveFractionalPart() && newHasInt32LowerBound &&
       newHasInt32UpperBound && newLower == newUpper)) {
    refineInt32BoundsByExponent(newExponent, &newLower, &newHasInt32LowerBound,
                                &newUpper, &newHasInt32UpperBound);

    // If we're intersecting two ranges that don't overlap, this could also
    // push the bounds past each other, since the actual intersection is
    // instead empty.
    if (newLower > newUpper) {
      *emptyRange = true;
      return nullptr;
    }
  }

  return new (alloc)
      Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
            newCanHaveFractionalPart, newMayIncludeNegativeZero, newExponent);
}

void Range::unionWith(const Range* other) {
  int32_t newLower = std::min(lower_, other->lower_);
  int32_t newUpper = std::max(upper_, other->upper_);

  bool newHasInt32LowerBound =
      hasInt32LowerBound_ && other->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      hasInt32UpperBound_ && other->hasInt32UpperBound_;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      canHaveFractionalPart_ || other->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(canBeNegativeZero_ || other->canBeNegativeZero_);

  uint16_t newExponent = std::max(max_exponent_, other->max_exponent_);

  rawInitialize(newLower, newHasInt32LowerBound, newUpper,
                newHasInt32UpperBound, newCanHaveFractionalPart,
                newMayIncludeNegativeZero, newExponent);
}

Range::Range(const MDefinition* def)
    : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
  if (const Range* other = def->range()) {
    // The instruction has range information; use it.
    *this = *other;

    // Simulate the effect of converting the value to its type.
    // Note: we cannot clamp here, since ranges aren't allowed to shrink
    // and truncation can increase range again. So doing wrapAround to
    // mimic a possible truncation.
    switch (def->type()) {
      case MIRType::Int32:
        // MToNumberInt32 cannot truncate. So we can safely clamp.
        if (def->isToNumberInt32()) {
          clampToInt32();
        } else {
          wrapAroundToInt32();
        }
        break;
      case MIRType::Boolean:
        wrapAroundToBoolean();
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        break;
    }
  } else {
    // Otherwise just use type information. We can trust the type here
    // because we don't care what value the instruction actually produces,
    // but what value we might get after we get past the bailouts.
    switch (def->type()) {
      case MIRType::Int32:
        setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
        break;
      case MIRType::Boolean:
        setInt32(0, 1);
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        setUnknown();
        break;
    }
  }

  // As a special case, MUrsh is permitted to claim a result type of
  // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
  // bailouts. If range analysis hasn't ruled out values in
  // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
  // use as either a uint32 or an int32.
  if (!hasInt32UpperBound() && def->isUrsh() &&
      def->toUrsh()->bailoutsDisabled() && def->type() != MIRType::Int64) {
    lower_ = 0;
  }

  assertInvariants();
}

static uint16_t ExponentImpliedByDouble(double d) {
  // Handle the special values.
  if (IsNaN(d)) {
    return Range::IncludesInfinityAndNaN;
  }
  if (IsInfinite(d)) {
    return Range::IncludesInfinity;
  }

  // Otherwise take the exponent part and clamp it at zero, since the Range
  // class doesn't track fractional ranges.
  return uint16_t(std::max(int_fast16_t(0), ExponentComponent(d)));
}

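// Illustrative values for the helper above: 5.5 is 1.375 * 2^2, so
// ExponentImpliedByDouble(5.5) is 2, while 0.25 is 1.0 * 2^-2 and its
// exponent component of -2 is clamped to 0.
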
void Range::setDouble(double l, double h) {
  MOZ_ASSERT(!(l > h));

  // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
  if (l >= INT32_MIN && l <= INT32_MAX) {
    lower_ = int32_t(::floor(l));
    hasInt32LowerBound_ = true;
  } else if (l >= INT32_MAX) {
    lower_ = INT32_MAX;
    hasInt32LowerBound_ = true;
  } else {
    lower_ = INT32_MIN;
    hasInt32LowerBound_ = false;
  }
  if (h >= INT32_MIN && h <= INT32_MAX) {
    upper_ = int32_t(::ceil(h));
    hasInt32UpperBound_ = true;
  } else if (h <= INT32_MIN) {
    upper_ = INT32_MIN;
    hasInt32UpperBound_ = true;
  } else {
    upper_ = INT32_MAX;
    hasInt32UpperBound_ = false;
  }

  // Infer max_exponent_.
  uint16_t lExp = ExponentImpliedByDouble(l);
  uint16_t hExp = ExponentImpliedByDouble(h);
  max_exponent_ = std::max(lExp, hExp);

  canHaveFractionalPart_ = ExcludesFractionalParts;
  canBeNegativeZero_ = ExcludesNegativeZero;

  // Infer the canHaveFractionalPart_ setting. We can have a
  // fractional part if the range crosses through the neighborhood of zero. We
  // won't have a fractional value if the value is always beyond the point at
  // which double precision can't represent fractional values.
  uint16_t minExp = std::min(lExp, hExp);
  bool includesNegative = IsNaN(l) || l < 0;
  bool includesPositive = IsNaN(h) || h > 0;
  bool crossesZero = includesNegative && includesPositive;
  if (crossesZero || minExp < MaxTruncatableExponent) {
    canHaveFractionalPart_ = IncludesFractionalParts;
  }

  // Infer the canBeNegativeZero_ setting. We can have a negative zero if
  // either bound is zero.
  if (!(l > 0) && !(h < 0)) {
    canBeNegativeZero_ = IncludesNegativeZero;
  }

  optimize();
}

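// Illustrative call of the setter above (values chosen for this comment):
// setDouble(1.5, 5e9) floors the lower bound to 1 (int32-representable),
// clamps the upper bound to INT32_MAX with hasInt32UpperBound_ = false
// because 5e9 exceeds INT32_MAX, takes max_exponent_ from 5e9 (below 2^33,
// so exponent 32), and keeps IncludesFractionalParts because the smaller
// exponent is below MaxTruncatableExponent.
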
void Range::setDoubleSingleton(double d) {
  setDouble(d, d);

  // The above setDouble call is for comparisons, and treats negative zero
  // as equal to zero. We're aiming for a minimum range, so we can clear the
  // negative zero flag if the value isn't actually negative zero.
  if (!IsNegativeZero(d)) {
    canBeNegativeZero_ = ExcludesNegativeZero;
  }

  assertInvariants();
}

static inline bool MissingAnyInt32Bounds(const Range* lhs, const Range* rhs) {
  return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
}

Range* Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  int64_t l = (int64_t)lhs->lower_ + (int64_t)rhs->lower_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound()) {
    l = NoInt32LowerBound;
  }

  int64_t h = (int64_t)lhs->upper_ + (int64_t)rhs->upper_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity + -Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  return new (alloc) Range(
      l, h,
      FractionalPartFlag(lhs->canHaveFractionalPart() ||
                         rhs->canHaveFractionalPart()),
      NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeNegativeZero()),
      e);
}

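// Note on the "+1" above (illustrative): if |a| < 2^(e+1) and |b| < 2^(e+1),
// then |a + b| < 2^(e+2), so bumping the larger operand exponent by one is
// always enough for finite values.
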
Range* Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  int64_t l = (int64_t)lhs->lower_ - (int64_t)rhs->upper_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound()) {
    l = NoInt32LowerBound;
  }

  int64_t h = (int64_t)lhs->upper_ - (int64_t)rhs->lower_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity - Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  return new (alloc) Range(
      l, h,
      FractionalPartFlag(lhs->canHaveFractionalPart() ||
                         rhs->canHaveFractionalPart()),
      NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeZero()), e);
}

Range* Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // If both numbers can be negative, the result can be negative in the whole
  // range.
  if (lhs->lower() < 0 && rhs->lower() < 0) {
    return Range::NewInt32Range(alloc, INT32_MIN,
                                std::max(lhs->upper(), rhs->upper()));
  }

  // Only one of the two numbers can be negative.
  // - The result can't be negative.
  // - The upper bound is the minimum of both upper bounds,
  int32_t lower = 0;
  int32_t upper = std::min(lhs->upper(), rhs->upper());

  // EXCEPT when the upper bound of the non-negative number is the max value,
  // because a negative value can return the whole max value.
  // -1 & 5 = 5
  if (lhs->lower() < 0) {
    upper = rhs->upper();
  }
  if (rhs->lower() < 0) {
    upper = lhs->upper();
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

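// Example of the single-negative case above (illustrative): with
// lhs = [-8, -2] and rhs = [0, 7], only lhs can be negative, so the result
// range is [0, rhs->upper()] = [0, 7]; e.g. -2 & 7 == 6.
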
Range* Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  // When one operand is always 0 or always -1, it's a special case where we
  // can compute a fully precise result. Handling these up front also
  // protects the code below from calling CountLeadingZeroes32 with a zero
  // operand or from shifting an int32_t by 32.
  if (lhs->lower() == lhs->upper()) {
    if (lhs->lower() == 0) {
      return new (alloc) Range(*rhs);
    }
    if (lhs->lower() == -1) {
      return new (alloc) Range(*lhs);
    }
  }
  if (rhs->lower() == rhs->upper()) {
    if (rhs->lower() == 0) {
      return new (alloc) Range(*lhs);
    }
    if (rhs->lower() == -1) {
      return new (alloc) Range(*rhs);
    }
  }

  // The code below uses CountLeadingZeroes32, which has undefined behavior
  // if its operand is 0. We rely on the code above to protect it.
  MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
  MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
  MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
  MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);

  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;

  if (lhs->lower() >= 0 && rhs->lower() >= 0) {
    // Both operands are non-negative, so the result won't be less than either.
    lower = std::max(lhs->lower(), rhs->lower());
    // The result will have leading zeros where both operands have leading
    // zeros. CountLeadingZeroes32 of a non-negative int32 will be at least 1
    // to account for the sign bit.
    upper = int32_t(UINT32_MAX >> std::min(CountLeadingZeroes32(lhs->upper()),
                                           CountLeadingZeroes32(rhs->upper())));
  } else {
    // The result will have leading ones where either operand has leading ones.
    if (lhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
    if (rhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

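// Example of the non-negative case above (illustrative): with lhs = [4, 5]
// and rhs = [8, 11], lower = max(4, 8) = 8, CountLeadingZeroes32(5) == 29 and
// CountLeadingZeroes32(11) == 28, so upper = UINT32_MAX >> 28 = 15, which
// indeed covers 5 | 11 == 15.
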
Range* Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  int32_t lhsLower = lhs->lower();
  int32_t lhsUpper = lhs->upper();
  int32_t rhsLower = rhs->lower();
  int32_t rhsUpper = rhs->upper();
  bool invertAfter = false;

  // If either operand is negative, bitwise-negate it, and arrange to negate
  // the result; ~((~x)^y) == x^y. If both are negative the negations on the
  // result cancel each other out; effectively this is (~x)^(~y) == x^y.
  // These transformations reduce the number of cases we have to handle below.
  if (lhsUpper < 0) {
    lhsLower = ~lhsLower;
    lhsUpper = ~lhsUpper;
    std::swap(lhsLower, lhsUpper);
    invertAfter = !invertAfter;
  }
  if (rhsUpper < 0) {
    rhsLower = ~rhsLower;
    rhsUpper = ~rhsUpper;
    std::swap(rhsLower, rhsUpper);
    invertAfter = !invertAfter;
  }

  // Handle cases where lhs or rhs is always zero specially, because they're
  // easy cases where we can be perfectly precise, and because it protects the
  // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
  // undefined behavior.
  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;
  if (lhsLower == 0 && lhsUpper == 0) {
    lower = rhsLower;
    upper = rhsUpper;
  } else if (rhsLower == 0 && rhsUpper == 0) {
    lower = lhsLower;
    upper = lhsUpper;
  } else if (lhsLower >= 0 && rhsLower >= 0) {
    // Both operands are non-negative. The result will be non-negative.
    lower = 0;
    // To compute the upper value, take each operand's upper value and
    // set all bits that don't correspond to leading zero bits in the
    // other to one. For each one, this gives an upper bound for the
    // result, so we can take the minimum between the two.
    unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
    unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
    upper = std::min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                     lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
  }

  // If we bitwise-negated one (but not both) of the operands above, apply the
  // bitwise-negate to the result, completing ~((~x)^y) == x^y.
  if (invertAfter) {
    lower = ~lower;
    upper = ~upper;
    std::swap(lower, upper);
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

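// Example of the negate-and-invert trick above (illustrative): for
// lhs = [-4, -1] and rhs = [0, 5], lhs is bitwise-negated to [0, 3] and
// invertAfter is set; the non-negative case then yields [0, 7], and the final
// inversion produces [-8, -1], which covers results such as -4 ^ 5 == -7.
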
Range* Range::not_(TempAllocator& alloc, const Range* op) {
  MOZ_ASSERT(op->isInt32());
  return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
}

Range* Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);

  NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(
      (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
      (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));

  uint16_t exponent;
  if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
    // Two finite values.
    exponent = lhs->numBits() + rhs->numBits() - 1;
    if (exponent > Range::MaxFiniteExponent) {
      exponent = Range::IncludesInfinity;
    }
  } else if (!lhs->canBeNaN() && !rhs->canBeNaN() &&
             !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
             !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) {
    // Two values that multiplied together won't produce a NaN.
    exponent = Range::IncludesInfinity;
  } else {
    // Could be anything.
    exponent = Range::IncludesInfinityAndNaN;
  }

  if (MissingAnyInt32Bounds(lhs, rhs)) {
    return new (alloc)
        Range(NoInt32LowerBound, NoInt32UpperBound, newCanHaveFractionalPart,
              newMayIncludeNegativeZero, exponent);
  }
  int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
  int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
  int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
  int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
  return new (alloc)
      Range(std::min(std::min(a, b), std::min(c, d)),
            std::max(std::max(a, b), std::max(c, d)), newCanHaveFractionalPart,
            newMayIncludeNegativeZero, exponent);
}

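// Note on the exponent above (illustrative, assuming numBits() is
// exponent() + 1): if |a| < 2^m and |b| < 2^n, then |a * b| < 2^(m + n), so
// an exponent of m + n - 1, i.e. numBits(lhs) + numBits(rhs) - 1, bounds the
// product of two finite values.
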
Range* Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;

  // If the shift doesn't lose bits or shift bits into the sign bit, we
  // can simply compute the correct range by shifting.
  if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) ==
          lhs->lower() &&
      (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) ==
          lhs->upper()) {
    return Range::NewInt32Range(alloc, uint32_t(lhs->lower()) << shift,
                                uint32_t(lhs->upper()) << shift);
  }

  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

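// Example of the round-trip check above (illustrative): for lhs = [3, 5] and
// shift = 2, both (3 << 2 << 1 >> 2 >> 1) == 3 and (5 << 2 << 1 >> 2 >> 1) == 5
// hold, so the result is [12, 20]; a shift that would move bits into or past
// the sign bit fails the check and falls back to [INT32_MIN, INT32_MAX].
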
Range* Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;
  return Range::NewInt32Range(alloc, lhs->lower() >> shift,
                              lhs->upper() >> shift);
}

Range* Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());

  int32_t shift = c & 0x1f;

  // If the value is always non-negative or always negative, we can simply
  // compute the correct range by shifting.
  if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
    return Range::NewUInt32Range(alloc, uint32_t(lhs->lower()) >> shift,
                                 uint32_t(lhs->upper()) >> shift);
  }

  // Otherwise return the most general range after the shift.
  return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}

Range* Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range* Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // Canonicalize the shift range to 0 to 31.
  int32_t shiftLower = rhs->lower();
  int32_t shiftUpper = rhs->upper();
  if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
    shiftLower = 0;
    shiftUpper = 31;
  } else {
    shiftLower &= 0x1f;
    shiftUpper &= 0x1f;
    if (shiftLower > shiftUpper) {
      shiftLower = 0;
      shiftUpper = 31;
    }
  }
  MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);

  // The lhs bounds are signed, thus the minimum is either the lower bound
  // shifted by the smallest shift if negative, or the lower bound shifted by
  // the biggest shift otherwise. And the opposite for the maximum.
  int32_t lhsLower = lhs->lower();
  int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
  int32_t lhsUpper = lhs->upper();
  int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;

  return Range::NewInt32Range(alloc, min, max);
}

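// Example of the bound selection above (illustrative): for lhs = [-10, 20]
// and a shift range of [1, 3], the minimum is -10 >> 1 == -5 (negative values
// move toward zero as the shift grows) and the maximum is 20 >> 1 == 10, so
// the result is [-5, 10].
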
Range* Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  return Range::NewUInt32Range(
      alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}

Range* Range::abs(TempAllocator& alloc, const Range* op) {
  int32_t l = op->lower_;
  int32_t u = op->upper_;
  FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;

  // Abs never produces a negative zero.
  NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;

  return new (alloc) Range(
      std::max(std::max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u), true,
      std::max(std::max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
      op->hasInt32Bounds() && l != INT32_MIN, canHaveFractionalPart,
      canBeNegativeZero, op->max_exponent_);
}

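// Example of the bound computation above (illustrative): for op = [-5, 3] the
// result is [max(0, -3), max(3, 5)] = [0, 5]; the INT32_MIN special case
// exists because -INT32_MIN is not representable, so the upper bound is
// clamped to INT32_MAX and the int32 upper-bound flag is dropped.
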
Range* Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  return new (alloc) Range(std::min(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                           std::min(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}

Range* Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  return new (alloc) Range(std::max(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                           std::max(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}

Range* Range::floor(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  // Decrement the lower bound of the copied range if op has a fractional
  // part and the lower bound is int32-defined. We also avoid decrementing
  // when op has a fractional part but lower_ >= JSVAL_INT_MAX.
  if (op->canHaveFractionalPart() && op->hasInt32LowerBound()) {
    copy->setLowerInit(int64_t(copy->lower_) - 1);
  }

  // Also refine max_exponent_ because floor may have decremented the int
  // value. If we've got int32-defined bounds, just deduce it using those
  // bounds. But if we don't have them, the value's max_exponent_ may have
  // changed. Because we're looking to maintain an over-estimation, increment
  // the exponent by one if we can.
  if (copy->hasInt32Bounds())
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  else if (copy->max_exponent_ < MaxFiniteExponent)
    copy->max_exponent_++;

  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}

Range* Range::ceil(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);

  // We need to refine max_exponent_ because ceil may have incremented the int
  // value. If we have int32 bounds defined, just deduce it using the defined
  // bounds. Else we can just increment its value, as we are looking to
  // maintain an over-estimation.
  if (copy->hasInt32Bounds()) {
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  } else if (copy->max_exponent_ < MaxFiniteExponent) {
    copy->max_exponent_++;
  }

  // If the range is definitely above 0 or below -1, we don't need to include
  // -0; otherwise we do.
  copy->canBeNegativeZero_ = ((copy->lower_ > 0) || (copy->upper_ <= -1))
                                 ? copy->canBeNegativeZero_
                                 : IncludesNegativeZero;

  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}

Range* Range::sign(TempAllocator& alloc, const Range* op) {
  if (op->canBeNaN()) {
    return nullptr;
  }

  return new (alloc) Range(std::max(std::min(op->lower_, 1), -1),
                           std::max(std::min(op->upper_, 1), -1),
                           Range::ExcludesFractionalParts,
                           NegativeZeroFlag(op->canBeNegativeZero()), 0);
}

Range* Range::NaNToZero(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  if (copy->canBeNaN()) {
    copy->max_exponent_ = Range::IncludesInfinity;
    if (!copy->canBeZero()) {
      Range zero;
      zero.setDoubleSingleton(0);
      copy->unionWith(&zero);
    }
  }
  copy->refineToExcludeNegativeZero();
  return copy;
}

Range* Range::toIntegerInt32(TempAllocator& alloc, const Range* op) {
  return Range::NaNToZero(alloc, Range::ceil(alloc, op));
}

bool Range::negativeZeroMul(const Range* lhs, const Range* rhs) {
  // The result can only be negative zero if both sides are finite and they
  // have differing signs.
  return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
         (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
}

bool Range::update(const Range* other) {
  bool changed = lower_ != other->lower_ ||
                 hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
                 upper_ != other->upper_ ||
                 hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
                 canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
                 canBeNegativeZero_ != other->canBeNegativeZero_ ||
                 max_exponent_ != other->max_exponent_;
  if (changed) {
    lower_ = other->lower_;
    hasInt32LowerBound_ = other->hasInt32LowerBound_;
    upper_ = other->upper_;
    hasInt32UpperBound_ = other->hasInt32UpperBound_;
    canHaveFractionalPart_ = other->canHaveFractionalPart_;
    canBeNegativeZero_ = other->canBeNegativeZero_;
    max_exponent_ = other->max_exponent_;
    assertInvariants();
  }

  return changed;
}

///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////

void MPhi::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range* range = nullptr;
  for (size_t i = 0, e = numOperands(); i < e; i++) {
    if (getOperand(i)->block()->unreachable()) {
      JitSpew(JitSpew_Range, "Ignoring unreachable input %u",
              getOperand(i)->id());
      continue;
    }

    // Peek at the pre-bailout range so we can take a short-cut; if any of
    // the operands has an unknown range, this phi has an unknown range.
    if (!getOperand(i)->range()) {
      return;
    }

    Range input(getOperand(i));

    if (range) {
      range->unionWith(&input);
    } else {
      range = new (alloc) Range(input);
    }
  }

  setRange(range);
}

void MBeta::computeRange(TempAllocator& alloc) {
  bool emptyRange = false;

  Range opRange(getOperand(0));
  Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
  if (emptyRange) {
    JitSpew(JitSpew_Range, "Marking block for inst %u unreachable", id());
    block()->setUnreachableUnchecked();
  } else {
    setRange(range);
  }
}

void MConstant::computeRange(TempAllocator& alloc) {
  if (isTypeRepresentableAsDouble()) {
    double d = numberToDouble();
    setRange(Range::NewDoubleSingletonRange(alloc, d));
  } else if (type() == MIRType::Boolean) {
    bool b = toBoolean();
    setRange(Range::NewInt32Range(alloc, b, b));
  }
}

void MCharCodeAt::computeRange(TempAllocator& alloc) {
  // ECMA 262 says that the integer will be non-negative and at most 65535.
  setRange(Range::NewInt32Range(alloc, 0, 65535));
}

void MClampToUint8::computeRange(TempAllocator& alloc) {
  setRange(Range::NewUInt32Range(alloc, 0, 255));
}

void MBitAnd::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::and_(alloc, &left, &right));
}

void MBitOr::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::or_(alloc, &left, &right));
}

void MBitXor::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::xor_(alloc, &left, &right));
}

void MBitNot::computeRange(TempAllocator& alloc) {
  MOZ_ASSERT(type() == MIRType::Int32);

  Range op(getOperand(0));
  op.wrapAroundToInt32();

  setRange(Range::not_(alloc, &op));
}

void MLsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::lsh(alloc, &left, c));
    return;
  }

  right.wrapAroundToShiftCount();
  setRange(Range::lsh(alloc, &left, &right));
}

void MRsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::rsh(alloc, &left, c));
    return;
  }

  right.wrapAroundToShiftCount();
  setRange(Range::rsh(alloc, &left, &right));
}

void MUrsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));

  // ursh can be thought of as converting its left operand to uint32, or it
  // can be thought of as converting its left operand to int32, and then
  // reinterpreting the int32 bits as a uint32 value. Both approaches yield
  // the same result. Since we lack support for full uint32 ranges, we use
  // the second interpretation, though it does cause us to be conservative.
  left.wrapAroundToInt32();
  right.wrapAroundToShiftCount();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::ursh(alloc, &left, c));
  } else {
    setRange(Range::ursh(alloc, &left, &right));
  }

  MOZ_ASSERT(range()->lower() >= 0);
}

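// Illustrative JS behaviour behind the conservatism above: (-1 >>> 0) is
// 4294967295, so even a zero shift can produce any value in [0, UINT32_MAX],
// although the MIR node claims an Int32 result type.
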
void MAbs::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range other(getOperand(0));
  Range* next = Range::abs(alloc, &other);
  if (implicitTruncate_) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MFloor::computeRange(TempAllocator& alloc) {
  Range other(getOperand(0));
  setRange(Range::floor(alloc, &other));
}

void MCeil::computeRange(TempAllocator& alloc) {
  Range other(getOperand(0));
  setRange(Range::ceil(alloc, &other));
}

void MClz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MCtz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MPopcnt::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MMinMax::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  setRange(isMax() ? Range::max(alloc, &left, &right)
                   : Range::min(alloc, &left, &right));
}

void MAdd::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  Range* next = Range::add(alloc, &left, &right);
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MSub::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  Range* next = Range::sub(alloc, &left, &right);
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MMul::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  if (canBeNegativeZero()) {
    canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
  }
  Range* next = Range::mul(alloc, &left, &right);
  if (!next->canBeNegativeZero()) {
    canBeNegativeZero_ = false;
  }
  // Truncated multiplications could overflow in both directions
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MMod::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
    return;
  }

  // If RHS can be zero, the result can be NaN.
  if (rhs.lower() <= 0 && rhs.upper() >= 0) {
    return;
  }

  // If both operands are non-negative integers, we can optimize this to an
  // unsigned mod.
  if (type() == MIRType::Int32 && rhs.lower() > 0) {
    bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
                      rhs.canHaveFractionalPart();
    // It is not possible to check that lhs.lower() >= 0, since the range
    // of a ursh with rhs a 0 constant is wrapped around the int32 range in
    // Range::Range(). However, IsUint32Type() will only return true for
    // nodes that lie in the range [0, UINT32_MAX].
    bool hasUint32s =
        IsUint32Type(getOperand(0)) &&
        getOperand(1)->type() == MIRType::Int32 &&
        (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
    if (!hasDoubles || hasUint32s) {
      unsigned_ = true;
    }
  }

  // For unsigned mod, we have to convert both operands to unsigned.
  // Note that we handled the case of a zero rhs above.
  if (unsigned_) {
    // The result of an unsigned mod will never be unsigned-greater than
    // either operand.
    uint32_t lhsBound = std::max<uint32_t>(lhs.lower(), lhs.upper());
    uint32_t rhsBound = std::max<uint32_t>(rhs.lower(), rhs.upper());

    // If either range crosses through -1 as a signed value, it could be
    // the maximum unsigned value when interpreted as unsigned. If the range
    // doesn't include -1, then the simple max value we computed above is
    // correct.
    if (lhs.lower() <= -1 && lhs.upper() >= -1) {
      lhsBound = UINT32_MAX;
    }
    if (rhs.lower() <= -1 && rhs.upper() >= -1) {
      rhsBound = UINT32_MAX;
    }

    // The result will never be equal to the rhs, and we shouldn't have
    // any rounding to worry about.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    --rhsBound;

    // This gives us two upper bounds, so we can take the best one.
    setRange(Range::NewUInt32Range(alloc, 0, std::min(lhsBound, rhsBound)));
    return;
  }

  // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
  // First, the absolute value of the result will always be less than the
  // absolute value of rhs. (And if rhs is zero, the result is NaN).
  int64_t a = Abs<int64_t>(rhs.lower());
  int64_t b = Abs<int64_t>(rhs.upper());
  if (a == 0 && b == 0) {
    return;
  }
  int64_t rhsAbsBound = std::max(a, b);

  // If the value is known to be integer, less-than abs(rhs) is equivalent
  // to less-than-or-equal abs(rhs)-1. This is important for being able to
  // say that the result of x%256 is an 8-bit unsigned number.
  if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) {
    --rhsAbsBound;
  }

  // Next, the absolute value of the result will never be greater than the
  // absolute value of lhs.
  int64_t lhsAbsBound =
      std::max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));

  // This gives us two upper bounds, so we can take the best one.
  int64_t absBound = std::min(lhsAbsBound, rhsAbsBound);

  // Now consider the sign of the result.
  // If lhs is non-negative, the result will be non-negative.
  // If lhs is non-positive, the result will be non-positive.
  int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
  int64_t upper = lhs.upper() <= 0 ? 0 : absBound;

  Range::FractionalPartFlag newCanHaveFractionalPart =
      Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
                                rhs.canHaveFractionalPart());

  // If the lhs can have the sign bit set and we can return a zero, it'll be a
  // negative zero.
  Range::NegativeZeroFlag newMayIncludeNegativeZero =
      Range::NegativeZeroFlag(lhs.canHaveSignBitSet());

  setRange(new (alloc) Range(lower, upper, newCanHaveFractionalPart,
                             newMayIncludeNegativeZero,
                             std::min(lhs.exponent(), rhs.exponent())));
}

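// Illustrative JS behaviour behind the sign handling above: the sign of
// lhs % rhs follows the dividend, e.g. (-7) % 3 == -1 and 7 % -3 == 1, which
// is why lower/upper take their signs from lhs alone.
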
void MDiv::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
    return;
  }

  // Something simple for now: When dividing by a positive rhs, the result
  // won't be further from zero than lhs.
  if (lhs.lower() >= 0 && rhs.lower() >= 1) {
    setRange(new (alloc) Range(0, lhs.upper(), Range::IncludesFractionalParts,
                               Range::IncludesNegativeZero, lhs.exponent()));
  } else if (unsigned_ && rhs.lower() >= 1) {
    // We shouldn't set the unsigned flag if the inputs can have
    // fractional parts.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    // We shouldn't set the unsigned flag if the inputs can be
    // negative zero.
    MOZ_ASSERT(!lhs.canBeNegativeZero() && !rhs.canBeNegativeZero());
    // Unsigned division by a non-zero rhs will return a uint32 value.
    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
  }
}

void MSqrt::computeRange(TempAllocator& alloc) {
  Range input(getOperand(0));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!input.hasInt32Bounds()) {
    return;
  }

  // Sqrt of a negative non-zero value is NaN.
  if (input.lower() < 0) {
    return;
  }

  // Something simple for now: When taking the sqrt of a positive value, the
  // result won't be further from zero than the input.
  // And, sqrt of an integer may have a fractional part.
  setRange(new (alloc) Range(0, input.upper(), Range::IncludesFractionalParts,
                             input.canBeNegativeZero(), input.exponent()));
}

void MToDouble::computeRange(TempAllocator& alloc) {
  setRange(new (alloc) Range(getOperand(0)));
}

void MToFloat32::computeRange(TempAllocator& alloc) {}

void MTruncateToInt32::computeRange(TempAllocator& alloc) {
  Range* output = new (alloc) Range(getOperand(0));
  output->wrapAroundToInt32();
  setRange(output);
}

void MToNumberInt32::computeRange(TempAllocator& alloc) {
  // No clamping since this computes the range *before* bailouts.
  setRange(new (alloc) Range(getOperand(0)));
}

void MToIntegerInt32::computeRange(TempAllocator& alloc) {
  Range other(input());
  setRange(Range::toIntegerInt32(alloc, &other));
}

void MLimitedTruncate::computeRange(TempAllocator& alloc) {
  Range* output = new (alloc) Range(input());
  setRange(output);
}

void MFilterTypeSet::computeRange(TempAllocator& alloc) {
  setRange(new (alloc) Range(getOperand(0)));
}

static Range* GetArrayBufferViewRange(TempAllocator& alloc,
                                      Scalar::Type type) {
  switch (type) {
    case Scalar::Uint8Clamped:
    case Scalar::Uint8:
      return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
    case Scalar::Uint16:
      return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
    case Scalar::Uint32:
      return Range::NewUInt32Range(alloc, 0, UINT32_MAX);

    case Scalar::Int8:
      return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
    case Scalar::Int16:
      return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
    case Scalar::Int32:
      return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);

    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::Int64:
    case Scalar::Simd128:
    case Scalar::Float32:
    case Scalar::Float64:
    case Scalar::MaxTypedArrayViewType:
      break;
  }
  return nullptr;
}

void MLoadUnboxedScalar::computeRange(TempAllocator& alloc) {
  // We have an Int32 type and if this is a UInt32 load it may produce a value
  // outside of our range, but we have a bailout to handle those cases.
  setRange(GetArrayBufferViewRange(alloc, storageType()));
}

void MLoadDataViewElement::computeRange(TempAllocator& alloc) {
  // We have an Int32 type and if this is a UInt32 load it may produce a value
  // outside of our range, but we have a bailout to handle those cases.
  setRange(GetArrayBufferViewRange(alloc, storageType()));
}

void MArrayLength::computeRange(TempAllocator& alloc) {
  // Array lengths can go up to UINT32_MAX. IonBuilder only creates
  // MArrayLength nodes when the value is known to be int32 (see the
  // OBJECT_FLAG_LENGTH_OVERFLOW flag). WarpBuilder does a dynamic check and we
  // have to return the range pre-bailouts, so use UINT32_MAX for Warp.
  uint32_t max = JitOptions.warpBuilder ? UINT32_MAX : INT32_MAX;
  setRange(Range::NewUInt32Range(alloc, 0, max));
}

void MInitializedLength::computeRange(TempAllocator& alloc) {
  setRange(
      Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT));
}

void MArrayBufferViewLength::computeRange(TempAllocator& alloc) {
  setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void MArrayBufferViewByteOffset::computeRange(TempAllocator& alloc) {
  setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void MTypedArrayElementShift::computeRange(TempAllocator& alloc) {
  using mozilla::tl::FloorLog2;

  constexpr auto MaxTypedArrayShift = FloorLog2<sizeof(double)>::value;

#define ASSERT_MAX_SHIFT(T, N)                                       \
  static_assert(FloorLog2<sizeof(T)>::value <= MaxTypedArrayShift,   \
                "unexpected typed array type exceeding 64-bits storage");
  JS_FOR_EACH_TYPED_ARRAY(ASSERT_MAX_SHIFT)
#undef ASSERT_MAX_SHIFT

  setRange(Range::NewUInt32Range(alloc, 0, MaxTypedArrayShift));
}

void MStringLength::computeRange(TempAllocator& alloc) {
  static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
                "NewUInt32Range requires a uint32 value");
  setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
}

void MArgumentsLength::computeRange(TempAllocator& alloc) {
  // This is a conservative upper bound on what |TooManyActualArguments|
  // checks. If exceeded, Ion will not be entered in the first place.
  static_assert(ARGS_LENGTH_MAX <= UINT32_MAX,
                "NewUInt32Range requires a uint32 value");
  setRange(Range::NewUInt32Range(alloc, 0, ARGS_LENGTH_MAX));
}

void MBoundsCheck::computeRange(TempAllocator& alloc) {
  // Just transfer the incoming index range to the output. The length() is
  // also interesting, but it is handled as a bailout check, and we're
  // computing a pre-bailout range here.
  setRange(new (alloc) Range(index()));
}

void MSpectreMaskIndex::computeRange(TempAllocator& alloc) {
  // Just transfer the incoming index range to the output for now.
  setRange(new (alloc) Range(index()));
}

void MArrayPush::computeRange(TempAllocator& alloc) {
  // MArrayPush returns the new array length.
  setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
}

void MMathFunction::computeRange(TempAllocator& alloc) {
  Range opRange(getOperand(0));
  switch (function()) {
    case UnaryMathFunction::Sin:
    case UnaryMathFunction::Cos:
      if (!opRange.canBeInfiniteOrNaN()) {
        setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
      }
      break;
    default:
      break;
  }
}

void MSign::computeRange(TempAllocator& alloc) {
  Range opRange(getOperand(0));
  setRange(Range::sign(alloc, &opRange));
}

void MRandom::computeRange(TempAllocator& alloc) {
  Range* r = Range::NewDoubleRange(alloc, 0.0, 1.0);

  // Random never returns negative zero.
  r->refineToExcludeNegativeZero();

  setRange(r);
}

void MNaNToZero::computeRange(TempAllocator& alloc) {
  Range other(input());
  setRange(Range::NaNToZero(alloc, &other));
}

///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////

bool RangeAnalysis::analyzeLoop(MBasicBlock* header) {
  MOZ_ASSERT(header->hasUniqueBackedge());

  // Try to compute an upper bound on the number of times the loop backedge
  // will be taken. Look for tests that dominate the backedge and which have
  // an edge leaving the loop body.
  MBasicBlock* backedge = header->backedge();

  // Ignore trivial infinite loops.
  if (backedge == header) {
    return true;
  }

  bool canOsr;
  size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);

  // Ignore broken loops.
  if (numBlocks == 0) {
    return true;
  }

  LoopIterationBound* iterationBound = nullptr;

  MBasicBlock* block = backedge;
  do {
    BranchDirection direction;
    MTest* branch = block->immediateDominatorBranch(&direction);

    if (block == block->immediateDominator()) {
      break;
    }

    block = block->immediateDominator();

    if (branch) {
      direction = NegateBranchDirection(direction);
      MBasicBlock* otherBlock = branch->branchSuccessor(direction);
      if (!otherBlock->isMarked()) {
        if (!alloc().ensureBallast()) {
          return false;
        }
        iterationBound = analyzeLoopIterationCount(header, branch, direction);
        if (iterationBound) {
          break;
        }
      }
    }
  } while (block != header);

  if (!iterationBound) {
    UnmarkLoopBlocks(graph_, header);
    return true;
  }

  if (!loopIterationBounds.append(iterationBound)) {
    return false;
  }

  if (JitSpewEnabled(JitSpew_Range)) {
    Sprinter sp(GetJitContext()->cx);
    if (!sp.init()) {
      return false;
    }
    iterationBound->boundSum.dump(sp);
    JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
            sp.string());
  }

  // Try to compute symbolic bounds for the phi nodes at the head of this
  // loop, expressed in terms of the iteration bound just computed.

  for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd();
       iter++) {
    analyzeLoopPhi(iterationBound, *iter);
  }

  if (!mir->compilingWasm()) {
    // Try to hoist any bounds checks from the loop using symbolic bounds.

    Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());

    for (ReversePostorderIterator iter(graph_.rpoBegin(header));
         iter != graph_.rpoEnd(); iter++) {
      MBasicBlock* block = *iter;
      if (!block->isMarked()) {
        continue;
      }

      for (MDefinitionIterator iter(block); iter; iter++) {
        MDefinition* def = *iter;
        if (def->isBoundsCheck() && def->isMovable()) {
          if (!alloc().ensureBallast()) {
            return false;
          }
          if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
            if (!hoistedChecks.append(def->toBoundsCheck())) {
              return false;
            }
          }
        }
      }
    }

    // Note: replace all uses of the original bounds check with the
    // actual index. This is usually done during bounds check elimination,
    // but in this case it's safe to do it here since the load/store is
    // definitely not loop-invariant, so we will never move it before
    // one of the bounds checks we just added.
    for (size_t i = 0; i < hoistedChecks.length(); i++) {
      MBoundsCheck* ins = hoistedChecks[i];
      ins->replaceAllUsesWith(ins->index());
      ins->block()->discard(ins);
    }
  }

  UnmarkLoopBlocks(graph_, header);
  return true;
}

1997 // Unbox beta nodes in order to hoist instructions properly, and not be limited
1998 // by the beta nodes which are added after each branch.
1999 static inline MDefinition* DefinitionOrBetaInputDefinition(MDefinition* ins) {
2000 while (ins->isBeta()) {
2001 ins = ins->toBeta()->input();
2006 LoopIterationBound* RangeAnalysis::analyzeLoopIterationCount(
2007     MBasicBlock* header, MTest* test, BranchDirection direction) {
2008 SimpleLinearSum lhs(nullptr, 0);
2011 if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual)) {
2015 // Ensure the rhs is a loop invariant term.
2016 if (rhs && rhs->block()->isMarked()) {
2017 if (lhs.term && lhs.term->block()->isMarked()) {
2020 MDefinition* temp = lhs.term;
2023 if (!SafeSub(0, lhs.constant, &lhs.constant)) {
2026 lessEqual = !lessEqual;
2029 MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());
2031 // Ensure the lhs is a phi node from the start of the loop body.
2032 if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header) {
2036 // Check that the value of the lhs changes by a constant amount with each
2037 // loop iteration. This requires that the lhs be written in every loop
2038 // iteration with a value that is a constant difference from its value at
2039 // the start of the iteration.
2041 if (lhs.term->toPhi()->numOperands() != 2) {
2045 // The first operand of the phi should be the lhs' value at the start of
2046 // the first executed iteration, and not a value written which could
2047 // replace the second operand below during the middle of execution.
2048 MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
2049 if (lhsInitial->block()->isMarked()) {
2053 // The second operand of the phi should be a value written by an add/sub
2054 // in every loop iteration, i.e. in a block which dominates the backedge.
2055 MDefinition* lhsWrite = DefinitionOrBetaInputDefinition(
2056     lhs.term->toPhi()->getLoopBackedgeOperand());
2057 if (!lhsWrite->isAdd() && !lhsWrite->isSub()) {
2060 if (!lhsWrite->block()->isMarked()) {
2063 MBasicBlock* bb = header->backedge();
2064 for (; bb != lhsWrite->block() && bb != header;
2065      bb = bb->immediateDominator()) {
2067 if (bb != lhsWrite->block()) {
2071 SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);
2073 // Check that the value of the lhs at the backedge is of the form
2074 // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
2075 // of the iteration, and not that written to lhs in a previous iteration,
2076 // as such a previous value could not appear directly in the addition:
2077 // it could not be stored in lhs as the lhs add/sub executes in every
2078 // iteration, and if it were stored in another variable its use here would
2079 // be as an operand to a phi node for that variable.
2080 if (lhsModified.term != lhs.term) {
2084 LinearSum iterationBound(alloc());
2085 LinearSum currentIteration(alloc());
2087 if (lhsModified.constant == 1 && !lessEqual) {
2088 // The value of lhs is 'initial(lhs) + iterCount' and this will end
2089 // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
2090 // on the number of backedges executed is:
2092 // initial(lhs) + iterCount + lhsN == rhs
2093 // iterCount == rhs - initial(lhs) - lhsN
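// For example (illustrative values only): in a loop guarded by 'i < n'
// where i starts at 0 and increases by 1 each iteration, lhsN is 0 and
// initial(lhs) is 0, so the symbolic backedge bound built below is
// simply 'n' (iterCount == n - 0 - 0).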
2096 if (!iterationBound.add(rhs, 1)) {
2100 if (!iterationBound.add(lhsInitial, -1)) {
2104 int32_t lhsConstant;
2105 if (!SafeSub(0, lhs.constant, &lhsConstant)) {
2108 if (!iterationBound.add(lhsConstant)) {
2112 if (!currentIteration.add(lhs.term, 1)) {
2115 if (!currentIteration.add(lhsInitial, -1)) {
2118 } else if (lhsModified.constant == -1 && lessEqual) {
2119 // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
2120 // case, an upper bound on the number of backedges executed is:
2122 // initial(lhs) - iterCount + lhsN == rhs
2123 // iterCount == initial(lhs) - rhs + lhsN
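// For example (illustrative values only): for a count-down loop such as
// 'for (i = n; i > 0; i--)', initial(lhs) is n, rhs is 0 and lhsN is 0,
// so the formula above gives a symbolic bound of n backedges.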
2125 if (!iterationBound.add(lhsInitial, 1)) {
2129 if (!iterationBound.add(rhs, -1)) {
2133 if (!iterationBound.add(lhs.constant)) {
2137 if (!currentIteration.add(lhsInitial, 1)) {
2140 if (!currentIteration.add(lhs.term, -1)) {
2147 return new (alloc())
2148     LoopIterationBound(header, test, iterationBound, currentIteration);
2151 void RangeAnalysis::analyzeLoopPhi(LoopIterationBound* loopBound, MPhi* phi) {
2152 // Given a bound on the number of backedges taken, compute an upper and
2153 // lower bound for a phi node that may change by a constant amount each
2154 // iteration. Unlike for the case when computing the iteration bound
2155 // itself, the phi does not need to change the same amount every iteration,
2156 // but is required to change at most N and be either nondecreasing or nonincreasing.
2159 MOZ_ASSERT(phi->numOperands() == 2);
2161 MDefinition* initial = phi->getLoopPredecessorOperand();
2162 if (initial->block()->isMarked()) {
2166 SimpleLinearSum modified =
2167     ExtractLinearSum(phi->getLoopBackedgeOperand(), MathSpace::Infinite);
2169 if (modified.term != phi || modified.constant == 0) {
2173 if (!phi->range()) {
2174 phi->setRange(new (alloc()) Range(phi));
2177 LinearSum initialSum(alloc());
2178 if (!initialSum.add(initial, 1)) {
2182 // The phi may change by N each iteration, and is either nondecreasing or
2183 // nonincreasing. initial(phi) is either a lower or upper bound for the
2184 // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
2185 // at all points within the loop, provided that loopBound >= 0.
2187 // We are more interested, however, in the bound for phi at points
2188 // dominated by the loop bound's test; if the test dominates e.g. a bounds
2189 // check we want to hoist from the loop, using the value of the phi at the
2190 // head of the loop for this will usually be too imprecise to hoist the
2191 // check. These points will execute only if the backedge executes at least
2192 // one more time (as the test passed and the test dominates the backedge),
2193 // so we know both that loopBound >= 1 and that the phi's value has changed
2194 // at most loopBound - 1 times. Thus, another upper or lower bound for the
2195 // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
2196 // ensure that loopBound >= 0.
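// For example (illustrative values only): a phi starting at 0 and
// increasing by N = 1 each iteration, in a loop whose backedge bound is
// 'n', is bounded above by 0 + (n - 1) * 1 == n - 1 at points dominated
// by the loop test, which is precise enough to hoist a bounds check
// against a length of 'n'.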
2198 LinearSum limitSum(loopBound->boundSum);
2199 if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum)) {
2203 int32_t negativeConstant;
2204 if (!SafeSub(0, modified.constant, &negativeConstant) ||
2205     !limitSum.add(negativeConstant)) {
2209 Range* initRange = initial->range();
2210 if (modified.constant > 0) {
2211 if (initRange && initRange->hasInt32LowerBound()) {
2212 phi->range()->refineLower(initRange->lower());
2214 phi->range()->setSymbolicLower(
2215     SymbolicBound::New(alloc(), nullptr, initialSum));
2216 phi->range()->setSymbolicUpper(
2217     SymbolicBound::New(alloc(), loopBound, limitSum));
2219 if (initRange && initRange->hasInt32UpperBound()) {
2220 phi->range()->refineUpper(initRange->upper());
2222 phi->range()->setSymbolicUpper(
2223     SymbolicBound::New(alloc(), nullptr, initialSum));
2224 phi->range()->setSymbolicLower(
2225     SymbolicBound::New(alloc(), loopBound, limitSum));
2228 JitSpew(JitSpew_Range, "added symbolic range on %u", phi->id());
2232 // Whether bound is valid at the specified bounds check instruction in a loop,
2233 // and may be used to hoist ins.
2234 static inline bool SymbolicBoundIsValid(MBasicBlock* header, MBoundsCheck* ins,
2235                                         const SymbolicBound* bound) {
2239 if (ins->block() == header) {
2242 MBasicBlock* bb = ins->block()->immediateDominator();
2243 while (bb != header && bb != bound->loop->test->block()) {
2244 bb = bb->immediateDominator();
2246 return bb == bound->loop->test->block();
2249 bool RangeAnalysis::tryHoistBoundsCheck(MBasicBlock* header,
2250                                         MBoundsCheck* ins) {
2251 // The bounds check's length must be loop invariant or a constant.
2252 MDefinition* length = DefinitionOrBetaInputDefinition(ins->length());
2253 if (length->block()->isMarked() && !length->isConstant()) {
2257 // The bounds check's index should not be loop invariant (else we would
2258 // already have hoisted it during LICM).
2259 SimpleLinearSum index = ExtractLinearSum(ins->index());
2260 if (!index.term || !index.term->block()->isMarked()) {
2264 // Check for a symbolic lower and upper bound on the index. If either
2265 // condition depends on an iteration bound for the loop, only hoist if
2266 // the bounds check is dominated by the iteration bound's test.
2267 if (!index.term->range()) {
2270 const SymbolicBound* lower = index.term->range()->symbolicLower();
2271 if (!lower || !SymbolicBoundIsValid(header, ins, lower)) {
2274 const SymbolicBound* upper = index.term->range()->symbolicUpper();
2275 if (!upper || !SymbolicBoundIsValid(header, ins, upper)) {
2279 MBasicBlock* preLoop = header->loopPredecessor();
2280 MOZ_ASSERT(!preLoop->isMarked());
2282 MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum);
2287 MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum);
2292 // We are checking that index + indexConstant >= 0, and know that
2293 // index >= lowerTerm + lowerConstant. Thus, check that:
2295 // lowerTerm + lowerConstant + indexConstant >= 0
2296 // lowerTerm >= -lowerConstant - indexConstant
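// For example (illustrative values only): if the index is 'i - 1' (so
// indexConstant is -1) and the symbolic lower bound on 'i' is
// 'start + 0', the hoisted check becomes 'start >= -0 - (-1)', i.e.
// 'start >= 1'.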
2298 int32_t lowerConstant = 0;
2299 if (!SafeSub(lowerConstant, index.constant, &lowerConstant)) {
2302 if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant)) {
2306 // We are checking that index < boundsLength, and know that
2307 // index <= upperTerm + upperConstant. Thus, check that:
2309 // upperTerm + upperConstant < boundsLength
2311 int32_t upperConstant = index.constant;
2312 if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant)) {
2316 // Hoist the loop invariant lower bounds checks.
2317 MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
2318 lowerCheck->setMinimum(lowerConstant);
2319 lowerCheck->computeRange(alloc());
2320 lowerCheck->collectRangeInfoPreTrunc();
2321 preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
2323 // Hoist the loop invariant upper bounds checks.
2324 if (upperTerm != length || upperConstant >= 0) {
2325 // Hoist the bound check's length if it isn't already loop invariant.
2326 if (length->block()->isMarked()) {
2327 MOZ_ASSERT(length->isConstant());
2328 MInstruction* lengthIns = length->toInstruction();
2329 lengthIns->block()->moveBefore(preLoop->lastIns(), lengthIns);
2332 MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
2333 upperCheck->setMinimum(upperConstant);
2334 upperCheck->setMaximum(upperConstant);
2335 upperCheck->computeRange(alloc());
2336 upperCheck->collectRangeInfoPreTrunc();
2337 preLoop->insertBefore(preLoop->lastIns(), upperCheck);
2343 bool RangeAnalysis::analyze() {
2344 JitSpew(JitSpew_Range, "Doing range propagation");
2346 for (ReversePostorderIterator iter(graph_.rpoBegin());
2347      iter != graph_.rpoEnd(); iter++) {
2348 MBasicBlock* block = *iter;
2349 // No blocks are supposed to be unreachable, except when we have an OSR
2350 // block, in which case the Value Numbering phase adds fixup blocks which
2352 MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());
2354 // If the block's immediate dominator is unreachable, the block is
2355 // unreachable. Iterating in RPO, we'll always see the immediate
2356 // dominator before the block.
2357 if (block->immediateDominator()->unreachable()) {
2358 block->setUnreachableUnchecked();
2362 for (MDefinitionIterator iter(block); iter; iter++) {
2363 MDefinition* def = *iter;
2364 if (!alloc().ensureBallast()) {
2368 def->computeRange(alloc());
2369 JitSpew(JitSpew_Range, "computing range on %u", def->id());
2373 // Beta node range analysis may have marked this block unreachable. If
2374 // so, it's no longer interesting to continue processing it.
2375 if (block->unreachable()) {
2379 if (block->isLoopHeader()) {
2380 if (!analyzeLoop(block)) {
2385 // First pass at collecting range info - while the beta nodes are still
2386 // around and before truncation.
2387 for (MInstructionIterator iter(block->begin()); iter != block->end();
2389 iter->collectRangeInfoPreTrunc();
2396 bool RangeAnalysis::addRangeAssertions() {
2397 if (!JitOptions.checkRangeAnalysis) {
2401 // Check the computed range for this instruction, if the option is set. Note
2402 // that this code is quite invasive; it adds numerous additional
2403 // instructions for each MInstruction with a computed range, and it uses
2404 // registers, so it also affects register allocation.
2405 for (ReversePostorderIterator iter(graph_.rpoBegin());
2406      iter != graph_.rpoEnd(); iter++) {
2407 MBasicBlock* block = *iter;
2409 // Do not add assertions in unreachable blocks.
2410 if (block->unreachable()) {
2414 for (MDefinitionIterator iter(block); iter; iter++) {
2415 MDefinition* ins = *iter;
2417 // Perform range checking for all numeric and numeric-like types.
2418 if (!IsNumberType(ins->type()) && ins->type() != MIRType::Boolean &&
2419     ins->type() != MIRType::Value) {
2423 // MIsNoIter is fused with the MTest that follows it and emitted as
2424 // LIsNoIterAndBranch. Skip it to avoid complicating MIsNoIter
2426 if (ins->isIsNoIter()) {
2432 MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());
2434 // Don't insert assertions if there's nothing interesting to assert.
2435 if (r.isUnknown() ||
2436     (ins->type() == MIRType::Int32 && r.isUnknownInt32())) {
2440 // Don't add a use to an instruction that is recovered on bailout.
2441 if (ins->isRecoveredOnBailout()) {
2445 if (!alloc().ensureBallast()) {
2448 MAssertRange* guard =
2449     MAssertRange::New(alloc(), ins, new (alloc()) Range(r));
2451 // Beta nodes and interrupt checks are required to be located at the
2452 // beginnings of basic blocks, so we must insert range assertions
2453 // after any such instructions.
2454 MInstruction* insertAt = nullptr;
2455 if (block->graph().osrBlock() == block) {
2456 insertAt = ins->toInstruction();
2458 insertAt = block->safeInsertTop(ins);
2461 if (insertAt == *iter) {
2462 block->insertAfter(insertAt, guard);
2464 block->insertBefore(insertAt, guard);
2472 ///////////////////////////////////////////////////////////////////////////////
2473 // Range based Truncation
2474 ///////////////////////////////////////////////////////////////////////////////
2476 void Range::clampToInt32() {
2480 int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
2481 int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
2485 void Range::wrapAroundToInt32() {
2486 if (!hasInt32Bounds()) {
2487 setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
2488 } else if (canHaveFractionalPart()) {
2489 // Clearing the fractional field may provide an opportunity to refine
2490 // lower_ or upper_.
2491 canHaveFractionalPart_ = ExcludesFractionalParts;
2492 canBeNegativeZero_ = ExcludesNegativeZero;
2493 refineInt32BoundsByExponent(max_exponent_, &lower_, &hasInt32LowerBound_,
2494                             &upper_, &hasInt32UpperBound_);
2498 // If nothing else, we can clear the negative zero flag.
2499 canBeNegativeZero_ = ExcludesNegativeZero;
2501 MOZ_ASSERT(isInt32());
2504 void Range::wrapAroundToShiftCount() {
2505 wrapAroundToInt32();
2506 if (lower() < 0 || upper() >= 32) {
2511 void Range::wrapAroundToBoolean() {
2512 wrapAroundToInt32();
2516 MOZ_ASSERT(isBoolean());
2519 bool MDefinition::needTruncation(TruncateKind kind) {
2520 // No procedure defined for truncating this instruction.
2524 void MDefinition::truncate() {
2525 MOZ_CRASH("No procedure defined for truncating this instruction.");
2528 bool MConstant::needTruncation(TruncateKind kind) {
2529 return IsFloatingPointType(type());
2532 void MConstant::truncate() {
2533 MOZ_ASSERT(needTruncation(Truncate));
2535 // Truncate the double to int, since all uses truncate it.
2536 int32_t res = ToInt32(numberToDouble());
2537 payload_.asBits = 0;
2539 setResultType(MIRType::Int32);
2541 range()->setInt32(res, res);
2545 bool MPhi::needTruncation(TruncateKind kind) {
2546 if (type() == MIRType::Double || type() == MIRType::Int32) {
2547 truncateKind_ = kind;
2554 void MPhi::truncate() {
2555 setResultType(MIRType::Int32);
2556 if (truncateKind_ >= IndirectTruncate && range()) {
2557 range()->wrapAroundToInt32();
2561 bool MAdd::needTruncation(TruncateKind kind) {
2562 // Remember analysis, needed for fallible checks.
2563 setTruncateKind(kind);
2565 return type() == MIRType::Double || type() == MIRType::Int32;
2568 void MAdd::truncate() {
2569 MOZ_ASSERT(needTruncation(truncateKind()));
2570 setSpecialization(MIRType::Int32);
2571 if (truncateKind() >= IndirectTruncate && range()) {
2572 range()->wrapAroundToInt32();
2576 bool MSub::needTruncation(TruncateKind kind) {
2577 // Remember analysis, needed for fallible checks.
2578 setTruncateKind(kind);
2580 return type() == MIRType::Double || type() == MIRType::Int32;
2583 void MSub::truncate() {
2584 MOZ_ASSERT(needTruncation(truncateKind()));
2585 setSpecialization(MIRType::Int32);
2586 if (truncateKind() >= IndirectTruncate && range()) {
2587 range()->wrapAroundToInt32();
2591 bool MMul::needTruncation(TruncateKind kind) {
2592 // Remember analysis, needed for fallible checks.
2593 setTruncateKind(kind);
2595 return type() == MIRType::Double || type() == MIRType::Int32;
2598 void MMul::truncate() {
2599 MOZ_ASSERT(needTruncation(truncateKind()));
2600 setSpecialization(MIRType::Int32);
2601 if (truncateKind() >= IndirectTruncate) {
2602 setCanBeNegativeZero(false);
2604 range()->wrapAroundToInt32();
2609 bool MDiv::needTruncation(TruncateKind kind) {
2610 // Remember analysis, needed for fallible checks.
2611 setTruncateKind(kind);
2613 return type() == MIRType::Double || type() == MIRType::Int32;
2616 void MDiv::truncate() {
2617 MOZ_ASSERT(needTruncation(truncateKind()));
2618 setSpecialization(MIRType::Int32);
2620 // Divisions where the lhs and rhs are unsigned and the result is
2621 // truncated can be lowered more efficiently.
2622 if (unsignedOperands()) {
2623 replaceWithUnsignedOperands();
2628 bool MMod::needTruncation(TruncateKind kind) {
2629 // Remember analysis, needed for fallible checks.
2630 setTruncateKind(kind);
2632 return type() == MIRType::Double || type() == MIRType::Int32;
2635 void MMod::truncate() {
2636 // As for division, handle unsigned modulus with a truncated result.
2637 MOZ_ASSERT(needTruncation(truncateKind()));
2638 setSpecialization(MIRType::Int32);
2640 if (unsignedOperands()) {
2641 replaceWithUnsignedOperands();
2646 bool MToDouble::needTruncation(TruncateKind kind) {
2647 MOZ_ASSERT(type() == MIRType::Double);
2648 setTruncateKind(kind);
2653 void MToDouble::truncate() {
2654 MOZ_ASSERT(needTruncation(truncateKind()));
2656 // We use the return type to flag that this MToDouble should be replaced by
2657 // a MTruncateToInt32 when modifying the graph.
2658 setResultType(MIRType::Int32);
2659 if (truncateKind() >= IndirectTruncate) {
2661 range()->wrapAroundToInt32();
2666 bool MLimitedTruncate::needTruncation(TruncateKind kind) {
2667 setTruncateKind(kind);
2668 setResultType(MIRType::Int32);
2669 if (kind >= IndirectTruncate && range()) {
2670 range()->wrapAroundToInt32();
2675 bool MCompare::needTruncation(TruncateKind kind) {
2676 // If we're compiling wasm, don't try to optimize the comparison type, as
2677 // the code presumably is already using the type it wants. Also, wasm
2678 // doesn't support bailouts, so we wouldn't be able to rely on
2679 // TruncateAfterBailouts to convert our inputs.
2680 if (block()->info().compilingWasm()) {
2684 if (!isDoubleComparison()) {
2688 // If both operands are naturally in the int32 range, we can convert from
2689 // a double comparison to being an int32 comparison.
2690 if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32()) {
2697 void MCompare::truncate() {
2698 compareType_ = Compare_Int32;
2700 // Truncating the operands won't change their value because we don't force a
2701 // truncation, but it will change their type, which we need because we
2702 // now expect integer inputs.
2703 truncateOperands_ = true;
2706 MDefinition::TruncateKind MDefinition::operandTruncateKind(size_t index) const {
2707 // Generic routine: We don't know anything.
2711 MDefinition::TruncateKind MPhi::operandTruncateKind(size_t index) const {
2712 // The truncation applied to a phi is effectively applied to the phi's
2714 return truncateKind_;
2717 MDefinition::TruncateKind MTruncateToInt32::operandTruncateKind(
2718     size_t index) const {
2719 // This operator is an explicit truncate to int32.
2723 MDefinition::TruncateKind MBinaryBitwiseInstruction::operandTruncateKind(
2724     size_t index) const {
2725 // The bitwise operators truncate to int32.
2729 MDefinition::TruncateKind MLimitedTruncate::operandTruncateKind(
2730     size_t index) const {
2731 return std::min(truncateKind(), truncateLimit_);
2734 MDefinition::TruncateKind MAdd::operandTruncateKind(size_t index) const {
2735 // This operator is doing some arithmetic. If its result is truncated,
2736 // it's an indirect truncate for its operands.
2737 return std::min(truncateKind(), IndirectTruncate);
2740 MDefinition::TruncateKind MSub::operandTruncateKind(size_t index) const {
2741 // See the comment in MAdd::operandTruncateKind.
2742 return std::min(truncateKind(), IndirectTruncate);
2745 MDefinition::TruncateKind MMul::operandTruncateKind(size_t index) const {
2746 // See the comment in MAdd::operandTruncateKind.
2747 return std::min(truncateKind(), IndirectTruncate);
2750 MDefinition::TruncateKind MToDouble::operandTruncateKind(size_t index) const {
2751 // MToDouble propagates its truncate kind to its operand.
2752 return truncateKind();
2755 MDefinition::TruncateKind MStoreUnboxedScalar::operandTruncateKind(
2756     size_t index) const {
2757 // An integer store truncates the stored value.
2758 return (index == 2 && isIntegerWrite()) ? Truncate : NoTruncate;
2761 MDefinition::TruncateKind MStoreDataViewElement::operandTruncateKind(
2762     size_t index) const {
2763 // An integer store truncates the stored value.
2764 return (index == 2 && isIntegerWrite()) ? Truncate : NoTruncate;
2767 MDefinition::TruncateKind MStoreTypedArrayElementHole::operandTruncateKind(
2768     size_t index) const {
2769 // An integer store truncates the stored value.
2770 return (index == 3 && isIntegerWrite()) ? Truncate : NoTruncate;
2773 MDefinition::TruncateKind MDiv::operandTruncateKind(size_t index) const {
2774 return std::min(truncateKind(), TruncateAfterBailouts);
2777 MDefinition::TruncateKind MMod::operandTruncateKind(size_t index) const {
2778 return std::min(truncateKind(), TruncateAfterBailouts);
2781 MDefinition::TruncateKind MCompare::operandTruncateKind(size_t index) const {
2782 // If we're doing an int32 comparison on operands which were previously
2783 // floating-point, convert them!
2784 MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
2785 return truncateOperands_ ? TruncateAfterBailouts : NoTruncate;
2788 static bool TruncateTest(TempAllocator& alloc, MTest* test) {
2789 // If all possible inputs to the test are either int32 or boolean,
2790 // convert those inputs to int32 so that an int32 test can be performed.
2792 if (test->input()->type() != MIRType::Value) {
2796 if (!test->input()->isPhi() || !test->input()->hasOneDefUse() ||
2797     test->input()->isImplicitlyUsed()) {
2801 MPhi* phi = test->input()->toPhi();
2802 for (size_t i = 0; i < phi->numOperands(); i++) {
2803 MDefinition* def = phi->getOperand(i);
2804 if (!def->isBox()) {
2807 MDefinition* inner = def->getOperand(0);
2808 if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32) {
2813 for (size_t i = 0; i < phi->numOperands(); i++) {
2814 MDefinition* inner = phi->getOperand(i)->getOperand(0);
2815 if (inner->type() != MIRType::Int32) {
2816 if (!alloc.ensureBallast()) {
2819 MBasicBlock* block = inner->block();
2820 inner = MToNumberInt32::New(alloc, inner);
2821 block->insertBefore(block->lastIns(), inner->toInstruction());
2823 MOZ_ASSERT(inner->type() == MIRType::Int32);
2824 phi->replaceOperand(i, inner);
2827 phi->setResultType(MIRType::Int32);
2831 // Truncating an instruction's result is an optimization which implies
2832 // knowing all the uses of an instruction. This implies that if one of
2833 // the uses got removed, then Range Analysis is not allowed to do
2834 // any modification which can change the result, especially if the
2835 // result can be observed.
2837 // This corner case can easily be understood with UCE examples, but it
2838 // might also happen with type inference assumptions. Note: type
2839 // inference implicitly adds branches where other types might be implied.
2841 static bool CloneForDeadBranches(TempAllocator& alloc,
2842                                  MInstruction* candidate) {
2843 // Compare returns a boolean so it doesn't have to be recovered on bailout
2844 // because the output would remain correct.
2845 if (candidate->isCompare()) {
2849 MOZ_ASSERT(candidate->canClone());
2850 if (!alloc.ensureBallast()) {
2854 MDefinitionVector operands(alloc);
2855 size_t end = candidate->numOperands();
2856 if (!operands.reserve(end)) {
2859 for (size_t i = 0; i < end; ++i) {
2860 operands.infallibleAppend(candidate->getOperand(i));
2863 MInstruction* clone = candidate->clone(alloc, operands);
2864 clone->setRange(nullptr);
2866 // Set UseRemoved flag on the cloned instruction in order to chain recover
2867 // instruction for the bailout path.
2868 clone->setUseRemovedUnchecked();
2870 candidate->block()->insertBefore(candidate, clone);
2872 if (!candidate->maybeConstantValue()) {
2873 MOZ_ASSERT(clone->canRecoverOnBailout());
2874 clone->setRecoveredOnBailout();
2877 // Replace the candidate by its recovered on bailout clone within recovered
2878 // instructions and resume points operands.
2879 for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd();) {
2881 MNode* ins = use->consumer();
2882 if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout()) {
2886 use->replaceProducer(clone);
2892 // Examine all the users of |candidate| and determine the most aggressive
2893 // truncate kind that satisfies all of them.
2894 static MDefinition::TruncateKind ComputeRequestedTruncateKind(
2895     MDefinition* candidate, bool* shouldClone) {
2896 bool isCapturedResult =
2897     false;  // Check if used by a recovered instruction or a resume point.
2898 bool isObservableResult =
2899     false;  // Check if it can be read from another frame.
2900 bool isRecoverableResult = true;  // Check if it can safely be reconstructed.
2901 bool hasUseRemoved = candidate->isUseRemoved();
2903 MDefinition::TruncateKind kind = MDefinition::Truncate;
2904 for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd();
2906 if (use->consumer()->isResumePoint()) {
2907 // Truncation is a destructive optimization, as such, we need to pay
2908 // attention to removed branches and not perform destructive
2909 // optimizations if we have no alternative (see the UseRemoved flag).
2911 isCapturedResult = true;
2912 isObservableResult =
2913     isObservableResult ||
2914     use->consumer()->toResumePoint()->isObservableOperand(*use);
2915 isRecoverableResult =
2916     isRecoverableResult &&
2917     use->consumer()->toResumePoint()->isRecoverableOperand(*use);
2921 MDefinition* consumer = use->consumer()->toDefinition();
2922 if (consumer->isRecoveredOnBailout()) {
2923 isCapturedResult = true;
2924 hasUseRemoved = hasUseRemoved || consumer->isUseRemoved();
2928 MDefinition::TruncateKind consumerKind =
2929     consumer->operandTruncateKind(consumer->indexOf(*use));
2930 kind = std::min(kind, consumerKind);
2931 if (kind == MDefinition::NoTruncate) {
2936 // We cannot do full truncation on guarded instructions.
2937 if (candidate->isGuard() || candidate->isGuardRangeBailouts()) {
2938 kind = std::min(kind, MDefinition::TruncateAfterBailouts);
2941 // If the value naturally produces an int32 value (before bailout checks)
2942 // that needs no conversion, we don't have to worry about resume points
2943 // seeing truncated values.
2944 bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
2946 // If the instruction is explicitly truncated (not indirectly) by all its
2947 // uses and if it has no removed uses, then we can safely encode its
2948 // truncated result as part of the resume point operands. This is safe,
2949 // because even if we resume with a truncated double, the next baseline
2950 // instruction operating on this instruction is going to be a no-op.
2952 // Note that if the result can be observed from another frame, then this
2953 // optimization is not safe.
2954 bool safeToConvert =
2955     kind == MDefinition::Truncate && !hasUseRemoved && !isObservableResult;
2957 // If the candidate instruction appears as an operand of a resume point or a
2958 // recover instruction, and we have to truncate its result, then we might
2959 // have to either recover the result during the bailout, or avoid the truncation.
2961 if (isCapturedResult && needsConversion && !safeToConvert) {
2962 // If the result can be recovered from all the resume points (not needed
2963 // for iterating over the inlined frames), and this instruction can be
2964 // recovered on bailout, then we can clone it and use the cloned
2965 // instruction to encode the recover instruction. Otherwise, we should
2966 // keep the original result and bailout if the value is not in the int32 range.
2968 if (!JitOptions.disableRecoverIns && isRecoverableResult &&
2969     candidate->canRecoverOnBailout()) {
2970 *shouldClone = true;
2972 kind = std::min(kind, MDefinition::TruncateAfterBailouts);
2979 static MDefinition::TruncateKind ComputeTruncateKind(MDefinition* candidate,
2980                                                      bool* shouldClone) {
2981 // Compare operations might coerce their inputs to int32 if the ranges are
2982 // correct. So we do not need to check if all uses are coerced.
2983 if (candidate->isCompare()) {
2984 return MDefinition::TruncateAfterBailouts;
2987 // Set the truncated flag if range analysis ensures that it has no
2988 // rounding errors and no fractional part. Note that we can't use
2989 // the MDefinition Range constructor, because we need to know if
2990 // the value will have rounding errors before any bailout checks.
2991 const Range* r = candidate->range();
2992 bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
2994 // Special case integer division and modulo: a/b can be infinite, and a%b
2995 // can be NaN but cannot actually have rounding errors induced by truncation.
2996 if ((candidate->isDiv() || candidate->isMod()) &&
2997     candidate->type() == MIRType::Int32) {
2998 canHaveRoundingErrors = false;
3001 if (canHaveRoundingErrors) {
3002 return MDefinition::NoTruncate;
3005 // Ensure all observable uses are truncated.
3006 return ComputeRequestedTruncateKind(candidate, shouldClone);
3009 static void RemoveTruncatesOnOutput(MDefinition* truncated) {
3010 // Compare returns a boolean so it doesn't have any output truncates.
3011 if (truncated->isCompare()) {
3015 MOZ_ASSERT(truncated->type() == MIRType::Int32);
3016 MOZ_ASSERT(Range(truncated).isInt32());
3018 for (MUseDefIterator use(truncated); use; use++) {
3019 MDefinition* def = use.def();
3020 if (!def->isTruncateToInt32() || !def->isToNumberInt32()) {
3024 def->replaceAllUsesWith(truncated);
3028 static void AdjustTruncatedInputs(TempAllocator& alloc,
3029                                   MDefinition* truncated) {
3030 MBasicBlock* block = truncated->block();
3031 for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
3032 MDefinition::TruncateKind kind = truncated->operandTruncateKind(i);
3033 if (kind == MDefinition::NoTruncate) {
3037 MDefinition* input = truncated->getOperand(i);
3038 if (input->type() == MIRType::Int32) {
3042 if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
3043 truncated->replaceOperand(i, input->getOperand(0));
3046 if (kind == MDefinition::TruncateAfterBailouts) {
3047 op = MToNumberInt32::New(alloc, truncated->getOperand(i));
3049 op = MTruncateToInt32::New(alloc, truncated->getOperand(i));
3052 if (truncated->isPhi()) {
3053 MBasicBlock* pred = block->getPredecessor(i);
3054 pred->insertBefore(pred->lastIns(), op);
3056 block->insertBefore(truncated->toInstruction(), op);
3058 truncated->replaceOperand(i, op);
3062 if (truncated->isToDouble()) {
3063 truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
3064 block->discard(truncated->toToDouble());
3068 // Iterate backward over all instructions and attempt to truncate operations
3069 // for each instruction which respects the following list of predicates: it has
3070 // been analyzed by range analysis, the range has no rounding errors, and all
3071 // uses are truncating the result.
3073 // If the truncation of the operation is successful, then the instruction is
3074 // queued for later updating of the graph to restore type correctness by
3075 // converting the operands that need to be truncated.
3077 // We iterate backward because it is likely that a truncated operation
3078 // truncates some of its operands.
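// For example, in '(a + b) | 0' every use of the add is truncating (the
// bitwise-or only observes the low 32 bits), so the add itself can be
// specialized to int32 and allowed to wrap around instead of being
// computed as a double.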
3079 bool RangeAnalysis::truncate() {
3080 JitSpew(JitSpew_Range, "Do range-based truncation (backward loop)");
3082 // Automatic truncation is disabled for wasm because the truncation logic
3083 // is based on IonMonkey which assumes that we can bailout if the truncation
3084 // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
3085 // any automatic truncations.
3086 MOZ_ASSERT(!mir->compilingWasm());
3088 Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
3090 for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd();
3092 for (MInstructionReverseIterator iter(block->rbegin());
3093      iter != block->rend(); iter++) {
3094 if (iter->isRecoveredOnBailout()) {
3098 if (iter->type() == MIRType::None) {
3099 if (iter->isTest()) {
3100 if (!TruncateTest(alloc(), iter->toTest())) {
3107 // Remember all bitop instructions for folding after range analysis.
3108 switch (iter->op()) {
3109 case MDefinition::Opcode::BitAnd:
3110 case MDefinition::Opcode::BitOr:
3111 case MDefinition::Opcode::BitXor:
3112 case MDefinition::Opcode::Lsh:
3113 case MDefinition::Opcode::Rsh:
3114 case MDefinition::Opcode::Ursh:
3115 if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter))) {
3122 bool shouldClone = false;
3123 MDefinition::TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
3124 if (kind == MDefinition::NoTruncate) {
3128 // Range Analysis is sometimes eager to do optimizations, even if we
3129 // are not able to truncate an instruction. In such cases, we
3130 // speculatively compile the instruction to an int32 instruction
3131 // while adding a guard. This is what is implied by
3132 // TruncateAfterBailouts.
3134 // If we already experienced an overflow bailout while executing
3135 // code within the current JSScript, we no longer attempt to make
3136 // this kind of eager optimization.
3137 if (kind <= MDefinition::TruncateAfterBailouts &&
3138     block->info().hadOverflowBailout()) {
3142 // Truncate this instruction if possible.
3143 if (!iter->needTruncation(kind)) {
3147 SpewTruncate(*iter, kind, shouldClone);
3149 // If needed, clone the current instruction to keep it for the
3150 // bailout path. This gives us the ability to truncate instructions
3151 // even after the removal of branches.
3152 if (shouldClone && !CloneForDeadBranches(alloc(), *iter)) {
3158 // Delay updates of inputs/outputs to avoid creating nodes which
3159 // would be removed by the truncation of the next operations.
3160 iter->setInWorklist();
3161 if (!worklist.append(*iter)) {
3165 for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
3166      iter != end; ++iter) {
3167 bool shouldClone = false;
3168 MDefinition::TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
3169 if (kind == MDefinition::NoTruncate) {
3173 // Truncate this phi if possible.
3174 if (shouldClone || !iter->needTruncation(kind)) {
3178 SpewTruncate(*iter, kind, shouldClone);
3182 // Delay updates of inputs/outputs to avoid creating nodes which
3183 // would be removed by the truncation of the next operations.
3184 iter->setInWorklist();
3185 if (!worklist.append(*iter)) {
3191 // Update inputs/outputs of truncated instructions.
3192 JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
3193 while (!worklist.empty()) {
3194 if (!alloc().ensureBallast()) {
3197 MDefinition* def = worklist.popCopy();
3198 def->setNotInWorklist();
3199 RemoveTruncatesOnOutput(def);
3200 AdjustTruncatedInputs(alloc(), def);
3206 bool RangeAnalysis::removeUnnecessaryBitops() {
3207 // Note: This operation changes the semantics of the program in a way which
3208 // only works with Int32. Recover Instructions added by the Sink phase
3209 // expect the MIR Graph to still have valid flow as if they were double
3210 // operations instead of Int32 operations. Thus, this phase should be
3211 // executed after the Sink phase, and before DCE.
3213 // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
3214 // input. This is done after range analysis rather than during GVN as the
3215 // presence of the bitop can change which instructions are truncated.
3216 for (size_t i = 0; i < bitops.length(); i++) {
3217 MBinaryBitwiseInstruction* ins = bitops[i];
3218 if (ins->isRecoveredOnBailout()) {
3222 MDefinition* folded = ins->foldUnnecessaryBitop();
3223 if (folded != ins) {
3224 ins->replaceAllLiveUsesWith(folded);
3225 ins->setRecoveredOnBailout();
3233 ///////////////////////////////////////////////////////////////////////////////
3234 // Collect Range information of operands
3235 ///////////////////////////////////////////////////////////////////////////////
3237 void MInArray::collectRangeInfoPreTrunc() {
3238 Range indexRange(index());
3239 if (indexRange.isFiniteNonNegative()) {
3240 needsNegativeIntCheck_ = false;
3244 void MLoadElementHole::collectRangeInfoPreTrunc() {
3245 Range indexRange(index());
3246 if (indexRange.isFiniteNonNegative()) {
3247 needsNegativeIntCheck_ = false;
3252 void MClz::collectRangeInfoPreTrunc() {
3253 Range inputRange(input());
3254 if (!inputRange.canBeZero()) {
3255 operandIsNeverZero_ = true;
3259 void MCtz::collectRangeInfoPreTrunc() {
3260 Range inputRange(input());
3261 if (!inputRange.canBeZero()) {
3262 operandIsNeverZero_ = true;
3266 void MDiv::collectRangeInfoPreTrunc() {
3267 Range lhsRange(lhs());
3268 Range rhsRange(rhs());
3270 // Test if the dividend is non-negative.
3271 if (lhsRange.isFiniteNonNegative()) {
3272 canBeNegativeDividend_ = false;
3275 // Try removing the divide-by-zero check.
3276 if (!rhsRange.canBeZero()) {
3277 canBeDivideByZero_ = false;
3280 // If lhsRange does not contain INT32_MIN,
3281 // the negative overflow check can be skipped.
3282 if (!lhsRange.contains(INT32_MIN)) {
3283 canBeNegativeOverflow_ = false;
3286 // Likewise if rhsRange does not contain -1.
3287 if (!rhsRange.contains(-1)) {
3288 canBeNegativeOverflow_ = false;
3291 // If lhsRange does not contain zero,
3292 // the negative zero check can be skipped.
3293 if (!lhsRange.canBeZero()) {
3294 canBeNegativeZero_ = false;
3297 // If rhsRange >= 0 the negative zero check can be skipped.
3298 if (rhsRange.isFiniteNonNegative()) {
3299 canBeNegativeZero_ = false;
3303 void MMul::collectRangeInfoPreTrunc() {
3304 Range lhsRange(lhs());
3305 Range rhsRange(rhs());
3307 // If lhsRange contains only positive values, we can skip the negative zero check.
3308 if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero()) {
3309 setCanBeNegativeZero(false);
3312 // Likewise for rhsRange.
3313 if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero()) {
3314 setCanBeNegativeZero(false);
3317 // If rhsRange and lhsRange contain only non-negative integers,
3318 // we can skip the negative zero check.
3319 if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative()) {
3320 setCanBeNegativeZero(false);
3323 // If rhsRange and lhsRange are both negative, we can also skip the negative zero check.
3324 if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative()) {
3325 setCanBeNegativeZero(false);
3329 void MMod::collectRangeInfoPreTrunc() {
3330 Range lhsRange(lhs());
3331 Range rhsRange(rhs());
3332 if (lhsRange.isFiniteNonNegative()) {
3333 canBeNegativeDividend_ = false;
3335 if (!rhsRange.canBeZero()) {
3336 canBeDivideByZero_ = false;
3340 void MToNumberInt32::collectRangeInfoPreTrunc() {
3341 Range inputRange(input());
3342 if (!inputRange.canBeNegativeZero()) {
3343 canBeNegativeZero_ = false;
3347 void MBoundsCheck::collectRangeInfoPreTrunc() {
3348 Range indexRange(index());
3349 Range lengthRange(length());
3350 if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound()) {
3353 if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN()) {
3357 int64_t indexLower = indexRange.lower();
3358 int64_t indexUpper = indexRange.upper();
3359 int64_t lengthLower = lengthRange.lower();
3360 int64_t min = minimum();
3361 int64_t max = maximum();
3363 if (indexLower + min >= 0 && indexUpper + max < lengthLower) {
3368 void MBoundsCheckLower::collectRangeInfoPreTrunc() {
3369 Range indexRange(index());
3370 if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_) {
3375 void MCompare::collectRangeInfoPreTrunc() {
3376 if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN()) {
3377 operandsAreNeverNaN_ = true;
3381 void MNot::collectRangeInfoPreTrunc() {
3382 if (!Range(input()).canBeNaN()) {
3383 operandIsNeverNaN_ = true;
3387 void MPowHalf::collectRangeInfoPreTrunc() {
3388 Range inputRange(input());
3389 if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound()) {
3390 operandIsNeverNegativeInfinity_ = true;
3392 if (!inputRange.canBeNegativeZero()) {
3393 operandIsNeverNegativeZero_ = true;
3395 if (!inputRange.canBeNaN()) {
3396 operandIsNeverNaN_ = true;
3400 void MUrsh::collectRangeInfoPreTrunc() {
3401 if (type() == MIRType::Int64) {
3405 Range lhsRange(lhs()), rhsRange(rhs());
3407 // As in MUrsh::computeRange(), convert the inputs.
3408 lhsRange.wrapAroundToInt32();
3409 rhsRange.wrapAroundToShiftCount();
3411 // If the most significant bit of our result is always going to be zero,
3412 // we can optimize by disabling bailout checks for enforcing an int32 range.
3413 if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1) {
3414 bailoutsDisabled_ = true;
3418 static bool DoesMaskMatchRange(int32_t mask, Range& range) {
3419 // Check if range is positive, because the bitand operator in `(-3) & 0xff`
3420 // can't be eliminated.
3421 if (range.lower() >= 0) {
3422 MOZ_ASSERT(range.isInt32());
3423 // Check that the mask value has all bits set given the range upper bound.
3424 // Note that the upper bound does not have to be exactly the mask value. For
3425 // example, consider `x & 0xfff` where `x` is a uint8. That expression can
3426 // still be optimized to `x`.
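// Continuing that example: the uint8 range has upper bound 255, so below
// bits == 8 and maskNeeded == 0xff; since (0xfff & 0xff) == 0xff, the mask
// covers every bit the value can have and the bitand is redundant.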
3427 int bits = 1 + FloorLog2(range.upper());
3428 uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
3429 if ((mask & maskNeeded) == maskNeeded) {
3437 void MBinaryBitwiseInstruction::collectRangeInfoPreTrunc() {
3438 Range lhsRange(lhs());
3439 Range rhsRange(rhs());
3441 if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
3442     DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange)) {
3443 maskMatchesRightRange = true;
3446 if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
3447     DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange)) {
3448 maskMatchesLeftRange = true;
3452 void MNaNToZero::collectRangeInfoPreTrunc() {
3453 Range inputRange(input());
3455 if (!inputRange.canBeNaN()) {
3456 operandIsNeverNaN_ = true;
3458 if (!inputRange.canBeNegativeZero()) {
3459 operandIsNeverNegativeZero_ = true;
3463 bool RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode) {
3464 *shouldRemoveDeadCode = false;
3466 for (ReversePostorderIterator iter(graph_.rpoBegin());
3467      iter != graph_.rpoEnd(); iter++) {
3468 MBasicBlock* block = *iter;
3470 if (!block->unreachable()) {
3474 // Filter out unreachable fake entries.
3475 if (block->numPredecessors() == 0) {
3476 // Ignore fixup blocks added by the Value Numbering phase, in order
3477 // to keep the dominator tree as-is when we have an OSR block which is
3478 // no longer reachable from the main entry point of the graph.
3479 MOZ_ASSERT(graph_.osrBlock());
3483 MControlInstruction* cond = block->getPredecessor(0)->lastIns();
3484 if (!cond->isTest()) {
3488 // Replace the condition of the test control instruction by a constant
3489 // chosen based on which of the successors has the unreachable flag which is
3490 // added by MBeta::computeRange on its own block.
3491 MTest* test = cond->toTest();
3492 MDefinition* condition = test->input();
3494 // If the false-branch is unreachable, then the test condition must be true.
3495 // If the true-branch is unreachable, then the test condition must be false.
3496 MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
3497 bool value = block == test->ifFalse();
3498 MConstant* constant =
3499     MConstant::New(alloc().fallible(), BooleanValue(value));
3504 condition->setGuardRangeBailoutsUnchecked();
3506 test->block()->insertBefore(test, constant);
3508 test->replaceOperand(0, constant);
3509 JitSpew(JitSpew_Range,
3510         "Update condition of %u to reflect unreachable branches.",
3513 *shouldRemoveDeadCode = true;
3516 return tryRemovingGuards();
3519 bool RangeAnalysis::tryRemovingGuards() {
3520 MDefinitionVector guards(alloc());
3522 for (ReversePostorderIterator block = graph_.rpoBegin();
3523      block != graph_.rpoEnd(); block++) {
3524 for (MDefinitionIterator iter(*block); iter; iter++) {
3525 if (!iter->isGuardRangeBailouts()) {
3529 iter->setInWorklist();
3530 if (!guards.append(*iter)) {
3536 // Flag all fallible instructions which were indirectly used in the
3537 // computation of the condition, such that we do not ignore
3538 // bailout-paths which are used to shrink the input range of the
3539 // operands of the condition.
3540 for (size_t i = 0; i < guards.length(); i++) {
3541 MDefinition* guard = guards[i];
3543 // If this ins is a guard even without guardRangeBailouts,
3544 // there is no point in trying to hoist the guardRangeBailouts check.
3545 guard->setNotGuardRangeBailouts();
3546 if (!DeadIfUnused(guard)) {
3547 guard->setGuardRangeBailouts();
3550 guard->setGuardRangeBailouts();
3552 if (!guard->isPhi()) {
3553 if (!guard->range()) {
3557 // Filter the range of the instruction based on its MIRType.
3558 Range typeFilteredRange(guard);
3560 // If the output range is updated by adding the inner range,
3561 // then the MIRType acts as an effectful filter. As we do not know
3562 // whether this filtered Range might change the result of the
3563 // previous comparison, we have to keep this instruction as a guard
3564 // because it has to bailout in order to restrict the Range to its MIRType.
3566 if (typeFilteredRange.update(guard->range())) {
3571 guard->setNotGuardRangeBailouts();
3573 // Propagate the guard to its operands.
3574 for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
3575 MDefinition* operand = guard->getOperand(op);
3578 if (operand->isInWorklist()) {
3582 MOZ_ASSERT(!operand->isGuardRangeBailouts());
3584 operand->setInWorklist();
3585 operand->setGuardRangeBailouts();
3586 if (!guards.append(operand)) {
3592 for (size_t i = 0; i < guards.length(); i++) {
3593 MDefinition* guard = guards[i];
3594 guard->setNotInWorklist();