/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/RangeAnalysis.h"

#include "mozilla/MathAlgorithms.h"

#include "jit/CompileInfo.h"
#include "jit/IonAnalysis.h"
#include "jit/JitSpewer.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "util/CheckedArithmetic.h"
#include "util/Unicode.h"
#include "vm/ArgumentsObject.h"
#include "vm/TypedArrayObject.h"
#include "vm/Uint8Clamped.h"

#include "vm/BytecodeUtil-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;

using mozilla::Abs;
using mozilla::CountLeadingZeroes32;
using mozilla::ExponentComponent;
using mozilla::FloorLog2;
using mozilla::IsNegativeZero;
using mozilla::NegativeInfinity;
using mozilla::NumberEqualsInt32;
using mozilla::PositiveInfinity;

// [SMDOC] IonMonkey Range Analysis
//
// This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
//
// We associate a range object with each SSA name, and the ranges are consulted
// in order to determine whether overflow is possible for arithmetic
// computations.
//
// An important source of range information that requires care to take
// advantage of is conditional control flow. Consider the code below:
//
// if (x < 0) {
//   y = x + 2000000000;
// } else {
//   if (x < 1000000000) {
//     y = x * 2;
//   } else {
//     y = x - 3000000000;
//   }
// }
//
// The arithmetic operations in this code cannot overflow, but it is not
// sufficient to simply associate each name with a range, since the information
// differs between basic blocks. The traditional dataflow approach would be to
// associate ranges with (name, basic block) pairs. This solution is not
// satisfying, since we lose the benefit of SSA form: in SSA form, each
// definition has a unique name, so there is no need to track information about
// the control flow of the program.
//
// The approach used here is to add a new form of pseudo operation called a
// beta node, which associates range information with a value. These beta
// instructions take one argument and additionally have an auxiliary constant
// range associated with them. Operationally, beta nodes are just copies, but
// the invariant expressed by beta node copies is that the output will fall
// inside the range given by the beta node. Gough and Klaeren refer to SSA
// extended with these beta nodes as XSA form. The following shows the example
// code transformed into XSA form:
//
// if (x < 0) {
//   x1 = Beta(x, [INT_MIN, -1]);
//   y1 = x1 + 2000000000;
// } else {
//   x2 = Beta(x, [0, INT_MAX]);
//   if (x2 < 1000000000) {
//     x3 = Beta(x2, [INT_MIN, 999999999]);
//     y2 = x3 * 2;
//   } else {
//     x4 = Beta(x2, [1000000000, INT_MAX]);
//     y3 = x4 - 3000000000;
//   }
// }
//
// We insert beta nodes for the purposes of range analysis (they might also be
// usefully used for other forms of bounds check elimination) and remove them
// after range analysis is performed. The remaining compiler phases do not ever
// encounter beta nodes.

static bool IsDominatedUse(MBasicBlock* block, MUse* use) {
  MNode* n = use->consumer();
  bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();

  if (isPhi) {
    MPhi* phi = n->toDefinition()->toPhi();
    return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
  }

  return block->dominates(n->block());
}

#ifdef JS_JITSPEW
static inline void SpewRange(MDefinition* def) {
  if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None &&
      def->range()) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    out.printf("  ");
    def->printName(out);
    out.printf(" has range ");
    def->range()->dump(out);
    out.printf("\n");
  }
}

static const char* TruncateKindString(TruncateKind kind) {
  switch (kind) {
    case TruncateKind::NoTruncate:
      return "NoTruncate";
    case TruncateKind::TruncateAfterBailouts:
      return "TruncateAfterBailouts";
    case TruncateKind::IndirectTruncate:
      return "IndirectTruncate";
    case TruncateKind::Truncate:
      return "Truncate";
    default:
      MOZ_CRASH("Unknown truncate kind.");
  }
}

static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
                                bool shouldClone) {
  if (JitSpewEnabled(JitSpew_Range)) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    out.printf("  ");
    out.printf("truncating ");
    def->printName(out);
    out.printf(" (kind: %s, clone: %d)\n", TruncateKindString(kind),
               shouldClone);
  }
}
#else
static inline void SpewRange(MDefinition* def) {}
static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
                                bool shouldClone) {}
#endif

TempAllocator& RangeAnalysis::alloc() const { return graph_.alloc(); }

void RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig,
                                             MDefinition* dom,
                                             MBasicBlock* block) {
  for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd();) {
    MUse* use = *i++;
    if (use->consumer() != dom && IsDominatedUse(block, use)) {
      use->replaceProducer(dom);
    }
  }
}

bool RangeAnalysis::addBetaNodes() {
  JitSpew(JitSpew_Range, "Adding beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    JitSpew(JitSpew_Range, "Looking at block %u", block->id());

    BranchDirection branch_dir;
    MTest* test = block->immediateDominatorBranch(&branch_dir);

    if (!test || !test->getOperand(0)->isCompare()) {
      continue;
    }

    MCompare* compare = test->getOperand(0)->toCompare();

    if (!compare->isNumericComparison()) {
      continue;
    }

    // TODO: support unsigned comparisons
    if (compare->compareType() == MCompare::Compare_UInt32) {
      continue;
    }

    // isNumericComparison should return false for UIntPtr.
    MOZ_ASSERT(compare->compareType() != MCompare::Compare_UIntPtr);

    MDefinition* left = compare->getOperand(0);
    MDefinition* right = compare->getOperand(1);
    double bound;
    double conservativeLower = NegativeInfinity<double>();
    double conservativeUpper = PositiveInfinity<double>();
    MDefinition* val = nullptr;

    JSOp jsop = compare->jsop();

    if (branch_dir == FALSE_BRANCH) {
      jsop = NegateCompareOp(jsop);
      conservativeLower = GenericNaN();
      conservativeUpper = GenericNaN();
    }

    MConstant* leftConst = left->maybeConstantValue();
    MConstant* rightConst = right->maybeConstantValue();
    if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
      bound = leftConst->numberToDouble();
      val = right;
      jsop = ReverseCompareOp(jsop);
    } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
      bound = rightConst->numberToDouble();
      val = left;
    } else if (left->type() == MIRType::Int32 &&
               right->type() == MIRType::Int32) {
      MDefinition* smaller = nullptr;
      MDefinition* greater = nullptr;
      if (jsop == JSOp::Lt) {
        smaller = left;
        greater = right;
      } else if (jsop == JSOp::Gt) {
        smaller = right;
        greater = left;
      }
      if (smaller && greater) {
        if (!alloc().ensureBallast()) {
          return false;
        }

        MBeta* beta;

        beta = MBeta::New(
            alloc(), smaller,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(smaller, beta, block);
        JitSpew(JitSpew_Range, "  Adding beta node for smaller %u",
                smaller->id());

        beta = MBeta::New(
            alloc(), greater,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(greater, beta, block);
        JitSpew(JitSpew_Range, "  Adding beta node for greater %u",
                greater->id());
      }
      continue;
    } else {
      continue;
    }

    // At this point, one of the operands of the compare is a constant, and
    // val is the other operand.
    MOZ_ASSERT(val);

    Range comp;
    switch (jsop) {
      case JSOp::Le:
        comp.setDouble(conservativeLower, bound);
        break;
      case JSOp::Lt:
        // For integers, if x < c, the upper bound of x is c-1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeSub(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(conservativeLower, bound);

        // Negative zero is not less than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::Ge:
        comp.setDouble(bound, conservativeUpper);
        break;
      case JSOp::Gt:
        // For integers, if x > c, the lower bound of x is c+1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeAdd(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(bound, conservativeUpper);

        // Negative zero is not greater than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::StrictEq:
      case JSOp::Eq:
        comp.setDouble(bound, bound);
        break;
      case JSOp::StrictNe:
      case JSOp::Ne:
        // Negative zero is not not-equal to zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
          break;
        }
        continue;  // well, we could have
                   // [-\inf, bound-1] U [bound+1, \inf] but we only use
                   // contiguous ranges.
      default:
        continue;
    }

    if (JitSpewEnabled(JitSpew_Range)) {
      JitSpewHeader(JitSpew_Range);
      Fprinter& out = JitSpewPrinter();
      out.printf("  Adding beta node for %u with range ", val->id());
      comp.dump(out);
      out.printf("\n");
    }

    if (!alloc().ensureBallast()) {
      return false;
    }

    MBeta* beta = MBeta::New(alloc(), val, new (alloc()) Range(comp));
    block->insertBefore(*block->begin(), beta);
    replaceDominatedUsesWith(val, beta, block);
  }

  return true;
}

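// Illustrative sketch (added commentary, not part of the original file):
// given a dominating test |if (x < 10)| on an int32 x, the true successor
// receives x1 = Beta(x, [INT_MIN, 9]) -- the bound is tightened from 10 to 9
// by the SafeSub path above -- while on the false edge the comparison is
// first negated to x >= 10 and the beta range starts at 10. The GenericNaN()
// conservative bounds used for FALSE_BRANCH reflect that !(x < c) does not
// imply x >= c when x can be NaN.
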
bool RangeAnalysis::removeBetaNodes() {
  JitSpew(JitSpew_Range, "Removing beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    for (MDefinitionIterator iter(*i); iter;) {
      MDefinition* def = *iter++;
      if (def->isBeta()) {
        auto* beta = def->toBeta();
        MDefinition* op = beta->input();
        JitSpew(JitSpew_Range, "  Removing beta node %u for %u", beta->id(),
                op->id());
        beta->justReplaceAllUsesWith(op);
        block->discard(beta);
      } else {
        // We only place Beta nodes at the beginning of basic
        // blocks, so if we see something else, we can move on
        // to the next block.
        break;
      }
    }
  }
  return true;
}

void SymbolicBound::dump(GenericPrinter& out) const {
  if (loop) {
    out.printf("[loop] ");
  }
  sum.dump(out);
}

void SymbolicBound::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

// Test whether the given range's exponent tells us anything that its lower
// and upper bound values don't.
static bool IsExponentInteresting(const Range* r) {
  // If it lacks either a lower or upper bound, the exponent is interesting.
  if (!r->hasInt32Bounds()) {
    return true;
  }

  // Otherwise if there's no fractional part, the lower and upper bounds,
  // which are integers, are perfectly precise.
  if (!r->canHaveFractionalPart()) {
    return false;
  }

  // Otherwise, if the bounds are conservatively rounded across a power-of-two
  // boundary, the exponent may imply a tighter range.
  return FloorLog2(std::max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
}

void Range::dump(GenericPrinter& out) const {
  assertInvariants();

  // Floating-point or Integer subset.
  if (canHaveFractionalPart_) {
    out.printf("F");
  } else {
    out.printf("I");
  }

  out.printf("[");

  if (!hasInt32LowerBound_) {
    out.printf("?");
  } else {
    out.printf("%d", lower_);
  }
  if (symbolicLower_) {
    out.printf(" {");
    symbolicLower_->dump(out);
    out.printf("}");
  }

  out.printf(", ");

  if (!hasInt32UpperBound_) {
    out.printf("?");
  } else {
    out.printf("%d", upper_);
  }
  if (symbolicUpper_) {
    out.printf(" {");
    symbolicUpper_->dump(out);
    out.printf("}");
  }

  out.printf("]");

  bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
  bool includesNegativeInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
  bool includesPositiveInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
  bool includesNegativeZero = canBeNegativeZero_;

  if (includesNaN || includesNegativeInfinity || includesPositiveInfinity ||
      includesNegativeZero) {
    out.printf(" (");
    bool first = true;
    if (includesNaN) {
      first = false;
      out.printf("U NaN");
    }
    if (includesNegativeInfinity) {
      if (!first) {
        out.printf(" ");
      }
      first = false;
      out.printf("U -Infinity");
    }
    if (includesPositiveInfinity) {
      if (!first) {
        out.printf(" ");
      }
      first = false;
      out.printf("U Infinity");
    }
    if (includesNegativeZero) {
      if (!first) {
        out.printf(" ");
      }
      out.printf("U -0");
    }
    out.printf(")");
  }
  if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this)) {
    out.printf(" (< pow(2, %d+1))", max_exponent_);
  }
}

void Range::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

Range* Range::intersect(TempAllocator& alloc, const Range* lhs,
                        const Range* rhs, bool* emptyRange) {
  *emptyRange = false;

  if (!lhs && !rhs) {
    return nullptr;
  }

  if (!lhs) {
    return new (alloc) Range(*rhs);
  }
  if (!rhs) {
    return new (alloc) Range(*lhs);
  }

  int32_t newLower = std::max(lhs->lower_, rhs->lower_);
  int32_t newUpper = std::min(lhs->upper_, rhs->upper_);

  // If upper < lower, then we have conflicting constraints. Consider:
  //
  // if (x < 0) {
  //   if (x > 0) {
  //     [Some code.]
  //   }
  // }
  //
  // In this case, the block is unreachable.
  if (newUpper < newLower) {
    // If both ranges can be NaN, the result can still be NaN.
    if (!lhs->canBeNaN() || !rhs->canBeNaN()) {
      *emptyRange = true;
    }
    return nullptr;
  }

  bool newHasInt32LowerBound =
      lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ && rhs->canBeNegativeZero_);

  uint16_t newExponent = std::min(lhs->max_exponent_, rhs->max_exponent_);

  // NaN is a special value which is neither greater than infinity nor less
  // than negative infinity. When we intersect two ranges like [?, 0] and
  // [0, ?], we can end up thinking we have both a lower and upper bound, even
  // though NaN is still possible. In this case, just be conservative, since
  // any case where we can have NaN is not especially interesting.
  if (newHasInt32LowerBound && newHasInt32UpperBound &&
      newExponent == IncludesInfinityAndNaN) {
    return nullptr;
  }

  // If one of the ranges has a fractional part and the other doesn't, it's
  // possible that we will have computed a newExponent that's more precise
  // than our newLower and newUpper. This is unusual, so we handle it here
  // instead of in optimize().
  //
  // For example, consider the range F[0,1.5]. Range analysis represents the
  // lower and upper bound as integers, so we'd actually have
  // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
  // more precise upper bound than the integer upper bound.
  //
  // When intersecting such a range with an integer range, the fractional part
  // of the range is dropped. The max exponent of 0 remains valid, so the
  // upper bound needs to be adjusted to 1.
  //
  // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
  // the naive intersection is I[2,2], but since the max exponent tells us
  // that the value is always less than 2, the intersection is actually empty.
  if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
      (lhs->canHaveFractionalPart() && newHasInt32LowerBound &&
       newHasInt32UpperBound && newLower == newUpper)) {
    refineInt32BoundsByExponent(newExponent, &newLower, &newHasInt32LowerBound,
                                &newUpper, &newHasInt32UpperBound);

    // If we're intersecting two ranges that don't overlap, this could also
    // push the bounds past each other, since the actual intersection is
    // the empty set.
    if (newLower > newUpper) {
      *emptyRange = true;
      return nullptr;
    }
  }

  return new (alloc)
      Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
            newCanHaveFractionalPart, newMayIncludeNegativeZero, newExponent);
}

void Range::unionWith(const Range* other) {
  int32_t newLower = std::min(lower_, other->lower_);
  int32_t newUpper = std::max(upper_, other->upper_);

  bool newHasInt32LowerBound =
      hasInt32LowerBound_ && other->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      hasInt32UpperBound_ && other->hasInt32UpperBound_;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      canHaveFractionalPart_ || other->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(canBeNegativeZero_ || other->canBeNegativeZero_);

  uint16_t newExponent = std::max(max_exponent_, other->max_exponent_);

  rawInitialize(newLower, newHasInt32LowerBound, newUpper,
                newHasInt32UpperBound, newCanHaveFractionalPart,
                newMayIncludeNegativeZero, newExponent);
}

Range::Range(const MDefinition* def)
    : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
  if (const Range* other = def->range()) {
    // The instruction has range information; use it.
    *this = *other;

    // Simulate the effect of converting the value to its type.
    // Note: we cannot clamp here, since ranges aren't allowed to shrink
    // and truncation can increase range again. So doing wrapAround to
    // mimic a possible truncation.
    switch (def->type()) {
      case MIRType::Int32:
        // MToNumberInt32 cannot truncate. So we can safely clamp.
        if (def->isToNumberInt32()) {
          clampToInt32();
        } else {
          wrapAroundToInt32();
        }
        break;
      case MIRType::Boolean:
        wrapAroundToBoolean();
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        break;
    }
  } else {
    // Otherwise just use type information. We can trust the type here
    // because we don't care what value the instruction actually produces,
    // but what value we might get after we get past the bailouts.
    switch (def->type()) {
      case MIRType::Int32:
        setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
        break;
      case MIRType::Boolean:
        setInt32(0, 1);
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        setUnknown();
        break;
    }
  }

  // As a special case, MUrsh is permitted to claim a result type of
  // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
  // bailouts. If range analysis hasn't ruled out values in
  // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
  // use as either a uint32 or an int32.
  if (!hasInt32UpperBound() && def->isUrsh() &&
      def->toUrsh()->bailoutsDisabled() && def->type() != MIRType::Int64) {
    lower_ = 0;
    hasInt32LowerBound_ = true;
    max_exponent_ = MaxUInt32Exponent;
  }

  assertInvariants();
}

static uint16_t ExponentImpliedByDouble(double d) {
  // Handle the special values.
  if (std::isnan(d)) {
    return Range::IncludesInfinityAndNaN;
  }
  if (std::isinf(d)) {
    return Range::IncludesInfinity;
  }

  // Otherwise take the exponent part and clamp it at zero, since the Range
  // class doesn't track fractional ranges.
  return uint16_t(std::max(int_fast16_t(0), ExponentComponent(d)));
}

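// Worked example (added commentary, not part of the original file):
// ExponentImpliedByDouble(5.0) == 2 since 5.0 == 1.25 * pow(2, 2), while
// ExponentImpliedByDouble(0.25) == 0 because the true exponent -2 is clamped
// to zero; NaN and the infinities map to the sentinel exponents above.
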
void Range::setDouble(double l, double h) {
  MOZ_ASSERT(!(l > h));

  // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
  if (l >= INT32_MIN && l <= INT32_MAX) {
    lower_ = int32_t(::floor(l));
    hasInt32LowerBound_ = true;
  } else if (l >= INT32_MAX) {
    lower_ = INT32_MAX;
    hasInt32LowerBound_ = true;
  } else {
    lower_ = INT32_MIN;
    hasInt32LowerBound_ = false;
  }
  if (h >= INT32_MIN && h <= INT32_MAX) {
    upper_ = int32_t(::ceil(h));
    hasInt32UpperBound_ = true;
  } else if (h <= INT32_MIN) {
    upper_ = INT32_MIN;
    hasInt32UpperBound_ = true;
  } else {
    upper_ = INT32_MAX;
    hasInt32UpperBound_ = false;
  }

  // Infer max_exponent_.
  uint16_t lExp = ExponentImpliedByDouble(l);
  uint16_t hExp = ExponentImpliedByDouble(h);
  max_exponent_ = std::max(lExp, hExp);

  canHaveFractionalPart_ = ExcludesFractionalParts;
  canBeNegativeZero_ = ExcludesNegativeZero;

  // Infer the canHaveFractionalPart_ setting. We can have a
  // fractional part if the range crosses through the neighborhood of zero. We
  // won't have a fractional value if the value is always beyond the point at
  // which double precision can't represent fractional values.
  uint16_t minExp = std::min(lExp, hExp);
  bool includesNegative = std::isnan(l) || l < 0;
  bool includesPositive = std::isnan(h) || h > 0;
  bool crossesZero = includesNegative && includesPositive;
  if (crossesZero || minExp < MaxTruncatableExponent) {
    canHaveFractionalPart_ = IncludesFractionalParts;
  }

  // Infer the canBeNegativeZero_ setting. We can have a negative zero if
  // either bound is zero.
  if (!(l > 0) && !(h < 0)) {
    canBeNegativeZero_ = IncludesNegativeZero;
  }

  optimize();
}

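// Worked example (added commentary, not part of the original file): a call
// like setDouble(1.5, 3.5) yields lower_ == 1 (floor) and upper_ == 4 (ceil),
// max_exponent_ == 1 since 3.5 == 1.75 * pow(2, 1), the fractional-part flag
// set because minExp is below MaxTruncatableExponent, and negative zero
// excluded because l > 0.
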
void Range::setDoubleSingleton(double d) {
  setDouble(d, d);

  // The above setDouble call is for comparisons, and treats negative zero
  // as equal to zero. We're aiming for a minimum range, so we can clear the
  // negative zero flag if the value isn't actually negative zero.
  if (!IsNegativeZero(d)) {
    canBeNegativeZero_ = ExcludesNegativeZero;
  }

  assertInvariants();
}

static inline bool MissingAnyInt32Bounds(const Range* lhs, const Range* rhs) {
  return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
}

Range* Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  int64_t l = (int64_t)lhs->lower_ + (int64_t)rhs->lower_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound()) {
    l = NoInt32LowerBound;
  }

  int64_t h = (int64_t)lhs->upper_ + (int64_t)rhs->upper_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity + -Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  return new (alloc) Range(
      l, h,
      FractionalPartFlag(lhs->canHaveFractionalPart() ||
                         rhs->canHaveFractionalPart()),
      NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeNegativeZero()),
      e);
}

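// Worked example (added commentary, not part of the original file): adding
// [0, INT32_MAX] to itself computes h == 2 * INT32_MAX in 64-bit arithmetic,
// which no longer fits in int32, and bumps the exponent from 30 to 31,
// recording that the sum is still strictly below pow(2, 31+1).
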
Range* Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  int64_t l = (int64_t)lhs->lower_ - (int64_t)rhs->upper_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound()) {
    l = NoInt32LowerBound;
  }

  int64_t h = (int64_t)lhs->upper_ - (int64_t)rhs->lower_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity - Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  return new (alloc) Range(
      l, h,
      FractionalPartFlag(lhs->canHaveFractionalPart() ||
                         rhs->canHaveFractionalPart()),
      NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeZero()), e);
}

Range* Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // If both numbers can be negative, the result can be negative in the whole
  // range.
  if (lhs->lower() < 0 && rhs->lower() < 0) {
    return Range::NewInt32Range(alloc, INT32_MIN,
                                std::max(lhs->upper(), rhs->upper()));
  }

  // Only one of the two numbers can be negative.
  // - The result can't be negative.
  // - The upper bound is the minimum of both upper bounds,
  int32_t lower = 0;
  int32_t upper = std::min(lhs->upper(), rhs->upper());

  // EXCEPT when the upper bound of the non-negative number is the max value,
  // because the negative value can return the whole max value.
  // -1 & 5 = 5
  if (lhs->lower() < 0) {
    upper = rhs->upper();
  }
  if (rhs->lower() < 0) {
    upper = lhs->upper();
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

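// Worked example (added commentary, not part of the original file): for
// lhs = [-2, -1] and rhs = [0, 3], only lhs can be negative, so the result is
// non-negative and the upper bound is widened to rhs->upper() == 3; the bound
// is tight, since (-1 & 3) == 3.
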
Range* Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  // When one operand is always 0 or always -1, it's a special case where we
  // can compute a fully precise result. Handling these up front also
  // protects the code below from calling CountLeadingZeroes32 with a zero
  // operand or from shifting an int32_t by 32.
  if (lhs->lower() == lhs->upper()) {
    if (lhs->lower() == 0) {
      return new (alloc) Range(*rhs);
    }
    if (lhs->lower() == -1) {
      return new (alloc) Range(*lhs);
    }
  }
  if (rhs->lower() == rhs->upper()) {
    if (rhs->lower() == 0) {
      return new (alloc) Range(*lhs);
    }
    if (rhs->lower() == -1) {
      return new (alloc) Range(*rhs);
    }
  }

  // The code below uses CountLeadingZeroes32, which has undefined behavior
  // if its operand is 0. We rely on the code above to protect it.
  MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
  MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
  MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
  MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);

  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;

  if (lhs->lower() >= 0 && rhs->lower() >= 0) {
    // Both operands are non-negative, so the result won't be less than either.
    lower = std::max(lhs->lower(), rhs->lower());
    // The result will have leading zeros where both operands have leading
    // zeros. CountLeadingZeroes32 of a non-negative int32 will at least be 1
    // to account for the sign bit.
    upper = int32_t(UINT32_MAX >> std::min(CountLeadingZeroes32(lhs->upper()),
                                           CountLeadingZeroes32(rhs->upper())));
  } else {
    // The result will have leading ones where either operand has leading ones.
    if (lhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
    if (rhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

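// Worked example (added commentary, not part of the original file): for the
// non-negative ranges lhs = [0, 5] and rhs = [0, 9],
// CountLeadingZeroes32(5) == 29 and CountLeadingZeroes32(9) == 28, so the
// computed upper bound is int32_t(UINT32_MAX >> 28) == 15: a bitwise-or can
// set no bit higher than the highest bit set in either operand.
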
Range* Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  int32_t lhsLower = lhs->lower();
  int32_t lhsUpper = lhs->upper();
  int32_t rhsLower = rhs->lower();
  int32_t rhsUpper = rhs->upper();
  bool invertAfter = false;

  // If either operand is negative, bitwise-negate it, and arrange to negate
  // the result; ~((~x)^y) == x^y. If both are negative the negations on the
  // result cancel each other out; effectively this is (~x)^(~y) == x^y.
  // These transformations reduce the number of cases we have to handle below.
  if (lhsUpper < 0) {
    lhsLower = ~lhsLower;
    lhsUpper = ~lhsUpper;
    std::swap(lhsLower, lhsUpper);
    invertAfter = !invertAfter;
  }
  if (rhsUpper < 0) {
    rhsLower = ~rhsLower;
    rhsUpper = ~rhsUpper;
    std::swap(rhsLower, rhsUpper);
    invertAfter = !invertAfter;
  }

  // Handle cases where lhs or rhs is always zero specially, because they're
  // easy cases where we can be perfectly precise, and because it protects the
  // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
  // undefined behavior.
  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;
  if (lhsLower == 0 && lhsUpper == 0) {
    upper = rhsUpper;
    lower = rhsLower;
  } else if (rhsLower == 0 && rhsUpper == 0) {
    upper = lhsUpper;
    lower = lhsLower;
  } else if (lhsLower >= 0 && rhsLower >= 0) {
    // Both operands are non-negative. The result will be non-negative.
    lower = 0;
    // To compute the upper value, take each operand's upper value and
    // set all bits that don't correspond to leading zero bits in the
    // other to one. For each one, this gives an upper bound for the
    // result, so we can take the minimum between the two.
    unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
    unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
    upper = std::min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                     lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
  }

  // If we bitwise-negated one (but not both) of the operands above, apply the
  // bitwise-negate to the result, completing ~((~x)^y) == x^y.
  if (invertAfter) {
    lower = ~lower;
    upper = ~upper;
    std::swap(lower, upper);
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

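// Worked example (added commentary, not part of the original file): for
// lhs = [0, 3] and rhs = [0, 5], upper == std::min(5 | 3, 3 | 7) == 7, which
// is attained by 2 ^ 5. A negative operand is first bitwise-negated, and
// invertAfter undoes the negation on the result at the end.
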
Range* Range::not_(TempAllocator& alloc, const Range* op) {
  MOZ_ASSERT(op->isInt32());
  return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
}

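// Worked example (added commentary, not part of the original file): ~x is
// -x - 1 and is monotonically decreasing, so the bounds swap; op = [1, 5]
// maps to [~5, ~1] == [-6, -2].
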
Range* Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);

  NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(
      (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
      (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));

  uint16_t exponent;
  if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
    // Two finite values.
    exponent = lhs->numBits() + rhs->numBits() - 1;
    if (exponent > Range::MaxFiniteExponent) {
      exponent = Range::IncludesInfinity;
    }
  } else if (!lhs->canBeNaN() && !rhs->canBeNaN() &&
             !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
             !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) {
    // Two values that multiplied together won't produce a NaN.
    exponent = Range::IncludesInfinity;
  } else {
    // Could be anything.
    exponent = Range::IncludesInfinityAndNaN;
  }

  if (MissingAnyInt32Bounds(lhs, rhs)) {
    return new (alloc)
        Range(NoInt32LowerBound, NoInt32UpperBound, newCanHaveFractionalPart,
              newMayIncludeNegativeZero, exponent);
  }
  int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
  int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
  int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
  int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
  return new (alloc)
      Range(std::min(std::min(a, b), std::min(c, d)),
            std::max(std::max(a, b), std::max(c, d)), newCanHaveFractionalPart,
            newMayIncludeNegativeZero, exponent);
}

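// Worked example (added commentary, not part of the original file): for the
// int32 ranges lhs = [-2, 3] and rhs = [4, 5], the four corner products are
// a == -8, b == -10, c == 12 and d == 15, so the result range is [-10, 15].
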
Range* Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;

  // If the shift doesn't lose bits or shift bits into the sign bit, we
  // can simply compute the correct range by shifting.
  if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) ==
          lhs->lower() &&
      (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) ==
          lhs->upper()) {
    return Range::NewInt32Range(alloc, uint32_t(lhs->lower()) << shift,
                                uint32_t(lhs->upper()) << shift);
  }

  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

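// Worked example (added commentary, not part of the original file):
// [3, 5] << 2 passes the round-trip check above and produces [12, 20], while
// [3, 5] << 30 would shift set bits into or past the sign bit, so the
// conservative full int32 range is returned instead.
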
Range* Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;
  return Range::NewInt32Range(alloc, lhs->lower() >> shift,
                              lhs->upper() >> shift);
}

Range* Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());

  int32_t shift = c & 0x1f;

  // If the value is always non-negative or always negative, we can simply
  // compute the correct range by shifting.
  if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
    return Range::NewUInt32Range(alloc, uint32_t(lhs->lower()) >> shift,
                                 uint32_t(lhs->upper()) >> shift);
  }

  // Otherwise return the most general range after the shift.
  return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}

Range* Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range* Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // Canonicalize the shift range to 0 to 31.
  int32_t shiftLower = rhs->lower();
  int32_t shiftUpper = rhs->upper();
  if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
    shiftLower = 0;
    shiftUpper = 31;
  } else {
    shiftLower &= 0x1f;
    shiftUpper &= 0x1f;
    if (shiftLower > shiftUpper) {
      shiftLower = 0;
      shiftUpper = 31;
    }
  }
  MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);

  // The lhs bounds are signed, thus the minimum is either the lower bound
  // shifted by the smallest shift if negative, or the lower bound shifted by
  // the biggest shift otherwise. And the opposite for the maximum.
  int32_t lhsLower = lhs->lower();
  int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
  int32_t lhsUpper = lhs->upper();
  int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;

  return Range::NewInt32Range(alloc, min, max);
}

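// Worked example (added commentary, not part of the original file): for
// lhs = [-16, 8] and a canonicalized shift range [1, 3], the minimum is
// -16 >> 1 == -8 (a negative bound moves toward zero as the shift grows) and
// the maximum is 8 >> 1 == 4, giving the range [-8, 4].
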
Range* Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  return Range::NewUInt32Range(
      alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}

Range* Range::abs(TempAllocator& alloc, const Range* op) {
  int32_t l = op->lower_;
  int32_t u = op->upper_;
  FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;

  // Abs never produces a negative zero.
  NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;

  return new (alloc) Range(
      std::max(std::max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u), true,
      std::max(std::max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
      op->hasInt32Bounds() && l != INT32_MIN, canHaveFractionalPart,
      canBeNegativeZero, op->max_exponent_);
}

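// Worked example (added commentary, not part of the original file): for
// op = [-5, 3] the bounds evaluate to [max(0, -3), max(3, 5)] == [0, 5]. The
// INT32_MIN special cases guard against negating INT32_MIN, which would
// overflow; such an input pins the affected bound at INT32_MAX instead.
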
Range* Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  return new (alloc) Range(std::min(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                           std::min(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}

Range* Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  return new (alloc) Range(std::max(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                           std::max(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}

Range* Range::floor(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  // Decrement the lower bound of the copied range if op has a fractional part
  // and the lower bound is int32-defined. We also avoid decrementing when op
  // has a fractional part but lower_ >= JSVAL_INT_MAX.
  if (op->canHaveFractionalPart() && op->hasInt32LowerBound()) {
    copy->setLowerInit(int64_t(copy->lower_) - 1);
  }

  // Also refine max_exponent_ because floor may have decremented the int
  // value. If we've got int32-defined bounds, just deduce it using those
  // bounds. But if we don't have them, the value's max_exponent_ may have
  // changed; because we're looking to maintain an over-estimation, we
  // increment it if we can.
  if (copy->hasInt32Bounds())
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  else if (copy->max_exponent_ < MaxFiniteExponent)
    copy->max_exponent_++;

  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}

Range* Range::ceil(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);

  // We need to refine max_exponent_ because ceil may have incremented the int
  // value. If we have got int32 bounds defined, just deduce it using the
  // defined bounds. Else we can just increment its value, as we are looking to
  // maintain an over estimation.
  if (copy->hasInt32Bounds()) {
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  } else if (copy->max_exponent_ < MaxFiniteExponent) {
    copy->max_exponent_++;
  }

  // If the range is definitely above 0 or below -1, we don't need to include
  // -0; otherwise we do.
  copy->canBeNegativeZero_ = ((copy->lower_ > 0) || (copy->upper_ <= -1))
                                 ? copy->canBeNegativeZero_
                                 : IncludesNegativeZero;

  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}

Range* Range::sign(TempAllocator& alloc, const Range* op) {
  if (op->canBeNaN()) {
    return nullptr;
  }

  return new (alloc) Range(std::max(std::min(op->lower_, 1), -1),
                           std::max(std::min(op->upper_, 1), -1),
                           Range::ExcludesFractionalParts,
                           NegativeZeroFlag(op->canBeNegativeZero()), 0);
}

Range* Range::NaNToZero(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  if (copy->canBeNaN()) {
    copy->max_exponent_ = Range::IncludesInfinity;
    if (!copy->canBeZero()) {
      Range zero;
      zero.setDoubleSingleton(0);
      copy->unionWith(&zero);
    }
  }
  copy->refineToExcludeNegativeZero();
  return copy;
}

bool Range::negativeZeroMul(const Range* lhs, const Range* rhs) {
  // The result can only be negative zero if both sides are finite and they
  // have differing signs.
  return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
         (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
}

bool Range::update(const Range* other) {
  bool changed = lower_ != other->lower_ ||
                 hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
                 upper_ != other->upper_ ||
                 hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
                 canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
                 canBeNegativeZero_ != other->canBeNegativeZero_ ||
                 max_exponent_ != other->max_exponent_;
  if (changed) {
    lower_ = other->lower_;
    hasInt32LowerBound_ = other->hasInt32LowerBound_;
    upper_ = other->upper_;
    hasInt32UpperBound_ = other->hasInt32UpperBound_;
    canHaveFractionalPart_ = other->canHaveFractionalPart_;
    canBeNegativeZero_ = other->canBeNegativeZero_;
    max_exponent_ = other->max_exponent_;
    assertInvariants();
  }

  return changed;
}

///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////

void MPhi::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range* range = nullptr;
  for (size_t i = 0, e = numOperands(); i < e; i++) {
    if (getOperand(i)->block()->unreachable()) {
      JitSpew(JitSpew_Range, "Ignoring unreachable input %u",
              getOperand(i)->id());
      continue;
    }

    // Peek at the pre-bailout range so we can take a short-cut; if any of
    // the operands has an unknown range, this phi has an unknown range.
    if (!getOperand(i)->range()) {
      return;
    }

    Range input(getOperand(i));

    if (range) {
      range->unionWith(&input);
    } else {
      range = new (alloc) Range(input);
    }
  }

  setRange(range);
}

void MBeta::computeRange(TempAllocator& alloc) {
  bool emptyRange = false;

  Range opRange(getOperand(0));
  Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
  if (emptyRange) {
    JitSpew(JitSpew_Range, "Marking block for inst %u unreachable", id());
    block()->setUnreachableUnchecked();
  } else {
    setRange(range);
  }
}

void MConstant::computeRange(TempAllocator& alloc) {
  if (isTypeRepresentableAsDouble()) {
    double d = numberToDouble();
    setRange(Range::NewDoubleSingletonRange(alloc, d));
  } else if (type() == MIRType::Boolean) {
    bool b = toBoolean();
    setRange(Range::NewInt32Range(alloc, b, b));
  }
}

void MCharCodeAt::computeRange(TempAllocator& alloc) {
  // ECMA 262 says that the integer will be non-negative and at most 65535.
  setRange(Range::NewInt32Range(alloc, 0, unicode::UTF16Max));
}

void MCodePointAt::computeRange(TempAllocator& alloc) {
  setRange(Range::NewInt32Range(alloc, 0, unicode::NonBMPMax));
}

void MClampToUint8::computeRange(TempAllocator& alloc) {
  setRange(Range::NewUInt32Range(alloc, 0, 255));
}

void MBitAnd::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::and_(alloc, &left, &right));
}

void MBitOr::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::or_(alloc, &left, &right));
}

void MBitXor::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::xor_(alloc, &left, &right));
}

void MBitNot::computeRange(TempAllocator& alloc) {
  if (type() == MIRType::Int64) {
    return;
  }
  MOZ_ASSERT(type() == MIRType::Int32);

  Range op(getOperand(0));
  op.wrapAroundToInt32();

  setRange(Range::not_(alloc, &op));
}

void MLsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::lsh(alloc, &left, c));
    return;
  }

  right.wrapAroundToShiftCount();
  setRange(Range::lsh(alloc, &left, &right));
}

void MRsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::rsh(alloc, &left, c));
    return;
  }

  right.wrapAroundToShiftCount();
  setRange(Range::rsh(alloc, &left, &right));
}

void MUrsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));

  // ursh can be thought of as converting its left operand to uint32, or it
  // can be thought of as converting its left operand to int32, and then
  // reinterpreting the int32 bits as a uint32 value. Both approaches yield
  // the same result. Since we lack support for full uint32 ranges, we use
  // the second interpretation, though it does cause us to be conservative.
  left.wrapAroundToInt32();
  right.wrapAroundToShiftCount();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::ursh(alloc, &left, c));
  } else {
    setRange(Range::ursh(alloc, &left, &right));
  }

  MOZ_ASSERT(range()->lower() >= 0);
}

void MAbs::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range other(getOperand(0));
  Range* next = Range::abs(alloc, &other);
  if (implicitTruncate_) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MFloor::computeRange(TempAllocator& alloc) {
  Range other(getOperand(0));
  setRange(Range::floor(alloc, &other));
}

void MCeil::computeRange(TempAllocator& alloc) {
  Range other(getOperand(0));
  setRange(Range::ceil(alloc, &other));
}

void MClz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MCtz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MPopcnt::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MMinMax::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  setRange(isMax() ? Range::max(alloc, &left, &right)
                   : Range::min(alloc, &left, &right));
}

void MAdd::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  Range* next = Range::add(alloc, &left, &right);
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MSub::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  Range* next = Range::sub(alloc, &left, &right);
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MMul::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  if (canBeNegativeZero()) {
    canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
  }
  Range* next = Range::mul(alloc, &left, &right);
  if (!next->canBeNegativeZero()) {
    canBeNegativeZero_ = false;
  }
  // Truncated multiplications could overflow in both directions
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MMod::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
    return;
  }

  // If RHS can be zero, the result can be NaN.
  if (rhs.lower() <= 0 && rhs.upper() >= 0) {
    return;
  }

  // If both operands are non-negative integers, we can optimize this to an
  // unsigned mod.
  if (type() == MIRType::Int32 && rhs.lower() > 0) {
    bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
                      rhs.canHaveFractionalPart();
    // It is not possible to check that lhs.lower() >= 0, since the range
    // of a ursh with rhs a 0 constant is wrapped around the int32 range in
    // Range::Range(). However, IsUint32Type() will only return true for
    // nodes that lie in the range [0, UINT32_MAX].
    bool hasUint32s =
        IsUint32Type(getOperand(0)) &&
        getOperand(1)->type() == MIRType::Int32 &&
        (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
    if (!hasDoubles || hasUint32s) {
      unsigned_ = true;
    }
  }

  // For unsigned mod, we have to convert both operands to unsigned.
  // Note that we handled the case of a zero rhs above.
  if (unsigned_) {
    // The result of an unsigned mod will never be unsigned-greater than
    // either operand.
    uint32_t lhsBound = std::max<uint32_t>(lhs.lower(), lhs.upper());
    uint32_t rhsBound = std::max<uint32_t>(rhs.lower(), rhs.upper());

    // If either range crosses through -1 as a signed value, it could be
    // the maximum unsigned value when interpreted as unsigned. If the range
    // doesn't include -1, then the simple max value we computed above is
    // correct.
    if (lhs.lower() <= -1 && lhs.upper() >= -1) {
      lhsBound = UINT32_MAX;
    }
    if (rhs.lower() <= -1 && rhs.upper() >= -1) {
      rhsBound = UINT32_MAX;
    }

    // The result will never be equal to the rhs, and we shouldn't have
    // any rounding to worry about.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    --rhsBound;

    // This gives us two upper bounds, so we can take the best one.
    setRange(Range::NewUInt32Range(alloc, 0, std::min(lhsBound, rhsBound)));
    return;
  }

  // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
  // First, the absolute value of the result will always be less than the
  // absolute value of rhs. (And if rhs is zero, the result is NaN).
  int64_t a = Abs<int64_t>(rhs.lower());
  int64_t b = Abs<int64_t>(rhs.upper());
  if (a == 0 && b == 0) {
    return;
  }
  int64_t rhsAbsBound = std::max(a, b);

  // If the value is known to be integer, less-than abs(rhs) is equivalent
  // to less-than-or-equal abs(rhs)-1. This is important for being able to
  // say that the result of x%256 is an 8-bit unsigned number.
  if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) {
    --rhsAbsBound;
  }

  // Next, the absolute value of the result will never be greater than the
  // absolute value of lhs.
  int64_t lhsAbsBound =
      std::max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));

  // This gives us two upper bounds, so we can take the best one.
  int64_t absBound = std::min(lhsAbsBound, rhsAbsBound);

  // Now consider the sign of the result.
  // If lhs is non-negative, the result will be non-negative.
  // If lhs is non-positive, the result will be non-positive.
  int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
  int64_t upper = lhs.upper() <= 0 ? 0 : absBound;

  Range::FractionalPartFlag newCanHaveFractionalPart =
      Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
                                rhs.canHaveFractionalPart());

  // If the lhs can have the sign bit set and we can return a zero, it'll be a
  // negative zero.
  Range::NegativeZeroFlag newMayIncludeNegativeZero =
      Range::NegativeZeroFlag(lhs.canHaveSignBitSet());

  setRange(new (alloc) Range(lower, upper, newCanHaveFractionalPart,
                             newMayIncludeNegativeZero,
                             std::min(lhs.exponent(), rhs.exponent())));
}

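// Worked example (added commentary, not part of the original file): for
// lhs = [-17, 9] and rhs = [8, 8], rhsAbsBound is 8, reduced to 7 for integer
// operands, lhsAbsBound is 17, so absBound == 7 and the result range is
// [-7, 7]. Likewise lhs = [0, INT32_MAX] with rhs = [256, 256] yields
// [0, 255], the x%256 case mentioned above.
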
void MDiv::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
    return;
  }

  // Something simple for now: When dividing by a positive rhs, the result
  // won't be further from zero than lhs.
  if (lhs.lower() >= 0 && rhs.lower() >= 1) {
    setRange(new (alloc) Range(0, lhs.upper(), Range::IncludesFractionalParts,
                               Range::IncludesNegativeZero, lhs.exponent()));
  } else if (unsigned_ && rhs.lower() >= 1) {
    // We shouldn't set the unsigned flag if the inputs can have
    // fractional parts.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    // We shouldn't set the unsigned flag if the inputs can be
    // negative zero.
    MOZ_ASSERT(!lhs.canBeNegativeZero() && !rhs.canBeNegativeZero());
    // Unsigned division by a non-zero rhs will return a uint32 value.
    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
  }
}

void MSqrt::computeRange(TempAllocator& alloc) {
  Range input(getOperand(0));

  // If the operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!input.hasInt32Bounds()) {
    return;
  }

  // Sqrt of a negative non-zero value is NaN.
  if (input.lower() < 0) {
    return;
  }

  // Something simple for now: When taking the sqrt of a positive value, the
  // result won't be further from zero than the input.
  // And, sqrt of an integer may have a fractional part.
  setRange(new (alloc) Range(0, input.upper(), Range::IncludesFractionalParts,
                             input.canBeNegativeZero(), input.exponent()));
}

void MToDouble::computeRange(TempAllocator& alloc) {
  setRange(new (alloc) Range(getOperand(0)));
}

void MToFloat32::computeRange(TempAllocator& alloc) {}

void MTruncateToInt32::computeRange(TempAllocator& alloc) {
  Range* output = new (alloc) Range(getOperand(0));
  output->wrapAroundToInt32();
  setRange(output);
}

void MToNumberInt32::computeRange(TempAllocator& alloc) {
  // No clamping since this computes the range *before* bailouts.
  setRange(new (alloc) Range(getOperand(0)));
}

void MBooleanToInt32::computeRange(TempAllocator& alloc) {
  setRange(Range::NewUInt32Range(alloc, 0, 1));
}

void MLimitedTruncate::computeRange(TempAllocator& alloc) {
  Range* output = new (alloc) Range(input());
  setRange(output);
}

static Range* GetArrayBufferViewRange(TempAllocator& alloc,
                                      Scalar::Type type) {
  switch (type) {
    case Scalar::Uint8Clamped:
    case Scalar::Uint8:
      return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
    case Scalar::Uint16:
      return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
    case Scalar::Uint32:
      return Range::NewUInt32Range(alloc, 0, UINT32_MAX);

    case Scalar::Int8:
      return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
    case Scalar::Int16:
      return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
    case Scalar::Int32:
      return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);

    case Scalar::BigInt64:
    case Scalar::BigUint64:
    case Scalar::Int64:
    case Scalar::Simd128:
    case Scalar::Float32:
    case Scalar::Float64:
    case Scalar::MaxTypedArrayViewType:
      break;
  }
  return nullptr;
}

void MLoadUnboxedScalar::computeRange(TempAllocator& alloc) {
  // We have an Int32 type and if this is a UInt32 load it may produce a value
  // outside of our range, but we have a bailout to handle those cases.
  setRange(GetArrayBufferViewRange(alloc, storageType()));
}

void MLoadDataViewElement::computeRange(TempAllocator& alloc) {
  // We have an Int32 type and if this is a UInt32 load it may produce a value
  // outside of our range, but we have a bailout to handle those cases.
  setRange(GetArrayBufferViewRange(alloc, storageType()));
}

void MArrayLength::computeRange(TempAllocator& alloc) {
  // Array lengths can go up to UINT32_MAX. We will bail out if the array
  // length > INT32_MAX.
  MOZ_ASSERT(type() == MIRType::Int32);
  setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void MInitializedLength::computeRange(TempAllocator& alloc) {
  setRange(
      Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT));
}

void MArrayBufferViewLength::computeRange(TempAllocator& alloc) {
  if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
  }
}

void MArrayBufferViewByteOffset::computeRange(TempAllocator& alloc) {
  if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
    setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
  }
}

void MObjectKeysLength::computeRange(TempAllocator& alloc) {
  // Object.keys(..) returns an array, but this array is bounded by the number
  // of slots / elements that can be encoded in a single object.
  MOZ_ASSERT(type() == MIRType::Int32);
  setRange(Range::NewUInt32Range(alloc, 0, NativeObject::MAX_SLOTS_COUNT));
}

void MTypedArrayElementSize::computeRange(TempAllocator& alloc) {
  constexpr auto MaxTypedArraySize = sizeof(double);

#define ASSERT_MAX_SIZE(_, T, N)                                          \
  static_assert(sizeof(T) <= MaxTypedArraySize,                           \
                "unexpected typed array type exceeding 64-bits storage");
  JS_FOR_EACH_TYPED_ARRAY(ASSERT_MAX_SIZE)
#undef ASSERT_MAX_SIZE

  setRange(Range::NewUInt32Range(alloc, 0, MaxTypedArraySize));
}

void MStringLength::computeRange(TempAllocator& alloc) {
  static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
                "NewUInt32Range requires a uint32 value");
  setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
}

void MArgumentsLength::computeRange(TempAllocator& alloc) {
  // This is a conservative upper bound on what |TooManyActualArguments|
  // checks. If exceeded, Ion will not be entered in the first place.
  static_assert(ARGS_LENGTH_MAX <= UINT32_MAX,
                "NewUInt32Range requires a uint32 value");
  setRange(Range::NewUInt32Range(alloc, 0, ARGS_LENGTH_MAX));
}

void MBoundsCheck::computeRange(TempAllocator& alloc) {
  // Just transfer the incoming index range to the output. The length() is
  // also interesting, but it is handled as a bailout check, and we're
  // computing a pre-bailout range here.
  setRange(new (alloc) Range(index()));
}

void MSpectreMaskIndex::computeRange(TempAllocator& alloc) {
  // Just transfer the incoming index range to the output for now.
  setRange(new (alloc) Range(index()));
}

void MInt32ToIntPtr::computeRange(TempAllocator& alloc) {
  setRange(new (alloc) Range(input()));
}

void MNonNegativeIntPtrToInt32::computeRange(TempAllocator& alloc) {
  // We will bail out if the IntPtr value > INT32_MAX.
  setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void MArrayPush::computeRange(TempAllocator& alloc) {
  // MArrayPush returns the new array length. It bails out if the new length
  // doesn't fit in an Int32.
  MOZ_ASSERT(type() == MIRType::Int32);
  setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
}

void MMathFunction::computeRange(TempAllocator& alloc) {
  Range opRange(getOperand(0));
  switch (function()) {
    case UnaryMathFunction::SinNative:
    case UnaryMathFunction::SinFdlibm:
    case UnaryMathFunction::CosNative:
    case UnaryMathFunction::CosFdlibm:
      if (!opRange.canBeInfiniteOrNaN()) {
        setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
      }
      break;
    default:
      break;
  }
}

void MSign::computeRange(TempAllocator& alloc) {
  Range opRange(getOperand(0));
  setRange(Range::sign(alloc, &opRange));
}

void MRandom::computeRange(TempAllocator& alloc) {
  Range* r = Range::NewDoubleRange(alloc, 0.0, 1.0);

  // Random never returns negative zero.
  r->refineToExcludeNegativeZero();

  setRange(r);
}

void MNaNToZero::computeRange(TempAllocator& alloc) {
  Range other(input());
  setRange(Range::NaNToZero(alloc, &other));
}
///////////////////////////////////////////////////////////////////////////////
// Range Analysis
///////////////////////////////////////////////////////////////////////////////

static BranchDirection NegateBranchDirection(BranchDirection dir) {
  return (dir == FALSE_BRANCH) ? TRUE_BRANCH : FALSE_BRANCH;
}

bool RangeAnalysis::analyzeLoop(MBasicBlock* header) {
  MOZ_ASSERT(header->hasUniqueBackedge());

  // Try to compute an upper bound on the number of times the loop backedge
  // will be taken. Look for tests that dominate the backedge and which have
  // an edge leaving the loop body.
  MBasicBlock* backedge = header->backedge();
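
  // For instance (illustrative), in a loop compiled from:
  //
  //   for (var i = 0; i < n; i++) { ... }
  //
  // the test |i < n| dominates the backedge and has an edge leaving the
  // loop, so it is a candidate for analyzeLoopIterationCount below.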

  // Ignore trivial infinite loops.
  if (backedge == header) {
    return true;
  }

  bool canOsr;
  size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);

  // Ignore broken loops.
  if (numBlocks == 0) {
    return true;
  }

  LoopIterationBound* iterationBound = nullptr;

  MBasicBlock* block = backedge;
  do {
    BranchDirection direction;
    MTest* branch = block->immediateDominatorBranch(&direction);

    if (block == block->immediateDominator()) {
      break;
    }

    block = block->immediateDominator();

    if (branch) {
      direction = NegateBranchDirection(direction);
      MBasicBlock* otherBlock = branch->branchSuccessor(direction);
      if (!otherBlock->isMarked()) {
        if (!alloc().ensureBallast()) {
          return false;
        }
        iterationBound = analyzeLoopIterationCount(header, branch, direction);
        if (iterationBound) {
          break;
        }
      }
    }
  } while (block != header);

  if (!iterationBound) {
    UnmarkLoopBlocks(graph_, header);
    return true;
  }

  if (!loopIterationBounds.append(iterationBound)) {
    return false;
  }

#ifdef DEBUG
  if (JitSpewEnabled(JitSpew_Range)) {
    Sprinter sp(GetJitContext()->cx);
    if (!sp.init()) {
      return false;
    }
    iterationBound->boundSum.dump(sp);
    JS::UniqueChars str = sp.release();
    if (!str) {
      return false;
    }
    JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
            str.get());
  }
#endif

  // Try to compute symbolic bounds for the phi nodes at the head of this
  // loop, expressed in terms of the iteration bound just computed.

  for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd();
       iter++) {
    analyzeLoopPhi(iterationBound, *iter);
  }

  if (!mir->compilingWasm() && !mir->outerInfo().hadBoundsCheckBailout()) {
    // Try to hoist any bounds checks from the loop using symbolic bounds.

    Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());

    for (ReversePostorderIterator iter(graph_.rpoBegin(header));
         iter != graph_.rpoEnd(); iter++) {
      MBasicBlock* block = *iter;
      if (!block->isMarked()) {
        continue;
      }

      for (MDefinitionIterator iter(block); iter; iter++) {
        MDefinition* def = *iter;
        if (def->isBoundsCheck() && def->isMovable()) {
          if (!alloc().ensureBallast()) {
            return false;
          }
          if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
            if (!hoistedChecks.append(def->toBoundsCheck())) {
              return false;
            }
          }
        }
      }
    }

    // Note: replace all uses of the original bounds check with the
    // actual index. This is usually done during bounds check elimination,
    // but in this case it's safe to do it here since the load/store is
    // definitely not loop-invariant, so we will never move it before
    // one of the bounds checks we just added.
    for (size_t i = 0; i < hoistedChecks.length(); i++) {
      MBoundsCheck* ins = hoistedChecks[i];
      ins->replaceAllUsesWith(ins->index());
      ins->block()->discard(ins);
    }
  }

  UnmarkLoopBlocks(graph_, header);
  return true;
}

// Unbox beta nodes in order to hoist instructions properly, and not be limited
// by the beta nodes which are added after each branch.
static inline MDefinition* DefinitionOrBetaInputDefinition(MDefinition* ins) {
  while (ins->isBeta()) {
    ins = ins->toBeta()->input();
  }
  return ins;
}

LoopIterationBound* RangeAnalysis::analyzeLoopIterationCount(
    MBasicBlock* header, MTest* test, BranchDirection direction) {
  SimpleLinearSum lhs(nullptr, 0);
  MDefinition* rhs;
  bool lessEqual;
  if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual)) {
    return nullptr;
  }

  // Ensure the rhs is a loop invariant term.
  if (rhs && rhs->block()->isMarked()) {
    if (lhs.term && lhs.term->block()->isMarked()) {
      return nullptr;
    }

    // Swap the left and right sides of the inequality.
    MDefinition* temp = lhs.term;
    lhs.term = rhs;
    rhs = temp;

    if (!SafeSub(0, lhs.constant, &lhs.constant)) {
      return nullptr;
    }

    lessEqual = !lessEqual;
  }

  MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());

  // Ensure the lhs is a phi node from the start of the loop body.
  if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header) {
    return nullptr;
  }

  // Check that the value of the lhs changes by a constant amount with each
  // loop iteration. This requires that the lhs be written in every loop
  // iteration with a value that is a constant difference from its value at
  // the start of the iteration.
  if (lhs.term->toPhi()->numOperands() != 2) {
    return nullptr;
  }

  // The first operand of the phi should be the lhs' value at the start of
  // the first executed iteration, and not a value written which could
  // replace the second operand below during the middle of execution.
  MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
  if (lhsInitial->block()->isMarked()) {
    return nullptr;
  }

  // The second operand of the phi should be a value written by an add/sub
  // in every loop iteration, i.e. in a block which dominates the backedge.
  MDefinition* lhsWrite = DefinitionOrBetaInputDefinition(
      lhs.term->toPhi()->getLoopBackedgeOperand());
  if (!lhsWrite->isAdd() && !lhsWrite->isSub()) {
    return nullptr;
  }
  if (!lhsWrite->block()->isMarked()) {
    return nullptr;
  }
  MBasicBlock* bb = header->backedge();
  for (; bb != lhsWrite->block() && bb != header;
       bb = bb->immediateDominator()) {
  }
  if (bb != lhsWrite->block()) {
    return nullptr;
  }

  SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);

  // Check that the value of the lhs at the backedge is of the form
  // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
  // of the iteration, and not that written to lhs in a previous iteration,
  // as such a previous value could not appear directly in the addition:
  // it could not be stored in lhs as the lhs add/sub executes in every
  // iteration, and if it were stored in another variable its use here would
  // be as an operand to a phi node for that variable.
  if (lhsModified.term != lhs.term) {
    return nullptr;
  }

  LinearSum iterationBound(alloc());
  LinearSum currentIteration(alloc());

  if (lhsModified.constant == 1 && !lessEqual) {
    // The value of lhs is 'initial(lhs) + iterCount' and this will end
    // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
    // on the number of backedges executed is:
    //
    // initial(lhs) + iterCount + lhsN == rhs
    // iterCount == rhs - initial(lhs) - lhsN
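    //
    // Worked example (illustrative): for a loop guarded by |i < n| where i
    // starts at initial(i) == 0 and the test uses i directly (lhsN == 0),
    // this gives iterCount == n - 0 - 0 == n backedges at most.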

    if (!iterationBound.add(rhs, 1)) {
      return nullptr;
    }
    if (!iterationBound.add(lhsInitial, -1)) {
      return nullptr;
    }
    int32_t lhsConstant;
    if (!SafeSub(0, lhs.constant, &lhsConstant)) {
      return nullptr;
    }
    if (!iterationBound.add(lhsConstant)) {
      return nullptr;
    }

    if (!currentIteration.add(lhs.term, 1)) {
      return nullptr;
    }
    if (!currentIteration.add(lhsInitial, -1)) {
      return nullptr;
    }
  } else if (lhsModified.constant == -1 && lessEqual) {
    // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
    // case, an upper bound on the number of backedges executed is:
    //
    // initial(lhs) - iterCount + lhsN == rhs
    // iterCount == initial(lhs) - rhs + lhsN
    if (!iterationBound.add(lhsInitial, 1)) {
      return nullptr;
    }
    if (!iterationBound.add(rhs, -1)) {
      return nullptr;
    }
    if (!iterationBound.add(lhs.constant)) {
      return nullptr;
    }

    if (!currentIteration.add(lhsInitial, 1)) {
      return nullptr;
    }
    if (!currentIteration.add(lhs.term, -1)) {
      return nullptr;
    }
  } else {
    return nullptr;
  }

  return new (alloc())
      LoopIterationBound(header, test, iterationBound, currentIteration);
}

void RangeAnalysis::analyzeLoopPhi(LoopIterationBound* loopBound, MPhi* phi) {
  // Given a bound on the number of backedges taken, compute an upper and
  // lower bound for a phi node that may change by a constant amount each
  // iteration. Unlike for the case when computing the iteration bound
  // itself, the phi does not need to change the same amount every iteration,
  // but is required to change at most N and be either nondecreasing or
  // nonincreasing.

  MOZ_ASSERT(phi->numOperands() == 2);

  MDefinition* initial = phi->getLoopPredecessorOperand();
  if (initial->block()->isMarked()) {
    return;
  }

  SimpleLinearSum modified =
      ExtractLinearSum(phi->getLoopBackedgeOperand(), MathSpace::Infinite);

  if (modified.term != phi || modified.constant == 0) {
    return;
  }

  if (!phi->range()) {
    phi->setRange(new (alloc()) Range(phi));
  }

  LinearSum initialSum(alloc());
  if (!initialSum.add(initial, 1)) {
    return;
  }

  // The phi may change by N each iteration, and is either nondecreasing or
  // nonincreasing. initial(phi) is either a lower or upper bound for the
  // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
  // at all points within the loop, provided that loopBound >= 0.
  //
  // We are more interested, however, in the bound for phi at points
  // dominated by the loop bound's test; if the test dominates e.g. a bounds
  // check we want to hoist from the loop, using the value of the phi at the
  // head of the loop for this will usually be too imprecise to hoist the
  // check. These points will execute only if the backedge executes at least
  // one more time (as the test passed and the test dominates the backedge),
  // so we know both that loopBound >= 1 and that the phi's value has changed
  // at most loopBound - 1 times. Thus, another upper or lower bound for the
  // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
  // ensure that loopBound >= 0.
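  //
  // For instance (illustrative): with initial(phi) == 0, N == 2, and a
  // proven bound of 10 backedges, any point dominated by the test satisfies
  // phi <= 0 + (10 - 1) * 2 == 18.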

  LinearSum limitSum(loopBound->boundSum);
  if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum)) {
    return;
  }

  int32_t negativeConstant;
  if (!SafeSub(0, modified.constant, &negativeConstant) ||
      !limitSum.add(negativeConstant)) {
    return;
  }

  Range* initRange = initial->range();
  if (modified.constant > 0) {
    if (initRange && initRange->hasInt32LowerBound()) {
      phi->range()->refineLower(initRange->lower());
    }
    phi->range()->setSymbolicLower(
        SymbolicBound::New(alloc(), nullptr, initialSum));
    phi->range()->setSymbolicUpper(
        SymbolicBound::New(alloc(), loopBound, limitSum));
  } else {
    if (initRange && initRange->hasInt32UpperBound()) {
      phi->range()->refineUpper(initRange->upper());
    }
    phi->range()->setSymbolicUpper(
        SymbolicBound::New(alloc(), nullptr, initialSum));
    phi->range()->setSymbolicLower(
        SymbolicBound::New(alloc(), loopBound, limitSum));
  }

  JitSpew(JitSpew_Range, "added symbolic range on %u", phi->id());
  SpewRange(phi);
}

// Whether bound is valid at the specified bounds check instruction in a loop,
// and may be used to hoist ins.
static inline bool SymbolicBoundIsValid(MBasicBlock* header, MBoundsCheck* ins,
                                        const SymbolicBound* bound) {
  if (!bound->loop) {
    return true;
  }
  if (ins->block() == header) {
    return false;
  }
  MBasicBlock* bb = ins->block()->immediateDominator();
  while (bb != header && bb != bound->loop->test->block()) {
    bb = bb->immediateDominator();
  }
  return bb == bound->loop->test->block();
}

bool RangeAnalysis::tryHoistBoundsCheck(MBasicBlock* header,
                                        MBoundsCheck* ins) {
  // The bounds check's length must be loop invariant or a constant.
  MDefinition* length = DefinitionOrBetaInputDefinition(ins->length());
  if (length->block()->isMarked() && !length->isConstant()) {
    return false;
  }

  // The bounds check's index should not be loop invariant (else we would
  // already have hoisted it during LICM).
  SimpleLinearSum index = ExtractLinearSum(ins->index());
  if (!index.term || !index.term->block()->isMarked()) {
    return false;
  }

  // Check for a symbolic lower and upper bound on the index. If either
  // condition depends on an iteration bound for the loop, only hoist if
  // the bounds check is dominated by the iteration bound's test.
  if (!index.term->range()) {
    return false;
  }
  const SymbolicBound* lower = index.term->range()->symbolicLower();
  if (!lower || !SymbolicBoundIsValid(header, ins, lower)) {
    return false;
  }
  const SymbolicBound* upper = index.term->range()->symbolicUpper();
  if (!upper || !SymbolicBoundIsValid(header, ins, upper)) {
    return false;
  }

  MBasicBlock* preLoop = header->loopPredecessor();
  MOZ_ASSERT(!preLoop->isMarked());

  MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum,
                                            BailoutKind::HoistBoundsCheck);
  if (!lowerTerm) {
    return false;
  }

  MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum,
                                            BailoutKind::HoistBoundsCheck);
  if (!upperTerm) {
    return false;
  }

  // We are checking that index + indexConstant >= 0, and know that
  // index >= lowerTerm + lowerConstant. Thus, check that:
  //
  // lowerTerm + lowerConstant + indexConstant >= 0
  // lowerTerm >= -lowerConstant - indexConstant
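  //
  // E.g. (illustrative): for a check of |index - 1 >= 0| (indexConstant ==
  // -1) with index >= lowerTerm + 0, it suffices to check lowerTerm >= 1.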

  int32_t lowerConstant = 0;
  if (!SafeSub(lowerConstant, index.constant, &lowerConstant)) {
    return false;
  }
  if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant)) {
    return false;
  }

  // We are checking that index < boundsLength, and know that
  // index <= upperTerm + upperConstant. Thus, check that:
  //
  // upperTerm + upperConstant < boundsLength
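  //
  // E.g. (illustrative): for |index + 2 < boundsLength| (indexConstant == 2)
  // with index <= upperTerm + 3, it suffices to check that
  // upperTerm + 5 < boundsLength.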

  int32_t upperConstant = index.constant;
  if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant)) {
    return false;
  }

  // Hoist the loop invariant lower bounds checks.
  MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
  lowerCheck->setMinimum(lowerConstant);
  lowerCheck->computeRange(alloc());
  lowerCheck->collectRangeInfoPreTrunc();
  lowerCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
  preLoop->insertBefore(preLoop->lastIns(), lowerCheck);

  // A common pattern for iterating over typed arrays is this:
  //
  //   for (var i = 0; i < ta.length; i++) {
  //     ...
  //   }
  //
  // Here |upperTerm| (= ta.length) is a NonNegativeIntPtrToInt32 instruction.
  // Unwrap this if |length| is also an IntPtr so that we don't add an
  // unnecessary bounds check and Int32ToIntPtr below.
  if (upperTerm->isNonNegativeIntPtrToInt32() &&
      length->type() == MIRType::IntPtr) {
    upperTerm = upperTerm->toNonNegativeIntPtrToInt32()->input();
  }

  // Hoist the loop invariant upper bounds checks.
  if (upperTerm != length || upperConstant >= 0) {
    // Hoist the bound check's length if it isn't already loop invariant.
    if (length->block()->isMarked()) {
      MOZ_ASSERT(length->isConstant());
      MInstruction* lengthIns = length->toInstruction();
      lengthIns->block()->moveBefore(preLoop->lastIns(), lengthIns);
    }

    // If the length is IntPtr, convert the upperTerm to that as well for the
    // bounds check.
    if (length->type() == MIRType::IntPtr &&
        upperTerm->type() == MIRType::Int32) {
      upperTerm = MInt32ToIntPtr::New(alloc(), upperTerm);
      upperTerm->computeRange(alloc());
      upperTerm->collectRangeInfoPreTrunc();
      preLoop->insertBefore(preLoop->lastIns(), upperTerm->toInstruction());
    }

    MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
    upperCheck->setMinimum(upperConstant);
    upperCheck->setMaximum(upperConstant);
    upperCheck->computeRange(alloc());
    upperCheck->collectRangeInfoPreTrunc();
    upperCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
    preLoop->insertBefore(preLoop->lastIns(), upperCheck);
  }

  return true;
}

bool RangeAnalysis::analyze() {
  JitSpew(JitSpew_Range, "Doing range propagation");

  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;
    // No blocks are supposed to be unreachable, except when we have an OSR
    // block, in which case the Value Numbering phase adds fixup blocks which
    // are unreachable.
    MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());

    // If the block's immediate dominator is unreachable, the block is
    // unreachable. Iterating in RPO, we'll always see the immediate
    // dominator before the block.
    if (block->immediateDominator()->unreachable()) {
      block->setUnreachableUnchecked();
      continue;
    }

    for (MDefinitionIterator iter(block); iter; iter++) {
      MDefinition* def = *iter;
      if (!alloc().ensureBallast()) {
        return false;
      }

      def->computeRange(alloc());
      JitSpew(JitSpew_Range, "computing range on %u", def->id());
      SpewRange(def);
    }

    // Beta node range analysis may have marked this block unreachable. If
    // so, it's no longer interesting to continue processing it.
    if (block->unreachable()) {
      continue;
    }

    if (block->isLoopHeader()) {
      if (!analyzeLoop(block)) {
        return false;
      }
    }

    // First pass at collecting range info - while the beta nodes are still
    // around and before truncation.
    for (MInstructionIterator iter(block->begin()); iter != block->end();
         iter++) {
      iter->collectRangeInfoPreTrunc();
    }
  }

  return true;
}

bool RangeAnalysis::addRangeAssertions() {
  if (!JitOptions.checkRangeAnalysis) {
    return true;
  }

  // Check the computed range for this instruction, if the option is set. Note
  // that this code is quite invasive; it adds numerous additional
  // instructions for each MInstruction with a computed range, and it uses
  // registers, so it also affects register allocation.
  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;

    // Do not add assertions in unreachable blocks.
    if (block->unreachable()) {
      continue;
    }

    for (MDefinitionIterator iter(block); iter; iter++) {
      MDefinition* ins = *iter;

      // Perform range checking for all numeric and numeric-like types.
      if (!IsNumberType(ins->type()) && ins->type() != MIRType::Boolean &&
          ins->type() != MIRType::Value && ins->type() != MIRType::IntPtr) {
        continue;
      }

      // MIsNoIter is fused with the MTest that follows it and emitted as
      // LIsNoIterAndBranch. Similarly, MIteratorHasIndices is fused to
      // become LIteratorHasIndicesAndBranch. Skip them to avoid complicating
      // lowering.
      if (ins->isIsNoIter() || ins->isIteratorHasIndices()) {
        MOZ_ASSERT(ins->hasOneUse());
        continue;
      }

      Range r(ins);

      MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());

      // Don't insert assertions if there's nothing interesting to assert.
      if (r.isUnknown() ||
          (ins->type() == MIRType::Int32 && r.isUnknownInt32())) {
        continue;
      }

      // Don't add a use to an instruction that is recovered on bailout.
      if (ins->isRecoveredOnBailout()) {
        continue;
      }

      if (!alloc().ensureBallast()) {
        return false;
      }
      MAssertRange* guard =
          MAssertRange::New(alloc(), ins, new (alloc()) Range(r));

      // Beta nodes and interrupt checks are required to be located at the
      // beginnings of basic blocks, so we must insert range assertions
      // after any such instructions.
      MInstruction* insertAt = nullptr;
      if (block->graph().osrBlock() == block) {
        insertAt = ins->toInstruction();
      } else {
        insertAt = block->safeInsertTop(ins);
      }

      if (insertAt == *iter) {
        block->insertAfter(insertAt, guard);
      } else {
        block->insertBefore(insertAt, guard);
      }
    }
  }

  return true;
}

///////////////////////////////////////////////////////////////////////////////
// Range based Truncation
///////////////////////////////////////////////////////////////////////////////

void Range::clampToInt32() {
  if (isInt32()) {
    return;
  }
  int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
  int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
  setInt32(l, h);
}

void Range::wrapAroundToInt32() {
  if (!hasInt32Bounds()) {
    setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
  } else if (canHaveFractionalPart()) {
    // Clearing the fractional field may provide an opportunity to refine
    // lower_ or upper_.
    canHaveFractionalPart_ = ExcludesFractionalParts;
    canBeNegativeZero_ = ExcludesNegativeZero;
    refineInt32BoundsByExponent(max_exponent_, &lower_, &hasInt32LowerBound_,
                                &upper_, &hasInt32UpperBound_);

    assertInvariants();
  } else {
    // If nothing else, we can clear the negative zero flag.
    canBeNegativeZero_ = ExcludesNegativeZero;
  }
  MOZ_ASSERT(isInt32());
}
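
// Note (illustrative): shift counts in JS use only the low five bits of the
// rhs, so e.g. |x << 40| shifts by 40 & 31 == 8. Any range not provably
// within [0, 31] is therefore clamped to [0, 31] below.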
void Range::wrapAroundToShiftCount() {
  wrapAroundToInt32();
  if (lower() < 0 || upper() >= 32) {
    setInt32(0, 31);
  }
}

void Range::wrapAroundToBoolean() {
  wrapAroundToInt32();
  if (!isBoolean()) {
    setInt32(0, 1);
  }
  MOZ_ASSERT(isBoolean());
}

bool MDefinition::canTruncate() const {
  // No procedure defined for truncating this instruction.
  return false;
}

void MDefinition::truncate(TruncateKind kind) {
  MOZ_CRASH("No procedure defined for truncating this instruction.");
}

bool MConstant::canTruncate() const { return IsFloatingPointType(type()); }

void MConstant::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());

  // Truncate the double to int, since all uses truncate it.
  int32_t res = ToInt32(numberToDouble());
  payload_.asBits = 0;
  payload_.i32 = res;
  setResultType(MIRType::Int32);
  if (range()) {
    range()->setInt32(res, res);
  }
}

bool MPhi::canTruncate() const {
  return type() == MIRType::Double || type() == MIRType::Int32;
}

void MPhi::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  truncateKind_ = kind;
  setResultType(MIRType::Int32);
  if (kind >= TruncateKind::IndirectTruncate && range()) {
    range()->wrapAroundToInt32();
  }
}

bool MAdd::canTruncate() const {
  return type() == MIRType::Double || type() == MIRType::Int32;
}

void MAdd::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());

  // Remember analysis, needed for fallible checks.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);
  if (truncateKind() >= TruncateKind::IndirectTruncate && range()) {
    range()->wrapAroundToInt32();
  }
}

bool MSub::canTruncate() const {
  return type() == MIRType::Double || type() == MIRType::Int32;
}

void MSub::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());

  // Remember analysis, needed for fallible checks.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);
  if (truncateKind() >= TruncateKind::IndirectTruncate && range()) {
    range()->wrapAroundToInt32();
  }
}

bool MMul::canTruncate() const {
  return type() == MIRType::Double || type() == MIRType::Int32;
}

void MMul::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());

  // Remember analysis, needed for fallible checks.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);
  if (truncateKind() >= TruncateKind::IndirectTruncate) {
    setCanBeNegativeZero(false);
    if (range()) {
      range()->wrapAroundToInt32();
    }
  }
}

bool MDiv::canTruncate() const {
  return type() == MIRType::Double || type() == MIRType::Int32;
}

void MDiv::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());

  // Remember analysis, needed for fallible checks.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);

  // Divisions where the lhs and rhs are unsigned and the result is
  // truncated can be lowered more efficiently.
  if (unsignedOperands()) {
    replaceWithUnsignedOperands();
  }
}

bool MMod::canTruncate() const {
  return type() == MIRType::Double || type() == MIRType::Int32;
}

void MMod::truncate(TruncateKind kind) {
  // As for division, handle unsigned modulus with a truncated result.
  MOZ_ASSERT(canTruncate());

  // Remember analysis, needed for fallible checks.
  setTruncateKind(kind);
  setSpecialization(MIRType::Int32);

  if (unsignedOperands()) {
    replaceWithUnsignedOperands();
  }
}

bool MToDouble::canTruncate() const {
  MOZ_ASSERT(type() == MIRType::Double);
  return true;
}

void MToDouble::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  setTruncateKind(kind);

  // We use the return type to flag that this MToDouble should be replaced by
  // a MTruncateToInt32 when modifying the graph.
  setResultType(MIRType::Int32);
  if (truncateKind() >= TruncateKind::IndirectTruncate) {
    if (range()) {
      range()->wrapAroundToInt32();
    }
  }
}

bool MLimitedTruncate::canTruncate() const { return true; }

void MLimitedTruncate::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  setTruncateKind(kind);
  setResultType(MIRType::Int32);
  if (kind >= TruncateKind::IndirectTruncate && range()) {
    range()->wrapAroundToInt32();
  }
}

bool MCompare::canTruncate() const {
  if (!isDoubleComparison()) {
    return false;
  }

  // If both operands are naturally in the int32 range, we can convert from
  // a double comparison to being an int32 comparison.
  if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32()) {
    return false;
  }

  return true;
}

void MCompare::truncate(TruncateKind kind) {
  MOZ_ASSERT(canTruncate());
  compareType_ = Compare_Int32;

  // Truncating the operands won't change their value because we don't force a
  // truncation, but it will change their type, which we need because we
  // now expect integer inputs.
  truncateOperands_ = true;
}

TruncateKind MDefinition::operandTruncateKind(size_t index) const {
  // Generic routine: We don't know anything.
  return TruncateKind::NoTruncate;
}

TruncateKind MPhi::operandTruncateKind(size_t index) const {
  // The truncation applied to a phi is effectively applied to the phi's
  // operands.
  return truncateKind_;
}

TruncateKind MTruncateToInt32::operandTruncateKind(size_t index) const {
  // This operator is an explicit truncate to int32.
  return TruncateKind::Truncate;
}

TruncateKind MBinaryBitwiseInstruction::operandTruncateKind(
    size_t index) const {
  // The bitwise operators truncate to int32.
  return TruncateKind::Truncate;
}

TruncateKind MLimitedTruncate::operandTruncateKind(size_t index) const {
  return std::min(truncateKind(), truncateLimit_);
}

TruncateKind MAdd::operandTruncateKind(size_t index) const {
  // This operator is doing some arithmetic. If its result is truncated,
  // it's an indirect truncate for its operands.
  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
}

TruncateKind MSub::operandTruncateKind(size_t index) const {
  // See the comment in MAdd::operandTruncateKind.
  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
}

TruncateKind MMul::operandTruncateKind(size_t index) const {
  // See the comment in MAdd::operandTruncateKind.
  return std::min(truncateKind(), TruncateKind::IndirectTruncate);
}

TruncateKind MToDouble::operandTruncateKind(size_t index) const {
  // MToDouble propagates its truncate kind to its operand.
  return truncateKind();
}

TruncateKind MStoreUnboxedScalar::operandTruncateKind(size_t index) const {
  // An integer store truncates the stored value.
  return (index == 2 && isIntegerWrite()) ? TruncateKind::Truncate
                                          : TruncateKind::NoTruncate;
}

TruncateKind MStoreDataViewElement::operandTruncateKind(size_t index) const {
  // An integer store truncates the stored value.
  return (index == 2 && isIntegerWrite()) ? TruncateKind::Truncate
                                          : TruncateKind::NoTruncate;
}

TruncateKind MStoreTypedArrayElementHole::operandTruncateKind(
    size_t index) const {
  // An integer store truncates the stored value.
  return (index == 3 && isIntegerWrite()) ? TruncateKind::Truncate
                                          : TruncateKind::NoTruncate;
}

TruncateKind MDiv::operandTruncateKind(size_t index) const {
  return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
}

TruncateKind MMod::operandTruncateKind(size_t index) const {
  return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
}

TruncateKind MCompare::operandTruncateKind(size_t index) const {
  // If we're doing an int32 comparison on operands which were previously
  // floating-point, convert them!
  MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
  return truncateOperands_ ? TruncateKind::TruncateAfterBailouts
                           : TruncateKind::NoTruncate;
}
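
// Summary example (illustrative): in |(a + b) | 0|, the bitor requests
// Truncate for the add, and the add in turn requests IndirectTruncate for
// |a| and |b|, so the whole expression can be evaluated with int32
// wrap-around semantics.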

static bool TruncateTest(TempAllocator& alloc, MTest* test) {
  // If all possible inputs to the test are either int32 or boolean,
  // convert those inputs to int32 so that an int32 test can be performed.

  if (test->input()->type() != MIRType::Value) {
    return true;
  }

  if (!test->input()->isPhi() || !test->input()->hasOneDefUse() ||
      test->input()->isImplicitlyUsed()) {
    return true;
  }

  MPhi* phi = test->input()->toPhi();
  for (size_t i = 0; i < phi->numOperands(); i++) {
    MDefinition* def = phi->getOperand(i);
    if (!def->isBox()) {
      return true;
    }
    MDefinition* inner = def->getOperand(0);
    if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32) {
      return true;
    }
  }

  for (size_t i = 0; i < phi->numOperands(); i++) {
    MDefinition* inner = phi->getOperand(i)->getOperand(0);
    if (inner->type() != MIRType::Int32) {
      if (!alloc.ensureBallast()) {
        return false;
      }
      MBasicBlock* block = inner->block();
      inner = MToNumberInt32::New(alloc, inner);
      block->insertBefore(block->lastIns(), inner->toInstruction());
    }
    MOZ_ASSERT(inner->type() == MIRType::Int32);
    phi->replaceOperand(i, inner);
  }

  phi->setResultType(MIRType::Int32);
  return true;
}

// Truncating the result of an instruction is an optimization which implies
// knowing all uses of an instruction. This implies that if one of
// the uses got removed, then Range Analysis is not allowed to do
// any modification which can change the result, especially if the
// result can be observed.
//
// This corner case can easily be understood with UCE examples, but it
// might also happen with type inference assumptions. Note: type
// inference implicitly adds branches where other types might be
// considered.
static bool CloneForDeadBranches(TempAllocator& alloc,
                                 MInstruction* candidate) {
  // Compare returns a boolean so it doesn't have to be recovered on bailout
  // because the output would remain correct.
  if (candidate->isCompare()) {
    return true;
  }

  MOZ_ASSERT(candidate->canClone());
  if (!alloc.ensureBallast()) {
    return false;
  }

  MDefinitionVector operands(alloc);
  size_t end = candidate->numOperands();
  if (!operands.reserve(end)) {
    return false;
  }
  for (size_t i = 0; i < end; ++i) {
    operands.infallibleAppend(candidate->getOperand(i));
  }

  MInstruction* clone = candidate->clone(alloc, operands);
  if (!clone) {
    return false;
  }
  clone->setRange(nullptr);

  // Set the ImplicitlyUsed flag on the cloned instruction in order to chain
  // recover instructions for the bailout path.
  clone->setImplicitlyUsedUnchecked();

  candidate->block()->insertBefore(candidate, clone);

  if (!candidate->maybeConstantValue()) {
    MOZ_ASSERT(clone->canRecoverOnBailout());
    clone->setRecoveredOnBailout();
  }

  // Replace the candidate by its recovered-on-bailout clone within recovered
  // instructions and resume point operands.
  for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd();) {
    MUse* use = *i++;
    MNode* ins = use->consumer();
    if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout()) {
      continue;
    }

    use->replaceProducer(clone);
  }

  return true;
}

// Examine all the users of |candidate| and determine the most aggressive
// truncate kind that satisfies all of them.
static TruncateKind ComputeRequestedTruncateKind(MDefinition* candidate,
                                                 bool* shouldClone) {
  bool isCapturedResult =
      false;  // Check if used by a recovered instruction or a resume point.
  bool isObservableResult =
      false;  // Check if it can be read from another frame.
  bool isRecoverableResult = true;  // Check if it can safely be reconstructed.
  bool isImplicitlyUsed = candidate->isImplicitlyUsed();
  bool hasTryBlock = candidate->block()->graph().hasTryBlock();

  TruncateKind kind = TruncateKind::Truncate;
  for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd();
       use++) {
    if (use->consumer()->isResumePoint()) {
      // Truncation is a destructive optimization; as such, we need to pay
      // attention to removed branches and prevent destructive optimizations
      // if we have no alternative. (See the ImplicitlyUsed flag.)
      isCapturedResult = true;
      isObservableResult =
          isObservableResult ||
          use->consumer()->toResumePoint()->isObservableOperand(*use);
      isRecoverableResult =
          isRecoverableResult &&
          use->consumer()->toResumePoint()->isRecoverableOperand(*use);
      continue;
    }

    MDefinition* consumer = use->consumer()->toDefinition();
    if (consumer->isRecoveredOnBailout()) {
      isCapturedResult = true;
      isImplicitlyUsed = isImplicitlyUsed || consumer->isImplicitlyUsed();
      continue;
    }

    TruncateKind consumerKind =
        consumer->operandTruncateKind(consumer->indexOf(*use));
    kind = std::min(kind, consumerKind);
    if (kind == TruncateKind::NoTruncate) {
      break;
    }
  }

  // We cannot do full truncation on guarded instructions.
  if (candidate->isGuard() || candidate->isGuardRangeBailouts()) {
    kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
  }

  // If the value naturally produces an int32 value (before bailout checks)
  // that needs no conversion, we don't have to worry about resume points
  // seeing truncated values.
  bool needsConversion = !candidate->range() || !candidate->range()->isInt32();

  // If the instruction is explicitly truncated (not indirectly) by all its
  // uses and if it is not implicitly used, then we can safely encode its
  // truncated result as part of the resume point operands. This is safe,
  // because even if we resume with a truncated double, the next baseline
  // instruction operating on this instruction is going to be a no-op.
  //
  // Note that if the result can be observed from another frame, then this
  // optimization is not safe. Similarly, if this function contains a try
  // block, the result could be observed from a catch block, which we do
  // not compile.
  bool safeToConvert = kind == TruncateKind::Truncate && !isImplicitlyUsed &&
                       !isObservableResult && !hasTryBlock;

  // If the candidate instruction appears as operand of a resume point or a
  // recover instruction, and we have to truncate its result, then we might
  // have to either recover the result during the bailout, or avoid the
  // truncation.
  if (isCapturedResult && needsConversion && !safeToConvert) {
    // If the result can be recovered from all the resume points (not needed
    // for iterating over the inlined frames), and this instruction can be
    // recovered on bailout, then we can clone it and use the cloned
    // instruction to encode the recover instruction. Otherwise, we should
    // keep the original result and bailout if the value is not in the int32
    // range.
    if (!JitOptions.disableRecoverIns && isRecoverableResult &&
        candidate->canRecoverOnBailout()) {
      *shouldClone = true;
    } else {
      kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
    }
  }

  return kind;
}

static TruncateKind ComputeTruncateKind(MDefinition* candidate,
                                        bool* shouldClone) {
  // Compare operations might coerce their inputs to int32 if the ranges are
  // correct. So we do not need to check if all uses are coerced.
  if (candidate->isCompare()) {
    return TruncateKind::TruncateAfterBailouts;
  }

  // Set the truncated flag if range analysis ensures that the value has no
  // rounding errors and no fractional part. Note that we can't use
  // the MDefinition Range constructor, because we need to know if
  // the value will have rounding errors before any bailout checks.
  const Range* r = candidate->range();
  bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();

  // Special case integer division and modulo: a/b can be infinite, and a%b
  // can be NaN but cannot actually have rounding errors induced by truncation.
  if ((candidate->isDiv() || candidate->isMod()) &&
      candidate->type() == MIRType::Int32) {
    canHaveRoundingErrors = false;
  }

  if (canHaveRoundingErrors) {
    return TruncateKind::NoTruncate;
  }

  // Ensure all observable uses are truncated.
  return ComputeRequestedTruncateKind(candidate, shouldClone);
}

static void RemoveTruncatesOnOutput(MDefinition* truncated) {
  // Compare returns a boolean so it doesn't have any output truncates.
  if (truncated->isCompare()) {
    return;
  }

  MOZ_ASSERT(truncated->type() == MIRType::Int32);
  MOZ_ASSERT(Range(truncated).isInt32());

  for (MUseDefIterator use(truncated); use; use++) {
    MDefinition* def = use.def();
    if (!def->isTruncateToInt32() && !def->isToNumberInt32()) {
      continue;
    }

    def->replaceAllUsesWith(truncated);
  }
}

void RangeAnalysis::adjustTruncatedInputs(MDefinition* truncated) {
  MBasicBlock* block = truncated->block();
  for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
    TruncateKind kind = truncated->operandTruncateKind(i);
    if (kind == TruncateKind::NoTruncate) {
      continue;
    }

    MDefinition* input = truncated->getOperand(i);
    if (input->type() == MIRType::Int32) {
      continue;
    }

    if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
      truncated->replaceOperand(i, input->getOperand(0));
    } else {
      MInstruction* op;
      if (kind == TruncateKind::TruncateAfterBailouts) {
        MOZ_ASSERT(!mir->outerInfo().hadEagerTruncationBailout());
        op = MToNumberInt32::New(alloc(), truncated->getOperand(i));
        op->setBailoutKind(BailoutKind::EagerTruncation);
      } else {
        op = MTruncateToInt32::New(alloc(), truncated->getOperand(i));
      }

      if (truncated->isPhi()) {
        MBasicBlock* pred = block->getPredecessor(i);
        pred->insertBefore(pred->lastIns(), op);
      } else {
        block->insertBefore(truncated->toInstruction(), op);
      }
      truncated->replaceOperand(i, op);
    }
  }

  if (truncated->isToDouble()) {
    truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
    block->discard(truncated->toToDouble());
  }
}

bool RangeAnalysis::canTruncate(MDefinition* def, TruncateKind kind) const {
  if (kind == TruncateKind::NoTruncate) {
    return false;
  }

  // Range Analysis is sometimes eager to do optimizations, even if we
  // are not able to truncate an instruction. In such a case, we
  // speculatively compile the instruction to an int32 instruction
  // while adding a guard. This is what is implied by
  // TruncateAfterBailout.
  //
  // If a previous compilation was invalidated because a speculative
  // truncation bailed out, we no longer attempt to make this kind of
  // eager optimization.
  if (mir->outerInfo().hadEagerTruncationBailout()) {
    if (kind == TruncateKind::TruncateAfterBailouts) {
      return false;
    }
    // MDiv and MMod always require TruncateAfterBailout for their operands.
    // See MDiv::operandTruncateKind and MMod::operandTruncateKind.
    if (def->isDiv() || def->isMod()) {
      return false;
    }
  }

  return true;
}

// Iterate backward over all instructions and attempt to truncate operations
// for each instruction which respects the following list of predicates: it
// has been analyzed by range analysis, its range has no rounding errors, and
// all use cases are truncating the result.
//
// If the truncation of the operation is successful, then the instruction is
// queued for later updating the graph to restore type correctness by
// converting the operands that need to be truncated.
//
// We iterate backward because it is likely that a truncated operation
// truncates some of its operands.
bool RangeAnalysis::truncate() {
  JitSpew(JitSpew_Range, "Do range-based truncation (backward loop)");

  // Automatic truncation is disabled for wasm because the truncation logic
  // is based on IonMonkey which assumes that we can bailout if the truncation
  // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
  // any automatic truncations.
  MOZ_ASSERT(!mir->compilingWasm());

  Vector<MDefinition*, 16, SystemAllocPolicy> worklist;

  for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd();
       block++) {
    for (MInstructionReverseIterator iter(block->rbegin());
         iter != block->rend(); iter++) {
      if (iter->isRecoveredOnBailout()) {
        continue;
      }

      if (iter->type() == MIRType::None) {
        if (iter->isTest()) {
          if (!TruncateTest(alloc(), iter->toTest())) {
            return false;
          }
        }
        continue;
      }

      // Remember all bitop instructions for folding after range analysis.
      switch (iter->op()) {
        case MDefinition::Opcode::BitAnd:
        case MDefinition::Opcode::BitOr:
        case MDefinition::Opcode::BitXor:
        case MDefinition::Opcode::Lsh:
        case MDefinition::Opcode::Rsh:
        case MDefinition::Opcode::Ursh:
          if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter))) {
            return false;
          }
          break;
        default:
          break;
      }

      bool shouldClone = false;
      TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);

      // Truncate this instruction if possible.
      if (!canTruncate(*iter, kind) || !iter->canTruncate()) {
        continue;
      }

      SpewTruncate(*iter, kind, shouldClone);

      // If needed, clone the current instruction for keeping it for the
      // bailout path. This gives us the ability to truncate instructions
      // even after the removal of branches.
      if (shouldClone && !CloneForDeadBranches(alloc(), *iter)) {
        return false;
      }

      // TruncateAfterBailouts keeps the bailout code as-is and
      // continues with truncated operations, with the expectation
      // that we are unlikely to bail out. If we do bail out, then we
      // will set a flag in FinishBailoutToBaseline to prevent eager
      // truncation when we recompile, to avoid bailout loops.
      if (kind == TruncateKind::TruncateAfterBailouts) {
        iter->setBailoutKind(BailoutKind::EagerTruncation);
      }

      iter->truncate(kind);

      // Delay updates of inputs/outputs to avoid creating nodes which
      // would be removed by the truncation of the next operations.
      iter->setInWorklist();
      if (!worklist.append(*iter)) {
        return false;
      }
    }
    for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
         iter != end; ++iter) {
      bool shouldClone = false;
      TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);

      // Truncate this phi if possible.
      if (shouldClone || !canTruncate(*iter, kind) || !iter->canTruncate()) {
        continue;
      }

      SpewTruncate(*iter, kind, shouldClone);

      iter->truncate(kind);

      // Delay updates of inputs/outputs to avoid creating nodes which
      // would be removed by the truncation of the next operations.
      iter->setInWorklist();
      if (!worklist.append(*iter)) {
        return false;
      }
    }
  }

  // Update inputs/outputs of truncated instructions.
  JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
  while (!worklist.empty()) {
    if (!alloc().ensureBallast()) {
      return false;
    }
    MDefinition* def = worklist.popCopy();
    def->setNotInWorklist();
    RemoveTruncatesOnOutput(def);
    adjustTruncatedInputs(def);
  }

  return true;
}

bool RangeAnalysis::removeUnnecessaryBitops() {
  JitSpew(JitSpew_Range, "Begin (removeUnnecessaryBitops)");
  // Note: This operation changes the semantics of the program in a way which
  // uniquely works with Int32. Recover Instructions added by the Sink phase
  // expect the MIR Graph to still have a valid flow as-if they were double
  // operations instead of Int32 operations. Thus, this phase should be
  // executed after the Sink phase, and before DCE.

  // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
  // input. This is done after range analysis rather than during GVN as the
  // presence of the bitop can change which instructions are truncated.
  for (size_t i = 0; i < bitops.length(); i++) {
    MBinaryBitwiseInstruction* ins = bitops[i];
    if (ins->isRecoveredOnBailout()) {
      continue;
    }

    MDefinition* folded = ins->foldUnnecessaryBitop();
    if (folded != ins) {
      ins->replaceAllLiveUsesWith(folded);
      ins->setRecoveredOnBailout();
    }
  }

  bitops.clear();
  return true;
}

///////////////////////////////////////////////////////////////////////////////
// Collect Range information of operands
///////////////////////////////////////////////////////////////////////////////

void MInArray::collectRangeInfoPreTrunc() {
  Range indexRange(index());
  if (indexRange.isFiniteNonNegative()) {
    needsNegativeIntCheck_ = false;
  }
}

void MLoadElementHole::collectRangeInfoPreTrunc() {
  Range indexRange(index());
  if (indexRange.isFiniteNonNegative()) {
    needsNegativeIntCheck_ = false;
    setNotGuard();
  }
}

void MInt32ToIntPtr::collectRangeInfoPreTrunc() {
  Range inputRange(input());
  if (inputRange.isFiniteNonNegative()) {
    canBeNegative_ = false;
  }
}

void MClz::collectRangeInfoPreTrunc() {
  Range inputRange(input());
  if (!inputRange.canBeZero()) {
    operandIsNeverZero_ = true;
  }
}

void MCtz::collectRangeInfoPreTrunc() {
  Range inputRange(input());
  if (!inputRange.canBeZero()) {
    operandIsNeverZero_ = true;
  }
}

void MDiv::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());

  // Test if Dividend is non-negative.
  if (lhsRange.isFiniteNonNegative()) {
    canBeNegativeDividend_ = false;
  }

  // Try removing divide by zero check.
  if (!rhsRange.canBeZero()) {
    canBeDivideByZero_ = false;
  }

  // If lhsRange does not contain INT32_MIN in its range,
  // negative overflow check can be skipped.
  if (!lhsRange.contains(INT32_MIN)) {
    canBeNegativeOverflow_ = false;
  }

  // If rhsRange does not contain -1 likewise.
  if (!rhsRange.contains(-1)) {
    canBeNegativeOverflow_ = false;
  }

  // If lhsRange does not contain a zero,
  // negative zero check can be skipped.
  if (!lhsRange.canBeZero()) {
    canBeNegativeZero_ = false;
  }

  // If rhsRange >= 0 negative zero check can be skipped.
  if (rhsRange.isFiniteNonNegative()) {
    canBeNegativeZero_ = false;
  }

  if (type() == MIRType::Int32 && fallible()) {
    setGuardRangeBailoutsUnchecked();
  }
}

void MMul::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());

  // If lhsRange contains only positive values, we can skip the negative zero
  // check.
  if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero()) {
    setCanBeNegativeZero(false);
  }

  // Likewise for rhsRange.
  if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero()) {
    setCanBeNegativeZero(false);
  }

  // If rhsRange and lhsRange contain non-negative integers only,
  // we skip the negative zero check.
  if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative()) {
    setCanBeNegativeZero(false);
  }

  // If rhsRange and lhsRange are both negative, we skip the negative zero
  // check as well.
  if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative()) {
    setCanBeNegativeZero(false);
  }
}

void MMod::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());
  if (lhsRange.isFiniteNonNegative()) {
    canBeNegativeDividend_ = false;
  }
  if (!rhsRange.canBeZero()) {
    canBeDivideByZero_ = false;
  }
  if (type() == MIRType::Int32 && fallible()) {
    setGuardRangeBailoutsUnchecked();
  }
}

void MToNumberInt32::collectRangeInfoPreTrunc() {
  Range inputRange(input());
  if (!inputRange.canBeNegativeZero()) {
    needsNegativeZeroCheck_ = false;
  }
}

void MBoundsCheck::collectRangeInfoPreTrunc() {
  Range indexRange(index());
  Range lengthRange(length());
  if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound()) {
    return;
  }
  if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN()) {
    return;
  }

  int64_t indexLower = indexRange.lower();
  int64_t indexUpper = indexRange.upper();
  int64_t lengthLower = lengthRange.lower();
  int64_t min = minimum();
  int64_t max = maximum();

  if (indexLower + min >= 0 && indexUpper + max < lengthLower) {
    fallible_ = false;
  }
}

void MBoundsCheckLower::collectRangeInfoPreTrunc() {
  Range indexRange(index());
  if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_) {
    fallible_ = false;
  }
}

void MCompare::collectRangeInfoPreTrunc() {
  if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN()) {
    operandsAreNeverNaN_ = true;
  }
}

void MNot::collectRangeInfoPreTrunc() {
  if (!Range(input()).canBeNaN()) {
    operandIsNeverNaN_ = true;
  }
}

void MPowHalf::collectRangeInfoPreTrunc() {
  Range inputRange(input());
  if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound()) {
    operandIsNeverNegativeInfinity_ = true;
  }
  if (!inputRange.canBeNegativeZero()) {
    operandIsNeverNegativeZero_ = true;
  }
  if (!inputRange.canBeNaN()) {
    operandIsNeverNaN_ = true;
  }
}

void MUrsh::collectRangeInfoPreTrunc() {
  if (type() == MIRType::Int64) {
    return;
  }

  Range lhsRange(lhs()), rhsRange(rhs());

  // As in MUrsh::computeRange(), convert the inputs.
  lhsRange.wrapAroundToInt32();
  rhsRange.wrapAroundToShiftCount();

  // If the most significant bit of our result is always going to be zero,
  // we can optimize by disabling bailout checks for enforcing an int32 range.
  if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1) {
    bailoutsDisabled_ = true;
  }
}
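
// Check whether |x & mask| is provably a no-op for every x in |range|; for
// instance (illustrative), a uint8-ranged value masked with 0xff or any
// wider mask is unchanged, so the bitand can be removed.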
static bool DoesMaskMatchRange(int32_t mask, Range& range) {
  // Check if range is positive, because the bitand operator in `(-3) & 0xff`
  // can't be eliminated.
  if (range.lower() >= 0) {
    MOZ_ASSERT(range.isInt32());
    // Check that the mask value has all bits set given the range upper bound.
    // Note that the upper bound does not have to be exactly the mask value.
    // For example, consider `x & 0xfff` where `x` is a uint8. That expression
    // can still be optimized to `x`.
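    //
    // Worked example (illustrative): with an upper bound of 200, bits == 8
    // and maskNeeded == 0xff, so masks such as 0xff or 0xfff match, while
    // 0x7f does not.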
    int bits = 1 + FloorLog2(range.upper());
    uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
    if ((mask & maskNeeded) == maskNeeded) {
      return true;
    }
  }

  return false;
}

void MBinaryBitwiseInstruction::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());

  if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
      DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange)) {
    maskMatchesRightRange = true;
  }

  if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
      DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange)) {
    maskMatchesLeftRange = true;
  }
}

void MNaNToZero::collectRangeInfoPreTrunc() {
  Range inputRange(input());

  if (!inputRange.canBeNaN()) {
    operandIsNeverNaN_ = true;
  }
  if (!inputRange.canBeNegativeZero()) {
    operandIsNeverNegativeZero_ = true;
  }
}

bool RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode) {
  *shouldRemoveDeadCode = false;

  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;

    if (!block->unreachable()) {
      continue;
    }

    // Filter out unreachable fake entries.
    if (block->numPredecessors() == 0) {
      // Ignore fixup blocks added by the Value Numbering phase, in order
      // to keep the dominator tree as-is when we have an OSR Block which is
      // no longer reachable from the main entry point of the graph.
      MOZ_ASSERT(graph_.osrBlock());
      continue;
    }

    MControlInstruction* cond = block->getPredecessor(0)->lastIns();
    if (!cond->isTest()) {
      continue;
    }

    // Replace the condition of the test control instruction by a constant
    // chosen based on which of the successors has the unreachable flag, which
    // is added by MBeta::computeRange on its own block.
    MTest* test = cond->toTest();
    MDefinition* condition = test->input();

    // If the false-branch is unreachable, then the test condition must be
    // true. If the true-branch is unreachable, then the test condition must
    // be false.
    MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
    bool value = block == test->ifFalse();
    MConstant* constant =
        MConstant::New(alloc().fallible(), BooleanValue(value));
    if (!constant) {
      return false;
    }

    condition->setGuardRangeBailoutsUnchecked();

    test->block()->insertBefore(test, constant);

    test->replaceOperand(0, constant);
    JitSpew(JitSpew_Range,
            "Update condition of %u to reflect unreachable branches.",
            test->id());

    *shouldRemoveDeadCode = true;
  }

  return tryRemovingGuards();
}

bool RangeAnalysis::tryRemovingGuards() {
  MDefinitionVector guards(alloc());

  for (ReversePostorderIterator block = graph_.rpoBegin();
       block != graph_.rpoEnd(); block++) {
    for (MDefinitionIterator iter(*block); iter; iter++) {
      if (!iter->isGuardRangeBailouts()) {
        continue;
      }

      iter->setInWorklist();
      if (!guards.append(*iter)) {
        return false;
      }
    }
  }

  // Flag all fallible instructions which were indirectly used in the
  // computation of the condition, such that we do not ignore
  // bailout-paths which are used to shrink the input range of the
  // operands of the condition.
  for (size_t i = 0; i < guards.length(); i++) {
    MDefinition* guard = guards[i];

    // If this ins is a guard even without guardRangeBailouts, there is no
    // reason to try to hoist the guardRangeBailouts check.
    guard->setNotGuardRangeBailouts();
    if (!DeadIfUnused(guard)) {
      guard->setGuardRangeBailouts();
      continue;
    }
    guard->setGuardRangeBailouts();

    if (!guard->isPhi()) {
      if (!guard->range()) {
        continue;
      }

      // Filter the range of the instruction based on its MIRType.
      Range typeFilteredRange(guard);

      // If the output range is updated by adding the inner range,
      // then the MIRType acts as an effectful filter. As we do not know
      // whether this filtered Range might change the result of the
      // previous comparison, we have to keep this instruction as a guard
      // because it has to bailout in order to restrict the Range to its
      // MIRType.
      if (typeFilteredRange.update(guard->range())) {
        continue;
      }
    }

    guard->setNotGuardRangeBailouts();

    // Propagate the guard to its operands.
    for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
      MDefinition* operand = guard->getOperand(op);

      if (operand->isInWorklist()) {
        continue;
      }

      MOZ_ASSERT(!operand->isGuardRangeBailouts());

      operand->setInWorklist();
      operand->setGuardRangeBailouts();
      if (!guards.append(operand)) {
        return false;
      }
    }
  }

  for (size_t i = 0; i < guards.length(); i++) {
    MDefinition* guard = guards[i];
    guard->setNotInWorklist();
  }

  return true;
}