/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/RangeAnalysis.h"

#include "mozilla/MathAlgorithms.h"

#include <algorithm>

#include "jsmath.h"

#include "jit/CompileInfo.h"
#include "jit/IonAnalysis.h"
#include "jit/JitSpewer.h"
#include "jit/MIR.h"
#include "jit/MIRGenerator.h"
#include "jit/MIRGraph.h"
#include "js/Conversions.h"
#include "js/ScalarType.h"  // js::Scalar::Type
#include "util/CheckedArithmetic.h"
#include "util/Unicode.h"
#include "vm/ArgumentsObject.h"
#include "vm/TypedArrayObject.h"
#include "vm/Uint8Clamped.h"

#include "vm/BytecodeUtil-inl.h"

using namespace js;
using namespace js::jit;

using JS::GenericNaN;
using JS::ToInt32;
using mozilla::Abs;
using mozilla::CountLeadingZeroes32;
using mozilla::ExponentComponent;
using mozilla::FloorLog2;
using mozilla::IsNegativeZero;
using mozilla::NegativeInfinity;
using mozilla::NumberEqualsInt32;
using mozilla::PositiveInfinity;
// [SMDOC] IonMonkey Range Analysis
//
// This algorithm is based on the paper "Eliminating Range Checks Using
// Static Single Assignment Form" by Gough and Klaeren.
//
// We associate a range object with each SSA name, and the ranges are consulted
// in order to determine whether overflow is possible for arithmetic
// computations.
//
// An important source of range information that requires care to take
// advantage of is conditional control flow. Consider the code below:
//
// if (x < 0) {
//   y = x + 2000000000;
// } else {
//   if (x < 1000000000) {
//     y = x * 2;
//   } else {
//     y = x - 3000000000;
//   }
// }
//
// The arithmetic operations in this code cannot overflow, but it is not
// sufficient to simply associate each name with a range, since the information
// differs between basic blocks. The traditional dataflow approach would be to
// associate ranges with (name, basic block) pairs. This solution is not
// satisfying, since we lose the benefit of SSA form: in SSA form, each
// definition has a unique name, so there is no need to track information about
// the control flow of the program.
//
// The approach used here is to add a new form of pseudo operation called a
// beta node, which associates range information with a value. These beta
// instructions take one argument and additionally have an auxiliary constant
// range associated with them. Operationally, beta nodes are just copies, but
// the invariant expressed by beta node copies is that the output will fall
// inside the range given by the beta node. Gough and Klaeren refer to SSA
// extended with these beta nodes as XSA form. The following shows the example
// code transformed into XSA form:
//
// if (x < 0) {
//   x1 = Beta(x, [INT_MIN, -1]);
//   y1 = x1 + 2000000000;
// } else {
//   x2 = Beta(x, [0, INT_MAX]);
//   if (x2 < 1000000000) {
//     x3 = Beta(x2, [INT_MIN, 999999999]);
//     y2 = x3*2;
//   } else {
//     x4 = Beta(x2, [1000000000, INT_MAX]);
//     y3 = x4 - 3000000000;
//   }
//   y4 = Phi(y2, y3);
// }
// y = Phi(y1, y4);
//
// We insert beta nodes for the purposes of range analysis (they might also be
// usefully used for other forms of bounds check elimination) and remove them
// after range analysis is performed. The remaining compiler phases do not ever
// encounter beta nodes.
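//
// For instance, in the branch guarded by x < 0 above, the beta node pins x1
// to [INT_MIN, -1], so x1 + 2000000000 lies in [INT_MIN + 2000000000,
// 1999999999], comfortably within int32 range; the same reasoning discharges
// the multiplication and subtraction in the other arms.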

static bool IsDominatedUse(MBasicBlock* block, MUse* use) {
  MNode* n = use->consumer();
  bool isPhi = n->isDefinition() && n->toDefinition()->isPhi();

  if (isPhi) {
    MPhi* phi = n->toDefinition()->toPhi();
    return block->dominates(phi->block()->getPredecessor(phi->indexOf(use)));
  }

  return block->dominates(n->block());
}

static inline void SpewRange(MDefinition* def) {
#ifdef JS_JITSPEW
  if (JitSpewEnabled(JitSpew_Range) && def->type() != MIRType::None &&
      def->range()) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    out.printf(" ");
    def->printName(out);
    out.printf(" has range ");
    def->range()->dump(out);
    out.printf("\n");
  }
#endif
}

#ifdef JS_JITSPEW
static const char* TruncateKindString(TruncateKind kind) {
  switch (kind) {
    case TruncateKind::NoTruncate:
      return "NoTruncate";
    case TruncateKind::TruncateAfterBailouts:
      return "TruncateAfterBailouts";
    case TruncateKind::IndirectTruncate:
      return "IndirectTruncate";
    case TruncateKind::Truncate:
      return "Truncate";
    default:
      MOZ_CRASH("Unknown truncate kind.");
  }
}

static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
                                bool shouldClone) {
  if (JitSpewEnabled(JitSpew_Range)) {
    JitSpewHeader(JitSpew_Range);
    Fprinter& out = JitSpewPrinter();
    out.printf(" ");
    out.printf("truncating ");
    def->printName(out);
    out.printf(" (kind: %s, clone: %d)\n", TruncateKindString(kind),
               shouldClone);
  }
}
#else
static inline void SpewTruncate(MDefinition* def, TruncateKind kind,
                                bool shouldClone) {}
#endif

TempAllocator& RangeAnalysis::alloc() const { return graph_.alloc(); }

void RangeAnalysis::replaceDominatedUsesWith(MDefinition* orig,
                                             MDefinition* dom,
                                             MBasicBlock* block) {
  for (MUseIterator i(orig->usesBegin()); i != orig->usesEnd();) {
    MUse* use = *i++;
    if (use->consumer() != dom && IsDominatedUse(block, use)) {
      use->replaceProducer(dom);
    }
  }
}

bool RangeAnalysis::addBetaNodes() {
  JitSpew(JitSpew_Range, "Adding beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    JitSpew(JitSpew_Range, "Looking at block %u", block->id());

    BranchDirection branch_dir;
    MTest* test = block->immediateDominatorBranch(&branch_dir);

    if (!test || !test->getOperand(0)->isCompare()) {
      continue;
    }

    MCompare* compare = test->getOperand(0)->toCompare();

    if (!compare->isNumericComparison()) {
      continue;
    }

    // TODO: support unsigned comparisons
    if (compare->compareType() == MCompare::Compare_UInt32) {
      continue;
    }

    // isNumericComparison should return false for UIntPtr.
    MOZ_ASSERT(compare->compareType() != MCompare::Compare_UIntPtr);

    MDefinition* left = compare->getOperand(0);
    MDefinition* right = compare->getOperand(1);
    double bound;
    double conservativeLower = NegativeInfinity<double>();
    double conservativeUpper = PositiveInfinity<double>();
    MDefinition* val = nullptr;

    JSOp jsop = compare->jsop();

    if (branch_dir == FALSE_BRANCH) {
      jsop = NegateCompareOp(jsop);
      conservativeLower = GenericNaN();
      conservativeUpper = GenericNaN();
    }

    MConstant* leftConst = left->maybeConstantValue();
    MConstant* rightConst = right->maybeConstantValue();
    if (leftConst && leftConst->isTypeRepresentableAsDouble()) {
      bound = leftConst->numberToDouble();
      val = right;
      jsop = ReverseCompareOp(jsop);
    } else if (rightConst && rightConst->isTypeRepresentableAsDouble()) {
      bound = rightConst->numberToDouble();
      val = left;
    } else if (left->type() == MIRType::Int32 &&
               right->type() == MIRType::Int32) {
      MDefinition* smaller = nullptr;
      MDefinition* greater = nullptr;
      if (jsop == JSOp::Lt) {
        smaller = left;
        greater = right;
      } else if (jsop == JSOp::Gt) {
        smaller = right;
        greater = left;
      }
      if (smaller && greater) {
        if (!alloc().ensureBallast()) {
          return false;
        }

        MBeta* beta;
        beta = MBeta::New(
            alloc(), smaller,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN, JSVAL_INT_MAX - 1));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(smaller, beta, block);
        JitSpew(JitSpew_Range, " Adding beta node for smaller %u",
                smaller->id());
        beta = MBeta::New(
            alloc(), greater,
            Range::NewInt32Range(alloc(), JSVAL_INT_MIN + 1, JSVAL_INT_MAX));
        block->insertBefore(*block->begin(), beta);
        replaceDominatedUsesWith(greater, beta, block);
        JitSpew(JitSpew_Range, " Adding beta node for greater %u",
                greater->id());
      }
      continue;
    } else {
      continue;
    }

    // At this point, one of the operands of the compare is a constant, and
    // val is the other operand.
    MOZ_ASSERT(val);

    Range comp;
    switch (jsop) {
      case JSOp::Le:
        comp.setDouble(conservativeLower, bound);
        break;
      case JSOp::Lt:
        // For integers, if x < c, the upper bound of x is c-1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeSub(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(conservativeLower, bound);

        // Negative zero is not less than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::Ge:
        comp.setDouble(bound, conservativeUpper);
        break;
      case JSOp::Gt:
        // For integers, if x > c, the lower bound of x is c+1.
        if (val->type() == MIRType::Int32) {
          int32_t intbound;
          if (NumberEqualsInt32(bound, &intbound) &&
              SafeAdd(intbound, 1, &intbound)) {
            bound = intbound;
          }
        }
        comp.setDouble(bound, conservativeUpper);

        // Negative zero is not greater than zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
        }
        break;
      case JSOp::StrictEq:
      case JSOp::Eq:
        comp.setDouble(bound, bound);
        break;
      case JSOp::StrictNe:
      case JSOp::Ne:
        // Negative zero is not not-equal to zero.
        if (bound == 0) {
          comp.refineToExcludeNegativeZero();
          break;
        }
        continue;  // well, we could have
                   // [-\inf, bound-1] U [bound+1, \inf] but we only use
                   // contiguous ranges.
      default:
        continue;
    }

    if (JitSpewEnabled(JitSpew_Range)) {
      JitSpewHeader(JitSpew_Range);
      Fprinter& out = JitSpewPrinter();
      out.printf(" Adding beta node for %u with range ", val->id());
      comp.dump(out);
      out.printf("\n");
    }

    if (!alloc().ensureBallast()) {
      return false;
    }

    MBeta* beta = MBeta::New(alloc(), val, new (alloc()) Range(comp));
    block->insertBefore(*block->begin(), beta);
    replaceDominatedUsesWith(val, beta, block);
  }

  return true;
}

bool RangeAnalysis::removeBetaNodes() {
  JitSpew(JitSpew_Range, "Removing beta nodes");

  for (PostorderIterator i(graph_.poBegin()); i != graph_.poEnd(); i++) {
    MBasicBlock* block = *i;
    for (MDefinitionIterator iter(*i); iter;) {
      MDefinition* def = *iter++;
      if (def->isBeta()) {
        auto* beta = def->toBeta();
        MDefinition* op = beta->input();
        JitSpew(JitSpew_Range, " Removing beta node %u for %u", beta->id(),
                op->id());
        beta->justReplaceAllUsesWith(op);
        block->discard(beta);
      } else {
        // We only place Beta nodes at the beginning of basic
        // blocks, so if we see something else, we can move on
        // to the next block.
        break;
      }
    }
  }
  return true;
}

void SymbolicBound::dump(GenericPrinter& out) const {
  if (loop) {
    out.printf("[loop] ");
  }
  sum.dump(out);
}

void SymbolicBound::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

// Test whether the given range's exponent tells us anything that its lower
// and upper bound values don't.
static bool IsExponentInteresting(const Range* r) {
  // If it lacks either a lower or upper bound, the exponent is interesting.
  if (!r->hasInt32Bounds()) {
    return true;
  }

  // Otherwise if there's no fractional part, the lower and upper bounds,
  // which are integers, are perfectly precise.
  if (!r->canHaveFractionalPart()) {
    return false;
  }

  // Otherwise, if the bounds are conservatively rounded across a power-of-two
  // boundary, the exponent may imply a tighter range.
  return FloorLog2(std::max(Abs(r->lower()), Abs(r->upper()))) > r->exponent();
}

void Range::dump(GenericPrinter& out) const {
  assertInvariants();

  // Floating-point or Integer subset.
  if (canHaveFractionalPart_) {
    out.printf("F");
  } else {
    out.printf("I");
  }

  out.printf("[");

  if (!hasInt32LowerBound_) {
    out.printf("?");
  } else {
    out.printf("%d", lower_);
  }
  if (symbolicLower_) {
    out.printf(" {");
    symbolicLower_->dump(out);
    out.printf("}");
  }

  out.printf(", ");

  if (!hasInt32UpperBound_) {
    out.printf("?");
  } else {
    out.printf("%d", upper_);
  }
  if (symbolicUpper_) {
    out.printf(" {");
    symbolicUpper_->dump(out);
    out.printf("}");
  }

  out.printf("]");

  bool includesNaN = max_exponent_ == IncludesInfinityAndNaN;
  bool includesNegativeInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32LowerBound_;
  bool includesPositiveInfinity =
      max_exponent_ >= IncludesInfinity && !hasInt32UpperBound_;
  bool includesNegativeZero = canBeNegativeZero_;

  if (includesNaN || includesNegativeInfinity || includesPositiveInfinity ||
      includesNegativeZero) {
    out.printf(" (");
    bool first = true;
    if (includesNaN) {
      if (first) {
        first = false;
      } else {
        out.printf(" ");
      }
      out.printf("U NaN");
    }
    if (includesNegativeInfinity) {
      if (first) {
        first = false;
      } else {
        out.printf(" ");
      }
      out.printf("U -Infinity");
    }
    if (includesPositiveInfinity) {
      if (first) {
        first = false;
      } else {
        out.printf(" ");
      }
      out.printf("U Infinity");
    }
    if (includesNegativeZero) {
      if (first) {
        first = false;
      } else {
        out.printf(" ");
      }
      out.printf("U -0");
    }
    out.printf(")");
  }
  if (max_exponent_ < IncludesInfinity && IsExponentInteresting(this)) {
    out.printf(" (< pow(2, %d+1))", max_exponent_);
  }
}

void Range::dump() const {
  Fprinter out(stderr);
  dump(out);
  out.printf("\n");
  out.finish();
}

Range* Range::intersect(TempAllocator& alloc, const Range* lhs,
                        const Range* rhs, bool* emptyRange) {
  *emptyRange = false;

  if (!lhs && !rhs) {
    return nullptr;
  }

  if (!lhs) {
    return new (alloc) Range(*rhs);
  }
  if (!rhs) {
    return new (alloc) Range(*lhs);
  }

  int32_t newLower = std::max(lhs->lower_, rhs->lower_);
  int32_t newUpper = std::min(lhs->upper_, rhs->upper_);

  // If upper < lower, then we have conflicting constraints. Consider:
  //
  //   if (x < 0) {
  //     if (x > 0) {
  //       [Some code.]
  //     }
  //   }
  //
  // In this case, the block is unreachable.
  if (newUpper < newLower) {
    // If both ranges can be NaN, the result can still be NaN.
    if (!lhs->canBeNaN() || !rhs->canBeNaN()) {
      *emptyRange = true;
    }
    return nullptr;
  }

  bool newHasInt32LowerBound =
      lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ && rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ && rhs->canBeNegativeZero_);

  uint16_t newExponent = std::min(lhs->max_exponent_, rhs->max_exponent_);

  // NaN is a special value which is neither greater than infinity nor less
  // than negative infinity. When we intersect two ranges like [?, 0] and
  // [0, ?], we can end up thinking we have both a lower and upper bound, even
  // though NaN is still possible. In this case, just be conservative, since
  // any case where we can have NaN is not especially interesting.
  if (newHasInt32LowerBound && newHasInt32UpperBound &&
      newExponent == IncludesInfinityAndNaN) {
    return nullptr;
  }

  // If one of the ranges has a fractional part and the other doesn't, it's
  // possible that we will have computed a newExponent that's more precise
  // than our newLower and newUpper. This is unusual, so we handle it here
  // instead of in optimize().
  //
  // For example, consider the range F[0,1.5]. Range analysis represents the
  // lower and upper bound as integers, so we'd actually have
  // F[0,2] (< pow(2, 0+1)). In this case, the exponent gives us a slightly
  // more precise upper bound than the integer upper bound.
  //
  // When intersecting such a range with an integer range, the fractional part
  // of the range is dropped. The max exponent of 0 remains valid, so the
  // upper bound needs to be adjusted to 1.
  //
  // When intersecting F[0,2] (< pow(2, 0+1)) with a range like F[2,4],
  // the naive intersection is I[2,2], but since the max exponent tells us
  // that the value is always less than 2, the intersection is actually empty.
  if (lhs->canHaveFractionalPart() != rhs->canHaveFractionalPart() ||
      (lhs->canHaveFractionalPart() && newHasInt32LowerBound &&
       newHasInt32UpperBound && newLower == newUpper)) {
    refineInt32BoundsByExponent(newExponent, &newLower, &newHasInt32LowerBound,
                                &newUpper, &newHasInt32UpperBound);

    // If we're intersecting two ranges that don't overlap, this could also
    // push the bounds past each other, since the actual intersection is
    // the empty set.
    if (newLower > newUpper) {
      *emptyRange = true;
      return nullptr;
    }
  }

  return new (alloc)
      Range(newLower, newHasInt32LowerBound, newUpper, newHasInt32UpperBound,
            newCanHaveFractionalPart, newMayIncludeNegativeZero, newExponent);
}

void Range::unionWith(const Range* other) {
  int32_t newLower = std::min(lower_, other->lower_);
  int32_t newUpper = std::max(upper_, other->upper_);

  bool newHasInt32LowerBound =
      hasInt32LowerBound_ && other->hasInt32LowerBound_;
  bool newHasInt32UpperBound =
      hasInt32UpperBound_ && other->hasInt32UpperBound_;

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      canHaveFractionalPart_ || other->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(canBeNegativeZero_ || other->canBeNegativeZero_);

  uint16_t newExponent = std::max(max_exponent_, other->max_exponent_);

  rawInitialize(newLower, newHasInt32LowerBound, newUpper,
                newHasInt32UpperBound, newCanHaveFractionalPart,
                newMayIncludeNegativeZero, newExponent);
}

Range::Range(const MDefinition* def)
    : symbolicLower_(nullptr), symbolicUpper_(nullptr) {
  if (const Range* other = def->range()) {
    // The instruction has range information; use it.
    *this = *other;

    // Simulate the effect of converting the value to its type.
    // Note: we cannot clamp here, since ranges aren't allowed to shrink
    // and truncation can increase the range again. So we do wrapAround to
    // mimic a possible truncation.
    switch (def->type()) {
      case MIRType::Int32:
        // MToNumberInt32 cannot truncate. So we can safely clamp.
        if (def->isToNumberInt32()) {
          clampToInt32();
        } else {
          wrapAroundToInt32();
        }
        break;
      case MIRType::Boolean:
        wrapAroundToBoolean();
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        break;
    }
  } else {
    // Otherwise just use type information. We can trust the type here
    // because we don't care what value the instruction actually produces,
    // but what value we might get after we get past the bailouts.
    switch (def->type()) {
      case MIRType::Int32:
        setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
        break;
      case MIRType::Boolean:
        setInt32(0, 1);
        break;
      case MIRType::None:
        MOZ_CRASH("Asking for the range of an instruction with no value");
      default:
        setUnknown();
        break;
    }
  }

  // As a special case, MUrsh is permitted to claim a result type of
  // MIRType::Int32 while actually returning values in [0,UINT32_MAX] without
  // bailouts. If range analysis hasn't ruled out values in
  // (INT32_MAX,UINT32_MAX], set the range to be conservatively correct for
  // use as either a uint32 or an int32.
  if (!hasInt32UpperBound() && def->isUrsh() &&
      def->toUrsh()->bailoutsDisabled() && def->type() != MIRType::Int64) {
    lower_ = INT32_MIN;
  }

  assertInvariants();
}

static uint16_t ExponentImpliedByDouble(double d) {
  // Handle the special values.
  if (std::isnan(d)) {
    return Range::IncludesInfinityAndNaN;
  }
  if (std::isinf(d)) {
    return Range::IncludesInfinity;
  }

  // Otherwise take the exponent part and clamp it at zero, since the Range
  // class doesn't track fractional ranges.
  return uint16_t(std::max(int_fast16_t(0), ExponentComponent(d)));
}

void Range::setDouble(double l, double h) {
  MOZ_ASSERT(!(l > h));

  // Infer lower_, upper_, hasInt32LowerBound_, and hasInt32UpperBound_.
  if (l >= INT32_MIN && l <= INT32_MAX) {
    lower_ = int32_t(::floor(l));
    hasInt32LowerBound_ = true;
  } else if (l >= INT32_MAX) {
    lower_ = INT32_MAX;
    hasInt32LowerBound_ = true;
  } else {
    lower_ = INT32_MIN;
    hasInt32LowerBound_ = false;
  }
  if (h >= INT32_MIN && h <= INT32_MAX) {
    upper_ = int32_t(::ceil(h));
    hasInt32UpperBound_ = true;
  } else if (h <= INT32_MIN) {
    upper_ = INT32_MIN;
    hasInt32UpperBound_ = true;
  } else {
    upper_ = INT32_MAX;
    hasInt32UpperBound_ = false;
  }

  // Infer max_exponent_.
  uint16_t lExp = ExponentImpliedByDouble(l);
  uint16_t hExp = ExponentImpliedByDouble(h);
  max_exponent_ = std::max(lExp, hExp);

  canHaveFractionalPart_ = ExcludesFractionalParts;
  canBeNegativeZero_ = ExcludesNegativeZero;

  // Infer the canHaveFractionalPart_ setting. We can have a
  // fractional part if the range crosses through the neighborhood of zero. We
  // won't have a fractional value if the value is always beyond the point at
  // which double precision can't represent fractional values.
  uint16_t minExp = std::min(lExp, hExp);
  bool includesNegative = std::isnan(l) || l < 0;
  bool includesPositive = std::isnan(h) || h > 0;
  bool crossesZero = includesNegative && includesPositive;
  if (crossesZero || minExp < MaxTruncatableExponent) {
    canHaveFractionalPart_ = IncludesFractionalParts;
  }

  // Infer the canBeNegativeZero_ setting. We can have a negative zero if
  // either bound is zero.
  if (!(l > 0) && !(h < 0)) {
    canBeNegativeZero_ = IncludesNegativeZero;
  }

  optimize();
}

void Range::setDoubleSingleton(double d) {
  setDouble(d, d);

  // The above setDouble call is for comparisons, and treats negative zero
  // as equal to zero. We're aiming for a minimum range, so we can clear the
  // negative zero flag if the value isn't actually negative zero.
  if (!IsNegativeZero(d)) {
    canBeNegativeZero_ = ExcludesNegativeZero;
  }

  assertInvariants();
}

static inline bool MissingAnyInt32Bounds(const Range* lhs, const Range* rhs) {
  return !lhs->hasInt32Bounds() || !rhs->hasInt32Bounds();
}

Range* Range::add(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  int64_t l = (int64_t)lhs->lower_ + (int64_t)rhs->lower_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32LowerBound()) {
    l = NoInt32LowerBound;
  }

  int64_t h = (int64_t)lhs->upper_ + (int64_t)rhs->upper_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32UpperBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
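  // (For example, two finite values whose magnitudes are each below 2^k sum
  // to a magnitude below 2^(k+1), so bumping the larger exponent by one keeps
  // the bound conservative.)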
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity + -Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  return new (alloc) Range(
      l, h,
      FractionalPartFlag(lhs->canHaveFractionalPart() ||
                         rhs->canHaveFractionalPart()),
      NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeNegativeZero()),
      e);
}

Range* Range::sub(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  int64_t l = (int64_t)lhs->lower_ - (int64_t)rhs->upper_;
  if (!lhs->hasInt32LowerBound() || !rhs->hasInt32UpperBound()) {
    l = NoInt32LowerBound;
  }

  int64_t h = (int64_t)lhs->upper_ - (int64_t)rhs->lower_;
  if (!lhs->hasInt32UpperBound() || !rhs->hasInt32LowerBound()) {
    h = NoInt32UpperBound;
  }

  // The exponent is at most one greater than the greater of the operands'
  // exponents, except for NaN and infinity cases.
  uint16_t e = std::max(lhs->max_exponent_, rhs->max_exponent_);
  if (e <= Range::MaxFiniteExponent) {
    ++e;
  }

  // Infinity - Infinity is NaN.
  if (lhs->canBeInfiniteOrNaN() && rhs->canBeInfiniteOrNaN()) {
    e = Range::IncludesInfinityAndNaN;
  }

  return new (alloc)
      Range(l, h,
            FractionalPartFlag(lhs->canHaveFractionalPart() ||
                               rhs->canHaveFractionalPart()),
            NegativeZeroFlag(lhs->canBeNegativeZero() && rhs->canBeZero()), e);
}

Range* Range::and_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // If both numbers can be negative, the result can be negative in the whole
  // range.
  if (lhs->lower() < 0 && rhs->lower() < 0) {
    return Range::NewInt32Range(alloc, INT32_MIN,
                                std::max(lhs->upper(), rhs->upper()));
  }

  // At most one of the two numbers can be negative, so:
  // - the result can't be negative, and
  // - the upper bound is the minimum of both upper bounds...
  int32_t lower = 0;
  int32_t upper = std::min(lhs->upper(), rhs->upper());

  // ...EXCEPT when one operand can be negative, because a negative value can
  // let the other operand's maximum value through unchanged:
  // -1 & 5 = 5
  if (lhs->lower() < 0) {
    upper = rhs->upper();
  }
  if (rhs->lower() < 0) {
    upper = lhs->upper();
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

Range* Range::or_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  // When one operand is always 0 or always -1, it's a special case where we
  // can compute a fully precise result. Handling these up front also
  // protects the code below from calling CountLeadingZeroes32 with a zero
  // operand or from shifting an int32_t by 32.
  if (lhs->lower() == lhs->upper()) {
    if (lhs->lower() == 0) {
      return new (alloc) Range(*rhs);
    }
    if (lhs->lower() == -1) {
      return new (alloc) Range(*lhs);
    }
  }
  if (rhs->lower() == rhs->upper()) {
    if (rhs->lower() == 0) {
      return new (alloc) Range(*lhs);
    }
    if (rhs->lower() == -1) {
      return new (alloc) Range(*rhs);
    }
  }

  // The code below uses CountLeadingZeroes32, which has undefined behavior
  // if its operand is 0. We rely on the code above to protect it.
  MOZ_ASSERT_IF(lhs->lower() >= 0, lhs->upper() != 0);
  MOZ_ASSERT_IF(rhs->lower() >= 0, rhs->upper() != 0);
  MOZ_ASSERT_IF(lhs->upper() < 0, lhs->lower() != -1);
  MOZ_ASSERT_IF(rhs->upper() < 0, rhs->lower() != -1);

  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;

  if (lhs->lower() >= 0 && rhs->lower() >= 0) {
    // Both operands are non-negative, so the result won't be less than either.
    lower = std::max(lhs->lower(), rhs->lower());
    // The result will have leading zeros where both operands have leading
    // zeros. CountLeadingZeroes32 of a non-negative int32 will be at least 1
    // to account for the sign bit.
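    // (For example, if both upper bounds fit in 16 bits, each operand has at
    // least 16 leading zeros, so the result is bounded by
    // UINT32_MAX >> 16 == 0xffff.)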
    upper = int32_t(UINT32_MAX >> std::min(CountLeadingZeroes32(lhs->upper()),
                                           CountLeadingZeroes32(rhs->upper())));
  } else {
    // The result will have leading ones where either operand has leading ones.
    if (lhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~lhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
    if (rhs->upper() < 0) {
      unsigned leadingOnes = CountLeadingZeroes32(~rhs->lower());
      lower = std::max(lower, ~int32_t(UINT32_MAX >> leadingOnes));
      upper = -1;
    }
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

Range* Range::xor_(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  int32_t lhsLower = lhs->lower();
  int32_t lhsUpper = lhs->upper();
  int32_t rhsLower = rhs->lower();
  int32_t rhsUpper = rhs->upper();
  bool invertAfter = false;

  // If either operand is negative, bitwise-negate it, and arrange to negate
  // the result; ~((~x)^y) == x^y. If both are negative the negations on the
  // result cancel each other out; effectively this is (~x)^(~y) == x^y.
  // These transformations reduce the number of cases we have to handle below.
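  // (For example, an lhs range of [-4, -1] becomes [0, 3] after negation, and
  // the result is bitwise-negated back at the end.)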
  if (lhsUpper < 0) {
    lhsLower = ~lhsLower;
    lhsUpper = ~lhsUpper;
    std::swap(lhsLower, lhsUpper);
    invertAfter = !invertAfter;
  }
  if (rhsUpper < 0) {
    rhsLower = ~rhsLower;
    rhsUpper = ~rhsUpper;
    std::swap(rhsLower, rhsUpper);
    invertAfter = !invertAfter;
  }

  // Handle cases where lhs or rhs is always zero specially, because they're
  // easy cases where we can be perfectly precise, and because it protects the
  // CountLeadingZeroes32 calls below from seeing 0 operands, which would be
  // undefined behavior.
  int32_t lower = INT32_MIN;
  int32_t upper = INT32_MAX;
  if (lhsLower == 0 && lhsUpper == 0) {
    upper = rhsUpper;
    lower = rhsLower;
  } else if (rhsLower == 0 && rhsUpper == 0) {
    upper = lhsUpper;
    lower = lhsLower;
  } else if (lhsLower >= 0 && rhsLower >= 0) {
    // Both operands are non-negative. The result will be non-negative.
    lower = 0;
    // To compute the upper value, take each operand's upper value and
    // set all bits that don't correspond to leading zero bits in the
    // other to one. For each one, this gives an upper bound for the
    // result, so we can take the minimum between the two.
    unsigned lhsLeadingZeros = CountLeadingZeroes32(lhsUpper);
    unsigned rhsLeadingZeros = CountLeadingZeroes32(rhsUpper);
    upper = std::min(rhsUpper | int32_t(UINT32_MAX >> lhsLeadingZeros),
                     lhsUpper | int32_t(UINT32_MAX >> rhsLeadingZeros));
  }

  // If we bitwise-negated one (but not both) of the operands above, apply the
  // bitwise-negate to the result, completing ~((~x)^y) == x^y.
  if (invertAfter) {
    lower = ~lower;
    upper = ~upper;
    std::swap(lower, upper);
  }

  return Range::NewInt32Range(alloc, lower, upper);
}

Range* Range::not_(TempAllocator& alloc, const Range* op) {
  MOZ_ASSERT(op->isInt32());
  return Range::NewInt32Range(alloc, ~op->upper(), ~op->lower());
}

Range* Range::mul(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);

  NegativeZeroFlag newMayIncludeNegativeZero = NegativeZeroFlag(
      (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
      (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative()));

  uint16_t exponent;
  if (!lhs->canBeInfiniteOrNaN() && !rhs->canBeInfiniteOrNaN()) {
    // Two finite values.
    exponent = lhs->numBits() + rhs->numBits() - 1;
    if (exponent > Range::MaxFiniteExponent) {
      exponent = Range::IncludesInfinity;
    }
  } else if (!lhs->canBeNaN() && !rhs->canBeNaN() &&
             !(lhs->canBeZero() && rhs->canBeInfiniteOrNaN()) &&
             !(rhs->canBeZero() && lhs->canBeInfiniteOrNaN())) {
    // Two values that multiplied together won't produce a NaN.
    exponent = Range::IncludesInfinity;
  } else {
    // Could be anything.
    exponent = Range::IncludesInfinityAndNaN;
  }

  if (MissingAnyInt32Bounds(lhs, rhs)) {
    return new (alloc)
        Range(NoInt32LowerBound, NoInt32UpperBound, newCanHaveFractionalPart,
              newMayIncludeNegativeZero, exponent);
  }
  int64_t a = (int64_t)lhs->lower() * (int64_t)rhs->lower();
  int64_t b = (int64_t)lhs->lower() * (int64_t)rhs->upper();
  int64_t c = (int64_t)lhs->upper() * (int64_t)rhs->lower();
  int64_t d = (int64_t)lhs->upper() * (int64_t)rhs->upper();
  return new (alloc)
      Range(std::min(std::min(a, b), std::min(c, d)),
            std::max(std::max(a, b), std::max(c, d)), newCanHaveFractionalPart,
            newMayIncludeNegativeZero, exponent);
}

Range* Range::lsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;

  // If the shift doesn't lose bits or shift bits into the sign bit, we
  // can simply compute the correct range by shifting.
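  // (For example, [0, 1000] << 4 is computed exactly as [0, 16000]; a bound
  // that would shift bits out or into the sign bit falls through to the
  // conservative range below.)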
  if ((int32_t)((uint32_t)lhs->lower() << shift << 1 >> shift >> 1) ==
          lhs->lower() &&
      (int32_t)((uint32_t)lhs->upper() << shift << 1 >> shift >> 1) ==
          lhs->upper()) {
    return Range::NewInt32Range(alloc, uint32_t(lhs->lower()) << shift,
                                uint32_t(lhs->upper()) << shift);
  }

  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range* Range::rsh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  MOZ_ASSERT(lhs->isInt32());
  int32_t shift = c & 0x1f;
  return Range::NewInt32Range(alloc, lhs->lower() >> shift,
                              lhs->upper() >> shift);
}

Range* Range::ursh(TempAllocator& alloc, const Range* lhs, int32_t c) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());

  int32_t shift = c & 0x1f;

  // If the value is always non-negative or always negative, we can simply
  // compute the correct range by shifting.
  if (lhs->isFiniteNonNegative() || lhs->isFiniteNegative()) {
    return Range::NewUInt32Range(alloc, uint32_t(lhs->lower()) >> shift,
                                 uint32_t(lhs->upper()) >> shift);
  }

  // Otherwise return the most general range after the shift.
  return Range::NewUInt32Range(alloc, 0, UINT32_MAX >> shift);
}

Range* Range::lsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
}

Range* Range::rsh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());

  // Canonicalize the shift range to 0 to 31.
  int32_t shiftLower = rhs->lower();
  int32_t shiftUpper = rhs->upper();
  if ((int64_t(shiftUpper) - int64_t(shiftLower)) >= 31) {
    shiftLower = 0;
    shiftUpper = 31;
  } else {
    shiftLower &= 0x1f;
    shiftUpper &= 0x1f;
    if (shiftLower > shiftUpper) {
      shiftLower = 0;
      shiftUpper = 31;
    }
  }
  MOZ_ASSERT(shiftLower >= 0 && shiftUpper <= 31);

  // The lhs bounds are signed, thus the minimum is either the lower bound
  // shifted by the smallest shift if negative, or the lower bound shifted by
  // the biggest shift otherwise. And the opposite for the maximum.
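  // (For example, with lhs = [-8, 16] and a shift range of [1, 2], the
  // minimum is -8 >> 1 == -4 and the maximum is 16 >> 1 == 8.)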
  int32_t lhsLower = lhs->lower();
  int32_t min = lhsLower < 0 ? lhsLower >> shiftLower : lhsLower >> shiftUpper;
  int32_t lhsUpper = lhs->upper();
  int32_t max = lhsUpper >= 0 ? lhsUpper >> shiftLower : lhsUpper >> shiftUpper;

  return Range::NewInt32Range(alloc, min, max);
}

Range* Range::ursh(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // ursh's left operand is uint32, not int32, but for range analysis we
  // currently approximate it as int32. We assume here that the range has
  // already been adjusted accordingly by our callers.
  MOZ_ASSERT(lhs->isInt32());
  MOZ_ASSERT(rhs->isInt32());
  return Range::NewUInt32Range(
      alloc, 0, lhs->isFiniteNonNegative() ? lhs->upper() : UINT32_MAX);
}

Range* Range::abs(TempAllocator& alloc, const Range* op) {
  int32_t l = op->lower_;
  int32_t u = op->upper_;
  FractionalPartFlag canHaveFractionalPart = op->canHaveFractionalPart_;

  // Abs never produces a negative zero.
  NegativeZeroFlag canBeNegativeZero = ExcludesNegativeZero;
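
  // Note that -INT32_MIN is not representable as an int32, so when a bound is
  // INT32_MIN its negation is clamped to INT32_MAX and the result is treated
  // as having no int32 upper bound.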
  return new (alloc) Range(
      std::max(std::max(int32_t(0), l), u == INT32_MIN ? INT32_MAX : -u), true,
      std::max(std::max(int32_t(0), u), l == INT32_MIN ? INT32_MAX : -l),
      op->hasInt32Bounds() && l != INT32_MIN, canHaveFractionalPart,
      canBeNegativeZero, op->max_exponent_);
}

Range* Range::min(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  return new (alloc) Range(std::min(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ && rhs->hasInt32LowerBound_,
                           std::min(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ || rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}

Range* Range::max(TempAllocator& alloc, const Range* lhs, const Range* rhs) {
  // If either operand is NaN, the result is NaN.
  if (lhs->canBeNaN() || rhs->canBeNaN()) {
    return nullptr;
  }

  FractionalPartFlag newCanHaveFractionalPart = FractionalPartFlag(
      lhs->canHaveFractionalPart_ || rhs->canHaveFractionalPart_);
  NegativeZeroFlag newMayIncludeNegativeZero =
      NegativeZeroFlag(lhs->canBeNegativeZero_ || rhs->canBeNegativeZero_);

  return new (alloc) Range(std::max(lhs->lower_, rhs->lower_),
                           lhs->hasInt32LowerBound_ || rhs->hasInt32LowerBound_,
                           std::max(lhs->upper_, rhs->upper_),
                           lhs->hasInt32UpperBound_ && rhs->hasInt32UpperBound_,
                           newCanHaveFractionalPart, newMayIncludeNegativeZero,
                           std::max(lhs->max_exponent_, rhs->max_exponent_));
}

Range* Range::floor(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  // Decrement the lower bound of the copied range if op has a fractional part
  // and the lower bound is int32-defined. We also avoid decrementing when op
  // has a fractional part but lower_ >= JSVAL_INT_MAX.
  if (op->canHaveFractionalPart() && op->hasInt32LowerBound()) {
    copy->setLowerInit(int64_t(copy->lower_) - 1);
  }

  // Also refine max_exponent_ because floor may have decremented the int
  // value. If we have int32-defined bounds, just deduce it from those bounds;
  // if we don't, the value's max_exponent_ may have changed, so, since we want
  // to maintain an overestimate, we increment it when we can.
  if (copy->hasInt32Bounds())
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  else if (copy->max_exponent_ < MaxFiniteExponent)
    copy->max_exponent_++;

  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}

Range* Range::ceil(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);

  // We need to refine max_exponent_ because ceil may have incremented the int
  // value. If we have int32 bounds defined, just deduce it using the defined
  // bounds. Otherwise we can just increment its value, as we want to maintain
  // an overestimate.
  if (copy->hasInt32Bounds()) {
    copy->max_exponent_ = copy->exponentImpliedByInt32Bounds();
  } else if (copy->max_exponent_ < MaxFiniteExponent) {
    copy->max_exponent_++;
  }

  // If the range is definitely above 0 or below -1, we don't need to include
  // -0; otherwise we do.

  copy->canBeNegativeZero_ = ((copy->lower_ > 0) || (copy->upper_ <= -1))
                                 ? copy->canBeNegativeZero_
                                 : IncludesNegativeZero;

  copy->canHaveFractionalPart_ = ExcludesFractionalParts;
  copy->assertInvariants();
  return copy;
}

Range* Range::sign(TempAllocator& alloc, const Range* op) {
  if (op->canBeNaN()) {
    return nullptr;
  }

  return new (alloc) Range(std::max(std::min(op->lower_, 1), -1),
                           std::max(std::min(op->upper_, 1), -1),
                           Range::ExcludesFractionalParts,
                           NegativeZeroFlag(op->canBeNegativeZero()), 0);
}

Range* Range::NaNToZero(TempAllocator& alloc, const Range* op) {
  Range* copy = new (alloc) Range(*op);
  if (copy->canBeNaN()) {
    copy->max_exponent_ = Range::IncludesInfinity;
    if (!copy->canBeZero()) {
      Range zero;
      zero.setDoubleSingleton(0);
      copy->unionWith(&zero);
    }
  }
  copy->refineToExcludeNegativeZero();
  return copy;
}

bool Range::negativeZeroMul(const Range* lhs, const Range* rhs) {
  // The result can only be negative zero if both sides are finite and they
  // have differing signs.
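  // (For example, -2 * 0 and 0 * -2 both evaluate to -0, whereas 2 * 0 is +0.)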
  return (lhs->canHaveSignBitSet() && rhs->canBeFiniteNonNegative()) ||
         (rhs->canHaveSignBitSet() && lhs->canBeFiniteNonNegative());
}

bool Range::update(const Range* other) {
  bool changed = lower_ != other->lower_ ||
                 hasInt32LowerBound_ != other->hasInt32LowerBound_ ||
                 upper_ != other->upper_ ||
                 hasInt32UpperBound_ != other->hasInt32UpperBound_ ||
                 canHaveFractionalPart_ != other->canHaveFractionalPart_ ||
                 canBeNegativeZero_ != other->canBeNegativeZero_ ||
                 max_exponent_ != other->max_exponent_;
  if (changed) {
    lower_ = other->lower_;
    hasInt32LowerBound_ = other->hasInt32LowerBound_;
    upper_ = other->upper_;
    hasInt32UpperBound_ = other->hasInt32UpperBound_;
    canHaveFractionalPart_ = other->canHaveFractionalPart_;
    canBeNegativeZero_ = other->canBeNegativeZero_;
    max_exponent_ = other->max_exponent_;
    assertInvariants();
  }

  return changed;
}

///////////////////////////////////////////////////////////////////////////////
// Range Computation for MIR Nodes
///////////////////////////////////////////////////////////////////////////////

void MPhi::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range* range = nullptr;
  for (size_t i = 0, e = numOperands(); i < e; i++) {
    if (getOperand(i)->block()->unreachable()) {
      JitSpew(JitSpew_Range, "Ignoring unreachable input %u",
              getOperand(i)->id());
      continue;
    }

    // Peek at the pre-bailout range so we can take a short-cut; if any of
    // the operands has an unknown range, this phi has an unknown range.
    if (!getOperand(i)->range()) {
      return;
    }

    Range input(getOperand(i));

    if (range) {
      range->unionWith(&input);
    } else {
      range = new (alloc) Range(input);
    }
  }

  setRange(range);
}

void MBeta::computeRange(TempAllocator& alloc) {
  bool emptyRange = false;

  Range opRange(getOperand(0));
  Range* range = Range::intersect(alloc, &opRange, comparison_, &emptyRange);
  if (emptyRange) {
    JitSpew(JitSpew_Range, "Marking block for inst %u unreachable", id());
    block()->setUnreachableUnchecked();
  } else {
    setRange(range);
  }
}

void MConstant::computeRange(TempAllocator& alloc) {
  if (isTypeRepresentableAsDouble()) {
    double d = numberToDouble();
    setRange(Range::NewDoubleSingletonRange(alloc, d));
  } else if (type() == MIRType::Boolean) {
    bool b = toBoolean();
    setRange(Range::NewInt32Range(alloc, b, b));
  }
}

void MCharCodeAt::computeRange(TempAllocator& alloc) {
  // ECMA 262 says that the integer will be non-negative and at most 65535.
  setRange(Range::NewInt32Range(alloc, 0, unicode::UTF16Max));
}

void MCodePointAt::computeRange(TempAllocator& alloc) {
  setRange(Range::NewInt32Range(alloc, 0, unicode::NonBMPMax));
}

void MClampToUint8::computeRange(TempAllocator& alloc) {
  setRange(Range::NewUInt32Range(alloc, 0, 255));
}

void MBitAnd::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::and_(alloc, &left, &right));
}

void MBitOr::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::or_(alloc, &left, &right));
}

void MBitXor::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();
  right.wrapAroundToInt32();

  setRange(Range::xor_(alloc, &left, &right));
}

void MBitNot::computeRange(TempAllocator& alloc) {
  if (type() == MIRType::Int64) {
    return;
  }
  MOZ_ASSERT(type() == MIRType::Int32);

  Range op(getOperand(0));
  op.wrapAroundToInt32();

  setRange(Range::not_(alloc, &op));
}

void MLsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::lsh(alloc, &left, c));
    return;
  }

  right.wrapAroundToShiftCount();
  setRange(Range::lsh(alloc, &left, &right));
}

void MRsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  left.wrapAroundToInt32();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::rsh(alloc, &left, c));
    return;
  }

  right.wrapAroundToShiftCount();
  setRange(Range::rsh(alloc, &left, &right));
}

void MUrsh::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));

  // ursh can be thought of as converting its left operand to uint32, or it
  // can be thought of as converting its left operand to int32, and then
  // reinterpreting the int32 bits as a uint32 value. Both approaches yield
  // the same result. Since we lack support for full uint32 ranges, we use
  // the second interpretation, though it does cause us to be conservative.
  left.wrapAroundToInt32();
  right.wrapAroundToShiftCount();

  MConstant* rhsConst = getOperand(1)->maybeConstantValue();
  if (rhsConst && rhsConst->type() == MIRType::Int32) {
    int32_t c = rhsConst->toInt32();
    setRange(Range::ursh(alloc, &left, c));
  } else {
    setRange(Range::ursh(alloc, &left, &right));
  }

  MOZ_ASSERT(range()->lower() >= 0);
}

void MAbs::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range other(getOperand(0));
  Range* next = Range::abs(alloc, &other);
  if (implicitTruncate_) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MFloor::computeRange(TempAllocator& alloc) {
  Range other(getOperand(0));
  setRange(Range::floor(alloc, &other));
}

void MCeil::computeRange(TempAllocator& alloc) {
  Range other(getOperand(0));
  setRange(Range::ceil(alloc, &other));
}

void MClz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MCtz::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MPopcnt::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32) {
    return;
  }
  setRange(Range::NewUInt32Range(alloc, 0, 32));
}

void MMinMax::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }

  Range left(getOperand(0));
  Range right(getOperand(1));
  setRange(isMax() ? Range::max(alloc, &left, &right)
                   : Range::min(alloc, &left, &right));
}

void MAdd::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  Range* next = Range::add(alloc, &left, &right);
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MSub::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  Range* next = Range::sub(alloc, &left, &right);
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MMul::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range left(getOperand(0));
  Range right(getOperand(1));
  if (canBeNegativeZero()) {
    canBeNegativeZero_ = Range::negativeZeroMul(&left, &right);
  }
  Range* next = Range::mul(alloc, &left, &right);
  if (!next->canBeNegativeZero()) {
    canBeNegativeZero_ = false;
  }
  // Truncated multiplications could overflow in both directions
  if (isTruncated()) {
    next->wrapAroundToInt32();
  }
  setRange(next);
}

void MMod::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
    return;
  }

  // If RHS can be zero, the result can be NaN.
  if (rhs.lower() <= 0 && rhs.upper() >= 0) {
    return;
  }

  // If both operands are non-negative integers, we can optimize this to an
  // unsigned mod.
  if (type() == MIRType::Int32 && rhs.lower() > 0) {
    bool hasDoubles = lhs.lower() < 0 || lhs.canHaveFractionalPart() ||
                      rhs.canHaveFractionalPart();
    // It is not possible to check that lhs.lower() >= 0, since the range
    // of a ursh with rhs a 0 constant is wrapped around the int32 range in
    // Range::Range(). However, IsUint32Type() will only return true for
    // nodes that lie in the range [0, UINT32_MAX].
    bool hasUint32s =
        IsUint32Type(getOperand(0)) &&
        getOperand(1)->type() == MIRType::Int32 &&
        (IsUint32Type(getOperand(1)) || getOperand(1)->isConstant());
    if (!hasDoubles || hasUint32s) {
      unsigned_ = true;
    }
  }

  // For unsigned mod, we have to convert both operands to unsigned.
  // Note that we handled the case of a zero rhs above.
  if (unsigned_) {
    // The result of an unsigned mod will never be unsigned-greater than
    // either operand.
    uint32_t lhsBound = std::max<uint32_t>(lhs.lower(), lhs.upper());
    uint32_t rhsBound = std::max<uint32_t>(rhs.lower(), rhs.upper());

    // If either range crosses through -1 as a signed value, it could be
    // the maximum unsigned value when interpreted as unsigned. If the range
    // doesn't include -1, then the simple max value we computed above is
    // correct.
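    // (For example, a signed range of [-2, 3] contains -1, which is
    // 0xffffffff when reinterpreted as unsigned, so the simple bound of 3
    // would be wrong.)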
    if (lhs.lower() <= -1 && lhs.upper() >= -1) {
      lhsBound = UINT32_MAX;
    }
    if (rhs.lower() <= -1 && rhs.upper() >= -1) {
      rhsBound = UINT32_MAX;
    }

    // The result will never be equal to the rhs, and we shouldn't have
    // any rounding to worry about.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    --rhsBound;

    // This gives us two upper bounds, so we can take the best one.
    setRange(Range::NewUInt32Range(alloc, 0, std::min(lhsBound, rhsBound)));
    return;
  }

  // Math.abs(lhs % rhs) == Math.abs(lhs) % Math.abs(rhs).
  // First, the absolute value of the result will always be less than the
  // absolute value of rhs. (And if rhs is zero, the result is NaN).
  int64_t a = Abs<int64_t>(rhs.lower());
  int64_t b = Abs<int64_t>(rhs.upper());
  if (a == 0 && b == 0) {
    return;
  }
  int64_t rhsAbsBound = std::max(a, b);

  // If the value is known to be integer, less-than abs(rhs) is equivalent
  // to less-than-or-equal abs(rhs)-1. This is important for being able to
  // say that the result of x%256 is an 8-bit unsigned number.
  if (!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart()) {
    --rhsAbsBound;
  }

  // Next, the absolute value of the result will never be greater than the
  // absolute value of lhs.
  int64_t lhsAbsBound =
      std::max(Abs<int64_t>(lhs.lower()), Abs<int64_t>(lhs.upper()));

  // This gives us two upper bounds, so we can take the best one.
  int64_t absBound = std::min(lhsAbsBound, rhsAbsBound);

  // Now consider the sign of the result.
  // If lhs is non-negative, the result will be non-negative.
  // If lhs is non-positive, the result will be non-positive.
  int64_t lower = lhs.lower() >= 0 ? 0 : -absBound;
  int64_t upper = lhs.upper() <= 0 ? 0 : absBound;

  Range::FractionalPartFlag newCanHaveFractionalPart =
      Range::FractionalPartFlag(lhs.canHaveFractionalPart() ||
                                rhs.canHaveFractionalPart());

  // If the lhs can have the sign bit set and we can return a zero, it'll be a
  // negative zero.
  Range::NegativeZeroFlag newMayIncludeNegativeZero =
      Range::NegativeZeroFlag(lhs.canHaveSignBitSet());

  setRange(new (alloc) Range(lower, upper, newCanHaveFractionalPart,
                             newMayIncludeNegativeZero,
                             std::min(lhs.exponent(), rhs.exponent())));
}

void MDiv::computeRange(TempAllocator& alloc) {
  if (type() != MIRType::Int32 && type() != MIRType::Double) {
    return;
  }
  Range lhs(getOperand(0));
  Range rhs(getOperand(1));

  // If either operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!lhs.hasInt32Bounds() || !rhs.hasInt32Bounds()) {
    return;
  }

  // Something simple for now: When dividing by a positive rhs, the result
  // won't be further from zero than lhs.
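  // (For example, 7 / 2 == 3.5 still lies within lhs's range of [0, 7].)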
  if (lhs.lower() >= 0 && rhs.lower() >= 1) {
    setRange(new (alloc) Range(0, lhs.upper(), Range::IncludesFractionalParts,
                               Range::IncludesNegativeZero, lhs.exponent()));
  } else if (unsigned_ && rhs.lower() >= 1) {
    // We shouldn't set the unsigned flag if the inputs can have
    // fractional parts.
    MOZ_ASSERT(!lhs.canHaveFractionalPart() && !rhs.canHaveFractionalPart());
    // We shouldn't set the unsigned flag if the inputs can be
    // negative zero.
    MOZ_ASSERT(!lhs.canBeNegativeZero() && !rhs.canBeNegativeZero());
    // Unsigned division by a non-zero rhs will return a uint32 value.
    setRange(Range::NewUInt32Range(alloc, 0, UINT32_MAX));
  }
}

void MSqrt::computeRange(TempAllocator& alloc) {
  Range input(getOperand(0));

  // If the operand is a NaN, the result is NaN. This also conservatively
  // handles Infinity cases.
  if (!input.hasInt32Bounds()) {
    return;
  }

  // Sqrt of a negative non-zero value is NaN.
  if (input.lower() < 0) {
    return;
  }

  // Something simple for now: When taking the sqrt of a positive value, the
  // result won't be further from zero than the input.
  // And, sqrt of an integer may have a fractional part.
  setRange(new (alloc) Range(0, input.upper(), Range::IncludesFractionalParts,
                             input.canBeNegativeZero(), input.exponent()));
}
1714 void MToDouble::computeRange(TempAllocator& alloc) {
1715 setRange(new (alloc) Range(getOperand(0)));
1718 void MToFloat32::computeRange(TempAllocator& alloc) {}
1720 void MTruncateToInt32::computeRange(TempAllocator& alloc) {
1721 Range* output = new (alloc) Range(getOperand(0));
1722 output->wrapAroundToInt32();
1723 setRange(output);
1726 void MToNumberInt32::computeRange(TempAllocator& alloc) {
1727 // No clamping since this computes the range *before* bailouts.
1728 setRange(new (alloc) Range(getOperand(0)));
1731 void MBooleanToInt32::computeRange(TempAllocator& alloc) {
1732 setRange(Range::NewUInt32Range(alloc, 0, 1));
1735 void MLimitedTruncate::computeRange(TempAllocator& alloc) {
1736 Range* output = new (alloc) Range(input());
1737 setRange(output);
1740 static Range* GetArrayBufferViewRange(TempAllocator& alloc, Scalar::Type type) {
1741 switch (type) {
1742 case Scalar::Uint8Clamped:
1743 case Scalar::Uint8:
1744 return Range::NewUInt32Range(alloc, 0, UINT8_MAX);
1745 case Scalar::Uint16:
1746 return Range::NewUInt32Range(alloc, 0, UINT16_MAX);
1747 case Scalar::Uint32:
1748 return Range::NewUInt32Range(alloc, 0, UINT32_MAX);
1750 case Scalar::Int8:
1751 return Range::NewInt32Range(alloc, INT8_MIN, INT8_MAX);
1752 case Scalar::Int16:
1753 return Range::NewInt32Range(alloc, INT16_MIN, INT16_MAX);
1754 case Scalar::Int32:
1755 return Range::NewInt32Range(alloc, INT32_MIN, INT32_MAX);
1757 case Scalar::BigInt64:
1758 case Scalar::BigUint64:
1759 case Scalar::Int64:
1760 case Scalar::Simd128:
1761 case Scalar::Float32:
1762 case Scalar::Float64:
1763 case Scalar::MaxTypedArrayViewType:
1764 break;
1766 return nullptr;
1769 void MLoadUnboxedScalar::computeRange(TempAllocator& alloc) {
1770 // The result has Int32 type, and if this is a Uint32 load it may produce a
1771 // value outside of that range, but we have a bailout to handle those cases.
1772 setRange(GetArrayBufferViewRange(alloc, storageType()));
1775 void MLoadDataViewElement::computeRange(TempAllocator& alloc) {
1776 // The result has Int32 type, and if this is a Uint32 load it may produce a
1777 // value outside of that range, but we have a bailout to handle those cases.
1778 setRange(GetArrayBufferViewRange(alloc, storageType()));
1781 void MArrayLength::computeRange(TempAllocator& alloc) {
1782 // Array lengths can go up to UINT32_MAX. We will bail out if the array
1783 // length > INT32_MAX.
1784 MOZ_ASSERT(type() == MIRType::Int32);
1785 setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1788 void MInitializedLength::computeRange(TempAllocator& alloc) {
1789 setRange(
1790 Range::NewUInt32Range(alloc, 0, NativeObject::MAX_DENSE_ELEMENTS_COUNT));
1793 void MArrayBufferViewLength::computeRange(TempAllocator& alloc) {
1794 if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
1795 setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1799 void MArrayBufferViewByteOffset::computeRange(TempAllocator& alloc) {
1800 if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
1801 setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1805 void MResizableTypedArrayByteOffsetMaybeOutOfBounds::computeRange(
1806 TempAllocator& alloc) {
1807 if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
1808 setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1812 void MResizableTypedArrayLength::computeRange(TempAllocator& alloc) {
1813 if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
1814 setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1818 void MResizableDataViewByteLength::computeRange(TempAllocator& alloc) {
1819 if constexpr (ArrayBufferObject::ByteLengthLimit <= INT32_MAX) {
1820 setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1824 void MTypedArrayElementSize::computeRange(TempAllocator& alloc) {
1825 constexpr auto MaxTypedArraySize = sizeof(double);
1827 #define ASSERT_MAX_SIZE(_, T, N) \
1828 static_assert(sizeof(T) <= MaxTypedArraySize, \
1829 "unexpected typed array type exceeding 64-bits storage");
1830 JS_FOR_EACH_TYPED_ARRAY(ASSERT_MAX_SIZE)
1831 #undef ASSERT_MAX_SIZE
1833 setRange(Range::NewUInt32Range(alloc, 0, MaxTypedArraySize));
1836 void MStringLength::computeRange(TempAllocator& alloc) {
1837 static_assert(JSString::MAX_LENGTH <= UINT32_MAX,
1838 "NewUInt32Range requires a uint32 value");
1839 setRange(Range::NewUInt32Range(alloc, 0, JSString::MAX_LENGTH));
1842 void MArgumentsLength::computeRange(TempAllocator& alloc) {
1843 // This is a conservative upper bound on what |TooManyActualArguments|
1844 // checks. If exceeded, Ion will not be entered in the first place.
1845 static_assert(ARGS_LENGTH_MAX <= UINT32_MAX,
1846 "NewUInt32Range requires a uint32 value");
1847 setRange(Range::NewUInt32Range(alloc, 0, ARGS_LENGTH_MAX));
1850 void MBoundsCheck::computeRange(TempAllocator& alloc) {
1851 // Just transfer the incoming index range to the output. The length() is
1852 // also interesting, but it is handled as a bailout check, and we're
1853 // computing a pre-bailout range here.
1854 setRange(new (alloc) Range(index()));
1857 void MSpectreMaskIndex::computeRange(TempAllocator& alloc) {
1858 // Just transfer the incoming index range to the output for now.
1859 setRange(new (alloc) Range(index()));
1862 void MInt32ToIntPtr::computeRange(TempAllocator& alloc) {
1863 setRange(new (alloc) Range(input()));
1866 void MNonNegativeIntPtrToInt32::computeRange(TempAllocator& alloc) {
1867 // We will bail out if the IntPtr value > INT32_MAX.
1868 setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1871 void MArrayPush::computeRange(TempAllocator& alloc) {
1872 // MArrayPush returns the new array length. It bails out if the new length
1873 // doesn't fit in an Int32.
1874 MOZ_ASSERT(type() == MIRType::Int32);
1875 setRange(Range::NewUInt32Range(alloc, 0, INT32_MAX));
1878 void MMathFunction::computeRange(TempAllocator& alloc) {
1879 Range opRange(getOperand(0));
1880 switch (function()) {
1881 case UnaryMathFunction::SinNative:
1882 case UnaryMathFunction::SinFdlibm:
1883 case UnaryMathFunction::CosNative:
1884 case UnaryMathFunction::CosFdlibm:
1885 if (!opRange.canBeInfiniteOrNaN()) {
1886 setRange(Range::NewDoubleRange(alloc, -1.0, 1.0));
1888 break;
1889 default:
1890 break;
1894 void MSign::computeRange(TempAllocator& alloc) {
1895 Range opRange(getOperand(0));
1896 setRange(Range::sign(alloc, &opRange));
1899 void MRandom::computeRange(TempAllocator& alloc) {
1900 Range* r = Range::NewDoubleRange(alloc, 0.0, 1.0);
1902 // Random never returns negative zero.
1903 r->refineToExcludeNegativeZero();
1905 setRange(r);
1908 void MNaNToZero::computeRange(TempAllocator& alloc) {
1909 Range other(input());
1910 setRange(Range::NaNToZero(alloc, &other));
1913 ///////////////////////////////////////////////////////////////////////////////
1914 // Range Analysis
1915 ///////////////////////////////////////////////////////////////////////////////
1917 static BranchDirection NegateBranchDirection(BranchDirection dir) {
1918 return (dir == FALSE_BRANCH) ? TRUE_BRANCH : FALSE_BRANCH;
1921 bool RangeAnalysis::analyzeLoop(MBasicBlock* header) {
1922 MOZ_ASSERT(header->hasUniqueBackedge());
1924 // Try to compute an upper bound on the number of times the loop backedge
1925 // will be taken. Look for tests that dominate the backedge and which have
1926 // an edge leaving the loop body.
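// As a hypothetical example, in a loop like
//
//   for (var i = 0; i < n; i++) { ... }
//
// the test |i < n| dominates the backedge and has a successor edge leaving the
// loop body, so it is a candidate for deriving an iteration bound below.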
1927 MBasicBlock* backedge = header->backedge();
1929 // Ignore trivial infinite loops.
1930 if (backedge == header) {
1931 return true;
1934 bool canOsr;
1935 size_t numBlocks = MarkLoopBlocks(graph_, header, &canOsr);
1937 // Ignore broken loops.
1938 if (numBlocks == 0) {
1939 return true;
1942 LoopIterationBound* iterationBound = nullptr;
1944 MBasicBlock* block = backedge;
1945 do {
1946 BranchDirection direction;
1947 MTest* branch = block->immediateDominatorBranch(&direction);
1949 if (block == block->immediateDominator()) {
1950 break;
1953 block = block->immediateDominator();
1955 if (branch) {
1956 direction = NegateBranchDirection(direction);
1957 MBasicBlock* otherBlock = branch->branchSuccessor(direction);
1958 if (!otherBlock->isMarked()) {
1959 if (!alloc().ensureBallast()) {
1960 return false;
1962 iterationBound = analyzeLoopIterationCount(header, branch, direction);
1963 if (iterationBound) {
1964 break;
1968 } while (block != header);
1970 if (!iterationBound) {
1971 UnmarkLoopBlocks(graph_, header);
1972 return true;
1975 if (!loopIterationBounds.append(iterationBound)) {
1976 return false;
1979 #ifdef DEBUG
1980 if (JitSpewEnabled(JitSpew_Range)) {
1981 Sprinter sp(GetJitContext()->cx);
1982 if (!sp.init()) {
1983 return false;
1985 iterationBound->boundSum.dump(sp);
1986 JS::UniqueChars str = sp.release();
1987 if (!str) {
1988 return false;
1990 JitSpew(JitSpew_Range, "computed symbolic bound on backedges: %s",
1991 str.get());
1993 #endif
1995 // Try to compute symbolic bounds for the phi nodes at the head of this
1996 // loop, expressed in terms of the iteration bound just computed.
1998 for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd();
1999 iter++) {
2000 analyzeLoopPhi(iterationBound, *iter);
2003 if (!mir->compilingWasm() && !mir->outerInfo().hadBoundsCheckBailout()) {
2004 // Try to hoist any bounds checks from the loop using symbolic bounds.
2006 Vector<MBoundsCheck*, 0, JitAllocPolicy> hoistedChecks(alloc());
2008 for (ReversePostorderIterator iter(graph_.rpoBegin(header));
2009 iter != graph_.rpoEnd(); iter++) {
2010 MBasicBlock* block = *iter;
2011 if (!block->isMarked()) {
2012 continue;
2015 for (MDefinitionIterator iter(block); iter; iter++) {
2016 MDefinition* def = *iter;
2017 if (def->isBoundsCheck() && def->isMovable()) {
2018 if (!alloc().ensureBallast()) {
2019 return false;
2021 if (tryHoistBoundsCheck(header, def->toBoundsCheck())) {
2022 if (!hoistedChecks.append(def->toBoundsCheck())) {
2023 return false;
2030 // Note: replace all uses of the original bounds check with the
2031 // actual index. This is usually done during bounds check elimination,
2032 // but in this case it's safe to do it here since the load/store is
2033 // definitely not loop-invariant, so we will never move it before
2034 // one of the bounds checks we just added.
2035 for (size_t i = 0; i < hoistedChecks.length(); i++) {
2036 MBoundsCheck* ins = hoistedChecks[i];
2037 ins->replaceAllUsesWith(ins->index());
2038 ins->block()->discard(ins);
2042 UnmarkLoopBlocks(graph_, header);
2043 return true;
2046 // Unbox beta nodes in order to hoist instructions properly, and not be limited
2047 // by the beta nodes which are added after each branch.
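// For example (illustrative names), given x2 = Beta(x1, R1) where
// x1 = Beta(x0, R0), this walks through both beta nodes and returns the
// underlying definition x0.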
2048 static inline MDefinition* DefinitionOrBetaInputDefinition(MDefinition* ins) {
2049 while (ins->isBeta()) {
2050 ins = ins->toBeta()->input();
2052 return ins;
2055 LoopIterationBound* RangeAnalysis::analyzeLoopIterationCount(
2056 MBasicBlock* header, MTest* test, BranchDirection direction) {
2057 SimpleLinearSum lhs(nullptr, 0);
2058 MDefinition* rhs;
2059 bool lessEqual;
2060 if (!ExtractLinearInequality(test, direction, &lhs, &rhs, &lessEqual)) {
2061 return nullptr;
2064 // Ensure the rhs is a loop invariant term.
2065 if (rhs && rhs->block()->isMarked()) {
2066 if (lhs.term && lhs.term->block()->isMarked()) {
2067 return nullptr;
2069 MDefinition* temp = lhs.term;
2070 lhs.term = rhs;
2071 rhs = temp;
2072 if (!SafeSub(0, lhs.constant, &lhs.constant)) {
2073 return nullptr;
2075 lessEqual = !lessEqual;
2078 MOZ_ASSERT_IF(rhs, !rhs->block()->isMarked());
2080 // Ensure the lhs is a phi node from the start of the loop body.
2081 if (!lhs.term || !lhs.term->isPhi() || lhs.term->block() != header) {
2082 return nullptr;
2085 // Check that the value of the lhs changes by a constant amount with each
2086 // loop iteration. This requires that the lhs be written in every loop
2087 // iteration with a value that is a constant difference from its value at
2088 // the start of the iteration.
2090 if (lhs.term->toPhi()->numOperands() != 2) {
2091 return nullptr;
2094 // The first operand of the phi should be the lhs' value at the start of
2095 // the first executed iteration, and not a value written which could
2096 // replace the second operand below during the middle of execution.
2097 MDefinition* lhsInitial = lhs.term->toPhi()->getLoopPredecessorOperand();
2098 if (lhsInitial->block()->isMarked()) {
2099 return nullptr;
2102 // The second operand of the phi should be a value written by an add/sub
2103 // in every loop iteration, i.e. in a block which dominates the backedge.
2104 MDefinition* lhsWrite = DefinitionOrBetaInputDefinition(
2105 lhs.term->toPhi()->getLoopBackedgeOperand());
2106 if (!lhsWrite->isAdd() && !lhsWrite->isSub()) {
2107 return nullptr;
2109 if (!lhsWrite->block()->isMarked()) {
2110 return nullptr;
2112 MBasicBlock* bb = header->backedge();
2113 for (; bb != lhsWrite->block() && bb != header;
2114 bb = bb->immediateDominator()) {
2116 if (bb != lhsWrite->block()) {
2117 return nullptr;
2120 SimpleLinearSum lhsModified = ExtractLinearSum(lhsWrite);
2122 // Check that the value of the lhs at the backedge is of the form
2123 // 'old(lhs) + N'. We can be sure that old(lhs) is the value at the start
2124 // of the iteration, and not that written to lhs in a previous iteration,
2125 // as such a previous value could not appear directly in the addition:
2126 // it could not be stored in lhs as the lhs add/sub executes in every
2127 // iteration, and if it were stored in another variable its use here would
2128 // be as an operand to a phi node for that variable.
2129 if (lhsModified.term != lhs.term) {
2130 return nullptr;
2133 LinearSum iterationBound(alloc());
2134 LinearSum currentIteration(alloc());
2136 if (lhsModified.constant == 1 && !lessEqual) {
2137 // The value of lhs is 'initial(lhs) + iterCount' and this will end
2138 // execution of the loop if 'lhs + lhsN >= rhs'. Thus, an upper bound
2139 // on the number of backedges executed is:
2141 // initial(lhs) + iterCount + lhsN == rhs
2142 // iterCount == rhs - initial(lhs) - lhsN
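// As a hypothetical example, with initial(lhs) == 0, lhsN == 0 and rhs == 10,
// at most 10 - 0 - 0 == 10 backedges can be taken before the condition
// 'lhs >= rhs' terminates the loop.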
2144 if (rhs) {
2145 if (!iterationBound.add(rhs, 1)) {
2146 return nullptr;
2149 if (!iterationBound.add(lhsInitial, -1)) {
2150 return nullptr;
2153 int32_t lhsConstant;
2154 if (!SafeSub(0, lhs.constant, &lhsConstant)) {
2155 return nullptr;
2157 if (!iterationBound.add(lhsConstant)) {
2158 return nullptr;
2161 if (!currentIteration.add(lhs.term, 1)) {
2162 return nullptr;
2164 if (!currentIteration.add(lhsInitial, -1)) {
2165 return nullptr;
2167 } else if (lhsModified.constant == -1 && lessEqual) {
2168 // The value of lhs is 'initial(lhs) - iterCount'. Similar to the above
2169 // case, an upper bound on the number of backedges executed is:
2171 // initial(lhs) - iterCount + lhsN == rhs
2172 // iterCount == initial(lhs) - rhs + lhsN
2174 if (!iterationBound.add(lhsInitial, 1)) {
2175 return nullptr;
2177 if (rhs) {
2178 if (!iterationBound.add(rhs, -1)) {
2179 return nullptr;
2182 if (!iterationBound.add(lhs.constant)) {
2183 return nullptr;
2186 if (!currentIteration.add(lhsInitial, 1)) {
2187 return nullptr;
2189 if (!currentIteration.add(lhs.term, -1)) {
2190 return nullptr;
2192 } else {
2193 return nullptr;
2196 return new (alloc())
2197 LoopIterationBound(header, test, iterationBound, currentIteration);
2200 void RangeAnalysis::analyzeLoopPhi(LoopIterationBound* loopBound, MPhi* phi) {
2201 // Given a bound on the number of backedges taken, compute an upper and
2202 // lower bound for a phi node that may change by a constant amount each
2203 // iteration. Unlike for the case when computing the iteration bound
2204 // itself, the phi does not need to change by the same amount every iteration,
2205 // but is required to change by at most N and be either nondecreasing or
2206 // nonincreasing.
2208 MOZ_ASSERT(phi->numOperands() == 2);
2210 MDefinition* initial = phi->getLoopPredecessorOperand();
2211 if (initial->block()->isMarked()) {
2212 return;
2215 SimpleLinearSum modified =
2216 ExtractLinearSum(phi->getLoopBackedgeOperand(), MathSpace::Infinite);
2218 if (modified.term != phi || modified.constant == 0) {
2219 return;
2222 if (!phi->range()) {
2223 phi->setRange(new (alloc()) Range(phi));
2226 LinearSum initialSum(alloc());
2227 if (!initialSum.add(initial, 1)) {
2228 return;
2231 // The phi may change by N each iteration, and is either nondecreasing or
2232 // nonincreasing. initial(phi) is either a lower or upper bound for the
2233 // phi, and initial(phi) + loopBound * N is either an upper or lower bound,
2234 // at all points within the loop, provided that loopBound >= 0.
2236 // We are more interested, however, in the bound for phi at points
2237 // dominated by the loop bound's test; if the test dominates e.g. a bounds
2238 // check we want to hoist from the loop, using the value of the phi at the
2239 // head of the loop for this will usually be too imprecise to hoist the
2240 // check. These points will execute only if the backedge executes at least
2241 // one more time (as the test passed and the test dominates the backedge),
2242 // so we know both that loopBound >= 1 and that the phi's value has changed
2243 // at most loopBound - 1 times. Thus, another upper or lower bound for the
2244 // phi is initial(phi) + (loopBound - 1) * N, without requiring us to
2245 // ensure that loopBound >= 0.
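// As a hypothetical sketch: for a counter phi that starts at 0 and is
// incremented by 1 each iteration (N == 1), with a symbolic backedge bound of
// |n|, the range of the phi at points dominated by the loop test is
// [0, 0 + (n - 1) * 1], i.e. [0, n - 1].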
2247 LinearSum limitSum(loopBound->boundSum);
2248 if (!limitSum.multiply(modified.constant) || !limitSum.add(initialSum)) {
2249 return;
2252 int32_t negativeConstant;
2253 if (!SafeSub(0, modified.constant, &negativeConstant) ||
2254 !limitSum.add(negativeConstant)) {
2255 return;
2258 Range* initRange = initial->range();
2259 if (modified.constant > 0) {
2260 if (initRange && initRange->hasInt32LowerBound()) {
2261 phi->range()->refineLower(initRange->lower());
2263 phi->range()->setSymbolicLower(
2264 SymbolicBound::New(alloc(), nullptr, initialSum));
2265 phi->range()->setSymbolicUpper(
2266 SymbolicBound::New(alloc(), loopBound, limitSum));
2267 } else {
2268 if (initRange && initRange->hasInt32UpperBound()) {
2269 phi->range()->refineUpper(initRange->upper());
2271 phi->range()->setSymbolicUpper(
2272 SymbolicBound::New(alloc(), nullptr, initialSum));
2273 phi->range()->setSymbolicLower(
2274 SymbolicBound::New(alloc(), loopBound, limitSum));
2277 JitSpew(JitSpew_Range, "added symbolic range on %u", phi->id());
2278 SpewRange(phi);
2281 // Whether |bound| is valid at the specified bounds check instruction in a
2282 // loop, and thus whether it may be used to hoist |ins|.
2283 static inline bool SymbolicBoundIsValid(MBasicBlock* header, MBoundsCheck* ins,
2284 const SymbolicBound* bound) {
2285 if (!bound->loop) {
2286 return true;
2288 if (ins->block() == header) {
2289 return false;
2291 MBasicBlock* bb = ins->block()->immediateDominator();
2292 while (bb != header && bb != bound->loop->test->block()) {
2293 bb = bb->immediateDominator();
2295 return bb == bound->loop->test->block();
2298 bool RangeAnalysis::tryHoistBoundsCheck(MBasicBlock* header,
2299 MBoundsCheck* ins) {
2300 // The bounds check's length must be loop invariant or a constant.
2301 MDefinition* length = DefinitionOrBetaInputDefinition(ins->length());
2302 if (length->block()->isMarked() && !length->isConstant()) {
2303 return false;
2306 // The bounds check's index should not be loop invariant (else we would
2307 // already have hoisted it during LICM).
2308 SimpleLinearSum index = ExtractLinearSum(ins->index());
2309 if (!index.term || !index.term->block()->isMarked()) {
2310 return false;
2313 // Check for a symbolic lower and upper bound on the index. If either
2314 // condition depends on an iteration bound for the loop, only hoist if
2315 // the bounds check is dominated by the iteration bound's test.
2316 if (!index.term->range()) {
2317 return false;
2319 const SymbolicBound* lower = index.term->range()->symbolicLower();
2320 if (!lower || !SymbolicBoundIsValid(header, ins, lower)) {
2321 return false;
2323 const SymbolicBound* upper = index.term->range()->symbolicUpper();
2324 if (!upper || !SymbolicBoundIsValid(header, ins, upper)) {
2325 return false;
2328 MBasicBlock* preLoop = header->loopPredecessor();
2329 MOZ_ASSERT(!preLoop->isMarked());
2331 MDefinition* lowerTerm = ConvertLinearSum(alloc(), preLoop, lower->sum,
2332 BailoutKind::HoistBoundsCheck);
2333 if (!lowerTerm) {
2334 return false;
2337 MDefinition* upperTerm = ConvertLinearSum(alloc(), preLoop, upper->sum,
2338 BailoutKind::HoistBoundsCheck);
2339 if (!upperTerm) {
2340 return false;
2343 // We are checking that index + indexConstant >= 0, and know that
2344 // index >= lowerTerm + lowerConstant. Thus, check that:
2346 // lowerTerm + lowerConstant + indexConstant >= 0
2347 // lowerTerm >= -lowerConstant - indexConstant
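// As a hypothetical example, if lowerConstant == -1 (i.e. index >= lowerTerm - 1)
// and indexConstant == 0, the hoisted check only needs to verify that
// lowerTerm >= 1 to guarantee that index + indexConstant >= 0.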
2349 int32_t lowerConstant = 0;
2350 if (!SafeSub(lowerConstant, index.constant, &lowerConstant)) {
2351 return false;
2353 if (!SafeSub(lowerConstant, lower->sum.constant(), &lowerConstant)) {
2354 return false;
2357 // We are checking that index < boundsLength, and know that
2358 // index <= upperTerm + upperConstant. Thus, check that:
2360 // upperTerm + upperConstant < boundsLength
2362 int32_t upperConstant = index.constant;
2363 if (!SafeAdd(upper->sum.constant(), upperConstant, &upperConstant)) {
2364 return false;
2367 // Hoist the loop invariant lower bounds checks.
2368 MBoundsCheckLower* lowerCheck = MBoundsCheckLower::New(alloc(), lowerTerm);
2369 lowerCheck->setMinimum(lowerConstant);
2370 lowerCheck->computeRange(alloc());
2371 lowerCheck->collectRangeInfoPreTrunc();
2372 lowerCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
2373 preLoop->insertBefore(preLoop->lastIns(), lowerCheck);
2375 // A common pattern for iterating over typed arrays is this:
2377 // for (var i = 0; i < ta.length; i++) {
2378 // use ta[i];
2379 // }
2381 // Here |upperTerm| (= ta.length) is a NonNegativeIntPtrToInt32 instruction.
2382 // Unwrap this if |length| is also an IntPtr so that we don't add an
2383 // unnecessary bounds check and Int32ToIntPtr below.
2384 if (upperTerm->isNonNegativeIntPtrToInt32() &&
2385 length->type() == MIRType::IntPtr) {
2386 upperTerm = upperTerm->toNonNegativeIntPtrToInt32()->input();
2389 // Hoist the loop invariant upper bounds checks.
2390 if (upperTerm != length || upperConstant >= 0) {
2391 // Hoist the bounds check's length if it isn't already loop invariant.
2392 if (length->block()->isMarked()) {
2393 MOZ_ASSERT(length->isConstant());
2394 MInstruction* lengthIns = length->toInstruction();
2395 lengthIns->block()->moveBefore(preLoop->lastIns(), lengthIns);
2398 // If the length is IntPtr, convert the upperTerm to that as well for the
2399 // bounds check.
2400 if (length->type() == MIRType::IntPtr &&
2401 upperTerm->type() == MIRType::Int32) {
2402 upperTerm = MInt32ToIntPtr::New(alloc(), upperTerm);
2403 upperTerm->computeRange(alloc());
2404 upperTerm->collectRangeInfoPreTrunc();
2405 preLoop->insertBefore(preLoop->lastIns(), upperTerm->toInstruction());
2408 MBoundsCheck* upperCheck = MBoundsCheck::New(alloc(), upperTerm, length);
2409 upperCheck->setMinimum(upperConstant);
2410 upperCheck->setMaximum(upperConstant);
2411 upperCheck->computeRange(alloc());
2412 upperCheck->collectRangeInfoPreTrunc();
2413 upperCheck->setBailoutKind(BailoutKind::HoistBoundsCheck);
2414 preLoop->insertBefore(preLoop->lastIns(), upperCheck);
2417 return true;
2420 bool RangeAnalysis::analyze() {
2421 JitSpew(JitSpew_Range, "Doing range propagation");
2423 for (ReversePostorderIterator iter(graph_.rpoBegin());
2424 iter != graph_.rpoEnd(); iter++) {
2425 MBasicBlock* block = *iter;
2426 // No blocks are supposed to be unreachable, except when we have an OSR
2427 // block, in which case the Value Numbering phase adds fixup blocks which
2428 // are unreachable.
2429 MOZ_ASSERT(!block->unreachable() || graph_.osrBlock());
2431 // If the block's immediate dominator is unreachable, the block is
2432 // unreachable. Iterating in RPO, we'll always see the immediate
2433 // dominator before the block.
2434 if (block->immediateDominator()->unreachable()) {
2435 block->setUnreachableUnchecked();
2436 continue;
2439 for (MDefinitionIterator iter(block); iter; iter++) {
2440 MDefinition* def = *iter;
2441 if (!alloc().ensureBallast()) {
2442 return false;
2445 def->computeRange(alloc());
2446 JitSpew(JitSpew_Range, "computing range on %u", def->id());
2447 SpewRange(def);
2450 // Beta node range analysis may have marked this block unreachable. If
2451 // so, it's no longer interesting to continue processing it.
2452 if (block->unreachable()) {
2453 continue;
2456 if (block->isLoopHeader()) {
2457 if (!analyzeLoop(block)) {
2458 return false;
2462 // First pass at collecting range info - while the beta nodes are still
2463 // around and before truncation.
2464 for (MInstructionIterator iter(block->begin()); iter != block->end();
2465 iter++) {
2466 iter->collectRangeInfoPreTrunc();
2470 return true;
2473 bool RangeAnalysis::addRangeAssertions() {
2474 if (!JitOptions.checkRangeAnalysis) {
2475 return true;
2478 // Check the computed range for this instruction, if the option is set. Note
2479 // that this code is quite invasive; it adds numerous additional
2480 // instructions for each MInstruction with a computed range, and it uses
2481 // registers, so it also affects register allocation.
2482 for (ReversePostorderIterator iter(graph_.rpoBegin());
2483 iter != graph_.rpoEnd(); iter++) {
2484 MBasicBlock* block = *iter;
2486 // Do not add assertions in unreachable blocks.
2487 if (block->unreachable()) {
2488 continue;
2491 for (MDefinitionIterator iter(block); iter; iter++) {
2492 MDefinition* ins = *iter;
2494 // Perform range checking for all numeric and numeric-like types.
2495 if (!IsNumberType(ins->type()) && ins->type() != MIRType::Boolean &&
2496 ins->type() != MIRType::Value && ins->type() != MIRType::IntPtr) {
2497 continue;
2500 // MIsNoIter is fused with the MTest that follows it and emitted as
2501 // LIsNoIterAndBranch. Similarly, MIteratorHasIndices is fused to
2502 // become LIteratorHasIndicesAndBranch. Skip them to avoid complicating
2503 // lowering.
2504 if (ins->isIsNoIter() || ins->isIteratorHasIndices()) {
2505 MOZ_ASSERT(ins->hasOneUse());
2506 continue;
2509 Range r(ins);
2511 MOZ_ASSERT_IF(ins->type() == MIRType::Int64, r.isUnknown());
2513 // Don't insert assertions if there's nothing interesting to assert.
2514 if (r.isUnknown() ||
2515 (ins->type() == MIRType::Int32 && r.isUnknownInt32())) {
2516 continue;
2519 // Don't add a use to an instruction that is recovered on bailout.
2520 if (ins->isRecoveredOnBailout()) {
2521 continue;
2524 if (!alloc().ensureBallast()) {
2525 return false;
2527 MAssertRange* guard =
2528 MAssertRange::New(alloc(), ins, new (alloc()) Range(r));
2530 // Beta nodes and interrupt checks are required to be located at the
2531 // beginnings of basic blocks, so we must insert range assertions
2532 // after any such instructions.
2533 MInstruction* insertAt = nullptr;
2534 if (block->graph().osrBlock() == block) {
2535 insertAt = ins->toInstruction();
2536 } else {
2537 insertAt = block->safeInsertTop(ins);
2540 if (insertAt == *iter) {
2541 block->insertAfter(insertAt, guard);
2542 } else {
2543 block->insertBefore(insertAt, guard);
2548 return true;
2551 ///////////////////////////////////////////////////////////////////////////////
2552 // Range based Truncation
2553 ///////////////////////////////////////////////////////////////////////////////
2555 void Range::clampToInt32() {
2556 if (isInt32()) {
2557 return;
2559 int32_t l = hasInt32LowerBound() ? lower() : JSVAL_INT_MIN;
2560 int32_t h = hasInt32UpperBound() ? upper() : JSVAL_INT_MAX;
2561 setInt32(l, h);
2564 void Range::wrapAroundToInt32() {
2565 if (!hasInt32Bounds()) {
2566 setInt32(JSVAL_INT_MIN, JSVAL_INT_MAX);
2567 } else if (canHaveFractionalPart()) {
2568 // Clearing the fractional field may provide an opportunity to refine
2569 // lower_ or upper_.
2570 canHaveFractionalPart_ = ExcludesFractionalParts;
2571 canBeNegativeZero_ = ExcludesNegativeZero;
2572 refineInt32BoundsByExponent(max_exponent_, &lower_, &hasInt32LowerBound_,
2573 &upper_, &hasInt32UpperBound_);
2575 assertInvariants();
2576 } else {
2577 // If nothing else, we can clear the negative zero flag.
2578 canBeNegativeZero_ = ExcludesNegativeZero;
2580 MOZ_ASSERT(isInt32());
2583 void Range::wrapAroundToShiftCount() {
2584 wrapAroundToInt32();
2585 if (lower() < 0 || upper() >= 32) {
2586 setInt32(0, 31);
2590 void Range::wrapAroundToBoolean() {
2591 wrapAroundToInt32();
2592 if (!isBoolean()) {
2593 setInt32(0, 1);
2595 MOZ_ASSERT(isBoolean());
2598 bool MDefinition::canTruncate() const {
2599 // No procedure defined for truncating this instruction.
2600 return false;
2603 void MDefinition::truncate(TruncateKind kind) {
2604 MOZ_CRASH("No procedure defined for truncating this instruction.");
2607 bool MConstant::canTruncate() const { return IsFloatingPointType(type()); }
2609 void MConstant::truncate(TruncateKind kind) {
2610 MOZ_ASSERT(canTruncate());
2612 // Truncate the double to int, since all uses truncate it.
2613 int32_t res = ToInt32(numberToDouble());
2614 payload_.asBits = 0;
2615 payload_.i32 = res;
2616 setResultType(MIRType::Int32);
2617 if (range()) {
2618 range()->setInt32(res, res);
2622 bool MPhi::canTruncate() const {
2623 return type() == MIRType::Double || type() == MIRType::Int32;
2626 void MPhi::truncate(TruncateKind kind) {
2627 MOZ_ASSERT(canTruncate());
2628 truncateKind_ = kind;
2629 setResultType(MIRType::Int32);
2630 if (kind >= TruncateKind::IndirectTruncate && range()) {
2631 range()->wrapAroundToInt32();
2635 bool MAdd::canTruncate() const {
2636 return type() == MIRType::Double || type() == MIRType::Int32;
2639 void MAdd::truncate(TruncateKind kind) {
2640 MOZ_ASSERT(canTruncate());
2642 // Remember analysis, needed for fallible checks.
2643 setTruncateKind(kind);
2645 setSpecialization(MIRType::Int32);
2646 if (truncateKind() >= TruncateKind::IndirectTruncate && range()) {
2647 range()->wrapAroundToInt32();
2651 bool MSub::canTruncate() const {
2652 return type() == MIRType::Double || type() == MIRType::Int32;
2655 void MSub::truncate(TruncateKind kind) {
2656 MOZ_ASSERT(canTruncate());
2658 // Remember analysis, needed for fallible checks.
2659 setTruncateKind(kind);
2660 setSpecialization(MIRType::Int32);
2661 if (truncateKind() >= TruncateKind::IndirectTruncate && range()) {
2662 range()->wrapAroundToInt32();
2666 bool MMul::canTruncate() const {
2667 return type() == MIRType::Double || type() == MIRType::Int32;
2670 void MMul::truncate(TruncateKind kind) {
2671 MOZ_ASSERT(canTruncate());
2673 // Remember analysis, needed for fallible checks.
2674 setTruncateKind(kind);
2675 setSpecialization(MIRType::Int32);
2676 if (truncateKind() >= TruncateKind::IndirectTruncate) {
2677 setCanBeNegativeZero(false);
2678 if (range()) {
2679 range()->wrapAroundToInt32();
2684 bool MDiv::canTruncate() const {
2685 return type() == MIRType::Double || type() == MIRType::Int32;
2688 void MDiv::truncate(TruncateKind kind) {
2689 MOZ_ASSERT(canTruncate());
2691 // Remember analysis, needed for fallible checks.
2692 setTruncateKind(kind);
2693 setSpecialization(MIRType::Int32);
2695 // Divisions where the lhs and rhs are unsigned and the result is
2696 // truncated can be lowered more efficiently.
2697 if (unsignedOperands()) {
2698 replaceWithUnsignedOperands();
2699 unsigned_ = true;
2703 bool MMod::canTruncate() const {
2704 return type() == MIRType::Double || type() == MIRType::Int32;
2707 void MMod::truncate(TruncateKind kind) {
2708 // As for division, handle unsigned modulus with a truncated result.
2709 MOZ_ASSERT(canTruncate());
2711 // Remember analysis, needed for fallible checks.
2712 setTruncateKind(kind);
2713 setSpecialization(MIRType::Int32);
2715 if (unsignedOperands()) {
2716 replaceWithUnsignedOperands();
2717 unsigned_ = true;
2721 bool MToDouble::canTruncate() const {
2722 MOZ_ASSERT(type() == MIRType::Double);
2723 return true;
2726 void MToDouble::truncate(TruncateKind kind) {
2727 MOZ_ASSERT(canTruncate());
2728 setTruncateKind(kind);
2730 // We use the return type to flag that this MToDouble should be replaced by
2731 // a MTruncateToInt32 when modifying the graph.
2732 setResultType(MIRType::Int32);
2733 if (truncateKind() >= TruncateKind::IndirectTruncate) {
2734 if (range()) {
2735 range()->wrapAroundToInt32();
2740 bool MLimitedTruncate::canTruncate() const { return true; }
2742 void MLimitedTruncate::truncate(TruncateKind kind) {
2743 MOZ_ASSERT(canTruncate());
2744 setTruncateKind(kind);
2745 setResultType(MIRType::Int32);
2746 if (kind >= TruncateKind::IndirectTruncate && range()) {
2747 range()->wrapAroundToInt32();
2751 bool MCompare::canTruncate() const {
2752 if (!isDoubleComparison()) {
2753 return false;
2756 // If both operands are naturally in the int32 range, we can convert from
2757 // a double comparison to being an int32 comparison.
2758 if (!Range(lhs()).isInt32() || !Range(rhs()).isInt32()) {
2759 return false;
2762 return true;
2765 void MCompare::truncate(TruncateKind kind) {
2766 MOZ_ASSERT(canTruncate());
2767 compareType_ = Compare_Int32;
2769 // Truncating the operands won't change their value because we don't force a
2770 // truncation, but it will change their type, which we need because we
2771 // now expect integer inputs.
2772 truncateOperands_ = true;
2775 TruncateKind MDefinition::operandTruncateKind(size_t index) const {
2776 // Generic routine: We don't know anything.
2777 return TruncateKind::NoTruncate;
2780 TruncateKind MPhi::operandTruncateKind(size_t index) const {
2781 // The truncation applied to a phi is effectively applied to the phi's
2782 // operands.
2783 return truncateKind_;
2786 TruncateKind MTruncateToInt32::operandTruncateKind(size_t index) const {
2787 // This operator is an explicit truncate to int32.
2788 return TruncateKind::Truncate;
2791 TruncateKind MBinaryBitwiseInstruction::operandTruncateKind(
2792 size_t index) const {
2793 // The bitwise operators truncate to int32.
2794 return TruncateKind::Truncate;
2797 TruncateKind MLimitedTruncate::operandTruncateKind(size_t index) const {
2798 return std::min(truncateKind(), truncateLimit_);
2801 TruncateKind MAdd::operandTruncateKind(size_t index) const {
2802 // This operator is doing some arithmetic. If its result is truncated,
2803 // it's an indirect truncate for its operands.
2804 return std::min(truncateKind(), TruncateKind::IndirectTruncate);
2807 TruncateKind MSub::operandTruncateKind(size_t index) const {
2808 // See the comment in MAdd::operandTruncateKind.
2809 return std::min(truncateKind(), TruncateKind::IndirectTruncate);
2812 TruncateKind MMul::operandTruncateKind(size_t index) const {
2813 // See the comment in MAdd::operandTruncateKind.
2814 return std::min(truncateKind(), TruncateKind::IndirectTruncate);
2817 TruncateKind MToDouble::operandTruncateKind(size_t index) const {
2818 // MToDouble propagates its truncate kind to its operand.
2819 return truncateKind();
2822 TruncateKind MStoreUnboxedScalar::operandTruncateKind(size_t index) const {
2823 // An integer store truncates the stored value.
2824 return (index == 2 && isIntegerWrite()) ? TruncateKind::Truncate
2825 : TruncateKind::NoTruncate;
2828 TruncateKind MStoreDataViewElement::operandTruncateKind(size_t index) const {
2829 // An integer store truncates the stored value.
2830 return (index == 2 && isIntegerWrite()) ? TruncateKind::Truncate
2831 : TruncateKind::NoTruncate;
2834 TruncateKind MStoreTypedArrayElementHole::operandTruncateKind(
2835 size_t index) const {
2836 // An integer store truncates the stored value.
2837 return (index == 3 && isIntegerWrite()) ? TruncateKind::Truncate
2838 : TruncateKind::NoTruncate;
2841 TruncateKind MDiv::operandTruncateKind(size_t index) const {
2842 return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
2845 TruncateKind MMod::operandTruncateKind(size_t index) const {
2846 return std::min(truncateKind(), TruncateKind::TruncateAfterBailouts);
2849 TruncateKind MCompare::operandTruncateKind(size_t index) const {
2850 // If we're doing an int32 comparison on operands which were previously
2851 // floating-point, convert them!
2852 MOZ_ASSERT_IF(truncateOperands_, isInt32Comparison());
2853 return truncateOperands_ ? TruncateKind::TruncateAfterBailouts
2854 : TruncateKind::NoTruncate;
2857 static bool TruncateTest(TempAllocator& alloc, MTest* test) {
2858 // If all possible inputs to the test are either int32 or boolean,
2859 // convert those inputs to int32 so that an int32 test can be performed.
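// As a hypothetical example, for
//
//   var x = cond ? 1 : true;
//   if (x) { ... }
//
// the test's input is a phi of boxed int32/boolean values; converting the
// boolean input to int32 lets the test be compiled as an int32 test.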
2861 if (test->input()->type() != MIRType::Value) {
2862 return true;
2865 if (!test->input()->isPhi() || !test->input()->hasOneDefUse() ||
2866 test->input()->isImplicitlyUsed()) {
2867 return true;
2870 MPhi* phi = test->input()->toPhi();
2871 for (size_t i = 0; i < phi->numOperands(); i++) {
2872 MDefinition* def = phi->getOperand(i);
2873 if (!def->isBox()) {
2874 return true;
2876 MDefinition* inner = def->getOperand(0);
2877 if (inner->type() != MIRType::Boolean && inner->type() != MIRType::Int32) {
2878 return true;
2882 for (size_t i = 0; i < phi->numOperands(); i++) {
2883 MDefinition* inner = phi->getOperand(i)->getOperand(0);
2884 if (inner->type() != MIRType::Int32) {
2885 if (!alloc.ensureBallast()) {
2886 return false;
2888 MBasicBlock* block = inner->block();
2889 inner = MToNumberInt32::New(alloc, inner);
2890 block->insertBefore(block->lastIns(), inner->toInstruction());
2892 MOZ_ASSERT(inner->type() == MIRType::Int32);
2893 phi->replaceOperand(i, inner);
2896 phi->setResultType(MIRType::Int32);
2897 return true;
2900 // Truncating an instruction's result is an optimization which implies
2901 // knowing all uses of that instruction. This implies that if one of
2902 // the uses got removed, then Range Analysis is not allowed to make
2903 // any modification which can change the result, especially if the
2904 // result can be observed.
2906 // This corner case is easiest to understand with UCE examples, but it
2907 // might also happen with type inference assumptions. Note: type
2908 // inference effectively introduces implicit branches into which other
2909 // types might be flowing.
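// As a hypothetical illustration: if |y = a * b| only feeds a branch that UCE
// later removes, |y| may still be captured by a resume point. Truncating |y|
// could then change the value observed after a bailout, so a clone of the
// original operation is kept, marked as recovered-on-bailout, and used by
// those resume points instead.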
2910 static bool CloneForDeadBranches(TempAllocator& alloc,
2911 MInstruction* candidate) {
2912 // Compare returns a boolean so it doesn't have to be recovered on bailout
2913 // because the output would remain correct.
2914 if (candidate->isCompare()) {
2915 return true;
2918 MOZ_ASSERT(candidate->canClone());
2919 if (!alloc.ensureBallast()) {
2920 return false;
2923 MDefinitionVector operands(alloc);
2924 size_t end = candidate->numOperands();
2925 if (!operands.reserve(end)) {
2926 return false;
2928 for (size_t i = 0; i < end; ++i) {
2929 operands.infallibleAppend(candidate->getOperand(i));
2932 MInstruction* clone = candidate->clone(alloc, operands);
2933 if (!clone) {
2934 return false;
2936 clone->setRange(nullptr);
2938 // Set the ImplicitlyUsed flag on the cloned instruction in order to chain
2939 // recover instructions for the bailout path.
2940 clone->setImplicitlyUsedUnchecked();
2942 candidate->block()->insertBefore(candidate, clone);
2944 if (!candidate->maybeConstantValue()) {
2945 MOZ_ASSERT(clone->canRecoverOnBailout());
2946 clone->setRecoveredOnBailout();
2949 // Replace the candidate by its recovered on bailout clone within recovered
2950 // instructions and resume points operands.
2951 for (MUseIterator i(candidate->usesBegin()); i != candidate->usesEnd();) {
2952 MUse* use = *i++;
2953 MNode* ins = use->consumer();
2954 if (ins->isDefinition() && !ins->toDefinition()->isRecoveredOnBailout()) {
2955 continue;
2958 use->replaceProducer(clone);
2961 return true;
2964 // Examine all the users of |candidate| and determine the most aggressive
2965 // truncate kind that satisfies all of them.
2966 static TruncateKind ComputeRequestedTruncateKind(MDefinition* candidate,
2967 bool* shouldClone) {
2968 bool isCapturedResult =
2969 false; // Check if used by a recovered instruction or a resume point.
2970 bool isObservableResult =
2971 false; // Check if it can be read from another frame.
2972 bool isRecoverableResult = true; // Check if it can safely be reconstructed.
2973 bool isImplicitlyUsed = candidate->isImplicitlyUsed();
2974 bool hasTryBlock = candidate->block()->graph().hasTryBlock();
2976 TruncateKind kind = TruncateKind::Truncate;
2977 for (MUseIterator use(candidate->usesBegin()); use != candidate->usesEnd();
2978 use++) {
2979 if (use->consumer()->isResumePoint()) {
2980 // Truncation is a destructive optimization; as such, we need to pay
2981 // attention to removed branches and avoid this destructive
2982 // optimization if we have no alternative. (see the
2983 // ImplicitlyUsed flag)
2984 isCapturedResult = true;
2985 isObservableResult =
2986 isObservableResult ||
2987 use->consumer()->toResumePoint()->isObservableOperand(*use);
2988 isRecoverableResult =
2989 isRecoverableResult &&
2990 use->consumer()->toResumePoint()->isRecoverableOperand(*use);
2991 continue;
2994 MDefinition* consumer = use->consumer()->toDefinition();
2995 if (consumer->isRecoveredOnBailout()) {
2996 isCapturedResult = true;
2997 isImplicitlyUsed = isImplicitlyUsed || consumer->isImplicitlyUsed();
2998 continue;
3001 TruncateKind consumerKind =
3002 consumer->operandTruncateKind(consumer->indexOf(*use));
3003 kind = std::min(kind, consumerKind);
3004 if (kind == TruncateKind::NoTruncate) {
3005 break;
3009 // We cannot do full truncation on guarded instructions.
3010 if (candidate->isGuard() || candidate->isGuardRangeBailouts()) {
3011 kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
3014 // If the value naturally produces an int32 value (before bailout checks)
3015 // that needs no conversion, we don't have to worry about resume points
3016 // seeing truncated values.
3017 bool needsConversion = !candidate->range() || !candidate->range()->isInt32();
3019 // If the instruction is explicitly truncated (not indirectly) by all its
3020 // uses and if it is not implicitly used, then we can safely encode its
3021 // truncated result as part of the resume point operands. This is safe,
3022 // because even if we resume with a truncated double, the next baseline
3023 // instruction operating on this instruction is going to be a no-op.
3025 // Note, that if the result can be observed from another frame, then this
3026 // optimization is not safe. Similarly, if this function contains a try
3027 // block, the result could be observed from a catch block, which we do
3028 // not compile.
3029 bool safeToConvert = kind == TruncateKind::Truncate && !isImplicitlyUsed &&
3030 !isObservableResult && !hasTryBlock;
3032 // If the candidate instruction appears as operand of a resume point or a
3033 // recover instruction, and we have to truncate its result, then we might
3034 // have to either recover the result during the bailout, or avoid the
3035 // truncation.
3036 if (isCapturedResult && needsConversion && !safeToConvert) {
3037 // If the result can be recovered from all the resume points (not needed
3038 // for iterating over the inlined frames), and this instruction can be
3039 // recovered on bailout, then we can clone it and use the cloned
3040 // instruction to encode the recover instruction. Otherwise, we should
3041 // keep the original result and bailout if the value is not in the int32
3042 // range.
3043 if (!JitOptions.disableRecoverIns && isRecoverableResult &&
3044 candidate->canRecoverOnBailout()) {
3045 *shouldClone = true;
3046 } else {
3047 kind = std::min(kind, TruncateKind::TruncateAfterBailouts);
3051 return kind;
3054 static TruncateKind ComputeTruncateKind(MDefinition* candidate,
3055 bool* shouldClone) {
3056 // Compare operations might coerce their inputs to int32 if the ranges are
3057 // correct. So we do not need to check if all uses are coerced.
3058 if (candidate->isCompare()) {
3059 return TruncateKind::TruncateAfterBailouts;
3062 // Set the truncated flag if range analysis ensures that it has no
3063 // rounding errors and no fractional part. Note that we can't use
3064 // the MDefinition Range constructor, because we need to know if
3065 // the value will have rounding errors before any bailout checks.
3066 const Range* r = candidate->range();
3067 bool canHaveRoundingErrors = !r || r->canHaveRoundingErrors();
3069 // Special case integer division and modulo: a/b can be infinite, and a%b
3070 // can be NaN but cannot actually have rounding errors induced by truncation.
3071 if ((candidate->isDiv() || candidate->isMod()) &&
3072 candidate->type() == MIRType::Int32) {
3073 canHaveRoundingErrors = false;
3076 if (canHaveRoundingErrors) {
3077 return TruncateKind::NoTruncate;
3080 // Ensure all observable uses are truncated.
3081 return ComputeRequestedTruncateKind(candidate, shouldClone);
3084 static void RemoveTruncatesOnOutput(MDefinition* truncated) {
3085 // Compare returns a boolean so it doesn't have any output truncates.
3086 if (truncated->isCompare()) {
3087 return;
3090 MOZ_ASSERT(truncated->type() == MIRType::Int32);
3091 MOZ_ASSERT(Range(truncated).isInt32());
3093 for (MUseDefIterator use(truncated); use; use++) {
3094 MDefinition* def = use.def();
3095 if (!def->isTruncateToInt32() && !def->isToNumberInt32()) {
3096 continue;
3099 def->replaceAllUsesWith(truncated);
3103 void RangeAnalysis::adjustTruncatedInputs(MDefinition* truncated) {
3104 MBasicBlock* block = truncated->block();
3105 for (size_t i = 0, e = truncated->numOperands(); i < e; i++) {
3106 TruncateKind kind = truncated->operandTruncateKind(i);
3107 if (kind == TruncateKind::NoTruncate) {
3108 continue;
3111 MDefinition* input = truncated->getOperand(i);
3112 if (input->type() == MIRType::Int32) {
3113 continue;
3116 if (input->isToDouble() && input->getOperand(0)->type() == MIRType::Int32) {
3117 truncated->replaceOperand(i, input->getOperand(0));
3118 } else {
3119 MInstruction* op;
3120 if (kind == TruncateKind::TruncateAfterBailouts) {
3121 MOZ_ASSERT(!mir->outerInfo().hadEagerTruncationBailout());
3122 op = MToNumberInt32::New(alloc(), truncated->getOperand(i));
3123 op->setBailoutKind(BailoutKind::EagerTruncation);
3124 } else {
3125 op = MTruncateToInt32::New(alloc(), truncated->getOperand(i));
3128 if (truncated->isPhi()) {
3129 MBasicBlock* pred = block->getPredecessor(i);
3130 pred->insertBefore(pred->lastIns(), op);
3131 } else {
3132 block->insertBefore(truncated->toInstruction(), op);
3134 truncated->replaceOperand(i, op);
3138 if (truncated->isToDouble()) {
3139 truncated->replaceAllUsesWith(truncated->toToDouble()->getOperand(0));
3140 block->discard(truncated->toToDouble());
3144 bool RangeAnalysis::canTruncate(MDefinition* def, TruncateKind kind) const {
3145 if (kind == TruncateKind::NoTruncate) {
3146 return false;
3149 // Range Analysis is sometimes eager to do optimizations, even if we
3150 // are not able to truncate an instruction. In such cases, we
3151 // speculatively compile the instruction to an int32 instruction
3152 // while adding a guard. This is what is implied by
3153 // TruncateAfterBailouts.
3155 // If a previous compilation was invalidated because a speculative
3156 // truncation bailed out, we no longer attempt to make this kind of
3157 // eager optimization.
3158 if (mir->outerInfo().hadEagerTruncationBailout()) {
3159 if (kind == TruncateKind::TruncateAfterBailouts) {
3160 return false;
3162 // MDiv and MMod always require TruncateAfterBailout for their operands.
3163 // See MDiv::operandTruncateKind and MMod::operandTruncateKind.
3164 if (def->isDiv() || def->isMod()) {
3165 return false;
3169 return true;
3172 // Iterate backward over all instructions and attempt to truncate operations
3173 // for each instruction which respects the following list of predicates: it has
3174 // been analyzed by range analysis, its range has no rounding errors, and all
3175 // of its uses are truncating the result.
3177 // If the truncation of the operation is successful, then the instruction is
3178 // queued for later updating of the graph, to restore type correctness by
3179 // converting the operands that need to be truncated.
3181 // We iterate backward because it is likely that a truncated operation truncates
3182 // some of its operands.
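// As a hypothetical example, in |(a + b) | 0| the bitor truncates its operand,
// which in turn makes the add a truncation candidate; visiting the bitor first
// (in backward order) exposes that opportunity before the add is examined.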
3183 bool RangeAnalysis::truncate() {
3184 JitSpew(JitSpew_Range, "Do range-based truncation (backward loop)");
3186 // Automatic truncation is disabled for wasm because the truncation logic
3187 // is based on IonMonkey which assumes that we can bailout if the truncation
3188 // logic fails. As wasm code has no bailout mechanism, it is safer to avoid
3189 // any automatic truncations.
3190 MOZ_ASSERT(!mir->compilingWasm());
3192 Vector<MDefinition*, 16, SystemAllocPolicy> worklist;
3194 for (PostorderIterator block(graph_.poBegin()); block != graph_.poEnd();
3195 block++) {
3196 for (MInstructionReverseIterator iter(block->rbegin());
3197 iter != block->rend(); iter++) {
3198 if (iter->isRecoveredOnBailout()) {
3199 continue;
3202 if (iter->type() == MIRType::None) {
3203 if (iter->isTest()) {
3204 if (!TruncateTest(alloc(), iter->toTest())) {
3205 return false;
3208 continue;
3211 // Remember all bitop instructions for folding after range analysis.
3212 switch (iter->op()) {
3213 case MDefinition::Opcode::BitAnd:
3214 case MDefinition::Opcode::BitOr:
3215 case MDefinition::Opcode::BitXor:
3216 case MDefinition::Opcode::Lsh:
3217 case MDefinition::Opcode::Rsh:
3218 case MDefinition::Opcode::Ursh:
3219 if (!bitops.append(static_cast<MBinaryBitwiseInstruction*>(*iter))) {
3220 return false;
3222 break;
3223 default:;
3226 bool shouldClone = false;
3227 TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
3229 // Truncate this instruction if possible.
3230 if (!canTruncate(*iter, kind) || !iter->canTruncate()) {
3231 continue;
3234 SpewTruncate(*iter, kind, shouldClone);
3236 // If needed, clone the current instruction to keep it for the
3237 // bailout path. This gives us the ability to truncate instructions
3238 // even after the removal of branches.
3239 if (shouldClone && !CloneForDeadBranches(alloc(), *iter)) {
3240 return false;
3243 // TruncateAfterBailouts keeps the bailout code as-is and
3244 // continues with truncated operations, with the expectation
3245 // that we are unlikely to bail out. If we do bail out, then we
3246 // will set a flag in FinishBailoutToBaseline to prevent eager
3247 // truncation when we recompile, to avoid bailout loops.
3248 if (kind == TruncateKind::TruncateAfterBailouts) {
3249 iter->setBailoutKind(BailoutKind::EagerTruncation);
3252 iter->truncate(kind);
3254 // Delay updates of inputs/outputs to avoid creating nodes which
3255 // would be removed by the truncation of the next operations.
3256 iter->setInWorklist();
3257 if (!worklist.append(*iter)) {
3258 return false;
3261 for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd());
3262 iter != end; ++iter) {
3263 bool shouldClone = false;
3264 TruncateKind kind = ComputeTruncateKind(*iter, &shouldClone);
3266 // Truncate this phi if possible.
3267 if (shouldClone || !canTruncate(*iter, kind) || !iter->canTruncate()) {
3268 continue;
3271 SpewTruncate(*iter, kind, shouldClone);
3273 iter->truncate(kind);
3275 // Delay updates of inputs/outputs to avoid creating nodes which
3276 // would be removed by the truncation of the next operations.
3277 iter->setInWorklist();
3278 if (!worklist.append(*iter)) {
3279 return false;
3284 // Update inputs/outputs of truncated instructions.
3285 JitSpew(JitSpew_Range, "Do graph type fixup (dequeue)");
3286 while (!worklist.empty()) {
3287 if (!alloc().ensureBallast()) {
3288 return false;
3290 MDefinition* def = worklist.popCopy();
3291 def->setNotInWorklist();
3292 RemoveTruncatesOnOutput(def);
3293 adjustTruncatedInputs(def);
3296 return true;
3299 bool RangeAnalysis::removeUnnecessaryBitops() {
3300 JitSpew(JitSpew_Range, "Begin (removeUnnecessaryBitops)");
3301 // Note: This operation changes the semantics of the program in a way which
3302 // only works with Int32. Recover instructions added by the Sink phase
3303 // expect the MIR graph to still have valid flow as if these were double
3304 // operations instead of Int32 operations. Thus, this phase should be
3305 // executed after the Sink phase, and before DCE.
3307 // Fold any unnecessary bitops in the graph, such as (x | 0) on an integer
3308 // input. This is done after range analysis rather than during GVN as the
3309 // presence of the bitop can change which instructions are truncated.
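// As a hypothetical example, if range analysis proves |x| already fits in
// int32, a bitop like |x | 0| folds to |x| here; folding it earlier (during
// GVN) could have hidden a truncation opportunity from the pass above.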
3310 for (size_t i = 0; i < bitops.length(); i++) {
3311 MBinaryBitwiseInstruction* ins = bitops[i];
3312 if (ins->isRecoveredOnBailout()) {
3313 continue;
3316 MDefinition* folded = ins->foldUnnecessaryBitop();
3317 if (folded != ins) {
3318 ins->replaceAllLiveUsesWith(folded);
3319 ins->setRecoveredOnBailout();
3323 bitops.clear();
3324 return true;
3327 ///////////////////////////////////////////////////////////////////////////////
3328 // Collect Range information of operands
3329 ///////////////////////////////////////////////////////////////////////////////
3331 void MInArray::collectRangeInfoPreTrunc() {
3332 Range indexRange(index());
3333 if (indexRange.isFiniteNonNegative()) {
3334 needsNegativeIntCheck_ = false;
3335 setNotGuard();
3339 void MLoadElementHole::collectRangeInfoPreTrunc() {
3340 Range indexRange(index());
3341 if (indexRange.isFiniteNonNegative()) {
3342 needsNegativeIntCheck_ = false;
3343 setNotGuard();
3347 void MInt32ToIntPtr::collectRangeInfoPreTrunc() {
3348 Range inputRange(input());
3349 if (inputRange.isFiniteNonNegative()) {
3350 canBeNegative_ = false;
3354 void MClz::collectRangeInfoPreTrunc() {
3355 Range inputRange(input());
3356 if (!inputRange.canBeZero()) {
3357 operandIsNeverZero_ = true;
3361 void MCtz::collectRangeInfoPreTrunc() {
3362 Range inputRange(input());
3363 if (!inputRange.canBeZero()) {
3364 operandIsNeverZero_ = true;
3368 void MDiv::collectRangeInfoPreTrunc() {
3369 Range lhsRange(lhs());
3370 Range rhsRange(rhs());
3372 // Test if Dividend is non-negative.
3373 if (lhsRange.isFiniteNonNegative()) {
3374 canBeNegativeDividend_ = false;
3377 // Try removing divide by zero check.
3378 if (!rhsRange.canBeZero()) {
3379 canBeDivideByZero_ = false;
3382 // If lhsRange does not contain INT32_MIN in its range,
3383 // negative overflow check can be skipped.
3384 if (!lhsRange.contains(INT32_MIN)) {
3385 canBeNegativeOverflow_ = false;
3388 // If rhsRange does not contain -1 likewise.
3389 if (!rhsRange.contains(-1)) {
3390 canBeNegativeOverflow_ = false;
3393 // If lhsRange does not contain a zero,
3394 // negative zero check can be skipped.
3395 if (!lhsRange.canBeZero()) {
3396 canBeNegativeZero_ = false;
3399 // If rhsRange >= 0 negative zero check can be skipped.
3400 if (rhsRange.isFiniteNonNegative()) {
3401 canBeNegativeZero_ = false;
3404 if (fallible()) {
3405 setGuardRangeBailoutsUnchecked();
3409 void MMul::collectRangeInfoPreTrunc() {
3410 Range lhsRange(lhs());
3411 Range rhsRange(rhs());
3413 // If lhsRange contains only positive values, we can skip the negative zero check.
3414 if (lhsRange.isFiniteNonNegative() && !lhsRange.canBeZero()) {
3415 setCanBeNegativeZero(false);
3418 // Likewise rhsRange.
3419 if (rhsRange.isFiniteNonNegative() && !rhsRange.canBeZero()) {
3420 setCanBeNegativeZero(false);
3423 // If rhsRange and lhsRange contain non-negative integers only,
3424 // we can skip the negative zero check.
3425 if (rhsRange.isFiniteNonNegative() && lhsRange.isFiniteNonNegative()) {
3426 setCanBeNegativeZero(false);
3429 // If rhsRange and lhsRange are both negative, we can skip the negative zero check.
3430 if (rhsRange.isFiniteNegative() && lhsRange.isFiniteNegative()) {
3431 setCanBeNegativeZero(false);
3435 void MMod::collectRangeInfoPreTrunc() {
3436 Range lhsRange(lhs());
3437 Range rhsRange(rhs());
3438 if (lhsRange.isFiniteNonNegative()) {
3439 canBeNegativeDividend_ = false;
3441 if (!rhsRange.canBeZero()) {
3442 canBeDivideByZero_ = false;
3444 if (type() == MIRType::Int32 && fallible()) {
3445 setGuardRangeBailoutsUnchecked();
3449 void MToNumberInt32::collectRangeInfoPreTrunc() {
3450 Range inputRange(input());
3451 if (!inputRange.canBeNegativeZero()) {
3452 needsNegativeZeroCheck_ = false;
3456 void MBoundsCheck::collectRangeInfoPreTrunc() {
3457 Range indexRange(index());
3458 Range lengthRange(length());
3459 if (!indexRange.hasInt32LowerBound() || !indexRange.hasInt32UpperBound()) {
3460 return;
3462 if (!lengthRange.hasInt32LowerBound() || lengthRange.canBeNaN()) {
3463 return;
3466 int64_t indexLower = indexRange.lower();
3467 int64_t indexUpper = indexRange.upper();
3468 int64_t lengthLower = lengthRange.lower();
3469 int64_t min = minimum();
3470 int64_t max = maximum();
3472 if (indexLower + min >= 0 && indexUpper + max < lengthLower) {
3473 fallible_ = false;
3477 void MBoundsCheckLower::collectRangeInfoPreTrunc() {
3478 Range indexRange(index());
3479 if (indexRange.hasInt32LowerBound() && indexRange.lower() >= minimum_) {
3480 fallible_ = false;
3484 void MCompare::collectRangeInfoPreTrunc() {
3485 if (!Range(lhs()).canBeNaN() && !Range(rhs()).canBeNaN()) {
3486 operandsAreNeverNaN_ = true;
3490 void MNot::collectRangeInfoPreTrunc() {
3491 if (!Range(input()).canBeNaN()) {
3492 operandIsNeverNaN_ = true;
3496 void MPowHalf::collectRangeInfoPreTrunc() {
3497 Range inputRange(input());
3498 if (!inputRange.canBeInfiniteOrNaN() || inputRange.hasInt32LowerBound()) {
3499 operandIsNeverNegativeInfinity_ = true;
3501 if (!inputRange.canBeNegativeZero()) {
3502 operandIsNeverNegativeZero_ = true;
3504 if (!inputRange.canBeNaN()) {
3505 operandIsNeverNaN_ = true;
3509 void MUrsh::collectRangeInfoPreTrunc() {
3510 if (type() == MIRType::Int64) {
3511 return;
3514 Range lhsRange(lhs()), rhsRange(rhs());
3516 // As in MUrsh::computeRange(), convert the inputs.
3517 lhsRange.wrapAroundToInt32();
3518 rhsRange.wrapAroundToShiftCount();
3520 // If the most significant bit of our result is always going to be zero,
3521 // we can optimize by disabling bailout checks for enforcing an int32 range.
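// As a hypothetical example, |x >>> 1| always clears the most significant bit,
// so its unsigned result is guaranteed to fit in the int32 range and the
// bailout check can be disabled.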
3522 if (lhsRange.lower() >= 0 || rhsRange.lower() >= 1) {
3523 bailoutsDisabled_ = true;
3527 static bool DoesMaskMatchRange(int32_t mask, Range& range) {
3528 // Check if the range is non-negative, because the bitand operator in
3529 // `(-3) & 0xff` can't be eliminated.
3530 if (range.lower() >= 0) {
3531 MOZ_ASSERT(range.isInt32());
3532 // Check that the mask value has all bits set given the range upper bound.
3533 // Note that the upper bound does not have to be exactly the mask value. For
3534 // example, consider `x & 0xfff` where `x` is a uint8. That expression can
3535 // still be optimized to `x`.
3536 int bits = 1 + FloorLog2(range.upper());
3537 uint32_t maskNeeded = (bits == 32) ? 0xffffffff : (uint32_t(1) << bits) - 1;
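    // For example, if range.upper() is 255 (a uint8 value), then
    // bits = 1 + FloorLog2(255) = 8 and maskNeeded = 0xff; a mask such as
    // 0xfff has all of those bits set, so the bitand is redundant.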
    if ((mask & maskNeeded) == maskNeeded) {
      return true;
    }
  }

  return false;
}

void MBinaryBitwiseInstruction::collectRangeInfoPreTrunc() {
  Range lhsRange(lhs());
  Range rhsRange(rhs());

  if (lhs()->isConstant() && lhs()->type() == MIRType::Int32 &&
      DoesMaskMatchRange(lhs()->toConstant()->toInt32(), rhsRange)) {
    maskMatchesRightRange = true;
  }

  if (rhs()->isConstant() && rhs()->type() == MIRType::Int32 &&
      DoesMaskMatchRange(rhs()->toConstant()->toInt32(), lhsRange)) {
    maskMatchesLeftRange = true;
  }
}

void MNaNToZero::collectRangeInfoPreTrunc() {
  Range inputRange(input());

  if (!inputRange.canBeNaN()) {
    operandIsNeverNaN_ = true;
  }
  if (!inputRange.canBeNegativeZero()) {
    operandIsNeverNegativeZero_ = true;
  }
}

bool RangeAnalysis::prepareForUCE(bool* shouldRemoveDeadCode) {
  *shouldRemoveDeadCode = false;

  for (ReversePostorderIterator iter(graph_.rpoBegin());
       iter != graph_.rpoEnd(); iter++) {
    MBasicBlock* block = *iter;

    if (!block->unreachable()) {
      continue;
    }

    // Filter out unreachable fake entries.
    if (block->numPredecessors() == 0) {
      // Ignore fixup blocks added by the Value Numbering phase, in order to
      // keep the dominator tree as-is when we have OSR blocks that are no
      // longer reachable from the main entry point of the graph.
      MOZ_ASSERT(graph_.osrBlock());
      continue;
    }

    MControlInstruction* cond = block->getPredecessor(0)->lastIns();
    if (!cond->isTest()) {
      continue;
    }

    // Replace the condition of the test control instruction by a constant
    // chosen based on which of the successors has the unreachable flag, which
    // is added by MBeta::computeRange on its own block.
    MTest* test = cond->toTest();
    MDefinition* condition = test->input();

    // If the false-branch is unreachable, then the test condition must be
    // true. If the true-branch is unreachable, then the test condition must
    // be false.
    MOZ_ASSERT(block == test->ifTrue() || block == test->ifFalse());
    bool value = block == test->ifFalse();
    MConstant* constant =
        MConstant::New(alloc().fallible(), BooleanValue(value));
    if (!constant) {
      return false;
    }

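    // The condition is no longer used by the test once the constant takes
    // its place, but the bailout checks that shrank the ranges of its
    // operands must not be lost; flag it so that tryRemovingGuards() below
    // can decide which of those checks are really needed.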
    condition->setGuardRangeBailoutsUnchecked();

    test->block()->insertBefore(test, constant);

    test->replaceOperand(0, constant);
    JitSpew(JitSpew_Range,
            "Update condition of %u to reflect unreachable branches.",
            test->id());

    *shouldRemoveDeadCode = true;
  }

  return tryRemovingGuards();
}

bool RangeAnalysis::tryRemovingGuards() {
  MDefinitionVector guards(alloc());

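  // Collect every definition currently flagged as a range-bailout guard into
  // a worklist; the loop further below propagates the flag to the operands of
  // guards that turn out not to need it themselves.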
  for (ReversePostorderIterator block = graph_.rpoBegin();
       block != graph_.rpoEnd(); block++) {
    for (MDefinitionIterator iter(*block); iter; iter++) {
      if (!iter->isGuardRangeBailouts()) {
        continue;
      }

      iter->setInWorklist();
      if (!guards.append(*iter)) {
        return false;
      }
    }
  }

  // Flag all fallible instructions which were indirectly used to compute the
  // condition, so that we do not ignore bailout paths that are used to shrink
  // the input ranges of the condition's operands.
  for (size_t i = 0; i < guards.length(); i++) {
    MDefinition* guard = guards[i];

    // If this instruction is a guard even without guardRangeBailouts, there
    // is no point in trying to hoist the guardRangeBailouts check.
    guard->setNotGuardRangeBailouts();
    if (!DeadIfUnused(guard)) {
      guard->setGuardRangeBailouts();
      continue;
    }
    guard->setGuardRangeBailouts();

    if (!guard->isPhi()) {
      if (!guard->range()) {
        continue;
      }

      // Filter the range of the instruction based on its MIRType.
      Range typeFilteredRange(guard);

      // If the output range is updated by adding the inner range, then the
      // MIRType acts as an effectful filter. As we do not know whether this
      // filtered Range might change the result of the previous comparison,
      // we have to keep this instruction as a guard, because it has to bail
      // out in order to restrict the Range to its MIRType.
      if (typeFilteredRange.update(guard->range())) {
        continue;
      }
    }

    guard->setNotGuardRangeBailouts();

    // Propagate the guard to its operands.
    for (size_t op = 0, e = guard->numOperands(); op < e; op++) {
      MDefinition* operand = guard->getOperand(op);

      // Already marked.
      if (operand->isInWorklist()) {
        continue;
      }

      MOZ_ASSERT(!operand->isGuardRangeBailouts());

      operand->setInWorklist();
      operand->setGuardRangeBailouts();
      if (!guards.append(operand)) {
        return false;
      }
    }
  }

  for (size_t i = 0; i < guards.length(); i++) {
    MDefinition* guard = guards[i];
    guard->setNotInWorklist();
  }

  return true;
}