//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library. First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle. We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
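//
// (Illustrative note, not part of the original header: a client that builds
// the expression x+y twice, via two separate getAddExpr calls, receives the
// same SCEV* both times, because operands are canonically ordered and nodes
// are uniqued in a FoldingSet; "same expression" is thus a pointer compare.)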
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (ie, a PHI node). If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression. These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;
STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derivative over aggregate data."),
                        cl::init(100));

INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
                "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;
//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

void SCEV::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
void SCEV::print(raw_ostream &OS) const {
  switch (getSCEVType()) {
  case scConstant:
    WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
    return;
  case scTruncate: {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
    const SCEV *Op = Trunc->getOperand();
    OS << "(trunc " << *Op->getType() << " " << *Op << " to "
       << *Trunc->getType() << ")";
    return;
  }
  case scZeroExtend: {
    const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
    const SCEV *Op = ZExt->getOperand();
    OS << "(zext " << *Op->getType() << " " << *Op << " to "
       << *ZExt->getType() << ")";
    return;
  }
  case scSignExtend: {
    const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
    const SCEV *Op = SExt->getOperand();
    OS << "(sext " << *Op->getType() << " " << *Op << " to "
       << *SExt->getType() << ")";
    return;
  }
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
    OS << "{" << *AR->getOperand(0);
    for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
      OS << ",+," << *AR->getOperand(i);
    OS << "}<";
    if (AR->hasNoUnsignedWrap())
      OS << "nuw><";
    if (AR->hasNoSignedWrap())
      OS << "nsw><";
    WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
    OS << ">";
    return;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
    const char *OpStr = 0;
    switch (NAry->getSCEVType()) {
    case scAddExpr: OpStr = " + "; break;
    case scMulExpr: OpStr = " * "; break;
    case scUMaxExpr: OpStr = " umax "; break;
    case scSMaxExpr: OpStr = " smax "; break;
    }
    OS << "(";
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      OS << **I;
      if (llvm::next(I) != E)
        OS << OpStr;
    }
    OS << ")";
    return;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
    OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
    return;
  }
  case scUnknown: {
    const SCEVUnknown *U = cast<SCEVUnknown>(this);
    const Type *AllocTy;
    if (U->isSizeOf(AllocTy)) {
      OS << "sizeof(" << *AllocTy << ")";
      return;
    }
    if (U->isAlignOf(AllocTy)) {
      OS << "alignof(" << *AllocTy << ")";
      return;
    }

    const Type *CTy;
    Constant *FieldNo;
    if (U->isOffsetOf(CTy, FieldNo)) {
      OS << "offsetof(" << *CTy << ", ";
      WriteAsOperand(OS, FieldNo, false);
      OS << ")";
      return;
    }

    // Otherwise just print it normally.
    WriteAsOperand(OS, U->getValue(), false);
    return;
  }
  case scCouldNotCompute:
    OS << "***COULDNOTCOMPUTE***";
    return;
  }
  llvm_unreachable("Unknown SCEV kind!");
}
const Type *SCEV::getType() const {
  switch (getSCEVType()) {
  case scConstant:
    return cast<SCEVConstant>(this)->getType();
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return cast<SCEVCastExpr>(this)->getType();
  case scAddRecExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr:
    return cast<SCEVNAryExpr>(this)->getType();
  case scAddExpr:
    return cast<SCEVAddExpr>(this)->getType();
  case scUDivExpr:
    return cast<SCEVUDivExpr>(this)->getType();
  case scUnknown:
    return cast<SCEVUnknown>(this)->getType();
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}
bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}
const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(getContext(), Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
  return getConstant(ConstantInt::get(ITy, V, isSigned));
}
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate non-integer value!");
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot zero extend non-integer value!");
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot sign extend non-integer value!");
}
void SCEVUnknown::deleted() {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Release the value.
  setValPtr(0);
}

void SCEVUnknown::allUsesReplacedWith(Value *New) {
  // Clear this SCEVUnknown from various maps.
  SE->forgetMemoizedResults(this);

  // Remove this SCEVUnknown from the uniquing map.
  SE->UniqueSCEVs.RemoveNode(this);

  // Update this SCEVUnknown to point to the new value. This is needed
  // because there may still be outstanding SCEVs which still point to
  // this SCEVUnknown.
  setValPtr(New);
}
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue() &&
            CE->getNumOperands() == 2)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
            if (CI->isOne()) {
              AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
                                 ->getElementType();
              return true;
            }

  return false;
}
bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getOperand(0)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          if (const StructType *STy = dyn_cast<StructType>(Ty))
            if (!STy->isPacked() &&
                CE->getNumOperands() == 3 &&
                CE->getOperand(1)->isNullValue()) {
              if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                if (CI->isOne() &&
                    STy->getNumElements() == 2 &&
                    STy->getElementType(0)->isIntegerTy(1)) {
                  AllocTy = STy->getElementType(1);
                  return true;
                }
            }
        }

  return false;
}
bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
    if (VCE->getOpcode() == Instruction::PtrToInt)
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
        if (CE->getOpcode() == Instruction::GetElementPtr &&
            CE->getNumOperands() == 3 &&
            CE->getOperand(0)->isNullValue() &&
            CE->getOperand(1)->isNullValue()) {
          const Type *Ty =
            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
          // Ignore vector types here so that ScalarEvolutionExpander doesn't
          // emit getelementptrs that index into vectors.
          if (Ty->isStructTy() || Ty->isArrayTy()) {
            CTy = Ty;
            FieldNo = CE->getOperand(2);
            return true;
          }
        }

  return false;
}
//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS. This comparator is used to canonicalize
  /// expressions.
  class SCEVComplexityCompare {
    const LoopInfo *const LI;
  public:
    explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}

    // Return true if LHS is less complex than RHS; used for sorting.
    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      return compare(LHS, RHS) < 0;
    }

    // Return negative, zero, or positive, if LHS is less than, equal to, or
    // greater than RHS, respectively. A three-way result allows recursive
    // comparisons to be more efficient.
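    // (Illustrative note, not in the original source: the primary sort key
    // below is getSCEVType(), so for example a SCEVConstant always orders
    // before a SCEVUnknown; this is what lets getAddExpr assume that any
    // constants have been grouped at the front of its operand list.)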
    int compare(const SCEV *LHS, const SCEV *RHS) const {
      // Fast-path: SCEVs are uniqued so we can do a quick equality check.
      if (LHS == RHS)
        return 0;

      // Primarily, sort the SCEVs by their getSCEVType().
      unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
      if (LType != RType)
        return (int)LType - (int)RType;

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.
      switch (LType) {
      case scUnknown: {
        const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Sort SCEVUnknown values with some loose heuristics. TODO: This is
        // not as complete as it could be.
        const Value *LV = LU->getValue(), *RV = RU->getValue();

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        bool LIsPointer = LV->getType()->isPointerTy(),
             RIsPointer = RV->getType()->isPointerTy();
        if (LIsPointer != RIsPointer)
          return (int)LIsPointer - (int)RIsPointer;

        // Compare getValueID values.
        unsigned LID = LV->getValueID(),
                 RID = RV->getValueID();
        if (LID != RID)
          return (int)LID - (int)RID;

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LV)) {
          const Argument *RA = cast<Argument>(RV);
          unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
          return (int)LArgNo - (int)RArgNo;
        }

        // For instructions, compare their loop depth, and their operand
        // count. This is pretty loose.
        if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
          const Instruction *RInst = cast<Instruction>(RV);

          // Compare loop depths.
          const BasicBlock *LParent = LInst->getParent(),
                           *RParent = RInst->getParent();
          if (LParent != RParent) {
            unsigned LDepth = LI->getLoopDepth(LParent),
                     RDepth = LI->getLoopDepth(RParent);
            if (LDepth != RDepth)
              return (int)LDepth - (int)RDepth;
          }

          // Compare the number of operands.
          unsigned LNumOps = LInst->getNumOperands(),
                   RNumOps = RInst->getNumOperands();
          return (int)LNumOps - (int)RNumOps;
        }

        return 0;
      }

      case scConstant: {
        const SCEVConstant *LC = cast<SCEVConstant>(LHS);
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);

        // Compare constant values.
        const APInt &LA = LC->getValue()->getValue();
        const APInt &RA = RC->getValue()->getValue();
        unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
        if (LBitWidth != RBitWidth)
          return (int)LBitWidth - (int)RBitWidth;
        return LA.ult(RA) ? -1 : 1;
      }

      case scAddRecExpr: {
        const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

        // Compare addrec loop depths.
        const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
        if (LLoop != RLoop) {
          unsigned LDepth = LLoop->getLoopDepth(),
                   RDepth = RLoop->getLoopDepth();
          if (LDepth != RDepth)
            return (int)LDepth - (int)RDepth;
        }

        // Addrec complexity grows with operand count.
        unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
        if (LNumOps != RNumOps)
          return (int)LNumOps - (int)RNumOps;

        // Lexicographically compare.
        for (unsigned i = 0; i != LNumOps; ++i) {
          long X = compare(LA->getOperand(i), RA->getOperand(i));
          if (X != 0)
            return X;
        }

        return 0;
      }

      case scAddExpr:
      case scMulExpr:
      case scSMaxExpr:
      case scUMaxExpr: {
        const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

        // Lexicographically compare n-ary expressions.
        unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
        for (unsigned i = 0; i != LNumOps; ++i) {
          if (i >= RNumOps)
            return 1;
          long X = compare(LC->getOperand(i), RC->getOperand(i));
          if (X != 0)
            return X;
        }
        return (int)LNumOps - (int)RNumOps;
      }

      case scUDivExpr: {
        const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

        // Lexicographically compare udiv expressions.
        long X = compare(LC->getLHS(), RC->getLHS());
        if (X != 0)
          return X;
        return compare(LC->getRHS(), RC->getRHS());
      }

      case scTruncate:
      case scZeroExtend:
      case scSignExtend: {
        const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

        // Compare cast expressions by operand.
        return compare(LC->getOperand(), RC->getOperand());
      }

      default:
        break;
      }

      llvm_unreachable("Unknown SCEV kind!");
      return 0;
    }
  };
}
/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// end up in memory.
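///
/// (Illustrative note, not in the original: an operand list such as
/// {%a, 2, %b, %a} comes out as {2, %a, %a, %b} -- constants first,
/// duplicates adjacent -- so callers can merge duplicates in one linear
/// scan.)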
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (SCEVComplexityCompare(LI)(RHS, LHS))
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity. Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice. Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}
//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value. We must be prepared for
  // overflow. Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W. Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula. However,
  // this formula can be implemented much more efficiently. The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic. To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse. Therefore, this step can be done at
  // width W.
  //
  // The next issue is how to safely do the division by 2^T. The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits. This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula uses less than W + K bits. Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway. We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
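  //
  // (Worked instance, an illustrative note not in the original: for K = 3 and
  // W = 32, K! = 6 = 2^1 * 3, so T = 1 and the odd factor is 3. The
  // multiplicative inverse of 3 modulo 2^32 is 0xAAAAAAAB, so BC(It, 3) is
  // computed as ((It*(It-1)*(It-2)) /u 2^1) * 0xAAAAAAAB, entirely in modular
  // arithmetic, with the product evaluated at W + T = 33 bits.)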
  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
                                                      CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.

  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}
/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number. We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
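///
/// (Illustrative note, not in the original: {0,+,1,+,1} -- a recurrence whose
/// step itself increases by one per iteration -- evaluates at iteration n to
/// 0*BC(n,0) + 1*BC(n,1) + 1*BC(n,2) = n + n*(n-1)/2, i.e. the triangular
/// numbers n*(n+1)/2.)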
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}
//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//
const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
                                               getEffectiveSCEVType(Ty))));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getAddExpr(Operands, false, false);
  }

  // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
  // eliminate all the truncates.
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    bool hasTrunc = false;
    for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
      const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
      hasTrunc = isa<SCEVTruncateExpr>(S);
      Operands.push_back(S);
    }
    if (!hasTrunc)
      return getMulExpr(Operands, false, false);
  }

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // As a special case, fold trunc(undef) to undef. We don't want to
  // know too much about SCEVUnknowns, but this special case is handy
  // and harmless.
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
    if (isa<UndefValue>(U->getValue()))
      return getSCEV(UndefValue::get(Ty));

  // The cast wasn't folded; create an explicit cast node. We can reuse
  // the existing insert position since if we get here, we won't have
  // made any changes which would invalidate it.
  SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
                                                 Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
*ScalarEvolution::getZeroExtendExpr(const SCEV
*Op
,
876 assert(getTypeSizeInBits(Op
->getType()) < getTypeSizeInBits(Ty
) &&
877 "This is not an extending conversion!");
878 assert(isSCEVable(Ty
) &&
879 "This is not a conversion to a SCEVable type!");
880 Ty
= getEffectiveSCEVType(Ty
);
882 // Fold if the operand is constant.
883 if (const SCEVConstant
*SC
= dyn_cast
<SCEVConstant
>(Op
))
885 cast
<ConstantInt
>(ConstantExpr::getZExt(SC
->getValue(),
886 getEffectiveSCEVType(Ty
))));
888 // zext(zext(x)) --> zext(x)
889 if (const SCEVZeroExtendExpr
*SZ
= dyn_cast
<SCEVZeroExtendExpr
>(Op
))
890 return getZeroExtendExpr(SZ
->getOperand(), Ty
);
892 // Before doing any expensive analysis, check to see if we've already
893 // computed a SCEV for this Op and Ty.
895 ID
.AddInteger(scZeroExtend
);
899 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
901 // If the input value is a chrec scev, and we can prove that the value
902 // did not overflow the old, smaller, value, we can zero extend all of the
903 // operands (often constants). This allows analysis of something like
904 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
905 if (const SCEVAddRecExpr
*AR
= dyn_cast
<SCEVAddRecExpr
>(Op
))
906 if (AR
->isAffine()) {
907 const SCEV
*Start
= AR
->getStart();
908 const SCEV
*Step
= AR
->getStepRecurrence(*this);
909 unsigned BitWidth
= getTypeSizeInBits(AR
->getType());
910 const Loop
*L
= AR
->getLoop();
912 // If we have special knowledge that this addrec won't overflow,
913 // we don't need to do any further analysis.
914 if (AR
->hasNoUnsignedWrap())
915 return getAddRecExpr(getZeroExtendExpr(Start
, Ty
),
916 getZeroExtendExpr(Step
, Ty
),
919 // Check whether the backedge-taken count is SCEVCouldNotCompute.
920 // Note that this serves two purposes: It filters out loops that are
921 // simply not analyzable, and it covers the case where this code is
922 // being called from within backedge-taken count analysis, such that
923 // attempting to ask for the backedge-taken count would likely result
924 // in infinite recursion. In the later case, the analysis code will
925 // cope with a conservative value, and it will take care to purge
926 // that value once it has finished.
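      // (Illustrative note, not in the original: for {0,+,1} over i8 in a
      // loop with MaxBECount = 100, Start + Step*MaxBECount = 100 computed
      // in i8 equals the same sum computed with every operand zero-extended
      // to a wider type, so no unsigned wrap occurs and the zext can be
      // pushed into the addrec's operands by the checks below.)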
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.

        // Check whether the backedge-taken count can be losslessly casted to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }

        // If the backedge is guarded by a comparison with the pre-inc value
        // the addrec is safe. Also, if the entry is guarded by a comparison
        // with the start value and the backedge is guarded by a comparison
        // with the post-inc value, the addrec is safe.
        if (isKnownPositive(Step)) {
          const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
                                      getUnsignedRange(Step).getUnsignedMax());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 L);
        } else if (isKnownNegative(Step)) {
          const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
                                      getSignedRange(Step).getSignedMin());
          if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
              (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
               isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
                                           AR->getPostIncExpr(*this), N)))
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 L);
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
                                                   Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
*ScalarEvolution::getSignExtendExpr(const SCEV
*Op
,
1009 assert(getTypeSizeInBits(Op
->getType()) < getTypeSizeInBits(Ty
) &&
1010 "This is not an extending conversion!");
1011 assert(isSCEVable(Ty
) &&
1012 "This is not a conversion to a SCEVable type!");
1013 Ty
= getEffectiveSCEVType(Ty
);
1015 // Fold if the operand is constant.
1016 if (const SCEVConstant
*SC
= dyn_cast
<SCEVConstant
>(Op
))
1018 cast
<ConstantInt
>(ConstantExpr::getSExt(SC
->getValue(),
1019 getEffectiveSCEVType(Ty
))));
1021 // sext(sext(x)) --> sext(x)
1022 if (const SCEVSignExtendExpr
*SS
= dyn_cast
<SCEVSignExtendExpr
>(Op
))
1023 return getSignExtendExpr(SS
->getOperand(), Ty
);
1025 // sext(zext(x)) --> zext(x)
1026 if (const SCEVZeroExtendExpr
*SZ
= dyn_cast
<SCEVZeroExtendExpr
>(Op
))
1027 return getZeroExtendExpr(SZ
->getOperand(), Ty
);
1029 // Before doing any expensive analysis, check to see if we've already
1030 // computed a SCEV for this Op and Ty.
1031 FoldingSetNodeID ID
;
1032 ID
.AddInteger(scSignExtend
);
1036 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
1038 // If the input value is a chrec scev, and we can prove that the value
1039 // did not overflow the old, smaller, value, we can sign extend all of the
1040 // operands (often constants). This allows analysis of something like
1041 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1042 if (const SCEVAddRecExpr
*AR
= dyn_cast
<SCEVAddRecExpr
>(Op
))
1043 if (AR
->isAffine()) {
1044 const SCEV
*Start
= AR
->getStart();
1045 const SCEV
*Step
= AR
->getStepRecurrence(*this);
1046 unsigned BitWidth
= getTypeSizeInBits(AR
->getType());
1047 const Loop
*L
= AR
->getLoop();
1049 // If we have special knowledge that this addrec won't overflow,
1050 // we don't need to do any further analysis.
1051 if (AR
->hasNoSignedWrap())
1052 return getAddRecExpr(getSignExtendExpr(Start
, Ty
),
1053 getSignExtendExpr(Step
, Ty
),
1056 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1057 // Note that this serves two purposes: It filters out loops that are
1058 // simply not analyzable, and it covers the case where this code is
1059 // being called from within backedge-taken count analysis, such that
1060 // attempting to ask for the backedge-taken count would likely result
1061 // in infinite recursion. In the later case, the analysis code will
1062 // cope with a conservative value, and it will take care to purge
1063 // that value once it has finished.
1064 const SCEV
*MaxBECount
= getMaxBackedgeTakenCount(L
);
1065 if (!isa
<SCEVCouldNotCompute
>(MaxBECount
)) {
1066 // Manually compute the final value for AR, checking for
1069 // Check whether the backedge-taken count can be losslessly casted to
1070 // the addrec's type. The count is always unsigned.
1071 const SCEV
*CastedMaxBECount
=
1072 getTruncateOrZeroExtend(MaxBECount
, Start
->getType());
1073 const SCEV
*RecastedMaxBECount
=
1074 getTruncateOrZeroExtend(CastedMaxBECount
, MaxBECount
->getType());
1075 if (MaxBECount
== RecastedMaxBECount
) {
1076 const Type
*WideTy
= IntegerType::get(getContext(), BitWidth
* 2);
1077 // Check whether Start+Step*MaxBECount has no signed overflow.
1078 const SCEV
*SMul
= getMulExpr(CastedMaxBECount
, Step
);
1079 const SCEV
*Add
= getAddExpr(Start
, SMul
);
1080 const SCEV
*OperandExtendedAdd
=
1081 getAddExpr(getSignExtendExpr(Start
, WideTy
),
1082 getMulExpr(getZeroExtendExpr(CastedMaxBECount
, WideTy
),
1083 getSignExtendExpr(Step
, WideTy
)));
1084 if (getSignExtendExpr(Add
, WideTy
) == OperandExtendedAdd
)
1085 // Return the expression with the addrec on the outside.
1086 return getAddRecExpr(getSignExtendExpr(Start
, Ty
),
1087 getSignExtendExpr(Step
, Ty
),
1090 // Similar to above, only this time treat the step value as unsigned.
1091 // This covers loops that count up with an unsigned step.
1092 const SCEV
*UMul
= getMulExpr(CastedMaxBECount
, Step
);
1093 Add
= getAddExpr(Start
, UMul
);
1094 OperandExtendedAdd
=
1095 getAddExpr(getSignExtendExpr(Start
, WideTy
),
1096 getMulExpr(getZeroExtendExpr(CastedMaxBECount
, WideTy
),
1097 getZeroExtendExpr(Step
, WideTy
)));
1098 if (getSignExtendExpr(Add
, WideTy
) == OperandExtendedAdd
)
1099 // Return the expression with the addrec on the outside.
1100 return getAddRecExpr(getSignExtendExpr(Start
, Ty
),
1101 getZeroExtendExpr(Step
, Ty
),
1105 // If the backedge is guarded by a comparison with the pre-inc value
1106 // the addrec is safe. Also, if the entry is guarded by a comparison
1107 // with the start value and the backedge is guarded by a comparison
1108 // with the post-inc value, the addrec is safe.
1109 if (isKnownPositive(Step
)) {
1110 const SCEV
*N
= getConstant(APInt::getSignedMinValue(BitWidth
) -
1111 getSignedRange(Step
).getSignedMax());
1112 if (isLoopBackedgeGuardedByCond(L
, ICmpInst::ICMP_SLT
, AR
, N
) ||
1113 (isLoopEntryGuardedByCond(L
, ICmpInst::ICMP_SLT
, Start
, N
) &&
1114 isLoopBackedgeGuardedByCond(L
, ICmpInst::ICMP_SLT
,
1115 AR
->getPostIncExpr(*this), N
)))
1116 // Return the expression with the addrec on the outside.
1117 return getAddRecExpr(getSignExtendExpr(Start
, Ty
),
1118 getSignExtendExpr(Step
, Ty
),
1120 } else if (isKnownNegative(Step
)) {
1121 const SCEV
*N
= getConstant(APInt::getSignedMaxValue(BitWidth
) -
1122 getSignedRange(Step
).getSignedMin());
1123 if (isLoopBackedgeGuardedByCond(L
, ICmpInst::ICMP_SGT
, AR
, N
) ||
1124 (isLoopEntryGuardedByCond(L
, ICmpInst::ICMP_SGT
, Start
, N
) &&
1125 isLoopBackedgeGuardedByCond(L
, ICmpInst::ICMP_SGT
,
1126 AR
->getPostIncExpr(*this), N
)))
1127 // Return the expression with the addrec on the outside.
1128 return getAddRecExpr(getSignExtendExpr(Start
, Ty
),
1129 getSignExtendExpr(Step
, Ty
),
1135 // The cast wasn't folded; create an explicit cast node.
1136 // Recompute the insert position, as it may have been invalidated.
1137 if (const SCEV
*S
= UniqueSCEVs
.FindNodeOrInsertPos(ID
, IP
)) return S
;
1138 SCEV
*S
= new (SCEVAllocator
) SCEVSignExtendExpr(ID
.Intern(SCEVAllocator
),
1140 UniqueSCEVs
.InsertNode(S
, IP
);
/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
///
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // Force the cast to be folded into the operands of an addrec.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Ops;
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      Ops.push_back(getAnyExtendExpr(*I, Ty));
    return getAddRecExpr(Ops, AR->getLoop());
  }

  // As a special case, fold anyext(undef) to undef. We don't want to
  // know too much about SCEVUnknowns, but this special case is handy
  // and harmless.
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
    if (isa<UndefValue>(U->getValue()))
      return getSCEV(UndefValue::get(Ty));

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}
/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddRecExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddRecExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddRecExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
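///
/// (Worked instance, an illustrative note not in the original: reading the
/// example above with A = 2 and B = 3, the map becomes (m,7), (n,1), (o,2),
/// (p,2), (q,6), (r,0), and AccumulatedConstant receives 13 + 6*29 = 187.)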
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SCEV *const *Ops, size_t NumOperands,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands. They are sorted, with constants first.
  unsigned i = 0;
  while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
    ++i;
    // Pull a buried constant out to the outside.
    if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
      Interesting = true;
    AccumulatedConstant += Scale * C->getValue()->getValue();
  }

  // Next comes everything else. We're especially interested in multiplies
  // here, but they're in the middle, so just visit the rest with one loop.
  for (; i != NumOperands; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       Add->op_begin(), Add->getNumOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}
1302 /// getAddExpr - Get a canonical add expression, or something simpler if
1304 const SCEV
*ScalarEvolution::getAddExpr(SmallVectorImpl
<const SCEV
*> &Ops
,
1305 bool HasNUW
, bool HasNSW
) {
1306 assert(!Ops
.empty() && "Cannot get empty add!");
1307 if (Ops
.size() == 1) return Ops
[0];
1309 const Type
*ETy
= getEffectiveSCEVType(Ops
[0]->getType());
1310 for (unsigned i
= 1, e
= Ops
.size(); i
!= e
; ++i
)
1311 assert(getEffectiveSCEVType(Ops
[i
]->getType()) == ETy
&&
1312 "SCEVAddExpr operand types don't match!");
1315 // If HasNSW is true and all the operands are non-negative, infer HasNUW.
1316 if (!HasNUW
&& HasNSW
) {
1318 for (SmallVectorImpl
<const SCEV
*>::const_iterator I
= Ops
.begin(),
1319 E
= Ops
.end(); I
!= E
; ++I
)
1320 if (!isKnownNonNegative(*I
)) {
1324 if (All
) HasNUW
= true;
1327 // Sort by complexity, this groups all similar expression types together.
1328 GroupByComplexity(Ops
, LI
);
1330 // If there are any constants, fold them together.
1332 if (const SCEVConstant
*LHSC
= dyn_cast
<SCEVConstant
>(Ops
[0])) {
1334 assert(Idx
< Ops
.size());
1335 while (const SCEVConstant
*RHSC
= dyn_cast
<SCEVConstant
>(Ops
[Idx
])) {
1336 // We found two constants, fold them together!
1337 Ops
[0] = getConstant(LHSC
->getValue()->getValue() +
1338 RHSC
->getValue()->getValue());
1339 if (Ops
.size() == 2) return Ops
[0];
1340 Ops
.erase(Ops
.begin()+1); // Erase the folded element
1341 LHSC
= cast
<SCEVConstant
>(Ops
[0]);
1344 // If we are left with a constant zero being added, strip it off.
1345 if (LHSC
->getValue()->isZero()) {
1346 Ops
.erase(Ops
.begin());
1350 if (Ops
.size() == 1) return Ops
[0];
1353 // Okay, check to see if the same value occurs in the operand list more than
1354 // once. If so, merge them together into an multiply expression. Since we
1355 // sorted the list, these values are required to be adjacent.
1356 const Type
*Ty
= Ops
[0]->getType();
1357 bool FoundMatch
= false;
1358 for (unsigned i
= 0, e
= Ops
.size(); i
!= e
-1; ++i
)
1359 if (Ops
[i
] == Ops
[i
+1]) { // X + Y + Y --> X + Y*2
1360 // Scan ahead to count how many equal operands there are.
1362 while (i
+Count
!= e
&& Ops
[i
+Count
] == Ops
[i
])
1364 // Merge the values into a multiply.
1365 const SCEV
*Scale
= getConstant(Ty
, Count
);
1366 const SCEV
*Mul
= getMulExpr(Scale
, Ops
[i
]);
1367 if (Ops
.size() == Count
)
1370 Ops
.erase(Ops
.begin()+i
+1, Ops
.begin()+i
+Count
);
1371 --i
; e
-= Count
- 1;
1375 return getAddExpr(Ops
, HasNUW
, HasNSW
);
1377 // Check for truncates. If all the operands are truncated from the same
1378 // type, see if factoring out the truncate would permit the result to be
1379 // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1380 // if the contents of the resulting outer trunc fold to something simple.
1381 for (; Idx
< Ops
.size() && isa
<SCEVTruncateExpr
>(Ops
[Idx
]); ++Idx
) {
1382 const SCEVTruncateExpr
*Trunc
= cast
<SCEVTruncateExpr
>(Ops
[Idx
]);
1383 const Type
*DstType
= Trunc
->getType();
1384 const Type
*SrcType
= Trunc
->getOperand()->getType();
1385 SmallVector
<const SCEV
*, 8> LargeOps
;
1387 // Check all the operands to see if they can be represented in the
1388 // source type of the truncate.
1389 for (unsigned i
= 0, e
= Ops
.size(); i
!= e
; ++i
) {
1390 if (const SCEVTruncateExpr
*T
= dyn_cast
<SCEVTruncateExpr
>(Ops
[i
])) {
1391 if (T
->getOperand()->getType() != SrcType
) {
1395 LargeOps
.push_back(T
->getOperand());
1396 } else if (const SCEVConstant
*C
= dyn_cast
<SCEVConstant
>(Ops
[i
])) {
1397 LargeOps
.push_back(getAnyExtendExpr(C
, SrcType
));
1398 } else if (const SCEVMulExpr
*M
= dyn_cast
<SCEVMulExpr
>(Ops
[i
])) {
1399 SmallVector
<const SCEV
*, 8> LargeMulOps
;
1400 for (unsigned j
= 0, f
= M
->getNumOperands(); j
!= f
&& Ok
; ++j
) {
1401 if (const SCEVTruncateExpr
*T
=
1402 dyn_cast
<SCEVTruncateExpr
>(M
->getOperand(j
))) {
1403 if (T
->getOperand()->getType() != SrcType
) {
1407 LargeMulOps
.push_back(T
->getOperand());
1408 } else if (const SCEVConstant
*C
=
1409 dyn_cast
<SCEVConstant
>(M
->getOperand(j
))) {
1410 LargeMulOps
.push_back(getAnyExtendExpr(C
, SrcType
));
1417 LargeOps
.push_back(getMulExpr(LargeMulOps
));
1424 // Evaluate the expression in the larger type.
1425 const SCEV
*Fold
= getAddExpr(LargeOps
, HasNUW
, HasNSW
);
1426 // If it folds to something simple, use it. Otherwise, don't.
1427 if (isa
<SCEVConstant
>(Fold
) || isa
<SCEVUnknown
>(Fold
))
1428 return getTruncateExpr(Fold
, DstType
);
1432 // Skip past any other cast SCEVs.
1433 while (Idx
< Ops
.size() && Ops
[Idx
]->getSCEVType() < scAddExpr
)
1436 // If there are add operands they would be next.
1437 if (Idx
< Ops
.size()) {
1438 bool DeletedAdd
= false;
1439 while (const SCEVAddExpr
*Add
= dyn_cast
<SCEVAddExpr
>(Ops
[Idx
])) {
1440 // If we have an add, expand the add operands onto the end of the operands
1442 Ops
.erase(Ops
.begin()+Idx
);
1443 Ops
.append(Add
->op_begin(), Add
->op_end());
1447 // If we deleted at least one add, we added operands to the end of the list,
1448 // and they are not necessarily sorted. Recurse to resort and resimplify
1449 // any operands we just acquired.
1451 return getAddExpr(Ops
);
1454 // Skip over the add expression until we get to a multiply.
1455 while (Idx
< Ops
.size() && Ops
[Idx
]->getSCEVType() < scMulExpr
)
1458 // Check to see if there are any folding opportunities present with
1459 // operands multiplied by constant values.
1460 if (Idx
< Ops
.size() && isa
<SCEVMulExpr
>(Ops
[Idx
])) {
1461 uint64_t BitWidth
= getTypeSizeInBits(Ty
);
1462 DenseMap
<const SCEV
*, APInt
> M
;
1463 SmallVector
<const SCEV
*, 8> NewOps
;
1464 APInt
AccumulatedConstant(BitWidth
, 0);
1465 if (CollectAddOperandsWithScales(M
, NewOps
, AccumulatedConstant
,
1466 Ops
.data(), Ops
.size(),
1467 APInt(BitWidth
, 1), *this)) {
1468 // Some interesting folding opportunity is present, so its worthwhile to
1469 // re-generate the operands list. Group the operands by constant scale,
1470 // to avoid multiplying by the same constant scale multiple times.
1471 std::map
<APInt
, SmallVector
<const SCEV
*, 4>, APIntCompare
> MulOpLists
;
1472 for (SmallVector
<const SCEV
*, 8>::const_iterator I
= NewOps
.begin(),
1473 E
= NewOps
.end(); I
!= E
; ++I
)
1474 MulOpLists
[M
.find(*I
)->second
].push_back(*I
);
1475 // Re-generate the operands list.
1477 if (AccumulatedConstant
!= 0)
1478 Ops
.push_back(getConstant(AccumulatedConstant
));
1479 for (std::map
<APInt
, SmallVector
<const SCEV
*, 4>, APIntCompare
>::iterator
1480 I
= MulOpLists
.begin(), E
= MulOpLists
.end(); I
!= E
; ++I
)
1482 Ops
.push_back(getMulExpr(getConstant(I
->first
),
1483 getAddExpr(I
->second
)));
1485 return getConstant(Ty
, 0);
1486 if (Ops
.size() == 1)
1488 return getAddExpr(Ops
);
1492 // If we are adding something to a multiply expression, make sure the
1493 // something is not already an operand of the multiply. If so, merge it into
1495 for (; Idx
< Ops
.size() && isa
<SCEVMulExpr
>(Ops
[Idx
]); ++Idx
) {
1496 const SCEVMulExpr
*Mul
= cast
<SCEVMulExpr
>(Ops
[Idx
]);
1497 for (unsigned MulOp
= 0, e
= Mul
->getNumOperands(); MulOp
!= e
; ++MulOp
) {
1498 const SCEV
*MulOpSCEV
= Mul
->getOperand(MulOp
);
1499 if (isa
<SCEVConstant
>(MulOpSCEV
))
1501 for (unsigned AddOp
= 0, e
= Ops
.size(); AddOp
!= e
; ++AddOp
)
1502 if (MulOpSCEV
== Ops
[AddOp
]) {
1503 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
1504 const SCEV
*InnerMul
= Mul
->getOperand(MulOp
== 0);
1505 if (Mul
->getNumOperands() != 2) {
1506 // If the multiply has more than two operands, we must get the
1508 SmallVector
<const SCEV
*, 4> MulOps(Mul
->op_begin(),
1509 Mul
->op_begin()+MulOp
);
1510 MulOps
.append(Mul
->op_begin()+MulOp
+1, Mul
->op_end());
1511 InnerMul
= getMulExpr(MulOps
);
1513 const SCEV
*One
= getConstant(Ty
, 1);
1514 const SCEV
*AddOne
= getAddExpr(One
, InnerMul
);
1515 const SCEV
*OuterMul
= getMulExpr(AddOne
, MulOpSCEV
);
1516 if (Ops
.size() == 2) return OuterMul
;
1518 Ops
.erase(Ops
.begin()+AddOp
);
1519 Ops
.erase(Ops
.begin()+Idx
-1);
1521 Ops
.erase(Ops
.begin()+Idx
);
1522 Ops
.erase(Ops
.begin()+AddOp
-1);
1524 Ops
.push_back(OuterMul
);
1525 return getAddExpr(Ops
);
1528 // Check this multiply against other multiplies being added together.
1529 for (unsigned OtherMulIdx
= Idx
+1;
1530 OtherMulIdx
< Ops
.size() && isa
<SCEVMulExpr
>(Ops
[OtherMulIdx
]);
1532 const SCEVMulExpr
*OtherMul
= cast
<SCEVMulExpr
>(Ops
[OtherMulIdx
]);
1533 // If MulOp occurs in OtherMul, we can fold the two multiplies
1535 for (unsigned OMulOp
= 0, e
= OtherMul
->getNumOperands();
1536 OMulOp
!= e
; ++OMulOp
)
1537 if (OtherMul
->getOperand(OMulOp
) == MulOpSCEV
) {
1538 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1539 const SCEV
*InnerMul1
= Mul
->getOperand(MulOp
== 0);
1540 if (Mul
->getNumOperands() != 2) {
1541 SmallVector
<const SCEV
*, 4> MulOps(Mul
->op_begin(),
1542 Mul
->op_begin()+MulOp
);
1543 MulOps
.append(Mul
->op_begin()+MulOp
+1, Mul
->op_end());
1544 InnerMul1
= getMulExpr(MulOps
);
1546 const SCEV
*InnerMul2
= OtherMul
->getOperand(OMulOp
== 0);
1547 if (OtherMul
->getNumOperands() != 2) {
1548 SmallVector
<const SCEV
*, 4> MulOps(OtherMul
->op_begin(),
1549 OtherMul
->op_begin()+OMulOp
);
1550 MulOps
.append(OtherMul
->op_begin()+OMulOp
+1, OtherMul
->op_end());
1551 InnerMul2
= getMulExpr(MulOps
);
1553 const SCEV
*InnerMulSum
= getAddExpr(InnerMul1
,InnerMul2
);
1554 const SCEV
*OuterMul
= getMulExpr(MulOpSCEV
, InnerMulSum
);
1555 if (Ops
.size() == 2) return OuterMul
;
1556 Ops
.erase(Ops
.begin()+Idx
);
1557 Ops
.erase(Ops
.begin()+OtherMulIdx
-1);
1558 Ops
.push_back(OuterMul
);
1559 return getAddExpr(Ops
);
  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isLoopInvariant(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer add and the inner addrec are guaranteed to have no overflow.
      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
                                         HasNUW && AddRec->hasNoUnsignedWrap(),
                                         HasNSW && AddRec->hasNoSignedWrap());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec to the non-loop-invariant parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }
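    // Illustrative example (added for clarity, not from the original
    // source): if c is invariant in L, then c + {0,+,1}<L> folds to
    // {c,+,1}<L>; only the start of the recurrence is rewritten.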
    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx)
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // Other + {A,+,B}<L> + {C,+,D}<L>  -->  Other + {A+C,+,B+D}<L>
        SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                               AddRec->op_end());
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx)
          if (const SCEVAddRecExpr *OtherAddRec =
                dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
            if (OtherAddRec->getLoop() == AddRecLoop) {
              for (unsigned i = 0, e = OtherAddRec->getNumOperands();
                   i != e; ++i) {
                if (i >= AddRecOps.size()) {
                  AddRecOps.append(OtherAddRec->op_begin()+i,
                                   OtherAddRec->op_end());
                  break;
                }
                AddRecOps[i] = getAddExpr(AddRecOps[i],
                                          OtherAddRec->getOperand(i));
              }
              Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
            }
        Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop);
        return getAddExpr(Ops);
      }

    // Otherwise couldn't fold anything into this recurrence.  Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVAddExpr *S =
    static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}
/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
                                        bool HasNUW, bool HasNSW) {
  assert(!Ops.empty() && "Cannot get empty mul!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVMulExpr operand types don't match!");
#endif

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
         E = Ops.end(); I != E; ++I)
      if (!isKnownNonNegative(*I)) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));
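    // Illustrative example (added for clarity, not from the original
    // source): 2*(3+x) becomes 6 + 2*x, exposing the constant term so that
    // later additions can fold into it.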
    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                                           LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    } else if (Ops[0]->isAllOnesValue()) {
      // If we have a mul by -1 of an add, try distributing the -1 among the
      // add operands.
      if (Ops.size() == 2)
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
          SmallVector<const SCEV *, 4> NewOps;
          bool AnyFolded = false;
          for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
               E = Add->op_end(); I != E; ++I) {
            const SCEV *Mul = getMulExpr(Ops[0], *I);
            if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
            NewOps.push_back(Mul);
          }
          if (AnyFolded)
            return getAddExpr(NewOps);
        }
    }
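    // Illustrative example (added for clarity, not from the original
    // source): -1*(a+5) distributes to (-a) + (-5); AnyFolded keeps the
    // result only when at least one term actually folded, so we never trade
    // one multiply for several.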
    if (Ops.size() == 1)
      return Ops[0];
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the
      // operands list.
      Ops.erase(Ops.begin()+Idx);
      Ops.append(Mul->op_begin(), Mul->op_end());
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the
    // list, and they are not necessarily sorted.  Recurse to resort and
    // resimplify any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector
    // if they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    const Loop *AddRecLoop = AddRec->getLoop();
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (isLoopInvariant(Ops[i], AddRecLoop)) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      const SCEV *Scale = getMulExpr(LIOps);
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
        NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));

      // Build the new addrec. Propagate the NUW and NSW flags if both the
      // outer mul and the inner addrec are guaranteed to have no overflow.
      const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop,
                                         HasNUW && AddRec->hasNoUnsignedWrap(),
                                         HasNSW && AddRec->hasNoSignedWrap());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-loop-invariant
      // parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see
    // if there are multiple AddRec's with the same loop induction variable
    // being multiplied together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
         ++OtherIdx)
      if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
        // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L>  -->
        // {A*C,+,F*D + G*B + B*D}<L>
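        // Derivation (added for clarity): with F(n+1) = F(n)+B and
        // G(n+1) = G(n)+D, the product advances by
        //   F(n+1)*G(n+1) - F(n)*G(n) = F(n)*D + G(n)*B + B*D,
        // which is exactly the step expression constructed below.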
        for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
             ++OtherIdx)
          if (const SCEVAddRecExpr *OtherAddRec =
                dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
            if (OtherAddRec->getLoop() == AddRecLoop) {
              const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
              const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
              const SCEV *B = F->getStepRecurrence(*this);
              const SCEV *D = G->getStepRecurrence(*this);
              const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                               getMulExpr(G, B),
                                               getMulExpr(B, D));
              const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                    F->getLoop());
              if (Ops.size() == 2) return NewAddRec;
              Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
              Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
            }
        return getMulExpr(Ops);
      }

    // Otherwise couldn't fold anything into this recurrence.  Move on to the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  SCEVMulExpr *S =
    static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
    std::uninitialized_copy(Ops.begin(), Ops.end(), O);
    S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
                                        O, Ops.size());
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}
/// getUDivExpr - Get a canonical unsigned division expression, or something
/// simpler if possible.
const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  assert(getEffectiveSCEVType(LHS->getType()) ==
         getEffectiveSCEVType(RHS->getType()) &&
         "SCEVUDivExpr operand types don't match!");

  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    if (RHSC->getValue()->equalsInt(1))
      return LHS;                               // X udiv 1 --> X
    // If the denominator is zero, the result of the udiv is undefined. Don't
    // try to analyze it, because the resolution chosen here may differ from
    // the resolution chosen in other parts of the compiler.
    if (!RHSC->getValue()->isZero()) {
      // Determine if the division can be folded into the operands of
      // its operands.
      // TODO: Generalize this to non-constants by using known-bits information.
      const Type *Ty = LHS->getType();
      unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
      unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
      // For non-power-of-two values, effectively round the value up to the
      // nearest power of two.
      if (!RHSC->getValue()->getValue().isPowerOf2())
        ++MaxShiftAmt;
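      // Worked example (added for clarity): for an i32 udiv by 24, LZ is 27,
      // so MaxShiftAmt starts as 32-27-1 = 4 and is bumped to 5 because 24
      // is not a power of two (24 <= 2^5); ExtTy below is then i37.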
      const IntegerType *ExtTy =
        IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
      // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
        if (const SCEVConstant *Step =
              dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
          if (!Step->getValue()->getValue()
                .urem(RHSC->getValue()->getValue()) &&
              getZeroExtendExpr(AR, ExtTy) ==
              getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                            getZeroExtendExpr(Step, ExtTy),
                            AR->getLoop())) {
            SmallVector<const SCEV *, 4> Operands;
            for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
              Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
            return getAddRecExpr(Operands, AR->getLoop());
          }
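      // Illustrative example (added for clarity, not from the original
      // source): {0,+,8}/4 folds to {0,+,2}, since the step divides evenly
      // and the zero-extend identity above proves the recurrence cannot
      // wrap before the division.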
      // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
      if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
          Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
        if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
          // Find an operand that's safely divisible.
          for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
            const SCEV *Op = M->getOperand(i);
            const SCEV *Div = getUDivExpr(Op, RHSC);
            if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
              Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
                                                      M->op_end());
              Operands[i] = Div;
              return getMulExpr(Operands);
            }
          }
      }

      // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
      if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
        SmallVector<const SCEV *, 4> Operands;
        for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
          Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
        if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
          Operands.clear();
          for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
            const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
            if (isa<SCEVUDivExpr>(Op) ||
                getMulExpr(Op, RHS) != A->getOperand(i))
              break;
            Operands.push_back(Op);
          }
          if (Operands.size() == A->getNumOperands())
            return getAddExpr(Operands);
        }
      }

      // Fold if both operands are constant.
      if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
        Constant *LHSCV = LHSC->getValue();
        Constant *RHSCV = RHSC->getValue();
        return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
                                                                   RHSCV)));
      }
    }
  }

  FoldingSetNodeID ID;
  ID.AddInteger(scUDivExpr);
  ID.AddPointer(LHS);
  ID.AddPointer(RHS);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
                                             LHS, RHS);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
                                           const SCEV *Step, const Loop *L,
                                           bool HasNUW, bool HasNSW) {
  SmallVector<const SCEV *, 4> Operands;
  Operands.push_back(Start);
  if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
    if (StepChrec->getLoop() == L) {
      Operands.append(StepChrec->op_begin(), StepChrec->op_end());
      return getAddRecExpr(Operands, L);
    }

  Operands.push_back(Step);
  return getAddRecExpr(Operands, L, HasNUW, HasNSW);
}
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
                               const Loop *L,
                               bool HasNUW, bool HasNSW) {
  if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
  const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
           "SCEVAddRecExpr operand types don't match!");
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    assert(isLoopInvariant(Operands[i], L) &&
           "SCEVAddRecExpr operand is not loop-invariant!");
#endif

  if (Operands.back()->isZero()) {
    Operands.pop_back();
    return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0}  -->  X
  }

  // It's tempting to want to call getMaxBackedgeTakenCount count here and
  // use that information to infer NUW and NSW flags. However, computing a
  // BE count requires calling getAddRecExpr, so we may not yet have a
  // meaningful BE count at this point (and if we don't, we'd be stuck
  // with a SCEVCouldNotCompute as the cached BE count).

  // If HasNSW is true and all the operands are non-negative, infer HasNUW.
  if (!HasNUW && HasNSW) {
    bool All = true;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
         E = Operands.end(); I != E; ++I)
      if (!isKnownNonNegative(*I)) {
        All = false;
        break;
      }
    if (All) HasNUW = true;
  }

  // Canonicalize nested AddRecs by nesting them in order of loop depth.
  if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
    const Loop *NestedLoop = NestedAR->getLoop();
    if (L->contains(NestedLoop) ?
        (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
        (!NestedLoop->contains(L) &&
         DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
      SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
                                                  NestedAR->op_end());
      Operands[0] = NestedAR->getStart();
      // AddRecs require their operands be loop-invariant with respect to
      // their loops. Don't perform this transformation if it would break
      // this requirement.
      bool AllInvariant = true;
      for (unsigned i = 0, e = Operands.size(); i != e; ++i)
        if (!isLoopInvariant(Operands[i], L)) {
          AllInvariant = false;
          break;
        }
      if (AllInvariant) {
        NestedOperands[0] = getAddRecExpr(Operands, L);
        AllInvariant = true;
        for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
          if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
            AllInvariant = false;
            break;
          }
        if (AllInvariant)
          // Ok, both add recurrences are valid after the transformation.
          return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
      }
      // Reset Operands to its original state.
      Operands[0] = NestedAR;
    }
  }

  // Okay, it looks like we really DO need an addrec expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddRecExpr);
  for (unsigned i = 0, e = Operands.size(); i != e; ++i)
    ID.AddPointer(Operands[i]);
  ID.AddPointer(L);
  void *IP = 0;
  SCEVAddRecExpr *S =
    static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
  if (!S) {
    const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
    std::uninitialized_copy(Operands.begin(), Operands.end(), O);
    S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
                                           O, Operands.size(), L);
    UniqueSCEVs.InsertNode(S, IP);
  }
  if (HasNUW) S->setHasNoUnsignedWrap(true);
  if (HasNSW) S->setHasNoSignedWrap(true);
  return S;
}
const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getSMaxExpr(Ops);
}
const SCEV *
ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty smax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVSMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::smax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
      // If we have an smax with a constant maximum-int, it will always be
      // the maximum.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first SMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
    ++Idx;

  // Check to see if one of the operands is an SMax. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedSMax = false;
    while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(SMax->op_begin(), SMax->op_end());
      DeletedSMax = true;
    }

    if (DeletedSMax)
      return getSMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X smax Y smax Y  -->  X smax Y
    //  X smax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced smax down to nothing!");

  // Okay, it looks like we really DO need an smax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scSMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  SmallVector<const SCEV *, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return getUMaxExpr(Ops);
}

const SCEV *
ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty umax!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  const Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
           "SCEVUMaxExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(getContext(),
                              APIntOps::umax(LHSC->getValue()->getValue(),
                                             RHSC->getValue()->getValue()));
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant minimum-int, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
      // If we have an umax with a constant maximum-int, it will always be
      // the maximum.
      return Ops[0];
    }

    if (Ops.size() == 1) return Ops[0];
  }

  // Find the first UMax
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
    ++Idx;

  // Check to see if one of the operands is a UMax. If so, expand its
  // operands onto our operand list, and recurse to simplify.
  if (Idx < Ops.size()) {
    bool DeletedUMax = false;
    while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
      Ops.erase(Ops.begin()+Idx);
      Ops.append(UMax->op_begin(), UMax->op_end());
      DeletedUMax = true;
    }

    if (DeletedUMax)
      return getUMaxExpr(Ops);
  }

  // Okay, check to see if the same value occurs in the operand list twice. If
  // so, delete one. Since we sorted the list, these values are required to
  // be adjacent.
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    //  X umax Y umax Y  -->  X umax Y
    //  X umax Y         -->  X, if X is always greater than Y
    if (Ops[i] == Ops[i+1] ||
        isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
      --i; --e;
    } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
      Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
      --i; --e;
    }

  if (Ops.size() == 1) return Ops[0];

  assert(!Ops.empty() && "Reduced umax down to nothing!");

  // Okay, it looks like we really DO need a umax expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scUMaxExpr);
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
  std::uninitialized_copy(Ops.begin(), Ops.end(), O);
  SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
                                             O, Ops.size());
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~smax(~x, ~y) == smin(x, y).
  return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
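// Why the identity holds (note added for clarity): ~x is -1-x, which
// reverses the signed order, so the smax of the complements is the
// complement of the smin; complementing again recovers smin itself.
// For example, smin(3, 7) = ~smax(~3, ~7) = ~smax(-4, -8) = ~(-4) = 3.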
const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
                                         const SCEV *RHS) {
  // ~umax(~x, ~y) == umin(x, y)
  return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
  // If we have TargetData, we can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  if (TD)
    return getConstant(TD->getIntPtrType(getContext()),
                       TD->getTypeAllocSize(AllocTy));

  Constant *C = ConstantExpr::getSizeOf(AllocTy);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
      C = Folded;
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
  Constant *C = ConstantExpr::getAlignOf(AllocTy);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
      C = Folded;
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
                                             unsigned FieldNo) {
  // If we have TargetData, we can bypass creating a target-independent
  // constant expression and then folding it back into a ConstantInt.
  // This is just a compile-time optimization.
  if (TD)
    return getConstant(TD->getIntPtrType(getContext()),
                       TD->getStructLayout(STy)->getElementOffset(FieldNo));

  Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
      C = Folded;
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}

const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
                                             Constant *FieldNo) {
  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
      C = Folded;
  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
  return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
const SCEV *ScalarEvolution::getUnknown(Value *V) {
  // Don't attempt to do anything other than create a SCEVUnknown object
  // here.  createSCEV only calls getUnknown after checking for all other
  // interesting possibilities, and any other code that calls getUnknown
  // is doing so in order to hide a value from SCEV canonicalization.

  FoldingSetNodeID ID;
  ID.AddInteger(scUnknown);
  ID.AddPointer(V);
  void *IP = 0;
  if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
    assert(cast<SCEVUnknown>(S)->getValue() == V &&
           "Stale SCEVUnknown in uniquing map!");
    return S;
  }
  SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
                                            FirstUnknown);
  FirstUnknown = cast<SCEVUnknown>(S);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
//===----------------------------------------------------------------------===//
//            Basic SCEV Analysis and PHI Idiom Recognition Code
//

/// isSCEVable - Test if values of the given type are analyzable within
/// the SCEV framework. This primarily includes integer types, and it
/// can optionally include pointer types if the ScalarEvolution class
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
  // Integers and pointers are always SCEVable.
  return Ty->isIntegerTy() || Ty->isPointerTy();
}

/// getTypeSizeInBits - Return the size in bits of the specified type,
/// for which isSCEVable must return true.
uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  // If we have a TargetData, use it!
  if (TD)
    return TD->getTypeSizeInBits(Ty);

  // Integer types have fixed sizes.
  if (Ty->isIntegerTy())
    return Ty->getPrimitiveSizeInBits();

  // The only other supported type is pointer. Without TargetData,
  // conservatively assume pointers are 64-bit.
  assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
  return 64;
}

/// getEffectiveSCEVType - Return a type with the same bitwidth as
/// the given type and which represents how SCEV will treat the given
/// type, for which isSCEVable must return true. For pointer types,
/// this is the pointer-sized integer type.
const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
  assert(isSCEVable(Ty) && "Type is not SCEVable!");

  if (Ty->isIntegerTy())
    return Ty;

  // The only other supported type is pointer.
  assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
  if (TD) return TD->getIntPtrType(getContext());

  // Without TargetData, conservatively assume pointers are 64-bit.
  return Type::getInt64Ty(getContext());
}
const SCEV *ScalarEvolution::getCouldNotCompute() {
  return &CouldNotCompute;
}

/// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
/// expression and create a new one.
const SCEV *ScalarEvolution::getSCEV(Value *V) {
  assert(isSCEVable(V->getType()) && "Value is not SCEVable!");

  ValueExprMapType::const_iterator I = ValueExprMap.find(V);
  if (I != ValueExprMap.end()) return I->second;
  const SCEV *S = createSCEV(V);

  // The process of creating a SCEV for V may have caused other SCEVs
  // to have been created, so it's necessary to insert the new entry
  // from scratch, rather than trying to remember the insert position
  // above.
  ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
  return S;
}
/// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
///
const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
               cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  return getMulExpr(V,
                getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
}

/// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
  if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
    return getConstant(
                cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));

  const Type *Ty = V->getType();
  Ty = getEffectiveSCEVType(Ty);
  const SCEV *AllOnes =
                getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
  return getMinusSCEV(AllOnes, V);
}

/// getMinusSCEV - Return LHS-RHS.  Minus is represented in SCEV as A+B*-1,
/// and thus the HasNUW and HasNSW bits apply to the resultant add, not
/// whether the sub would have overflowed.
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
                                          bool HasNUW, bool HasNSW) {
  // Fast path: X - X --> 0.
  if (LHS == RHS)
    return getConstant(LHS->getType(), 0);

  // X - Y --> X + -Y
  return getAddExpr(LHS, getNegativeSCEV(RHS), HasNUW, HasNSW);
}
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended, it
/// is zero extended.
const SCEV *
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or zero extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getZeroExtendExpr(V, Ty);
}

/// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type.  If the type must be extended, it
/// is sign extended.
const SCEV *
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
                                         const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or sign extend with non-integer arguments!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
    return getTruncateExpr(V, Ty);
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is
/// zero extended.  The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or zero extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrZeroExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getZeroExtendExpr(V, Ty);
}

/// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  If the type must be extended, it is
/// sign extended.  The conversion must not be narrowing.
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or sign extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrSignExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getSignExtendExpr(V, Ty);
}

/// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
/// the input value to the specified type. If the type must be extended,
/// it is extended with unspecified bits. The conversion must not be
/// narrowing.
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot noop or any extend with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
         "getNoopOrAnyExtend cannot truncate!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getAnyExtendExpr(V, Ty);
}

/// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
/// input value to the specified type.  The conversion must not be widening.
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
  const Type *SrcTy = V->getType();
  assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
         (Ty->isIntegerTy() || Ty->isPointerTy()) &&
         "Cannot truncate or noop with non-integer arguments!");
  assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
         "getTruncateOrNoop cannot extend!");
  if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
    return V;  // No conversion
  return getTruncateExpr(V, Ty);
}

/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMaxExpr(PromotedLHS, PromotedRHS);
}

/// getUMinFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umin operation
/// with them.
const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
                                                        const SCEV *RHS) {
  const SCEV *PromotedLHS = LHS;
  const SCEV *PromotedRHS = RHS;

  if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
    PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
  else
    PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());

  return getUMinExpr(PromotedLHS, PromotedRHS);
}
/// PushDefUseChildren - Push users of the given Instruction
/// onto the given Worklist.
static void
PushDefUseChildren(Instruction *I,
                   SmallVectorImpl<Instruction *> &Worklist) {
  // Push the def-use children onto the Worklist stack.
  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
       UI != UE; ++UI)
    Worklist.push_back(cast<Instruction>(*UI));
}

/// ForgetSymbolicName - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
/// the ValueExprMapType map if they reference SymName. This is used during
/// PHI resolution.
void
ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
  SmallVector<Instruction *, 16> Worklist;
  PushDefUseChildren(PN, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  Visited.insert(PN);
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I)) continue;

    ValueExprMapType::iterator It =
      ValueExprMap.find(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      const SCEV *Old = It->second;

      // Short-circuit the def-use traversal if the symbolic name
      // ceases to appear in expressions.
      if (Old != SymName && !hasOperand(Old, SymName))
        continue;

      // SCEVUnknown for a PHI either means that it has an unrecognized
      // structure, it's a PHI that's in the process of being computed
      // by createNodeForPHI, or it's a single-value PHI. In the first case,
      // additional loop trip count information isn't going to change anything.
      // In the second case, createNodeForPHI will perform the necessary
      // updates on its own when it gets to that point. In the third, we do
      // want to forget the SCEVUnknown.
      if (!isa<PHINode>(I) ||
          !isa<SCEVUnknown>(Old) ||
          (I != PN && Old == SymName)) {
        forgetMemoizedResults(Old);
        ValueExprMap.erase(It);
      }
    }

    PushDefUseChildren(I, Worklist);
  }
}
/// createNodeForPHI - PHI nodes have two cases.  Either the PHI node exists
/// in a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
  if (const Loop *L = LI->getLoopFor(PN->getParent()))
    if (L->getHeader() == PN->getParent()) {
      // The loop may have multiple entrances or multiple exits; we can analyze
      // this phi as an addrec if it has a unique entry value and a unique
      // backedge value.
      Value *BEValueV = 0, *StartValueV = 0;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *V = PN->getIncomingValue(i);
        if (L->contains(PN->getIncomingBlock(i))) {
          if (!BEValueV) {
            BEValueV = V;
          } else if (BEValueV != V) {
            BEValueV = 0;
            break;
          }
        } else if (!StartValueV) {
          StartValueV = V;
        } else if (StartValueV != V) {
          StartValueV = 0;
          break;
        }
      }
      if (BEValueV && StartValueV) {
        // While we are analyzing this PHI node, handle its value symbolically.
        const SCEV *SymbolicName = getUnknown(PN);
        assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
               "PHI node already processed?");
        ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this),
                                           SymbolicName));

        // Using this symbolic name for the PHI, analyze the value coming
        // around the backedge.
        const SCEV *BEValue = getSCEV(BEValueV);

        // NOTE: If BEValue is loop invariant, we know that the PHI node just
        // has a special value for the first iteration of the loop.

        // If the value coming around the backedge is an add with the symbolic
        // value we just inserted, then we found a simple induction variable!
        if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
          // If there is a single occurrence of the symbolic value, replace it
          // with a recurrence.
          unsigned FoundIndex = Add->getNumOperands();
          for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
            if (Add->getOperand(i) == SymbolicName)
              if (FoundIndex == e) {
                FoundIndex = i;
                break;
              }

          if (FoundIndex != Add->getNumOperands()) {
            // Create an add with everything but the specified operand.
            SmallVector<const SCEV *, 8> Ops;
            for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
              if (i != FoundIndex)
                Ops.push_back(Add->getOperand(i));
            const SCEV *Accum = getAddExpr(Ops);

            // This is not a valid addrec if the step amount is varying each
            // loop iteration, but is not itself an addrec in this loop.
            if (isLoopInvariant(Accum, L) ||
                (isa<SCEVAddRecExpr>(Accum) &&
                 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
              bool HasNUW = false;
              bool HasNSW = false;

              // If the increment doesn't overflow, then neither the addrec
              // nor the post-increment will overflow.
              if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
                if (OBO->hasNoUnsignedWrap())
                  HasNUW = true;
                if (OBO->hasNoSignedWrap())
                  HasNSW = true;
              } else if (const GEPOperator *GEP =
                           dyn_cast<GEPOperator>(BEValueV)) {
                // If the increment is a GEP, then we know it won't perform an
                // unsigned overflow, because the address space cannot be
                // wrapped around.
                HasNUW |= GEP->isInBounds();
              }

              const SCEV *StartVal = getSCEV(StartValueV);
              const SCEV *PHISCEV =
                getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);

              // Since the no-wrap flags are on the increment, they apply to
              // the post-incremented value as well.
              if (isLoopInvariant(Accum, L))
                (void)getAddRecExpr(getAddExpr(StartVal, Accum),
                                    Accum, L, HasNUW, HasNSW);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic.  We now need to go back and purge all of the
              // entries for the scalars that use the symbolic expression.
              ForgetSymbolicName(PN, SymbolicName);
              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
              return PHISCEV;
            }
          }
        } else if (const SCEVAddRecExpr *AddRec =
                     dyn_cast<SCEVAddRecExpr>(BEValue)) {
          // Otherwise, this could be a loop like this:
          //     i = 0;  for (j = 1; ..; ++j) { ....  i = j; }
          // In this case, j = {1,+,1}  and BEValue is j.
          // Because the other in-value of i (0) fits the evolution of BEValue
          // i really is an addrec evolution.
          if (AddRec->getLoop() == L && AddRec->isAffine()) {
            const SCEV *StartVal = getSCEV(StartValueV);

            // If StartVal = j.start - j.stride, we can use StartVal as the
            // initial step of the addrec evolution.
            if (StartVal == getMinusSCEV(AddRec->getOperand(0),
                                         AddRec->getOperand(1))) {
              const SCEV *PHISCEV =
                getAddRecExpr(StartVal, AddRec->getOperand(1), L);

              // Okay, for the entire analysis of this edge we assumed the PHI
              // to be symbolic.  We now need to go back and purge all of the
              // entries for the scalars that use the symbolic expression.
              ForgetSymbolicName(PN, SymbolicName);
              ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
              return PHISCEV;
            }
          }
        }
      }
    }

  // If the PHI has a single incoming value, follow that value, unless the
  // PHI's incoming blocks are in a different loop, in which case doing so
  // risks breaking LCSSA form. Instcombine would normally zap these, but
  // it doesn't have DominatorTree information, so it may miss cases.
  if (Value *V = SimplifyInstruction(PN, TD, DT))
    if (LI->replacementPreservesLCSSAForm(PN, V))
      return getSCEV(V);

  // If it's not a loop phi, we can't handle it yet.
  return getUnknown(PN);
}
/// createNodeForGEP - Expand GEP instructions into add and multiply
/// operations. This allows them to be analyzed by regular SCEV code.
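///
/// Illustrative example (added for clarity, not from the original source):
/// for "%a = getelementptr [10 x i32]* %p, i64 0, i64 %i" the expression
/// built below is %p + 40*0 + 4*sext(%i), i.e. the base plus each index
/// scaled by the allocation size of the type it strides over.
///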
const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {

  // Don't blindly transfer the inbounds flag from the GEP instruction to the
  // Add expression, because the Instruction may be guarded by control flow
  // and the no-overflow bits may not be valid for the expression in any
  // context.

  const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
  Value *Base = GEP->getOperand(0);
  // Don't attempt to analyze GEPs over unsized objects.
  if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
    return getUnknown(GEP);
  const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
                                      E = GEP->op_end();
       I != E; ++I) {
    Value *Index = *I;
    // Compute the (potentially symbolic) offset in bytes for this index.
    if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
      // For a struct, add the member offset.
      unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
      const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);

      // Add the field offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, FieldOffset);
    } else {
      // For an array, add the element offset, explicitly scaled.
      const SCEV *ElementSize = getSizeOfExpr(*GTI);
      const SCEV *IndexS = getSCEV(Index);
      // Getelementptr indices are signed.
      IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);

      // Multiply the index by the element size to compute the element offset.
      const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);

      // Add the element offset to the running total offset.
      TotalOffset = getAddExpr(TotalOffset, LocalOffset);
    }
  }

  // Get the SCEV for the GEP base.
  const SCEV *BaseS = getSCEV(Base);

  // Add the total offset from all the GEP indices to the base.
  return getAddExpr(BaseS, TotalOffset);
}
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
/// guaranteed to end in (at every loop iteration).  It is, at the same time,
/// the minimum number of times S is divisible by 2.  For example, given
/// {4,+,8} it returns 2.  If S is guaranteed to be 0, it returns the bitwidth
/// of S.
uint32_t
ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return C->getValue()->getValue().countTrailingZeros();

  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
    return std::min(GetMinTrailingZeros(T->getOperand()),
                    (uint32_t)getTypeSizeInBits(T->getType()));

  if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
    uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
    return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
             getTypeSizeInBits(E->getType()) : OpRes;
  }

  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    // The result is the sum of all operands results.
    uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
    uint32_t BitWidth = getTypeSizeInBits(M->getType());
    for (unsigned i = 1, e = M->getNumOperands();
         SumOpRes != BitWidth && i != e; ++i)
      SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
                          BitWidth);
    return SumOpRes;
  }
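  // Illustrative example (added for clarity, not from the original source):
  // if a is a multiple of 4 (two trailing zeros) and b is even (one), then
  // a*b is a multiple of 8, so the counts add; the sum is capped at the bit
  // width because the product may be identically zero.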
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
    for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
    // The result is the min of all operands results.
    uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
    for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
      MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
    return MinOpRes;
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    unsigned BitWidth = getTypeSizeInBits(U->getType());
    APInt Mask = APInt::getAllOnesValue(BitWidth);
    APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
    ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
    return Zeros.countTrailingOnes();
  }

  // SCEVUDivExpr
  return 0;
}
/// getUnsignedRange - Determine the unsigned range for a particular SCEV.
///
ConstantRange
ScalarEvolution::getUnsignedRange(const SCEV *S) {
  // See if we've computed this range already.
  DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
  if (I != UnsignedRanges.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum unsigned value will have those
  // known zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0)
    ConservativeResult =
      ConstantRange(APInt::getMinValue(BitWidth),
                    APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getUnsignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getUnsignedRange(Add->getOperand(i)));
    return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getUnsignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
    return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getUnsignedRange(SMax->getOperand(0));
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getUnsignedRange(SMax->getOperand(i)));
    return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getUnsignedRange(UMax->getOperand(0));
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getUnsignedRange(UMax->getOperand(i)));
    return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getUnsignedRange(UDiv->getLHS());
    ConstantRange Y = getUnsignedRange(UDiv->getRHS());
    return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getUnsignedRange(ZExt->getOperand());
    return setUnsignedRange(ZExt,
      ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getUnsignedRange(SExt->getOperand());
    return setUnsignedRange(SExt,
      ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getUnsignedRange(Trunc->getOperand());
    return setUnsignedRange(Trunc,
      ConservativeResult.intersectWith(X.truncate(BitWidth)));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no unsigned wrap, the value will never be less than its
    // initial value.
    if (AddRec->hasNoUnsignedWrap())
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
        if (!C->getValue()->isZero())
          ConservativeResult =
            ConservativeResult.intersectWith(
              ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const Type *Ty = AddRec->getType();
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);

        const SCEV *Start = AddRec->getStart();
        const SCEV *Step = AddRec->getStepRecurrence(*this);

        ConstantRange StartRange = getUnsignedRange(Start);
        ConstantRange StepRange = getSignedRange(Step);
        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
        ConstantRange EndRange =
          StartRange.add(MaxBECountRange.multiply(StepRange));

        // Check for overflow. This must be done with ConstantRange arithmetic
        // because we could be called from within the ScalarEvolution overflow
        // checking code.
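        // Note (added for clarity): 2*BitWidth+1 bits are enough to hold
        // Start + MaxBECount*Step without wrapping: the product needs at
        // most twice the original width, and the add one more bit, so the
        // comparison below detects overflow exactly.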
3058 ConstantRange ExtStartRange
= StartRange
.zextOrTrunc(BitWidth
*2+1);
3059 ConstantRange ExtStepRange
= StepRange
.sextOrTrunc(BitWidth
*2+1);
3060 ConstantRange ExtMaxBECountRange
=
3061 MaxBECountRange
.zextOrTrunc(BitWidth
*2+1);
3062 ConstantRange ExtEndRange
= EndRange
.zextOrTrunc(BitWidth
*2+1);
3063 if (ExtStartRange
.add(ExtMaxBECountRange
.multiply(ExtStepRange
)) !=
3065 return setUnsignedRange(AddRec
, ConservativeResult
);
3067 APInt Min
= APIntOps::umin(StartRange
.getUnsignedMin(),
3068 EndRange
.getUnsignedMin());
3069 APInt Max
= APIntOps::umax(StartRange
.getUnsignedMax(),
3070 EndRange
.getUnsignedMax());
3071 if (Min
.isMinValue() && Max
.isMaxValue())
3072 return setUnsignedRange(AddRec
, ConservativeResult
);
3073 return setUnsignedRange(AddRec
,
3074 ConservativeResult
.intersectWith(ConstantRange(Min
, Max
+1)));
3078 return setUnsignedRange(AddRec
, ConservativeResult
);
3081 if (const SCEVUnknown
*U
= dyn_cast
<SCEVUnknown
>(S
)) {
3082 // For a SCEVUnknown, ask ValueTracking.
3083 APInt Mask
= APInt::getAllOnesValue(BitWidth
);
3084 APInt
Zeros(BitWidth
, 0), Ones(BitWidth
, 0);
3085 ComputeMaskedBits(U
->getValue(), Mask
, Zeros
, Ones
, TD
);
3086 if (Ones
== ~Zeros
+ 1)
3087 return setUnsignedRange(U
, ConservativeResult
);
3088 return setUnsignedRange(U
,
3089 ConservativeResult
.intersectWith(ConstantRange(Ones
, ~Zeros
+ 1)));
3092 return setUnsignedRange(S
, ConservativeResult
);
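// Worked example (editor's illustration, not from the original source):
// for the i8 recurrence {4,+,2} in a loop whose max backedge-taken count
// is the constant 10, StartRange = [4,5), StepRange = [2,3), and
// MaxBECountRange = [10,11), so EndRange = [4,5) + [10,11)*[2,3) = [24,25).
// The unsigned range is the hull of start and end, [4,25). The widened
// (2*BitWidth+1)-bit recomputation above is only there to detect wrap; if
// it disagrees, the conservative result is returned instead.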
/// getSignedRange - Determine the signed range for a particular SCEV.
///
ConstantRange
ScalarEvolution::getSignedRange(const SCEV *S) {
  DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
  if (I != SignedRanges.end())
    return I->second;

  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
    return setSignedRange(C, ConstantRange(C->getValue()->getValue()));

  unsigned BitWidth = getTypeSizeInBits(S->getType());
  ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);

  // If the value has known zeros, the maximum signed value will have those
  // known zeros as well.
  uint32_t TZ = GetMinTrailingZeros(S);
  if (TZ != 0)
    ConservativeResult =
      ConstantRange(APInt::getSignedMinValue(BitWidth),
                    APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    ConstantRange X = getSignedRange(Add->getOperand(0));
    for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
      X = X.add(getSignedRange(Add->getOperand(i)));
    return setSignedRange(Add, ConservativeResult.intersectWith(X));
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    ConstantRange X = getSignedRange(Mul->getOperand(0));
    for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
      X = X.multiply(getSignedRange(Mul->getOperand(i)));
    return setSignedRange(Mul, ConservativeResult.intersectWith(X));
  }

  if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
    ConstantRange X = getSignedRange(SMax->getOperand(0));
    for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
      X = X.smax(getSignedRange(SMax->getOperand(i)));
    return setSignedRange(SMax, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
    ConstantRange X = getSignedRange(UMax->getOperand(0));
    for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
      X = X.umax(getSignedRange(UMax->getOperand(i)));
    return setSignedRange(UMax, ConservativeResult.intersectWith(X));
  }

  if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
    ConstantRange X = getSignedRange(UDiv->getLHS());
    ConstantRange Y = getSignedRange(UDiv->getRHS());
    return setSignedRange(UDiv,
                          ConservativeResult.intersectWith(X.udiv(Y)));
  }

  if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
    ConstantRange X = getSignedRange(ZExt->getOperand());
    return setSignedRange(ZExt,
                        ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
  }

  if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
    ConstantRange X = getSignedRange(SExt->getOperand());
    return setSignedRange(SExt,
                        ConservativeResult.intersectWith(X.signExtend(BitWidth)));
  }

  if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
    ConstantRange X = getSignedRange(Trunc->getOperand());
    return setSignedRange(Trunc,
                        ConservativeResult.intersectWith(X.truncate(BitWidth)));
  }

  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
    // If there's no signed wrap, and all the operands have the same sign or
    // zero, the value won't ever change sign.
    if (AddRec->hasNoSignedWrap()) {
      bool AllNonNeg = true;
      bool AllNonPos = true;
      for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
        if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
        if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
      }
      if (AllNonNeg)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt(BitWidth, 0),
                        APInt::getSignedMinValue(BitWidth)));
      else if (AllNonPos)
        ConservativeResult = ConservativeResult.intersectWith(
          ConstantRange(APInt::getSignedMinValue(BitWidth),
                        APInt(BitWidth, 1)));
    }

    // TODO: non-affine addrec
    if (AddRec->isAffine()) {
      const Type *Ty = AddRec->getType();
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
          getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
        MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);

        const SCEV *Start = AddRec->getStart();
        const SCEV *Step = AddRec->getStepRecurrence(*this);

        ConstantRange StartRange = getSignedRange(Start);
        ConstantRange StepRange = getSignedRange(Step);
        ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
        ConstantRange EndRange =
          StartRange.add(MaxBECountRange.multiply(StepRange));

        // Check for overflow. This must be done with ConstantRange arithmetic
        // because we could be called from within the ScalarEvolution overflow
        // checking code.
        ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
        ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
        ConstantRange ExtMaxBECountRange =
          MaxBECountRange.zextOrTrunc(BitWidth*2+1);
        ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
        if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
            ExtEndRange)
          return setSignedRange(AddRec, ConservativeResult);

        APInt Min = APIntOps::smin(StartRange.getSignedMin(),
                                   EndRange.getSignedMin());
        APInt Max = APIntOps::smax(StartRange.getSignedMax(),
                                   EndRange.getSignedMax());
        if (Min.isMinSignedValue() && Max.isMaxSignedValue())
          return setSignedRange(AddRec, ConservativeResult);
        return setSignedRange(AddRec,
          ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
      }
    }

    return setSignedRange(AddRec, ConservativeResult);
  }

  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    // For a SCEVUnknown, ask ValueTracking.
    if (!U->getValue()->getType()->isIntegerTy() && !TD)
      return setSignedRange(U, ConservativeResult);
    unsigned NS = ComputeNumSignBits(U->getValue(), TD);
    if (NS == 1)
      return setSignedRange(U, ConservativeResult);
    return setSignedRange(U, ConservativeResult.intersectWith(
      ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
                    APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
  }

  return setSignedRange(S, ConservativeResult);
}
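// Worked example (editor's illustration, not from the original source):
// if ValueTracking reports NS = 24 known sign bits for a 32-bit value,
// only the low 32 - 24 + 1 = 9 bits can vary, so the value falls in
// [SignedMin >> 23, (SignedMax >> 23) + 1) = [-256, 256), which the code
// above intersects with the conservative result.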
/// createSCEV - We know that there is no SCEV for the specified value.
/// Analyze the expression.
///
const SCEV *ScalarEvolution::createSCEV(Value *V) {
  if (!isSCEVable(V->getType()))
    return getUnknown(V);

  unsigned Opcode = Instruction::UserOp1;
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();

    // Don't attempt to analyze instructions in blocks that aren't
    // reachable. Such instructions don't matter, and they aren't required
    // to obey basic rules for definitions dominating uses which this
    // analysis depends on.
    if (!DT->isReachableFromEntry(I->getParent()))
      return getUnknown(V);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    Opcode = CE->getOpcode();
  else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
    return getConstant(CI);
  else if (isa<ConstantPointerNull>(V))
    return getConstant(V->getType(), 0);
  else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
  else
    return getUnknown(V);

  Operator *U = cast<Operator>(V);
  switch (Opcode) {
  case Instruction::Add: {
    // The simple thing to do would be to just call getSCEV on both operands
    // and call getAddExpr with the result. However if we're looking at a
    // bunch of things all added together, this can be quite inefficient,
    // because it leads to N-1 getAddExpr calls for N ultimate operands.
    // Instead, gather up all the operands and make a single getAddExpr call.
    // LLVM IR canonical form means we need only traverse the left operands.
    SmallVector<const SCEV *, 4> AddOps;
    AddOps.push_back(getSCEV(U->getOperand(1)));
    for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
      unsigned Opcode = Op->getValueID() - Value::InstructionVal;
      if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
        break;
      U = cast<Operator>(Op);
      const SCEV *Op1 = getSCEV(U->getOperand(1));
      if (Opcode == Instruction::Sub)
        AddOps.push_back(getNegativeSCEV(Op1));
      else
        AddOps.push_back(Op1);
    }
    AddOps.push_back(getSCEV(U->getOperand(0)));
    return getAddExpr(AddOps);
  }
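  // Worked example (editor's illustration, not from the original source):
  // for the chain
  //   %t0 = sub i32 %a, %b
  //   %t1 = add i32 %t0, %c
  //   %t2 = add i32 %t1, %d
  // starting at %t2 the walk above collects {%d, %c, -%b, %a} and makes a
  // single getAddExpr call instead of one call per instruction.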
  case Instruction::Mul: {
    // See the Add code above.
    SmallVector<const SCEV *, 4> MulOps;
    MulOps.push_back(getSCEV(U->getOperand(1)));
    for (Value *Op = U->getOperand(0);
         Op->getValueID() == Instruction::Mul + Value::InstructionVal;
         Op = U->getOperand(0)) {
      U = cast<Operator>(Op);
      MulOps.push_back(getSCEV(U->getOperand(1)));
    }
    MulOps.push_back(getSCEV(U->getOperand(0)));
    return getMulExpr(MulOps);
  }
  case Instruction::UDiv:
    return getUDivExpr(getSCEV(U->getOperand(0)),
                       getSCEV(U->getOperand(1)));
  case Instruction::Sub:
    return getMinusSCEV(getSCEV(U->getOperand(0)),
                        getSCEV(U->getOperand(1)));
  case Instruction::And:
    // For an expression like x&255 that merely masks off the high bits,
    // use zext(trunc(x)) as the SCEV expression.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      if (CI->isNullValue())
        return getSCEV(U->getOperand(1));
      if (CI->isAllOnesValue())
        return getSCEV(U->getOperand(0));
      const APInt &A = CI->getValue();

      // Instcombine's ShrinkDemandedConstant may strip bits out of
      // constants, obscuring what would otherwise be a low-bits mask.
      // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
      // knew about to reconstruct a low-bits mask value.
      unsigned LZ = A.countLeadingZeros();
      unsigned BitWidth = A.getBitWidth();
      APInt AllOnes = APInt::getAllOnesValue(BitWidth);
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);

      APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);

      if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
        return
          getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
                                IntegerType::get(getContext(), BitWidth - LZ)),
                            U->getType());
    }
    break;
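  // Worked example (editor's illustration, not from the original source):
  // for "and i32 %x, 255" the mask has LZ = 24 leading zeros and no
  // stripped low bits, so the and is modeled as
  // zext i8 (trunc i32 %x to i8) to i32, a form the rest of SCEV reasons
  // about far more precisely than an opaque bitwise and.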
  case Instruction::Or:
    // If the RHS of the Or is a constant, we may have something like:
    // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
    // optimizations will transparently handle this case.
    //
    // In order for this transformation to be safe, the LHS must be of the
    // form X*(2^n) and the Or constant must be less than 2^n.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      const SCEV *LHS = getSCEV(U->getOperand(0));
      const APInt &CIVal = CI->getValue();
      if (GetMinTrailingZeros(LHS) >=
          (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
        // Build a plain add SCEV.
        const SCEV *S = getAddExpr(LHS, getSCEV(CI));
        // If the LHS of the add was an addrec and it has no-wrap flags,
        // transfer the no-wrap flags, since an or won't introduce a wrap.
        if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
          const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
          if (OldAR->hasNoUnsignedWrap())
            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
          if (OldAR->hasNoSignedWrap())
            const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
        }
        return S;
      }
    }
    break;
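  // Worked example (editor's illustration, not from the original source):
  // for "%t = mul i32 %x, 4" followed by "or i32 %t, 1", %t has at least
  // two trailing zero bits, so the or cannot carry: %t | 1 == %t + 1, and
  // the expression is modeled as (4 * %x) + 1.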
  case Instruction::Xor:
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      // If the RHS of the xor is a signbit, then this is just an add.
      // Instcombine turns add of signbit into xor as a strength reduction step.
      if (CI->getValue().isSignBit())
        return getAddExpr(getSCEV(U->getOperand(0)),
                          getSCEV(U->getOperand(1)));

      // If the RHS of xor is -1, then this is a not operation.
      if (CI->isAllOnesValue())
        return getNotSCEV(getSCEV(U->getOperand(0)));

      // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
      // This is a variant of the check for xor with -1, and it handles
      // the case where instcombine has trimmed non-demanded bits out
      // of an xor with -1.
      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
        if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
          if (BO->getOpcode() == Instruction::And &&
              LCI->getValue() == CI->getValue())
            if (const SCEVZeroExtendExpr *Z =
                  dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
              const Type *UTy = U->getType();
              const SCEV *Z0 = Z->getOperand();
              const Type *Z0Ty = Z0->getType();
              unsigned Z0TySize = getTypeSizeInBits(Z0Ty);

              // If C is a low-bits mask, the zero extend is serving to
              // mask off the high bits. Complement the operand and
              // re-apply the zext.
              if (APIntOps::isMask(Z0TySize, CI->getValue()))
                return getZeroExtendExpr(getNotSCEV(Z0), UTy);

              // If C is a single bit, it may be in the sign-bit position
              // before the zero-extend. In this case, represent the xor
              // using an add, which is equivalent, and re-apply the zext.
              APInt Trunc = CI->getValue().trunc(Z0TySize);
              if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
                  Trunc.isSignBit())
                return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
                                         UTy);
            }
    }
    break;
  case Instruction::Shl:
    // Turn shift left of a constant amount into a multiply.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().uge(BitWidth))
        break;

      Constant *X = ConstantInt::get(getContext(),
        APInt(BitWidth, 1).shl(SA->getZExtValue()));
      return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
    }
    break;

  case Instruction::LShr:
    // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();

      // If the shift count is not less than the bitwidth, the result of
      // the shift is undefined. Don't try to analyze it, because the
      // resolution chosen here may differ from the resolution chosen in
      // other parts of the compiler.
      if (SA->getValue().uge(BitWidth))
        break;

      Constant *X = ConstantInt::get(getContext(),
        APInt(BitWidth, 1).shl(SA->getZExtValue()));
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
    }
    break;

  case Instruction::AShr:
    // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
      if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
        if (L->getOpcode() == Instruction::Shl &&
            L->getOperand(1) == U->getOperand(1)) {
          uint64_t BitWidth = getTypeSizeInBits(U->getType());

          // If the shift count is not less than the bitwidth, the result of
          // the shift is undefined. Don't try to analyze it, because the
          // resolution chosen here may differ from the resolution chosen in
          // other parts of the compiler.
          if (CI->getValue().uge(BitWidth))
            break;

          uint64_t Amt = BitWidth - CI->getZExtValue();
          if (Amt == BitWidth)
            return getSCEV(L->getOperand(0)); // shift by zero --> noop
          return
            getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
                                              IntegerType::get(getContext(),
                                                               Amt)),
                              U->getType());
        }
    break;
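  // Worked example (editor's illustration, not from the original source):
  // the two-shift pair
  //   %t = shl i32 %x, 24
  //   %r = ashr i32 %t, 24
  // sign-extends the low Amt = 32 - 24 = 8 bits of %x, so it is modeled
  // as sext i8 (trunc i32 %x to i8) to i32.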
  case Instruction::Trunc:
    return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::ZExt:
    return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());

  case Instruction::SExt:
    return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
  case Instruction::BitCast:
    // BitCasts are no-op casts so we just eliminate the cast.
    if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
      return getSCEV(U->getOperand(0));
    break;

  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
  // lead to pointer expressions which cannot safely be expanded to GEPs,
  // because ScalarEvolution doesn't respect the GEP aliasing rules when
  // simplifying integer expressions.

  case Instruction::GetElementPtr:
    return createNodeForGEP(cast<GEPOperator>(U));

  case Instruction::PHI:
    return createNodeForPHI(cast<PHINode>(U));
  case Instruction::Select:
    // This could be a smax or umax that was lowered earlier.
    // Try to recover it.
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
      Value *LHS = ICI->getOperand(0);
      Value *RHS = ICI->getOperand(1);
      switch (ICI->getPredicate()) {
      case ICmpInst::ICMP_SLT:
      case ICmpInst::ICMP_SLE:
        std::swap(LHS, RHS);
        // fall through
      case ICmpInst::ICMP_SGT:
      case ICmpInst::ICMP_SGE:
        // a >s b ? a+x : b+x  ->  smax(a, b)+x
        // a >s b ? b+x : a+x  ->  smin(a, b)+x
        if (LHS->getType() == U->getType()) {
          const SCEV *LS = getSCEV(LHS);
          const SCEV *RS = getSCEV(RHS);
          const SCEV *LA = getSCEV(U->getOperand(1));
          const SCEV *RA = getSCEV(U->getOperand(2));
          const SCEV *LDiff = getMinusSCEV(LA, LS);
          const SCEV *RDiff = getMinusSCEV(RA, RS);
          if (LDiff == RDiff)
            return getAddExpr(getSMaxExpr(LS, RS), LDiff);
          LDiff = getMinusSCEV(LA, RS);
          RDiff = getMinusSCEV(RA, LS);
          if (LDiff == RDiff)
            return getAddExpr(getSMinExpr(LS, RS), LDiff);
        }
        break;
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_ULE:
        std::swap(LHS, RHS);
        // fall through
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_UGE:
        // a >u b ? a+x : b+x  ->  umax(a, b)+x
        // a >u b ? b+x : a+x  ->  umin(a, b)+x
        if (LHS->getType() == U->getType()) {
          const SCEV *LS = getSCEV(LHS);
          const SCEV *RS = getSCEV(RHS);
          const SCEV *LA = getSCEV(U->getOperand(1));
          const SCEV *RA = getSCEV(U->getOperand(2));
          const SCEV *LDiff = getMinusSCEV(LA, LS);
          const SCEV *RDiff = getMinusSCEV(RA, RS);
          if (LDiff == RDiff)
            return getAddExpr(getUMaxExpr(LS, RS), LDiff);
          LDiff = getMinusSCEV(LA, RS);
          RDiff = getMinusSCEV(RA, LS);
          if (LDiff == RDiff)
            return getAddExpr(getUMinExpr(LS, RS), LDiff);
        }
        break;
      case ICmpInst::ICMP_NE:
        // n != 0 ? n+x : 1+x  ->  umax(n, 1)+x
        if (LHS->getType() == U->getType() &&
            isa<ConstantInt>(RHS) &&
            cast<ConstantInt>(RHS)->isZero()) {
          const SCEV *One = getConstant(LHS->getType(), 1);
          const SCEV *LS = getSCEV(LHS);
          const SCEV *LA = getSCEV(U->getOperand(1));
          const SCEV *RA = getSCEV(U->getOperand(2));
          const SCEV *LDiff = getMinusSCEV(LA, LS);
          const SCEV *RDiff = getMinusSCEV(RA, One);
          if (LDiff == RDiff)
            return getAddExpr(getUMaxExpr(One, LS), LDiff);
        }
        break;
      case ICmpInst::ICMP_EQ:
        // n == 0 ? 1+x : n+x  ->  umax(n, 1)+x
        if (LHS->getType() == U->getType() &&
            isa<ConstantInt>(RHS) &&
            cast<ConstantInt>(RHS)->isZero()) {
          const SCEV *One = getConstant(LHS->getType(), 1);
          const SCEV *LS = getSCEV(LHS);
          const SCEV *LA = getSCEV(U->getOperand(1));
          const SCEV *RA = getSCEV(U->getOperand(2));
          const SCEV *LDiff = getMinusSCEV(LA, One);
          const SCEV *RDiff = getMinusSCEV(RA, LS);
          if (LDiff == RDiff)
            return getAddExpr(getUMaxExpr(One, LS), LDiff);
        }
        break;
      default:
        break;
      }
    }
    break;

  default: // We cannot analyze this expression.
    break;
  }

  return getUnknown(V);
}
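// Worked example (editor's illustration, not from the original source):
// for
//   %cmp = icmp sgt i32 %a, %b
//   %sel = select i1 %cmp, i32 %a, i32 %b
// both difference checks above give LDiff == RDiff == 0, so %sel is
// recognized as smax(%a, %b); with a common addend x on both select arms
// the same logic recovers smax(%a, %b) + x.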
//===----------------------------------------------------------------------===//
//                   Iteration Count Computation Code
//

/// getBackedgeTakenCount - If the specified loop has a predictable
/// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
/// object. The backedge-taken count is the number of times the loop header
/// will be branched to from within the loop. This is one less than the
/// trip count of the loop, since it doesn't count the first iteration,
/// when the header is branched to from outside the loop.
///
/// Note that it is not valid to call this method on a loop without a
/// loop-invariant backedge-taken count (see
/// hasLoopInvariantBackedgeTakenCount).
///
const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).Exact;
}

/// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
/// return the least SCEV value that is known never to be less than the
/// actual backedge taken count.
const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
  return getBackedgeTakenInfo(L).Max;
}
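// Worked example (editor's illustration, not from the original source): in
//   for (i = 0; i != 10; ++i) { ... }
// the header executes 10 times but is branched to from within the loop
// only 9 times, so getBackedgeTakenCount returns 9, one less than the
// trip count.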
/// PushLoopPHIs - Push PHI nodes in the header of the given loop
/// onto the given Worklist.
static void
PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
  BasicBlock *Header = L->getHeader();

  // Push all Loop-header PHIs onto the Worklist stack.
  for (BasicBlock::iterator I = Header->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I)
    Worklist.push_back(PN);
}
const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
  // Initially insert a CouldNotCompute for this loop. If the insertion
  // succeeds, proceed to actually compute a backedge-taken count and
  // update the value. The temporary CouldNotCompute value tells SCEV
  // code elsewhere that it shouldn't attempt to request a new
  // backedge-taken count, which could result in infinite recursion.
  std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
    BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
  if (!Pair.second)
    return Pair.first->second;

  BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
  if (BECount.Exact != getCouldNotCompute()) {
    assert(isLoopInvariant(BECount.Exact, L) &&
           isLoopInvariant(BECount.Max, L) &&
           "Computed backedge-taken count isn't loop invariant for loop!");
    ++NumTripCountsComputed;

    // Update the value in the map.
    Pair.first->second = BECount;
  } else {
    if (BECount.Max != getCouldNotCompute())
      // Update the value in the map.
      Pair.first->second = BECount;
    if (isa<PHINode>(L->getHeader()->begin()))
      // Only count loops that have phi nodes as not being computable.
      ++NumTripCountsNotComputed;
  }

  // Now that we know more about the trip count for this loop, forget any
  // existing SCEV values for PHI nodes in this loop since they are only
  // conservative estimates made without the benefit of trip count
  // information. This is similar to the code in forgetLoop, except that
  // it handles SCEVUnknown PHI nodes specially.
  if (BECount.hasAnyInfo()) {
    SmallVector<Instruction *, 16> Worklist;
    PushLoopPHIs(L, Worklist);

    SmallPtrSet<Instruction *, 8> Visited;
    while (!Worklist.empty()) {
      Instruction *I = Worklist.pop_back_val();
      if (!Visited.insert(I)) continue;

      ValueExprMapType::iterator It =
        ValueExprMap.find(static_cast<Value *>(I));
      if (It != ValueExprMap.end()) {
        const SCEV *Old = It->second;

        // SCEVUnknown for a PHI either means that it has an unrecognized
        // structure, or it's a PHI that's in the progress of being computed
        // by createNodeForPHI. In the former case, additional loop trip
        // count information isn't going to change anything. In the latter
        // case, createNodeForPHI will perform the necessary updates on its
        // own when it gets to that point.
        if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
          forgetMemoizedResults(Old);
          ValueExprMap.erase(It);
        }
        if (PHINode *PN = dyn_cast<PHINode>(I))
          ConstantEvolutionLoopExitValue.erase(PN);
      }

      PushDefUseChildren(I, Worklist);
    }
  }
  return Pair.first->second;
}
/// forgetLoop - This method should be called by the client when it has
/// changed a loop in a way that may affect ScalarEvolution's ability to
/// compute a trip count, or if the loop is deleted.
void ScalarEvolution::forgetLoop(const Loop *L) {
  // Drop any stored trip count value.
  BackedgeTakenCounts.erase(L);

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  PushLoopPHIs(L, Worklist);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    Instruction *I = Worklist.pop_back_val();
    if (!Visited.insert(I)) continue;

    ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      forgetMemoizedResults(It->second);
      ValueExprMap.erase(It);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }

  // Forget all contained loops too, to avoid dangling entries in the
  // ValuesAtScopes map.
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    forgetLoop(*I);
}
/// forgetValue - This method should be called by the client when it has
/// changed a value in a way that may affect its value, or which may
/// disconnect it from a def-use chain linking it to a loop.
void ScalarEvolution::forgetValue(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return;

  // Drop information about expressions based on loop-header PHIs.
  SmallVector<Instruction *, 16> Worklist;
  Worklist.push_back(I);

  SmallPtrSet<Instruction *, 8> Visited;
  while (!Worklist.empty()) {
    I = Worklist.pop_back_val();
    if (!Visited.insert(I)) continue;

    ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
    if (It != ValueExprMap.end()) {
      forgetMemoizedResults(It->second);
      ValueExprMap.erase(It);
      if (PHINode *PN = dyn_cast<PHINode>(I))
        ConstantEvolutionLoopExitValue.erase(PN);
    }

    PushDefUseChildren(I, Worklist);
  }
}
/// ComputeBackedgeTakenCount - Compute the number of times the backedge
/// of the specified loop will execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
  SmallVector<BasicBlock *, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  // Examine all exits and pick the most conservative values.
  const SCEV *BECount = getCouldNotCompute();
  const SCEV *MaxBECount = getCouldNotCompute();
  bool CouldNotComputeBECount = false;
  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BackedgeTakenInfo NewBTI =
      ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);

    if (NewBTI.Exact == getCouldNotCompute()) {
      // We couldn't compute an exact value for this exit, so
      // we won't be able to compute an exact value for the loop.
      CouldNotComputeBECount = true;
      BECount = getCouldNotCompute();
    } else if (!CouldNotComputeBECount) {
      if (BECount == getCouldNotCompute())
        BECount = NewBTI.Exact;
      else
        BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
    }
    if (MaxBECount == getCouldNotCompute())
      MaxBECount = NewBTI.Max;
    else if (NewBTI.Max != getCouldNotCompute())
      MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
  }

  return BackedgeTakenInfo(BECount, MaxBECount);
}
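// Worked example (editor's illustration, not from the original source):
// if a loop has two exiting blocks whose per-exit backedge-taken counts
// are 9 and 19, the backedge is actually taken umin(9, 19) = 9 times,
// since whichever exit is reached first ends the loop; that is why the
// code above combines exits with getUMinFromMismatchedTypes.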
/// ComputeBackedgeTakenCountFromExit - Compute the number of times the
/// backedge of the specified loop will execute if it exits via the
/// specified block.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
                                                   BasicBlock *ExitingBlock) {
  // Okay, we've chosen an exiting block. See what condition causes us to
  // exit at this block.
  //
  // FIXME: we should be able to handle switch instructions (with a single exit)
  BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
  if (ExitBr == 0) return getCouldNotCompute();
  assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");

  // At this point, we know we have a conditional branch that determines whether
  // the loop is exited. However, we don't know if the branch is executed each
  // time through the loop. If not, then the execution count of the branch will
  // not be equal to the trip count of the loop.
  //
  // Currently we check for this by checking to see if the Exit branch goes to
  // the loop header. If so, we know it will always execute the same number of
  // times as the loop. We also handle the case where the exit block *is* the
  // loop header. This is common for un-rotated loops.
  //
  // If both of those tests fail, walk up the unique predecessor chain to the
  // header, stopping if there is an edge that doesn't exit the loop. If the
  // header is reached, the execution count of the branch will be equal to the
  // trip count of the loop.
  //
  // More extensive analysis could be done to handle more cases here.
  //
  if (ExitBr->getSuccessor(0) != L->getHeader() &&
      ExitBr->getSuccessor(1) != L->getHeader() &&
      ExitBr->getParent() != L->getHeader()) {
    // The simple checks failed, try climbing the unique predecessor chain
    // up to the header.
    bool Ok = false;
    for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
      BasicBlock *Pred = BB->getUniquePredecessor();
      if (!Pred)
        return getCouldNotCompute();
      TerminatorInst *PredTerm = Pred->getTerminator();
      for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
        BasicBlock *PredSucc = PredTerm->getSuccessor(i);
        if (PredSucc == BB)
          continue;
        // If the predecessor has a successor that isn't BB and isn't
        // outside the loop, assume the worst.
        if (L->contains(PredSucc))
          return getCouldNotCompute();
      }
      if (Pred == L->getHeader()) {
        Ok = true;
        break;
      }
      BB = Pred;
    }
    if (!Ok)
      return getCouldNotCompute();
  }

  // Proceed to the next level to examine the exit condition expression.
  return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
                                               ExitBr->getSuccessor(0),
                                               ExitBr->getSuccessor(1));
}
/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of ExitCond, TBB, and FBB.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
                                                       Value *ExitCond,
                                                       BasicBlock *TBB,
                                                       BasicBlock *FBB) {
  // Check if the controlling expression for this loop is an And or Or.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
    if (BO->getOpcode() == Instruction::And) {
      // Recurse on the operands of the and.
      BackedgeTakenInfo BTI0 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
      BackedgeTakenInfo BTI1 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (L->contains(TBB)) {
        // Both conditions must be true for the loop to continue executing.
        // Choose the less conservative count.
        if (BTI0.Exact == getCouldNotCompute() ||
            BTI1.Exact == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
        if (BTI0.Max == getCouldNotCompute())
          MaxBECount = BTI1.Max;
        else if (BTI1.Max == getCouldNotCompute())
          MaxBECount = BTI0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
      } else {
        // Both conditions must be true at the same time for the loop to exit.
        // For now, be conservative.
        assert(L->contains(FBB) && "Loop block has no successor in loop!");
        if (BTI0.Max == BTI1.Max)
          MaxBECount = BTI0.Max;
        if (BTI0.Exact == BTI1.Exact)
          BECount = BTI0.Exact;
      }

      return BackedgeTakenInfo(BECount, MaxBECount);
    }
    if (BO->getOpcode() == Instruction::Or) {
      // Recurse on the operands of the or.
      BackedgeTakenInfo BTI0 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
      BackedgeTakenInfo BTI1 =
        ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
      const SCEV *BECount = getCouldNotCompute();
      const SCEV *MaxBECount = getCouldNotCompute();
      if (L->contains(FBB)) {
        // Both conditions must be false for the loop to continue executing.
        // Choose the less conservative count.
        if (BTI0.Exact == getCouldNotCompute() ||
            BTI1.Exact == getCouldNotCompute())
          BECount = getCouldNotCompute();
        else
          BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
        if (BTI0.Max == getCouldNotCompute())
          MaxBECount = BTI1.Max;
        else if (BTI1.Max == getCouldNotCompute())
          MaxBECount = BTI0.Max;
        else
          MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
      } else {
        // Both conditions must be false at the same time for the loop to exit.
        // For now, be conservative.
        assert(L->contains(TBB) && "Loop block has no successor in loop!");
        if (BTI0.Max == BTI1.Max)
          MaxBECount = BTI0.Max;
        if (BTI0.Exact == BTI1.Exact)
          BECount = BTI0.Exact;
      }

      return BackedgeTakenInfo(BECount, MaxBECount);
    }
  }

  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);

  // Check for a constant condition. These are normally stripped out by
  // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
  // preserve the CFG and is temporarily leaving constant conditions
  // in place.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
    if (L->contains(FBB) == !CI->getZExtValue())
      // The backedge is always taken.
      return getCouldNotCompute();
    else
      // The backedge is never taken.
      return getConstant(CI->getType(), 0);
  }

  // If it's not an integer or pointer comparison then compute it the hard way.
  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}
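// Worked example (editor's illustration, not from the original source):
// for an exit condition "%c = and i1 %c0, %c1" whose true edge stays in
// the loop, the loop keeps running only while both %c0 and %c1 hold, so
// the exact backedge-taken count is the umin of the counts computed for
// %c0 and %c1 individually, as the And branch above computes.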
static const SCEVAddRecExpr *
isSimpleUnwrappingAddRec(const SCEV *S, const Loop *L) {
  const SCEVAddRecExpr *SA = dyn_cast<SCEVAddRecExpr>(S);

  // The SCEV must be an addrec of this loop.
  if (!SA || SA->getLoop() != L || !SA->isAffine())
    return 0;

  // The SCEV must be known to not wrap in some way to be interesting.
  if (!SA->hasNoUnsignedWrap() && !SA->hasNoSignedWrap())
    return 0;

  // The stride must be a constant so that we know if it is striding up or down.
  if (!isa<SCEVConstant>(SA->getOperand(1)))
    return 0;

  return SA;
}
/// getMinusSCEVForExitTest - When considering an exit test for a loop with a
/// "x != y" exit test, we turn this into a computation that evaluates x-y != 0,
/// and this function returns the expression to use for x-y. We know and take
/// advantage of the fact that this subtraction is only being used in a
/// comparison by zero context.
///
static const SCEV *getMinusSCEVForExitTest(const SCEV *LHS, const SCEV *RHS,
                                           const Loop *L, ScalarEvolution &SE) {
  // If either LHS or RHS is an AddRec SCEV (of this loop) that is known to not
  // wrap (either NSW or NUW), then we know that the value will either become
  // the other one (and thus the loop terminates), that the loop will terminate
  // through some other exit condition first, or that the loop has undefined
  // behavior. This information is useful when the addrec has a stride that is
  // != 1 or -1, because it means we can't "miss" the exit value.
  //
  // In any of these three cases, it is safe to turn the exit condition into a
  // "counting down" AddRec (to zero) by subtracting the two inputs as normal,
  // but since we know that the "end cannot be missed" we can force the
  // resulting AddRec to be a NUW addrec. Since it is counting down, this means
  // that the AddRec *cannot* pass zero.

  // See if LHS and RHS are addrec's we can handle.
  const SCEVAddRecExpr *LHSA = isSimpleUnwrappingAddRec(LHS, L);
  const SCEVAddRecExpr *RHSA = isSimpleUnwrappingAddRec(RHS, L);

  // If neither addrec is interesting, just return a minus.
  if (RHSA == 0 && LHSA == 0)
    return SE.getMinusSCEV(LHS, RHS);

  // If only one of LHS and RHS are an AddRec of this loop, make sure it is LHS.
  if (RHSA && LHSA == 0) {
    // Safe because a-b === b-a for comparisons against zero.
    std::swap(LHS, RHS);
    std::swap(LHSA, RHSA);
  }

  // Handle the case when only one is advancing in a non-overflowing way.
  if (RHSA == 0) {
    // If RHS is loop varying, then we can't predict when LHS will cross it.
    if (!SE.isLoopInvariant(RHS, L))
      return SE.getMinusSCEV(LHS, RHS);

    // If LHS has a positive stride, then we compute RHS-LHS, because the loop
    // is counting up until it crosses RHS (which must be larger than LHS). If
    // it is negative, we compute LHS-RHS because we're counting down to RHS.
    const ConstantInt *Stride =
      cast<SCEVConstant>(LHSA->getOperand(1))->getValue();
    if (Stride->getValue().isNegative())
      std::swap(LHS, RHS);

    return SE.getMinusSCEV(RHS, LHS, true /*HasNUW*/);
  }

  // If both LHS and RHS are interesting, we have something like:
  //  a+i*4 != b+i*8.
  const ConstantInt *LHSStride =
    cast<SCEVConstant>(LHSA->getOperand(1))->getValue();
  const ConstantInt *RHSStride =
    cast<SCEVConstant>(RHSA->getOperand(1))->getValue();

  // If the strides are equal, then this is just a (complex) loop invariant
  // comparison of a and b.
  if (LHSStride == RHSStride)
    return SE.getMinusSCEV(LHSA->getStart(), RHSA->getStart());

  // If the signs of the strides differ, then the negative stride is counting
  // down to the positive stride.
  if (LHSStride->getValue().isNegative() != RHSStride->getValue().isNegative()){
    if (RHSStride->getValue().isNegative())
      std::swap(LHS, RHS);
  } else {
    // If LHS's stride is smaller than RHS's stride, then "b" must be less than
    // "a", and "b" (the RHS) is counting up (catching up) to LHS. This is true
    // whether the strides are positive or negative.
    if (RHSStride->getValue().slt(LHSStride->getValue()))
      std::swap(LHS, RHS);
  }

  return SE.getMinusSCEV(LHS, RHS, true /*HasNUW*/);
}
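// Worked example (editor's illustration, not from the original source):
// for an exit test {0,+,8} != {64,+,4} where both addrecs are no-wrap,
// the faster LHS catches up to RHS, so after the swap above the result is
// {64,+,4} - {0,+,8} = {64,+,-4}<nuw>, which counts down from 64 and
// reaches zero exactly at iteration 16 without being able to skip it.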
/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
                                                           ICmpInst *ExitCond,
                                                           BasicBlock *TBB,
                                                           BasicBlock *FBB) {
  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Cond;
  if (!L->contains(FBB))
    Cond = ExitCond->getPredicate();
  else
    Cond = ExitCond->getInversePredicate();

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      BackedgeTakenInfo ItCnt =
        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
      if (ItCnt.hasAnyInfo())
        return ItCnt;
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
    // If there is a loop-invariant, force it into the RHS.
    std::swap(LHS, RHS);
    Cond = ICmpInst::getSwappedPredicate(Cond);
  }

  // Simplify the operands before analyzing them.
  (void)SimplifyICmpOperands(Cond, LHS, RHS);

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange(
            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Cond) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
    BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEVForExitTest(LHS, RHS, L,
                                                                 *this), L);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_SLT: {
    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_SGT: {
    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
                                             getNotSCEV(RHS), L, true);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_ULT: {
    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_UGT: {
    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
                                             getNotSCEV(RHS), L, false);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  default:
#if 0
    dbgs() << "ComputeBackedgeTakenCount ";
    if (ExitCond->getOperand(0)->getType()->isUnsigned())
      dbgs() << "[unsigned] ";
    dbgs() << *LHS << "   "
           << Instruction::getOpcodeName(Instruction::ICmp)
           << "   " << *RHS << "\n";
#endif
    break;
  }
  return
    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}
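// Worked example (editor's illustration, not from the original source):
// a signed "i > n" exit test is handled by rewriting with getNotSCEV:
// since not(x) = -1 - x, the comparison i > n holds exactly when
// (-1 - i) < (-1 - n), which lets HowManyLessThans serve the
// greater-than cases as well.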
static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}
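// Worked example (editor's illustration, not from the original source):
// evaluating the affine chrec {3,+,2} at iteration 5 folds
// 3 + 2*5 to the constant 13.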
/// GetAddressedElementFromGlobal - Given a global variable with an initializer
/// and a GEP expression (missing the pointer index) indexing into it, return
/// the addressed element of the initializer or null if the index expression is
/// invalid.
static Constant *
GetAddressedElementFromGlobal(GlobalVariable *GV,
                              const std::vector<ConstantInt *> &Indices) {
  Constant *Init = GV->getInitializer();
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    uint64_t Idx = Indices[i]->getZExtValue();
    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
      assert(Idx < CS->getNumOperands() && "Bad struct index!");
      Init = cast<Constant>(CS->getOperand(Idx));
    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
      if (Idx >= CA->getNumOperands()) return 0; // Bogus program
      Init = cast<Constant>(CA->getOperand(Idx));
    } else if (isa<ConstantAggregateZero>(Init)) {
      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
        assert(Idx < STy->getNumElements() && "Bad struct index!");
        Init = Constant::getNullValue(STy->getElementType(Idx));
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
        if (Idx >= ATy->getNumElements()) return 0; // Bogus program
        Init = Constant::getNullValue(ATy->getElementType());
      } else {
        llvm_unreachable("Unknown constant aggregate type!");
        return 0;
      }
    } else {
      return 0; // Unknown initializer type
    }
  }
  return Init;
}
/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
/// 'icmp op load X, cst', try to see if we can compute the backedge
/// execution count.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
                                                LoadInst *LI,
                                                Constant *RHS,
                                                const Loop *L,
                                                ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  // TODO: Use SCEV instead of manually grubbing with GEPs.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = 0;
  std::vector<ConstantInt *> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(0);
    }

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst = ConstantInt::get(
                           cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
    if (Result == 0) break; // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
#if 0
      dbgs() << "\n***\n*** Computed loop count " << *ItCst
             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
             << "***\n";
#endif
      ++NumArrayLenItCounts;
      return getConstant(ItCst); // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}
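// Worked example (editor's illustration, not from the original source):
// for a constant global @s = [6 x i8] c"hello\00" and a loop of the form
// while (s[i] != 0) ++i, the iteration above folds s[0..4] to nonzero
// characters and s[5] to zero, so the backedge-taken count is the
// constant 5.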
/// CanConstantFold - Return true if we can constant fold an instruction of the
/// specified type, assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(F);
  return false;
}
/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from. We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI. If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  // If this is not an instruction, or if this is an instruction outside of the
  // loop, it can't be derived from a loop PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0 || !L->contains(I)) return 0;

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    if (L->getHeader() == I->getParent())
      return PN;
    else
      // We don't currently keep track of the control flow needed to evaluate
      // PHIs, so we cannot handle PHIs inside of loops.
      return 0;
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, return early.
  if (!CanConstantFold(I)) return 0;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = 0;
  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
    if (!isa<Constant>(I->getOperand(Op))) {
      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
      if (P == 0) return 0;  // Not evolving from PHI
      if (PHI == 0)
        PHI = P;
      else if (PHI != P)
        return 0;  // Evolving from multiple different PHIs.
    }

  // This is an expression evolving from a constant PHI!
  return PHI;
}
/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
                                    const TargetData *TD) {
  if (isa<PHINode>(V)) return PHIVal;
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  Instruction *I = cast<Instruction>(V);

  std::vector<Constant *> Operands(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
    if (Operands[i] == 0) return 0;
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                           Operands[1], TD);
  return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                  &Operands[0], Operands.size(), TD);
}
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  std::map<PHINode *, Constant *>::const_iterator I =
    ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(MaxBruteForceIterations))
    return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  // Since the loop is canonicalized, the PHI node must have two entries. One
  // entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  Constant *StartCST =
    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
  if (StartCST == 0)
    return RetVal = 0; // Must be a constant.

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  if (getConstantEvolvingPHI(BEValue, L) != PN &&
      !isa<Constant>(BEValue))
    return RetVal = 0; // Not derived from same PHI.

  // Execute the loop symbolically to determine the exit value.
  if (BEs.getActiveBits() >= 32)
    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = PHIVal; // Got exit value!

    // Compute the value of the PHI node for the next iteration.
    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
    if (NextPHI == PHIVal)
      return RetVal = NextPHI; // Stopped evolving!
    if (NextPHI == 0)
      return 0; // Couldn't evaluate!
    PHIVal = NextPHI;
  }
}
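// Worked example (editor's illustration, not from the original source):
// for the header PHI
//   %i = phi i32 [ 0, %entry ], [ %i.next, %body ], %i.next = add i32 %i, 3
// with a backedge-taken count of 4, the loop above evaluates
// 0 -> 3 -> 6 -> 9 -> 12 and caches the constant 12 as the exit value.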
/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until the exit condition
/// gets a value of ExitWhen (true or false). If we cannot evaluate the
/// trip count of the loop, return getCouldNotCompute().
const SCEV *
ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
                                                       Value *Cond,
                                                       bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (PN == 0) return getCouldNotCompute();

  // If the loop is canonicalized, the PHI will have exactly two entries.
  // That's the only form we support here.
  if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();

  // One entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  Constant *StartCST =
    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
  if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  if (getConstantEvolvingPHI(BEValue, L) != PN &&
      !isa<Constant>(BEValue))
    return getCouldNotCompute(); // Not derived from same PHI.

  // Okay, we found a PHI node that defines the trip count of this loop. Execute
  // the loop symbolically to determine when the condition gets a value of
  // ExitWhen.
  unsigned IterationNum = 0;
  unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
  for (Constant *PHIVal = StartCST;
       IterationNum != MaxIterations; ++IterationNum) {
    ConstantInt *CondVal =
      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::getInt32Ty(getContext()), IterationNum);
    }

    // Compute the value of the PHI node for the next iteration.
    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
    if (NextPHI == 0 || NextPHI == PHIVal)
      return getCouldNotCompute();// Couldn't evaluate or not making progress...
    PHIVal = NextPHI;
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}
4495 /// getSCEVAtScope - Return a SCEV expression for the specified value
4496 /// at the specified scope in the program. The L value specifies a loop
4497 /// nest to evaluate the expression at, where null is the top-level or a
4498 /// specified loop is immediately inside of the loop.
4500 /// This method can be used to compute the exit value for a variable defined
4501 /// in a loop by querying what the value will hold in the parent loop.
4503 /// In the case that a relevant loop exit value cannot be computed, the
4504 /// original value V is returned.
4505 const SCEV
*ScalarEvolution::getSCEVAtScope(const SCEV
*V
, const Loop
*L
) {
4506 // Check to see if we've folded this expression at this loop before.
4507 std::map
<const Loop
*, const SCEV
*> &Values
= ValuesAtScopes
[V
];
4508 std::pair
<std::map
<const Loop
*, const SCEV
*>::iterator
, bool> Pair
=
4509 Values
.insert(std::make_pair(L
, static_cast<const SCEV
*>(0)));
4511 return Pair
.first
->second
? Pair
.first
->second
: V
;
4513 // Otherwise compute it.
4514 const SCEV
*C
= computeSCEVAtScope(V
, L
);
4515 ValuesAtScopes
[V
][L
] = C
;
const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = (*this->LI)[I->getParent()];
      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node.  Check
            // to see if the loop that contains it has a known backedge-taken
            // count.  If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
              // Okay, we know how many times the containing loop executes.  If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV = getConstantEvolutionLoopExitValue(PN,
                                                   BTCC->getValue()->getValue(),
                                                               LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        SmallVector<Constant *, 4> Operands;
        bool MadeImprovement = false;
        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
          Value *Op = I->getOperand(i);
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
            continue;
          }

          // If any of the operands is non-constant and if they are
          // non-integer and non-pointer, don't even try to analyze them
          // with scev techniques.
          if (!isSCEVable(Op->getType()))
            return V;

          const SCEV *OrigV = getSCEV(Op);
          const SCEV *OpV = getSCEVAtScope(OrigV, L);
          MadeImprovement |= OrigV != OpV;

          Constant *C = 0;
          if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
            C = SC->getValue();
          if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
            C = dyn_cast<Constant>(SU->getValue());
          if (!C) return V;
          if (C->getType() != Op->getType())
            C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                              Op->getType(),
                                                              false),
                                      C, Op->getType());
          Operands.push_back(C);
        }

        // Check to see if getSCEVAtScope actually made an improvement.
        if (MadeImprovement) {
          Constant *C = 0;
          if (const CmpInst *CI = dyn_cast<CmpInst>(I))
            C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                                Operands[0], Operands[1], TD);
          else
            C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                         &Operands[0], Operands.size(), TD);
          if (!C) return V;
          return getSCEV(C);
        }
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }
  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        llvm_unreachable("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }
  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;   // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }
  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    // First, attempt to evaluate each operand.
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
      if (OpAtScope == AddRec->getOperand(i))
        continue;

      // Okay, at least one of these operands is loop variant but might be
      // foldable.  Build a new instance of the folded expression.
      SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
                                          AddRec->op_begin()+i);
      NewOps.push_back(OpAtScope);
      for (++i; i != e; ++i)
        NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));

      AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
      break;
    }

    // If the scope is outside the addrec's loop, evaluate it by using the
    // loop exit value of the addrec.
    if (!AddRec->getLoop()->contains(L)) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }

    return AddRec;
  }
  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  llvm_unreachable("Unknown SCEV type!");
  return 0;
}
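// Worked illustration (an invented example): for the addrec {0,+,1}<L>
// queried at L's parent loop, where L's backedge-taken count is the constant
// 9, the addrec case above folds the recurrence to its exit value via
// evaluateAtIteration(9), which yields 9.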
/// getSCEVAtScope - This is a convenience function which does
/// getSCEVAtScope(getSCEV(V), L).
const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}
/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
/// following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B. The signedness of
/// A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2. The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for B
  // is not less than multiplicity of this prime factor for D.
  if (B.countTrailingZeros() < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
  // bit width during computations.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.setBit(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod);

  // 4. Compute the minimum unsigned root of the equation:
  // I * (B / D) mod (N / D)
  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);

  // The result is guaranteed to be less than 2^BW so we may truncate it to BW
  // bits.
  return SE.getConstant(Result.trunc(BW));
}
/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots
/// (which might be the same) or two SCEVCouldNotCompute objects.
///
static std::pair<const SCEV *,const SCEV *>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    const SCEV *CNC = SE.getCouldNotCompute();
    return std::make_pair(CNC, CNC);
  }

  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
  const APInt &L = LC->getValue()->getValue();
  const APInt &M = MC->getValue()->getValue();
  const APInt &N = NC->getValue()->getValue();
  APInt Two(BitWidth, 2);
  APInt Four(BitWidth, 4);

  {
    using namespace APIntOps;
    const APInt &C = L;
    // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
    // The B coefficient is M-N/2
    APInt B(M);
    B -= sdiv(N,Two);

    // The A coefficient is N/2
    APInt A(N.sdiv(Two));

    // Compute the B^2-4ac term.
    APInt SqrtTerm(B);
    SqrtTerm *= B;
    SqrtTerm -= Four * (A * C);

    // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
    // integer value or else APInt::sqrt() will assert.
    APInt SqrtVal(SqrtTerm.sqrt());

    // Compute the two solutions for the quadratic formula.
    // The divisions must be performed as signed divisions.
    APInt NegB(-B);
    APInt TwoA( A << 1 );
    if (TwoA.isMinValue()) {
      const SCEV *CNC = SE.getCouldNotCompute();
      return std::make_pair(CNC, CNC);
    }

    LLVMContext &Context = SE.getContext();

    ConstantInt *Solution1 =
      ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
    ConstantInt *Solution2 =
      ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));

    return std::make_pair(SE.getConstant(Solution1),
                          SE.getConstant(Solution2));
  } // end APIntOps namespace
}
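// Why A = N/2 and B = M - N/2 (an illustrative derivation): the chrec
// {L,+,M,+,N} evaluated at iteration x is, writing BC(x,k) for the binomial
// coefficient,
//   f(x) = L + M*BC(x,1) + N*BC(x,2) = L + M*x + N*x*(x-1)/2
//        = (N/2)*x^2 + (M - N/2)*x + L,
// so the polynomial coefficients are A = N/2, B = M - N/2, C = L, and the
// roots follow from the quadratic formula (-B +/- sqrt(B^2 - 4AC)) / 2A.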
/// HowFarToZero - Return the number of times a backedge comparing the
/// specified value to zero will execute.  If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
  // the quadratic equation to solve it.
  if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(AddRec, *this);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1 && R2) {
#if 0
      dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
             << "  sol#2: " << *R2 << "\n";
#endif
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
                                                      R1->getValue(),
                                                      R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);   // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index.  When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          return R1;  // We found a quadratic root!
      }
    }
    return getCouldNotCompute();
  }

  // Otherwise we can only handle this if it is affine.
  if (!AddRec->isAffine())
    return getCouldNotCompute();

  // If this is an affine expression, the execution count of this branch is
  // the minimum unsigned root of the following equation:
  //
  //     Start + Step*N = 0 (mod 2^BW)
  //
  // equivalent to:
  //
  //             Step*N = -Start (mod 2^BW)
  //
  // where BW is the common bit width of Start and Step.

  // Get the initial value for the loop.
  const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
  const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());

  // If the AddRec is NUW, then (in an unsigned sense) it cannot be counting up
  // to wrap to 0, it must be counting down to equal 0. Also, while counting
  // down, it cannot "miss" 0 (which would cause it to wrap), regardless of what
  // the stride is. As such, NUW addrec's will always become zero in
  // "start / -stride" steps, and we know that the division is exact.
  if (AddRec->hasNoUnsignedWrap())
    // FIXME: We really want an "isexact" bit for udiv.
    return getUDivExpr(Start, getNegativeSCEV(Step));

  // For now we handle only constant steps.
  const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
  if (StepC == 0)
    return getCouldNotCompute();

  // First, handle unitary steps.
  if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
    return getNegativeSCEV(Start);          //   N = -Start (as unsigned)

  if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
    return Start;                           //    N = Start (as unsigned)

  // Then, try to solve the above equation provided that Start is constant.
  if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
    return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
                                        -StartC->getValue()->getValue(),
                                        *this);
  return getCouldNotCompute();
}
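// Worked illustration of the affine path (invented numbers): for {10,+,-2}
// in 8-bit arithmetic, we solve 10 + (-2)*N = 0 (mod 2^8) via
// SolveLinEquationWithOverflow(254, 246): Mult2 = 1, AD = 127, Mod = 128,
// and I = 127 since 127 * 127 = 16129 = 1 (mod 128), giving
// N = (127 * 123) mod 128 = 5 -- matching the values 10, 8, 6, 4, 2 on
// iterations 0 through 4 before the recurrence reaches 0 on iteration 5.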
/// HowFarToNonZero - Return the number of times a backedge checking the
/// specified value for nonzero will execute.  If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in the
  // future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isNullValue())
      return getConstant(C->getType(), 0);
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}
/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
/// found.
///
std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return std::make_pair(Pred, BB);

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI->getLoopFor(BB))
    return std::make_pair(L->getLoopPredecessor(), L->getHeader());

  return std::pair<BasicBlock *, BasicBlock *>();
}
/// HasSameValue - SCEV structural equivalence is usually sufficient for
/// testing whether two expressions are equal, however for the purposes of
/// looking for a condition guarding a loop, it can be useful to be a little
/// more general, since a front-end may have replicated the controlling
/// expression.
///
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value. Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
            return true;

  // Otherwise assume they may have a different value.
  return false;
}
/// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
/// predicate Pred. Return true iff any changes were made.
///
bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
                                           const SCEV *&LHS, const SCEV *&RHS) {
  bool Changed = false;

  // Canonicalize a constant to the right side.
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
    // Check for both operands constant.
    if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
      if (ConstantExpr::getICmp(Pred,
                                LHSC->getValue(),
                                RHSC->getValue())->isNullValue())
        goto trivially_false;
      else
        goto trivially_true;
    }
    // Otherwise swap the operands to put the constant on the right.
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
    Changed = true;
  }

  // If we're comparing an addrec with a value which is loop-invariant in the
  // addrec's loop, put the addrec on the left. Also make a dominance check,
  // as both operands could be addrecs loop-invariant in each other's loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
    const Loop *L = AR->getLoop();
    if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
      Changed = true;
    }
  }

  // If there's a constant operand, canonicalize comparisons with boundary
  // cases, and canonicalize *-or-equal comparisons to regular comparisons.
  if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
    const APInt &RA = RC->getValue()->getValue();
    switch (Pred) {
    default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
    case ICmpInst::ICMP_EQ:
    case ICmpInst::ICMP_NE:
      break;
    case ICmpInst::ICMP_UGE:
      if ((RA - 1).isMinValue()) {
        Pred = ICmpInst::ICMP_NE;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      }
      if (RA.isMaxValue()) {
        Pred = ICmpInst::ICMP_EQ;
        Changed = true;
        break;
      }
      if (RA.isMinValue()) goto trivially_true;

      Pred = ICmpInst::ICMP_UGT;
      RHS = getConstant(RA - 1);
      Changed = true;
      break;
    case ICmpInst::ICMP_ULE:
      if ((RA + 1).isMaxValue()) {
        Pred = ICmpInst::ICMP_NE;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
      if (RA.isMinValue()) {
        Pred = ICmpInst::ICMP_EQ;
        Changed = true;
        break;
      }
      if (RA.isMaxValue()) goto trivially_true;

      Pred = ICmpInst::ICMP_ULT;
      RHS = getConstant(RA + 1);
      Changed = true;
      break;
    case ICmpInst::ICMP_SGE:
      if ((RA - 1).isMinSignedValue()) {
        Pred = ICmpInst::ICMP_NE;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      }
      if (RA.isMaxSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        Changed = true;
        break;
      }
      if (RA.isMinSignedValue()) goto trivially_true;

      Pred = ICmpInst::ICMP_SGT;
      RHS = getConstant(RA - 1);
      Changed = true;
      break;
    case ICmpInst::ICMP_SLE:
      if ((RA + 1).isMaxSignedValue()) {
        Pred = ICmpInst::ICMP_NE;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
      if (RA.isMinSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        Changed = true;
        break;
      }
      if (RA.isMaxSignedValue()) goto trivially_true;

      Pred = ICmpInst::ICMP_SLT;
      RHS = getConstant(RA + 1);
      Changed = true;
      break;
    case ICmpInst::ICMP_UGT:
      if (RA.isMinValue()) {
        Pred = ICmpInst::ICMP_NE;
        Changed = true;
        break;
      }
      if ((RA + 1).isMaxValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
      if (RA.isMaxValue()) goto trivially_false;
      break;
    case ICmpInst::ICMP_ULT:
      if (RA.isMaxValue()) {
        Pred = ICmpInst::ICMP_NE;
        Changed = true;
        break;
      }
      if ((RA - 1).isMinValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      }
      if (RA.isMinValue()) goto trivially_false;
      break;
    case ICmpInst::ICMP_SGT:
      if (RA.isMinSignedValue()) {
        Pred = ICmpInst::ICMP_NE;
        Changed = true;
        break;
      }
      if ((RA + 1).isMaxSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA + 1);
        Changed = true;
        break;
      }
      if (RA.isMaxSignedValue()) goto trivially_false;
      break;
    case ICmpInst::ICMP_SLT:
      if (RA.isMaxSignedValue()) {
        Pred = ICmpInst::ICMP_NE;
        Changed = true;
        break;
      }
      if ((RA - 1).isMinSignedValue()) {
        Pred = ICmpInst::ICMP_EQ;
        RHS = getConstant(RA - 1);
        Changed = true;
        break;
      }
      if (RA.isMinSignedValue()) goto trivially_false;
      break;
    }
  }

  // Check for obvious equality.
  if (HasSameValue(LHS, RHS)) {
    if (ICmpInst::isTrueWhenEqual(Pred))
      goto trivially_true;
    if (ICmpInst::isFalseWhenEqual(Pred))
      goto trivially_false;
  }

  // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
  // adding or subtracting 1 from one of the operands.
  switch (Pred) {
  case ICmpInst::ICMP_SLE:
    if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       /*HasNUW=*/false, /*HasNSW=*/true);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       /*HasNUW=*/false, /*HasNSW=*/true);
      Pred = ICmpInst::ICMP_SLT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_SGE:
    if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       /*HasNUW=*/false, /*HasNSW=*/true);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       /*HasNUW=*/false, /*HasNSW=*/true);
      Pred = ICmpInst::ICMP_SGT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_ULE:
    if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
                       /*HasNUW=*/true, /*HasNSW=*/false);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
                       /*HasNUW=*/true, /*HasNSW=*/false);
      Pred = ICmpInst::ICMP_ULT;
      Changed = true;
    }
    break;
  case ICmpInst::ICMP_UGE:
    if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
      RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
                       /*HasNUW=*/true, /*HasNSW=*/false);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
      LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
                       /*HasNUW=*/true, /*HasNSW=*/false);
      Pred = ICmpInst::ICMP_UGT;
      Changed = true;
    }
    break;
  default:
    break;
  }

  // TODO: More simplifications are possible here.

  return Changed;

trivially_true:
  // Return 0 == 0.
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_EQ;
  return true;

trivially_false:
  // Return 0 != 0.
  LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
  Pred = ICmpInst::ICMP_NE;
  return true;
}
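// Illustrative canonicalizations produced above (invented examples): with
// 8-bit unsigned operands, "x >=u 1" becomes "x != 0" (a boundary case in
// the first switch), "x <=u 7" becomes "x <u 8" (*-or-equal made strict in
// the second switch), and a constant left operand such as "3 <u x" is first
// swapped into "x >u 3".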
bool ScalarEvolution::isKnownNegative(const SCEV *S) {
  return getSignedRange(S).getSignedMax().isNegative();
}

bool ScalarEvolution::isKnownPositive(const SCEV *S) {
  return getSignedRange(S).getSignedMin().isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
  return !getSignedRange(S).getSignedMin().isNegative();
}

bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
  return !getSignedRange(S).getSignedMax().isStrictlyPositive();
}

bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
  return isKnownNegative(S) || isKnownPositive(S);
}
bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
                                       const SCEV *LHS, const SCEV *RHS) {
  // Canonicalize the inputs first.
  (void)SimplifyICmpOperands(Pred, LHS, RHS);

  // If LHS or RHS is an addrec, check to see if the condition is true in
  // every iteration of the loop.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
    if (isLoopEntryGuardedByCond(
          AR->getLoop(), Pred, AR->getStart(), RHS) &&
        isLoopBackedgeGuardedByCond(
          AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
      return true;
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
    if (isLoopEntryGuardedByCond(
          AR->getLoop(), Pred, LHS, AR->getStart()) &&
        isLoopBackedgeGuardedByCond(
          AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
      return true;

  // Otherwise see what can be done with known constant ranges.
  return isKnownPredicateWithRanges(Pred, LHS, RHS);
}
bool
ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS) {
  if (HasSameValue(LHS, RHS))
    return ICmpInst::isTrueWhenEqual(Pred);

  // This code is split out from isKnownPredicate because it is called from
  // within isLoopEntryGuardedByCond.
  switch (Pred) {
  default:
    llvm_unreachable("Unexpected ICmpInst::Predicate value!");
    break;
  case ICmpInst::ICMP_SGT:
    Pred = ICmpInst::ICMP_SLT;
    std::swap(LHS, RHS);
  case ICmpInst::ICMP_SLT: {
    ConstantRange LHSRange = getSignedRange(LHS);
    ConstantRange RHSRange = getSignedRange(RHS);
    if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
      return true;
    if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_SGE:
    Pred = ICmpInst::ICMP_SLE;
    std::swap(LHS, RHS);
  case ICmpInst::ICMP_SLE: {
    ConstantRange LHSRange = getSignedRange(LHS);
    ConstantRange RHSRange = getSignedRange(RHS);
    if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
      return true;
    if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_UGT:
    Pred = ICmpInst::ICMP_ULT;
    std::swap(LHS, RHS);
  case ICmpInst::ICMP_ULT: {
    ConstantRange LHSRange = getUnsignedRange(LHS);
    ConstantRange RHSRange = getUnsignedRange(RHS);
    if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_UGE:
    Pred = ICmpInst::ICMP_ULE;
    std::swap(LHS, RHS);
  case ICmpInst::ICMP_ULE: {
    ConstantRange LHSRange = getUnsignedRange(LHS);
    ConstantRange RHSRange = getUnsignedRange(RHS);
    if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
      return true;
    if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
      return false;
    break;
  }
  case ICmpInst::ICMP_NE: {
    if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
      return true;
    if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
      return true;

    const SCEV *Diff = getMinusSCEV(LHS, RHS);
    if (isKnownNonZero(Diff))
      return true;
    break;
  }
  case ICmpInst::ICMP_EQ:
    // The check at the top of the function catches the case where
    // the values are known to be equal.
    break;
  }
  return false;
}
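// Worked illustration (invented ranges): if getUnsignedRange(LHS) is [0, 4)
// and getUnsignedRange(RHS) is [10, 20), then max(LHS) = 3 <u 10 = min(RHS),
// so ICMP_ULT is known to hold.  If instead min(LHS) >=u max(RHS), the
// predicate can never hold; in that case, and whenever neither bound is
// decisive, the function conservatively answers false ("not known").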
/// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
/// protected by a conditional between LHS and RHS.  This is used to
/// eliminate casts.
bool
ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
                                             ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return true;

  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch)
    return false;

  BranchInst *LoopContinuePredicate =
    dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LoopContinuePredicate ||
      LoopContinuePredicate->isUnconditional())
    return false;

  return isImpliedCond(Pred, LHS, RHS,
                       LoopContinuePredicate->getCondition(),
                       LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}
/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS.  This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (std::pair<BasicBlock *, BasicBlock *>
         Pair(L->getLoopPredecessor(), L->getHeader());
       Pair.first;
       Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {

    BranchInst *LoopEntryPredicate =
      dyn_cast<BranchInst>(Pair.first->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (isImpliedCond(Pred, LHS, RHS,
                      LoopEntryPredicate->getCondition(),
                      LoopEntryPredicate->getSuccessor(0) != Pair.second))
      return true;
  }

  return false;
}
/// isImpliedCond - Test whether the condition described by Pred, LHS,
/// and RHS is true whenever the given Cond value evaluates to true.
bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
                                    const SCEV *LHS, const SCEV *RHS,
                                    Value *FoundCondValue,
                                    bool Inverse) {
  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
               isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
  if (!ICI) return false;

  // Bail if the ICmp's operands' types are wider than the needed type
  // before attempting to call getSCEV on them. This avoids infinite
  // recursion, since the analysis of widening casts can require loop
  // exit condition information for overflow checking, which would
  // lead back here.
  if (getTypeSizeInBits(LHS->getType()) <
      getTypeSizeInBits(ICI->getOperand(0)->getType()))
    return false;

  // Now that we found a conditional branch that dominates the loop, check to
  // see if it is the comparison we are looking for.
  ICmpInst::Predicate FoundPred;
  if (Inverse)
    FoundPred = ICI->getInversePredicate();
  else
    FoundPred = ICI->getPredicate();

  const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
  const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));

  // Balance the types. The case where FoundLHS' type is wider than
  // LHS' type is checked for above.
  if (getTypeSizeInBits(LHS->getType()) >
      getTypeSizeInBits(FoundLHS->getType())) {
    if (CmpInst::isSigned(Pred)) {
      FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
    } else {
      FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
      FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
    }
  }

  // Canonicalize the query to match the way instcombine will have
  // canonicalized the comparison.
  if (SimplifyICmpOperands(Pred, LHS, RHS))
    if (LHS == RHS)
      return CmpInst::isTrueWhenEqual(Pred);
  if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
    if (FoundLHS == FoundRHS)
      return CmpInst::isFalseWhenEqual(Pred);

  // Check to see if we can make the LHS or RHS match.
  if (LHS == FoundRHS || RHS == FoundLHS) {
    if (isa<SCEVConstant>(RHS)) {
      std::swap(FoundLHS, FoundRHS);
      FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
    } else {
      std::swap(LHS, RHS);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    }
  }

  // Check whether the found predicate is the same as the desired predicate.
  if (FoundPred == Pred)
    return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);

  // Check whether swapping the found predicate makes it the same as the
  // desired predicate.
  if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
    if (isa<SCEVConstant>(RHS))
      return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
    else
      return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
                                   RHS, LHS, FoundLHS, FoundRHS);
  }

  // Check whether the actual condition is beyond sufficient.
  if (FoundPred == ICmpInst::ICMP_EQ)
    if (ICmpInst::isTrueWhenEqual(Pred))
      if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
        return true;
  if (Pred == ICmpInst::ICMP_NE)
    if (!ICmpInst::isTrueWhenEqual(FoundPred))
      if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
        return true;

  // Otherwise assume the worst.
  return false;
}
/// isImpliedCondOperands - Test whether the condition described by Pred,
/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
/// and FoundRHS is true.
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
                                            const SCEV *LHS, const SCEV *RHS,
                                            const SCEV *FoundLHS,
                                            const SCEV *FoundRHS) {
  return isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     FoundLHS, FoundRHS) ||
         // ~x < ~y --> x > y
         isImpliedCondOperandsHelper(Pred, LHS, RHS,
                                     getNotSCEV(FoundRHS),
                                     getNotSCEV(FoundLHS));
}
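// The "~x < ~y --> x > y" identity above, worked out (an illustrative
// derivation): in two's complement, ~x = -x - 1, so
//   ~x < ~y  <=>  -x - 1 < -y - 1  <=>  -x < -y  <=>  x > y,
// and the same holds for the unsigned orderings, since bitwise NOT
// (~x = 2^BW - 1 - x) reverses the unsigned order as well.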
/// isImpliedCondOperandsHelper - Test whether the condition described by
/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true.
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
                                             const SCEV *LHS, const SCEV *RHS,
                                             const SCEV *FoundLHS,
                                             const SCEV *FoundRHS) {
  switch (Pred) {
  default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SLT:
  case ICmpInst::ICMP_SLE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_SGT:
  case ICmpInst::ICMP_SGE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_ULE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
      return true;
    break;
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_UGE:
    if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
        isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
      return true;
    break;
  }

  return false;
}
/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed. Return
/// CouldNotCompute if an intermediate computation overflows.
const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
                                        const SCEV *End,
                                        const SCEV *Step,
                                        bool NoWrap) {
  assert(!isKnownNegative(Step) &&
         "This code doesn't handle negative strides yet!");

  const Type *Ty = Start->getType();
  const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
  const SCEV *Diff = getMinusSCEV(End, Start);
  const SCEV *RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV *Add = getAddExpr(Diff, RoundUp);

  if (!NoWrap) {
    // Check Add for unsigned overflow.
    // TODO: More sophisticated things could be done here.
    const Type *WideTy = IntegerType::get(getContext(),
                                          getTypeSizeInBits(Ty) + 1);
    const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
    const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
    const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
    if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
      return getCouldNotCompute();
  }

  return getUDivExpr(Add, Step);
}
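// Worked example of the rounding-up division (invented values): with
// Start = 0, End = 10 and Step = 3, the count is
//   (End - Start + (Step - 1)) /u Step = (10 + 2) /u 3 = 4,
// covering the four induction values 0, 3, 6 and 9 that lie below End.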
/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute.  If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle:  "ADDREC < LoopInvariant".
  if (!isLoopInvariant(RHS, L)) return getCouldNotCompute();

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  // Check to see if we have a flag which makes analysis easy.
  bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
                           AddRec->hasNoUnsignedWrap();

  if (AddRec->isAffine()) {
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV *Step = AddRec->getStepRecurrence(*this);

    if (Step->isZero())
      return getCouldNotCompute();
    if (Step->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (isKnownPositive(Step)) {
      // Test whether a positive iteration can step past the limit
      // value and past the maximum value for its type in a single step.
      // Note that it's not sufficient to check NoWrap here, because even
      // though the value after a wrap is undefined, it's not undefined
      // behavior, so if wrap does occur, the loop could either terminate or
      // loop infinitely, but in either case, the loop is guaranteed to
      // iterate at least until the iteration where the wrapping occurs.
      const SCEV *One = getConstant(Step->getType(), 1);
      if (isSigned) {
        APInt Max = APInt::getSignedMaxValue(BitWidth);
        if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
              .slt(getSignedRange(RHS).getSignedMax()))
          return getCouldNotCompute();
      } else {
        APInt Max = APInt::getMaxValue(BitWidth);
        if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
              .ult(getUnsignedRange(RHS).getUnsignedMax()))
          return getCouldNotCompute();
      }
    } else
      // TODO: Handle negative strides here and below.
      return getCouldNotCompute();

    // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
    // m.  So, we count the number of iterations in which {n,+,s} < m is true.
    // Note that we cannot simply return max(m-n,0)/s because it's not safe to
    // treat m-n as signed nor unsigned due to overflow possibility.

    // First, we get the value of the LHS in the first iteration: n
    const SCEV *Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = getConstant(isSigned ?
      getSignedRange(Start).getSignedMin() :
      getUnsignedRange(Start).getUnsignedMin());

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times. Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times. In both cases,
    // the division must round up.
    const SCEV *End = RHS;
    if (!isLoopEntryGuardedByCond(L,
                                  isSigned ? ICmpInst::ICMP_SLT :
                                             ICmpInst::ICMP_ULT,
                                  getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV *MaxEnd = getConstant(isSigned ?
      getSignedRange(End).getSignedMax() :
      getUnsignedRange(End).getUnsignedMax());

    // If MaxEnd is within a step of the maximum integer value in its type,
    // adjust it down to the minimum value which would produce the same effect.
    // This allows the subsequent ceiling division of (N+(step-1))/step to
    // compute the correct value.
    const SCEV *StepMinusOne = getMinusSCEV(Step,
                                            getConstant(Step->getType(), 1));
    MaxEnd = isSigned ?
      getSMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
                               StepMinusOne)) :
      getUMinExpr(MaxEnd,
                  getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
                               StepMinusOne));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV *BECount = getBECount(Start, End, Step, NoWrap);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}
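// Putting the pieces together (an invented trace): for {0,+,3} <s 10 with
// the entry guard satisfied, Start = 0 and End = RHS = 10, so the number of
// iterations in which {0,+,3} < 10 holds is
//   (10 - 0 + (3 - 1)) /u 3 = 4,
// i.e. the comparison is true for the values 0, 3, 6 and 9 and first fails
// at 12.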
/// getNumIterationsInRange - Return the number of iterations of this loop
/// that produce values in the specified constant range.  Another way of
/// looking at this is that it returns the first iteration number where the
/// value is not in the range, thus computing the exit count.  If the
/// iteration count can't be computed, an instance of SCEVCouldNotCompute is
/// returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getConstant(SC->getType(), 0);
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
                           Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getConstant(getType(), 0);

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range
    //
    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt One(BitWidth,1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it.  To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());

    // Next, solve the constructed addrec
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
                                                      R1->getValue(),
                                                      R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);   // R1 is the minimum root now.

        // Make sure the root is not off by one.  The returned iteration should
        // not be in the range, but the previous one should be.  When solving
        // for "X*X < 5", for example, we should not return a root of 2.
        ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                             R1->getValue(),
                                                             SE);
        if (Range.contains(R1Val->getValue())) {
          // The next iteration must be out of the range...
          ConstantInt *NextVal =
            ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);

          R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
          if (!Range.contains(R1Val->getValue()))
            return SE.getConstant(NextVal);
          return SE.getCouldNotCompute();  // Something strange happened
        }

        // If R1 was not in the range, then it is a good return value.  Make
        // sure that R1-1 WAS in the range though, just in case.
        ConstantInt *NextVal =
          ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (Range.contains(R1Val->getValue()))
          return R1;
        return SE.getCouldNotCompute();  // Something strange happened
      }
    }
  }

  return SE.getCouldNotCompute();
}
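// Affine case worked through (invented numbers): {0,+,2} against the range
// [0, 7) has A = 2 and End = 7 - 1 = 6, so ExitVal = (6 + 2) /u 2 = 4.
// Iteration 4 produces the value 8, which is outside [0, 7), while
// iteration 3 produces 6, which is still inside, so 4 is returned.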
//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->ValueExprMap.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  Value *Old = getValPtr();
  SmallVector<User *, 16> Worklist;
  SmallPtrSet<User *, 8> Visited;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle. Postpone
    // that until everything else is done.
    if (U == Old)
      continue;
    if (!Visited.insert(U))
      continue;
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    SE->ValueExprMap.erase(U);
    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
         UI != UE; ++UI)
      Worklist.push_back(*UI);
  }
  // Delete the Old value.
  if (PHINode *PN = dyn_cast<PHINode>(Old))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  SE->ValueExprMap.erase(Old);
  // this now dangles!
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}
//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(ID), FirstUnknown(0) {
  initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  TD = getAnalysisIfAvailable<TargetData>();
  DT = &getAnalysis<DominatorTree>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  // Iterate through all the SCEVUnknown instances and call their
  // destructors, so that they release their references to their values.
  for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
    U->~SCEVUnknown();
  FirstUnknown = 0;

  ValueExprMap.clear();
  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  LoopDispositions.clear();
  BlockDispositions.clear();
  UnsignedRanges.clear();
  SignedRanges.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}
void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
  AU.addRequiredTransitive<DominatorTree>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  SmallVector<BasicBlock *, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n"
        "Loop ";
  WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
  OS << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}
void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting. Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier. This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);

  OS << "Classifying expressions for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
      OS << *I << '\n';
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&*I);
      SV->print(OS);

      const Loop *L = LI->getLoopFor((*I).getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!SE.isLoopInvariant(ExitValue, L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: ";
  WriteAsOperand(OS, F, /*PrintType=*/false);
  OS << "\n";
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    PrintLoopInfo(OS, &SE, *I);
}
ScalarEvolution::LoopDisposition
ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
  std::map<const Loop *, LoopDisposition> &Values = LoopDispositions[S];
  std::pair<std::map<const Loop *, LoopDisposition>::iterator, bool> Pair =
    Values.insert(std::make_pair(L, LoopVariant));
  if (!Pair.second)
    return Pair.first->second;

  LoopDisposition D = computeLoopDisposition(S, L);
  return LoopDispositions[S][L] = D;
}
ScalarEvolution::LoopDisposition
ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
  switch (S->getSCEVType()) {
  case scConstant:
    return LoopInvariant;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
  case scAddRecExpr: {
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);

    // If L is the addrec's loop, it's computable.
    if (AR->getLoop() == L)
      return LoopComputable;

    // Add recurrences are never invariant in the function-body (null loop).
    if (!L)
      return LoopVariant;

    // This recurrence is variant w.r.t. L if L contains AR's loop.
    if (L->contains(AR->getLoop()))
      return LoopVariant;

    // This recurrence is invariant w.r.t. L if AR's loop contains L.
    if (AR->getLoop()->contains(L))
      return LoopInvariant;

    // This recurrence is variant w.r.t. L if any of its operands
    // are variant.
    for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
         I != E; ++I)
      if (!isLoopInvariant(*I, L))
        return LoopVariant;

    // Otherwise it's loop-invariant.
    return LoopInvariant;
  }
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool HasVarying = false;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      LoopDisposition D = getLoopDisposition(*I, L);
      if (D == LoopVariant)
        return LoopVariant;
      if (D == LoopComputable)
        HasVarying = true;
    }
    return HasVarying ? LoopComputable : LoopInvariant;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
    if (LD == LoopVariant)
      return LoopVariant;
    LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
    if (RD == LoopVariant)
      return LoopVariant;
    return (LD == LoopInvariant && RD == LoopInvariant) ?
           LoopInvariant : LoopComputable;
  }
  case scUnknown:
    // All non-instruction values are loop invariant. All instructions are loop
    // invariant if they are not contained in the specified loop.
    // Instructions are never considered invariant in the function body
    // (null loop) because they are defined within the "loop".
    if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
      return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
    return LoopInvariant;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    return LoopVariant;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
  return LoopVariant;
}

bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopInvariant;
}

bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
  return getLoopDisposition(S, L) == LoopComputable;
}
ScalarEvolution::BlockDisposition
ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  std::map<const BasicBlock *, BlockDisposition> &Values = BlockDispositions[S];
  std::pair<std::map<const BasicBlock *, BlockDisposition>::iterator, bool>
    Pair = Values.insert(std::make_pair(BB, DoesNotDominateBlock));
  if (!Pair.second)
    return Pair.first->second;

  BlockDisposition D = computeBlockDisposition(S, BB);
  return BlockDispositions[S][BB] = D;
}
ScalarEvolution::BlockDisposition
ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
  switch (S->getSCEVType()) {
  case scConstant:
    return ProperlyDominatesBlock;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend:
    return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
  case scAddRecExpr: {
    // This uses a "dominates" query instead of "properly dominates" query
    // to test for proper dominance too, because the instruction which
    // produces the addrec's value is a PHI, and a PHI effectively properly
    // dominates its entire containing block.
    const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
    if (!DT->dominates(AR->getLoop()->getHeader(), BB))
      return DoesNotDominateBlock;
  }
    // FALL THROUGH into SCEVNAryExpr handling.
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    bool Proper = true;
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      BlockDisposition D = getBlockDisposition(*I, BB);
      if (D == DoesNotDominateBlock)
        return DoesNotDominateBlock;
      if (D == DominatesBlock)
        Proper = false;
    }
    return Proper ? ProperlyDominatesBlock : DominatesBlock;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    BlockDisposition LD = getBlockDisposition(LHS, BB);
    if (LD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    BlockDisposition RD = getBlockDisposition(RHS, BB);
    if (RD == DoesNotDominateBlock)
      return DoesNotDominateBlock;
    return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
      ProperlyDominatesBlock : DominatesBlock;
  }
  case scUnknown:
    if (Instruction *I =
          dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
      if (I->getParent() == BB)
        return DominatesBlock;
      if (DT->properlyDominates(I->getParent(), BB))
        return ProperlyDominatesBlock;
      return DoesNotDominateBlock;
    }
    return ProperlyDominatesBlock;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    return DoesNotDominateBlock;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
  return DoesNotDominateBlock;
}
bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) >= DominatesBlock;
}

bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
  return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
}
bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
  switch (S->getSCEVType()) {
  case scConstant:
    return false;
  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S);
    const SCEV *CastOp = Cast->getOperand();
    return Op == CastOp || hasOperand(CastOp, Op);
  }
  case scAddRecExpr:
  case scAddExpr:
  case scMulExpr:
  case scUMaxExpr:
  case scSMaxExpr: {
    const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
    for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
         I != E; ++I) {
      const SCEV *NAryOp = *I;
      if (NAryOp == Op || hasOperand(NAryOp, Op))
        return true;
    }
    return false;
  }
  case scUDivExpr: {
    const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
    const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
    return LHS == Op || hasOperand(LHS, Op) ||
           RHS == Op || hasOperand(RHS, Op);
  }
  case scUnknown:
    return false;
  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
    return false;
  default: break;
  }
  llvm_unreachable("Unknown SCEV kind!");
  return false;
}
void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
  ValuesAtScopes.erase(S);
  LoopDispositions.erase(S);
  BlockDispositions.erase(S);
  UnsignedRanges.erase(S);
  SignedRanges.erase(S);