//===- ScopHelper.cpp - Some Helper Functions for Scop. ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with Scop and LLVM-IR.
//
//===----------------------------------------------------------------------===//

#include "polly/Support/ScopHelper.h"
#include "polly/Options.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scop-helper"

static cl::opt<bool> PollyAllowErrorBlocks(
    "polly-allow-error-blocks",
    cl::desc("Allow to speculate on the execution of 'error blocks'."),
    cl::Hidden, cl::init(true), cl::ZeroOrMore, cl::cat(PollyCategory));

// Ensures that there is just one predecessor to the entry node from outside
// the region.
// The identity of the region entry node is preserved.
static void simplifyRegionEntry(Region *R, DominatorTree *DT, LoopInfo *LI,
                                RegionInfo *RI) {
  BasicBlock *EnteringBB = R->getEnteringBlock();
  BasicBlock *Entry = R->getEntry();

  // Before (one of):
  //
  //                       \    /            //
  //                      EnteringBB         //
  //                        |    \------>    //
  //   \   /                |                //
  //   Entry <--\         Entry <--\         //
  //   /   \    /         /   \    /         //
  //        ....               ....          //

  // Create single entry edge if the region has multiple entry edges.
  if (!EnteringBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(Entry))
      if (!R->contains(P))
        Preds.push_back(P);

    BasicBlock *NewEntering =
        SplitBlockPredecessors(Entry, Preds, ".region_entering", DT, LI);

    if (RI) {
      // The exit block of preceding regions must be changed to NewEntering.
      for (BasicBlock *ExitPred : predecessors(NewEntering)) {
        Region *RegionOfPred = RI->getRegionFor(ExitPred);
        if (RegionOfPred->getExit() != Entry)
          continue;

        while (!RegionOfPred->isTopLevelRegion() &&
               RegionOfPred->getExit() == Entry) {
          RegionOfPred->replaceExit(NewEntering);
          RegionOfPred = RegionOfPred->getParent();
        }
      }

      // Make all ancestors use EnteringBB as entry; there might be edges to it.
      Region *AncestorR = R->getParent();
      RI->setRegionFor(NewEntering, AncestorR);
      while (!AncestorR->isTopLevelRegion() && AncestorR->getEntry() == Entry) {
        AncestorR->replaceEntry(NewEntering);
        AncestorR = AncestorR->getParent();
      }
    }

    EnteringBB = NewEntering;
  }
  assert(R->getEnteringBlock() == EnteringBB);

  // After:
  //
  //    \    /       //
  //   EnteringBB    //
  //      |          //
  //      |          //
  //    Entry <--\   //
  //    /   \   /    //
  //     ....        //
}

// Ensure that the region has a single block that branches to the exit node.
static void simplifyRegionExit(Region *R, DominatorTree *DT, LoopInfo *LI,
                               RegionInfo *RI) {
  BasicBlock *ExitBB = R->getExit();
  BasicBlock *ExitingBB = R->getExitingBlock();

  // Before:
  //
  //   (Region)   ______/  //
  //      \  |   /         //
  //       ExitBB          //
  //       /    \          //

  if (!ExitingBB) {
    SmallVector<BasicBlock *, 4> Preds;
    for (BasicBlock *P : predecessors(ExitBB))
      if (R->contains(P))
        Preds.push_back(P);

    //  Preds[0] Preds[1]      otherBB //
    //         \  |  ________/         //
    //          \ | /                  //
    //           BB                    //
    ExitingBB =
        SplitBlockPredecessors(ExitBB, Preds, ".region_exiting", DT, LI);
    //  Preds[0] Preds[1]      otherBB //
    //         \  /           /        //
    //  BB.region_exiting    /         //
    //                   \  /          //
    //                    BB           //

    if (RI)
      RI->setRegionFor(ExitingBB, R);

    // Change the exit of nested regions, but not the exit of the region
    // itself.
    R->replaceExitRecursive(ExitingBB);
    R->replaceExit(ExitBB);
  }
  assert(ExitingBB == R->getExitingBlock());

  // After:
  //
  //     \   /                //
  //    ExitingBB     _____/  //
  //          \      /        //
  //           ExitBB         //
  //           /    \         //
}
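
// Transform the region into a simple region, i.e. a region with a single
// entering and a single exiting block, preserving DominatorTree, LoopInfo and
// RegionInfo.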
void polly::simplifyRegion(Region *R, DominatorTree *DT, LoopInfo *LI,
                           RegionInfo *RI) {
  assert(R && !R->isTopLevelRegion());
  assert(!RI || RI == R->getRegionInfo());
  assert((!RI || DT) &&
         "RegionInfo requires DominatorTree to be updated as well");

  simplifyRegionEntry(R, DT, LI, RI);
  simplifyRegionExit(R, DT, LI, RI);
  assert(R->isSimple());
}

// Split the block into two successive blocks.
//
// Like llvm::SplitBlock, but also preserves RegionInfo.
static BasicBlock *splitBlock(BasicBlock *Old, Instruction *SplitPt,
                              DominatorTree *DT, llvm::LoopInfo *LI,
                              RegionInfo *RI) {
  assert(Old && SplitPt);

  // Before:
  //
  //  \   /  //
  //   Old   //
  //  /   \  //

  BasicBlock *NewBlock = llvm::SplitBlock(Old, SplitPt, DT, LI);

  if (RI) {
    Region *R = RI->getRegionFor(Old);
    RI->setRegionFor(NewBlock, R);
  }

  // After:
  //
  //  \   /    //
  //   Old     //
  //    |      //
  //  NewBlock //
  //   /   \   //

  return NewBlock;
}
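
// Split the entry block of a function behind its (static) alloca
// instructions. Illustrative example (not from the original source): given an
// entry block
//   %a = alloca i32
//   %b = alloca [8 x i64]
//   %v = load i32, i32* %p
//   ...
// the allocas stay in the entry block, while %v and everything after it is
// moved into a newly created successor block.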
void polly::splitEntryBlockForAlloca(BasicBlock *EntryBlock, Pass *P) {
  // Find first non-alloca instruction. Every basic block has a non-alloca
  // instruction, as every well-formed basic block has a terminator.
  BasicBlock::iterator I = EntryBlock->begin();
  while (isa<AllocaInst>(I))
    ++I;

  auto *DTWP = P->getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *LIWP = P->getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *LI = LIWP ? &LIWP->getLoopInfo() : nullptr;
  RegionInfoPass *RIP = P->getAnalysisIfAvailable<RegionInfoPass>();
  RegionInfo *RI = RIP ? &RIP->getRegionInfo() : nullptr;

  // splitBlock updates DT, LI and RI.
  splitBlock(EntryBlock, &*I, DT, LI, RI);
}

/// The SCEVExpander will __not__ generate any code for an existing SDiv/SRem
/// instruction; it will just reuse it if it is referenced as a SCEVUnknown.
/// However, we want to generate new code if the instruction is in the
/// analyzed region and we generate code outside of, i.e., in front of, that
/// region. Hence, we generate the code for the SDiv/SRem operands in front of
/// the analyzed region and then create a new SDiv/SRem operation there too.
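///
/// For example (illustrative, not part of the original documentation): if the
/// analyzed region contains
///     %quot = sdiv i64 %a, %b
/// and %quot is referenced by a runtime check emitted in front of the region,
/// the expander recreates the operands and a fresh sdiv before the region
/// instead of referencing the original instruction inside it.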
struct ScopExpander : SCEVVisitor<ScopExpander, const SCEV *> {
  friend struct SCEVVisitor<ScopExpander, const SCEV *>;

  explicit ScopExpander(const Region &R, ScalarEvolution &SE,
                        const DataLayout &DL, const char *Name, ValueMapT *VMap,
                        BasicBlock *RTCBB)
      : Expander(SCEVExpander(SE, DL, Name)), SE(SE), Name(Name), R(R),
        VMap(VMap), RTCBB(RTCBB) {}

  Value *expandCodeFor(const SCEV *E, Type *Ty, Instruction *I) {
    // If we generate code in the region we will immediately fall back to the
    // SCEVExpander, otherwise we will stop at all unknowns in the SCEV and if
    // needed replace them by copies computed in the entering block.
    if (!R.contains(I))
      E = visit(E);
    return Expander.expandCodeFor(E, Ty, I);
  }

private:
  SCEVExpander Expander;
  ScalarEvolution &SE;
  const char *Name;
  const Region &R;
  ValueMapT *VMap;
  BasicBlock *RTCBB;

  const SCEV *visitGenericInst(const SCEVUnknown *E, Instruction *Inst,
                               Instruction *IP) {
    if (!Inst || !R.contains(Inst))
      return E;

    assert(!Inst->mayThrow() && !Inst->mayReadOrWriteMemory() &&
           !isa<PHINode>(Inst));

    auto *InstClone = Inst->clone();
    for (auto &Op : Inst->operands()) {
      assert(SE.isSCEVable(Op->getType()));
      auto *OpSCEV = SE.getSCEV(Op);
      auto *OpClone = expandCodeFor(OpSCEV, Op->getType(), IP);
      InstClone->replaceUsesOfWith(Op, OpClone);
    }

    InstClone->setName(Name + Inst->getName());
    InstClone->insertBefore(IP);
    return SE.getSCEV(InstClone);
  }

  const SCEV *visitUnknown(const SCEVUnknown *E) {

    // If a value mapping was given, check whether the underlying value has
    // been remapped.
    Value *NewVal = VMap ? VMap->lookup(E->getValue()) : nullptr;
    if (NewVal) {
      auto *NewE = SE.getSCEV(NewVal);

      // While the mapped value might be different, its SCEV representation
      // might not be. Check this before recursing to avoid infinite
      // recursion.
      if (E != NewE)
        return visit(NewE);
    }

    Instruction *Inst = dyn_cast<Instruction>(E->getValue());
    Instruction *IP;
    if (Inst && !R.contains(Inst))
      IP = Inst;
    else if (Inst && RTCBB->getParent() == Inst->getFunction())
      IP = RTCBB->getTerminator();
    else
      IP = RTCBB->getParent()->getEntryBlock().getTerminator();

    if (!Inst || (Inst->getOpcode() != Instruction::SRem &&
                  Inst->getOpcode() != Instruction::SDiv))
      return visitGenericInst(E, Inst, IP);

    const SCEV *LHSScev = SE.getSCEV(Inst->getOperand(0));
    const SCEV *RHSScev = SE.getSCEV(Inst->getOperand(1));
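
    // If we cannot prove the divisor to be non-zero, clamp it to at least 1
    // so that the code expanded in front of the region cannot trap.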
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));

    Value *LHS = expandCodeFor(LHSScev, E->getType(), IP);
    Value *RHS = expandCodeFor(RHSScev, E->getType(), IP);

    Inst = BinaryOperator::Create((Instruction::BinaryOps)Inst->getOpcode(),
                                  LHS, RHS, Inst->getName() + Name, IP);
    return SE.getSCEV(Inst);
  }

  /// The following functions will just traverse the SCEV and rebuild it with
  /// the new operands returned by the traversal.
  ///
  ///{
  const SCEV *visitConstant(const SCEVConstant *E) { return E; }
  const SCEV *visitTruncateExpr(const SCEVTruncateExpr *E) {
    return SE.getTruncateExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *E) {
    return SE.getZeroExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitSignExtendExpr(const SCEVSignExtendExpr *E) {
    return SE.getSignExtendExpr(visit(E->getOperand()), E->getType());
  }
  const SCEV *visitUDivExpr(const SCEVUDivExpr *E) {
    auto *RHSScev = visit(E->getRHS());
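    // As above, guard the expanded unsigned division against a zero divisor.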
    if (!SE.isKnownNonZero(RHSScev))
      RHSScev = SE.getUMaxExpr(RHSScev, SE.getConstant(E->getType(), 1));
    return SE.getUDivExpr(visit(E->getLHS()), RHSScev);
  }
  const SCEV *visitAddExpr(const SCEVAddExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddExpr(NewOps);
  }
  const SCEV *visitMulExpr(const SCEVMulExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getMulExpr(NewOps);
  }
  const SCEV *visitUMaxExpr(const SCEVUMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getUMaxExpr(NewOps);
  }
  const SCEV *visitSMaxExpr(const SCEVSMaxExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getSMaxExpr(NewOps);
  }
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
    SmallVector<const SCEV *, 4> NewOps;
    for (const SCEV *Op : E->operands())
      NewOps.push_back(visit(Op));
    return SE.getAddRecExpr(NewOps, E->getLoop(), E->getNoWrapFlags());
  }
  ///}
};
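
// Hypothetical usage (illustrative only), expanding the address of an access
// in front of the region for a runtime check:
//   Value *PtrV = polly::expandCodeFor(S, SE, DL, "polly", PtrSCEV,
//                                      Ptr->getType(), RTCBB->getTerminator(),
//                                      nullptr, RTCBB);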
Value *polly::expandCodeFor(Scop &S, ScalarEvolution &SE, const DataLayout &DL,
                            const char *Name, const SCEV *E, Type *Ty,
                            Instruction *IP, ValueMapT *VMap,
                            BasicBlock *RTCBB) {
  ScopExpander Expander(S.getRegion(), SE, DL, Name, VMap, RTCBB);
  return Expander.expandCodeFor(E, Ty, IP);
}
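
// A block is considered an "error block" if it is heuristically unlikely to
// be executed, e.g. because it terminates in unreachable or unconditionally
// calls a function that does not return. Polly may speculate that such blocks
// do not execute when modeling a Scop.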
bool polly::isErrorBlock(BasicBlock &BB, const Region &R, LoopInfo &LI,
                         const DominatorTree &DT) {
  if (!PollyAllowErrorBlocks)
    return false;

  if (isa<UnreachableInst>(BB.getTerminator()))
    return true;

  if (LI.isLoopHeader(&BB))
    return false;

  // Basic blocks that are always executed are not considered error blocks,
  // as their execution cannot be a rare event.
  bool DominatesAllPredecessors = true;
  if (R.isTopLevelRegion()) {
    for (BasicBlock &I : *R.getEntry()->getParent())
      if (isa<ReturnInst>(I.getTerminator()) && !DT.dominates(&BB, &I))
        DominatesAllPredecessors = false;
  } else {
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;
  }

  if (DominatesAllPredecessors)
    return false;

  // FIXME: This is a simple heuristic to determine if the block is executed
  //        in a conditional. However, we actually would need the control
  //        condition, i.e., the post dominance frontier. Alternatively we
  //        could walk up the dominance tree until we find a block that is
  //        not post dominated by the block and check if it is a conditional
  //        or a loop header.
  auto *DTNode = DT.getNode(&BB);
  auto *IDomBB = DTNode->getIDom()->getBlock();
  if (LI.isLoopHeader(IDomBB))
    return false;
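
  // Scan the block for calls: a call that may access memory or that is known
  // not to return marks the block as an error block.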
  for (Instruction &Inst : BB)
    if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
      if (isIgnoredIntrinsic(CI))
        return false;

      if (!CI->doesNotAccessMemory())
        return true;
      if (CI->doesNotReturn())
        return true;
    }

  return false;
}
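
// Return the value the given terminator branches on: the condition of a
// conditional branch, an always-true constant for an unconditional branch,
// the condition of a switch, and nullptr for any other terminator kind.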
Value *polly::getConditionFromTerminator(TerminatorInst *TI) {
  if (BranchInst *BR = dyn_cast<BranchInst>(TI)) {
    if (BR->isUnconditional())
      return ConstantInt::getTrue(Type::getInt1Ty(TI->getContext()));

    return BR->getCondition();
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
    return SI->getCondition();

  return nullptr;
}
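
// A load is hoistable in front of the region if its pointer operand is
// invariant in every surrounding loop inside the region and no potentially
// writing user of that pointer dominates the load or is executed on every
// path through the region.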
bool polly::isHoistableLoad(LoadInst *LInst, Region &R, LoopInfo &LI,
                            ScalarEvolution &SE, const DominatorTree &DT) {
  Loop *L = LI.getLoopFor(LInst->getParent());
  auto *Ptr = LInst->getPointerOperand();
  const SCEV *PtrSCEV = SE.getSCEVAtScope(Ptr, L);
  while (L && R.contains(L)) {
    if (!SE.isLoopInvariant(PtrSCEV, L))
      return false;
    L = L->getParentLoop();
  }

  for (auto *User : Ptr->users()) {
    auto *UserI = dyn_cast<Instruction>(User);
    if (!UserI || !R.contains(UserI))
      continue;
    if (!UserI->mayWriteToMemory())
      continue;

    auto &BB = *UserI->getParent();
    if (DT.dominates(&BB, LInst->getParent()))
      return false;

    bool DominatesAllPredecessors = true;
    for (auto Pred : predecessors(R.getExit()))
      if (R.contains(Pred) && !DT.dominates(&BB, Pred))
        DominatesAllPredecessors = false;

    if (!DominatesAllPredecessors)
      continue;

    return false;
  }

  return true;
}

bool polly::isIgnoredIntrinsic(const Value *V) {
  if (auto *IT = dyn_cast<IntrinsicInst>(V)) {
    switch (IT->getIntrinsicID()) {
    // Lifetime markers are supported/ignored.
    case llvm::Intrinsic::lifetime_start:
    case llvm::Intrinsic::lifetime_end:
    // Invariant markers are supported/ignored.
    case llvm::Intrinsic::invariant_start:
    case llvm::Intrinsic::invariant_end:
    // Some misc annotations are supported/ignored.
    case llvm::Intrinsic::var_annotation:
    case llvm::Intrinsic::ptr_annotation:
    case llvm::Intrinsic::annotation:
    case llvm::Intrinsic::donothing:
    case llvm::Intrinsic::assume:
    // Some debug info intrinsics are supported/ignored.
    case llvm::Intrinsic::dbg_value:
    case llvm::Intrinsic::dbg_declare:
      return true;
    default:
      break;
    }
  }
  return false;
}
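
// A value can be synthesized if ScalarEvolution can express it without
// referring to scalars defined inside the region, apart from the required
// invariant loads. For example (illustrative), an affine expression over
// induction variables such as i + 4 * j is typically synthesizable, whereas
// the result of a call with unknown side effects is not.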
bool polly::canSynthesize(const Value *V, const Scop &S, ScalarEvolution *SE,
                          Loop *Scope) {
  if (!V || !SE->isSCEVable(V->getType()))
    return false;

  const InvariantLoadsSetTy &ILS = S.getRequiredInvariantLoads();
  if (const SCEV *Scev = SE->getSCEVAtScope(const_cast<Value *>(V), Scope))
    if (!isa<SCEVCouldNotCompute>(Scev))
      if (!hasScalarDepsInsideRegion(Scev, &S.getRegion(), Scope, false, ILS))
        return true;

  return false;
}
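
// Return the block in which a use is effectively located. For a use in a PHI
// node this is the incoming block of the use, not the block that contains the
// PHI itself.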
llvm::BasicBlock *polly::getUseBlock(const llvm::Use &U) {
  Instruction *UI = dyn_cast<Instruction>(U.getUser());
  if (!UI)
    return nullptr;

  if (PHINode *PHI = dyn_cast<PHINode>(UI))
    return PHI->getIncomingBlock(U);

  return UI->getParent();
}
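
// Derive one subscript expression per dimension of the GEP, together with the
// sizes of all but the outermost array dimension. Illustrative example (not
// part of the original source): for
//   %p = getelementptr [1024 x [64 x float]], [1024 x [64 x float]]* %A,
//        i64 0, i64 %i, i64 %j
// this returns the subscripts {%i, %j} and the sizes {64}; the constant-zero
// first index drops the leading, unsized dimension. If the GEP does not match
// this array-of-arrays pattern, both result vectors are returned empty.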
std::tuple<std::vector<const SCEV *>, std::vector<int>>
polly::getIndexExpressionsFromGEP(GetElementPtrInst *GEP, ScalarEvolution &SE) {
  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;

  Type *Ty = GEP->getPointerOperandType();

  bool DroppedFirstDim = false;

  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {

    const SCEV *Expr = SE.getSCEV(GEP->getOperand(i));

    if (i == 1) {
      if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
        Ty = PtrTy->getElementType();
      } else if (auto *ArrayTy = dyn_cast<ArrayType>(Ty)) {
        Ty = ArrayTy->getElementType();
      } else {
        Subscripts.clear();
        Sizes.clear();
        break;
      }
      if (auto *Const = dyn_cast<SCEVConstant>(Expr))
        if (Const->getValue()->isZero()) {
          DroppedFirstDim = true;
          continue;
        }
      Subscripts.push_back(Expr);
      continue;
    }

    auto *ArrayTy = dyn_cast<ArrayType>(Ty);
    if (!ArrayTy) {
      Subscripts.clear();
      Sizes.clear();
      break;
    }

    Subscripts.push_back(Expr);
    if (!(DroppedFirstDim && i == 2))
      Sizes.push_back(ArrayTy->getNumElements());

    Ty = ArrayTy->getElementType();
  }

  return std::make_tuple(Subscripts, Sizes);
}
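
// Walk up the loop tree, skipping "boxed" loops, i.e. loops contained in a
// non-affine subregion, and return the first loop that is not boxed (or
// nullptr if all surrounding loops are boxed).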
llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  while (BoxedLoops.count(L))
    L = L->getParentLoop();
  return L;
}

llvm::Loop *polly::getFirstNonBoxedLoopFor(llvm::BasicBlock *BB,
                                           llvm::LoopInfo &LI,
                                           const BoxedLoopsSetTy &BoxedLoops) {
  Loop *L = LI.getLoopFor(BB);
  return getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
}