//===- ScopBuilder.cpp ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Create a polyhedral description for a static control flow region.
//
// The pass creates a polyhedral description of the SCoPs detected by the SCoP
// detection, derived from their LLVM-IR code.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopBuilder.h"
#include "polly/Options.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/IR/DiagnosticInfo.h"

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scops"

STATISTIC(ScopFound, "Number of valid Scops");
STATISTIC(RichScopFound, "Number of Scops containing a loop");
STATISTIC(InfeasibleScops,
          "Number of SCoPs with statically infeasible context.");

// If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
// for Polly. If the loop is affine, return the loop itself. Do not call
// `getSCEVAtScope()` on the result of `getFirstNonBoxedLoopFor()`, as we need
// to analyze the memory accesses of the nonaffine/boxed loops.
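//
// For example (hypothetical): if %inner is a non-affine (boxed) loop nested in
// an affine loop %outer, BoxedLoops contains %inner and this function returns
// %outer for L = %inner; for a non-boxed L it returns L itself.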
static Loop *getFirstNonBoxedLoopFor(Loop *L, LoopInfo &LI,
                                     const BoxedLoopsSetTy &BoxedLoops) {
  while (BoxedLoops.count(L))
    L = L->getParentLoop();
  return L;
}

static cl::opt<bool> ModelReadOnlyScalars(
    "polly-analyze-read-only-scalars",
    cl::desc("Model read-only scalar values in the scop description"),
    cl::Hidden, cl::ZeroOrMore, cl::init(true), cl::cat(PollyCategory));

void ScopBuilder::buildPHIAccesses(PHINode *PHI, Region *NonAffineSubRegion,
                                   bool IsExitBlock) {

  // PHI nodes that are in the exit block of the region (i.e., when IsExitBlock
  // is true) are not modeled as ordinary PHI nodes, as they are not part of
  // the region. However, we model their operands in the predecessor blocks
  // that are part of the region as regular scalar accesses.

  // If we can synthesize a PHI we can skip it; however, this is only possible
  // if it is in the region. If it is not, it can only be in the exit block of
  // the region, in which case we model the operands but not the PHI itself.
  auto *Scope = LI.getLoopFor(PHI->getParent());
  if (!IsExitBlock && canSynthesize(PHI, *scop, &SE, Scope))
    return;

  // PHI nodes are modeled as if they had been demoted prior to the SCoP
  // detection. Hence, the PHI is a load of a new memory location in which the
  // incoming value was written at the end of the incoming basic block.
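  //
  // For example (a hypothetical snippet):
  //   merge: %phi = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  // is modeled as a write of %a at the end of %bb1, a write of %b at the end
  // of %bb2, and a read of the PHI's memory location in %merge.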
  bool OnlyNonAffineSubRegionOperands = true;
  for (unsigned u = 0; u < PHI->getNumIncomingValues(); u++) {
    Value *Op = PHI->getIncomingValue(u);
    BasicBlock *OpBB = PHI->getIncomingBlock(u);

    // Do not build PHI dependences inside a non-affine subregion, but make
    // sure that the necessary scalar values are still made available.
    if (NonAffineSubRegion && NonAffineSubRegion->contains(OpBB)) {
      auto *OpInst = dyn_cast<Instruction>(Op);
      if (!OpInst || !NonAffineSubRegion->contains(OpInst))
        ensureValueRead(Op, OpBB);
      continue;
    }

    OnlyNonAffineSubRegionOperands = false;
    ensurePHIWrite(PHI, OpBB, Op, IsExitBlock);
  }

  if (!OnlyNonAffineSubRegionOperands && !IsExitBlock) {
    addPHIReadAccess(PHI);
  }
}

void ScopBuilder::buildScalarDependences(Instruction *Inst) {
  assert(!isa<PHINode>(Inst));

  // Pull-in required operands.
  for (Use &Op : Inst->operands())
    ensureValueRead(Op.get(), Inst->getParent());
}

void ScopBuilder::buildEscapingDependences(Instruction *Inst) {
  // Check for uses of this instruction outside the scop. Because we do not
  // iterate over such instructions, and therefore have not "ensured" the
  // existence of a write, we must determine such uses here.
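  // For example (hypothetical), a value %v computed inside the scop and used
  // by an instruction after the region requires a MemoryKind::Value write so
  // that code generation can make %v available outside the scop.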
  for (Use &U : Inst->uses()) {
    Instruction *UI = dyn_cast<Instruction>(U.getUser());
    if (!UI)
      continue;

    BasicBlock *UseParent = getUseBlock(U);
    BasicBlock *UserParent = UI->getParent();

    // An escaping value is either used by an instruction not within the scop,
    // or (when the scop region's exit needs to be simplified) by a PHI in the
    // scop's exit block. This is because region simplification before code
    // generation inserts new basic blocks before the PHI such that its
    // incoming blocks are not in the scop anymore.
    if (!scop->contains(UseParent) ||
        (isa<PHINode>(UI) && scop->isExit(UserParent) &&
         scop->hasSingleExitEdge())) {
      // At least one escaping use found.
      ensureValueWrite(Inst);
      break;
    }
  }
}
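
// Build a multi-dimensional array access with fixed (compile-time constant)
// dimension sizes. For example (hypothetical), "float A[8][32]; ... A[i][j]"
// yields the subscripts {i, j} and the size {32} from the GEP's index
// expressions.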
bool ScopBuilder::buildAccessMultiDimFixed(MemAccInst Inst, Loop *L) {
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  Value *Address = Inst.getPointerOperand();
  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  if (auto *BitCast = dyn_cast<BitCastInst>(Address)) {
    auto *Src = BitCast->getOperand(0);
    auto *SrcTy = Src->getType();
    auto *DstTy = BitCast->getType();
    // Do not try to delinearize non-sized (opaque) pointers.
    if ((SrcTy->isPointerTy() && !SrcTy->getPointerElementType()->isSized()) ||
        (DstTy->isPointerTy() && !DstTy->getPointerElementType()->isSized())) {
      return false;
    }
    if (SrcTy->isPointerTy() && DstTy->isPointerTy() &&
        DL.getTypeAllocSize(SrcTy->getPointerElementType()) ==
            DL.getTypeAllocSize(DstTy->getPointerElementType()))
      Address = Src;
  }

  auto *GEP = dyn_cast<GetElementPtrInst>(Address);
  if (!GEP)
    return false;

  std::vector<const SCEV *> Subscripts;
  std::vector<int> Sizes;
  std::tie(Subscripts, Sizes) = getIndexExpressionsFromGEP(GEP, SE);
  auto *BasePtr = GEP->getOperand(0);

  if (auto *BasePtrCast = dyn_cast<BitCastInst>(BasePtr))
    BasePtr = BasePtrCast->getOperand(0);

  // Check for identical base pointers to ensure that we do not miss index
  // offsets that have been added before this GEP is applied.
  if (BasePtr != BasePointer->getValue())
    return false;

  std::vector<const SCEV *> SizesSCEV;

  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();

  Loop *SurroundingLoop = getFirstNonBoxedLoopFor(L, LI, scop->getBoxedLoops());
  for (auto *Subscript : Subscripts) {
    InvariantLoadsSetTy AccessILS;
    if (!isAffineExpr(&scop->getRegion(), SurroundingLoop, Subscript, SE,
                      &AccessILS))
      return false;

    for (LoadInst *LInst : AccessILS)
      if (!ScopRIL.count(LInst))
        return false;
  }

  if (Sizes.empty())
    return false;

  SizesSCEV.push_back(nullptr);

  for (auto V : Sizes)
    SizesSCEV.push_back(SE.getSCEV(
        ConstantInt::get(IntegerType::getInt64Ty(BasePtr->getContext()), V)));

  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
                 Subscripts, SizesSCEV, Val);
  return true;
}
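
// Build a multi-dimensional array access whose dimension sizes are parametric,
// recovered by delinearization. For example (hypothetical),
// "void f(long n, float A[][n])" with an access A[i][j] has the delinearized
// size {n}.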
bool ScopBuilder::buildAccessMultiDimParam(MemAccInst Inst, Loop *L) {
  if (!PollyDelinearize)
    return false;

  Value *Address = Inst.getPointerOperand();
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  unsigned ElementSize = DL.getTypeAllocSize(ElementType);
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");

  auto &InsnToMemAcc = scop->getInsnToMemAccMap();
  auto AccItr = InsnToMemAcc.find(Inst);
  if (AccItr == InsnToMemAcc.end())
    return false;

  std::vector<const SCEV *> Sizes = {nullptr};

  Sizes.insert(Sizes.end(), AccItr->second.Shape->DelinearizedSizes.begin(),
               AccItr->second.Shape->DelinearizedSizes.end());
  // Remove the element size. This information is already provided by the
  // ElementSize parameter. In case the element size of this access and the
  // element size used for delinearization differ, the delinearization is
  // incorrect. Hence, we invalidate the scop.
  //
  // TODO: Handle delinearization with differing element sizes.
  auto DelinearizedSize =
      cast<SCEVConstant>(Sizes.back())->getAPInt().getSExtValue();
  Sizes.pop_back();
  if (ElementSize != DelinearizedSize)
    scop->invalidate(DELINEARIZATION, Inst->getDebugLoc());

  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, true,
                 AccItr->second.DelinearizedSubscripts, Sizes, Val);
  return true;
}
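
// Model memory intrinsics as accesses to byte arrays. For example
// (hypothetical), "memset(P, 0, n)" becomes a MUST_WRITE of n bytes relative
// to P's base pointer; for "memcpy" a READ of the source is added as well.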
bool ScopBuilder::buildAccessMemIntrinsic(MemAccInst Inst, Loop *L) {
  auto *MemIntr = dyn_cast_or_null<MemIntrinsic>(Inst);

  if (MemIntr == nullptr)
    return false;

  auto *LengthVal = SE.getSCEVAtScope(MemIntr->getLength(), L);
  assert(LengthVal);

  // Check if the length value is actually affine or if we overapproximate it.
  InvariantLoadsSetTy AccessILS;
  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();

  Loop *SurroundingLoop = getFirstNonBoxedLoopFor(L, LI, scop->getBoxedLoops());
  bool LengthIsAffine = isAffineExpr(&scop->getRegion(), SurroundingLoop,
                                     LengthVal, SE, &AccessILS);
  for (LoadInst *LInst : AccessILS)
    if (!ScopRIL.count(LInst))
      LengthIsAffine = false;
  if (!LengthIsAffine)
    LengthVal = nullptr;

  auto *DestPtrVal = MemIntr->getDest();
  assert(DestPtrVal);

  auto *DestAccFunc = SE.getSCEVAtScope(DestPtrVal, L);
  assert(DestAccFunc);
  // Ignore accesses to "NULL".
  // TODO: We could use this to optimize the region further, e.g., intersect
  //       the context with
  //         isl_set_complement(isl_set_params(getDomain()))
  //       as we know it would be undefined to execute this instruction anyway.
  if (DestAccFunc->isZero())
    return true;

  auto *DestPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(DestAccFunc));
  assert(DestPtrSCEV);
  DestAccFunc = SE.getMinusSCEV(DestAccFunc, DestPtrSCEV);
  addArrayAccess(Inst, MemoryAccess::MUST_WRITE, DestPtrSCEV->getValue(),
                 IntegerType::getInt8Ty(DestPtrVal->getContext()),
                 LengthIsAffine, {DestAccFunc, LengthVal}, {nullptr},
                 Inst.getValueOperand());

  auto *MemTrans = dyn_cast<MemTransferInst>(MemIntr);
  if (!MemTrans)
    return true;

  auto *SrcPtrVal = MemTrans->getSource();
  assert(SrcPtrVal);

  auto *SrcAccFunc = SE.getSCEVAtScope(SrcPtrVal, L);
  assert(SrcAccFunc);
  // Ignore accesses to "NULL".
  // TODO: See above TODO
  if (SrcAccFunc->isZero())
    return true;

  auto *SrcPtrSCEV = dyn_cast<SCEVUnknown>(SE.getPointerBase(SrcAccFunc));
  assert(SrcPtrSCEV);
  SrcAccFunc = SE.getMinusSCEV(SrcAccFunc, SrcPtrSCEV);
  addArrayAccess(Inst, MemoryAccess::READ, SrcPtrSCEV->getValue(),
                 IntegerType::getInt8Ty(SrcPtrVal->getContext()),
                 LengthIsAffine, {SrcAccFunc, LengthVal}, {nullptr},
                 Inst.getValueOperand());

  return true;
}
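
// Model call instructions based on their mod/ref behavior. For example
// (hypothetical), a call that only reads its pointer arguments gets a READ
// access covering the entire array behind each pointer argument.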
bool ScopBuilder::buildAccessCallInst(MemAccInst Inst, Loop *L) {
  auto *CI = dyn_cast_or_null<CallInst>(Inst);

  if (CI == nullptr)
    return false;

  if (CI->doesNotAccessMemory() || isIgnoredIntrinsic(CI))
    return true;

  bool ReadOnly = false;
  auto *AF = SE.getConstant(IntegerType::getInt64Ty(CI->getContext()), 0);
  auto *CalledFunction = CI->getCalledFunction();
  switch (AA.getModRefBehavior(CalledFunction)) {
  case FMRB_UnknownModRefBehavior:
    llvm_unreachable("Unknown mod ref behaviour cannot be represented.");
  case FMRB_DoesNotAccessMemory:
    return true;
  case FMRB_DoesNotReadMemory:
  case FMRB_OnlyAccessesInaccessibleMem:
  case FMRB_OnlyAccessesInaccessibleOrArgMem:
    return false;
  case FMRB_OnlyReadsMemory:
    GlobalReads.push_back(CI);
    return true;
  case FMRB_OnlyReadsArgumentPointees:
    ReadOnly = true;
  // Fall through
  case FMRB_OnlyAccessesArgumentPointees:
    auto AccType = ReadOnly ? MemoryAccess::READ : MemoryAccess::MAY_WRITE;
    for (const auto &Arg : CI->arg_operands()) {
      if (!Arg->getType()->isPointerTy())
        continue;

      auto *ArgSCEV = SE.getSCEVAtScope(Arg, L);
      if (ArgSCEV->isZero())
        continue;

      auto *ArgBasePtr = cast<SCEVUnknown>(SE.getPointerBase(ArgSCEV));
      addArrayAccess(Inst, AccType, ArgBasePtr->getValue(),
                     ArgBasePtr->getType(), false, {AF}, {nullptr}, CI);
    }
    return true;
  }

  return true;
}
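
// Fall back to a single-dimensional access: the offset of the pointer from
// its base pointer is the only subscript, e.g. (hypothetical) A[4 * i + j].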
void ScopBuilder::buildAccessSingleDim(MemAccInst Inst, Loop *L) {
  Value *Address = Inst.getPointerOperand();
  Value *Val = Inst.getValueOperand();
  Type *ElementType = Val->getType();
  enum MemoryAccess::AccessType AccType =
      isa<LoadInst>(Inst) ? MemoryAccess::READ : MemoryAccess::MUST_WRITE;

  const SCEV *AccessFunction = SE.getSCEVAtScope(Address, L);
  const SCEVUnknown *BasePointer =
      dyn_cast<SCEVUnknown>(SE.getPointerBase(AccessFunction));

  assert(BasePointer && "Could not find base pointer");
  AccessFunction = SE.getMinusSCEV(AccessFunction, BasePointer);

  // Check if the access depends on a loop contained in a non-affine subregion.
  bool isVariantInNonAffineLoop = false;
  SetVector<const Loop *> Loops;
  auto &BoxedLoops = scop->getBoxedLoops();
  findLoops(AccessFunction, Loops);
  for (const Loop *L : Loops)
    if (BoxedLoops.count(L))
      isVariantInNonAffineLoop = true;

  InvariantLoadsSetTy AccessILS;

  Loop *SurroundingLoop = getFirstNonBoxedLoopFor(L, LI, BoxedLoops);
  bool IsAffine = !isVariantInNonAffineLoop &&
                  isAffineExpr(&scop->getRegion(), SurroundingLoop,
                               AccessFunction, SE, &AccessILS);

  const InvariantLoadsSetTy &ScopRIL = scop->getRequiredInvariantLoads();
  for (LoadInst *LInst : AccessILS)
    if (!ScopRIL.count(LInst))
      IsAffine = false;

  if (!IsAffine && AccType == MemoryAccess::MUST_WRITE)
    AccType = MemoryAccess::MAY_WRITE;

  addArrayAccess(Inst, AccType, BasePointer->getValue(), ElementType, IsAffine,
                 {AccessFunction}, {nullptr}, Val);
}
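
// Dispatch to the matching access builder: memory intrinsics and calls are
// handled first, then multi-dimensional accesses with fixed or parametric
// sizes; the single-dimensional builder is the fallback.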
void ScopBuilder::buildMemoryAccess(MemAccInst Inst, Loop *L) {
  if (buildAccessMemIntrinsic(Inst, L))
    return;

  if (buildAccessCallInst(Inst, L))
    return;

  if (buildAccessMultiDimFixed(Inst, L))
    return;

  if (buildAccessMultiDimParam(Inst, L))
    return;

  buildAccessSingleDim(Inst, L);
}

void ScopBuilder::buildAccessFunctions(Region &SR) {
  if (scop->isNonAffineSubRegion(&SR)) {
    for (BasicBlock *BB : SR.blocks())
      buildAccessFunctions(*BB, &SR);
    return;
  }

  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
    if (I->isSubRegion())
      buildAccessFunctions(*I->getNodeAs<Region>());
    else
      buildAccessFunctions(*I->getNodeAs<BasicBlock>());
}

void ScopBuilder::buildStmts(Region &SR) {
  if (scop->isNonAffineSubRegion(&SR)) {
    scop->addScopStmt(&SR);
    return;
  }

  for (auto I = SR.element_begin(), E = SR.element_end(); I != E; ++I)
    if (I->isSubRegion())
      buildStmts(*I->getNodeAs<Region>());
    else
      scop->addScopStmt(I->getNodeAs<BasicBlock>());
}

void ScopBuilder::buildAccessFunctions(BasicBlock &BB,
                                       Region *NonAffineSubRegion,
                                       bool IsExitBlock) {
  // We do not build access functions for error blocks, as they may contain
  // instructions we cannot model.
  if (isErrorBlock(BB, scop->getRegion(), LI, DT) && !IsExitBlock)
    return;

  Loop *L = LI.getLoopFor(&BB);

  for (Instruction &Inst : BB) {
    PHINode *PHI = dyn_cast<PHINode>(&Inst);
    if (PHI)
      buildPHIAccesses(PHI, NonAffineSubRegion, IsExitBlock);

    // For the exit block we stop modeling after the last PHI node.
    if (!PHI && IsExitBlock)
      break;

    if (auto MemInst = MemAccInst::dyn_cast(Inst))
      buildMemoryAccess(MemInst, L);

    if (isIgnoredIntrinsic(&Inst))
      continue;

    // PHI nodes have already been modeled above and TerminatorInsts that are
    // not part of a non-affine subregion are fully modeled and regenerated
    // from the polyhedral domains. Hence, they do not need to be modeled as
    // explicit data dependences.
    if (!PHI && (!isa<TerminatorInst>(&Inst) || NonAffineSubRegion))
      buildScalarDependences(&Inst);

    if (!IsExitBlock)
      buildEscapingDependences(&Inst);
  }
}

MemoryAccess *ScopBuilder::addMemoryAccess(
    BasicBlock *BB, Instruction *Inst, MemoryAccess::AccessType AccType,
    Value *BaseAddress, Type *ElementType, bool Affine, Value *AccessValue,
    ArrayRef<const SCEV *> Subscripts, ArrayRef<const SCEV *> Sizes,
    MemoryKind Kind) {
  ScopStmt *Stmt = scop->getStmtFor(BB);

  // Do not create a memory access for anything not in the SCoP. It would be
  // ignored anyway.
  if (!Stmt)
    return nullptr;

  Value *BaseAddr = BaseAddress;
  std::string BaseName = getIslCompatibleName("MemRef_", BaseAddr, "");

  bool isKnownMustAccess = false;

  // Accesses in single-basic-block statements are always executed.
  if (Stmt->isBlockStmt())
    isKnownMustAccess = true;

  if (Stmt->isRegionStmt()) {
    // Accesses that dominate the exit block of a non-affine region are always
    // executed. In non-affine regions there may exist accesses that do not
    // dominate the exit: MemoryKind::Value writes always dominate the exit,
    // whereas MemoryKind::PHI writes do so only if there is at most one
    // PHI_WRITE in the non-affine region.
    if (DT.dominates(BB, Stmt->getRegion()->getExit()))
      isKnownMustAccess = true;
  }

  // Non-affine PHI writes do not "happen" at a particular instruction, but
  // after exiting the statement. Therefore they are guaranteed to execute and
  // overwrite the old value.
  if (Kind == MemoryKind::PHI || Kind == MemoryKind::ExitPHI)
    isKnownMustAccess = true;

  if (!isKnownMustAccess && AccType == MemoryAccess::MUST_WRITE)
    AccType = MemoryAccess::MAY_WRITE;

  auto *Access =
      new MemoryAccess(Stmt, Inst, AccType, BaseAddress, ElementType, Affine,
                       Subscripts, Sizes, AccessValue, Kind, BaseName);

  scop->addAccessFunction(Access);
  Stmt->addAccess(Access);
  return Access;
}

void ScopBuilder::addArrayAccess(
    MemAccInst MemAccInst, MemoryAccess::AccessType AccType, Value *BaseAddress,
    Type *ElementType, bool IsAffine, ArrayRef<const SCEV *> Subscripts,
    ArrayRef<const SCEV *> Sizes, Value *AccessValue) {
  ArrayBasePointers.insert(BaseAddress);
  addMemoryAccess(MemAccInst->getParent(), MemAccInst, AccType, BaseAddress,
                  ElementType, IsAffine, AccessValue, Subscripts, Sizes,
                  MemoryKind::Array);
}

void ScopBuilder::ensureValueWrite(Instruction *Inst) {
  ScopStmt *Stmt = scop->getStmtFor(Inst);

  // Inst not defined within this SCoP.
  if (!Stmt)
    return;

  // Do not process further if the instruction is already written.
  if (Stmt->lookupValueWriteOf(Inst))
    return;

  addMemoryAccess(Inst->getParent(), Inst, MemoryAccess::MUST_WRITE, Inst,
                  Inst->getType(), true, Inst, ArrayRef<const SCEV *>(),
                  ArrayRef<const SCEV *>(), MemoryKind::Value);
}

void ScopBuilder::ensureValueRead(Value *V, BasicBlock *UserBB) {
  // There cannot be an "access" for literal constants. BasicBlock references
  // (jump destinations) also never change.
  if ((isa<Constant>(V) && !isa<GlobalVariable>(V)) || isa<BasicBlock>(V))
    return;

  // If the instruction can be synthesized and the user is in the region we do
  // not need to add a value dependence.
  auto *Scope = LI.getLoopFor(UserBB);
  if (canSynthesize(V, *scop, &SE, Scope))
    return;

  // Do not build scalar dependences for required invariant loads as we will
  // hoist them later on anyway or drop the SCoP if we cannot.
  auto &ScopRIL = scop->getRequiredInvariantLoads();
  if (ScopRIL.count(dyn_cast<LoadInst>(V)))
    return;

  // Determine the ScopStmt containing the value's definition and use. There is
  // no defining ScopStmt if the value is a function argument, a global value,
  // or defined outside the SCoP.
  Instruction *ValueInst = dyn_cast<Instruction>(V);
  ScopStmt *ValueStmt = ValueInst ? scop->getStmtFor(ValueInst) : nullptr;

  ScopStmt *UserStmt = scop->getStmtFor(UserBB);

  // We do not model uses outside the scop.
  if (!UserStmt)
    return;

  // Add MemoryAccess for invariant values only if requested.
  if (!ModelReadOnlyScalars && !ValueStmt)
    return;

  // Ignore use-def chains within the same ScopStmt.
  if (ValueStmt == UserStmt)
    return;

  // Do not create another MemoryAccess for reloading the value if one already
  // exists.
  if (UserStmt->lookupValueReadOf(V))
    return;

  addMemoryAccess(UserBB, nullptr, MemoryAccess::READ, V, V->getType(), true, V,
                  ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
                  MemoryKind::Value);
  if (ValueInst)
    ensureValueWrite(ValueInst);
}

void ScopBuilder::ensurePHIWrite(PHINode *PHI, BasicBlock *IncomingBlock,
                                 Value *IncomingValue, bool IsExitBlock) {
  // As the incoming block might turn out to be an error statement, ensure we
  // will create an exit PHI SAI object. It is needed during code generation
  // and would be created later anyway.
  if (IsExitBlock)
    scop->getOrCreateScopArrayInfo(PHI, PHI->getType(), {},
                                   MemoryKind::ExitPHI);

  ScopStmt *IncomingStmt = scop->getStmtFor(IncomingBlock);
  if (!IncomingStmt)
    return;

  // Make sure the incoming value is available in the incoming block. This must
  // be done before the check for multiple PHI writes because each of multiple
  // exiting edges from a subregion can carry the effective written value of
  // the subregion. As such, all of them must be made available in the
  // subregion statement.
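  //
  // For example (hypothetical), if a non-affine subregion has two exiting
  // edges that both feed this PHI, either incoming value may be the one
  // effectively written by the subregion, so both must be available in the
  // subregion statement.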
  ensureValueRead(IncomingValue, IncomingBlock);

  // Do not add more than one MemoryAccess per PHINode and ScopStmt.
  if (MemoryAccess *Acc = IncomingStmt->lookupPHIWriteOf(PHI)) {
    assert(Acc->getAccessInstruction() == PHI);
    Acc->addIncoming(IncomingBlock, IncomingValue);
    return;
  }

  MemoryAccess *Acc =
      addMemoryAccess(IncomingStmt->getEntryBlock(), PHI,
                      MemoryAccess::MUST_WRITE, PHI, PHI->getType(), true, PHI,
                      ArrayRef<const SCEV *>(), ArrayRef<const SCEV *>(),
                      IsExitBlock ? MemoryKind::ExitPHI : MemoryKind::PHI);
  assert(Acc);
  Acc->addIncoming(IncomingBlock, IncomingValue);
}

void ScopBuilder::addPHIReadAccess(PHINode *PHI) {
  addMemoryAccess(PHI->getParent(), PHI, MemoryAccess::READ, PHI,
                  PHI->getType(), true, PHI, ArrayRef<const SCEV *>(),
                  ArrayRef<const SCEV *>(), MemoryKind::PHI);
}

void ScopBuilder::buildScop(Region &R) {
  scop.reset(new Scop(R, SE, LI, *SD.getDetectionContext(&R)));

  buildStmts(R);
  buildAccessFunctions(R);

  // In case the region does not have an exiting block we will later (during
  // code generation) split the exit block. This will move potential PHI nodes
  // from the current exit block into the new region exiting block, so PHI
  // nodes that are at this point not part of the region will then become part
  // of it. To handle these PHI nodes later, we now model their operands as
  // scalar accesses. Note that we do not model anything in the exit block if
  // we have an exiting block in the region, as there will not be any splitting
  // later.
  if (!scop->hasSingleExitEdge())
    buildAccessFunctions(*R.getExit(), nullptr,
                         /* IsExitBlock */ true);

  // Create memory accesses for global reads since all arrays are now known.
  auto *AF = SE.getConstant(IntegerType::getInt64Ty(SE.getContext()), 0);
  for (auto *GlobalRead : GlobalReads)
    for (auto *BP : ArrayBasePointers)
      addArrayAccess(MemAccInst(GlobalRead), MemoryAccess::READ, BP,
                     BP->getType(), false, {AF}, {nullptr}, GlobalRead);

  scop->init(AA, DT, LI);
}

ScopBuilder::ScopBuilder(Region *R, AliasAnalysis &AA, const DataLayout &DL,
                         DominatorTree &DT, LoopInfo &LI, ScopDetection &SD,
                         ScalarEvolution &SE)
    : AA(AA), DL(DL), DT(DT), LI(LI), SD(SD), SE(SE) {
  Function *F = R->getEntry()->getParent();

  DebugLoc Beg, End;
  getDebugLocations(getBBPairForRegion(R), Beg, End);
  std::string Msg = "SCoP begins here.";
  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, Beg, Msg);

  buildScop(*R);

  DEBUG(scop->print(dbgs()));

  if (!scop->hasFeasibleRuntimeContext()) {
    InfeasibleScops++;
    Msg = "SCoP ends here but was dismissed.";
    scop.reset();
  } else {
    Msg = "SCoP ends here.";
    ++ScopFound;
    if (scop->getMaxLoopDepth() > 0)
      ++RichScopFound;
  }

  emitOptimizationRemarkAnalysis(F->getContext(), DEBUG_TYPE, *F, End, Msg);
}