[ScopInfo] Never print instruction list of region stmts.
lib/Analysis/ScopInfo.cpp
//===--------- ScopInfo.cpp ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Create a polyhedral description for a static control flow region.
//
// The pass creates a polyhedral description of the Scops detected by the Scop
// detection derived from their LLVM-IR code.
//
// This representation is shared among several tools in the polyhedral
// community, which are e.g. Cloog, Pluto, Loopo, Graphite.
//
//===----------------------------------------------------------------------===//
#include "polly/ScopInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopBuilder.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLOStream.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/Debug.h"
#include "isl/aff.h"
#include "isl/constraint.h"
#include "isl/local_space.h"
#include "isl/map.h"
#include "isl/options.h"
#include "isl/printer.h"
#include "isl/schedule.h"
#include "isl/schedule_node.h"
#include "isl/set.h"
#include "isl/union_map.h"
#include "isl/union_set.h"
#include "isl/val.h"
#include <sstream>
#include <string>
#include <vector>

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scops"
STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
STATISTIC(AssumptionsInbounds, "Number of inbounds assumptions taken.");
STATISTIC(AssumptionsWrapping, "Number of wrapping assumptions taken.");
STATISTIC(AssumptionsUnsigned, "Number of unsigned assumptions taken.");
STATISTIC(AssumptionsComplexity, "Number of too complex SCoPs.");
STATISTIC(AssumptionsUnprofitable, "Number of unprofitable SCoPs.");
STATISTIC(AssumptionsErrorBlock, "Number of error block assumptions taken.");
STATISTIC(AssumptionsInfiniteLoop, "Number of bounded loop assumptions taken.");
STATISTIC(AssumptionsInvariantLoad,
          "Number of invariant loads assumptions taken.");
STATISTIC(AssumptionsDelinearization,
          "Number of delinearization assumptions taken.");

STATISTIC(NumLoopsInScop, "Number of loops in scops");
STATISTIC(NumScopsDepthOne, "Number of scops with maximal loop depth 1");
STATISTIC(NumScopsDepthTwo, "Number of scops with maximal loop depth 2");
STATISTIC(NumScopsDepthThree, "Number of scops with maximal loop depth 3");
STATISTIC(NumScopsDepthFour, "Number of scops with maximal loop depth 4");
STATISTIC(NumScopsDepthFive, "Number of scops with maximal loop depth 5");
STATISTIC(NumScopsDepthLarger,
          "Number of scops with maximal loop depth 6 and larger");
STATISTIC(MaxNumLoopsInScop, "Maximal number of loops in scops");
// The maximal number of basic sets we allow during domain construction to
// be created. More complex scops will result in very high compile time and
// are also unlikely to result in good code.
static int const MaxDisjunctsInDomain = 20;

// The number of disjuncts in the context after which we stop to add more
// disjuncts. This parameter is there to avoid exponential growth in the
// number of disjuncts when adding non-convex sets to the context.
static int const MaxDisjunctsInContext = 4;

// The maximal number of dimensions we allow during invariant load
// construction. More complex access ranges will result in very high compile
// time and are also unlikely to result in good code. This value is very high
// and should only trigger for corner cases (e.g., the "dct_luma" function in
// h264, SPEC2006).
static int const MaxDimensionsInAccessRange = 9;
static cl::opt<int>
    OptComputeOut("polly-analysis-computeout",
                  cl::desc("Bound the scop analysis by a maximal amount of "
                           "computational steps (0 means no bound)"),
                  cl::Hidden, cl::init(800000), cl::ZeroOrMore,
                  cl::cat(PollyCategory));

static cl::opt<bool> PollyRemarksMinimal(
    "polly-remarks-minimal",
    cl::desc("Do not emit remarks about assumptions that are known"),
    cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));

// Multiplicative reductions can be disabled separately as these kinds of
// operations can overflow easily. Additive reductions and bit operations
// are in contrast pretty stable.
static cl::opt<bool> DisableMultiplicativeReductions(
    "polly-disable-multiplicative-reductions",
    cl::desc("Disable multiplicative reductions"), cl::Hidden, cl::ZeroOrMore,
    cl::init(false), cl::cat(PollyCategory));

static cl::opt<int> RunTimeChecksMaxAccessDisjuncts(
    "polly-rtc-max-array-disjuncts",
    cl::desc("The maximal number of disjuncts allowed in memory accesses to "
             "build RTCs."),
    cl::Hidden, cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));
static cl::opt<unsigned> RunTimeChecksMaxParameters(
    "polly-rtc-max-parameters",
    cl::desc("The maximal number of parameters allowed in RTCs."), cl::Hidden,
    cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));

static cl::opt<unsigned> RunTimeChecksMaxArraysPerGroup(
    "polly-rtc-max-arrays-per-group",
    cl::desc("The maximal number of arrays to compare in each alias group."),
    cl::Hidden, cl::ZeroOrMore, cl::init(20), cl::cat(PollyCategory));

static cl::opt<std::string> UserContextStr(
    "polly-context", cl::value_desc("isl parameter set"),
    cl::desc("Provide additional constraints on the context parameters"),
    cl::init(""), cl::cat(PollyCategory));

static cl::opt<bool> DetectReductions("polly-detect-reductions",
                                      cl::desc("Detect and exploit reductions"),
                                      cl::Hidden, cl::ZeroOrMore,
                                      cl::init(true), cl::cat(PollyCategory));

static cl::opt<bool>
    IslOnErrorAbort("polly-on-isl-error-abort",
                    cl::desc("Abort if an isl error is encountered"),
                    cl::init(true), cl::cat(PollyCategory));

static cl::opt<bool> PollyPreciseInbounds(
    "polly-precise-inbounds",
    cl::desc("Take more precise inbounds assumptions (do not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool>
    PollyIgnoreInbounds("polly-ignore-inbounds",
                        cl::desc("Do not take inbounds assumptions at all"),
                        cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool> PollyIgnoreParamBounds(
    "polly-ignore-parameter-bounds",
    cl::desc(
        "Do not add parameter bounds and do not gist-simplify sets accordingly"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool> PollyPreciseFoldAccesses(
    "polly-precise-fold-accesses",
    cl::desc("Fold memory accesses to model more possible delinearizations "
             "(does not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

bool polly::UseInstructionNames;
static cl::opt<bool, true> XUseInstructionNames(
    "polly-use-llvm-names",
    cl::desc("Use LLVM-IR names when deriving statement names"),
    cl::location(UseInstructionNames), cl::Hidden, cl::init(false),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> PollyPrintInstructions(
    "polly-print-instructions", cl::desc("Output instructions per ScopStmt"),
    cl::Hidden, cl::Optional, cl::init(false), cl::cat(PollyCategory));

//===----------------------------------------------------------------------===//
// Create a sequence of two schedules. Either argument may be null and is
// interpreted as the empty schedule. Can also return null if both schedules
// are empty.
static __isl_give isl_schedule *
combineInSequence(__isl_take isl_schedule *Prev,
                  __isl_take isl_schedule *Succ) {
  if (!Prev)
    return Succ;
  if (!Succ)
    return Prev;

  return isl_schedule_sequence(Prev, Succ);
}

static isl::set addRangeBoundsToSet(isl::set S, const ConstantRange &Range,
                                    int dim, isl::dim type) {
  isl::val V;
  isl::ctx Ctx = S.get_ctx();

  // The upper and lower bound for a parameter value is derived either from
  // the data type of the parameter or from the - possibly more restrictive -
  // range metadata.
  V = valFromAPInt(Ctx.get(), Range.getSignedMin(), true);
  S = S.lower_bound_val(type, dim, V);
  V = valFromAPInt(Ctx.get(), Range.getSignedMax(), true);
  S = S.upper_bound_val(type, dim, V);

  if (Range.isFullSet())
    return S;

  if (isl_set_n_basic_set(S.get()) > MaxDisjunctsInContext)
    return S;

  // In case of signed wrapping, we can refine the set of valid values by
  // excluding the part not covered by the wrapping range.
  if (Range.isSignWrappedSet()) {
    V = valFromAPInt(Ctx.get(), Range.getLower(), true);
    isl::set SLB = S.lower_bound_val(type, dim, V);

    V = valFromAPInt(Ctx.get(), Range.getUpper(), true);
    V = V.sub_ui(1);
    isl::set SUB = S.upper_bound_val(type, dim, V);
    S = SLB.unite(SUB);
  }

  return S;
}
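
// Illustrative example (added for exposition, not part of the original
// source): for an i8 parameter p whose range metadata is the sign-wrapped
// range [100, -100) == [100, 156), the bounds above first give the
// type-derived -128 <= p <= 127; the refinement then keeps only
// { p : p >= 100 } unioned with { p : p <= -101 }, i.e. exactly the values
// covered by the wrapping range.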
static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
  LoadInst *BasePtrLI = dyn_cast<LoadInst>(BasePtr);
  if (!BasePtrLI)
    return nullptr;

  if (!S->contains(BasePtrLI))
    return nullptr;

  ScalarEvolution &SE = *S->getSE();

  auto *OriginBaseSCEV =
      SE.getPointerBase(SE.getSCEV(BasePtrLI->getPointerOperand()));
  if (!OriginBaseSCEV)
    return nullptr;

  auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(OriginBaseSCEV);
  if (!OriginBaseSCEVUnknown)
    return nullptr;

  return S->getScopArrayInfo(OriginBaseSCEVUnknown->getValue(),
                             MemoryKind::Array);
}

ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl_ctx *Ctx,
                             ArrayRef<const SCEV *> Sizes, MemoryKind Kind,
                             const DataLayout &DL, Scop *S,
                             const char *BaseName)
    : BasePtr(BasePtr), ElementType(ElementType), IsOnHeap(false), Kind(Kind),
      DL(DL), S(*S), FAD(nullptr) {
  std::string BasePtrName =
      BaseName ? BaseName
               : getIslCompatibleName("MemRef", BasePtr, S->getNextArrayIdx(),
                                      Kind == MemoryKind::PHI ? "__phi" : "",
                                      UseInstructionNames);
  Id = isl::id::alloc(Ctx, BasePtrName.c_str(), this);

  updateSizes(Sizes);

  if (!BasePtr || Kind != MemoryKind::Array) {
    BasePtrOriginSAI = nullptr;
    return;
  }

  BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
  if (BasePtrOriginSAI)
    const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(this);
}
isl::space ScopArrayInfo::getSpace() const {
  auto Space = isl::space(Id.get_ctx(), 0, getNumberOfDimensions());
  Space = Space.set_tuple_id(isl::dim::set, Id);
  return Space;
}

bool ScopArrayInfo::isReadOnly() {
  isl::union_set WriteSet = give(S.getWrites()).range();
  isl::space Space = getSpace();
  WriteSet = WriteSet.extract_set(Space);

  return bool(WriteSet.is_empty());
}

bool ScopArrayInfo::isCompatibleWith(const ScopArrayInfo *Array) const {
  if (Array->getElementType() != getElementType())
    return false;

  if (Array->getNumberOfDimensions() != getNumberOfDimensions())
    return false;

  for (unsigned i = 0; i < getNumberOfDimensions(); i++)
    if (Array->getDimensionSize(i) != getDimensionSize(i))
      return false;

  return true;
}

void ScopArrayInfo::updateElementType(Type *NewElementType) {
  if (NewElementType == ElementType)
    return;

  auto OldElementSize = DL.getTypeAllocSizeInBits(ElementType);
  auto NewElementSize = DL.getTypeAllocSizeInBits(NewElementType);

  if (NewElementSize == OldElementSize || NewElementSize == 0)
    return;

  if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
    ElementType = NewElementType;
  } else {
    auto GCD = GreatestCommonDivisor64(NewElementSize, OldElementSize);
    ElementType = IntegerType::get(ElementType->getContext(), GCD);
  }
}
/// Make the ScopArrayInfo model a Fortran array.
void ScopArrayInfo::applyAndSetFAD(Value *FAD) {
  assert(FAD && "got invalid Fortran array descriptor");
  if (this->FAD) {
    assert(this->FAD == FAD &&
           "receiving different array descriptors for same array");
    return;
  }

  assert(DimensionSizesPw.size() > 0 && !DimensionSizesPw[0]);
  assert(!this->FAD);
  this->FAD = FAD;

  isl::space Space(S.getIslCtx(), 1, 0);

  std::string param_name = getName();
  param_name += "_fortranarr_size";
  // TODO: see if we need to add `this` as the id user pointer.
  isl::id IdPwAff = isl::id::alloc(S.getIslCtx(), param_name.c_str(), nullptr);

  Space = Space.set_dim_id(isl::dim::param, 0, IdPwAff);
  isl::pw_aff PwAff =
      isl::aff::var_on_domain(isl::local_space(Space), isl::dim::param, 0);

  DimensionSizesPw[0] = PwAff;
}
bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
                                bool CheckConsistency) {
  int SharedDims = std::min(NewSizes.size(), DimensionSizes.size());
  int ExtraDimsNew = NewSizes.size() - SharedDims;
  int ExtraDimsOld = DimensionSizes.size() - SharedDims;

  if (CheckConsistency) {
    for (int i = 0; i < SharedDims; i++) {
      auto *NewSize = NewSizes[i + ExtraDimsNew];
      auto *KnownSize = DimensionSizes[i + ExtraDimsOld];
      if (NewSize && KnownSize && NewSize != KnownSize)
        return false;
    }

    if (DimensionSizes.size() >= NewSizes.size())
      return true;
  }

  DimensionSizes.clear();
  DimensionSizes.insert(DimensionSizes.begin(), NewSizes.begin(),
                        NewSizes.end());
  DimensionSizesPw.clear();
  for (const SCEV *Expr : DimensionSizes) {
    if (!Expr) {
      DimensionSizesPw.push_back(nullptr);
      continue;
    }
    isl::pw_aff Size = isl::manage(S.getPwAffOnly(Expr));
    DimensionSizesPw.push_back(Size);
  }
  return true;
}

ScopArrayInfo::~ScopArrayInfo() {}

std::string ScopArrayInfo::getName() const { return Id.get_name(); }

int ScopArrayInfo::getElemSizeInBytes() const {
  return DL.getTypeAllocSize(ElementType);
}

isl::id ScopArrayInfo::getBasePtrId() const { return Id; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScopArrayInfo::dump() const { print(errs()); }
#endif
void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
  OS.indent(8) << *getElementType() << " " << getName();
  unsigned u = 0;
  // If this is a Fortran array, then we can print the outermost dimension
  // as an isl_pw_aff even though there is no SCEV information.
  bool IsOutermostSizeKnown = SizeAsPwAff && FAD;

  if (!IsOutermostSizeKnown && getNumberOfDimensions() > 0 &&
      !getDimensionSize(0)) {
    OS << "[*]";
    u++;
  }
  for (; u < getNumberOfDimensions(); u++) {
    OS << "[";

    if (SizeAsPwAff) {
      isl::pw_aff Size = getDimensionSizePw(u);
      OS << " " << Size << " ";
    } else {
      OS << *getDimensionSize(u);
    }

    OS << "]";
  }

  OS << ";";

  if (BasePtrOriginSAI)
    OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";

  OS << " // Element size " << getElemSizeInBytes() << "\n";
}

const ScopArrayInfo *
ScopArrayInfo::getFromAccessFunction(isl::pw_multi_aff PMA) {
  isl::id Id = PMA.get_tuple_id(isl::dim::out);
  assert(!Id.is_null() && "Output dimension didn't have an ID");
  return getFromId(Id);
}

const ScopArrayInfo *ScopArrayInfo::getFromId(isl::id Id) {
  void *User = Id.get_user();
  const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
  return SAI;
}
void MemoryAccess::wrapConstantDimensions() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::ctx Ctx = ArraySpace.get_ctx();
  unsigned DimsArray = SAI->getNumberOfDimensions();

  isl::multi_aff DivModAff = isl::multi_aff::identity(
      ArraySpace.map_from_domain_and_range(ArraySpace));
  isl::local_space LArraySpace = isl::local_space(ArraySpace);

  // Begin with the last dimension, to iteratively carry into higher
  // dimensions.
  for (int i = DimsArray - 1; i > 0; i--) {
    auto *DimSize = SAI->getDimensionSize(i);
    auto *DimSizeCst = dyn_cast<SCEVConstant>(DimSize);

    // This transformation is not applicable to dimensions with dynamic size.
    if (!DimSizeCst)
      continue;

    // This transformation is not applicable to dimensions of size zero.
    if (DimSize->isZero())
      continue;

    isl::val DimSizeVal =
        valFromAPInt(Ctx.get(), DimSizeCst->getAPInt(), false);
    isl::aff Var = isl::aff::var_on_domain(LArraySpace, isl::dim::set, i);
    isl::aff PrevVar =
        isl::aff::var_on_domain(LArraySpace, isl::dim::set, i - 1);

    // Compute: index % size
    // Modulo must apply in the divide of the previous iteration, if any.
    isl::aff Modulo = Var.mod_val(DimSizeVal);
    Modulo = Modulo.pullback(DivModAff);

    // Compute: floor(index / size)
    isl::aff Divide = Var.div(isl::aff(LArraySpace, DimSizeVal));
    Divide = Divide.floor();
    Divide = Divide.add(PrevVar);
    Divide = Divide.pullback(DivModAff);

    // Apply Modulo and Divide.
    DivModAff = DivModAff.set_aff(i, Modulo);
    DivModAff = DivModAff.set_aff(i - 1, Divide);
  }

  // Apply all modulo/divides on the accesses.
  isl::map Relation = AccessRelation;
  Relation = Relation.apply_range(isl::map::from_multi_aff(DivModAff));
  Relation = Relation.detect_equalities();
  AccessRelation = Relation;
}
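
// Illustrative example (added for exposition, not part of the original
// source): for an array float A[*][16], an access relation
// { Stmt[i] -> MemRef_A[0][i + 20] } is rewritten by the mod/div pairs above
// to { Stmt[i] -> MemRef_A[floor((i + 20) / 16)][(i + 20) mod 16] }, carrying
// the overflow of the inner subscript into the next outer dimension.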
void MemoryAccess::updateDimensionality() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::space AccessSpace = AccessRelation.get_space().range();
  isl::ctx Ctx = ArraySpace.get_ctx();

  auto DimsArray = ArraySpace.dim(isl::dim::set);
  auto DimsAccess = AccessSpace.dim(isl::dim::set);
  auto DimsMissing = DimsArray - DimsAccess;

  auto *BB = getStatement()->getEntryBlock();
  auto &DL = BB->getModule()->getDataLayout();
  unsigned ArrayElemSize = SAI->getElemSizeInBytes();
  unsigned ElemBytes = DL.getTypeAllocSize(getElementType());

  isl::map Map = isl::map::from_domain_and_range(
      isl::set::universe(AccessSpace), isl::set::universe(ArraySpace));

  for (unsigned i = 0; i < DimsMissing; i++)
    Map = Map.fix_si(isl::dim::out, i, 0);

  for (unsigned i = DimsMissing; i < DimsArray; i++)
    Map = Map.equate(isl::dim::in, i - DimsMissing, isl::dim::out, i);

  AccessRelation = AccessRelation.apply_range(Map);

  // For the non-delinearized arrays, divide the access function of the last
  // subscript by the size of the elements in the array.
  //
  // A stride-one array access in C expressed as A[i] is expressed in
  // LLVM-IR as something like A[i * elementsize]. This hides the fact that
  // two subsequent values of 'i' index two values that are stored next to
  // each other in memory. By this division we make this characteristic
  // obvious again. If the base pointer was accessed with offsets not
  // divisible by the accesses' element size, we will have chosen a smaller
  // ArrayElemSize that divides the offsets of all accesses to this base
  // pointer.
  if (DimsAccess == 1) {
    isl::val V = isl::val(Ctx, ArrayElemSize);
    AccessRelation = AccessRelation.floordiv_val(V);
  }

  // We currently do this only if we added at least one dimension, which means
  // some dimension's indices have not been specified, an indicator that some
  // index values have been added together.
  // TODO: Investigate general usefulness; the effect on unit tests is to make
  // index expressions more complicated.
  if (DimsMissing)
    wrapConstantDimensions();

  if (!isAffine())
    computeBoundsOnAccessRelation(ArrayElemSize);

  // Introduce multi-element accesses in case the type loaded by this memory
  // access is larger than the canonical element type of the array.
  //
  // An access ((float *)A)[i] to an array char *A is modeled as
  // { [i] -> A[o] : 4 i <= o <= 4 i + 3 }
  if (ElemBytes > ArrayElemSize) {
    assert(ElemBytes % ArrayElemSize == 0 &&
           "Loaded element size should be multiple of canonical element size");
    isl::map Map = isl::map::from_domain_and_range(
        isl::set::universe(ArraySpace), isl::set::universe(ArraySpace));
    for (unsigned i = 0; i < DimsArray - 1; i++)
      Map = Map.equate(isl::dim::in, i, isl::dim::out, i);

    isl::constraint C;
    isl::local_space LS;

    LS = isl::local_space(Map.get_space());
    int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();

    C = isl::constraint::alloc_inequality(LS);
    C = C.set_constant_val(isl::val(Ctx, Num - 1));
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, 1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, -1);
    Map = Map.add_constraint(C);

    C = isl::constraint::alloc_inequality(LS);
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, -1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, 1);
    C = C.set_constant_val(isl::val(Ctx, 0));
    Map = Map.add_constraint(C);
    AccessRelation = AccessRelation.apply_range(Map);
  }
}
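
// Illustrative example (added for exposition, not part of the original
// source): if the array is otherwise known to be two-dimensional but this
// access was modeled with a single subscript, the dimension-matching map at
// the top of updateDimensionality() pads the missing outer dimension with
// zero, turning { Stmt[i] -> MemRef_A[o] } into { Stmt[i] -> MemRef_A[0][o] }.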
const std::string
MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
  switch (RT) {
  case MemoryAccess::RT_NONE:
    llvm_unreachable("Requested a reduction operator string for a memory "
                     "access which isn't a reduction");
  case MemoryAccess::RT_ADD:
    return "+";
  case MemoryAccess::RT_MUL:
    return "*";
  case MemoryAccess::RT_BOR:
    return "|";
  case MemoryAccess::RT_BXOR:
    return "^";
  case MemoryAccess::RT_BAND:
    return "&";
  }
  llvm_unreachable("Unknown reduction type");
  return "";
}

/// Return the reduction type for a given binary operator.
static MemoryAccess::ReductionType getReductionType(const BinaryOperator *BinOp,
                                                    const Instruction *Load) {
  if (!BinOp)
    return MemoryAccess::RT_NONE;
  switch (BinOp->getOpcode()) {
  case Instruction::FAdd:
    if (!BinOp->hasUnsafeAlgebra())
      return MemoryAccess::RT_NONE;
    // Fall through
  case Instruction::Add:
    return MemoryAccess::RT_ADD;
  case Instruction::Or:
    return MemoryAccess::RT_BOR;
  case Instruction::Xor:
    return MemoryAccess::RT_BXOR;
  case Instruction::And:
    return MemoryAccess::RT_BAND;
  case Instruction::FMul:
    if (!BinOp->hasUnsafeAlgebra())
      return MemoryAccess::RT_NONE;
    // Fall through
  case Instruction::Mul:
    if (DisableMultiplicativeReductions)
      return MemoryAccess::RT_NONE;
    return MemoryAccess::RT_MUL;
  default:
    return MemoryAccess::RT_NONE;
  }
}
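
// Illustrative example (added for exposition, not part of the original
// source): for the loop body 'A[i] = A[i] + x' the stored value is an
// integer Add, so getReductionType() returns RT_ADD; an FMul without
// unsafe-algebra fast-math flags yields RT_NONE because the multiplications
// must not be reordered.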
MemoryAccess::~MemoryAccess() {}

const ScopArrayInfo *MemoryAccess::getOriginalScopArrayInfo() const {
  isl::id ArrayId = getArrayId();
  void *User = ArrayId.get_user();
  const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
  return SAI;
}

const ScopArrayInfo *MemoryAccess::getLatestScopArrayInfo() const {
  isl::id ArrayId = getLatestArrayId();
  void *User = ArrayId.get_user();
  const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
  return SAI;
}

isl::id MemoryAccess::getOriginalArrayId() const {
  return AccessRelation.get_tuple_id(isl::dim::out);
}

isl::id MemoryAccess::getLatestArrayId() const {
  if (!hasNewAccessRelation())
    return getOriginalArrayId();
  return NewAccessRelation.get_tuple_id(isl::dim::out);
}

isl::map MemoryAccess::getAddressFunction() const {
  return getAccessRelation().lexmin();
}

isl::pw_multi_aff
MemoryAccess::applyScheduleToAccessRelation(isl::union_map USchedule) const {
  isl::map Schedule, ScheduledAccRel;
  isl::union_set UDomain;

  UDomain = isl::manage(getStatement()->getDomain());
  USchedule = USchedule.intersect_domain(UDomain);
  Schedule = isl::map::from_union_map(USchedule);
  ScheduledAccRel = getAddressFunction().apply_domain(Schedule);
  return isl::pw_multi_aff::from_map(ScheduledAccRel);
}

isl::map MemoryAccess::getOriginalAccessRelation() const {
  return AccessRelation;
}

std::string MemoryAccess::getOriginalAccessRelationStr() const {
  return stringFromIslObj(AccessRelation.get());
}

isl::space MemoryAccess::getOriginalAccessRelationSpace() const {
  return AccessRelation.get_space();
}

isl::map MemoryAccess::getNewAccessRelation() const {
  return NewAccessRelation;
}

std::string MemoryAccess::getNewAccessRelationStr() const {
  return stringFromIslObj(NewAccessRelation.get());
}

std::string MemoryAccess::getAccessRelationStr() const {
  return isl::manage(getAccessRelation().get()).to_str();
}

isl::basic_map MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
  isl::space Space = isl::space(Statement->getIslCtx(), 0, 1);
  Space = Space.align_params(isl::manage(Statement->getDomainSpace()));

  return isl::basic_map::from_domain_and_range(
      isl::basic_set::universe(isl::manage(Statement->getDomainSpace())),
      isl::basic_set::universe(Space));
}
// Formalize the no out-of-bound access assumption
//
// When delinearizing array accesses we optimistically assume that the
// delinearized accesses do not access out-of-bound locations (the subscript
// expression of each array evaluates for each statement instance that is
// executed to a value that is non-negative and strictly smaller than the
// size of the corresponding dimension). The only exception is the outermost
// dimension for which we do not need to assume any upper bound. At this point
// we formalize this assumption to ensure that at code generation time the
// relevant run-time checks can be generated.
//
// To find the set of constraints necessary to avoid out-of-bound accesses, we
// first build the set of data locations that are not within array bounds. We
// then apply the reverse access relation to obtain the set of iterations that
// may contain invalid accesses and reduce this set of iterations to the ones
// that are actually executed by intersecting them with the domain of the
// statement. If we now project out all loop dimensions, we obtain a set of
// parameters that may cause statement instances to be executed that may
// possibly yield out-of-bound memory accesses. The complement of these
// constraints is the set of constraints that needs to be assumed to ensure
// such statement instances are never executed.
void MemoryAccess::assumeNoOutOfBound() {
  if (PollyIgnoreInbounds)
    return;
  auto *SAI = getScopArrayInfo();
  isl::space Space = getOriginalAccessRelationSpace().range();
  isl::set Outside = isl::set::empty(Space);
  for (int i = 1, Size = Space.dim(isl::dim::set); i < Size; ++i) {
    isl::local_space LS(Space);
    isl::pw_aff Var = isl::pw_aff::var_on_domain(LS, isl::dim::set, i);
    isl::pw_aff Zero = isl::pw_aff(LS);

    isl::set DimOutside = Var.lt_set(Zero);
    isl::pw_aff SizeE = SAI->getDimensionSizePw(i);
    SizeE = SizeE.add_dims(isl::dim::in, Space.dim(isl::dim::set));
    SizeE = SizeE.set_tuple_id(isl::dim::in, Space.get_tuple_id(isl::dim::set));
    DimOutside = DimOutside.unite(SizeE.le_set(Var));

    Outside = Outside.unite(DimOutside);
  }

  Outside = Outside.apply(getAccessRelation().reverse());
  Outside = Outside.intersect(give(Statement->getDomain()));
  Outside = Outside.params();

  // Remove divs to avoid the construction of overly complicated assumptions.
  // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time
  // check bail out more often than strictly necessary.
  Outside = Outside.remove_divs();
  Outside = Outside.complement();
  const auto &Loc = getAccessInstruction()
                        ? getAccessInstruction()->getDebugLoc()
                        : DebugLoc();
  if (!PollyPreciseInbounds)
    Outside = Outside.gist_params(give(Statement->getDomain()).params());
  Statement->getParent()->recordAssumption(INBOUNDS, Outside.release(), Loc,
                                           AS_ASSUMPTION);
}
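
// Illustrative example (added for exposition, not part of the original
// source): for a delinearized access { Stmt[i, j] -> MemRef_A[i][j] } to an
// array A[*][m] with domain { Stmt[i, j] : 0 <= i < n and 0 <= j < n }, the
// locations outside the array bounds are { MemRef_A[i][j] : j < 0 or j >= m }.
// Pulled back through the access relation, intersected with the domain, and
// projected onto the parameters, this yields the recorded assumption
// { [n, m] : n <= m or n <= 0 }, i.e. no executed instance may index past
// the innermost dimension.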
void MemoryAccess::buildMemIntrinsicAccessRelation() {
  assert(isMemoryIntrinsic());
  assert(Subscripts.size() == 2 && Sizes.size() == 1);

  isl::pw_aff SubscriptPWA = getPwAff(Subscripts[0]);
  isl::map SubscriptMap = isl::map::from_pw_aff(SubscriptPWA);

  isl::map LengthMap;
  if (Subscripts[1] == nullptr) {
    LengthMap = isl::map::universe(SubscriptMap.get_space());
  } else {
    isl::pw_aff LengthPWA = getPwAff(Subscripts[1]);
    LengthMap = isl::map::from_pw_aff(LengthPWA);
    isl::space RangeSpace = LengthMap.get_space().range();
    LengthMap = LengthMap.apply_range(isl::map::lex_gt(RangeSpace));
  }
  LengthMap = LengthMap.lower_bound_si(isl::dim::out, 0, 0);
  LengthMap = LengthMap.align_params(SubscriptMap.get_space());
  SubscriptMap = SubscriptMap.align_params(LengthMap.get_space());
  LengthMap = LengthMap.sum(SubscriptMap);
  AccessRelation =
      LengthMap.set_tuple_id(isl::dim::in, give(getStatement()->getDomainId()));
}

void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
  ScalarEvolution *SE = Statement->getParent()->getSE();

  auto MAI = MemAccInst(getAccessInstruction());
  if (isa<MemIntrinsic>(MAI))
    return;

  Value *Ptr = MAI.getPointerOperand();
  if (!Ptr || !SE->isSCEVable(Ptr->getType()))
    return;

  auto *PtrSCEV = SE->getSCEV(Ptr);
  if (isa<SCEVCouldNotCompute>(PtrSCEV))
    return;

  auto *BasePtrSCEV = SE->getPointerBase(PtrSCEV);
  if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(BasePtrSCEV))
    PtrSCEV = SE->getMinusSCEV(PtrSCEV, BasePtrSCEV);

  const ConstantRange &Range = SE->getSignedRange(PtrSCEV);
  if (Range.isFullSet())
    return;

  if (Range.isWrappedSet() || Range.isSignWrappedSet())
    return;

  bool isWrapping = Range.isSignWrappedSet();

  unsigned BW = Range.getBitWidth();
  const auto One = APInt(BW, 1);
  const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
  const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();

  auto Min = LB.sdiv(APInt(BW, ElementSize));
  auto Max = UB.sdiv(APInt(BW, ElementSize)) + One;

  assert(Min.sle(Max) && "Minimum expected to be less or equal than max");

  isl::map Relation = AccessRelation;
  isl::set AccessRange = Relation.range();
  AccessRange = addRangeBoundsToSet(AccessRange, ConstantRange(Min, Max), 0,
                                    isl::dim::set);
  AccessRelation = Relation.intersect_range(AccessRange);
}
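
// Illustrative example (added for exposition, not part of the original
// source): if ScalarEvolution can show that the byte offset of a non-affine
// access lies in [0, 400) and the element size is 4 bytes, the range of the
// overapproximated access relation is narrowed to the indices [0, 99], a
// much tighter set than the whole array.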
void MemoryAccess::foldAccessRelation() {
  if (Sizes.size() < 2 || isa<SCEVConstant>(Sizes[1]))
    return;

  int Size = Subscripts.size();

  isl::map NewAccessRelation = AccessRelation;

  for (int i = Size - 2; i >= 0; --i) {
    isl::space Space;
    isl::map MapOne, MapTwo;
    isl::pw_aff DimSize = getPwAff(Sizes[i + 1]);

    isl::space SpaceSize = DimSize.get_space();
    isl::id ParamId =
        give(isl_space_get_dim_id(SpaceSize.get(), isl_dim_param, 0));

    Space = AccessRelation.get_space();
    Space = Space.range().map_from_set();
    Space = Space.align_params(SpaceSize);

    int ParamLocation = Space.find_dim_by_id(isl::dim::param, ParamId);

    MapOne = isl::map::universe(Space);
    for (int j = 0; j < Size; ++j)
      MapOne = MapOne.equate(isl::dim::in, j, isl::dim::out, j);
    MapOne = MapOne.lower_bound_si(isl::dim::in, i + 1, 0);

    MapTwo = isl::map::universe(Space);
    for (int j = 0; j < Size; ++j)
      if (j < i || j > i + 1)
        MapTwo = MapTwo.equate(isl::dim::in, j, isl::dim::out, j);

    isl::local_space LS(Space);
    isl::constraint C;
    C = isl::constraint::alloc_equality(LS);
    C = C.set_constant_si(-1);
    C = C.set_coefficient_si(isl::dim::in, i, 1);
    C = C.set_coefficient_si(isl::dim::out, i, -1);
    MapTwo = MapTwo.add_constraint(C);
    C = isl::constraint::alloc_equality(LS);
    C = C.set_coefficient_si(isl::dim::in, i + 1, 1);
    C = C.set_coefficient_si(isl::dim::out, i + 1, -1);
    C = C.set_coefficient_si(isl::dim::param, ParamLocation, 1);
    MapTwo = MapTwo.add_constraint(C);
    MapTwo = MapTwo.upper_bound_si(isl::dim::in, i + 1, -1);

    MapOne = MapOne.unite(MapTwo);
    NewAccessRelation = NewAccessRelation.apply_range(MapOne);
  }

  isl::id BaseAddrId = getScopArrayInfo()->getBasePtrId();
  isl::space Space = give(Statement->getDomainSpace());
  NewAccessRelation = NewAccessRelation.set_tuple_id(
      isl::dim::in, Space.get_tuple_id(isl::dim::set));
  NewAccessRelation = NewAccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
  NewAccessRelation =
      NewAccessRelation.gist_domain(give(Statement->getDomain()));

  // Access dimension folding might in certain cases increase the number of
  // disjuncts in the memory access, which can possibly complicate the
  // generated run-time checks and can lead to costly compilation.
  if (!PollyPreciseFoldAccesses &&
      isl_map_n_basic_map(NewAccessRelation.get()) >
          isl_map_n_basic_map(AccessRelation.get())) {
  } else {
    AccessRelation = NewAccessRelation;
  }
}
/// Check if @p Expr is divisible by @p Size.
static bool isDivisible(const SCEV *Expr, unsigned Size, ScalarEvolution &SE) {
  assert(Size != 0);
  if (Size == 1)
    return true;

  // Only one factor needs to be divisible.
  if (auto *MulExpr = dyn_cast<SCEVMulExpr>(Expr)) {
    for (auto *FactorExpr : MulExpr->operands())
      if (isDivisible(FactorExpr, Size, SE))
        return true;
    return false;
  }

  // For other n-ary expressions (Add, AddRec, Max,...) all operands need
  // to be divisible.
  if (auto *NAryExpr = dyn_cast<SCEVNAryExpr>(Expr)) {
    for (auto *OpExpr : NAryExpr->operands())
      if (!isDivisible(OpExpr, Size, SE))
        return false;
    return true;
  }

  auto *SizeSCEV = SE.getConstant(Expr->getType(), Size);
  auto *UDivSCEV = SE.getUDivExpr(Expr, SizeSCEV);
  auto *MulSCEV = SE.getMulExpr(UDivSCEV, SizeSCEV);
  return MulSCEV == Expr;
}
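
// Illustrative example (added for exposition, not part of the original
// source): isDivisible({4*i + 8*j}, 4, SE) holds because every operand of
// the Add is itself divisible by 4, whereas {4*i + 2} is rejected since the
// constant operand 2 is not.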
void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
  assert(AccessRelation.is_null() && "AccessRelation already built");

  // Initialize the invalid domain which describes all iterations for which
  // the access relation is not modeled correctly.
  isl::set StmtInvalidDomain = isl::manage(getStatement()->getInvalidDomain());
  InvalidDomain = isl::set::empty(StmtInvalidDomain.get_space());

  isl::ctx Ctx = Id.get_ctx();
  isl::id BaseAddrId = SAI->getBasePtrId();

  if (getAccessInstruction() && isa<MemIntrinsic>(getAccessInstruction())) {
    buildMemIntrinsicAccessRelation();
    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  if (!isAffine()) {
    // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference if an
    // access must or may happen. However, for write accesses it is important
    // to differentiate between writes that must happen and writes that may
    // happen.
    if (AccessRelation.is_null())
      AccessRelation = createBasicAccessMap(Statement);

    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  isl::space Space = isl::space(Ctx, 0, Statement->getNumIterators(), 0);
  AccessRelation = isl::map::universe(Space);

  for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
    isl::pw_aff Affine = getPwAff(Subscripts[i]);
    isl::map SubscriptMap = isl::map::from_pw_aff(Affine);
    AccessRelation = AccessRelation.flat_range_product(SubscriptMap);
  }

  Space = isl::manage(Statement->getDomainSpace());
  AccessRelation = AccessRelation.set_tuple_id(
      isl::dim::in, Space.get_tuple_id(isl::dim::set));
  AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);

  AccessRelation =
      AccessRelation.gist_domain(isl::manage(Statement->getDomain()));
}
MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType AccType, Value *BaseAddress,
                           Type *ElementType, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           MemoryKind Kind)
    : Kind(Kind), AccType(AccType), RedType(RT_NONE), Statement(Stmt),
      InvalidDomain(nullptr), BaseAddr(BaseAddress), ElementType(ElementType),
      Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
      AccessValue(AccessValue), IsAffine(Affine),
      Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(nullptr),
      NewAccessRelation(nullptr), FAD(nullptr) {
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName.c_str(), this);
}

MemoryAccess::MemoryAccess(ScopStmt *Stmt, AccessType AccType, isl::map AccRel)
    : Kind(MemoryKind::Array), AccType(AccType), RedType(RT_NONE),
      Statement(Stmt), InvalidDomain(nullptr), AccessInstruction(nullptr),
      IsAffine(true), AccessRelation(nullptr), NewAccessRelation(AccRel),
      FAD(nullptr) {
  isl::id ArrayInfoId = NewAccessRelation.get_tuple_id(isl::dim::out);
  auto *SAI = ScopArrayInfo::getFromId(ArrayInfoId);
  Sizes.push_back(nullptr);
  for (unsigned i = 1; i < SAI->getNumberOfDimensions(); i++)
    Sizes.push_back(SAI->getDimensionSize(i));
  ElementType = SAI->getElementType();
  BaseAddr = SAI->getBasePtr();
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName.c_str(), this);
}

void MemoryAccess::realignParams() {
  isl::set Ctx = isl::manage(Statement->getParent()->getContext());
  InvalidDomain = InvalidDomain.gist_params(Ctx);
  AccessRelation = AccessRelation.gist_params(Ctx);
}

const std::string MemoryAccess::getReductionOperatorStr() const {
  return MemoryAccess::getReductionOperatorStr(getReductionType());
}

isl::id MemoryAccess::getId() const { return Id; }

raw_ostream &polly::operator<<(raw_ostream &OS,
                               MemoryAccess::ReductionType RT) {
  if (RT == MemoryAccess::RT_NONE)
    OS << "NONE";
  else
    OS << MemoryAccess::getReductionOperatorStr(RT);
  return OS;
}

void MemoryAccess::setFortranArrayDescriptor(Value *FAD) { this->FAD = FAD; }
void MemoryAccess::print(raw_ostream &OS) const {
  switch (AccType) {
  case READ:
    OS.indent(12) << "ReadAccess :=\t";
    break;
  case MUST_WRITE:
    OS.indent(12) << "MustWriteAccess :=\t";
    break;
  case MAY_WRITE:
    OS.indent(12) << "MayWriteAccess :=\t";
    break;
  }

  OS << "[Reduction Type: " << getReductionType() << "] ";

  if (FAD) {
    OS << "[Fortran array descriptor: " << FAD->getName();
    OS << "] ";
  }

  OS << "[Scalar: " << isScalarKind() << "]\n";
  OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
  if (hasNewAccessRelation())
    OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemoryAccess::dump() const { print(errs()); }
#endif

isl::pw_aff MemoryAccess::getPwAff(const SCEV *E) {
  auto *Stmt = getStatement();
  PWACtx PWAC = Stmt->getParent()->getPwAff(E, Stmt->getEntryBlock());
  isl::set StmtDom = isl::manage(getStatement()->getDomain());
  StmtDom = StmtDom.reset_tuple_id();
  isl::set NewInvalidDom = StmtDom.intersect(isl::manage(PWAC.second));
  InvalidDomain = InvalidDomain.unite(NewInvalidDom);
  return isl::manage(PWAC.first);
}
// Create a map over the provided set domain that maps from one element of
// the domain to another element of the same domain.
// The mapping is limited to all points that are equal in all but the last
// dimension and for which the last dimension of the input is strictly
// smaller than the last dimension of the output.
//
// getEqualAndLarger(set[i0, i1, ..., iX]):
//
//   set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
//     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
//
static isl::map getEqualAndLarger(isl::space SetDomain) {
  isl::space Space = SetDomain.map_from_set();
  isl::map Map = isl::map::universe(Space);
  unsigned lastDimension = Map.dim(isl::dim::in) - 1;

  // Set all but the last dimension to be equal for the input and output
  //
  //   input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
  //     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
  for (unsigned i = 0; i < lastDimension; ++i)
    Map = Map.equate(isl::dim::in, i, isl::dim::out, i);

  // Set the last dimension of the input to be strictly smaller than the
  // last dimension of the output.
  //
  //   input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
  Map = Map.order_lt(isl::dim::in, lastDimension, isl::dim::out, lastDimension);
  return Map;
}

isl::set MemoryAccess::getStride(isl::map Schedule) const {
  isl::map AccessRelation = getAccessRelation();
  isl::space Space = Schedule.get_space().range();
  isl::map NextScatt = getEqualAndLarger(Space);

  Schedule = Schedule.reverse();
  NextScatt = NextScatt.lexmin();

  NextScatt = NextScatt.apply_range(Schedule);
  NextScatt = NextScatt.apply_range(AccessRelation);
  NextScatt = NextScatt.apply_domain(Schedule);
  NextScatt = NextScatt.apply_domain(AccessRelation);

  isl::set Deltas = NextScatt.deltas();
  return Deltas;
}
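
// Illustrative example (added for exposition, not part of the original
// source): for the schedule { Stmt[i] -> [i] } and the access
// { Stmt[i] -> MemRef_A[2i] }, two consecutive schedule points touch the
// array elements 2i and 2i + 2, so the computed stride set is { [2] } and
// isStrideX(Schedule, 2) would hold.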
bool MemoryAccess::isStrideX(isl::map Schedule, int StrideWidth) const {
  isl::set Stride, StrideX;
  bool IsStrideX;

  Stride = getStride(Schedule);
  StrideX = isl::set::universe(Stride.get_space());
  for (unsigned i = 0; i < StrideX.dim(isl::dim::set) - 1; i++)
    StrideX = StrideX.fix_si(isl::dim::set, i, 0);
  StrideX = StrideX.fix_si(isl::dim::set, StrideX.dim(isl::dim::set) - 1,
                           StrideWidth);
  IsStrideX = Stride.is_subset(StrideX);

  return IsStrideX;
}

bool MemoryAccess::isStrideZero(isl::map Schedule) const {
  return isStrideX(Schedule, 0);
}

bool MemoryAccess::isStrideOne(isl::map Schedule) const {
  return isStrideX(Schedule, 1);
}

void MemoryAccess::setAccessRelation(__isl_take isl_map *NewAccess) {
  AccessRelation = isl::manage(NewAccess);
}

void MemoryAccess::setNewAccessRelation(__isl_take isl_map *NewAccess) {
  assert(NewAccess);

#ifndef NDEBUG
  // Check domain space compatibility.
  auto *NewSpace = isl_map_get_space(NewAccess);
  auto *NewDomainSpace = isl_space_domain(isl_space_copy(NewSpace));
  auto *OriginalDomainSpace = getStatement()->getDomainSpace();
  assert(isl_space_has_equal_tuples(OriginalDomainSpace, NewDomainSpace));
  isl_space_free(NewDomainSpace);
  isl_space_free(OriginalDomainSpace);

  // Reads must be executed unconditionally. Writes might be executed in a
  // subdomain only.
  if (isRead()) {
    // Check whether there is an access for every statement instance.
    auto *StmtDomain = getStatement()->getDomain();
    StmtDomain = isl_set_intersect_params(
        StmtDomain, getStatement()->getParent()->getContext());
    auto *NewDomain = isl_map_domain(isl_map_copy(NewAccess));
    assert(isl_set_is_subset(StmtDomain, NewDomain) &&
           "Partial READ accesses not supported");
    isl_set_free(NewDomain);
    isl_set_free(StmtDomain);
  }

  auto *NewAccessSpace = isl_space_range(NewSpace);
  assert(isl_space_has_tuple_id(NewAccessSpace, isl_dim_set) &&
         "Must specify the array that is accessed");
  auto *NewArrayId = isl_space_get_tuple_id(NewAccessSpace, isl_dim_set);
  auto *SAI = static_cast<ScopArrayInfo *>(isl_id_get_user(NewArrayId));
  assert(SAI && "Must set a ScopArrayInfo");

  if (SAI->isArrayKind() && SAI->getBasePtrOriginSAI()) {
    InvariantEquivClassTy *EqClass =
        getStatement()->getParent()->lookupInvariantEquivClass(
            SAI->getBasePtr());
    assert(EqClass &&
           "Access functions to indirect arrays must have an invariant and "
           "hoisted base pointer");
  }

  // Check whether the access dimensions correspond to the number of
  // dimensions of the accessed array.
  auto Dims = SAI->getNumberOfDimensions();
  assert(isl_space_dim(NewAccessSpace, isl_dim_set) == Dims &&
         "Access dims must match array dims");
  isl_space_free(NewAccessSpace);
  isl_id_free(NewArrayId);
#endif

  NewAccess = isl_map_gist_domain(NewAccess, getStatement()->getDomain());
  NewAccessRelation = isl::manage(NewAccess);
}

bool MemoryAccess::isLatestPartialAccess() const {
  isl::set StmtDom = give(getStatement()->getDomain());
  isl::set AccDom = getLatestAccessRelation().domain();

  return isl_set_is_subset(StmtDom.keep(), AccDom.keep()) == isl_bool_false;
}
//===----------------------------------------------------------------------===//

__isl_give isl_map *ScopStmt::getSchedule() const {
  isl_set *Domain = getDomain();
  if (isl_set_is_empty(Domain)) {
    isl_set_free(Domain);
    return isl_map_from_aff(
        isl_aff_zero_on_domain(isl_local_space_from_space(getDomainSpace())));
  }
  auto *Schedule = getParent()->getSchedule();
  if (!Schedule) {
    isl_set_free(Domain);
    return nullptr;
  }
  Schedule = isl_union_map_intersect_domain(
      Schedule, isl_union_set_from_set(isl_set_copy(Domain)));
  if (isl_union_map_is_empty(Schedule)) {
    isl_set_free(Domain);
    isl_union_map_free(Schedule);
    return isl_map_from_aff(
        isl_aff_zero_on_domain(isl_local_space_from_space(getDomainSpace())));
  }
  auto *M = isl_map_from_union_map(Schedule);
  M = isl_map_coalesce(M);
  M = isl_map_gist_domain(M, Domain);
  M = isl_map_coalesce(M);
  return M;
}

void ScopStmt::restrictDomain(__isl_take isl_set *NewDomain) {
  assert(isl_set_is_subset(NewDomain, Domain) &&
         "New domain is not a subset of old domain!");
  isl_set_free(Domain);
  Domain = NewDomain;
}

void ScopStmt::buildAccessRelations() {
  Scop &S = *getParent();
  for (MemoryAccess *Access : MemAccs) {
    Type *ElementType = Access->getElementType();

    MemoryKind Ty;
    if (Access->isPHIKind())
      Ty = MemoryKind::PHI;
    else if (Access->isExitPHIKind())
      Ty = MemoryKind::ExitPHI;
    else if (Access->isValueKind())
      Ty = MemoryKind::Value;
    else
      Ty = MemoryKind::Array;

    auto *SAI = S.getOrCreateScopArrayInfo(Access->getOriginalBaseAddr(),
                                           ElementType, Access->Sizes, Ty);
    Access->buildAccessRelation(SAI);
    S.addAccessData(Access);
  }
}

void ScopStmt::addAccess(MemoryAccess *Access) {
  Instruction *AccessInst = Access->getAccessInstruction();

  if (Access->isArrayKind()) {
    MemoryAccessList &MAL = InstructionToAccess[AccessInst];
    MAL.emplace_front(Access);
  } else if (Access->isValueKind() && Access->isWrite()) {
    Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
    assert(Parent.getStmtFor(AccessVal) == this);
    assert(!ValueWrites.lookup(AccessVal));

    ValueWrites[AccessVal] = Access;
  } else if (Access->isValueKind() && Access->isRead()) {
    Value *AccessVal = Access->getAccessValue();
    assert(!ValueReads.lookup(AccessVal));

    ValueReads[AccessVal] = Access;
  } else if (Access->isAnyPHIKind() && Access->isWrite()) {
    PHINode *PHI = cast<PHINode>(Access->getAccessValue());
    assert(!PHIWrites.lookup(PHI));

    PHIWrites[PHI] = Access;
  } else if (Access->isAnyPHIKind() && Access->isRead()) {
    PHINode *PHI = cast<PHINode>(Access->getAccessValue());
    assert(!PHIReads.lookup(PHI));

    PHIReads[PHI] = Access;
  }

  MemAccs.push_back(Access);
}

void ScopStmt::realignParams() {
  for (MemoryAccess *MA : *this)
    MA->realignParams();

  auto *Ctx = Parent.getContext();
  InvalidDomain = isl_set_gist_params(InvalidDomain, isl_set_copy(Ctx));
  Domain = isl_set_gist_params(Domain, Ctx);
}
/// Add @p BSet to the set @p User if @p BSet is bounded.
static isl_stat collectBoundedParts(__isl_take isl_basic_set *BSet,
                                    void *User) {
  isl_set **BoundedParts = static_cast<isl_set **>(User);
  if (isl_basic_set_is_bounded(BSet))
    *BoundedParts = isl_set_union(*BoundedParts, isl_set_from_basic_set(BSet));
  else
    isl_basic_set_free(BSet);
  return isl_stat_ok;
}

/// Return the bounded parts of @p S.
static __isl_give isl_set *collectBoundedParts(__isl_take isl_set *S) {
  isl_set *BoundedParts = isl_set_empty(isl_set_get_space(S));
  isl_set_foreach_basic_set(S, collectBoundedParts, &BoundedParts);
  isl_set_free(S);
  return BoundedParts;
}

/// Compute the (un)bounded parts of @p S wrt. to dimension @p Dim.
///
/// @returns A separation of @p S into first an unbounded then a bounded
///          subset, both with regards to the dimension @p Dim.
static std::pair<__isl_give isl_set *, __isl_give isl_set *>
partitionSetParts(__isl_take isl_set *S, unsigned Dim) {
  for (unsigned u = 0, e = isl_set_n_dim(S); u < e; u++)
    S = isl_set_lower_bound_si(S, isl_dim_set, u, 0);

  unsigned NumDimsS = isl_set_n_dim(S);
  isl_set *OnlyDimS = isl_set_copy(S);

  // Remove dimensions that are greater than Dim as they are not interesting.
  assert(NumDimsS >= Dim + 1);
  OnlyDimS =
      isl_set_project_out(OnlyDimS, isl_dim_set, Dim + 1, NumDimsS - Dim - 1);

  // Create artificial parametric upper bounds for dimensions smaller than Dim
  // as we are not interested in them.
  OnlyDimS = isl_set_insert_dims(OnlyDimS, isl_dim_param, 0, Dim);
  for (unsigned u = 0; u < Dim; u++) {
    isl_constraint *C = isl_inequality_alloc(
        isl_local_space_from_space(isl_set_get_space(OnlyDimS)));
    C = isl_constraint_set_coefficient_si(C, isl_dim_param, u, 1);
    C = isl_constraint_set_coefficient_si(C, isl_dim_set, u, -1);
    OnlyDimS = isl_set_add_constraint(OnlyDimS, C);
  }

  // Collect all bounded parts of OnlyDimS.
  isl_set *BoundedParts = collectBoundedParts(OnlyDimS);

  // Create the dimensions greater than Dim again.
  BoundedParts = isl_set_insert_dims(BoundedParts, isl_dim_set, Dim + 1,
                                     NumDimsS - Dim - 1);

  // Remove the artificial upper bound parameters again.
  BoundedParts = isl_set_remove_dims(BoundedParts, isl_dim_param, 0, Dim);

  isl_set *UnboundedParts = isl_set_subtract(S, isl_set_copy(BoundedParts));
  return std::make_pair(UnboundedParts, BoundedParts);
}
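
// Illustrative example (added for exposition, not part of the original
// source): for S = { [i, j] : i >= 0 and j >= i } and Dim = 1, the j
// dimension has no upper bound, so the whole set is returned as the
// unbounded first component; with an additional parametric constraint
// j <= N it would instead end up in the bounded second component.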
/// Set the dimension Ids from @p From in @p To.
static __isl_give isl_set *setDimensionIds(__isl_keep isl_set *From,
                                           __isl_take isl_set *To) {
  for (unsigned u = 0, e = isl_set_n_dim(From); u < e; u++) {
    isl_id *DimId = isl_set_get_dim_id(From, isl_dim_set, u);
    To = isl_set_set_dim_id(To, isl_dim_set, u, DimId);
  }
  return To;
}

/// Create the conditions under which @p L @p Pred @p R is true.
static __isl_give isl_set *buildConditionSet(ICmpInst::Predicate Pred,
                                             __isl_take isl_pw_aff *L,
                                             __isl_take isl_pw_aff *R) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:
    return isl_pw_aff_eq_set(L, R);
  case ICmpInst::ICMP_NE:
    return isl_pw_aff_ne_set(L, R);
  case ICmpInst::ICMP_SLT:
    return isl_pw_aff_lt_set(L, R);
  case ICmpInst::ICMP_SLE:
    return isl_pw_aff_le_set(L, R);
  case ICmpInst::ICMP_SGT:
    return isl_pw_aff_gt_set(L, R);
  case ICmpInst::ICMP_SGE:
    return isl_pw_aff_ge_set(L, R);
  case ICmpInst::ICMP_ULT:
    return isl_pw_aff_lt_set(L, R);
  case ICmpInst::ICMP_UGT:
    return isl_pw_aff_gt_set(L, R);
  case ICmpInst::ICMP_ULE:
    return isl_pw_aff_le_set(L, R);
  case ICmpInst::ICMP_UGE:
    return isl_pw_aff_ge_set(L, R);
  default:
    llvm_unreachable("Non integer predicate not supported");
  }
}

/// Create the conditions under which @p L @p Pred @p R is true.
///
/// Helper function that will make sure the dimensions of the result have the
/// same isl_id's as the @p Domain.
static __isl_give isl_set *buildConditionSet(ICmpInst::Predicate Pred,
                                             __isl_take isl_pw_aff *L,
                                             __isl_take isl_pw_aff *R,
                                             __isl_keep isl_set *Domain) {
  isl_set *ConsequenceCondSet = buildConditionSet(Pred, L, R);
  return setDimensionIds(Domain, ConsequenceCondSet);
}

/// Compute the isl representation for the SCEV @p E in this BB.
///
/// @param S                The Scop in which @p BB resides.
/// @param BB               The BB for which the isl representation is to be
///                         computed.
/// @param InvalidDomainMap A map of BB to their invalid domains.
/// @param E                The SCEV that should be translated.
/// @param NonNegative      Flag to indicate that @p E has to be non-negative.
///
/// Note that this function will also adjust the invalid context accordingly.
__isl_give isl_pw_aff *
getPwAff(Scop &S, BasicBlock *BB,
         DenseMap<BasicBlock *, isl::set> &InvalidDomainMap, const SCEV *E,
         bool NonNegative = false) {
  PWACtx PWAC = S.getPwAff(E, BB, NonNegative);
  InvalidDomainMap[BB] = InvalidDomainMap[BB].unite(isl::manage(PWAC.second));
  return PWAC.first;
}
/// Build the condition sets for the switch @p SI in the @p Domain.
///
/// This will fill @p ConditionSets with the conditions under which control
/// will be moved from @p SI to its successors. Hence, @p ConditionSets will
/// have as many elements as @p SI has successors.
static bool
buildConditionSets(Scop &S, BasicBlock *BB, SwitchInst *SI, Loop *L,
                   __isl_keep isl_set *Domain,
                   DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
                   SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
  Value *Condition = getConditionFromTerminator(SI);
  assert(Condition && "No condition for switch");

  ScalarEvolution &SE = *S.getSE();
  isl_pw_aff *LHS, *RHS;
  LHS = getPwAff(S, BB, InvalidDomainMap, SE.getSCEVAtScope(Condition, L));

  unsigned NumSuccessors = SI->getNumSuccessors();
  ConditionSets.resize(NumSuccessors);
  for (auto &Case : SI->cases()) {
    unsigned Idx = Case.getSuccessorIndex();
    ConstantInt *CaseValue = Case.getCaseValue();

    RHS = getPwAff(S, BB, InvalidDomainMap, SE.getSCEV(CaseValue));
    isl_set *CaseConditionSet =
        buildConditionSet(ICmpInst::ICMP_EQ, isl_pw_aff_copy(LHS), RHS, Domain);
    ConditionSets[Idx] = isl_set_coalesce(
        isl_set_intersect(CaseConditionSet, isl_set_copy(Domain)));
  }

  assert(ConditionSets[0] == nullptr && "Default condition set was set");
  isl_set *ConditionSetUnion = isl_set_copy(ConditionSets[1]);
  for (unsigned u = 2; u < NumSuccessors; u++)
    ConditionSetUnion =
        isl_set_union(ConditionSetUnion, isl_set_copy(ConditionSets[u]));
  ConditionSets[0] = setDimensionIds(
      Domain, isl_set_subtract(isl_set_copy(Domain), ConditionSetUnion));

  isl_pw_aff_free(LHS);

  return true;
}
/// Build condition sets for unsigned ICmpInst(s).
/// Special handling is required for unsigned operands to ensure that if the
/// MSB (aka the sign bit) is set for an operand in an unsigned ICmpInst
/// it should wrap around.
///
/// @param IsStrictUpperBound holds information on the predicate relation
///        between TestVal and UpperBound, i.e.,
///        TestVal < UpperBound OR TestVal <= UpperBound
static __isl_give isl_set *
buildUnsignedConditionSets(Scop &S, BasicBlock *BB, Value *Condition,
                           __isl_keep isl_set *Domain, const SCEV *SCEV_TestVal,
                           const SCEV *SCEV_UpperBound,
                           DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
                           bool IsStrictUpperBound) {
  // Do not take the NonNeg assumption on TestVal
  // as it might have the MSB (sign bit) set.
  isl_pw_aff *TestVal = getPwAff(S, BB, InvalidDomainMap, SCEV_TestVal, false);
  // Take the NonNeg assumption on UpperBound.
  isl_pw_aff *UpperBound =
      getPwAff(S, BB, InvalidDomainMap, SCEV_UpperBound, true);

  // 0 <= TestVal
  isl_set *First =
      isl_pw_aff_le_set(isl_pw_aff_zero_on_domain(isl_local_space_from_space(
                            isl_pw_aff_get_domain_space(TestVal))),
                        isl_pw_aff_copy(TestVal));

  isl_set *Second;
  if (IsStrictUpperBound)
    // TestVal < UpperBound
    Second = isl_pw_aff_lt_set(TestVal, UpperBound);
  else
    // TestVal <= UpperBound
    Second = isl_pw_aff_le_set(TestVal, UpperBound);

  isl_set *ConsequenceCondSet = isl_set_intersect(First, Second);
  ConsequenceCondSet = setDimensionIds(Domain, ConsequenceCondSet);
  return ConsequenceCondSet;
}
1549 /// Build the condition sets for the branch condition @p Condition in
1550 /// the @p Domain.
1552 /// This will fill @p ConditionSets with the conditions under which control
1553 /// will be moved from @p TI to its successors. Hence, @p ConditionSets will
1554 /// have as many elements as @p TI has successors. If @p TI is nullptr the
1555 /// context under which @p Condition is true/false will be returned as the
1556 /// new elements of @p ConditionSets.
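/// For example, for a conditional branch on "i < N" executed under
/// Domain = { [i] : 0 <= i < 100 }, this computes conceptually:
///   ConditionSets[0] = { [i] : 0 <= i < 100 and i < N }   (true successor)
///   ConditionSets[1] = { [i] : 0 <= i < 100 and i >= N }  (false successor)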
1557 static bool
1558 buildConditionSets(Scop &S, BasicBlock *BB, Value *Condition,
1559 TerminatorInst *TI, Loop *L, __isl_keep isl_set *Domain,
1560 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
1561 SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
1563 isl_set *ConsequenceCondSet = nullptr;
1564 if (auto *CCond = dyn_cast<ConstantInt>(Condition)) {
1565 if (CCond->isZero())
1566 ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
1567 else
1568 ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
1569 } else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Condition)) {
1570 auto Opcode = BinOp->getOpcode();
1571 assert(Opcode == Instruction::And || Opcode == Instruction::Or);
1573 bool Valid = buildConditionSets(S, BB, BinOp->getOperand(0), TI, L, Domain,
1574 InvalidDomainMap, ConditionSets) &&
1575 buildConditionSets(S, BB, BinOp->getOperand(1), TI, L, Domain,
1576 InvalidDomainMap, ConditionSets);
1577 if (!Valid) {
1578 while (!ConditionSets.empty())
1579 isl_set_free(ConditionSets.pop_back_val());
1580 return false;
1583 isl_set_free(ConditionSets.pop_back_val());
1584 isl_set *ConsCondPart0 = ConditionSets.pop_back_val();
1585 isl_set_free(ConditionSets.pop_back_val());
1586 isl_set *ConsCondPart1 = ConditionSets.pop_back_val();
1588 if (Opcode == Instruction::And)
1589 ConsequenceCondSet = isl_set_intersect(ConsCondPart0, ConsCondPart1);
1590 else
1591 ConsequenceCondSet = isl_set_union(ConsCondPart0, ConsCondPart1);
1592 } else {
1593 auto *ICond = dyn_cast<ICmpInst>(Condition);
1594 assert(ICond &&
1595 "Condition of exiting branch was neither constant nor ICmp!");
1597 ScalarEvolution &SE = *S.getSE();
1598 isl_pw_aff *LHS, *RHS;
1599 // For unsigned comparisons we assume the sign bit of neither operand
1600 // to be set. The comparison is equivalent to a signed comparison under
1601 // this assumption.
1602 bool NonNeg = ICond->isUnsigned();
1603 const SCEV *LeftOperand = SE.getSCEVAtScope(ICond->getOperand(0), L),
1604 *RightOperand = SE.getSCEVAtScope(ICond->getOperand(1), L);
1606 switch (ICond->getPredicate()) {
1607 case ICmpInst::ICMP_ULT:
1608 ConsequenceCondSet =
1609 buildUnsignedConditionSets(S, BB, Condition, Domain, LeftOperand,
1610 RightOperand, InvalidDomainMap, true);
1611 break;
1612 case ICmpInst::ICMP_ULE:
1613 ConsequenceCondSet =
1614 buildUnsignedConditionSets(S, BB, Condition, Domain, LeftOperand,
1615 RightOperand, InvalidDomainMap, false);
1616 break;
1617 case ICmpInst::ICMP_UGT:
1618 ConsequenceCondSet =
1619 buildUnsignedConditionSets(S, BB, Condition, Domain, RightOperand,
1620 LeftOperand, InvalidDomainMap, true);
1621 break;
1622 case ICmpInst::ICMP_UGE:
1623 ConsequenceCondSet =
1624 buildUnsignedConditionSets(S, BB, Condition, Domain, RightOperand,
1625 LeftOperand, InvalidDomainMap, false);
1626 break;
1627 default:
1628 LHS = getPwAff(S, BB, InvalidDomainMap, LeftOperand, NonNeg);
1629 RHS = getPwAff(S, BB, InvalidDomainMap, RightOperand, NonNeg);
1630 ConsequenceCondSet =
1631 buildConditionSet(ICond->getPredicate(), LHS, RHS, Domain);
1632 break;
1636 // If no terminator was given we are only looking for parameter constraints
1637 // under which @p Condition is true/false.
1638 if (!TI)
1639 ConsequenceCondSet = isl_set_params(ConsequenceCondSet);
1640 assert(ConsequenceCondSet);
1641 ConsequenceCondSet = isl_set_coalesce(
1642 isl_set_intersect(ConsequenceCondSet, isl_set_copy(Domain)));
1644 isl_set *AlternativeCondSet = nullptr;
1645 bool TooComplex =
1646 isl_set_n_basic_set(ConsequenceCondSet) >= MaxDisjunctsInDomain;
1648 if (!TooComplex) {
1649 AlternativeCondSet = isl_set_subtract(isl_set_copy(Domain),
1650 isl_set_copy(ConsequenceCondSet));
1651 TooComplex =
1652 isl_set_n_basic_set(AlternativeCondSet) >= MaxDisjunctsInDomain;
1655 if (TooComplex) {
1656 S.invalidate(COMPLEXITY, TI ? TI->getDebugLoc() : DebugLoc(),
1657 TI ? TI->getParent() : nullptr /* BasicBlock */);
1658 isl_set_free(AlternativeCondSet);
1659 isl_set_free(ConsequenceCondSet);
1660 return false;
1663 ConditionSets.push_back(ConsequenceCondSet);
1664 ConditionSets.push_back(isl_set_coalesce(AlternativeCondSet));
1666 return true;
1669 /// Build the condition sets for the terminator @p TI in the @p Domain.
1671 /// This will fill @p ConditionSets with the conditions under which control
1672 /// will be moved from @p TI to its successors. Hence, @p ConditionSets will
1673 /// have as many elements as @p TI has successors.
1674 static bool
1675 buildConditionSets(Scop &S, BasicBlock *BB, TerminatorInst *TI, Loop *L,
1676 __isl_keep isl_set *Domain,
1677 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
1678 SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
1680 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
1681 return buildConditionSets(S, BB, SI, L, Domain, InvalidDomainMap,
1682 ConditionSets);
1684 assert(isa<BranchInst>(TI) && "Terminator was neither branch nor switch.");
1686 if (TI->getNumSuccessors() == 1) {
1687 ConditionSets.push_back(isl_set_copy(Domain));
1688 return true;
1691 Value *Condition = getConditionFromTerminator(TI);
1692 assert(Condition && "No condition for Terminator");
1694 return buildConditionSets(S, BB, Condition, TI, L, Domain, InvalidDomainMap,
1695 ConditionSets);
1698 void ScopStmt::buildDomain() {
1699 isl_id *Id = isl_id_alloc(getIslCtx(), getBaseName(), this);
1701 Domain = getParent()->getDomainConditions(this);
1702 Domain = isl_set_set_tuple_id(Domain, Id);
1705 void ScopStmt::collectSurroundingLoops() {
1706 for (unsigned u = 0, e = isl_set_n_dim(Domain); u < e; u++) {
1707 isl_id *DimId = isl_set_get_dim_id(Domain, isl_dim_set, u);
1708 NestLoops.push_back(static_cast<Loop *>(isl_id_get_user(DimId)));
1709 isl_id_free(DimId);
1713 ScopStmt::ScopStmt(Scop &parent, Region &R, Loop *SurroundingLoop)
1714 : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), BB(nullptr),
1715 R(&R), Build(nullptr), SurroundingLoop(SurroundingLoop) {
1717 BaseName = getIslCompatibleName(
1718 "Stmt", R.getNameStr(), parent.getNextStmtIdx(), "", UseInstructionNames);
1721 ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb, Loop *SurroundingLoop,
1722 std::vector<Instruction *> Instructions)
1723 : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), BB(&bb),
1724 R(nullptr), Build(nullptr), SurroundingLoop(SurroundingLoop),
1725 Instructions(Instructions) {
1727 BaseName = getIslCompatibleName("Stmt", &bb, parent.getNextStmtIdx(), "",
1728 UseInstructionNames);
1731 ScopStmt::ScopStmt(Scop &parent, __isl_take isl_map *SourceRel,
1732 __isl_take isl_map *TargetRel, __isl_take isl_set *NewDomain)
1733 : Parent(parent), InvalidDomain(nullptr), Domain(NewDomain), BB(nullptr),
1734 R(nullptr), Build(nullptr) {
1735 BaseName = getIslCompatibleName("CopyStmt_", "",
1736 std::to_string(parent.getCopyStmtsNum()));
1737 auto *Id = isl_id_alloc(getIslCtx(), getBaseName(), this);
1738 Domain = isl_set_set_tuple_id(Domain, isl_id_copy(Id));
1739 TargetRel = isl_map_set_tuple_id(TargetRel, isl_dim_in, Id);
1740 auto *Access = new MemoryAccess(this, MemoryAccess::AccessType::MUST_WRITE,
1741 isl::manage(TargetRel));
1742 parent.addAccessFunction(Access);
1743 addAccess(Access);
1744 SourceRel = isl_map_set_tuple_id(SourceRel, isl_dim_in, isl_id_copy(Id));
1745 Access = new MemoryAccess(this, MemoryAccess::AccessType::READ,
1746 isl::manage(SourceRel));
1747 parent.addAccessFunction(Access);
1748 addAccess(Access);
1751 void ScopStmt::init(LoopInfo &LI) {
1752 assert(!Domain && "init must be called only once");
1754 buildDomain();
1755 collectSurroundingLoops();
1756 buildAccessRelations();
1758 if (DetectReductions)
1759 checkForReductions();
1762 /// Collect loads which might form a reduction chain with @p StoreMA.
1764 /// Check if the stored value for @p StoreMA is a binary operator with one or
1765 /// two loads as operands. If the binary operator is commutative & associative,
1766 /// used only once (by @p StoreMA) and its load operands are also used only
1767 /// once, we have found a possible reduction chain. It starts at an operand
1768 /// load and includes the binary operator and @p StoreMA.
1770 /// Note: We allow only one use to ensure the load and binary operator cannot
1771 /// escape this block or feed any store other than @p StoreMA.
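/// For example, for a statement computing "A[i] += B[i]", i.e.,
///   %a   = load A[i]
///   %b   = load B[i]
///   %sum = add %a, %b
///   store %sum, A[i]
/// both loads are collected as reduction candidates for the store: the add
/// is commutative and associative, and each intermediate value has a single
/// use inside the block.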
1772 void ScopStmt::collectCandiateReductionLoads(
1773 MemoryAccess *StoreMA, SmallVectorImpl<MemoryAccess *> &Loads) {
1774 auto *Store = dyn_cast<StoreInst>(StoreMA->getAccessInstruction());
1775 if (!Store)
1776 return;
1778 // Skip if there is not one binary operator between the load and the store
1779 auto *BinOp = dyn_cast<BinaryOperator>(Store->getValueOperand());
1780 if (!BinOp)
1781 return;
1783 // Skip if the binary operator has multiple uses
1784 if (BinOp->getNumUses() != 1)
1785 return;
1787 // Skip if the opcode of the binary operator is not commutative/associative
1788 if (!BinOp->isCommutative() || !BinOp->isAssociative())
1789 return;
1791 // Skip if the binary operator is not in the same block as the store
1792 if (BinOp->getParent() != Store->getParent())
1793 return;
1795 // Skip if it is a multiplicative reduction and we disabled them
1796 if (DisableMultiplicativeReductions &&
1797 (BinOp->getOpcode() == Instruction::Mul ||
1798 BinOp->getOpcode() == Instruction::FMul))
1799 return;
1801 // Check the binary operator operands for a candidate load
1802 auto *PossibleLoad0 = dyn_cast<LoadInst>(BinOp->getOperand(0));
1803 auto *PossibleLoad1 = dyn_cast<LoadInst>(BinOp->getOperand(1));
1804 if (!PossibleLoad0 && !PossibleLoad1)
1805 return;
1807 // A load is only a candidate if it cannot escape (thus has only this use)
1808 if (PossibleLoad0 && PossibleLoad0->getNumUses() == 1)
1809 if (PossibleLoad0->getParent() == Store->getParent())
1810 Loads.push_back(&getArrayAccessFor(PossibleLoad0));
1811 if (PossibleLoad1 && PossibleLoad1->getNumUses() == 1)
1812 if (PossibleLoad1->getParent() == Store->getParent())
1813 Loads.push_back(&getArrayAccessFor(PossibleLoad1));
1816 /// Check for reductions in this ScopStmt.
1818 /// Iterate over all store memory accesses and check for valid binary reduction
1819 /// like chains. For all candidates we check if they have the same base address
1820 /// and there are no other accesses which overlap with them. The base address
1821 /// check rules out impossible reduction candidates early. The overlap check,
1822 /// together with the "only one user" check in collectCandiateReductionLoads,
1823 /// guarantees that none of the intermediate results will escape during
1824 /// execution of the loop nest. We basically check here that no other memory
1825 /// access can access the same memory as the potential reduction.
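/// For example, in "A[i] += B[i]" the pair (load of A[i], store to A[i])
/// is marked as reduction-like, whereas in "A[i] = A[i] + A[i + 1]" the
/// extra access to A[i + 1] overlaps with the candidate accesses, so the
/// candidate pair is rejected.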
1826 void ScopStmt::checkForReductions() {
1827 SmallVector<MemoryAccess *, 2> Loads;
1828 SmallVector<std::pair<MemoryAccess *, MemoryAccess *>, 4> Candidates;
1830 // First collect candidate load-store reduction chains by iterating over all
1831 // stores and collecting possible reduction loads.
1832 for (MemoryAccess *StoreMA : MemAccs) {
1833 if (StoreMA->isRead())
1834 continue;
1836 Loads.clear();
1837 collectCandiateReductionLoads(StoreMA, Loads);
1838 for (MemoryAccess *LoadMA : Loads)
1839 Candidates.push_back(std::make_pair(LoadMA, StoreMA));
1842 // Then check each possible candidate pair.
1843 for (const auto &CandidatePair : Candidates) {
1844 bool Valid = true;
1845 isl_map *LoadAccs = CandidatePair.first->getAccessRelation().release();
1846 isl_map *StoreAccs = CandidatePair.second->getAccessRelation().release();
1848 // Skip those with obviously unequal base addresses.
1849 if (!isl_map_has_equal_space(LoadAccs, StoreAccs)) {
1850 isl_map_free(LoadAccs);
1851 isl_map_free(StoreAccs);
1852 continue;
1855 // Then check the remaining candidates for overlap with other memory accesses.
1856 isl_map *AllAccsRel = isl_map_union(LoadAccs, StoreAccs);
1857 AllAccsRel = isl_map_intersect_domain(AllAccsRel, getDomain());
1858 isl_set *AllAccs = isl_map_range(AllAccsRel);
1860 for (MemoryAccess *MA : MemAccs) {
1861 if (MA == CandidatePair.first || MA == CandidatePair.second)
1862 continue;
1864 isl_map *AccRel = isl_map_intersect_domain(
1865 MA->getAccessRelation().release(), getDomain());
1866 isl_set *Accs = isl_map_range(AccRel);
1868 if (isl_set_has_equal_space(AllAccs, Accs)) {
1869 isl_set *OverlapAccs = isl_set_intersect(Accs, isl_set_copy(AllAccs));
1870 Valid = Valid && isl_set_is_empty(OverlapAccs);
1871 isl_set_free(OverlapAccs);
1872 } else {
1873 isl_set_free(Accs);
1877 isl_set_free(AllAccs);
1878 if (!Valid)
1879 continue;
1881 const LoadInst *Load =
1882 dyn_cast<const LoadInst>(CandidatePair.first->getAccessInstruction());
1883 MemoryAccess::ReductionType RT =
1884 getReductionType(dyn_cast<BinaryOperator>(Load->user_back()), Load);
1886 // If no overlapping access was found we mark the load and store as
1887 // reduction like.
1888 CandidatePair.first->markAsReductionLike(RT);
1889 CandidatePair.second->markAsReductionLike(RT);
1893 std::string ScopStmt::getDomainStr() const { return stringFromIslObj(Domain); }
1895 std::string ScopStmt::getScheduleStr() const {
1896 auto *S = getSchedule();
1897 if (!S)
1898 return "";
1899 auto Str = stringFromIslObj(S);
1900 isl_map_free(S);
1901 return Str;
1904 void ScopStmt::setInvalidDomain(__isl_take isl_set *ID) {
1905 isl_set_free(InvalidDomain);
1906 InvalidDomain = ID;
1909 BasicBlock *ScopStmt::getEntryBlock() const {
1910 if (isBlockStmt())
1911 return getBasicBlock();
1912 return getRegion()->getEntry();
1915 unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }
1917 const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }
1919 Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
1920 return NestLoops[Dimension];
1923 isl_ctx *ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }
1925 __isl_give isl_set *ScopStmt::getDomain() const { return isl_set_copy(Domain); }
1927 __isl_give isl_space *ScopStmt::getDomainSpace() const {
1928 return isl_set_get_space(Domain);
1931 __isl_give isl_id *ScopStmt::getDomainId() const {
1932 return isl_set_get_tuple_id(Domain);
1935 ScopStmt::~ScopStmt() {
1936 isl_set_free(Domain);
1937 isl_set_free(InvalidDomain);
1940 void ScopStmt::printInstructions(raw_ostream &OS) const {
1941 OS << "Instructions {\n";
1943 for (Instruction *Inst : Instructions)
1944 OS.indent(16) << *Inst << "\n";
1946 OS.indent(12) << "}\n";
1949 void ScopStmt::print(raw_ostream &OS, bool PrintInstructions) const {
1950 OS << "\t" << getBaseName() << "\n";
1951 OS.indent(12) << "Domain :=\n";
1953 if (Domain) {
1954 OS.indent(16) << getDomainStr() << ";\n";
1955 } else
1956 OS.indent(16) << "n/a\n";
1958 OS.indent(12) << "Schedule :=\n";
1960 if (Domain) {
1961 OS.indent(16) << getScheduleStr() << ";\n";
1962 } else
1963 OS.indent(16) << "n/a\n";
1965 for (MemoryAccess *Access : MemAccs)
1966 Access->print(OS);
1968 if (PrintInstructions && isBlockStmt())
1969 printInstructions(OS.indent(12));
1972 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1973 LLVM_DUMP_METHOD void ScopStmt::dump() const { print(dbgs(), true); }
1974 #endif
1976 void ScopStmt::removeAccessData(MemoryAccess *MA) {
1977 if (MA->isRead() && MA->isOriginalValueKind()) {
1978 bool Found = ValueReads.erase(MA->getAccessValue());
1979 (void)Found;
1980 assert(Found && "Expected access data not found");
1982 if (MA->isWrite() && MA->isOriginalValueKind()) {
1983 bool Found = ValueWrites.erase(cast<Instruction>(MA->getAccessValue()));
1984 (void)Found;
1985 assert(Found && "Expected access data not found");
1987 if (MA->isWrite() && MA->isOriginalAnyPHIKind()) {
1988 bool Found = PHIWrites.erase(cast<PHINode>(MA->getAccessInstruction()));
1989 (void)Found;
1990 assert(Found && "Expected access data not found");
1992 if (MA->isRead() && MA->isOriginalAnyPHIKind()) {
1993 bool Found = PHIReads.erase(cast<PHINode>(MA->getAccessInstruction()));
1994 (void)Found;
1995 assert(Found && "Expected access data not found");
1999 void ScopStmt::removeMemoryAccess(MemoryAccess *MA) {
2000 // Remove the memory accesses from this statement together with all scalar
2001 // accesses that were caused by it. MemoryKind::Value READs have no access
2002 // instruction, hence would not be removed by this function. However, this
2003 // function is only used for invariant LoadInst accesses, whose arguments are
2004 // always affine, hence synthesizable, and therefore there are no
2005 // MemoryKind::Value READ accesses to be removed.
2006 auto Predicate = [&](MemoryAccess *Acc) {
2007 return Acc->getAccessInstruction() == MA->getAccessInstruction();
2009 for (auto *MA : MemAccs) {
2010 if (Predicate(MA)) {
2011 removeAccessData(MA);
2012 Parent.removeAccessData(MA);
2015 MemAccs.erase(std::remove_if(MemAccs.begin(), MemAccs.end(), Predicate),
2016 MemAccs.end());
2017 InstructionToAccess.erase(MA->getAccessInstruction());
2020 void ScopStmt::removeSingleMemoryAccess(MemoryAccess *MA) {
2021 auto MAIt = std::find(MemAccs.begin(), MemAccs.end(), MA);
2022 assert(MAIt != MemAccs.end());
2023 MemAccs.erase(MAIt);
2025 removeAccessData(MA);
2026 Parent.removeAccessData(MA);
2028 auto It = InstructionToAccess.find(MA->getAccessInstruction());
2029 if (It != InstructionToAccess.end()) {
2030 It->second.remove(MA);
2031 if (It->second.empty())
2032 InstructionToAccess.erase(MA->getAccessInstruction());
2036 MemoryAccess *ScopStmt::ensureValueRead(Value *V) {
2037 MemoryAccess *Access = lookupInputAccessOf(V);
2038 if (Access)
2039 return Access;
2041 ScopArrayInfo *SAI =
2042 Parent.getOrCreateScopArrayInfo(V, V->getType(), {}, MemoryKind::Value);
2043 Access = new MemoryAccess(this, nullptr, MemoryAccess::READ, V, V->getType(),
2044 true, {}, {}, V, MemoryKind::Value);
2045 Parent.addAccessFunction(Access);
2046 Access->buildAccessRelation(SAI);
2047 addAccess(Access);
2048 Parent.addAccessData(Access);
2049 return Access;
2052 raw_ostream &polly::operator<<(raw_ostream &O, const ScopStmt &S) {
2053 S.print(O, PollyPrintInstructions);
2054 return O;
2057 //===----------------------------------------------------------------------===//
2058 /// Scop class implementation
2060 void Scop::setContext(__isl_take isl_set *NewContext) {
2061 NewContext = isl_set_align_params(NewContext, isl_set_get_space(Context));
2062 isl_set_free(Context);
2063 Context = NewContext;
2066 namespace {
2067 /// Remap parameter values but keep AddRecs valid wrt. invariant loads.
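/// For example, if the invariant load %inv is mapped to its class
/// representative %rep in VMap, the AddRec {%inv,+,1}<L> is rewritten to
/// %rep + {0,+,1}<L>: the start value is remapped while the recurrence
/// itself stays valid.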
2068 struct SCEVSensitiveParameterRewriter
2069 : public SCEVRewriteVisitor<SCEVSensitiveParameterRewriter> {
2070 ValueToValueMap &VMap;
2072 public:
2073 SCEVSensitiveParameterRewriter(ValueToValueMap &VMap, ScalarEvolution &SE)
2074 : SCEVRewriteVisitor(SE), VMap(VMap) {}
2076 static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
2077 ValueToValueMap &VMap) {
2078 SCEVSensitiveParameterRewriter SSPR(VMap, SE);
2079 return SSPR.visit(E);
2082 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
2083 auto *Start = visit(E->getStart());
2084 auto *AddRec = SE.getAddRecExpr(SE.getConstant(E->getType(), 0),
2085 visit(E->getStepRecurrence(SE)),
2086 E->getLoop(), SCEV::FlagAnyWrap);
2087 return SE.getAddExpr(Start, AddRec);
2090 const SCEV *visitUnknown(const SCEVUnknown *E) {
2091 if (auto *NewValue = VMap.lookup(E->getValue()))
2092 return SE.getUnknown(NewValue);
2093 return E;
2097 /// Check whether we should remap a SCEV expression.
2098 struct SCEVFindInsideScop : public SCEVTraversal<SCEVFindInsideScop> {
2099 ValueToValueMap &VMap;
2100 bool FoundInside = false;
2101 Scop *S;
2103 public:
2104 SCEVFindInsideScop(ValueToValueMap &VMap, ScalarEvolution &SE, Scop *S)
2105 : SCEVTraversal(*this), VMap(VMap), S(S) {}
2107 static bool hasVariant(const SCEV *E, ScalarEvolution &SE,
2108 ValueToValueMap &VMap, Scop *S) {
2109 SCEVFindInsideScop SFIS(VMap, SE, S);
2110 SFIS.visitAll(E);
2111 return SFIS.FoundInside;
2114 bool follow(const SCEV *E) {
2115 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(E)) {
2116 FoundInside |= S->getRegion().contains(AddRec->getLoop());
2117 } else if (auto *Unknown = dyn_cast<SCEVUnknown>(E)) {
2118 if (Instruction *I = dyn_cast<Instruction>(Unknown->getValue()))
2119 FoundInside |= S->getRegion().contains(I) && !VMap.count(I);
2121 return !FoundInside;
2123 bool isDone() { return FoundInside; }
2125 } // namespace
2127 const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *E) {
2128 // Check whether it makes sense to rewrite the SCEV. (ScalarEvolution
2129 // doesn't like addition between an AddRec and an expression that
2130 // doesn't have a dominance relationship with it.)
2131 if (SCEVFindInsideScop::hasVariant(E, *SE, InvEquivClassVMap, this))
2132 return E;
2134 // Rewrite SCEV.
2135 return SCEVSensitiveParameterRewriter::rewrite(E, *SE, InvEquivClassVMap);
2138 // This table of function names is used to translate parameter names into
2139 // more human-readable names. This makes it easier to interpret Polly analysis
2140 // results.
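// For example, a parameter defined by the OpenCL call get_global_id(0)
// (mangled "_Z13get_global_idj") is printed as "__global_id_0" instead of a
// generic name like "p_3".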
2141 StringMap<std::string> KnownNames = {
2142 {"_Z13get_global_idj", "global_id"},
2143 {"_Z12get_local_idj", "local_id"},
2144 {"_Z15get_global_sizej", "global_size"},
2145 {"_Z14get_local_sizej", "local_size"},
2146 {"_Z12get_work_dimv", "work_dim"},
2147 {"_Z17get_global_offsetj", "global_offset"},
2148 {"_Z12get_group_idj", "group_id"},
2149 {"_Z14get_num_groupsj", "num_groups"},
2152 static std::string getCallParamName(CallInst *Call) {
2153 std::string Result;
2154 raw_string_ostream OS(Result);
2155 std::string Name = Call->getCalledFunction()->getName();
2157 auto Iterator = KnownNames.find(Name);
2158 if (Iterator != KnownNames.end())
2159 Name = "__" + Iterator->getValue();
2160 OS << Name;
2161 for (auto &Operand : Call->arg_operands()) {
2162 ConstantInt *Op = cast<ConstantInt>(&Operand);
2163 OS << "_" << Op->getValue();
2165 OS.flush();
2166 return Result;
2169 void Scop::createParameterId(const SCEV *Parameter) {
2170 assert(Parameters.count(Parameter));
2171 assert(!ParameterIds.count(Parameter));
2173 std::string ParameterName = "p_" + std::to_string(getNumParams() - 1);
2175 if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Parameter)) {
2176 Value *Val = ValueParameter->getValue();
2177 CallInst *Call = dyn_cast<CallInst>(Val);
2179 if (Call && isConstCall(Call)) {
2180 ParameterName = getCallParamName(Call);
2181 } else if (UseInstructionNames) {
2182 // If this parameter references a specific Value and this value has a name
2183 // we use this name as it is likely to be unique and more useful than just
2184 // a number.
2185 if (Val->hasName())
2186 ParameterName = Val->getName();
2187 else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
2188 auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
2189 if (LoadOrigin->hasName()) {
2190 ParameterName += "_loaded_from_";
2191 ParameterName +=
2192 LI->getPointerOperand()->stripInBoundsOffsets()->getName();
2197 ParameterName = getIslCompatibleName("", ParameterName, "");
2200 auto *Id = isl_id_alloc(getIslCtx(), ParameterName.c_str(),
2201 const_cast<void *>((const void *)Parameter));
2202 ParameterIds[Parameter] = Id;
2205 void Scop::addParams(const ParameterSetTy &NewParameters) {
2206 for (const SCEV *Parameter : NewParameters) {
2207 // Normalize the SCEV to get the representing element for an invariant load.
2208 Parameter = extractConstantFactor(Parameter, *SE).second;
2209 Parameter = getRepresentingInvariantLoadSCEV(Parameter);
2211 if (Parameters.insert(Parameter))
2212 createParameterId(Parameter);
2216 __isl_give isl_id *Scop::getIdForParam(const SCEV *Parameter) {
2217 // Normalize the SCEV to get the representing element for an invariant load.
2218 Parameter = getRepresentingInvariantLoadSCEV(Parameter);
2219 return isl_id_copy(ParameterIds.lookup(Parameter));
2222 __isl_give isl_set *
2223 Scop::addNonEmptyDomainConstraints(__isl_take isl_set *C) const {
2224 isl_set *DomainContext = isl_union_set_params(getDomains());
2225 return isl_set_intersect_params(C, DomainContext);
2228 bool Scop::isDominatedBy(const DominatorTree &DT, BasicBlock *BB) const {
2229 return DT.dominates(BB, getEntry());
2232 void Scop::addUserAssumptions(
2233 AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI,
2234 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2235 for (auto &Assumption : AC.assumptions()) {
2236 auto *CI = dyn_cast_or_null<CallInst>(Assumption);
2237 if (!CI || CI->getNumArgOperands() != 1)
2238 continue;
2240 bool InScop = contains(CI);
2241 if (!InScop && !isDominatedBy(DT, CI->getParent()))
2242 continue;
2244 auto *L = LI.getLoopFor(CI->getParent());
2245 auto *Val = CI->getArgOperand(0);
2246 ParameterSetTy DetectedParams;
2247 if (!isAffineConstraint(Val, &R, L, *SE, DetectedParams)) {
2248 ORE.emit(
2249 OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreUserAssumption", CI)
2250 << "Non-affine user assumption ignored.");
2251 continue;
2254 // Collect all newly introduced parameters.
2255 ParameterSetTy NewParams;
2256 for (auto *Param : DetectedParams) {
2257 Param = extractConstantFactor(Param, *SE).second;
2258 Param = getRepresentingInvariantLoadSCEV(Param);
2259 if (Parameters.count(Param))
2260 continue;
2261 NewParams.insert(Param);
2264 SmallVector<isl_set *, 2> ConditionSets;
2265 auto *TI = InScop ? CI->getParent()->getTerminator() : nullptr;
2266 BasicBlock *BB = InScop ? CI->getParent() : getRegion().getEntry();
2267 auto *Dom = InScop ? DomainMap[BB].copy() : isl_set_copy(Context);
2268 assert(Dom && "Cannot propagate a nullptr.");
2269 bool Valid = buildConditionSets(*this, BB, Val, TI, L, Dom,
2270 InvalidDomainMap, ConditionSets);
2271 isl_set_free(Dom);
2273 if (!Valid)
2274 continue;
2276 isl_set *AssumptionCtx = nullptr;
2277 if (InScop) {
2278 AssumptionCtx = isl_set_complement(isl_set_params(ConditionSets[1]));
2279 isl_set_free(ConditionSets[0]);
2280 } else {
2281 AssumptionCtx = isl_set_complement(ConditionSets[1]);
2282 AssumptionCtx = isl_set_intersect(AssumptionCtx, ConditionSets[0]);
2285 // Project out newly introduced parameters as they are not otherwise useful.
2286 if (!NewParams.empty()) {
2287 for (unsigned u = 0; u < isl_set_n_param(AssumptionCtx); u++) {
2288 auto *Id = isl_set_get_dim_id(AssumptionCtx, isl_dim_param, u);
2289 auto *Param = static_cast<const SCEV *>(isl_id_get_user(Id));
2290 isl_id_free(Id);
2292 if (!NewParams.count(Param))
2293 continue;
2295 AssumptionCtx =
2296 isl_set_project_out(AssumptionCtx, isl_dim_param, u--, 1);
2299 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "UserAssumption", CI)
2300 << "Use user assumption: " << stringFromIslObj(AssumptionCtx));
2301 Context = isl_set_intersect(Context, AssumptionCtx);
2305 void Scop::addUserContext() {
2306 if (UserContextStr.empty())
2307 return;
2309 isl_set *UserContext =
2310 isl_set_read_from_str(getIslCtx(), UserContextStr.c_str());
2311 isl_space *Space = getParamSpace();
2312 if (isl_space_dim(Space, isl_dim_param) !=
2313 isl_set_dim(UserContext, isl_dim_param)) {
2314 auto SpaceStr = isl_space_to_str(Space);
2315 errs() << "Error: the context provided in -polly-context does not have the "
2316 << "same number of dimensions as the computed context. Due to this "
2317 << "mismatch, the -polly-context option is ignored. Please provide "
2318 << "the context in the parameter space: " << SpaceStr << ".\n";
2319 free(SpaceStr);
2320 isl_set_free(UserContext);
2321 isl_space_free(Space);
2322 return;
2325 for (unsigned i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
2326 auto *NameContext = isl_set_get_dim_name(Context, isl_dim_param, i);
2327 auto *NameUserContext = isl_set_get_dim_name(UserContext, isl_dim_param, i);
2329 if (strcmp(NameContext, NameUserContext) != 0) {
2330 auto SpaceStr = isl_space_to_str(Space);
2331 errs() << "Error: the name of dimension " << i
2332 << " provided in -polly-context "
2333 << "is '" << NameUserContext << "', but the name in the computed "
2334 << "context is '" << NameContext
2335 << "'. Due to this name mismatch, "
2336 << "the -polly-context option is ignored. Please provide "
2337 << "the context in the parameter space: " << SpaceStr << ".\n";
2338 free(SpaceStr);
2339 isl_set_free(UserContext);
2340 isl_space_free(Space);
2341 return;
2344 UserContext =
2345 isl_set_set_dim_id(UserContext, isl_dim_param, i,
2346 isl_space_get_dim_id(Space, isl_dim_param, i));
2349 Context = isl_set_intersect(Context, UserContext);
2350 isl_space_free(Space);
2353 void Scop::buildInvariantEquivalenceClasses() {
2354 DenseMap<std::pair<const SCEV *, Type *>, LoadInst *> EquivClasses;
2356 const InvariantLoadsSetTy &RIL = getRequiredInvariantLoads();
2357 for (LoadInst *LInst : RIL) {
2358 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
2360 Type *Ty = LInst->getType();
2361 LoadInst *&ClassRep = EquivClasses[std::make_pair(PointerSCEV, Ty)];
2362 if (ClassRep) {
2363 InvEquivClassVMap[LInst] = ClassRep;
2364 continue;
2367 ClassRep = LInst;
2368 InvariantEquivClasses.emplace_back(
2369 InvariantEquivClassTy{PointerSCEV, MemoryAccessList(), nullptr, Ty});
2373 void Scop::buildContext() {
2374 isl_space *Space = isl_space_params_alloc(getIslCtx(), 0);
2375 Context = isl_set_universe(isl_space_copy(Space));
2376 InvalidContext = isl_set_empty(isl_space_copy(Space));
2377 AssumedContext = isl_set_universe(Space);
2380 void Scop::addParameterBounds() {
2381 unsigned PDim = 0;
2382 for (auto *Parameter : Parameters) {
2383 ConstantRange SRange = SE->getSignedRange(Parameter);
2384 Context =
2385 addRangeBoundsToSet(give(Context), SRange, PDim++, isl::dim::param)
2386 .release();
2390 // We use the outermost dimension to generate GPU transfers for Fortran arrays
2391 // even when the array bounds are not known statically. To do so, we need the
2392 // outermost dimension information. We add this into the context so that the
2393 // outermost dimension is available during codegen.
2394 // We currently do not care about dimensions other than the outermost
2395 // dimension since they do not affect transfers.
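// For example, for a Fortran array whose outermost dimension has runtime
// size s, the context is conceptually extended to [..., s] -> { : s >= 0 }.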
2396 static isl_set *addFortranArrayOutermostDimParams(__isl_give isl_set *Context,
2397 Scop::array_range Arrays) {
2399 std::vector<isl_id *> OutermostSizeIds;
2400 for (auto Array : Arrays) {
2401 // To check if an array is a Fortran array, we check if it has an isl_pw_aff
2402 // for its outermost dimension. Fortran arrays will have this since the
2403 // outermost dimension size can be picked up from their runtime description.
2404 // TODO: actually need to check if it has a FAD, but for now this works.
2405 if (Array->getNumberOfDimensions() > 0) {
2406 isl_pw_aff *PwAff = Array->getDimensionSizePw(0).release();
2407 if (!PwAff)
2408 continue;
2410 isl_id *Id = isl_pw_aff_get_dim_id(PwAff, isl_dim_param, 0);
2411 isl_pw_aff_free(PwAff);
2412 assert(Id && "Invalid Id for PwAff expression in Fortran array");
2413 OutermostSizeIds.push_back(Id);
2417 const int NumTrueParams = isl_set_dim(Context, isl_dim_param);
2418 Context = isl_set_add_dims(Context, isl_dim_param, OutermostSizeIds.size());
2420 for (size_t i = 0; i < OutermostSizeIds.size(); i++) {
2421 Context = isl_set_set_dim_id(Context, isl_dim_param, NumTrueParams + i,
2422 OutermostSizeIds[i]);
2423 Context =
2424 isl_set_lower_bound_si(Context, isl_dim_param, NumTrueParams + i, 0);
2427 return Context;
2430 void Scop::realignParams() {
2431 if (PollyIgnoreParamBounds)
2432 return;
2434 // Add all parameters into a common model.
2435 isl_space *Space = isl_space_params_alloc(getIslCtx(), ParameterIds.size());
2437 unsigned PDim = 0;
2438 for (const auto *Parameter : Parameters) {
2439 isl_id *id = getIdForParam(Parameter);
2440 Space = isl_space_set_dim_id(Space, isl_dim_param, PDim++, id);
2443 // Align the parameters of all data structures to the model.
2444 Context = isl_set_align_params(Context, Space);
2446 // Add the outermost dimension of the Fortran arrays into the Context.
2447 // See the description of the function for more information.
2448 Context = addFortranArrayOutermostDimParams(Context, arrays());
2450 // As all parameters are known add bounds to them.
2451 addParameterBounds();
2453 for (ScopStmt &Stmt : *this)
2454 Stmt.realignParams();
2455 // Simplify the schedule according to the context too.
2456 Schedule = isl_schedule_gist_domain_params(Schedule, getContext());
2459 static __isl_give isl_set *
2460 simplifyAssumptionContext(__isl_take isl_set *AssumptionContext,
2461 const Scop &S) {
2462 // If we have modeled all blocks in the SCoP that have side effects we can
2463 // simplify the context with the constraints that are needed for anything to
2464 // be executed at all. However, if we have error blocks in the SCoP we already
2465 // assumed some parameter combinations cannot occur and removed them from the
2466 // domains, thus we cannot use the remaining domain to simplify the
2467 // assumptions.
2468 if (!S.hasErrorBlock()) {
2469 isl_set *DomainParameters = isl_union_set_params(S.getDomains());
2470 AssumptionContext =
2471 isl_set_gist_params(AssumptionContext, DomainParameters);
2474 AssumptionContext = isl_set_gist_params(AssumptionContext, S.getContext());
2475 return AssumptionContext;
2478 void Scop::simplifyContexts() {
2479 // The parameter constraints of the iteration domains give us a set of
2480 // constraints that need to hold for all cases where at least a single
2481 // statement iteration is executed in the whole scop. We now simplify the
2482 // assumed context under the assumption that such constraints hold and at
2483 // least a single statement iteration is executed. For cases where no
2484 // statement instances are executed, the assumptions we have taken about
2485 // the executed code do not matter and can be changed.
2487 // WARNING: This only holds if the assumptions we have taken do not reduce
2488 // the set of statement instances that are executed. Otherwise we
2489 // may run into a case where the iteration domains suggest that
2490 // for a certain set of parameter constraints no code is executed,
2491 // but in the original program some computation would have been
2492 // performed. In such a case, modifying the run-time conditions and
2493 // possibly influencing the run-time check may cause certain scops
2494 // to not be executed.
2496 // Example:
2498 // When delinearizing the following code:
2500 // for (long i = 0; i < 100; i++)
2501 // for (long j = 0; j < m; j++)
2502 // A[i+p][j] = 1.0;
2504 // we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
2505 // otherwise we would access out-of-bounds data. Now, knowing that code is
2506 // only executed for the case m >= 1, it is sufficient to assume p >= 0.
2507 AssumedContext = simplifyAssumptionContext(AssumedContext, *this);
2508 InvalidContext = isl_set_align_params(InvalidContext, getParamSpace());
2511 /// Add the minimal/maximal access in @p Set to @p MinMaxAccesses.
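/// For example, for accesses to A[i] with 0 <= i < n, the minimal access is
/// A[0] and the lexicographic maximum A[n - 1] is adjusted by one to A[n],
/// so the pair (A[0], A[n]) encloses the accessed region for the run-time
/// alias checks.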
2512 static isl::stat
2513 buildMinMaxAccess(isl::set Set, Scop::MinMaxVectorTy &MinMaxAccesses, Scop &S) {
2514 isl::pw_multi_aff MinPMA, MaxPMA;
2515 isl::pw_aff LastDimAff;
2516 isl::aff OneAff;
2517 unsigned Pos;
2518 isl::ctx Ctx = Set.get_ctx();
2520 Set = Set.remove_divs();
2522 if (isl_set_n_basic_set(Set.get()) >= MaxDisjunctsInDomain)
2523 return isl::stat::error;
2525 // Restrict the number of parameters involved in the access as the lexmin/
2526 // lexmax computation will take too long if this number is high.
2528 // Experiments with a simple test case using an i7 4800MQ:
2530 // #Parameters involved | Time (in sec)
2531 // 6 | 0.01
2532 // 7 | 0.04
2533 // 8 | 0.12
2534 // 9 | 0.40
2535 // 10 | 1.54
2536 // 11 | 6.78
2537 // 12 | 30.38
2539 if (isl_set_n_param(Set.get()) > RunTimeChecksMaxParameters) {
2540 unsigned InvolvedParams = 0;
2541 for (unsigned u = 0, e = isl_set_n_param(Set.get()); u < e; u++)
2542 if (Set.involves_dims(isl::dim::param, u, 1))
2543 InvolvedParams++;
2545 if (InvolvedParams > RunTimeChecksMaxParameters)
2546 return isl::stat::error;
2549 if (isl_set_n_basic_set(Set.get()) > RunTimeChecksMaxAccessDisjuncts)
2550 return isl::stat::error;
2552 MinPMA = Set.lexmin_pw_multi_aff();
2553 MaxPMA = Set.lexmax_pw_multi_aff();
2555 if (isl_ctx_last_error(Ctx.get()) == isl_error_quota)
2556 return isl::stat::error;
2558 MinPMA = MinPMA.coalesce();
2559 MaxPMA = MaxPMA.coalesce();
2561 // Adjust the last dimension of the maximal access by one as we want to
2562 // enclose the accessed memory region by MinPMA and MaxPMA. The pointer
2563 // we test during code generation might now point after the end of the
2564 // allocated array but we will never dereference it anyway.
2565 assert(MaxPMA.dim(isl::dim::out) && "Assumed at least one output dimension");
2566 Pos = MaxPMA.dim(isl::dim::out) - 1;
2567 LastDimAff = MaxPMA.get_pw_aff(Pos);
2568 OneAff = isl::aff(isl::local_space(LastDimAff.get_domain_space()));
2569 OneAff = OneAff.add_constant_si(1);
2570 LastDimAff = LastDimAff.add(OneAff);
2571 MaxPMA = MaxPMA.set_pw_aff(Pos, LastDimAff);
2573 MinMaxAccesses.push_back(std::make_pair(MinPMA.copy(), MaxPMA.copy()));
2575 return isl::stat::ok;
2578 static __isl_give isl_set *getAccessDomain(MemoryAccess *MA) {
2579 isl_set *Domain = MA->getStatement()->getDomain();
2580 Domain = isl_set_project_out(Domain, isl_dim_set, 0, isl_set_n_dim(Domain));
2581 return isl_set_reset_tuple_id(Domain);
2584 /// Wrapper function to calculate minimal/maximal accesses to each array.
2585 static bool calculateMinMaxAccess(Scop::AliasGroupTy AliasGroup, Scop &S,
2586 Scop::MinMaxVectorTy &MinMaxAccesses) {
2588 MinMaxAccesses.reserve(AliasGroup.size());
2590 isl::union_set Domains = give(S.getDomains());
2591 isl::union_map Accesses = isl::union_map::empty(give(S.getParamSpace()));
2593 for (MemoryAccess *MA : AliasGroup)
2594 Accesses = Accesses.add_map(give(MA->getAccessRelation().release()));
2596 Accesses = Accesses.intersect_domain(Domains);
2597 isl::union_set Locations = Accesses.range();
2598 Locations = Locations.coalesce();
2599 Locations = Locations.detect_equalities();
2601 auto Lambda = [&MinMaxAccesses, &S](isl::set Set) -> isl::stat {
2602 return buildMinMaxAccess(Set, MinMaxAccesses, S);
2604 return Locations.foreach_set(Lambda) == isl::stat::ok;
2607 /// Helper to treat non-affine regions and basic blocks the same.
2609 ///{
2611 /// Return the block that is the representing block for @p RN.
2612 static inline BasicBlock *getRegionNodeBasicBlock(RegionNode *RN) {
2613 return RN->isSubRegion() ? RN->getNodeAs<Region>()->getEntry()
2614 : RN->getNodeAs<BasicBlock>();
2617 /// Return the @p idx'th block that is executed after @p RN.
2618 static inline BasicBlock *
2619 getRegionNodeSuccessor(RegionNode *RN, TerminatorInst *TI, unsigned idx) {
2620 if (RN->isSubRegion()) {
2621 assert(idx == 0);
2622 return RN->getNodeAs<Region>()->getExit();
2624 return TI->getSuccessor(idx);
2627 /// Return the smallest loop surrounding @p RN.
2628 static inline Loop *getRegionNodeLoop(RegionNode *RN, LoopInfo &LI) {
2629 if (!RN->isSubRegion()) {
2630 BasicBlock *BB = RN->getNodeAs<BasicBlock>();
2631 Loop *L = LI.getLoopFor(BB);
2633 // Unreachable statements are not considered to belong to a LLVM loop, as
2634 // they are not part of an actual loop in the control flow graph.
2635 // Nevertheless, we handle certain unreachable statements that are common
2636 // when modeling run-time bounds checks as being part of the loop to be
2637 // able to model them and to later eliminate the run-time bounds checks.
2639 // Specifically, for basic blocks that terminate in an unreachable and
2640 // where the immediate predecessor is part of a loop, we assume these
2641 // basic blocks belong to the loop the predecessor belongs to. This
2642 // allows us to model the following code.
2644 // for (i = 0; i < N; i++) {
2645 // if (i > 1024)
2646 // abort(); <- this abort might be translated to an
2647 // unreachable
2649 // A[i] = ...
2650 // }
2651 if (!L && isa<UnreachableInst>(BB->getTerminator()) && BB->getPrevNode())
2652 L = LI.getLoopFor(BB->getPrevNode());
2653 return L;
2656 Region *NonAffineSubRegion = RN->getNodeAs<Region>();
2657 Loop *L = LI.getLoopFor(NonAffineSubRegion->getEntry());
2658 while (L && NonAffineSubRegion->contains(L))
2659 L = L->getParentLoop();
2660 return L;
2663 /// Get the number of blocks in @p L.
2665 /// The number of blocks in a loop is the number of basic blocks actually
2666 /// belonging to the loop, as well as all single basic blocks that the loop
2667 /// exits to and which terminate in an unreachable instruction. We do not
2668 /// allow such basic blocks in the exit of a scop, hence they belong to the
2669 /// scop and represent run-time conditions which we want to model and
2670 /// subsequently speculate away.
2672 /// @see getRegionNodeLoop for additional details.
2673 unsigned getNumBlocksInLoop(Loop *L) {
2674 unsigned NumBlocks = L->getNumBlocks();
2675 SmallVector<llvm::BasicBlock *, 4> ExitBlocks;
2676 L->getExitBlocks(ExitBlocks);
2678 for (auto ExitBlock : ExitBlocks) {
2679 if (isa<UnreachableInst>(ExitBlock->getTerminator()))
2680 NumBlocks++;
2682 return NumBlocks;
2685 static inline unsigned getNumBlocksInRegionNode(RegionNode *RN) {
2686 if (!RN->isSubRegion())
2687 return 1;
2689 Region *R = RN->getNodeAs<Region>();
2690 return std::distance(R->block_begin(), R->block_end());
2693 static bool containsErrorBlock(RegionNode *RN, const Region &R, LoopInfo &LI,
2694 const DominatorTree &DT) {
2695 if (!RN->isSubRegion())
2696 return isErrorBlock(*RN->getNodeAs<BasicBlock>(), R, LI, DT);
2697 for (BasicBlock *BB : RN->getNodeAs<Region>()->blocks())
2698 if (isErrorBlock(*BB, R, LI, DT))
2699 return true;
2700 return false;
2703 ///}
2705 static inline __isl_give isl_set *addDomainDimId(__isl_take isl_set *Domain,
2706 unsigned Dim, Loop *L) {
2707 Domain = isl_set_lower_bound_si(Domain, isl_dim_set, Dim, -1);
2708 isl_id *DimId =
2709 isl_id_alloc(isl_set_get_ctx(Domain), nullptr, static_cast<void *>(L));
2710 return isl_set_set_dim_id(Domain, isl_dim_set, Dim, DimId);
2713 __isl_give isl_set *Scop::getDomainConditions(const ScopStmt *Stmt) const {
2714 return getDomainConditions(Stmt->getEntryBlock());
2717 __isl_give isl_set *Scop::getDomainConditions(BasicBlock *BB) const {
2718 auto DIt = DomainMap.find(BB);
2719 if (DIt != DomainMap.end())
2720 return DIt->getSecond().copy();
2722 auto &RI = *R.getRegionInfo();
2723 auto *BBR = RI.getRegionFor(BB);
2724 while (BBR->getEntry() == BB)
2725 BBR = BBR->getParent();
2726 return getDomainConditions(BBR->getEntry());
2729 bool Scop::buildDomains(Region *R, DominatorTree &DT, LoopInfo &LI,
2730 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2732 bool IsOnlyNonAffineRegion = isNonAffineSubRegion(R);
2733 auto *EntryBB = R->getEntry();
2734 auto *L = IsOnlyNonAffineRegion ? nullptr : LI.getLoopFor(EntryBB);
2735 int LD = getRelativeLoopDepth(L);
2736 auto *S = isl_set_universe(isl_space_set_alloc(getIslCtx(), 0, LD + 1));
2738 while (LD-- >= 0) {
2739 S = addDomainDimId(S, LD + 1, L);
2740 L = L->getParentLoop();
2743 InvalidDomainMap[EntryBB] = isl::manage(isl_set_empty(isl_set_get_space(S)));
2744 DomainMap[EntryBB] = isl::manage(S);
2746 if (IsOnlyNonAffineRegion)
2747 return !containsErrorBlock(R->getNode(), *R, LI, DT);
2749 if (!buildDomainsWithBranchConstraints(R, DT, LI, InvalidDomainMap))
2750 return false;
2752 if (!propagateDomainConstraints(R, DT, LI, InvalidDomainMap))
2753 return false;
2755 // Error blocks and blocks dominated by them have been assumed to never be
2756 // executed. Representing them in the Scop does not add any value. In fact,
2757 // it is likely to cause issues during construction of the ScopStmts. The
2758 // contents of error blocks have not been verified to be expressible and
2759 // will cause problems when building up a ScopStmt for them.
2760 // Furthermore, basic blocks dominated by error blocks may reference
2761 // instructions in the error block which, if the error block is not modeled,
2762 // can themselves not be constructed properly. To this end we will replace
2763 // the domains of error blocks and those only reachable via error blocks
2764 // with an empty set. Additionally, we will record for each block under which
2765 // parameter combination it would be reached via an error block in its
2766 // InvalidDomain. This information is needed during load hoisting.
2767 if (!propagateInvalidStmtDomains(R, DT, LI, InvalidDomainMap))
2768 return false;
2770 return true;
2773 /// Adjust the dimensions of @p Dom that was constructed for @p OldL
2774 /// to be compatible with domains constructed for loop @p NewL.
2776 /// This function assumes @p NewL and @p OldL are equal or there is a CFG
2777 /// edge from @p OldL to @p NewL.
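/// For example, when an edge leaves the innermost of two nested loops, a
/// domain over [i, j] is projected onto its outer dimension [i]; when an
/// edge enters a new loop, a fresh dimension for the new loop is appended
/// instead.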
2778 static __isl_give isl_set *adjustDomainDimensions(Scop &S,
2779 __isl_take isl_set *Dom,
2780 Loop *OldL, Loop *NewL) {
2782 // If the loops are the same there is nothing to do.
2783 if (NewL == OldL)
2784 return Dom;
2786 int OldDepth = S.getRelativeLoopDepth(OldL);
2787 int NewDepth = S.getRelativeLoopDepth(NewL);
2788 // If both loops are non-affine loops there is nothing to do.
2789 if (OldDepth == -1 && NewDepth == -1)
2790 return Dom;
2792 // Distinguish three cases:
2793 // 1) The depth is the same but the loops are not.
2794 // => One loop was left, one was entered.
2795 // 2) The depth increased from OldL to NewL.
2796 // => One loop was entered, none was left.
2797 // 3) The depth decreased from OldL to NewL.
2798 // => Loops were left; the difference of the depths defines how many.
2799 if (OldDepth == NewDepth) {
2800 assert(OldL->getParentLoop() == NewL->getParentLoop());
2801 Dom = isl_set_project_out(Dom, isl_dim_set, NewDepth, 1);
2802 Dom = isl_set_add_dims(Dom, isl_dim_set, 1);
2803 Dom = addDomainDimId(Dom, NewDepth, NewL);
2804 } else if (OldDepth < NewDepth) {
2805 assert(OldDepth + 1 == NewDepth);
2806 auto &R = S.getRegion();
2807 (void)R;
2808 assert(NewL->getParentLoop() == OldL ||
2809 ((!OldL || !R.contains(OldL)) && R.contains(NewL)));
2810 Dom = isl_set_add_dims(Dom, isl_dim_set, 1);
2811 Dom = addDomainDimId(Dom, NewDepth, NewL);
2812 } else {
2813 assert(OldDepth > NewDepth);
2814 int Diff = OldDepth - NewDepth;
2815 int NumDim = isl_set_n_dim(Dom);
2816 assert(NumDim >= Diff);
2817 Dom = isl_set_project_out(Dom, isl_dim_set, NumDim - Diff, Diff);
2820 return Dom;
2823 bool Scop::propagateInvalidStmtDomains(
2824 Region *R, DominatorTree &DT, LoopInfo &LI,
2825 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2827 ReversePostOrderTraversal<Region *> RTraversal(R);
2828 for (auto *RN : RTraversal) {
2830 // Recurse for affine subregions but go on for basic blocks and non-affine
2831 // subregions.
2832 if (RN->isSubRegion()) {
2833 Region *SubRegion = RN->getNodeAs<Region>();
2834 if (!isNonAffineSubRegion(SubRegion)) {
2835 propagateInvalidStmtDomains(SubRegion, DT, LI, InvalidDomainMap);
2836 continue;
2840 bool ContainsErrorBlock = containsErrorBlock(RN, getRegion(), LI, DT);
2841 BasicBlock *BB = getRegionNodeBasicBlock(RN);
2842 isl::set &Domain = DomainMap[BB];
2843 assert(Domain && "Cannot propagate a nullptr");
2845 isl::set InvalidDomain = InvalidDomainMap[BB];
2847 bool IsInvalidBlock = ContainsErrorBlock || Domain.is_subset(InvalidDomain);
2849 if (!IsInvalidBlock) {
2850 InvalidDomain = InvalidDomain.intersect(Domain);
2851 } else {
2852 InvalidDomain = Domain;
2853 isl::set DomPar = Domain.params();
2854 recordAssumption(ERRORBLOCK, DomPar.release(),
2855 BB->getTerminator()->getDebugLoc(), AS_RESTRICTION);
2856 Domain = nullptr;
2859 if (InvalidDomain.is_empty()) {
2860 InvalidDomainMap[BB] = InvalidDomain;
2861 continue;
2864 auto *BBLoop = getRegionNodeLoop(RN, LI);
2865 auto *TI = BB->getTerminator();
2866 unsigned NumSuccs = RN->isSubRegion() ? 1 : TI->getNumSuccessors();
2867 for (unsigned u = 0; u < NumSuccs; u++) {
2868 auto *SuccBB = getRegionNodeSuccessor(RN, TI, u);
2870 // Skip successors outside the SCoP.
2871 if (!contains(SuccBB))
2872 continue;
2874 // Skip backedges.
2875 if (DT.dominates(SuccBB, BB))
2876 continue;
2878 Loop *SuccBBLoop = getFirstNonBoxedLoopFor(SuccBB, LI, getBoxedLoops());
2880 auto *AdjustedInvalidDomain = adjustDomainDimensions(
2881 *this, InvalidDomain.copy(), BBLoop, SuccBBLoop);
2883 auto *SuccInvalidDomain = InvalidDomainMap[SuccBB].copy();
2884 SuccInvalidDomain =
2885 isl_set_union(SuccInvalidDomain, AdjustedInvalidDomain);
2886 SuccInvalidDomain = isl_set_coalesce(SuccInvalidDomain);
2887 unsigned NumConjuncts = isl_set_n_basic_set(SuccInvalidDomain);
2889 InvalidDomainMap[SuccBB] = isl::manage(SuccInvalidDomain);
2891 // Check if the maximal number of domain disjunctions was reached.
2892 // In case this happens we will bail.
2893 if (NumConjuncts < MaxDisjunctsInDomain)
2894 continue;
2896 InvalidDomainMap.erase(BB);
2897 invalidate(COMPLEXITY, TI->getDebugLoc(), TI->getParent());
2898 return false;
2901 InvalidDomainMap[BB] = InvalidDomain;
2904 return true;
2907 void Scop::propagateDomainConstraintsToRegionExit(
2908 BasicBlock *BB, Loop *BBLoop,
2909 SmallPtrSetImpl<BasicBlock *> &FinishedExitBlocks, LoopInfo &LI,
2910 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2912 // Check if the block @p BB is the entry of a region. If so we propagate its
2913 // domain to the exit block of the region. Otherwise we are done.
2914 auto *RI = R.getRegionInfo();
2915 auto *BBReg = RI ? RI->getRegionFor(BB) : nullptr;
2916 auto *ExitBB = BBReg ? BBReg->getExit() : nullptr;
2917 if (!BBReg || BBReg->getEntry() != BB || !contains(ExitBB))
2918 return;
2920 // Do not propagate the domain if there is a loop backedge inside the region
2921 // that would prevent the exit block from being executed.
2922 auto *L = BBLoop;
2923 while (L && contains(L)) {
2924 SmallVector<BasicBlock *, 4> LatchBBs;
2925 L->getLoopLatches(LatchBBs);
2926 for (auto *LatchBB : LatchBBs)
2927 if (BB != LatchBB && BBReg->contains(LatchBB))
2928 return;
2929 L = L->getParentLoop();
2932 isl::set Domain = DomainMap[BB];
2933 assert(Domain && "Cannot propagate a nullptr");
2935 Loop *ExitBBLoop = getFirstNonBoxedLoopFor(ExitBB, LI, getBoxedLoops());
2937 // Since the dimensions of @p BB and @p ExitBB might be different we have to
2938 // adjust the domain before we can propagate it.
2939 isl::set AdjustedDomain = isl::manage(
2940 adjustDomainDimensions(*this, Domain.copy(), BBLoop, ExitBBLoop));
2941 isl::set &ExitDomain = DomainMap[ExitBB];
2943 // If the exit domain is not yet created we set it otherwise we "add" the
2944 // current domain.
2945 ExitDomain = ExitDomain ? AdjustedDomain.unite(ExitDomain) : AdjustedDomain;
2947 // Initialize the invalid domain.
2948 InvalidDomainMap[ExitBB] = ExitDomain.empty(ExitDomain.get_space());
2950 FinishedExitBlocks.insert(ExitBB);
2953 bool Scop::buildDomainsWithBranchConstraints(
2954 Region *R, DominatorTree &DT, LoopInfo &LI,
2955 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2957 // To create the domain for each block in R we iterate over all blocks and
2958 // subregions in R and propagate the conditions under which the current region
2959 // element is executed. To this end we iterate in reverse post order over R as
2960 // it ensures that we first visit all predecessors of a region node (either a
2961 // basic block or a subregion) before we visit the region node itself.
2962 // Initially, only the domain for the SCoP region entry block is set and from
2963 // there we propagate the current domain to all successors, however we add the
2964 // condition that the successor is actually executed next.
2965 // As we are only interested in non-loop carried constraints here we can
2966 // simply skip loop back edges.
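// For example, if a block BB with domain D terminates in a conditional
// branch on "i > 5", its true successor is reached under D intersected with
// { [i] : i > 5 } and its false successor under D intersected with
// { [i] : i <= 5 }; these sets are unioned with whatever was propagated
// along other incoming edges.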
2968 SmallPtrSet<BasicBlock *, 8> FinishedExitBlocks;
2969 ReversePostOrderTraversal<Region *> RTraversal(R);
2970 for (auto *RN : RTraversal) {
2972 // Recurse for affine subregions but go on for basic blocks and non-affine
2973 // subregions.
2974 if (RN->isSubRegion()) {
2975 Region *SubRegion = RN->getNodeAs<Region>();
2976 if (!isNonAffineSubRegion(SubRegion)) {
2977 if (!buildDomainsWithBranchConstraints(SubRegion, DT, LI,
2978 InvalidDomainMap))
2979 return false;
2980 continue;
2984 if (containsErrorBlock(RN, getRegion(), LI, DT))
2985 HasErrorBlock = true;
2987 BasicBlock *BB = getRegionNodeBasicBlock(RN);
2988 TerminatorInst *TI = BB->getTerminator();
2990 if (isa<UnreachableInst>(TI))
2991 continue;
2993 isl::set Domain = DomainMap.lookup(BB);
2994 if (!Domain)
2995 continue;
2996 MaxLoopDepth = std::max(MaxLoopDepth, isl_set_n_dim(Domain.get()));
2998 auto *BBLoop = getRegionNodeLoop(RN, LI);
2999 // Propagate the domain from BB directly to blocks that have a superset
3000 // domain, at the moment only region exit nodes of regions that start in BB.
3001 propagateDomainConstraintsToRegionExit(BB, BBLoop, FinishedExitBlocks, LI,
3002 InvalidDomainMap);
3004 // If all successors of BB have been set a domain through the propagation
3005 // above we do not need to build condition sets but can just skip this
3006 // block. However, it is important to note that this is a local property
3007 // with regards to the region @p R. To this end FinishedExitBlocks is a
3008 // local variable.
3009 auto IsFinishedRegionExit = [&FinishedExitBlocks](BasicBlock *SuccBB) {
3010 return FinishedExitBlocks.count(SuccBB);
3012 if (std::all_of(succ_begin(BB), succ_end(BB), IsFinishedRegionExit))
3013 continue;
3015 // Build the condition sets for the successor nodes of the current region
3016 // node. If it is a non-affine subregion we will always execute the single
3017 // exit node, hence the single entry node domain is the condition set. For
3018 // basic blocks we use the helper function buildConditionSets.
3019 SmallVector<isl_set *, 8> ConditionSets;
3020 if (RN->isSubRegion())
3021 ConditionSets.push_back(Domain.copy());
3022 else if (!buildConditionSets(*this, BB, TI, BBLoop, Domain.get(),
3023 InvalidDomainMap, ConditionSets))
3024 return false;
3026 // Now iterate over the successors and set their initial domain based on
3027 // their condition set. We skip back edges here and have to be careful when
3028 // we leave a loop not to keep constraints over a dimension that doesn't
3029 // exist anymore.
3030 assert(RN->isSubRegion() || TI->getNumSuccessors() == ConditionSets.size());
3031 for (unsigned u = 0, e = ConditionSets.size(); u < e; u++) {
3032 isl::set CondSet = isl::manage(ConditionSets[u]);
3033 BasicBlock *SuccBB = getRegionNodeSuccessor(RN, TI, u);
3035 // Skip blocks outside the region.
3036 if (!contains(SuccBB))
3037 continue;
3039 // If we propagate the domain of some block to "SuccBB" we do not have to
3040 // adjust the domain.
3041 if (FinishedExitBlocks.count(SuccBB))
3042 continue;
3044 // Skip back edges.
3045 if (DT.dominates(SuccBB, BB))
3046 continue;
3048 Loop *SuccBBLoop = getFirstNonBoxedLoopFor(SuccBB, LI, getBoxedLoops());
3050 CondSet = isl::manage(
3051 adjustDomainDimensions(*this, CondSet.copy(), BBLoop, SuccBBLoop));
3053 // Set the domain for the successor or merge it with an existing domain in
3054 // case there are multiple paths (without loop back edges) to the
3055 // successor block.
3056 isl::set &SuccDomain = DomainMap[SuccBB];
3058 if (SuccDomain) {
3059 SuccDomain = SuccDomain.unite(CondSet).coalesce();
3060 } else {
3061 // Initialize the invalid domain.
3062 InvalidDomainMap[SuccBB] = CondSet.empty(CondSet.get_space());
3063 SuccDomain = CondSet;
3066 SuccDomain = SuccDomain.detect_equalities();
3068 // Check if the maximal number of domain disjunctions was reached.
3069 // In case this happens we will clean up and bail.
3070 if (isl_set_n_basic_set(SuccDomain.get()) < MaxDisjunctsInDomain)
3071 continue;
3073 invalidate(COMPLEXITY, DebugLoc());
3074 while (++u < ConditionSets.size())
3075 isl_set_free(ConditionSets[u]);
3076 return false;
3080 return true;
3083 __isl_give isl_set *
3084 Scop::getPredecessorDomainConstraints(BasicBlock *BB,
3085 __isl_keep isl_set *Domain,
3086 DominatorTree &DT, LoopInfo &LI) {
3087 // If @p BB is the ScopEntry we are done
3088 if (R.getEntry() == BB)
3089 return isl_set_universe(isl_set_get_space(Domain));
3091 // The region info of this function.
3092 auto &RI = *R.getRegionInfo();
3094 Loop *BBLoop = getFirstNonBoxedLoopFor(BB, LI, getBoxedLoops());
3096 // A domain to collect all predecessor domains, thus all conditions under
3097 // which the block is executed. To this end we start with the empty domain.
3098 isl_set *PredDom = isl_set_empty(isl_set_get_space(Domain));
3100   // Set of regions whose entry block domain has been propagated to BB.
3101   // All predecessors inside any of these regions can be skipped.
3102 SmallSet<Region *, 8> PropagatedRegions;
3104 for (auto *PredBB : predecessors(BB)) {
3105 // Skip backedges.
3106 if (DT.dominates(BB, PredBB))
3107 continue;
3109 // If the predecessor is in a region we used for propagation we can skip it.
3110 auto PredBBInRegion = [PredBB](Region *PR) { return PR->contains(PredBB); };
3111 if (std::any_of(PropagatedRegions.begin(), PropagatedRegions.end(),
3112 PredBBInRegion)) {
3113 continue;
3116 // Check if there is a valid region we can use for propagation, thus look
3117 // for a region that contains the predecessor and has @p BB as exit block.
3118 auto *PredR = RI.getRegionFor(PredBB);
3119 while (PredR->getExit() != BB && !PredR->contains(BB))
3120       PredR = PredR->getParent();
3122 // If a valid region for propagation was found use the entry of that region
3123 // for propagation, otherwise the PredBB directly.
3124 if (PredR->getExit() == BB) {
3125 PredBB = PredR->getEntry();
3126 PropagatedRegions.insert(PredR);
3129 auto *PredBBDom = getDomainConditions(PredBB);
3130 Loop *PredBBLoop = getFirstNonBoxedLoopFor(PredBB, LI, getBoxedLoops());
3132 PredBBDom = adjustDomainDimensions(*this, PredBBDom, PredBBLoop, BBLoop);
3134 PredDom = isl_set_union(PredDom, PredBBDom);
3137 return PredDom;
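// A minimal sketch of the accumulation pattern above, with hypothetical
// arguments: predecessor domains are folded into an initially empty set, so
// the result is exactly the union of all conditions under which the block is
// reached. The caller keeps ownership of the two inputs.
static isl_set *unionOfPredecessorDomainsSketch(isl_set *PredA,
                                                isl_set *PredB) {
  isl_set *Acc = isl_set_empty(isl_set_get_space(PredA));
  Acc = isl_set_union(Acc, isl_set_copy(PredA));
  Acc = isl_set_union(Acc, isl_set_copy(PredB));
  return Acc;
}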
3140 bool Scop::propagateDomainConstraints(
3141 Region *R, DominatorTree &DT, LoopInfo &LI,
3142 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
3143   // Iterate over the region R and propagate the domain constraints from the
3144 // predecessors to the current node. In contrast to the
3145 // buildDomainsWithBranchConstraints function, this one will pull the domain
3146 // information from the predecessors instead of pushing it to the successors.
3147 // Additionally, we assume the domains to be already present in the domain
3148 // map here. However, we iterate again in reverse post order so we know all
3149 // predecessors have been visited before a block or non-affine subregion is
3150 // visited.
3152 ReversePostOrderTraversal<Region *> RTraversal(R);
3153 for (auto *RN : RTraversal) {
3155 // Recurse for affine subregions but go on for basic blocks and non-affine
3156 // subregions.
3157 if (RN->isSubRegion()) {
3158 Region *SubRegion = RN->getNodeAs<Region>();
3159 if (!isNonAffineSubRegion(SubRegion)) {
3160 if (!propagateDomainConstraints(SubRegion, DT, LI, InvalidDomainMap))
3161 return false;
3162 continue;
3166 BasicBlock *BB = getRegionNodeBasicBlock(RN);
3167 isl::set &Domain = DomainMap[BB];
3168 assert(Domain);
3170 // Under the union of all predecessor conditions we can reach this block.
3171 isl::set PredDom =
3172 isl::manage(getPredecessorDomainConstraints(BB, Domain.get(), DT, LI));
3173 Domain = Domain.intersect(PredDom).coalesce();
3174 Domain = Domain.align_params(isl::manage(getParamSpace()));
3176 Loop *BBLoop = getRegionNodeLoop(RN, LI);
3177 if (BBLoop && BBLoop->getHeader() == BB && contains(BBLoop))
3178 if (!addLoopBoundsToHeaderDomain(BBLoop, LI, InvalidDomainMap))
3179 return false;
3182 return true;
3185 /// Create a map to map from a given iteration to a subsequent iteration.
3187 /// This map maps from SetSpace -> SetSpace where the dimension @p Dim
3188 /// is incremented by one and all other dimensions are equal, e.g.,
3189 /// [i0, i1, i2, i3] -> [i0, i1, i2 + 1, i3]
3191 /// if @p Dim is 2 and @p SetSpace has 4 dimensions.
3192 static __isl_give isl_map *
3193 createNextIterationMap(__isl_take isl_space *SetSpace, unsigned Dim) {
3194 auto *MapSpace = isl_space_map_from_set(SetSpace);
3195 auto *NextIterationMap = isl_map_universe(isl_space_copy(MapSpace));
3196 for (unsigned u = 0; u < isl_map_dim(NextIterationMap, isl_dim_in); u++)
3197 if (u != Dim)
3198 NextIterationMap =
3199 isl_map_equate(NextIterationMap, isl_dim_in, u, isl_dim_out, u);
3200 auto *C = isl_constraint_alloc_equality(isl_local_space_from_space(MapSpace));
3201 C = isl_constraint_set_constant_si(C, 1);
3202 C = isl_constraint_set_coefficient_si(C, isl_dim_in, Dim, 1);
3203 C = isl_constraint_set_coefficient_si(C, isl_dim_out, Dim, -1);
3204 NextIterationMap = isl_map_add_constraint(NextIterationMap, C);
3205 return NextIterationMap;
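// Cross-checking the documented example, as an illustrative sketch only: for
// a 4-dimensional space and Dim = 2 the function is expected to produce the
// map below, which we can exercise by applying it to a single point.
static void nextIterationMapSketch() {
  isl_ctx *Ctx = isl_ctx_alloc();
  isl_map *Next = isl_map_read_from_str(
      Ctx, "{ [i0, i1, i2, i3] -> [i0, i1, i2 + 1, i3] }");
  isl_set *Point = isl_set_read_from_str(Ctx, "{ [0, 5, 3, 7] }");
  // Applying the map advances only dimension 2: { [0, 5, 4, 7] }.
  isl_set *NextPoint = isl_set_apply(Point, Next);
  isl_set_free(NextPoint);
  isl_ctx_free(Ctx);
}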
3208 bool Scop::addLoopBoundsToHeaderDomain(
3209 Loop *L, LoopInfo &LI, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
3210 int LoopDepth = getRelativeLoopDepth(L);
3211 assert(LoopDepth >= 0 && "Loop in region should have at least depth one");
3213 BasicBlock *HeaderBB = L->getHeader();
3214 assert(DomainMap.count(HeaderBB));
3215 isl::set &HeaderBBDom = DomainMap[HeaderBB];
3217 isl::map NextIterationMap = isl::manage(
3218 createNextIterationMap(HeaderBBDom.get_space().release(), LoopDepth));
3220 isl::set UnionBackedgeCondition = HeaderBBDom.empty(HeaderBBDom.get_space());
3222 SmallVector<llvm::BasicBlock *, 4> LatchBlocks;
3223 L->getLoopLatches(LatchBlocks);
3225 for (BasicBlock *LatchBB : LatchBlocks) {
3227 // If the latch is only reachable via error statements we skip it.
3228 isl::set LatchBBDom = DomainMap.lookup(LatchBB);
3229 if (!LatchBBDom)
3230 continue;
3232 isl::set BackedgeCondition = nullptr;
3234 TerminatorInst *TI = LatchBB->getTerminator();
3235 BranchInst *BI = dyn_cast<BranchInst>(TI);
3236 assert(BI && "Only branch instructions allowed in loop latches");
3238 if (BI->isUnconditional())
3239 BackedgeCondition = LatchBBDom;
3240 else {
3241 SmallVector<isl_set *, 8> ConditionSets;
3242 int idx = BI->getSuccessor(0) != HeaderBB;
3243 if (!buildConditionSets(*this, LatchBB, TI, L, LatchBBDom.get(),
3244 InvalidDomainMap, ConditionSets))
3245 return false;
3247 // Free the non back edge condition set as we do not need it.
3248 isl_set_free(ConditionSets[1 - idx]);
3250 BackedgeCondition = isl::manage(ConditionSets[idx]);
3253 int LatchLoopDepth = getRelativeLoopDepth(LI.getLoopFor(LatchBB));
3254 assert(LatchLoopDepth >= LoopDepth);
3255 BackedgeCondition = BackedgeCondition.project_out(
3256 isl::dim::set, LoopDepth + 1, LatchLoopDepth - LoopDepth);
3257 UnionBackedgeCondition = UnionBackedgeCondition.unite(BackedgeCondition);
3260   isl::map ForwardMap = isl::map::lex_le(HeaderBBDom.get_space());
3261 for (int i = 0; i < LoopDepth; i++)
3262 ForwardMap = ForwardMap.equate(isl::dim::in, i, isl::dim::out, i);
3264 isl::set UnionBackedgeConditionComplement =
3265 UnionBackedgeCondition.complement();
3266 UnionBackedgeConditionComplement =
3267       UnionBackedgeConditionComplement.lower_bound_si(isl::dim::set, LoopDepth, 0);
3269 UnionBackedgeConditionComplement =
3270 UnionBackedgeConditionComplement.apply(ForwardMap);
3271 HeaderBBDom = HeaderBBDom.subtract(UnionBackedgeConditionComplement);
3272 HeaderBBDom = HeaderBBDom.apply(NextIterationMap);
3274 auto Parts = partitionSetParts(HeaderBBDom.copy(), LoopDepth);
3275 HeaderBBDom = isl::manage(Parts.second);
3277   // Check if there is a <nsw> tagged AddRec for this loop and if so do not
3278   // add the boundedness assumption to the context, as it is already implied
3279   // by the <nsw> tag.
3280 if (Affinator.hasNSWAddRecForLoop(L)) {
3281 isl_set_free(Parts.first);
3282 return true;
3285 isl_set *UnboundedCtx = isl_set_params(Parts.first);
3286 recordAssumption(INFINITELOOP, UnboundedCtx,
3287 HeaderBB->getTerminator()->getDebugLoc(), AS_RESTRICTION);
3288 return true;
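// A worked sketch of the bounding step above for a single loop
// `for (i = 0; i < N; i++)`; the sets are hypothetical. Iterations that do
// not take the backedge must be the lexicographically last ones, so every
// iteration at or after such a point is removed from the header domain.
static void loopBoundsSketch() {
  isl_ctx *Ctx = isl_ctx_alloc();
  isl_set *Header = isl_set_read_from_str(Ctx, "[N] -> { [i] : i >= 0 }");
  // Complement of the backedge condition within the header domain.
  isl_set *NoBackedge =
      isl_set_read_from_str(Ctx, "[N] -> { [i] : i >= N - 1 and i >= 0 }");
  isl_map *Fwd = isl_map_lex_le(isl_set_get_space(Header));
  // All iterations lexicographically at or after a loop-exiting iteration.
  isl_set *Unreachable = isl_set_apply(NoBackedge, Fwd);
  // Iterations that take the backedge: { [i] : 0 <= i < N - 1 }. Applying the
  // next-iteration map then yields { [i] : 1 <= i < N }; the entry edge
  // contributes i = 0 during predecessor propagation.
  isl_set *TakesBackedge = isl_set_subtract(Header, Unreachable);
  isl_set_free(TakesBackedge);
  isl_ctx_free(Ctx);
}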
3291 MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
3292 Value *PointerBase = MA->getOriginalBaseAddr();
3294 auto *PointerBaseInst = dyn_cast<Instruction>(PointerBase);
3295 if (!PointerBaseInst)
3296 return nullptr;
3298 auto *BasePtrStmt = getStmtFor(PointerBaseInst);
3299 if (!BasePtrStmt)
3300 return nullptr;
3302 return BasePtrStmt->getArrayAccessOrNULLFor(PointerBaseInst);
3305 bool Scop::hasNonHoistableBasePtrInScop(MemoryAccess *MA,
3306 isl::union_map Writes) {
3307 if (auto *BasePtrMA = lookupBasePtrAccess(MA)) {
3308 return getNonHoistableCtx(BasePtrMA, Writes).is_null();
3311 Value *BaseAddr = MA->getOriginalBaseAddr();
3312 if (auto *BasePtrInst = dyn_cast<Instruction>(BaseAddr))
3313 if (!isa<LoadInst>(BasePtrInst))
3314 return contains(BasePtrInst);
3316 return false;
3319 bool Scop::buildAliasChecks(AliasAnalysis &AA) {
3320 if (!PollyUseRuntimeAliasChecks)
3321 return true;
3323 if (buildAliasGroups(AA)) {
3324 // Aliasing assumptions do not go through addAssumption but we still want to
3325 // collect statistics so we do it here explicitly.
3326 if (MinMaxAliasGroups.size())
3327 AssumptionsAliasing++;
3328 return true;
3331 // If a problem occurs while building the alias groups we need to delete
3332 // this SCoP and pretend it wasn't valid in the first place. To this end
3333 // we make the assumed context infeasible.
3334 invalidate(ALIASING, DebugLoc());
3336 DEBUG(dbgs() << "\n\nNOTE: Run time checks for " << getNameStr()
3337 << " could not be created as the number of parameters involved "
3338 "is too high. The SCoP will be "
3339 "dismissed.\nUse:\n\t--polly-rtc-max-parameters=X\nto adjust "
3340 "the maximal number of parameters but be advised that the "
3341 "compile time might increase exponentially.\n\n");
3342 return false;
3345 std::tuple<Scop::AliasGroupVectorTy, DenseSet<const ScopArrayInfo *>>
3346 Scop::buildAliasGroupsForAccesses(AliasAnalysis &AA) {
3347 AliasSetTracker AST(AA);
3349 DenseMap<Value *, MemoryAccess *> PtrToAcc;
3350 DenseSet<const ScopArrayInfo *> HasWriteAccess;
3351 for (ScopStmt &Stmt : *this) {
3353 isl_set *StmtDomain = Stmt.getDomain();
3354 bool StmtDomainEmpty = isl_set_is_empty(StmtDomain);
3355 isl_set_free(StmtDomain);
3357 // Statements with an empty domain will never be executed.
3358 if (StmtDomainEmpty)
3359 continue;
3361 for (MemoryAccess *MA : Stmt) {
3362 if (MA->isScalarKind())
3363 continue;
3364 if (!MA->isRead())
3365 HasWriteAccess.insert(MA->getScopArrayInfo());
3366 MemAccInst Acc(MA->getAccessInstruction());
3367 if (MA->isRead() && isa<MemTransferInst>(Acc))
3368 PtrToAcc[cast<MemTransferInst>(Acc)->getRawSource()] = MA;
3369 else
3370 PtrToAcc[Acc.getPointerOperand()] = MA;
3371 AST.add(Acc);
3375 AliasGroupVectorTy AliasGroups;
3376 for (AliasSet &AS : AST) {
3377 if (AS.isMustAlias() || AS.isForwardingAliasSet())
3378 continue;
3379 AliasGroupTy AG;
3380 for (auto &PR : AS)
3381 AG.push_back(PtrToAcc[PR.getValue()]);
3382 if (AG.size() < 2)
3383 continue;
3384 AliasGroups.push_back(std::move(AG));
3387 return std::make_tuple(AliasGroups, HasWriteAccess);
3390 void Scop::splitAliasGroupsByDomain(AliasGroupVectorTy &AliasGroups) {
3391 for (unsigned u = 0; u < AliasGroups.size(); u++) {
3392 AliasGroupTy NewAG;
3393 AliasGroupTy &AG = AliasGroups[u];
3394 AliasGroupTy::iterator AGI = AG.begin();
3395 isl_set *AGDomain = getAccessDomain(*AGI);
3396 while (AGI != AG.end()) {
3397 MemoryAccess *MA = *AGI;
3398 isl_set *MADomain = getAccessDomain(MA);
3399 if (isl_set_is_disjoint(AGDomain, MADomain)) {
3400 NewAG.push_back(MA);
3401 AGI = AG.erase(AGI);
3402 isl_set_free(MADomain);
3403 } else {
3404 AGDomain = isl_set_union(AGDomain, MADomain);
3405 AGI++;
3408 if (NewAG.size() > 1)
3409 AliasGroups.push_back(std::move(NewAG));
3410 isl_set_free(AGDomain);
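// Sketch of the disjointness test that drives the split above, with
// hypothetical domains: two accesses whose domains can never be executed
// together do not need a common run-time alias check.
static void disjointDomainSketch() {
  isl_ctx *Ctx = isl_ctx_alloc();
  isl_set *DomA = isl_set_read_from_str(Ctx, "[N] -> { [i] : i < N }");
  isl_set *DomB = isl_set_read_from_str(Ctx, "[N] -> { [i] : i >= N }");
  // isl_bool_true here, so the accesses land in different alias groups.
  isl_bool Disjoint = isl_set_is_disjoint(DomA, DomB);
  (void)Disjoint;
  isl_set_free(DomA);
  isl_set_free(DomB);
  isl_ctx_free(Ctx);
}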
3414 bool Scop::buildAliasGroups(AliasAnalysis &AA) {
3415 // To create sound alias checks we perform the following steps:
3416 // o) We partition each group into read only and non read only accesses.
3417 // o) For each group with more than one base pointer we then compute minimal
3418 // and maximal accesses to each array of a group in read only and non
3419 // read only partitions separately.
3420 AliasGroupVectorTy AliasGroups;
3421 DenseSet<const ScopArrayInfo *> HasWriteAccess;
3423 std::tie(AliasGroups, HasWriteAccess) = buildAliasGroupsForAccesses(AA);
3425 splitAliasGroupsByDomain(AliasGroups);
3427 for (AliasGroupTy &AG : AliasGroups) {
3428 if (!hasFeasibleRuntimeContext())
3429 return false;
3432 IslMaxOperationsGuard MaxOpGuard(getIslCtx(), OptComputeOut);
3433 bool Valid = buildAliasGroup(AG, HasWriteAccess);
3434 if (!Valid)
3435 return false;
3437 if (isl_ctx_last_error(getIslCtx()) == isl_error_quota) {
3438 invalidate(COMPLEXITY, DebugLoc());
3439 return false;
3443 return true;
3446 bool Scop::buildAliasGroup(Scop::AliasGroupTy &AliasGroup,
3447 DenseSet<const ScopArrayInfo *> HasWriteAccess) {
3448 AliasGroupTy ReadOnlyAccesses;
3449 AliasGroupTy ReadWriteAccesses;
3450 SmallPtrSet<const ScopArrayInfo *, 4> ReadWriteArrays;
3451 SmallPtrSet<const ScopArrayInfo *, 4> ReadOnlyArrays;
3453 if (AliasGroup.size() < 2)
3454 return true;
3456 for (MemoryAccess *Access : AliasGroup) {
3457 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "PossibleAlias",
3458 Access->getAccessInstruction())
3459 << "Possibly aliasing pointer, use restrict keyword.");
3460 const ScopArrayInfo *Array = Access->getScopArrayInfo();
3461 if (HasWriteAccess.count(Array)) {
3462 ReadWriteArrays.insert(Array);
3463 ReadWriteAccesses.push_back(Access);
3464 } else {
3465 ReadOnlyArrays.insert(Array);
3466 ReadOnlyAccesses.push_back(Access);
3470 // If there are no read-only pointers, and less than two read-write pointers,
3471 // no alias check is needed.
3472 if (ReadOnlyAccesses.empty() && ReadWriteArrays.size() <= 1)
3473 return true;
3475 // If there is no read-write pointer, no alias check is needed.
3476 if (ReadWriteArrays.empty())
3477 return true;
3479 // For non-affine accesses, no alias check can be generated as we cannot
3480 // compute a sufficiently tight lower and upper bound: bail out.
3481 for (MemoryAccess *MA : AliasGroup) {
3482 if (!MA->isAffine()) {
3483 invalidate(ALIASING, MA->getAccessInstruction()->getDebugLoc(),
3484 MA->getAccessInstruction()->getParent());
3485 return false;
3489 // Ensure that for all memory accesses for which we generate alias checks,
3490 // their base pointers are available.
3491 for (MemoryAccess *MA : AliasGroup) {
3492 if (MemoryAccess *BasePtrMA = lookupBasePtrAccess(MA))
3493 addRequiredInvariantLoad(
3494 cast<LoadInst>(BasePtrMA->getAccessInstruction()));
3497 MinMaxAliasGroups.emplace_back();
3498 MinMaxVectorPairTy &pair = MinMaxAliasGroups.back();
3499 MinMaxVectorTy &MinMaxAccessesReadWrite = pair.first;
3500 MinMaxVectorTy &MinMaxAccessesReadOnly = pair.second;
3502 bool Valid;
3504 Valid =
3505 calculateMinMaxAccess(ReadWriteAccesses, *this, MinMaxAccessesReadWrite);
3507 if (!Valid)
3508 return false;
3510 // Bail out if the number of values we need to compare is too large.
3511 // This is important as the number of comparisons grows quadratically with
3512 // the number of values we need to compare.
3513 if (MinMaxAccessesReadWrite.size() + ReadOnlyArrays.size() >
3514 RunTimeChecksMaxArraysPerGroup)
3515 return false;
3517 Valid =
3518 calculateMinMaxAccess(ReadOnlyAccesses, *this, MinMaxAccessesReadOnly);
3520 if (!Valid)
3521 return false;
3523 return true;
3526 /// Get the smallest loop that contains @p S but is not in @p S.
3527 static Loop *getLoopSurroundingScop(Scop &S, LoopInfo &LI) {
3528 // Start with the smallest loop containing the entry and expand that
3529 // loop until it contains all blocks in the region. If there is a loop
3530 // containing all blocks in the region check if it is itself contained
3531 // and if so take the parent loop as it will be the smallest containing
3532 // the region but not contained by it.
3533 Loop *L = LI.getLoopFor(S.getEntry());
3534 while (L) {
3535 bool AllContained = true;
3536 for (auto *BB : S.blocks())
3537 AllContained &= L->contains(BB);
3538 if (AllContained)
3539 break;
3540 L = L->getParentLoop();
3543 return L ? (S.contains(L) ? L->getParentLoop() : L) : nullptr;
3546 int Scop::NextScopID = 0;
3548 std::string Scop::CurrentFunc = "";
3550 int Scop::getNextID(std::string ParentFunc) {
3551 if (ParentFunc != CurrentFunc) {
3552 CurrentFunc = ParentFunc;
3553 NextScopID = 0;
3555 return NextScopID++;
3558 Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
3559 ScopDetection::DetectionContext &DC, OptimizationRemarkEmitter &ORE)
3560 : SE(&ScalarEvolution), R(R), name(R.getNameStr()), IsOptimized(false),
3561 HasSingleExitEdge(R.getExitingBlock()), HasErrorBlock(false),
3562 MaxLoopDepth(0), CopyStmtsNum(0), SkipScop(false), DC(DC), ORE(ORE),
3563 IslCtx(isl_ctx_alloc(), isl_ctx_free), Context(nullptr),
3564 Affinator(this, LI), AssumedContext(nullptr), InvalidContext(nullptr),
3565 Schedule(nullptr),
3566 ID(getNextID((*R.getEntry()->getParent()).getName().str())) {
3567 if (IslOnErrorAbort)
3568 isl_options_set_on_error(getIslCtx(), ISL_ON_ERROR_ABORT);
3569 buildContext();
3572 void Scop::foldSizeConstantsToRight() {
3573 isl_union_set *Accessed = isl_union_map_range(getAccesses());
3575 for (auto Array : arrays()) {
3576 if (Array->getNumberOfDimensions() <= 1)
3577 continue;
3579 isl_space *Space = Array->getSpace().release();
3581 Space = isl_space_align_params(Space, isl_union_set_get_space(Accessed));
3583 if (!isl_union_set_contains(Accessed, Space)) {
3584 isl_space_free(Space);
3585 continue;
3588 isl_set *Elements = isl_union_set_extract_set(Accessed, Space);
3590 isl_map *Transform =
3591 isl_map_universe(isl_space_map_from_set(Array->getSpace().release()));
3593 std::vector<int> Int;
3595 int Dims = isl_set_dim(Elements, isl_dim_set);
3596 for (int i = 0; i < Dims; i++) {
3597 isl_set *DimOnly =
3598 isl_set_project_out(isl_set_copy(Elements), isl_dim_set, 0, i);
3599 DimOnly = isl_set_project_out(DimOnly, isl_dim_set, 1, Dims - i - 1);
3600 DimOnly = isl_set_lower_bound_si(DimOnly, isl_dim_set, 0, 0);
3602 isl_basic_set *DimHull = isl_set_affine_hull(DimOnly);
3604 if (i == Dims - 1) {
3605 Int.push_back(1);
3606 Transform = isl_map_equate(Transform, isl_dim_in, i, isl_dim_out, i);
3607 isl_basic_set_free(DimHull);
3608 continue;
3611 if (isl_basic_set_dim(DimHull, isl_dim_div) == 1) {
3612 isl_aff *Diff = isl_basic_set_get_div(DimHull, 0);
3613 isl_val *Val = isl_aff_get_denominator_val(Diff);
3614 isl_aff_free(Diff);
3616 int ValInt = 1;
3618 if (isl_val_is_int(Val))
3619 ValInt = isl_val_get_num_si(Val);
3620 isl_val_free(Val);
3622 Int.push_back(ValInt);
3624 isl_constraint *C = isl_constraint_alloc_equality(
3625 isl_local_space_from_space(isl_map_get_space(Transform)));
3626 C = isl_constraint_set_coefficient_si(C, isl_dim_out, i, ValInt);
3627 C = isl_constraint_set_coefficient_si(C, isl_dim_in, i, -1);
3628 Transform = isl_map_add_constraint(Transform, C);
3629 isl_basic_set_free(DimHull);
3630 continue;
3633 isl_basic_set *ZeroSet = isl_basic_set_copy(DimHull);
3634 ZeroSet = isl_basic_set_fix_si(ZeroSet, isl_dim_set, 0, 0);
3636 int ValInt = 1;
3637 if (isl_basic_set_is_equal(ZeroSet, DimHull)) {
3638 ValInt = 0;
3641 Int.push_back(ValInt);
3642 Transform = isl_map_equate(Transform, isl_dim_in, i, isl_dim_out, i);
3643 isl_basic_set_free(DimHull);
3644 isl_basic_set_free(ZeroSet);
3647 isl_set *MappedElements = isl_map_domain(isl_map_copy(Transform));
3649 if (!isl_set_is_subset(Elements, MappedElements)) {
3650 isl_set_free(Elements);
3651 isl_set_free(MappedElements);
3652 isl_map_free(Transform);
3653 continue;
3656 isl_set_free(MappedElements);
3658 bool CanFold = true;
3660 if (Int[0] <= 1)
3661 CanFold = false;
3663 unsigned NumDims = Array->getNumberOfDimensions();
3664 for (unsigned i = 1; i < NumDims - 1; i++)
3665 if (Int[0] != Int[i] && Int[i])
3666 CanFold = false;
3668 if (!CanFold) {
3669 isl_set_free(Elements);
3670 isl_map_free(Transform);
3671 continue;
3674 for (auto &Access : AccessFunctions)
3675 if (Access->getScopArrayInfo() == Array)
3676 Access->setAccessRelation(isl_map_apply_range(
3677 Access->getAccessRelation().release(), isl_map_copy(Transform)));
3679 isl_map_free(Transform);
3681 std::vector<const SCEV *> Sizes;
3682 for (unsigned i = 0; i < NumDims; i++) {
3683 auto Size = Array->getDimensionSize(i);
3685 if (i == NumDims - 1)
3686 Size = SE->getMulExpr(Size, SE->getConstant(Size->getType(), Int[0]));
3687 Sizes.push_back(Size);
3690 Array->updateSizes(Sizes, false /* CheckConsistency */);
3692 isl_set_free(Elements);
3694 isl_union_set_free(Accessed);
3695 return;
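// Illustrative sketch of the transform computed above; the array name and
// stride are hypothetical. If every access to dimension 0 of A uses a stride
// of 3, the transform renumbers that dimension and the constant factor is
// folded into the size of the rightmost dimension instead.
static void foldTransformSketch() {
  isl_ctx *Ctx = isl_ctx_alloc();
  // Renumber strided indices: A[3o, j] becomes A[o, j].
  isl_map *Transform =
      isl_map_read_from_str(Ctx, "{ A[i, j] -> A[o, j] : i = 3 * o }");
  isl_map *Access = isl_map_read_from_str(Ctx, "{ S[k] -> A[3 * k, 5] }");
  // After composition the statement accesses A[k, 5] in the folded layout.
  isl_map *Folded = isl_map_apply_range(Access, Transform);
  isl_map_free(Folded);
  isl_ctx_free(Ctx);
}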
3698 void Scop::markFortranArrays() {
3699 for (ScopStmt &Stmt : Stmts) {
3700 for (MemoryAccess *MemAcc : Stmt) {
3701 Value *FAD = MemAcc->getFortranArrayDescriptor();
3702 if (!FAD)
3703 continue;
3705 // TODO: const_cast-ing to edit
3706 ScopArrayInfo *SAI =
3707 const_cast<ScopArrayInfo *>(MemAcc->getLatestScopArrayInfo());
3708 assert(SAI && "memory access into a Fortran array does not "
3709 "have an associated ScopArrayInfo");
3710 SAI->applyAndSetFAD(FAD);
3715 void Scop::finalizeAccesses() {
3716 updateAccessDimensionality();
3717 foldSizeConstantsToRight();
3718 foldAccessRelations();
3719 assumeNoOutOfBounds();
3720 markFortranArrays();
3723 Scop::~Scop() {
3724 isl_set_free(Context);
3725 isl_set_free(AssumedContext);
3726 isl_set_free(InvalidContext);
3727 isl_schedule_free(Schedule);
3729 for (auto &It : ParameterIds)
3730 isl_id_free(It.second);
3732 for (auto &AS : RecordedAssumptions)
3733 isl_set_free(AS.Set);
3735 // Free the alias groups
3736 for (MinMaxVectorPairTy &MinMaxAccessPair : MinMaxAliasGroups) {
3737 for (MinMaxAccessTy &MMA : MinMaxAccessPair.first) {
3738 isl_pw_multi_aff_free(MMA.first);
3739 isl_pw_multi_aff_free(MMA.second);
3741 for (MinMaxAccessTy &MMA : MinMaxAccessPair.second) {
3742 isl_pw_multi_aff_free(MMA.first);
3743 isl_pw_multi_aff_free(MMA.second);
3747 for (const auto &IAClass : InvariantEquivClasses)
3748 isl_set_free(IAClass.ExecutionContext);
3750 // Explicitly release all Scop objects and the underlying isl objects before
3751 // we release the isl context.
3752 Stmts.clear();
3753 ScopArrayInfoSet.clear();
3754 ScopArrayInfoMap.clear();
3755 ScopArrayNameMap.clear();
3756 AccessFunctions.clear();
3759 void Scop::updateAccessDimensionality() {
3760 // Check all array accesses for each base pointer and find a (virtual) element
3761 // size for the base pointer that divides all access functions.
3762 for (ScopStmt &Stmt : *this)
3763 for (MemoryAccess *Access : Stmt) {
3764 if (!Access->isArrayKind())
3765 continue;
3766 ScopArrayInfo *Array =
3767 const_cast<ScopArrayInfo *>(Access->getScopArrayInfo());
3769 if (Array->getNumberOfDimensions() != 1)
3770 continue;
3771 unsigned DivisibleSize = Array->getElemSizeInBytes();
3772 const SCEV *Subscript = Access->getSubscript(0);
3773 while (!isDivisible(Subscript, DivisibleSize, *SE))
3774 DivisibleSize /= 2;
3775 auto *Ty = IntegerType::get(SE->getContext(), DivisibleSize * 8);
3776 Array->updateElementType(Ty);
3779 for (auto &Stmt : *this)
3780 for (auto &Access : Stmt)
3781 Access->updateDimensionality();
3784 void Scop::foldAccessRelations() {
3785 for (auto &Stmt : *this)
3786 for (auto &Access : Stmt)
3787 Access->foldAccessRelation();
3790 void Scop::assumeNoOutOfBounds() {
3791 for (auto &Stmt : *this)
3792 for (auto &Access : Stmt)
3793 Access->assumeNoOutOfBound();
3796 void Scop::removeFromStmtMap(ScopStmt &Stmt) {
3797 if (Stmt.isRegionStmt())
3798 for (BasicBlock *BB : Stmt.getRegion()->blocks())
3799 StmtMap.erase(BB);
3800 else
3801 StmtMap.erase(Stmt.getBasicBlock());
3804 void Scop::removeStmts(std::function<bool(ScopStmt &)> ShouldDelete) {
3805 for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
3806 if (!ShouldDelete(*StmtIt)) {
3807 StmtIt++;
3808 continue;
3811 removeFromStmtMap(*StmtIt);
3812 StmtIt = Stmts.erase(StmtIt);
3816 void Scop::removeStmtNotInDomainMap() {
3817 auto ShouldDelete = [this](ScopStmt &Stmt) -> bool {
3818 return !this->DomainMap.lookup(Stmt.getEntryBlock());
3820 removeStmts(ShouldDelete);
3823 void Scop::simplifySCoP(bool AfterHoisting) {
3825 auto ShouldDelete = [AfterHoisting](ScopStmt &Stmt) -> bool {
3826 bool RemoveStmt = Stmt.isEmpty();
3828 // Remove read only statements only after invariant load hoisting.
3829 if (!RemoveStmt && AfterHoisting) {
3830 bool OnlyRead = true;
3831 for (MemoryAccess *MA : Stmt) {
3832 if (MA->isRead())
3833 continue;
3835 OnlyRead = false;
3836 break;
3839 RemoveStmt = OnlyRead;
3841 return RemoveStmt;
3844 removeStmts(ShouldDelete);
3847 InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
3848 LoadInst *LInst = dyn_cast<LoadInst>(Val);
3849 if (!LInst)
3850 return nullptr;
3852 if (Value *Rep = InvEquivClassVMap.lookup(LInst))
3853 LInst = cast<LoadInst>(Rep);
3855 Type *Ty = LInst->getType();
3856 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
3857 for (auto &IAClass : InvariantEquivClasses) {
3858 if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
3859 continue;
3861 auto &MAs = IAClass.InvariantAccesses;
3862 for (auto *MA : MAs)
3863 if (MA->getAccessInstruction() == Val)
3864 return &IAClass;
3867 return nullptr;
3870 /// Check if @p MA can always be hoisted without execution context.
3871 static bool canAlwaysBeHoisted(MemoryAccess *MA, bool StmtInvalidCtxIsEmpty,
3872 bool MAInvalidCtxIsEmpty,
3873 bool NonHoistableCtxIsEmpty) {
3874 LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
3875 const DataLayout &DL = LInst->getParent()->getModule()->getDataLayout();
3876 // TODO: We can provide more information for better but more expensive
3877 // results.
3878 if (!isDereferenceableAndAlignedPointer(LInst->getPointerOperand(),
3879 LInst->getAlignment(), DL))
3880 return false;
3882 // If the location might be overwritten we do not hoist it unconditionally.
3884   // TODO: This is probably too conservative.
3885 if (!NonHoistableCtxIsEmpty)
3886 return false;
3888 // If a dereferenceable load is in a statement that is modeled precisely we
3889 // can hoist it.
3890 if (StmtInvalidCtxIsEmpty && MAInvalidCtxIsEmpty)
3891 return true;
3893 // Even if the statement is not modeled precisely we can hoist the load if it
3894 // does not involve any parameters that might have been specialized by the
3895 // statement domain.
3896 for (unsigned u = 0, e = MA->getNumSubscripts(); u < e; u++)
3897 if (!isa<SCEVConstant>(MA->getSubscript(u)))
3898 return false;
3899 return true;
3902 void Scop::addInvariantLoads(ScopStmt &Stmt, InvariantAccessesTy &InvMAs) {
3904 if (InvMAs.empty())
3905 return;
3907 auto *StmtInvalidCtx = Stmt.getInvalidContext();
3908 bool StmtInvalidCtxIsEmpty = isl_set_is_empty(StmtInvalidCtx);
3910 // Get the context under which the statement is executed but remove the error
3911 // context under which this statement is reached.
3912 isl_set *DomainCtx = isl_set_params(Stmt.getDomain());
3913 DomainCtx = isl_set_subtract(DomainCtx, StmtInvalidCtx);
3915 if (isl_set_n_basic_set(DomainCtx) >= MaxDisjunctsInDomain) {
3916 auto *AccInst = InvMAs.front().MA->getAccessInstruction();
3917 invalidate(COMPLEXITY, AccInst->getDebugLoc(), AccInst->getParent());
3918 isl_set_free(DomainCtx);
3919 for (auto &InvMA : InvMAs)
3920 isl_set_free(InvMA.NonHoistableCtx);
3921 return;
3924 // Project out all parameters that relate to loads in the statement. Otherwise
3925 // we could have cyclic dependences on the constraints under which the
3926 // hoisted loads are executed and we could not determine an order in which to
3927 // pre-load them. This happens because not only lower bounds are part of the
3928 // domain but also upper bounds.
3929 for (auto &InvMA : InvMAs) {
3930 auto *MA = InvMA.MA;
3931 Instruction *AccInst = MA->getAccessInstruction();
3932 if (SE->isSCEVable(AccInst->getType())) {
3933 SetVector<Value *> Values;
3934 for (const SCEV *Parameter : Parameters) {
3935 Values.clear();
3936 findValues(Parameter, *SE, Values);
3937 if (!Values.count(AccInst))
3938 continue;
3940 if (isl_id *ParamId = getIdForParam(Parameter)) {
3941 int Dim = isl_set_find_dim_by_id(DomainCtx, isl_dim_param, ParamId);
3942 if (Dim >= 0)
3943 DomainCtx = isl_set_eliminate(DomainCtx, isl_dim_param, Dim, 1);
3944 isl_id_free(ParamId);
3950 for (auto &InvMA : InvMAs) {
3951 auto *MA = InvMA.MA;
3952 auto *NHCtx = InvMA.NonHoistableCtx;
3954 // Check for another invariant access that accesses the same location as
3955 // MA and if found consolidate them. Otherwise create a new equivalence
3956 // class at the end of InvariantEquivClasses.
3957 LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
3958 Type *Ty = LInst->getType();
3959 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
3961 auto *MAInvalidCtx = MA->getInvalidContext().release();
3962 bool NonHoistableCtxIsEmpty = isl_set_is_empty(NHCtx);
3963 bool MAInvalidCtxIsEmpty = isl_set_is_empty(MAInvalidCtx);
3965 isl_set *MACtx;
3966 // Check if we know that this pointer can be speculatively accessed.
3967 if (canAlwaysBeHoisted(MA, StmtInvalidCtxIsEmpty, MAInvalidCtxIsEmpty,
3968 NonHoistableCtxIsEmpty)) {
3969 MACtx = isl_set_universe(isl_set_get_space(DomainCtx));
3970 isl_set_free(MAInvalidCtx);
3971 isl_set_free(NHCtx);
3972 } else {
3973 MACtx = isl_set_copy(DomainCtx);
3974 MACtx = isl_set_subtract(MACtx, isl_set_union(MAInvalidCtx, NHCtx));
3975 MACtx = isl_set_gist_params(MACtx, getContext());
3978 bool Consolidated = false;
3979 for (auto &IAClass : InvariantEquivClasses) {
3980 if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
3981 continue;
3983     // If the pointer and the type are equal, check whether the access
3984     // function wrt. the domain is equal too. It can happen that the domain
3985     // fixes parameter values and these can differ for distinct parts of the
3986     // SCoP. If this happens we cannot consolidate the loads but need to
3987     // create a new invariant load equivalence class.
3988 auto &MAs = IAClass.InvariantAccesses;
3989 if (!MAs.empty()) {
3990 auto *LastMA = MAs.front();
3992 auto *AR = isl_map_range(MA->getAccessRelation().release());
3993 auto *LastAR = isl_map_range(LastMA->getAccessRelation().release());
3994 bool SameAR = isl_set_is_equal(AR, LastAR);
3995 isl_set_free(AR);
3996 isl_set_free(LastAR);
3998 if (!SameAR)
3999 continue;
4002 // Add MA to the list of accesses that are in this class.
4003 MAs.push_front(MA);
4005 Consolidated = true;
4007 // Unify the execution context of the class and this statement.
4008 isl_set *&IAClassDomainCtx = IAClass.ExecutionContext;
4009 if (IAClassDomainCtx)
4010 IAClassDomainCtx =
4011 isl_set_coalesce(isl_set_union(IAClassDomainCtx, MACtx));
4012 else
4013 IAClassDomainCtx = MACtx;
4014 break;
4017 if (Consolidated)
4018 continue;
4020 // If we did not consolidate MA, thus did not find an equivalence class
4021 // for it, we create a new one.
4022 InvariantEquivClasses.emplace_back(
4023 InvariantEquivClassTy{PointerSCEV, MemoryAccessList{MA}, MACtx, Ty});
4026 isl_set_free(DomainCtx);
4029 /// Check if an access range is too complex.
4031 /// An access range is too complex if it contains either many disjuncts or
4032 /// very complex expressions. As a simple heuristic, we consider a set to
4033 /// be too complex if the sum of existentially quantified dimensions and
4034 /// set dimensions is larger than a threshold. This reliably detects both
4035 /// sets with many disjuncts and sets with many divisions as they
4036 /// arise in h264.
4038 /// @param AccessRange The range to check for complexity.
4040 /// @returns True if the access range is too complex.
4041 static bool isAccessRangeTooComplex(isl::set AccessRange) {
4042 unsigned NumTotalDims = 0;
4044 auto CountDimensions = [&NumTotalDims](isl::basic_set BSet) -> isl::stat {
4045 NumTotalDims += BSet.dim(isl::dim::div);
4046 NumTotalDims += BSet.dim(isl::dim::set);
4047 return isl::stat::ok;
4050 AccessRange.foreach_basic_set(CountDimensions);
4052 if (NumTotalDims > MaxDimensionsInAccessRange)
4053 return true;
4055 return false;
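// Example of the counting rule, assuming a caller-provided context: the set
// below needs one existentially quantified (div) dimension for the modulo
// constraint plus one set dimension, so NumTotalDims evaluates to 2 here.
static void accessRangeComplexitySketch(isl_ctx *Ctx) {
  isl::set Range = isl::manage(isl_set_read_from_str(
      Ctx, "{ [i] : i mod 4 = 0 and 0 <= i < 100 }"));
  bool TooComplex = isAccessRangeTooComplex(Range);
  (void)TooComplex; // false unless the threshold is set very low.
}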
4058 isl::set Scop::getNonHoistableCtx(MemoryAccess *Access, isl::union_map Writes) {
4059 // TODO: Loads that are not loop carried, hence are in a statement with
4060 // zero iterators, are by construction invariant, though we
4061 // currently "hoist" them anyway. This is necessary because we allow
4062 // them to be treated as parameters (e.g., in conditions) and our code
4063 // generation would otherwise use the old value.
4065 auto &Stmt = *Access->getStatement();
4066 BasicBlock *BB = Stmt.getEntryBlock();
4068 if (Access->isScalarKind() || Access->isWrite() || !Access->isAffine() ||
4069 Access->isMemoryIntrinsic())
4070 return nullptr;
4072 // Skip accesses that have an invariant base pointer which is defined but
4073   // not loaded inside the SCoP. This can happen, e.g., if a readnone call
4074 // returns a pointer that is used as a base address. However, as we want
4075 // to hoist indirect pointers, we allow the base pointer to be defined in
4076 // the region if it is also a memory access. Each ScopArrayInfo object
4077   // that has a base pointer origin has a base pointer that is loaded and
4078   // invariant, thus it will be hoisted too. However, if there is
4079 // no base pointer origin we check that the base pointer is defined
4080 // outside the region.
4081 auto *LI = cast<LoadInst>(Access->getAccessInstruction());
4082 if (hasNonHoistableBasePtrInScop(Access, Writes))
4083 return nullptr;
4085 isl::map AccessRelation = give(Access->getAccessRelation().release());
4086 assert(!AccessRelation.is_empty());
4088 if (AccessRelation.involves_dims(isl::dim::in, 0, Stmt.getNumIterators()))
4089 return nullptr;
4091 AccessRelation = AccessRelation.intersect_domain(give(Stmt.getDomain()));
4092 isl::set SafeToLoad;
4094 auto &DL = getFunction().getParent()->getDataLayout();
4095 if (isSafeToLoadUnconditionally(LI->getPointerOperand(), LI->getAlignment(),
4096 DL)) {
4097 SafeToLoad = isl::set::universe(AccessRelation.get_space().range());
4098 } else if (BB != LI->getParent()) {
4099 // Skip accesses in non-affine subregions as they might not be executed
4100 // under the same condition as the entry of the non-affine subregion.
4101 return nullptr;
4102 } else {
4103 SafeToLoad = AccessRelation.range();
4106 if (isAccessRangeTooComplex(AccessRelation.range()))
4107 return nullptr;
4109 isl::union_map Written = Writes.intersect_range(SafeToLoad);
4110 isl::set WrittenCtx = Written.params();
4111 bool IsWritten = !WrittenCtx.is_empty();
4113 if (!IsWritten)
4114 return WrittenCtx;
4116 WrittenCtx = WrittenCtx.remove_divs();
4117 bool TooComplex =
4118 isl_set_n_basic_set(WrittenCtx.get()) >= MaxDisjunctsInDomain;
4119 if (TooComplex || !isRequiredInvariantLoad(LI))
4120 return nullptr;
4122 addAssumption(INVARIANTLOAD, WrittenCtx.copy(), LI->getDebugLoc(),
4123 AS_RESTRICTION, LI->getParent());
4124 return WrittenCtx;
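// Sketch of the write-intersection test above, with hypothetical arguments:
// the context in which a hoisting candidate may be clobbered is the
// parameter projection of all writes that can touch the loaded range. An
// empty result means the load can be hoisted without a run-time condition.
static isl_set *writtenContextSketch(isl_union_map *Writes,
                                     isl_set *LoadedRange) {
  isl_union_map *Written = isl_union_map_intersect_range(
      isl_union_map_copy(Writes),
      isl_union_set_from_set(isl_set_copy(LoadedRange)));
  return isl_union_map_params(Written);
}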
4127 void Scop::verifyInvariantLoads() {
4128 auto &RIL = getRequiredInvariantLoads();
4129 for (LoadInst *LI : RIL) {
4130 assert(LI && contains(LI));
4131 ScopStmt *Stmt = getStmtFor(LI);
4132 if (Stmt && Stmt->getArrayAccessOrNULLFor(LI)) {
4133 invalidate(INVARIANTLOAD, LI->getDebugLoc(), LI->getParent());
4134 return;
4139 void Scop::hoistInvariantLoads() {
4140 if (!PollyInvariantLoadHoisting)
4141 return;
4143 isl::union_map Writes = give(getWrites());
4144 for (ScopStmt &Stmt : *this) {
4145 InvariantAccessesTy InvariantAccesses;
4147 for (MemoryAccess *Access : Stmt)
4148 if (isl::set NHCtx = getNonHoistableCtx(Access, Writes))
4149 InvariantAccesses.push_back({Access, NHCtx.release()});
4151 // Transfer the memory access from the statement to the SCoP.
4152 for (auto InvMA : InvariantAccesses)
4153 Stmt.removeMemoryAccess(InvMA.MA);
4154 addInvariantLoads(Stmt, InvariantAccesses);
4158 /// Find the canonical ScopArrayInfo object for a set of invariant-load
4159 /// hoisted loads. The canonical array is the one that corresponds to the
4160 /// first load in the list of accesses which is used as base pointer of a
4161 /// scop array.
4162 static const ScopArrayInfo *findCanonicalArray(Scop *S,
4163 MemoryAccessList &Accesses) {
4164 for (MemoryAccess *Access : Accesses) {
4165 const ScopArrayInfo *CanonicalArray = S->getScopArrayInfoOrNull(
4166 Access->getAccessInstruction(), MemoryKind::Array);
4167 if (CanonicalArray)
4168 return CanonicalArray;
4170 return nullptr;
4173 /// Check if @p Array serves as a base array in an invariant load.
4174 static bool isUsedForIndirectHoistedLoad(Scop *S, const ScopArrayInfo *Array) {
4175 for (InvariantEquivClassTy &EqClass2 : S->getInvariantAccesses())
4176 for (MemoryAccess *Access2 : EqClass2.InvariantAccesses)
4177 if (Access2->getScopArrayInfo() == Array)
4178 return true;
4179 return false;
4182 /// Replace the base pointer arrays in all memory accesses referencing @p Old,
4183 /// with a reference to @p New.
4184 static void replaceBasePtrArrays(Scop *S, const ScopArrayInfo *Old,
4185 const ScopArrayInfo *New) {
4186 for (ScopStmt &Stmt : *S)
4187 for (MemoryAccess *Access : Stmt) {
4188 if (Access->getLatestScopArrayInfo() != Old)
4189 continue;
4191 isl_id *Id = New->getBasePtrId().release();
4192 isl_map *Map = Access->getAccessRelation().release();
4193 Map = isl_map_set_tuple_id(Map, isl_dim_out, Id);
4194 Access->setAccessRelation(Map);
4198 void Scop::canonicalizeDynamicBasePtrs() {
4199 for (InvariantEquivClassTy &EqClass : InvariantEquivClasses) {
4200 MemoryAccessList &BasePtrAccesses = EqClass.InvariantAccesses;
4202 const ScopArrayInfo *CanonicalBasePtrSAI =
4203 findCanonicalArray(this, BasePtrAccesses);
4205 if (!CanonicalBasePtrSAI)
4206 continue;
4208 for (MemoryAccess *BasePtrAccess : BasePtrAccesses) {
4209 const ScopArrayInfo *BasePtrSAI = getScopArrayInfoOrNull(
4210 BasePtrAccess->getAccessInstruction(), MemoryKind::Array);
4211 if (!BasePtrSAI || BasePtrSAI == CanonicalBasePtrSAI ||
4212 !BasePtrSAI->isCompatibleWith(CanonicalBasePtrSAI))
4213 continue;
4215       // We currently do not canonicalize arrays where some accesses are
4216       // hoisted as invariant loads. If we did, we would need to update the
4217       // access functions of the invariant loads as well. However, as this is
4218       // not a very common situation, we leave this for now to avoid further
4219       // complexity increases.
4220 if (isUsedForIndirectHoistedLoad(this, BasePtrSAI))
4221 continue;
4223 replaceBasePtrArrays(this, BasePtrSAI, CanonicalBasePtrSAI);
4228 ScopArrayInfo *Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
4229 ArrayRef<const SCEV *> Sizes,
4230 MemoryKind Kind,
4231 const char *BaseName) {
4232 assert((BasePtr || BaseName) &&
4233 "BasePtr and BaseName can not be nullptr at the same time.");
4234 assert(!(BasePtr && BaseName) && "BaseName is redundant.");
4235 auto &SAI = BasePtr ? ScopArrayInfoMap[std::make_pair(BasePtr, Kind)]
4236 : ScopArrayNameMap[BaseName];
4237 if (!SAI) {
4238 auto &DL = getFunction().getParent()->getDataLayout();
4239 SAI.reset(new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
4240 DL, this, BaseName));
4241 ScopArrayInfoSet.insert(SAI.get());
4242 } else {
4243 SAI->updateElementType(ElementType);
4244 // In case of mismatching array sizes, we bail out by setting the run-time
4245 // context to false.
4246 if (!SAI->updateSizes(Sizes))
4247 invalidate(DELINEARIZATION, DebugLoc());
4249 return SAI.get();
4252 ScopArrayInfo *Scop::createScopArrayInfo(Type *ElementType,
4253 const std::string &BaseName,
4254 const std::vector<unsigned> &Sizes) {
4255 auto *DimSizeType = Type::getInt64Ty(getSE()->getContext());
4256 std::vector<const SCEV *> SCEVSizes;
4258 for (auto size : Sizes)
4259 if (size)
4260 SCEVSizes.push_back(getSE()->getConstant(DimSizeType, size, false));
4261 else
4262 SCEVSizes.push_back(nullptr);
4264 auto *SAI = getOrCreateScopArrayInfo(nullptr, ElementType, SCEVSizes,
4265 MemoryKind::Array, BaseName.c_str());
4266 return SAI;
4269 const ScopArrayInfo *Scop::getScopArrayInfoOrNull(Value *BasePtr,
4270 MemoryKind Kind) {
4271 auto *SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)].get();
4272 return SAI;
4275 const ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr, MemoryKind Kind) {
4276 auto *SAI = getScopArrayInfoOrNull(BasePtr, Kind);
4277 assert(SAI && "No ScopArrayInfo available for this base pointer");
4278 return SAI;
4281 std::string Scop::getContextStr() const { return stringFromIslObj(Context); }
4283 std::string Scop::getAssumedContextStr() const {
4284 assert(AssumedContext && "Assumed context not yet built");
4285 return stringFromIslObj(AssumedContext);
4288 std::string Scop::getInvalidContextStr() const {
4289 return stringFromIslObj(InvalidContext);
4292 std::string Scop::getNameStr() const {
4293 std::string ExitName, EntryName;
4294 std::tie(EntryName, ExitName) = getEntryExitStr();
4295 return EntryName + "---" + ExitName;
4298 std::pair<std::string, std::string> Scop::getEntryExitStr() const {
4299 std::string ExitName, EntryName;
4300 raw_string_ostream ExitStr(ExitName);
4301 raw_string_ostream EntryStr(EntryName);
4303 R.getEntry()->printAsOperand(EntryStr, false);
4304 EntryStr.str();
4306 if (R.getExit()) {
4307 R.getExit()->printAsOperand(ExitStr, false);
4308 ExitStr.str();
4309 } else
4310 ExitName = "FunctionExit";
4312 return std::make_pair(EntryName, ExitName);
4315 __isl_give isl_set *Scop::getContext() const { return isl_set_copy(Context); }
4316 __isl_give isl_space *Scop::getParamSpace() const {
4317 return isl_set_get_space(Context);
4320 __isl_give isl_set *Scop::getAssumedContext() const {
4321 assert(AssumedContext && "Assumed context not yet built");
4322 return isl_set_copy(AssumedContext);
4325 bool Scop::isProfitable(bool ScalarsAreUnprofitable) const {
4326 if (PollyProcessUnprofitable)
4327 return true;
4329 if (isEmpty())
4330 return false;
4332 unsigned OptimizableStmtsOrLoops = 0;
4333 for (auto &Stmt : *this) {
4334 if (Stmt.getNumIterators() == 0)
4335 continue;
4337 bool ContainsArrayAccs = false;
4338 bool ContainsScalarAccs = false;
4339 for (auto *MA : Stmt) {
4340 if (MA->isRead())
4341 continue;
4342 ContainsArrayAccs |= MA->isLatestArrayKind();
4343 ContainsScalarAccs |= MA->isLatestScalarKind();
4346 if (!ScalarsAreUnprofitable || (ContainsArrayAccs && !ContainsScalarAccs))
4347 OptimizableStmtsOrLoops += Stmt.getNumIterators();
4350 return OptimizableStmtsOrLoops > 1;
4353 bool Scop::hasFeasibleRuntimeContext() const {
4354 auto *PositiveContext = getAssumedContext();
4355 auto *NegativeContext = getInvalidContext();
4356 PositiveContext = addNonEmptyDomainConstraints(PositiveContext);
4357 bool IsFeasible = !(isl_set_is_empty(PositiveContext) ||
4358 isl_set_is_subset(PositiveContext, NegativeContext));
4359 isl_set_free(PositiveContext);
4360 if (!IsFeasible) {
4361 isl_set_free(NegativeContext);
4362 return false;
4365 auto *DomainContext = isl_union_set_params(getDomains());
4366 IsFeasible = !isl_set_is_subset(DomainContext, NegativeContext);
4367 IsFeasible &= !isl_set_is_subset(Context, NegativeContext);
4368 isl_set_free(NegativeContext);
4369 isl_set_free(DomainContext);
4371 return IsFeasible;
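// Feasibility boils down to subset tests on parameter sets. A sketch with
// hypothetical contexts: if the assumed context is entirely contained in the
// invalid one, no parameter valuation remains for which the optimized code
// may run.
static bool feasibilitySketch(isl_ctx *Ctx) {
  isl_set *Assumed = isl_set_read_from_str(Ctx, "[N] -> { : N >= 1 }");
  isl_set *Invalid = isl_set_read_from_str(Ctx, "[N] -> { : N >= 100 }");
  // Feasible: N = 1 satisfies the assumptions but not the invalid context.
  bool Feasible = isl_set_is_subset(Assumed, Invalid) != isl_bool_true;
  isl_set_free(Assumed);
  isl_set_free(Invalid);
  return Feasible;
}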
4374 static std::string toString(AssumptionKind Kind) {
4375 switch (Kind) {
4376 case ALIASING:
4377 return "No-aliasing";
4378 case INBOUNDS:
4379 return "Inbounds";
4380 case WRAPPING:
4381 return "No-overflows";
4382 case UNSIGNED:
4383 return "Signed-unsigned";
4384 case COMPLEXITY:
4385 return "Low complexity";
4386 case PROFITABLE:
4387 return "Profitable";
4388 case ERRORBLOCK:
4389 return "No-error";
4390 case INFINITELOOP:
4391 return "Finite loop";
4392 case INVARIANTLOAD:
4393 return "Invariant load";
4394 case DELINEARIZATION:
4395 return "Delinearization";
4397 llvm_unreachable("Unknown AssumptionKind!");
4400 bool Scop::isEffectiveAssumption(__isl_keep isl_set *Set, AssumptionSign Sign) {
4401 if (Sign == AS_ASSUMPTION) {
4402 if (isl_set_is_subset(Context, Set))
4403 return false;
4405 if (isl_set_is_subset(AssumedContext, Set))
4406 return false;
4407 } else {
4408 if (isl_set_is_disjoint(Set, Context))
4409 return false;
4411 if (isl_set_is_subset(Set, InvalidContext))
4412 return false;
4414 return true;
4417 bool Scop::trackAssumption(AssumptionKind Kind, __isl_keep isl_set *Set,
4418 DebugLoc Loc, AssumptionSign Sign, BasicBlock *BB) {
4419 if (PollyRemarksMinimal && !isEffectiveAssumption(Set, Sign))
4420 return false;
4422   // Never emit trivial assumptions as they only clutter the output.
4423 if (!PollyRemarksMinimal) {
4424 isl_set *Univ = nullptr;
4425 if (Sign == AS_ASSUMPTION)
4426 Univ = isl_set_universe(isl_set_get_space(Set));
4428 bool IsTrivial = (Sign == AS_RESTRICTION && isl_set_is_empty(Set)) ||
4429 (Sign == AS_ASSUMPTION && isl_set_is_equal(Univ, Set));
4430 isl_set_free(Univ);
4432 if (IsTrivial)
4433 return false;
4436 switch (Kind) {
4437 case ALIASING:
4438 AssumptionsAliasing++;
4439 break;
4440 case INBOUNDS:
4441 AssumptionsInbounds++;
4442 break;
4443 case WRAPPING:
4444 AssumptionsWrapping++;
4445 break;
4446 case UNSIGNED:
4447 AssumptionsUnsigned++;
4448 break;
4449 case COMPLEXITY:
4450 AssumptionsComplexity++;
4451 break;
4452 case PROFITABLE:
4453 AssumptionsUnprofitable++;
4454 break;
4455 case ERRORBLOCK:
4456 AssumptionsErrorBlock++;
4457 break;
4458 case INFINITELOOP:
4459 AssumptionsInfiniteLoop++;
4460 break;
4461 case INVARIANTLOAD:
4462 AssumptionsInvariantLoad++;
4463 break;
4464 case DELINEARIZATION:
4465 AssumptionsDelinearization++;
4466 break;
4469 auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
4470 std::string Msg = toString(Kind) + Suffix + stringFromIslObj(Set);
4471 if (BB)
4472 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc, BB)
4473 << Msg);
4474 else
4475 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc,
4476 R.getEntry())
4477 << Msg);
4478 return true;
4481 void Scop::addAssumption(AssumptionKind Kind, __isl_take isl_set *Set,
4482 DebugLoc Loc, AssumptionSign Sign, BasicBlock *BB) {
4483 // Simplify the assumptions/restrictions first.
4484 Set = isl_set_gist_params(Set, getContext());
4486 if (!trackAssumption(Kind, Set, Loc, Sign, BB)) {
4487 isl_set_free(Set);
4488 return;
4491 if (Sign == AS_ASSUMPTION) {
4492 AssumedContext = isl_set_intersect(AssumedContext, Set);
4493 AssumedContext = isl_set_coalesce(AssumedContext);
4494 } else {
4495 InvalidContext = isl_set_union(InvalidContext, Set);
4496 InvalidContext = isl_set_coalesce(InvalidContext);
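// The two updates above are dual; a small sketch with hypothetical parameter
// sets: taking an assumption shrinks the assumed context by intersection,
// while adding a restriction grows the invalid context by union.
static void contextUpdateSketch(isl_ctx *Ctx) {
  isl_set *Assumed = isl_set_read_from_str(Ctx, "[N] -> { : N >= 0 }");
  isl_set *NewAssumption = isl_set_read_from_str(Ctx, "[N] -> { : N <= 100 }");
  // Resulting assumed context: [N] -> { : 0 <= N <= 100 }.
  Assumed = isl_set_coalesce(isl_set_intersect(Assumed, NewAssumption));
  isl_set_free(Assumed);
}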
4500 void Scop::recordAssumption(AssumptionKind Kind, __isl_take isl_set *Set,
4501 DebugLoc Loc, AssumptionSign Sign, BasicBlock *BB) {
4502 assert((isl_set_is_params(Set) || BB) &&
4503 "Assumptions without a basic block must be parameter sets");
4504 RecordedAssumptions.push_back({Kind, Sign, Set, Loc, BB});
4507 void Scop::addRecordedAssumptions() {
4508 while (!RecordedAssumptions.empty()) {
4509 const Assumption &AS = RecordedAssumptions.pop_back_val();
4511 if (!AS.BB) {
4512 addAssumption(AS.Kind, AS.Set, AS.Loc, AS.Sign, nullptr /* BasicBlock */);
4513 continue;
4516 // If the domain was deleted the assumptions are void.
4517 isl_set *Dom = getDomainConditions(AS.BB);
4518 if (!Dom) {
4519 isl_set_free(AS.Set);
4520 continue;
4523 // If a basic block was given use its domain to simplify the assumption.
4524 // In case of restrictions we know they only have to hold on the domain,
4525 // thus we can intersect them with the domain of the block. However, for
4526 // assumptions the domain has to imply them, thus:
4527   //   Dom => S   <==>   !Dom v S   <==>   !(Dom - S)
4530 // To avoid the complement we will register A - B as a restriction not an
4531 // assumption.
4532 isl_set *S = AS.Set;
4533 if (AS.Sign == AS_RESTRICTION)
4534 S = isl_set_params(isl_set_intersect(S, Dom));
4535 else /* (AS.Sign == AS_ASSUMPTION) */
4536 S = isl_set_params(isl_set_subtract(Dom, S));
4538 addAssumption(AS.Kind, S, AS.Loc, AS_RESTRICTION, AS.BB);
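// Spelling out the rewrite above with A = Dom and B = S:
//   assume (A => B)  <==>  assume (!A v B)  <==>  restrict !(!A v B)
//                    <==>  restrict (A and !B)  <==>  restrict (A - B)
// so registering isl_set_subtract(Dom, S) as a restriction expresses the
// implication without ever computing a set complement.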
4542 void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
4543 addAssumption(Kind, isl_set_empty(getParamSpace()), Loc, AS_ASSUMPTION, BB);
4546 __isl_give isl_set *Scop::getInvalidContext() const {
4547 return isl_set_copy(InvalidContext);
4550 void Scop::printContext(raw_ostream &OS) const {
4551 OS << "Context:\n";
4552 OS.indent(4) << Context << "\n";
4554 OS.indent(4) << "Assumed Context:\n";
4555 OS.indent(4) << AssumedContext << "\n";
4557 OS.indent(4) << "Invalid Context:\n";
4558 OS.indent(4) << InvalidContext << "\n";
4560 unsigned Dim = 0;
4561 for (const SCEV *Parameter : Parameters)
4562 OS.indent(4) << "p" << Dim++ << ": " << *Parameter << "\n";
4565 void Scop::printAliasAssumptions(raw_ostream &OS) const {
4566 int noOfGroups = 0;
4567 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
4568 if (Pair.second.size() == 0)
4569 noOfGroups += 1;
4570 else
4571 noOfGroups += Pair.second.size();
4574 OS.indent(4) << "Alias Groups (" << noOfGroups << "):\n";
4575 if (MinMaxAliasGroups.empty()) {
4576 OS.indent(8) << "n/a\n";
4577 return;
4580 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
4582 // If the group has no read only accesses print the write accesses.
4583 if (Pair.second.empty()) {
4584 OS.indent(8) << "[[";
4585 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
4586 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
4587 << ">";
4589 OS << " ]]\n";
4592 for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
4593 OS.indent(8) << "[[";
4594 OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
4595 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
4596 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
4597 << ">";
4599 OS << " ]]\n";
4604 void Scop::printStatements(raw_ostream &OS, bool PrintInstructions) const {
4605 OS << "Statements {\n";
4607 for (const ScopStmt &Stmt : *this) {
4608 OS.indent(4);
4609 Stmt.print(OS, PrintInstructions);
4612 OS.indent(4) << "}\n";
4615 void Scop::printArrayInfo(raw_ostream &OS) const {
4616 OS << "Arrays {\n";
4618 for (auto &Array : arrays())
4619 Array->print(OS);
4621 OS.indent(4) << "}\n";
4623 OS.indent(4) << "Arrays (Bounds as pw_affs) {\n";
4625 for (auto &Array : arrays())
4626 Array->print(OS, /* SizeAsPwAff */ true);
4628 OS.indent(4) << "}\n";
4631 void Scop::print(raw_ostream &OS, bool PrintInstructions) const {
4632 OS.indent(4) << "Function: " << getFunction().getName() << "\n";
4633 OS.indent(4) << "Region: " << getNameStr() << "\n";
4634 OS.indent(4) << "Max Loop Depth: " << getMaxLoopDepth() << "\n";
4635 OS.indent(4) << "Invariant Accesses: {\n";
4636 for (const auto &IAClass : InvariantEquivClasses) {
4637 const auto &MAs = IAClass.InvariantAccesses;
4638 if (MAs.empty()) {
4639 OS.indent(12) << "Class Pointer: " << *IAClass.IdentifyingPointer << "\n";
4640 } else {
4641 MAs.front()->print(OS);
4642 OS.indent(12) << "Execution Context: " << IAClass.ExecutionContext
4643 << "\n";
4646 OS.indent(4) << "}\n";
4647 printContext(OS.indent(4));
4648 printArrayInfo(OS.indent(4));
4649 printAliasAssumptions(OS);
4650 printStatements(OS.indent(4), PrintInstructions);
4653 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4654 LLVM_DUMP_METHOD void Scop::dump() const { print(dbgs(), true); }
4655 #endif
4657 isl_ctx *Scop::getIslCtx() const { return IslCtx.get(); }
4659 __isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
4660 bool NonNegative) {
4661 // First try to use the SCEVAffinator to generate a piecewise defined
4662   // affine function from @p E in the context of @p BB. If that task becomes too
4663   // complex the affinator might return a nullptr. In such a case we invalidate
4664 // the SCoP and return a dummy value. This way we do not need to add error
4665 // handling code to all users of this function.
4666 auto PWAC = Affinator.getPwAff(E, BB);
4667 if (PWAC.first) {
4668 // TODO: We could use a heuristic and either use:
4669 // SCEVAffinator::takeNonNegativeAssumption
4670 // or
4671 // SCEVAffinator::interpretAsUnsigned
4672 // to deal with unsigned or "NonNegative" SCEVs.
4673 if (NonNegative)
4674 Affinator.takeNonNegativeAssumption(PWAC);
4675 return PWAC;
4678 auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
4679 invalidate(COMPLEXITY, DL, BB);
4680 return Affinator.getPwAff(SE->getZero(E->getType()), BB);
4683 __isl_give isl_union_set *Scop::getDomains() const {
4684 isl_space *EmptySpace = isl_space_params_alloc(getIslCtx(), 0);
4685 isl_union_set *Domain = isl_union_set_empty(EmptySpace);
4687 for (const ScopStmt &Stmt : *this)
4688 Domain = isl_union_set_add_set(Domain, Stmt.getDomain());
4690 return Domain;
4693 __isl_give isl_pw_aff *Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB) {
4694 PWACtx PWAC = getPwAff(E, BB);
4695 isl_set_free(PWAC.second);
4696 return PWAC.first;
4699 __isl_give isl_union_map *
4700 Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
4701 isl_union_map *Accesses = isl_union_map_empty(getParamSpace());
4703 for (ScopStmt &Stmt : *this) {
4704 for (MemoryAccess *MA : Stmt) {
4705 if (!Predicate(*MA))
4706 continue;
4708 isl_set *Domain = Stmt.getDomain();
4709 isl_map *AccessDomain = MA->getAccessRelation().release();
4710 AccessDomain = isl_map_intersect_domain(AccessDomain, Domain);
4711 Accesses = isl_union_map_add_map(Accesses, AccessDomain);
4717 for (auto X : this->getInvariantAccesses())
4718 for (auto A : X.InvariantAccesses) {
4719 if (!Predicate(*A))
4720 continue;
4721 Accesses =
4722 isl_union_map_add_map(Accesses, A->getAccessRelation().release());
4724 return isl_union_map_coalesce(Accesses);
4727 __isl_give isl_union_map *Scop::getMustWrites() {
4728 return getAccessesOfType([](MemoryAccess &MA) { return MA.isMustWrite(); });
4731 __isl_give isl_union_map *Scop::getMayWrites() {
4732 return getAccessesOfType([](MemoryAccess &MA) { return MA.isMayWrite(); });
4735 __isl_give isl_union_map *Scop::getWrites() {
4736 return getAccessesOfType([](MemoryAccess &MA) { return MA.isWrite(); });
4739 __isl_give isl_union_map *Scop::getReads() {
4740 return getAccessesOfType([](MemoryAccess &MA) { return MA.isRead(); });
4743 __isl_give isl_union_map *Scop::getAccesses() {
4744 return getAccessesOfType([](MemoryAccess &MA) { return true; });
4747 // Check whether @p Node is an extension node.
4749 // @return isl_bool_true if @p Node is not an extension node; isl_bool_error
4749 //         (aborting the traversal) if it is.
4750 isl_bool isNotExtNode(__isl_keep isl_schedule_node *Node, void *User) {
4751 if (isl_schedule_node_get_type(Node) == isl_schedule_node_extension)
4752 return isl_bool_error;
4753 else
4754 return isl_bool_true;
4757 bool Scop::containsExtensionNode(__isl_keep isl_schedule *Schedule) {
4758 return isl_schedule_foreach_schedule_node_top_down(Schedule, isNotExtNode,
4759 nullptr) == isl_stat_error;
4762 __isl_give isl_union_map *Scop::getSchedule() const {
4763 auto *Tree = getScheduleTree();
4764 if (containsExtensionNode(Tree)) {
4765 isl_schedule_free(Tree);
4766 return nullptr;
4768 auto *S = isl_schedule_get_map(Tree);
4769 isl_schedule_free(Tree);
4770 return S;
4773 __isl_give isl_schedule *Scop::getScheduleTree() const {
4774 return isl_schedule_intersect_domain(isl_schedule_copy(Schedule),
4775 getDomains());
4778 void Scop::setSchedule(__isl_take isl_union_map *NewSchedule) {
4779 auto *S = isl_schedule_from_domain(getDomains());
4780 S = isl_schedule_insert_partial_schedule(
4781 S, isl_multi_union_pw_aff_from_union_map(NewSchedule));
4782 isl_schedule_free(Schedule);
4783 Schedule = S;
4786 void Scop::setScheduleTree(__isl_take isl_schedule *NewSchedule) {
4787 isl_schedule_free(Schedule);
4788 Schedule = NewSchedule;
4791 bool Scop::restrictDomains(__isl_take isl_union_set *Domain) {
4792 bool Changed = false;
4793 for (ScopStmt &Stmt : *this) {
4794 isl_union_set *StmtDomain = isl_union_set_from_set(Stmt.getDomain());
4795 isl_union_set *NewStmtDomain = isl_union_set_intersect(
4796 isl_union_set_copy(StmtDomain), isl_union_set_copy(Domain));
4798 if (isl_union_set_is_subset(StmtDomain, NewStmtDomain)) {
4799 isl_union_set_free(StmtDomain);
4800 isl_union_set_free(NewStmtDomain);
4801 continue;
4804 Changed = true;
4806 isl_union_set_free(StmtDomain);
4807 NewStmtDomain = isl_union_set_coalesce(NewStmtDomain);
4809 if (isl_union_set_is_empty(NewStmtDomain)) {
4810 Stmt.restrictDomain(isl_set_empty(Stmt.getDomainSpace()));
4811 isl_union_set_free(NewStmtDomain);
4812 } else
4813 Stmt.restrictDomain(isl_set_from_union_set(NewStmtDomain));
4815 isl_union_set_free(Domain);
4816 return Changed;
4819 ScalarEvolution *Scop::getSE() const { return SE; }
4821 // Create an isl::multi_union_pw_aff that defines an identity mapping from the
4822 // elements of USet to their N-th dimension.
4824 // # Example:
4826 // Domain: { A[i,j]; B[i,j,k] }
4827 // N: 1
4829 // Resulting Mapping: { {A[i,j] -> [(j)]; B[i,j,k] -> [(j)] }
4831 // @param USet A union set describing the elements for which to generate a
4832 // mapping.
4833 // @param N The dimension to map to.
4834 // @returns A mapping from USet to its N-th dimension.
static isl::multi_union_pw_aff mapToDimension(isl::union_set USet, int N) {
  assert(N >= 0);
  assert(USet);
  assert(!USet.is_empty());

  auto Result = isl::union_pw_multi_aff::empty(USet.get_space());

  auto Lambda = [&Result, N](isl::set S) -> isl::stat {
    int Dim = S.dim(isl::dim::set);
    // Project out every dimension after the N-th one, then drop the leading
    // output dimensions so that only the dimension of interest remains.
    auto PMA = isl::pw_multi_aff::project_out_map(S.get_space(), isl::dim::set,
                                                  N, Dim - N);
    if (N > 1)
      PMA = PMA.drop_dims(isl::dim::out, 0, N - 1);

    Result = Result.add_pw_multi_aff(PMA);
    return isl::stat::ok;
  };

  isl::stat Res = USet.foreach_set(Lambda);
  (void)Res;

  assert(Res == isl::stat::ok);

  return isl::multi_union_pw_aff(isl::union_pw_multi_aff(Result));
}
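
// Worked example for the helper above (editorial, following the documented
// semantics): for S = { B[i,j,k] } and N = 2, Dim is 3. project_out_map
// removes the trailing dimension, yielding { B[i,j,k] -> [(i), (j)] }, and
// drop_dims then removes the leading output, leaving { B[i,j,k] -> [(j)] }.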
void Scop::addScopStmt(BasicBlock *BB, Loop *SurroundingLoop,
                       std::vector<Instruction *> Instructions) {
  assert(BB && "Unexpected nullptr!");
  Stmts.emplace_back(*this, *BB, SurroundingLoop, Instructions);
  auto *Stmt = &Stmts.back();
  StmtMap[BB].push_back(Stmt);
}

void Scop::addScopStmt(Region *R, Loop *SurroundingLoop) {
  assert(R && "Unexpected nullptr!");
  Stmts.emplace_back(*this, *R, SurroundingLoop);
  auto *Stmt = &Stmts.back();
  for (BasicBlock *BB : R->blocks())
    StmtMap[BB].push_back(Stmt);
}

ScopStmt *Scop::addScopStmt(__isl_take isl_map *SourceRel,
                            __isl_take isl_map *TargetRel,
                            __isl_take isl_set *Domain) {
#ifndef NDEBUG
  isl_set *SourceDomain = isl_map_domain(isl_map_copy(SourceRel));
  isl_set *TargetDomain = isl_map_domain(isl_map_copy(TargetRel));
  assert(isl_set_is_subset(Domain, TargetDomain) &&
         "Target access not defined for complete statement domain");
  assert(isl_set_is_subset(Domain, SourceDomain) &&
         "Source access not defined for complete statement domain");
  isl_set_free(SourceDomain);
  isl_set_free(TargetDomain);
#endif
  Stmts.emplace_back(*this, SourceRel, TargetRel, Domain);
  CopyStmtsNum++;
  return &(Stmts.back());
}

void Scop::buildSchedule(LoopInfo &LI) {
  Loop *L = getLoopSurroundingScop(*this, LI);
  LoopStackTy LoopStack({LoopStackElementTy(L, nullptr, 0)});
  buildSchedule(getRegion().getNode(), LoopStack, LI);
  assert(LoopStack.size() == 1 && LoopStack.back().L == L);
  Schedule = LoopStack[0].Schedule;
}
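
// Editorial note: the initial stack element carries the loop surrounding the
// scop (or nullptr) so that the schedules of top-level statements and of
// completed loops are folded into LoopStack[0].Schedule, which becomes the
// scop's schedule once the traversal finishes.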
/// To generate a schedule for the elements in a Region we traverse the Region
/// in reverse-post-order and add the contained RegionNodes in traversal order
/// to the schedule of the loop that is currently at the top of the LoopStack.
/// For loop-free code, this results in a correct sequential ordering.
///
/// Example:
///
///          bb1(0)
///         /     \.
///      bb2(1)   bb3(2)
///         \    /  \.
///         bb4(3)  bb5(4)
///            \    /
///            bb6(5)
///
/// Including loops requires additional processing. Whenever a loop header is
/// encountered, the corresponding loop is added to the @p LoopStack. Starting
/// from an empty schedule, we first process all RegionNodes that are within
/// this loop and complete the sequential schedule at this loop-level before
/// processing any other nodes. To implement this loop-nodes-first processing,
/// the reverse post-order traversal is insufficient. Hence, we additionally
/// check if the traversal yields sub-regions or blocks that are outside the
/// last loop on the @p LoopStack. These region-nodes are then queued and only
/// traversed after all nodes within the current loop have been processed.
void Scop::buildSchedule(Region *R, LoopStackTy &LoopStack, LoopInfo &LI) {
  Loop *OuterScopLoop = getLoopSurroundingScop(*this, LI);

  ReversePostOrderTraversal<Region *> RTraversal(R);
  std::deque<RegionNode *> WorkList(RTraversal.begin(), RTraversal.end());
  std::deque<RegionNode *> DelayList;
  bool LastRNWaiting = false;

  // Iterate over the region @p R in reverse post-order but queue
  // sub-regions/blocks iff they are not part of the last encountered, but not
  // yet completely traversed, loop. The variable LastRNWaiting is a flag that
  // indicates that we queued the last sub-region/block from the reverse
  // post-order iterator. If it is set, we have to explore the next
  // sub-region/block from the iterator (if any) to guarantee progress. If it
  // is not set, we first try the next queued sub-region/block.
  while (!WorkList.empty() || !DelayList.empty()) {
    RegionNode *RN;

    if ((LastRNWaiting && !WorkList.empty()) || DelayList.size() == 0) {
      RN = WorkList.front();
      WorkList.pop_front();
      LastRNWaiting = false;
    } else {
      RN = DelayList.front();
      DelayList.pop_front();
    }

    Loop *L = getRegionNodeLoop(RN, LI);
    if (!contains(L))
      L = OuterScopLoop;

    Loop *LastLoop = LoopStack.back().L;
    if (LastLoop != L) {
      if (LastLoop && !LastLoop->contains(L)) {
        LastRNWaiting = true;
        DelayList.push_back(RN);
        continue;
      }
      LoopStack.push_back({L, nullptr, 0});
    }
    buildSchedule(RN, LoopStack, LI);
  }

  return;
}
void Scop::buildSchedule(RegionNode *RN, LoopStackTy &LoopStack, LoopInfo &LI) {
  if (RN->isSubRegion()) {
    auto *LocalRegion = RN->getNodeAs<Region>();
    if (!isNonAffineSubRegion(LocalRegion)) {
      buildSchedule(LocalRegion, LoopStack, LI);
      return;
    }
  }

  auto &LoopData = LoopStack.back();
  LoopData.NumBlocksProcessed += getNumBlocksInRegionNode(RN);

  for (auto *Stmt : getStmtListFor(RN)) {
    auto *UDomain = isl_union_set_from_set(Stmt->getDomain());
    auto *StmtSchedule = isl_schedule_from_domain(UDomain);
    LoopData.Schedule = combineInSequence(LoopData.Schedule, StmtSchedule);
  }

  // Check if we just processed the last node in this loop. If we did, finalize
  // the loop by:
  //
  //   - adding new schedule dimensions
  //   - folding the resulting schedule into the parent loop schedule
  //   - dropping the loop schedule from the LoopStack.
  //
  // Then continue to check surrounding loops, which might also have been
  // completed by this node.
  while (LoopData.L &&
         LoopData.NumBlocksProcessed == getNumBlocksInLoop(LoopData.L)) {
    auto *Schedule = LoopData.Schedule;
    auto NumBlocksProcessed = LoopData.NumBlocksProcessed;

    LoopStack.pop_back();
    auto &NextLoopData = LoopStack.back();

    if (Schedule) {
      isl::union_set Domain = give(isl_schedule_get_domain(Schedule));
      isl::multi_union_pw_aff MUPA = mapToDimension(Domain, LoopStack.size());
      Schedule = isl_schedule_insert_partial_schedule(Schedule, MUPA.release());
      NextLoopData.Schedule =
          combineInSequence(NextLoopData.Schedule, Schedule);
    }

    NextLoopData.NumBlocksProcessed += NumBlocksProcessed;
    LoopData = NextLoopData;
  }
}
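
// Editorial sketch of the stack evolution above: for a nest L1 { L2 { bb } },
// finishing bb completes L2, so L2's schedule receives a new schedule
// dimension via mapToDimension and is appended to L1's schedule; if that in
// turn completes L1, the while loop repeats the same folding one level
// further out.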
ScopStmt *Scop::getStmtFor(BasicBlock *BB) const {
  auto StmtMapIt = StmtMap.find(BB);
  if (StmtMapIt == StmtMap.end())
    return nullptr;
  assert(StmtMapIt->second.size() == 1);
  return StmtMapIt->second.front();
}

ArrayRef<ScopStmt *> Scop::getStmtListFor(BasicBlock *BB) const {
  auto StmtMapIt = StmtMap.find(BB);
  if (StmtMapIt == StmtMap.end())
    return {};
  assert(StmtMapIt->second.size() == 1 &&
         "Each basic block is expected to map to exactly one statement");
  return StmtMapIt->second;
}

ScopStmt *Scop::getLastStmtFor(BasicBlock *BB) const {
  ArrayRef<ScopStmt *> StmtList = getStmtListFor(BB);
  if (StmtList.size() > 0)
    return StmtList.back();
  return nullptr;
}

ArrayRef<ScopStmt *> Scop::getStmtListFor(RegionNode *RN) const {
  if (RN->isSubRegion())
    return getStmtListFor(RN->getNodeAs<Region>());
  return getStmtListFor(RN->getNodeAs<BasicBlock>());
}

ArrayRef<ScopStmt *> Scop::getStmtListFor(Region *R) const {
  return getStmtListFor(R->getEntry());
}
int Scop::getRelativeLoopDepth(const Loop *L) const {
  if (!L || !R.contains(L))
    return -1;
  // outermostLoopInRegion always returns nullptr for top level regions.
  if (R.isTopLevelRegion()) {
    // LoopInfo's depths start at 1, we start at 0.
    return L->getLoopDepth() - 1;
  } else {
    Loop *OuterLoop = R.outermostLoopInRegion(const_cast<Loop *>(L));
    assert(OuterLoop);
    return L->getLoopDepth() - OuterLoop->getLoopDepth();
  }
}
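
// Example (editorial): for a loop at absolute depth 3 inside a non-top-level
// region whose outermost contained loop has depth 2, this returns 3 - 2 = 1;
// for a loop outside the region it returns -1.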
ScopArrayInfo *Scop::getArrayInfoByName(const std::string BaseName) {
  for (auto &SAI : arrays()) {
    if (SAI->getName() == BaseName)
      return SAI;
  }
  return nullptr;
}

void Scop::addAccessData(MemoryAccess *Access) {
  const ScopArrayInfo *SAI = Access->getOriginalScopArrayInfo();
  assert(SAI && "can only use after access relations have been constructed");

  if (Access->isOriginalValueKind() && Access->isRead())
    ValueUseAccs[SAI].push_back(Access);
  else if (Access->isOriginalAnyPHIKind() && Access->isWrite())
    PHIIncomingAccs[SAI].push_back(Access);
}

void Scop::removeAccessData(MemoryAccess *Access) {
  if (Access->isOriginalValueKind() && Access->isRead()) {
    auto &Uses = ValueUseAccs[Access->getScopArrayInfo()];
    // Use the erase-remove idiom; std::remove alone only shifts the surviving
    // elements forward and does not shrink the container.
    Uses.erase(std::remove(Uses.begin(), Uses.end(), Access), Uses.end());
  } else if (Access->isOriginalAnyPHIKind() && Access->isWrite()) {
    auto &Incomings = PHIIncomingAccs[Access->getScopArrayInfo()];
    Incomings.erase(std::remove(Incomings.begin(), Incomings.end(), Access),
                    Incomings.end());
  }
}
MemoryAccess *Scop::getValueDef(const ScopArrayInfo *SAI) const {
  assert(SAI->isValueKind());

  Instruction *Val = dyn_cast<Instruction>(SAI->getBasePtr());
  if (!Val)
    return nullptr;

  ScopStmt *Stmt = getStmtFor(Val);
  if (!Stmt)
    return nullptr;

  return Stmt->lookupValueWriteOf(Val);
}

ArrayRef<MemoryAccess *> Scop::getValueUses(const ScopArrayInfo *SAI) const {
  assert(SAI->isValueKind());
  auto It = ValueUseAccs.find(SAI);
  if (It == ValueUseAccs.end())
    return {};
  return It->second;
}

MemoryAccess *Scop::getPHIRead(const ScopArrayInfo *SAI) const {
  assert(SAI->isPHIKind() || SAI->isExitPHIKind());

  if (SAI->isExitPHIKind())
    return nullptr;

  PHINode *PHI = cast<PHINode>(SAI->getBasePtr());
  ScopStmt *Stmt = getStmtFor(PHI);
  assert(Stmt && "PHINode must be within the SCoP");

  return Stmt->lookupPHIReadOf(PHI);
}

ArrayRef<MemoryAccess *> Scop::getPHIIncomings(const ScopArrayInfo *SAI) const {
  assert(SAI->isPHIKind() || SAI->isExitPHIKind());
  auto It = PHIIncomingAccs.find(SAI);
  if (It == PHIIncomingAccs.end())
    return {};
  return It->second;
}

raw_ostream &polly::operator<<(raw_ostream &O, const Scop &scop) {
  scop.print(O, PollyPrintInstructions);
  return O;
}

//===----------------------------------------------------------------------===//
void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.setPreservesAll();
}

void updateLoopCountStatistic(ScopDetection::LoopStats Stats) {
  NumLoopsInScop += Stats.NumLoops;
  MaxNumLoopsInScop =
      std::max(MaxNumLoopsInScop.getValue(), (unsigned)Stats.NumLoops);

  if (Stats.MaxDepth == 1)
    NumScopsDepthOne++;
  else if (Stats.MaxDepth == 2)
    NumScopsDepthTwo++;
  else if (Stats.MaxDepth == 3)
    NumScopsDepthThree++;
  else if (Stats.MaxDepth == 4)
    NumScopsDepthFour++;
  else if (Stats.MaxDepth == 5)
    NumScopsDepthFive++;
  else
    NumScopsDepthLarger++;
}
bool ScopInfoRegionPass::runOnRegion(Region *R, RGPassManager &RGM) {
  auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();

  if (!SD.isMaxRegionInScop(*R))
    return false;

  Function *F = R->getEntry()->getParent();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto const &DL = F->getParent()->getDataLayout();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F);

  ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE);
  S = SB.getScop(); // take ownership of scop object

  if (S) {
    ScopDetection::LoopStats Stats =
        ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
    updateLoopCountStatistic(Stats);
  }

  return false;
}

void ScopInfoRegionPass::print(raw_ostream &OS, const Module *) const {
  if (S)
    S->print(OS, PollyPrintInstructions);
  else
    OS << "Invalid Scop!\n";
}

char ScopInfoRegionPass::ID = 0;

Pass *polly::createScopInfoRegionPassPass() { return new ScopInfoRegionPass(); }

INITIALIZE_PASS_BEGIN(ScopInfoRegionPass, "polly-scops",
                      "Polly - Create polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(ScopInfoRegionPass, "polly-scops",
                    "Polly - Create polyhedral description of Scops", false,
                    false)
//===----------------------------------------------------------------------===//

ScopInfo::ScopInfo(const DataLayout &DL, ScopDetection &SD, ScalarEvolution &SE,
                   LoopInfo &LI, AliasAnalysis &AA, DominatorTree &DT,
                   AssumptionCache &AC) {
  // Create a polyhedral description of scops for all the valid regions of a
  // function.
  for (auto &It : SD) {
    Region *R = const_cast<Region *>(It);
    if (!SD.isMaxRegionInScop(*R))
      continue;

    ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE);
    std::unique_ptr<Scop> S = SB.getScop();
    if (!S)
      continue;
    bool Inserted = RegionToScopMap.insert({R, std::move(S)}).second;
    assert(Inserted && "Building Scop for the same region twice!");
    (void)Inserted;
  }
}
AnalysisKey ScopInfoAnalysis::Key;

ScopInfoAnalysis::Result ScopInfoAnalysis::run(Function &F,
                                               FunctionAnalysisManager &FAM) {
  auto &SD = FAM.getResult<ScopAnalysis>(F);
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  auto &AA = FAM.getResult<AAManager>(F);
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  auto &DL = F.getParent()->getDataLayout();
  return {DL, SD, SE, LI, AA, DT, AC};
}

PreservedAnalyses ScopInfoPrinterPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  auto &SI = FAM.getResult<ScopInfoAnalysis>(F);
  for (auto &It : SI) {
    if (It.second)
      It.second->print(Stream, PollyPrintInstructions);
    else
      Stream << "Invalid Scop!\n";
  }
  return PreservedAnalyses::all();
}
void ScopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.setPreservesAll();
}

bool ScopInfoWrapperPass::runOnFunction(Function &F) {
  auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto const &DL = F.getParent()->getDataLayout();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

  Result.reset(new ScopInfo{DL, SD, SE, LI, AA, DT, AC});
  return false;
}

void ScopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
  for (auto &It : *Result) {
    if (It.second)
      It.second->print(OS, PollyPrintInstructions);
    else
      OS << "Invalid Scop!\n";
  }
}

char ScopInfoWrapperPass::ID = 0;
Pass *polly::createScopInfoWrapperPassPass() {
  return new ScopInfoWrapperPass();
}

INITIALIZE_PASS_BEGIN(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false)