[SI] Fix a potential use-after-free
polly-mirror.git / lib/Analysis/ScopInfo.cpp
//===- ScopInfo.cpp -------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Create a polyhedral description for a static control flow region.
//
// The pass creates a polyhedral description of the Scops detected by the Scop
// detection derived from their LLVM-IR code.
//
// This representation is shared among several tools in the polyhedral
// community, e.g., Cloog, Pluto, Loopo, and Graphite.
//
//===----------------------------------------------------------------------===//

#include "polly/ScopInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopBuilder.h"
#include "polly/ScopDetection.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLOStream.h"
#include "polly/Support/SCEVAffinator.h"
#include "polly/Support/SCEVValidator.h"
#include "polly/Support/ScopHelper.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/RegionInfo.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "isl/aff.h"
#include "isl/constraint.h"
#include "isl/local_space.h"
#include "isl/map.h"
#include "isl/options.h"
#include "isl/printer.h"
#include "isl/schedule.h"
#include "isl/schedule_node.h"
#include "isl/set.h"
#include "isl/union_map.h"
#include "isl/union_set.h"
#include "isl/val.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <deque>
#include <iterator>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace polly;

#define DEBUG_TYPE "polly-scops"

STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
STATISTIC(AssumptionsInbounds, "Number of inbounds assumptions taken.");
STATISTIC(AssumptionsWrapping, "Number of wrapping assumptions taken.");
STATISTIC(AssumptionsUnsigned, "Number of unsigned assumptions taken.");
STATISTIC(AssumptionsComplexity, "Number of too complex SCoPs.");
STATISTIC(AssumptionsUnprofitable, "Number of unprofitable SCoPs.");
STATISTIC(AssumptionsErrorBlock, "Number of error block assumptions taken.");
STATISTIC(AssumptionsInfiniteLoop, "Number of bounded loop assumptions taken.");
STATISTIC(AssumptionsInvariantLoad,
          "Number of invariant loads assumptions taken.");
STATISTIC(AssumptionsDelinearization,
          "Number of delinearization assumptions taken.");

STATISTIC(NumScops, "Number of feasible SCoPs after ScopInfo");
STATISTIC(NumLoopsInScop, "Number of loops in scops");
STATISTIC(NumBoxedLoops, "Number of boxed loops in SCoPs after ScopInfo");
STATISTIC(NumAffineLoops, "Number of affine loops in SCoPs after ScopInfo");

STATISTIC(NumScopsDepthOne, "Number of scops with maximal loop depth 1");
STATISTIC(NumScopsDepthTwo, "Number of scops with maximal loop depth 2");
STATISTIC(NumScopsDepthThree, "Number of scops with maximal loop depth 3");
STATISTIC(NumScopsDepthFour, "Number of scops with maximal loop depth 4");
STATISTIC(NumScopsDepthFive, "Number of scops with maximal loop depth 5");
STATISTIC(NumScopsDepthLarger,
          "Number of scops with maximal loop depth 6 and larger");
STATISTIC(MaxNumLoopsInScop, "Maximal number of loops in scops");

STATISTIC(NumValueWrites, "Number of scalar value writes after ScopInfo");
STATISTIC(
    NumValueWritesInLoops,
    "Number of scalar value writes nested in affine loops after ScopInfo");
STATISTIC(NumPHIWrites, "Number of scalar phi writes after ScopInfo");
STATISTIC(NumPHIWritesInLoops,
          "Number of scalar phi writes nested in affine loops after ScopInfo");
STATISTIC(NumSingletonWrites, "Number of singleton writes after ScopInfo");
STATISTIC(NumSingletonWritesInLoops,
          "Number of singleton writes nested in affine loops after ScopInfo");

// The maximal number of basic sets we allow to be created during domain
// construction. More complex scops will result in very high compile time and
// are also unlikely to result in good code.
static int const MaxDisjunctsInDomain = 20;

// The number of disjuncts in the context after which we stop to add more
// disjuncts. This parameter is there to avoid exponential growth in the
// number of disjuncts when adding non-convex sets to the context.
static int const MaxDisjunctsInContext = 4;

// The maximal number of dimensions we allow during invariant load construction.
// More complex access ranges will result in very high compile time and are
// also unlikely to result in good code. This value is very high and should
// only trigger for corner cases (e.g., the "dct_luma" function in h264,
// SPEC2006).
static int const MaxDimensionsInAccessRange = 9;

static cl::opt<int>
    OptComputeOut("polly-analysis-computeout",
                  cl::desc("Bound the scop analysis by a maximal amount of "
                           "computational steps (0 means no bound)"),
                  cl::Hidden, cl::init(800000), cl::ZeroOrMore,
                  cl::cat(PollyCategory));

static cl::opt<bool> PollyRemarksMinimal(
    "polly-remarks-minimal",
    cl::desc("Do not emit remarks about assumptions that are known"),
    cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));

static cl::opt<int> RunTimeChecksMaxAccessDisjuncts(
    "polly-rtc-max-array-disjuncts",
    cl::desc("The maximal number of disjuncts allowed in memory accesses "
             "used to build RTCs."),
    cl::Hidden, cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));

static cl::opt<unsigned> RunTimeChecksMaxParameters(
    "polly-rtc-max-parameters",
    cl::desc("The maximal number of parameters allowed in RTCs."), cl::Hidden,
    cl::ZeroOrMore, cl::init(8), cl::cat(PollyCategory));

static cl::opt<unsigned> RunTimeChecksMaxArraysPerGroup(
    "polly-rtc-max-arrays-per-group",
    cl::desc("The maximal number of arrays to compare in each alias group."),
    cl::Hidden, cl::ZeroOrMore, cl::init(20), cl::cat(PollyCategory));

static cl::opt<std::string> UserContextStr(
    "polly-context", cl::value_desc("isl parameter set"),
    cl::desc("Provide additional constraints on the context parameters"),
    cl::init(""), cl::cat(PollyCategory));

static cl::opt<bool>
    IslOnErrorAbort("polly-on-isl-error-abort",
                    cl::desc("Abort if an isl error is encountered"),
                    cl::init(true), cl::cat(PollyCategory));

static cl::opt<bool> PollyPreciseInbounds(
    "polly-precise-inbounds",
    cl::desc("Take more precise inbounds assumptions (do not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool>
    PollyIgnoreInbounds("polly-ignore-inbounds",
                        cl::desc("Do not take inbounds assumptions at all"),
                        cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool> PollyIgnoreParamBounds(
    "polly-ignore-parameter-bounds",
    cl::desc(
        "Do not add parameter bounds and do not gist simplify sets accordingly"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool> PollyAllowDereferenceOfAllFunctionParams(
    "polly-allow-dereference-of-all-function-parameters",
    cl::desc(
        "Treat all parameters to functions that are pointers as dereferenceable."
        " This is useful for invariant load hoisting, since we can generate"
        " fewer runtime checks. This is only valid if all pointers to functions"
        " are always initialized, so that Polly can choose to hoist"
        " their loads."),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

static cl::opt<bool> PollyPreciseFoldAccesses(
    "polly-precise-fold-accesses",
    cl::desc("Fold memory accesses to model more possible delinearizations "
             "(does not scale well)"),
    cl::Hidden, cl::init(false), cl::cat(PollyCategory));

bool polly::UseInstructionNames;

static cl::opt<bool, true> XUseInstructionNames(
    "polly-use-llvm-names",
    cl::desc("Use LLVM-IR names when deriving statement names"),
    cl::location(UseInstructionNames), cl::Hidden, cl::init(false),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> PollyPrintInstructions(
    "polly-print-instructions", cl::desc("Output instructions per ScopStmt"),
    cl::Hidden, cl::Optional, cl::init(false), cl::cat(PollyCategory));

//===----------------------------------------------------------------------===//

// Create a sequence of two schedules. Either argument may be null and is
// interpreted as the empty schedule. Can also return null if both schedules
// are empty.
static __isl_give isl_schedule *
combineInSequence(__isl_take isl_schedule *Prev,
                  __isl_take isl_schedule *Succ) {
  if (!Prev)
    return Succ;
  if (!Succ)
    return Prev;

  return isl_schedule_sequence(Prev, Succ);
}

static isl::set addRangeBoundsToSet(isl::set S, const ConstantRange &Range,
                                    int dim, isl::dim type) {
  isl::val V;
  isl::ctx Ctx = S.get_ctx();

  // The upper and lower bound for a parameter value is derived either from
  // the data type of the parameter or from the - possibly more restrictive -
  // range metadata.
  V = valFromAPInt(Ctx.get(), Range.getSignedMin(), true);
  S = S.lower_bound_val(type, dim, V);
  V = valFromAPInt(Ctx.get(), Range.getSignedMax(), true);
  S = S.upper_bound_val(type, dim, V);

  if (Range.isFullSet())
    return S;

  if (isl_set_n_basic_set(S.get()) > MaxDisjunctsInContext)
    return S;

  // In case of signed wrapping, we can refine the set of valid values by
  // excluding the part not covered by the wrapping range.
  if (Range.isSignWrappedSet()) {
    V = valFromAPInt(Ctx.get(), Range.getLower(), true);
    isl::set SLB = S.lower_bound_val(type, dim, V);

    V = valFromAPInt(Ctx.get(), Range.getUpper(), true);
    V = V.sub_ui(1);
    isl::set SUB = S.upper_bound_val(type, dim, V);
    S = SLB.unite(SUB);
  }

  return S;
}
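
// For illustration, assume an i8 parameter p annotated with the sign-wrapped
// range [100, 20): the type-derived bounds give -128 <= p <= 127, and the
// wrapping refinement above shrinks this to
//   { [p] : -128 <= p <= 19 or 100 <= p <= 127 }.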

static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
  LoadInst *BasePtrLI = dyn_cast<LoadInst>(BasePtr);
  if (!BasePtrLI)
    return nullptr;

  if (!S->contains(BasePtrLI))
    return nullptr;

  ScalarEvolution &SE = *S->getSE();

  auto *OriginBaseSCEV =
      SE.getPointerBase(SE.getSCEV(BasePtrLI->getPointerOperand()));
  if (!OriginBaseSCEV)
    return nullptr;

  auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(OriginBaseSCEV);
  if (!OriginBaseSCEVUnknown)
    return nullptr;

  return S->getScopArrayInfo(OriginBaseSCEVUnknown->getValue(),
                             MemoryKind::Array);
}
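
// For illustration, given a pointer-to-pointer "float **X", the inner access
// X[1][i] uses a base pointer that is itself loaded from X inside the scop.
// In that case the SAI modeling X is the base pointer origin, which lets
// invariant load hoisting reason about the indirection.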

ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl::ctx Ctx,
                             ArrayRef<const SCEV *> Sizes, MemoryKind Kind,
                             const DataLayout &DL, Scop *S,
                             const char *BaseName)
    : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
  std::string BasePtrName =
      BaseName ? BaseName
               : getIslCompatibleName("MemRef", BasePtr, S->getNextArrayIdx(),
                                      Kind == MemoryKind::PHI ? "__phi" : "",
                                      UseInstructionNames);
  Id = isl::id::alloc(Ctx, BasePtrName, this);

  updateSizes(Sizes);

  if (!BasePtr || Kind != MemoryKind::Array) {
    BasePtrOriginSAI = nullptr;
    return;
  }

  BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
  if (BasePtrOriginSAI)
    const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(this);
}

ScopArrayInfo::~ScopArrayInfo() = default;

isl::space ScopArrayInfo::getSpace() const {
  auto Space = isl::space(Id.get_ctx(), 0, getNumberOfDimensions());
  Space = Space.set_tuple_id(isl::dim::set, Id);
  return Space;
}

bool ScopArrayInfo::isReadOnly() {
  isl::union_set WriteSet = S.getWrites().range();
  isl::space Space = getSpace();
  WriteSet = WriteSet.extract_set(Space);

  return bool(WriteSet.is_empty());
}

bool ScopArrayInfo::isCompatibleWith(const ScopArrayInfo *Array) const {
  if (Array->getElementType() != getElementType())
    return false;

  if (Array->getNumberOfDimensions() != getNumberOfDimensions())
    return false;

  for (unsigned i = 0; i < getNumberOfDimensions(); i++)
    if (Array->getDimensionSize(i) != getDimensionSize(i))
      return false;

  return true;
}

void ScopArrayInfo::updateElementType(Type *NewElementType) {
  if (NewElementType == ElementType)
    return;

  auto OldElementSize = DL.getTypeAllocSizeInBits(ElementType);
  auto NewElementSize = DL.getTypeAllocSizeInBits(NewElementType);

  if (NewElementSize == OldElementSize || NewElementSize == 0)
    return;

  if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
    ElementType = NewElementType;
  } else {
    auto GCD = GreatestCommonDivisor64(NewElementSize, OldElementSize);
    ElementType = IntegerType::get(ElementType->getContext(), GCD);
  }
}

/// Make the ScopArrayInfo model a Fortran array.
void ScopArrayInfo::applyAndSetFAD(Value *FAD) {
  assert(FAD && "got invalid Fortran array descriptor");
  if (this->FAD) {
    assert(this->FAD == FAD &&
           "receiving different array descriptors for same array");
    return;
  }

  assert(DimensionSizesPw.size() > 0 && !DimensionSizesPw[0]);
  assert(!this->FAD);
  this->FAD = FAD;

  isl::space Space(S.getIslCtx(), 1, 0);

  std::string param_name = getName();
  param_name += "_fortranarr_size";
  isl::id IdPwAff = isl::id::alloc(S.getIslCtx(), param_name, this);

  Space = Space.set_dim_id(isl::dim::param, 0, IdPwAff);
  isl::pw_aff PwAff =
      isl::aff::var_on_domain(isl::local_space(Space), isl::dim::param, 0);

  DimensionSizesPw[0] = PwAff;
}

bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
                                bool CheckConsistency) {
  int SharedDims = std::min(NewSizes.size(), DimensionSizes.size());
  int ExtraDimsNew = NewSizes.size() - SharedDims;
  int ExtraDimsOld = DimensionSizes.size() - SharedDims;

  if (CheckConsistency) {
    for (int i = 0; i < SharedDims; i++) {
      auto *NewSize = NewSizes[i + ExtraDimsNew];
      auto *KnownSize = DimensionSizes[i + ExtraDimsOld];
      if (NewSize && KnownSize && NewSize != KnownSize)
        return false;
    }

    if (DimensionSizes.size() >= NewSizes.size())
      return true;
  }

  DimensionSizes.clear();
  DimensionSizes.insert(DimensionSizes.begin(), NewSizes.begin(),
                        NewSizes.end());
  DimensionSizesPw.clear();
  for (const SCEV *Expr : DimensionSizes) {
    if (!Expr) {
      DimensionSizesPw.push_back(nullptr);
      continue;
    }
    isl::pw_aff Size = S.getPwAffOnly(Expr);
    DimensionSizesPw.push_back(Size);
  }
  return true;
}

std::string ScopArrayInfo::getName() const { return Id.get_name(); }

int ScopArrayInfo::getElemSizeInBytes() const {
  return DL.getTypeAllocSize(ElementType);
}

isl::id ScopArrayInfo::getBasePtrId() const { return Id; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScopArrayInfo::dump() const { print(errs()); }
#endif

void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
  OS.indent(8) << *getElementType() << " " << getName();
  unsigned u = 0;
  // If this is a Fortran array, then we can print the outermost dimension
  // as an isl_pw_aff even though there is no SCEV information.
  bool IsOutermostSizeKnown = SizeAsPwAff && FAD;

  if (!IsOutermostSizeKnown && getNumberOfDimensions() > 0 &&
      !getDimensionSize(0)) {
    OS << "[*]";
    u++;
  }
  for (; u < getNumberOfDimensions(); u++) {
    OS << "[";

    if (SizeAsPwAff) {
      isl::pw_aff Size = getDimensionSizePw(u);
      OS << " " << Size << " ";
    } else {
      OS << *getDimensionSize(u);
    }

    OS << "]";
  }

  OS << ";";

  if (BasePtrOriginSAI)
    OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";

  OS << " // Element size " << getElemSizeInBytes() << "\n";
}

const ScopArrayInfo *
ScopArrayInfo::getFromAccessFunction(isl::pw_multi_aff PMA) {
  isl::id Id = PMA.get_tuple_id(isl::dim::out);
  assert(!Id.is_null() && "Output dimension didn't have an ID");
  return getFromId(Id);
}

const ScopArrayInfo *ScopArrayInfo::getFromId(isl::id Id) {
  void *User = Id.get_user();
  const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
  return SAI;
}

void MemoryAccess::wrapConstantDimensions() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::ctx Ctx = ArraySpace.get_ctx();
  unsigned DimsArray = SAI->getNumberOfDimensions();

  isl::multi_aff DivModAff = isl::multi_aff::identity(
      ArraySpace.map_from_domain_and_range(ArraySpace));
  isl::local_space LArraySpace = isl::local_space(ArraySpace);

  // Begin with last dimension, to iteratively carry into higher dimensions.
  for (int i = DimsArray - 1; i > 0; i--) {
    auto *DimSize = SAI->getDimensionSize(i);
    auto *DimSizeCst = dyn_cast<SCEVConstant>(DimSize);

    // This transformation is not applicable to dimensions with dynamic size.
    if (!DimSizeCst)
      continue;

    // This transformation is not applicable to dimensions of size zero.
    if (DimSize->isZero())
      continue;

    isl::val DimSizeVal =
        valFromAPInt(Ctx.get(), DimSizeCst->getAPInt(), false);
    isl::aff Var = isl::aff::var_on_domain(LArraySpace, isl::dim::set, i);
    isl::aff PrevVar =
        isl::aff::var_on_domain(LArraySpace, isl::dim::set, i - 1);

    // Compute: index % size
    // Modulo must apply in the divide of the previous iteration, if any.
    isl::aff Modulo = Var.mod(DimSizeVal);
    Modulo = Modulo.pullback(DivModAff);

    // Compute: floor(index / size)
    isl::aff Divide = Var.div(isl::aff(LArraySpace, DimSizeVal));
    Divide = Divide.floor();
    Divide = Divide.add(PrevVar);
    Divide = Divide.pullback(DivModAff);

    // Apply Modulo and Divide.
    DivModAff = DivModAff.set_aff(i, Modulo);
    DivModAff = DivModAff.set_aff(i - 1, Divide);
  }

  // Apply all modulo/divides on the accesses.
  isl::map Relation = AccessRelation;
  Relation = Relation.apply_range(isl::map::from_multi_aff(DivModAff));
  Relation = Relation.detect_equalities();
  AccessRelation = Relation;
}
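
// For illustration, assume an array A with dimension sizes [*][8]: the
// divide/modulo mapping built above normalizes a delinearized access
// A[i][17] to A[i + 2][1], since 17 mod 8 == 1 and floor(17 / 8) == 2
// carries into the next outer dimension.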

void MemoryAccess::updateDimensionality() {
  auto *SAI = getScopArrayInfo();
  isl::space ArraySpace = SAI->getSpace();
  isl::space AccessSpace = AccessRelation.get_space().range();
  isl::ctx Ctx = ArraySpace.get_ctx();

  auto DimsArray = ArraySpace.dim(isl::dim::set);
  auto DimsAccess = AccessSpace.dim(isl::dim::set);
  auto DimsMissing = DimsArray - DimsAccess;

  auto *BB = getStatement()->getEntryBlock();
  auto &DL = BB->getModule()->getDataLayout();
  unsigned ArrayElemSize = SAI->getElemSizeInBytes();
  unsigned ElemBytes = DL.getTypeAllocSize(getElementType());

  isl::map Map = isl::map::from_domain_and_range(
      isl::set::universe(AccessSpace), isl::set::universe(ArraySpace));

  for (unsigned i = 0; i < DimsMissing; i++)
    Map = Map.fix_si(isl::dim::out, i, 0);

  for (unsigned i = DimsMissing; i < DimsArray; i++)
    Map = Map.equate(isl::dim::in, i - DimsMissing, isl::dim::out, i);

  AccessRelation = AccessRelation.apply_range(Map);

  // For the non-delinearized arrays, divide the access function of the last
  // subscript by the size of the elements in the array.
  //
  // A stride-one array access in C expressed as A[i] is expressed in
  // LLVM-IR as something like A[i * elementsize]. This hides the fact that
  // two subsequent values of 'i' index two values that are stored next to
  // each other in memory. By this division we make this characteristic
  // obvious again. If the base pointer was accessed with offsets not
  // divisible by the element size of the accesses, we will have chosen a
  // smaller ArrayElemSize that divides the offsets of all accesses to this
  // base pointer.
  if (DimsAccess == 1) {
    isl::val V = isl::val(Ctx, ArrayElemSize);
    AccessRelation = AccessRelation.floordiv_val(V);
  }

  // We currently do this only if we added at least one dimension, which means
  // some dimension's indices have not been specified, an indicator that some
  // index values have been added together.
  // TODO: Investigate general usefulness; the effect on unit tests is to make
  // index expressions more complicated.
  if (DimsMissing)
    wrapConstantDimensions();

  if (!isAffine())
    computeBoundsOnAccessRelation(ArrayElemSize);

  // Introduce multi-element accesses in case the type loaded by this memory
  // access is larger than the canonical element type of the array.
  //
  // An access ((float *)A)[i] to an array char *A is modeled as
  // {[i] -> A[o] : 4 i <= o <= 4 i + 3}
  if (ElemBytes > ArrayElemSize) {
    assert(ElemBytes % ArrayElemSize == 0 &&
           "Loaded element size should be multiple of canonical element size");
    isl::map Map = isl::map::from_domain_and_range(
        isl::set::universe(ArraySpace), isl::set::universe(ArraySpace));
    for (unsigned i = 0; i < DimsArray - 1; i++)
      Map = Map.equate(isl::dim::in, i, isl::dim::out, i);

    isl::constraint C;
    isl::local_space LS;

    LS = isl::local_space(Map.get_space());
    int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();

    C = isl::constraint::alloc_inequality(LS);
    C = C.set_constant_val(isl::val(Ctx, Num - 1));
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, 1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, -1);
    Map = Map.add_constraint(C);

    C = isl::constraint::alloc_inequality(LS);
    C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, -1);
    C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, 1);
    C = C.set_constant_val(isl::val(Ctx, 0));
    Map = Map.add_constraint(C);
    AccessRelation = AccessRelation.apply_range(Map);
  }
}
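
// For illustration, a single-subscript access to an array of floats appears
// in LLVM-IR with byte offsets, i.e., as { S[i] -> A[4i] }; the floordiv by
// the element size above recovers { S[i] -> A[i] }.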

const std::string
MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
  switch (RT) {
  case MemoryAccess::RT_NONE:
    llvm_unreachable("Requested a reduction operator string for a memory "
                     "access which isn't a reduction");
  case MemoryAccess::RT_ADD:
    return "+";
  case MemoryAccess::RT_MUL:
    return "*";
  case MemoryAccess::RT_BOR:
    return "|";
  case MemoryAccess::RT_BXOR:
    return "^";
  case MemoryAccess::RT_BAND:
    return "&";
  }
  llvm_unreachable("Unknown reduction type");
}

const ScopArrayInfo *MemoryAccess::getOriginalScopArrayInfo() const {
  isl::id ArrayId = getArrayId();
  void *User = ArrayId.get_user();
  const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
  return SAI;
}

const ScopArrayInfo *MemoryAccess::getLatestScopArrayInfo() const {
  isl::id ArrayId = getLatestArrayId();
  void *User = ArrayId.get_user();
  const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
  return SAI;
}

isl::id MemoryAccess::getOriginalArrayId() const {
  return AccessRelation.get_tuple_id(isl::dim::out);
}

isl::id MemoryAccess::getLatestArrayId() const {
  if (!hasNewAccessRelation())
    return getOriginalArrayId();
  return NewAccessRelation.get_tuple_id(isl::dim::out);
}

isl::map MemoryAccess::getAddressFunction() const {
  return getAccessRelation().lexmin();
}

isl::pw_multi_aff
MemoryAccess::applyScheduleToAccessRelation(isl::union_map USchedule) const {
  isl::map Schedule, ScheduledAccRel;
  isl::union_set UDomain;

  UDomain = getStatement()->getDomain();
  USchedule = USchedule.intersect_domain(UDomain);
  Schedule = isl::map::from_union_map(USchedule);
  ScheduledAccRel = getAddressFunction().apply_domain(Schedule);
  return isl::pw_multi_aff::from_map(ScheduledAccRel);
}

isl::map MemoryAccess::getOriginalAccessRelation() const {
  return AccessRelation;
}

std::string MemoryAccess::getOriginalAccessRelationStr() const {
  return stringFromIslObj(AccessRelation.get());
}

isl::space MemoryAccess::getOriginalAccessRelationSpace() const {
  return AccessRelation.get_space();
}

isl::map MemoryAccess::getNewAccessRelation() const {
  return NewAccessRelation;
}

std::string MemoryAccess::getNewAccessRelationStr() const {
  return stringFromIslObj(NewAccessRelation.get());
}

std::string MemoryAccess::getAccessRelationStr() const {
  return getAccessRelation().to_str();
}

isl::basic_map MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
  isl::space Space = isl::space(Statement->getIslCtx(), 0, 1);
  Space = Space.align_params(Statement->getDomainSpace());

  return isl::basic_map::from_domain_and_range(
      isl::basic_set::universe(Statement->getDomainSpace()),
      isl::basic_set::universe(Space));
}

// Formalize no out-of-bound access assumption
//
// When delinearizing array accesses we optimistically assume that the
// delinearized accesses do not access out-of-bound locations (the subscript
// expression of each array evaluates for each statement instance that is
// executed to a value that is greater than or equal to zero and strictly
// smaller than the size of the corresponding dimension). The only exception
// is the outermost dimension for which we do not need to assume any upper
// bound. At this point we formalize this assumption to ensure that at code
// generation time the relevant run-time checks can be generated.
//
// To find the set of constraints necessary to avoid out-of-bound accesses, we
// first build the set of data locations that are not within array bounds. We
// then apply the reverse access relation to obtain the set of iterations that
// may contain invalid accesses and reduce this set of iterations to the ones
// that are actually executed by intersecting them with the domain of the
// statement. If we now project out all loop dimensions, we obtain a set of
// parameters that may cause statement instances to be executed that may
// possibly yield out-of-bound memory accesses. The complement of these
// constraints is the set of constraints that needs to be assumed to ensure
// such statement instances are never executed.
void MemoryAccess::assumeNoOutOfBound() {
  if (PollyIgnoreInbounds)
    return;
  auto *SAI = getScopArrayInfo();
  isl::space Space = getOriginalAccessRelationSpace().range();
  isl::set Outside = isl::set::empty(Space);
  for (int i = 1, Size = Space.dim(isl::dim::set); i < Size; ++i) {
    isl::local_space LS(Space);
    isl::pw_aff Var = isl::pw_aff::var_on_domain(LS, isl::dim::set, i);
    isl::pw_aff Zero = isl::pw_aff(LS);

    isl::set DimOutside = Var.lt_set(Zero);
    isl::pw_aff SizeE = SAI->getDimensionSizePw(i);
    SizeE = SizeE.add_dims(isl::dim::in, Space.dim(isl::dim::set));
    SizeE = SizeE.set_tuple_id(isl::dim::in, Space.get_tuple_id(isl::dim::set));
    DimOutside = DimOutside.unite(SizeE.le_set(Var));

    Outside = Outside.unite(DimOutside);
  }

  Outside = Outside.apply(getAccessRelation().reverse());
  Outside = Outside.intersect(Statement->getDomain());
  Outside = Outside.params();

  // Remove divs to avoid the construction of overly complicated assumptions.
  // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time
  // check bail out more often than strictly necessary.
  Outside = Outside.remove_divs();
  Outside = Outside.complement();
  const auto &Loc = getAccessInstruction()
                        ? getAccessInstruction()->getDebugLoc()
                        : DebugLoc();
  if (!PollyPreciseInbounds)
    Outside = Outside.gist_params(Statement->getDomain().params());
  Statement->getParent()->recordAssumption(INBOUNDS, Outside.release(), Loc,
                                           AS_ASSUMPTION);
}
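
// For illustration, for an access { S[i] -> A[i][i + 1] } to an array A[*][m]
// with domain { S[i] : 0 <= i < p }, the out-of-bound instances are those
// with i + 1 >= m. Projecting onto the parameters and complementing yields
// the recorded assumption { [m, p] : p <= 0 or m > p }.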

void MemoryAccess::buildMemIntrinsicAccessRelation() {
  assert(isMemoryIntrinsic());
  assert(Subscripts.size() == 2 && Sizes.size() == 1);

  isl::pw_aff SubscriptPWA = getPwAff(Subscripts[0]);
  isl::map SubscriptMap = isl::map::from_pw_aff(SubscriptPWA);

  isl::map LengthMap;
  if (Subscripts[1] == nullptr) {
    LengthMap = isl::map::universe(SubscriptMap.get_space());
  } else {
    isl::pw_aff LengthPWA = getPwAff(Subscripts[1]);
    LengthMap = isl::map::from_pw_aff(LengthPWA);
    isl::space RangeSpace = LengthMap.get_space().range();
    LengthMap = LengthMap.apply_range(isl::map::lex_gt(RangeSpace));
  }
  LengthMap = LengthMap.lower_bound_si(isl::dim::out, 0, 0);
  LengthMap = LengthMap.align_params(SubscriptMap.get_space());
  SubscriptMap = SubscriptMap.align_params(LengthMap.get_space());
  LengthMap = LengthMap.sum(SubscriptMap);
  AccessRelation =
      LengthMap.set_tuple_id(isl::dim::in, getStatement()->getDomainId());
}
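
// For illustration, a "memset(&A[s], 0, n)" in statement S is modeled by the
// relation built above as { S[] -> A[o] : s <= o < s + n } (in bytes); if the
// length n is not affine, the accessed range is left unbounded upwards.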

void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
  ScalarEvolution *SE = Statement->getParent()->getSE();

  auto MAI = MemAccInst(getAccessInstruction());
  if (isa<MemIntrinsic>(MAI))
    return;

  Value *Ptr = MAI.getPointerOperand();
  if (!Ptr || !SE->isSCEVable(Ptr->getType()))
    return;

  auto *PtrSCEV = SE->getSCEV(Ptr);
  if (isa<SCEVCouldNotCompute>(PtrSCEV))
    return;

  auto *BasePtrSCEV = SE->getPointerBase(PtrSCEV);
  if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(BasePtrSCEV))
    PtrSCEV = SE->getMinusSCEV(PtrSCEV, BasePtrSCEV);

  const ConstantRange &Range = SE->getSignedRange(PtrSCEV);
  if (Range.isFullSet())
    return;

  if (Range.isWrappedSet() || Range.isSignWrappedSet())
    return;

  bool isWrapping = Range.isSignWrappedSet();

  unsigned BW = Range.getBitWidth();
  const auto One = APInt(BW, 1);
  const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
  const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();

  auto Min = LB.sdiv(APInt(BW, ElementSize));
  auto Max = UB.sdiv(APInt(BW, ElementSize)) + One;

  assert(Min.sle(Max) && "Minimum expected to be less or equal than max");

  isl::map Relation = AccessRelation;
  isl::set AccessRange = Relation.range();
  AccessRange = addRangeBoundsToSet(AccessRange, ConstantRange(Min, Max), 0,
                                    isl::dim::set);
  AccessRelation = Relation.intersect_range(AccessRange);
}

void MemoryAccess::foldAccessRelation() {
  if (Sizes.size() < 2 || isa<SCEVConstant>(Sizes[1]))
    return;

  int Size = Subscripts.size();

  isl::map NewAccessRelation = AccessRelation;

  for (int i = Size - 2; i >= 0; --i) {
    isl::space Space;
    isl::map MapOne, MapTwo;
    isl::pw_aff DimSize = getPwAff(Sizes[i + 1]);

    isl::space SpaceSize = DimSize.get_space();
    isl::id ParamId =
        give(isl_space_get_dim_id(SpaceSize.get(), isl_dim_param, 0));

    Space = AccessRelation.get_space();
    Space = Space.range().map_from_set();
    Space = Space.align_params(SpaceSize);

    int ParamLocation = Space.find_dim_by_id(isl::dim::param, ParamId);

    MapOne = isl::map::universe(Space);
    for (int j = 0; j < Size; ++j)
      MapOne = MapOne.equate(isl::dim::in, j, isl::dim::out, j);
    MapOne = MapOne.lower_bound_si(isl::dim::in, i + 1, 0);

    MapTwo = isl::map::universe(Space);
    for (int j = 0; j < Size; ++j)
      if (j < i || j > i + 1)
        MapTwo = MapTwo.equate(isl::dim::in, j, isl::dim::out, j);

    isl::local_space LS(Space);
    isl::constraint C;
    C = isl::constraint::alloc_equality(LS);
    C = C.set_constant_si(-1);
    C = C.set_coefficient_si(isl::dim::in, i, 1);
    C = C.set_coefficient_si(isl::dim::out, i, -1);
    MapTwo = MapTwo.add_constraint(C);
    C = isl::constraint::alloc_equality(LS);
    C = C.set_coefficient_si(isl::dim::in, i + 1, 1);
    C = C.set_coefficient_si(isl::dim::out, i + 1, -1);
    C = C.set_coefficient_si(isl::dim::param, ParamLocation, 1);
    MapTwo = MapTwo.add_constraint(C);
    MapTwo = MapTwo.upper_bound_si(isl::dim::in, i + 1, -1);

    MapOne = MapOne.unite(MapTwo);
    NewAccessRelation = NewAccessRelation.apply_range(MapOne);
  }

  isl::id BaseAddrId = getScopArrayInfo()->getBasePtrId();
  isl::space Space = Statement->getDomainSpace();
  NewAccessRelation = NewAccessRelation.set_tuple_id(
      isl::dim::in, Space.get_tuple_id(isl::dim::set));
  NewAccessRelation = NewAccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
  NewAccessRelation = NewAccessRelation.gist_domain(Statement->getDomain());

  // Access dimension folding might in certain cases increase the number of
  // disjuncts in the memory access, which can possibly complicate the
  // generated run-time checks and can lead to costly compilation.
  if (!PollyPreciseFoldAccesses &&
      isl_map_n_basic_map(NewAccessRelation.get()) >
          isl_map_n_basic_map(AccessRelation.get())) {
  } else {
    AccessRelation = NewAccessRelation;
  }
}
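
// For illustration, with an inner dimension of size s, the union of MapOne
// and MapTwo above folds a subscript pair (i, -1) to (i - 1, s - 1):
// negative inner indices borrow one row from the next outer dimension.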

/// Check if @p Expr is divisible by @p Size.
static bool isDivisible(const SCEV *Expr, unsigned Size, ScalarEvolution &SE) {
  assert(Size != 0);
  if (Size == 1)
    return true;

  // Only one factor needs to be divisible.
  if (auto *MulExpr = dyn_cast<SCEVMulExpr>(Expr)) {
    for (auto *FactorExpr : MulExpr->operands())
      if (isDivisible(FactorExpr, Size, SE))
        return true;
    return false;
  }

  // For other n-ary expressions (Add, AddRec, Max,...) all operands need
  // to be divisible.
  if (auto *NAryExpr = dyn_cast<SCEVNAryExpr>(Expr)) {
    for (auto *OpExpr : NAryExpr->operands())
      if (!isDivisible(OpExpr, Size, SE))
        return false;
    return true;
  }

  auto *SizeSCEV = SE.getConstant(Expr->getType(), Size);
  auto *UDivSCEV = SE.getUDivExpr(Expr, SizeSCEV);
  auto *MulSCEV = SE.getMulExpr(UDivSCEV, SizeSCEV);
  return MulSCEV == Expr;
}
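
// For illustration, isDivisible({(4 * %i) + 8}, 4, SE) holds: the
// multiplication is divisible because its factor 4 is, and the constant 8 is
// divisible by 4, so every operand of the addition is divisible.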

void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
  assert(AccessRelation.is_null() && "AccessRelation already built");

  // Initialize the invalid domain which describes all iterations for which
  // the access relation is not modeled correctly.
  isl::set StmtInvalidDomain = getStatement()->getInvalidDomain();
  InvalidDomain = isl::set::empty(StmtInvalidDomain.get_space());

  isl::ctx Ctx = Id.get_ctx();
  isl::id BaseAddrId = SAI->getBasePtrId();

  if (getAccessInstruction() && isa<MemIntrinsic>(getAccessInstruction())) {
    buildMemIntrinsicAccessRelation();
    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  if (!isAffine()) {
    // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference, if an
    // access must or may happen. However, for write accesses it is important
    // to differentiate between writes that must happen and writes that may
    // happen.
    if (AccessRelation.is_null())
      AccessRelation = createBasicAccessMap(Statement);

    AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
    return;
  }

  isl::space Space = isl::space(Ctx, 0, Statement->getNumIterators(), 0);
  AccessRelation = isl::map::universe(Space);

  for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
    isl::pw_aff Affine = getPwAff(Subscripts[i]);
    isl::map SubscriptMap = isl::map::from_pw_aff(Affine);
    AccessRelation = AccessRelation.flat_range_product(SubscriptMap);
  }

  Space = Statement->getDomainSpace();
  AccessRelation = AccessRelation.set_tuple_id(
      isl::dim::in, Space.get_tuple_id(isl::dim::set));
  AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);

  AccessRelation = AccessRelation.gist_domain(Statement->getDomain());
}

MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType AccType, Value *BaseAddress,
                           Type *ElementType, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           MemoryKind Kind)
    : Kind(Kind), AccType(AccType), Statement(Stmt), InvalidDomain(nullptr),
      BaseAddr(BaseAddress), ElementType(ElementType),
      Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
      AccessValue(AccessValue), IsAffine(Affine),
      Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(nullptr),
      NewAccessRelation(nullptr), FAD(nullptr) {
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
}

MemoryAccess::MemoryAccess(ScopStmt *Stmt, AccessType AccType, isl::map AccRel)
    : Kind(MemoryKind::Array), AccType(AccType), Statement(Stmt),
      InvalidDomain(nullptr), AccessRelation(nullptr),
      NewAccessRelation(AccRel), FAD(nullptr) {
  isl::id ArrayInfoId = NewAccessRelation.get_tuple_id(isl::dim::out);
  auto *SAI = ScopArrayInfo::getFromId(ArrayInfoId);
  Sizes.push_back(nullptr);
  for (unsigned i = 1; i < SAI->getNumberOfDimensions(); i++)
    Sizes.push_back(SAI->getDimensionSize(i));
  ElementType = SAI->getElementType();
  BaseAddr = SAI->getBasePtr();
  static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
  const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());

  std::string IdName = Stmt->getBaseName() + Access;
  Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
}

MemoryAccess::~MemoryAccess() = default;

void MemoryAccess::realignParams() {
  isl::set Ctx = Statement->getParent()->getContext();
  InvalidDomain = InvalidDomain.gist_params(Ctx);
  AccessRelation = AccessRelation.gist_params(Ctx);
}

const std::string MemoryAccess::getReductionOperatorStr() const {
  return MemoryAccess::getReductionOperatorStr(getReductionType());
}

isl::id MemoryAccess::getId() const { return Id; }

raw_ostream &polly::operator<<(raw_ostream &OS,
                               MemoryAccess::ReductionType RT) {
  if (RT == MemoryAccess::RT_NONE)
    OS << "NONE";
  else
    OS << MemoryAccess::getReductionOperatorStr(RT);
  return OS;
}

void MemoryAccess::setFortranArrayDescriptor(Value *FAD) { this->FAD = FAD; }

void MemoryAccess::print(raw_ostream &OS) const {
  switch (AccType) {
  case READ:
    OS.indent(12) << "ReadAccess :=\t";
    break;
  case MUST_WRITE:
    OS.indent(12) << "MustWriteAccess :=\t";
    break;
  case MAY_WRITE:
    OS.indent(12) << "MayWriteAccess :=\t";
    break;
  }

  OS << "[Reduction Type: " << getReductionType() << "] ";

  if (FAD) {
    OS << "[Fortran array descriptor: " << FAD->getName();
    OS << "] ";
  }

  OS << "[Scalar: " << isScalarKind() << "]\n";
  OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
  if (hasNewAccessRelation())
    OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemoryAccess::dump() const { print(errs()); }
#endif

isl::pw_aff MemoryAccess::getPwAff(const SCEV *E) {
  auto *Stmt = getStatement();
  PWACtx PWAC = Stmt->getParent()->getPwAff(E, Stmt->getEntryBlock());
  isl::set StmtDom = getStatement()->getDomain();
  StmtDom = StmtDom.reset_tuple_id();
  isl::set NewInvalidDom = StmtDom.intersect(isl::manage(PWAC.second));
  InvalidDomain = InvalidDomain.unite(NewInvalidDom);
  return isl::manage(PWAC.first);
}

// Create a map in the size of the provided set domain, that maps from one
// element of the provided set domain to another element of the provided set
// domain.
// The mapping is limited to all points that are equal in all but the last
// dimension and for which the last dimension of the input is strictly smaller
// than the last dimension of the output.
//
//   getEqualAndLarger(set[i0, i1, ..., iX]):
//
//   set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
//     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
//
static isl::map getEqualAndLarger(isl::space SetDomain) {
  isl::space Space = SetDomain.map_from_set();
  isl::map Map = isl::map::universe(Space);
  unsigned lastDimension = Map.dim(isl::dim::in) - 1;

  // Set all but the last dimension to be equal for the input and output
  //
  //   input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
  //     : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
  for (unsigned i = 0; i < lastDimension; ++i)
    Map = Map.equate(isl::dim::in, i, isl::dim::out, i);

  // Set the last dimension of the input to be strictly smaller than the
  // last dimension of the output.
  //
  //   input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
  Map = Map.order_lt(isl::dim::in, lastDimension, isl::dim::out, lastDimension);
  return Map;
}

isl::set MemoryAccess::getStride(isl::map Schedule) const {
  isl::map AccessRelation = getAccessRelation();
  isl::space Space = Schedule.get_space().range();
  isl::map NextScatt = getEqualAndLarger(Space);

  Schedule = Schedule.reverse();
  NextScatt = NextScatt.lexmin();

  NextScatt = NextScatt.apply_range(Schedule);
  NextScatt = NextScatt.apply_range(AccessRelation);
  NextScatt = NextScatt.apply_domain(Schedule);
  NextScatt = NextScatt.apply_domain(AccessRelation);

  isl::set Deltas = NextScatt.deltas();
  return Deltas;
}
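
// For illustration, under the schedule { S[i] -> [i] } an access
// { S[i] -> A[2i] } touches elements whose difference between two consecutive
// schedule points is always 2, so getStride returns the singleton { [2] }.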

bool MemoryAccess::isStrideX(isl::map Schedule, int StrideWidth) const {
  isl::set Stride, StrideX;
  bool IsStrideX;

  Stride = getStride(Schedule);
  StrideX = isl::set::universe(Stride.get_space());
  for (unsigned i = 0; i < StrideX.dim(isl::dim::set) - 1; i++)
    StrideX = StrideX.fix_si(isl::dim::set, i, 0);
  StrideX = StrideX.fix_si(isl::dim::set, StrideX.dim(isl::dim::set) - 1,
                           StrideWidth);
  IsStrideX = Stride.is_subset(StrideX);

  return IsStrideX;
}

bool MemoryAccess::isStrideZero(isl::map Schedule) const {
  return isStrideX(Schedule, 0);
}

bool MemoryAccess::isStrideOne(isl::map Schedule) const {
  return isStrideX(Schedule, 1);
}

void MemoryAccess::setAccessRelation(isl::map NewAccess) {
  AccessRelation = NewAccess;
}

void MemoryAccess::setNewAccessRelation(isl::map NewAccess) {
  assert(NewAccess);

#ifndef NDEBUG
  // Check domain space compatibility.
  isl::space NewSpace = NewAccess.get_space();
  isl::space NewDomainSpace = NewSpace.domain();
  isl::space OriginalDomainSpace = getStatement()->getDomainSpace();
  assert(OriginalDomainSpace.has_equal_tuples(NewDomainSpace));

  // Reads must be executed unconditionally. Writes might be executed in a
  // subdomain only.
  if (isRead()) {
    // Check whether there is an access for every statement instance.
    isl::set StmtDomain = getStatement()->getDomain();
    StmtDomain =
        StmtDomain.intersect_params(getStatement()->getParent()->getContext());
    isl::set NewDomain = NewAccess.domain();
    assert(StmtDomain.is_subset(NewDomain) &&
           "Partial READ accesses not supported");
  }

  isl::space NewAccessSpace = NewAccess.get_space();
  assert(NewAccessSpace.has_tuple_id(isl::dim::set) &&
         "Must specify the array that is accessed");
  isl::id NewArrayId = NewAccessSpace.get_tuple_id(isl::dim::set);
  auto *SAI = static_cast<ScopArrayInfo *>(NewArrayId.get_user());
  assert(SAI && "Must set a ScopArrayInfo");

  if (SAI->isArrayKind() && SAI->getBasePtrOriginSAI()) {
    InvariantEquivClassTy *EqClass =
        getStatement()->getParent()->lookupInvariantEquivClass(
            SAI->getBasePtr());
    assert(EqClass &&
           "Access functions to indirect arrays must have an invariant and "
           "hoisted base pointer");
  }

  // Check whether access dimensions correspond to the number of dimensions of
  // the accessed array.
  auto Dims = SAI->getNumberOfDimensions();
  assert(NewAccessSpace.dim(isl::dim::set) == Dims &&
         "Access dims must match array dims");
#endif

  NewAccess = NewAccess.gist_domain(getStatement()->getDomain());
  NewAccessRelation = NewAccess;
}

bool MemoryAccess::isLatestPartialAccess() const {
  isl::set StmtDom = getStatement()->getDomain();
  isl::set AccDom = getLatestAccessRelation().domain();

  return isl_set_is_subset(StmtDom.keep(), AccDom.keep()) == isl_bool_false;
}

//===----------------------------------------------------------------------===//

isl::map ScopStmt::getSchedule() const {
  isl::set Domain = getDomain();
  if (Domain.is_empty())
    return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
  auto Schedule = getParent()->getSchedule();
  if (!Schedule)
    return nullptr;
  Schedule = Schedule.intersect_domain(isl::union_set(Domain));
  if (Schedule.is_empty())
    return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
  isl::map M = isl::map::from_union_map(Schedule);
  M = M.coalesce();
  M = M.gist_domain(Domain);
  M = M.coalesce();
  return M;
}

void ScopStmt::restrictDomain(isl::set NewDomain) {
  assert(NewDomain.is_subset(Domain) &&
         "New domain is not a subset of old domain!");
  Domain = NewDomain;
}

void ScopStmt::addAccess(MemoryAccess *Access, bool Prepend) {
  Instruction *AccessInst = Access->getAccessInstruction();

  if (Access->isArrayKind()) {
    MemoryAccessList &MAL = InstructionToAccess[AccessInst];
    MAL.emplace_front(Access);
  } else if (Access->isValueKind() && Access->isWrite()) {
    Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
    assert(!ValueWrites.lookup(AccessVal));

    ValueWrites[AccessVal] = Access;
  } else if (Access->isValueKind() && Access->isRead()) {
    Value *AccessVal = Access->getAccessValue();
    assert(!ValueReads.lookup(AccessVal));

    ValueReads[AccessVal] = Access;
  } else if (Access->isAnyPHIKind() && Access->isWrite()) {
    PHINode *PHI = cast<PHINode>(Access->getAccessValue());
    assert(!PHIWrites.lookup(PHI));

    PHIWrites[PHI] = Access;
  } else if (Access->isAnyPHIKind() && Access->isRead()) {
    PHINode *PHI = cast<PHINode>(Access->getAccessValue());
    assert(!PHIReads.lookup(PHI));

    PHIReads[PHI] = Access;
  }

  if (Prepend) {
    MemAccs.insert(MemAccs.begin(), Access);
    return;
  }
  MemAccs.push_back(Access);
}

void ScopStmt::realignParams() {
  for (MemoryAccess *MA : *this)
    MA->realignParams();

  isl::set Ctx = Parent.getContext();
  InvalidDomain = InvalidDomain.gist_params(Ctx);
  Domain = Domain.gist_params(Ctx);
}

/// Add @p BSet to the set @p User if @p BSet is bounded.
static isl_stat collectBoundedParts(__isl_take isl_basic_set *BSet,
                                    void *User) {
  isl_set **BoundedParts = static_cast<isl_set **>(User);
  if (isl_basic_set_is_bounded(BSet))
    *BoundedParts = isl_set_union(*BoundedParts, isl_set_from_basic_set(BSet));
  else
    isl_basic_set_free(BSet);
  return isl_stat_ok;
}

/// Return the bounded parts of @p S.
static __isl_give isl_set *collectBoundedParts(__isl_take isl_set *S) {
  isl_set *BoundedParts = isl_set_empty(isl_set_get_space(S));
  isl_set_foreach_basic_set(S, collectBoundedParts, &BoundedParts);
  isl_set_free(S);
  return BoundedParts;
}

/// Compute the (un)bounded parts of @p S w.r.t. dimension @p Dim.
///
/// @returns A separation of @p S into first an unbounded and then a bounded
///          subset, both with regard to the dimension @p Dim.
static std::pair<__isl_give isl_set *, __isl_give isl_set *>
partitionSetParts(__isl_take isl_set *S, unsigned Dim) {
  for (unsigned u = 0, e = isl_set_n_dim(S); u < e; u++)
    S = isl_set_lower_bound_si(S, isl_dim_set, u, 0);

  unsigned NumDimsS = isl_set_n_dim(S);
  isl_set *OnlyDimS = isl_set_copy(S);

  // Remove dimensions that are greater than Dim as they are not interesting.
  assert(NumDimsS >= Dim + 1);
  OnlyDimS =
      isl_set_project_out(OnlyDimS, isl_dim_set, Dim + 1, NumDimsS - Dim - 1);

  // Create artificial parametric upper bounds for dimensions smaller than Dim
  // as we are not interested in them.
  OnlyDimS = isl_set_insert_dims(OnlyDimS, isl_dim_param, 0, Dim);
  for (unsigned u = 0; u < Dim; u++) {
    isl_constraint *C = isl_inequality_alloc(
        isl_local_space_from_space(isl_set_get_space(OnlyDimS)));
    C = isl_constraint_set_coefficient_si(C, isl_dim_param, u, 1);
    C = isl_constraint_set_coefficient_si(C, isl_dim_set, u, -1);
    OnlyDimS = isl_set_add_constraint(OnlyDimS, C);
  }

  // Collect all bounded parts of OnlyDimS.
  isl_set *BoundedParts = collectBoundedParts(OnlyDimS);

  // Create the dimensions greater than Dim again.
  BoundedParts = isl_set_insert_dims(BoundedParts, isl_dim_set, Dim + 1,
                                     NumDimsS - Dim - 1);

  // Remove the artificial upper bound parameters again.
  BoundedParts = isl_set_remove_dims(BoundedParts, isl_dim_param, 0, Dim);

  isl_set *UnboundedParts = isl_set_subtract(S, isl_set_copy(BoundedParts));
  return std::make_pair(UnboundedParts, BoundedParts);
}
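
// For illustration, partitioning { [i, j] : 0 <= i < n and 0 <= j } at
// Dim = 1 puts everything into the unbounded part since j has no upper
// bound, whereas adding j <= i + 5 would move the whole set into the bounded
// part (i is covered by an artificial parametric upper bound).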

/// Set the dimension Ids from @p From in @p To.
static __isl_give isl_set *setDimensionIds(__isl_keep isl_set *From,
                                           __isl_take isl_set *To) {
  for (unsigned u = 0, e = isl_set_n_dim(From); u < e; u++) {
    isl_id *DimId = isl_set_get_dim_id(From, isl_dim_set, u);
    To = isl_set_set_dim_id(To, isl_dim_set, u, DimId);
  }
  return To;
}

/// Create the conditions under which @p L @p Pred @p R is true.
static __isl_give isl_set *buildConditionSet(ICmpInst::Predicate Pred,
                                             __isl_take isl_pw_aff *L,
                                             __isl_take isl_pw_aff *R) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:
    return isl_pw_aff_eq_set(L, R);
  case ICmpInst::ICMP_NE:
    return isl_pw_aff_ne_set(L, R);
  case ICmpInst::ICMP_SLT:
    return isl_pw_aff_lt_set(L, R);
  case ICmpInst::ICMP_SLE:
    return isl_pw_aff_le_set(L, R);
  case ICmpInst::ICMP_SGT:
    return isl_pw_aff_gt_set(L, R);
  case ICmpInst::ICMP_SGE:
    return isl_pw_aff_ge_set(L, R);
  case ICmpInst::ICMP_ULT:
    return isl_pw_aff_lt_set(L, R);
  case ICmpInst::ICMP_UGT:
    return isl_pw_aff_gt_set(L, R);
  case ICmpInst::ICMP_ULE:
    return isl_pw_aff_le_set(L, R);
  case ICmpInst::ICMP_UGE:
    return isl_pw_aff_ge_set(L, R);
  default:
    llvm_unreachable("Non integer predicate not supported");
  }
}

/// Create the conditions under which @p L @p Pred @p R is true.
///
/// Helper function that will make sure the dimensions of the result have the
/// same isl_id's as the @p Domain.
static __isl_give isl_set *buildConditionSet(ICmpInst::Predicate Pred,
                                             __isl_take isl_pw_aff *L,
                                             __isl_take isl_pw_aff *R,
                                             __isl_keep isl_set *Domain) {
  isl_set *ConsequenceCondSet = buildConditionSet(Pred, L, R);
  return setDimensionIds(Domain, ConsequenceCondSet);
}

/// Compute the isl representation for the SCEV @p E in this BB.
///
/// @param S                The Scop in which @p BB resides in.
/// @param BB               The BB for which isl representation is to be
///                         computed.
/// @param InvalidDomainMap A map of BB to their invalid domains.
/// @param E                The SCEV that should be translated.
/// @param NonNegative      Flag to indicate the @p E has to be non-negative.
///
/// Note that this function will also adjust the invalid context accordingly.
__isl_give isl_pw_aff *
getPwAff(Scop &S, BasicBlock *BB,
         DenseMap<BasicBlock *, isl::set> &InvalidDomainMap, const SCEV *E,
         bool NonNegative = false) {
  PWACtx PWAC = S.getPwAff(E, BB, NonNegative);
  InvalidDomainMap[BB] = InvalidDomainMap[BB].unite(isl::manage(PWAC.second));
  return PWAC.first;
}

/// Build the condition sets for the switch @p SI in the @p Domain.
///
/// This will fill @p ConditionSets with the conditions under which control
/// will be moved from @p SI to its successors. Hence, @p ConditionSets will
/// have as many elements as @p SI has successors.
bool buildConditionSets(Scop &S, BasicBlock *BB, SwitchInst *SI, Loop *L,
                        __isl_keep isl_set *Domain,
                        DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
                        SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
  Value *Condition = getConditionFromTerminator(SI);
  assert(Condition && "No condition for switch");

  ScalarEvolution &SE = *S.getSE();
  isl_pw_aff *LHS, *RHS;
  LHS = getPwAff(S, BB, InvalidDomainMap, SE.getSCEVAtScope(Condition, L));

  unsigned NumSuccessors = SI->getNumSuccessors();
  ConditionSets.resize(NumSuccessors);
  for (auto &Case : SI->cases()) {
    unsigned Idx = Case.getSuccessorIndex();
    ConstantInt *CaseValue = Case.getCaseValue();

    RHS = getPwAff(S, BB, InvalidDomainMap, SE.getSCEV(CaseValue));
    isl_set *CaseConditionSet =
        buildConditionSet(ICmpInst::ICMP_EQ, isl_pw_aff_copy(LHS), RHS, Domain);
    ConditionSets[Idx] = isl_set_coalesce(
        isl_set_intersect(CaseConditionSet, isl_set_copy(Domain)));
  }

  assert(ConditionSets[0] == nullptr && "Default condition set was set");
  isl_set *ConditionSetUnion = isl_set_copy(ConditionSets[1]);
  for (unsigned u = 2; u < NumSuccessors; u++)
    ConditionSetUnion =
        isl_set_union(ConditionSetUnion, isl_set_copy(ConditionSets[u]));
  ConditionSets[0] = setDimensionIds(
      Domain, isl_set_subtract(isl_set_copy(Domain), ConditionSetUnion));

  isl_pw_aff_free(LHS);

  return true;
}
1491 /// Build condition sets for unsigned ICmpInst(s).
1492 /// Special handling is required for unsigned operands to ensure that if
1493 /// MSB (aka the Sign bit) is set for an operands in an unsigned ICmpInst
1494 /// it should wrap around.
1496 /// @param IsStrictUpperBound holds information on the predicate relation
1497 /// between TestVal and UpperBound, i.e,
1498 /// TestVal < UpperBound OR TestVal <= UpperBound
1499 __isl_give isl_set *
1500 buildUnsignedConditionSets(Scop &S, BasicBlock *BB, Value *Condition,
1501 __isl_keep isl_set *Domain, const SCEV *SCEV_TestVal,
1502 const SCEV *SCEV_UpperBound,
1503 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
1504 bool IsStrictUpperBound) {
1505 // Do not take NonNeg assumption on TestVal
1506 // as it might have MSB (Sign bit) set.
1507 isl_pw_aff *TestVal = getPwAff(S, BB, InvalidDomainMap, SCEV_TestVal, false);
1508 // Take NonNeg assumption on UpperBound.
1509 isl_pw_aff *UpperBound =
1510 getPwAff(S, BB, InvalidDomainMap, SCEV_UpperBound, true);
1512 // 0 <= TestVal
1513 isl_set *First =
1514 isl_pw_aff_le_set(isl_pw_aff_zero_on_domain(isl_local_space_from_space(
1515 isl_pw_aff_get_domain_space(TestVal))),
1516 isl_pw_aff_copy(TestVal));
1518 isl_set *Second;
1519 if (IsStrictUpperBound)
1520 // TestVal < UpperBound
1521 Second = isl_pw_aff_lt_set(TestVal, UpperBound);
1522 else
1523 // TestVal <= UpperBound
1524 Second = isl_pw_aff_le_set(TestVal, UpperBound);
1526 isl_set *ConsequenceCondSet = isl_set_intersect(First, Second);
1527 ConsequenceCondSet = setDimensionIds(Domain, ConsequenceCondSet);
1528 return ConsequenceCondSet;
1529 }
1531 /// Build the condition sets for the branch condition @p Condition in
1532 /// the @p Domain.
1534 /// This will fill @p ConditionSets with the conditions under which control
1535 /// will be moved from @p TI to its successors. Hence, @p ConditionSets will
1536 /// have as many elements as @p TI has successors. If @p TI is nullptr the
1537 /// context under which @p Condition is true/false will be returned as the
1538 /// new elements of @p ConditionSets.
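/// As an illustrative sketch (not an additional code path): for a branch on
/// "%c = and i1 %a, %b" the function recurses into both operands; the
/// condition set of the taken successor is the intersection of the sets built
/// for %a and %b, whereas an "or" condition yields their union.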
1539 bool buildConditionSets(Scop &S, BasicBlock *BB, Value *Condition,
1540 TerminatorInst *TI, Loop *L, __isl_keep isl_set *Domain,
1541 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
1542 SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
1543 ScalarEvolution &SE = *S.getSE();
1544 isl_set *ConsequenceCondSet = nullptr;
1546 if (auto Load = dyn_cast<LoadInst>(Condition)) {
1547 const SCEV *LHSSCEV = SE.getSCEVAtScope(Load, L);
1548 const SCEV *RHSSCEV = SE.getZero(LHSSCEV->getType());
1549 bool NonNeg = false;
1550 isl_pw_aff *LHS = getPwAff(S, BB, InvalidDomainMap, LHSSCEV, NonNeg);
1551 isl_pw_aff *RHS = getPwAff(S, BB, InvalidDomainMap, RHSSCEV, NonNeg);
1552 ConsequenceCondSet =
1553 buildConditionSet(ICmpInst::ICMP_SLE, LHS, RHS, Domain);
1554 } else if (auto *PHI = dyn_cast<PHINode>(Condition)) {
1555 auto *Unique = dyn_cast<ConstantInt>(
1556 getUniqueNonErrorValue(PHI, &S.getRegion(), *S.getLI(), *S.getDT()));
1558 if (Unique->isZero())
1559 ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
1560 else
1561 ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
1562 } else if (auto *CCond = dyn_cast<ConstantInt>(Condition)) {
1563 if (CCond->isZero())
1564 ConsequenceCondSet = isl_set_empty(isl_set_get_space(Domain));
1565 else
1566 ConsequenceCondSet = isl_set_universe(isl_set_get_space(Domain));
1567 } else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Condition)) {
1568 auto Opcode = BinOp->getOpcode();
1569 assert(Opcode == Instruction::And || Opcode == Instruction::Or);
1571 bool Valid = buildConditionSets(S, BB, BinOp->getOperand(0), TI, L, Domain,
1572 InvalidDomainMap, ConditionSets) &&
1573 buildConditionSets(S, BB, BinOp->getOperand(1), TI, L, Domain,
1574 InvalidDomainMap, ConditionSets);
1575 if (!Valid) {
1576 while (!ConditionSets.empty())
1577 isl_set_free(ConditionSets.pop_back_val());
1578 return false;
1579 }
1581 isl_set_free(ConditionSets.pop_back_val());
1582 isl_set *ConsCondPart0 = ConditionSets.pop_back_val();
1583 isl_set_free(ConditionSets.pop_back_val());
1584 isl_set *ConsCondPart1 = ConditionSets.pop_back_val();
1586 if (Opcode == Instruction::And)
1587 ConsequenceCondSet = isl_set_intersect(ConsCondPart0, ConsCondPart1);
1588 else
1589 ConsequenceCondSet = isl_set_union(ConsCondPart0, ConsCondPart1);
1590 } else {
1591 auto *ICond = dyn_cast<ICmpInst>(Condition);
1592 assert(ICond &&
1593 "Condition of exiting branch was neither constant nor ICmp!");
1595 LoopInfo &LI = *S.getLI();
1596 DominatorTree &DT = *S.getDT();
1597 Region &R = S.getRegion();
1599 isl_pw_aff *LHS, *RHS;
1600 // For unsigned comparisons we assume the sign bit of neither operand to
1601 // be set. The comparison is equal to a signed comparison under this
1602 // assumption.
1603 bool NonNeg = ICond->isUnsigned();
1604 const SCEV *LeftOperand = SE.getSCEVAtScope(ICond->getOperand(0), L),
1605 *RightOperand = SE.getSCEVAtScope(ICond->getOperand(1), L);
1607 LeftOperand = tryForwardThroughPHI(LeftOperand, R, SE, LI, DT);
1608 RightOperand = tryForwardThroughPHI(RightOperand, R, SE, LI, DT);
1610 switch (ICond->getPredicate()) {
1611 case ICmpInst::ICMP_ULT:
1612 ConsequenceCondSet =
1613 buildUnsignedConditionSets(S, BB, Condition, Domain, LeftOperand,
1614 RightOperand, InvalidDomainMap, true);
1615 break;
1616 case ICmpInst::ICMP_ULE:
1617 ConsequenceCondSet =
1618 buildUnsignedConditionSets(S, BB, Condition, Domain, LeftOperand,
1619 RightOperand, InvalidDomainMap, false);
1620 break;
1621 case ICmpInst::ICMP_UGT:
1622 ConsequenceCondSet =
1623 buildUnsignedConditionSets(S, BB, Condition, Domain, RightOperand,
1624 LeftOperand, InvalidDomainMap, true);
1625 break;
1626 case ICmpInst::ICMP_UGE:
1627 ConsequenceCondSet =
1628 buildUnsignedConditionSets(S, BB, Condition, Domain, RightOperand,
1629 LeftOperand, InvalidDomainMap, false);
1630 break;
1631 default:
1632 LHS = getPwAff(S, BB, InvalidDomainMap, LeftOperand, NonNeg);
1633 RHS = getPwAff(S, BB, InvalidDomainMap, RightOperand, NonNeg);
1634 ConsequenceCondSet =
1635 buildConditionSet(ICond->getPredicate(), LHS, RHS, Domain);
1636 break;
1637 }
1638 }
1640 // If no terminator was given we are only looking for parameter constraints
1641 // under which @p Condition is true/false.
1642 if (!TI)
1643 ConsequenceCondSet = isl_set_params(ConsequenceCondSet);
1644 assert(ConsequenceCondSet);
1645 ConsequenceCondSet = isl_set_coalesce(
1646 isl_set_intersect(ConsequenceCondSet, isl_set_copy(Domain)));
1648 isl_set *AlternativeCondSet = nullptr;
1649 bool TooComplex =
1650 isl_set_n_basic_set(ConsequenceCondSet) >= MaxDisjunctsInDomain;
1652 if (!TooComplex) {
1653 AlternativeCondSet = isl_set_subtract(isl_set_copy(Domain),
1654 isl_set_copy(ConsequenceCondSet));
1655 TooComplex =
1656 isl_set_n_basic_set(AlternativeCondSet) >= MaxDisjunctsInDomain;
1657 }
1659 if (TooComplex) {
1660 S.invalidate(COMPLEXITY, TI ? TI->getDebugLoc() : DebugLoc(),
1661 TI ? TI->getParent() : nullptr /* BasicBlock */);
1662 isl_set_free(AlternativeCondSet);
1663 isl_set_free(ConsequenceCondSet);
1664 return false;
1665 }
1667 ConditionSets.push_back(ConsequenceCondSet);
1668 ConditionSets.push_back(isl_set_coalesce(AlternativeCondSet));
1670 return true;
1671 }
1673 /// Build the condition sets for the terminator @p TI in the @p Domain.
1675 /// This will fill @p ConditionSets with the conditions under which control
1676 /// will be moved from @p TI to its successors. Hence, @p ConditionSets will
1677 /// have as many elements as @p TI has successors.
1678 bool buildConditionSets(Scop &S, BasicBlock *BB, TerminatorInst *TI, Loop *L,
1679 __isl_keep isl_set *Domain,
1680 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap,
1681 SmallVectorImpl<__isl_give isl_set *> &ConditionSets) {
1682 if (SwitchInst *SI = dyn_cast<SwitchInst>(TI))
1683 return buildConditionSets(S, BB, SI, L, Domain, InvalidDomainMap,
1684 ConditionSets);
1686 assert(isa<BranchInst>(TI) && "Terminator was neither branch nor switch.");
1688 if (TI->getNumSuccessors() == 1) {
1689 ConditionSets.push_back(isl_set_copy(Domain));
1690 return true;
1691 }
1693 Value *Condition = getConditionFromTerminator(TI);
1694 assert(Condition && "No condition for Terminator");
1696 return buildConditionSets(S, BB, Condition, TI, L, Domain, InvalidDomainMap,
1697 ConditionSets);
1698 }
1700 ScopStmt::ScopStmt(Scop &parent, Region &R, Loop *SurroundingLoop,
1701 std::vector<Instruction *> EntryBlockInstructions)
1702 : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), R(&R),
1703 Build(nullptr), SurroundingLoop(SurroundingLoop),
1704 Instructions(EntryBlockInstructions) {
1705 BaseName = getIslCompatibleName(
1706 "Stmt", R.getNameStr(), parent.getNextStmtIdx(), "", UseInstructionNames);
1707 }
1709 ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb, Loop *SurroundingLoop,
1710 std::vector<Instruction *> Instructions, int Count)
1711 : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), BB(&bb),
1712 Build(nullptr), SurroundingLoop(SurroundingLoop),
1713 Instructions(Instructions) {
1714 std::string S = "";
1715 if (Count != 0)
1716 S += std::to_string(Count);
1717 BaseName = getIslCompatibleName("Stmt", &bb, parent.getNextStmtIdx(), S,
1718 UseInstructionNames);
1719 }
1721 ScopStmt::ScopStmt(Scop &parent, isl::map SourceRel, isl::map TargetRel,
1722 isl::set NewDomain)
1723 : Parent(parent), InvalidDomain(nullptr), Domain(NewDomain),
1724 Build(nullptr) {
1725 BaseName = getIslCompatibleName("CopyStmt_", "",
1726 std::to_string(parent.getCopyStmtsNum()));
1727 isl::id Id = isl::id::alloc(getIslCtx(), getBaseName(), this);
1728 Domain = Domain.set_tuple_id(Id);
1729 TargetRel = TargetRel.set_tuple_id(isl::dim::in, Id);
1730 auto *Access =
1731 new MemoryAccess(this, MemoryAccess::AccessType::MUST_WRITE, TargetRel);
1732 parent.addAccessFunction(Access);
1733 addAccess(Access);
1734 SourceRel = SourceRel.set_tuple_id(isl::dim::in, Id);
1735 Access = new MemoryAccess(this, MemoryAccess::AccessType::READ, SourceRel);
1736 parent.addAccessFunction(Access);
1737 addAccess(Access);
1738 }
1740 ScopStmt::~ScopStmt() = default;
1742 std::string ScopStmt::getDomainStr() const { return Domain.to_str(); }
1744 std::string ScopStmt::getScheduleStr() const {
1745 auto *S = getSchedule().release();
1746 if (!S)
1747 return {};
1748 auto Str = stringFromIslObj(S);
1749 isl_map_free(S);
1750 return Str;
1751 }
1753 void ScopStmt::setInvalidDomain(isl::set ID) { InvalidDomain = ID; }
1755 BasicBlock *ScopStmt::getEntryBlock() const {
1756 if (isBlockStmt())
1757 return getBasicBlock();
1758 return getRegion()->getEntry();
1759 }
1761 unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }
1763 const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }
1765 Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
1766 return NestLoops[Dimension];
1767 }
1769 isl_ctx *ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }
1771 isl::set ScopStmt::getDomain() const { return Domain; }
1773 isl::space ScopStmt::getDomainSpace() const { return Domain.get_space(); }
1775 isl::id ScopStmt::getDomainId() const { return Domain.get_tuple_id(); }
1777 void ScopStmt::printInstructions(raw_ostream &OS) const {
1778 OS << "Instructions {\n";
1780 for (Instruction *Inst : Instructions)
1781 OS.indent(16) << *Inst << "\n";
1783 OS.indent(12) << "}\n";
1784 }
1786 void ScopStmt::print(raw_ostream &OS, bool PrintInstructions) const {
1787 OS << "\t" << getBaseName() << "\n";
1788 OS.indent(12) << "Domain :=\n";
1790 if (Domain) {
1791 OS.indent(16) << getDomainStr() << ";\n";
1792 } else
1793 OS.indent(16) << "n/a\n";
1795 OS.indent(12) << "Schedule :=\n";
1797 if (Domain) {
1798 OS.indent(16) << getScheduleStr() << ";\n";
1799 } else
1800 OS.indent(16) << "n/a\n";
1802 for (MemoryAccess *Access : MemAccs)
1803 Access->print(OS);
1805 if (PrintInstructions)
1806 printInstructions(OS.indent(12));
1807 }
1809 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1810 LLVM_DUMP_METHOD void ScopStmt::dump() const { print(dbgs(), true); }
1811 #endif
1813 void ScopStmt::removeAccessData(MemoryAccess *MA) {
1814 if (MA->isRead() && MA->isOriginalValueKind()) {
1815 bool Found = ValueReads.erase(MA->getAccessValue());
1816 (void)Found;
1817 assert(Found && "Expected access data not found");
1818 }
1819 if (MA->isWrite() && MA->isOriginalValueKind()) {
1820 bool Found = ValueWrites.erase(cast<Instruction>(MA->getAccessValue()));
1821 (void)Found;
1822 assert(Found && "Expected access data not found");
1823 }
1824 if (MA->isWrite() && MA->isOriginalAnyPHIKind()) {
1825 bool Found = PHIWrites.erase(cast<PHINode>(MA->getAccessInstruction()));
1826 (void)Found;
1827 assert(Found && "Expected access data not found");
1828 }
1829 if (MA->isRead() && MA->isOriginalAnyPHIKind()) {
1830 bool Found = PHIReads.erase(cast<PHINode>(MA->getAccessInstruction()));
1831 (void)Found;
1832 assert(Found && "Expected access data not found");
1833 }
1834 }
1836 void ScopStmt::removeMemoryAccess(MemoryAccess *MA) {
1837 // Remove the memory accesses from this statement together with all scalar
1838 // accesses that were caused by it. MemoryKind::Value READs have no access
1839 // instruction, hence would not be removed by this function. However, this
1840 // function is only used for invariant LoadInst accesses, whose arguments
1841 // are always affine and hence synthesizable, so there are no
1842 // MemoryKind::Value READ accesses to be removed.
1843 auto Predicate = [&](MemoryAccess *Acc) {
1844 return Acc->getAccessInstruction() == MA->getAccessInstruction();
1845 };
1846 for (auto *MA : MemAccs) {
1847 if (Predicate(MA)) {
1848 removeAccessData(MA);
1849 Parent.removeAccessData(MA);
1850 }
1851 }
1852 MemAccs.erase(std::remove_if(MemAccs.begin(), MemAccs.end(), Predicate),
1853 MemAccs.end());
1854 InstructionToAccess.erase(MA->getAccessInstruction());
1855 }
1857 void ScopStmt::removeSingleMemoryAccess(MemoryAccess *MA) {
1858 auto MAIt = std::find(MemAccs.begin(), MemAccs.end(), MA);
1859 assert(MAIt != MemAccs.end());
1860 MemAccs.erase(MAIt);
1862 removeAccessData(MA);
1863 Parent.removeAccessData(MA);
1865 auto It = InstructionToAccess.find(MA->getAccessInstruction());
1866 if (It != InstructionToAccess.end()) {
1867 It->second.remove(MA);
1868 if (It->second.empty())
1869 InstructionToAccess.erase(MA->getAccessInstruction());
1870 }
1871 }
1873 MemoryAccess *ScopStmt::ensureValueRead(Value *V) {
1874 MemoryAccess *Access = lookupInputAccessOf(V);
1875 if (Access)
1876 return Access;
1878 ScopArrayInfo *SAI =
1879 Parent.getOrCreateScopArrayInfo(V, V->getType(), {}, MemoryKind::Value);
1880 Access = new MemoryAccess(this, nullptr, MemoryAccess::READ, V, V->getType(),
1881 true, {}, {}, V, MemoryKind::Value);
1882 Parent.addAccessFunction(Access);
1883 Access->buildAccessRelation(SAI);
1884 addAccess(Access);
1885 Parent.addAccessData(Access);
1886 return Access;
1887 }
1889 raw_ostream &polly::operator<<(raw_ostream &OS, const ScopStmt &S) {
1890 S.print(OS, PollyPrintInstructions);
1891 return OS;
1892 }
1894 //===----------------------------------------------------------------------===//
1895 /// Scop class implementation
1897 void Scop::setContext(__isl_take isl_set *NewContext) {
1898 NewContext = isl_set_align_params(NewContext, isl_set_get_space(Context));
1899 isl_set_free(Context);
1900 Context = NewContext;
1901 }
1903 namespace {
1905 /// Remap parameter values but keep AddRecs valid wrt. invariant loads.
1906 struct SCEVSensitiveParameterRewriter
1907 : public SCEVRewriteVisitor<SCEVSensitiveParameterRewriter> {
1908 const ValueToValueMap &VMap;
1910 public:
1911 SCEVSensitiveParameterRewriter(const ValueToValueMap &VMap,
1912 ScalarEvolution &SE)
1913 : SCEVRewriteVisitor(SE), VMap(VMap) {}
1915 static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
1916 const ValueToValueMap &VMap) {
1917 SCEVSensitiveParameterRewriter SSPR(VMap, SE);
1918 return SSPR.visit(E);
1919 }
1921 const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
1922 auto *Start = visit(E->getStart());
1923 auto *AddRec = SE.getAddRecExpr(SE.getConstant(E->getType(), 0),
1924 visit(E->getStepRecurrence(SE)),
1925 E->getLoop(), SCEV::FlagAnyWrap);
1926 return SE.getAddExpr(Start, AddRec);
1927 }
1929 const SCEV *visitUnknown(const SCEVUnknown *E) {
1930 if (auto *NewValue = VMap.lookup(E->getValue()))
1931 return SE.getUnknown(NewValue);
1932 return E;
1933 }
1934 };
1936 /// Check whether we should remap a SCEV expression.
1937 struct SCEVFindInsideScop : public SCEVTraversal<SCEVFindInsideScop> {
1938 const ValueToValueMap &VMap;
1939 bool FoundInside = false;
1940 const Scop *S;
1942 public:
1943 SCEVFindInsideScop(const ValueToValueMap &VMap, ScalarEvolution &SE,
1944 const Scop *S)
1945 : SCEVTraversal(*this), VMap(VMap), S(S) {}
1947 static bool hasVariant(const SCEV *E, ScalarEvolution &SE,
1948 const ValueToValueMap &VMap, const Scop *S) {
1949 SCEVFindInsideScop SFIS(VMap, SE, S);
1950 SFIS.visitAll(E);
1951 return SFIS.FoundInside;
1952 }
1954 bool follow(const SCEV *E) {
1955 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(E)) {
1956 FoundInside |= S->getRegion().contains(AddRec->getLoop());
1957 } else if (auto *Unknown = dyn_cast<SCEVUnknown>(E)) {
1958 if (Instruction *I = dyn_cast<Instruction>(Unknown->getValue()))
1959 FoundInside |= S->getRegion().contains(I) && !VMap.count(I);
1960 }
1961 return !FoundInside;
1962 }
1964 bool isDone() { return FoundInside; }
1965 };
1967 } // end anonymous namespace
1969 const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *E) const {
1970 // Check whether it makes sense to rewrite the SCEV. (ScalarEvolution
1971 // doesn't like addition between an AddRec and an expression that
1972 // doesn't have a dominance relationship with it.)
1973 if (SCEVFindInsideScop::hasVariant(E, *SE, InvEquivClassVMap, this))
1974 return E;
1976 // Rewrite SCEV.
1977 return SCEVSensitiveParameterRewriter::rewrite(E, *SE, InvEquivClassVMap);
1978 }
1980 // This table of function names is used to translate parameter names into
1981 // more human-readable names. This makes it easier to interpret Polly
1982 // analysis results.
1983 StringMap<std::string> KnownNames = {
1984 {"_Z13get_global_idj", "global_id"},
1985 {"_Z12get_local_idj", "local_id"},
1986 {"_Z15get_global_sizej", "global_size"},
1987 {"_Z14get_local_sizej", "local_size"},
1988 {"_Z12get_work_dimv", "work_dim"},
1989 {"_Z17get_global_offsetj", "global_offset"},
1990 {"_Z12get_group_idj", "group_id"},
1991 {"_Z14get_num_groupsj", "num_groups"},
1992 };
1994 static std::string getCallParamName(CallInst *Call) {
1995 std::string Result;
1996 raw_string_ostream OS(Result);
1997 std::string Name = Call->getCalledFunction()->getName();
1999 auto Iterator = KnownNames.find(Name);
2000 if (Iterator != KnownNames.end())
2001 Name = "__" + Iterator->getValue();
2002 OS << Name;
2003 for (auto &Operand : Call->arg_operands()) {
2004 ConstantInt *Op = cast<ConstantInt>(&Operand);
2005 OS << "_" << Op->getValue();
2006 }
2007 OS.flush();
2008 return Result;
2009 }
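// A hedged usage sketch of the naming scheme above: for a call
// get_global_id(0), i.e. callee _Z13get_global_idj with the constant argument
// 0, the loop above produces the parameter name "__global_id_0"; calls to
// functions not in KnownNames keep their (mangled) name, again with the
// constant argument values appended.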
2011 void Scop::createParameterId(const SCEV *Parameter) {
2012 assert(Parameters.count(Parameter));
2013 assert(!ParameterIds.count(Parameter));
2015 std::string ParameterName = "p_" + std::to_string(getNumParams() - 1);
2017 if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Parameter)) {
2018 Value *Val = ValueParameter->getValue();
2019 CallInst *Call = dyn_cast<CallInst>(Val);
2021 if (Call && isConstCall(Call)) {
2022 ParameterName = getCallParamName(Call);
2023 } else if (UseInstructionNames) {
2024 // If this parameter references a specific Value and this value has a name
2025 // we use this name as it is likely to be unique and more useful than just
2026 // a number.
2027 if (Val->hasName())
2028 ParameterName = Val->getName();
2029 else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
2030 auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
2031 if (LoadOrigin->hasName()) {
2032 ParameterName += "_loaded_from_";
2033 ParameterName +=
2034 LI->getPointerOperand()->stripInBoundsOffsets()->getName();
2035 }
2036 }
2037 }
2039 ParameterName = getIslCompatibleName("", ParameterName, "");
2040 }
2042 isl::id Id = isl::id::alloc(getIslCtx(), ParameterName,
2043 const_cast<void *>((const void *)Parameter));
2044 ParameterIds[Parameter] = Id;
2045 }
2047 void Scop::addParams(const ParameterSetTy &NewParameters) {
2048 for (const SCEV *Parameter : NewParameters) {
2049 // Normalize the SCEV to get the representing element for an invariant load.
2050 Parameter = extractConstantFactor(Parameter, *SE).second;
2051 Parameter = getRepresentingInvariantLoadSCEV(Parameter);
2053 if (Parameters.insert(Parameter))
2054 createParameterId(Parameter);
2055 }
2056 }
2058 isl::id Scop::getIdForParam(const SCEV *Parameter) const {
2059 // Normalize the SCEV to get the representing element for an invariant load.
2060 Parameter = getRepresentingInvariantLoadSCEV(Parameter);
2061 return ParameterIds.lookup(Parameter);
2062 }
2064 isl::set Scop::addNonEmptyDomainConstraints(isl::set C) const {
2065 isl_set *DomainContext = isl_union_set_params(getDomains().release());
2066 return isl::manage(isl_set_intersect_params(C.release(), DomainContext));
2067 }
2069 bool Scop::isDominatedBy(const DominatorTree &DT, BasicBlock *BB) const {
2070 return DT.dominates(BB, getEntry());
2071 }
2073 void Scop::addUserAssumptions(
2074 AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI,
2075 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2076 for (auto &Assumption : AC.assumptions()) {
2077 auto *CI = dyn_cast_or_null<CallInst>(Assumption);
2078 if (!CI || CI->getNumArgOperands() != 1)
2079 continue;
2081 bool InScop = contains(CI);
2082 if (!InScop && !isDominatedBy(DT, CI->getParent()))
2083 continue;
2085 auto *L = LI.getLoopFor(CI->getParent());
2086 auto *Val = CI->getArgOperand(0);
2087 ParameterSetTy DetectedParams;
2088 if (!isAffineConstraint(Val, &R, L, *SE, DetectedParams)) {
2089 ORE.emit(
2090 OptimizationRemarkAnalysis(DEBUG_TYPE, "IgnoreUserAssumption", CI)
2091 << "Non-affine user assumption ignored.");
2092 continue;
2093 }
2095 // Collect all newly introduced parameters.
2096 ParameterSetTy NewParams;
2097 for (auto *Param : DetectedParams) {
2098 Param = extractConstantFactor(Param, *SE).second;
2099 Param = getRepresentingInvariantLoadSCEV(Param);
2100 if (Parameters.count(Param))
2101 continue;
2102 NewParams.insert(Param);
2103 }
2105 SmallVector<isl_set *, 2> ConditionSets;
2106 auto *TI = InScop ? CI->getParent()->getTerminator() : nullptr;
2107 BasicBlock *BB = InScop ? CI->getParent() : getRegion().getEntry();
2108 auto *Dom = InScop ? DomainMap[BB].copy() : isl_set_copy(Context);
2109 assert(Dom && "Cannot propagate a nullptr.");
2110 bool Valid = buildConditionSets(*this, BB, Val, TI, L, Dom,
2111 InvalidDomainMap, ConditionSets);
2112 isl_set_free(Dom);
2114 if (!Valid)
2115 continue;
2117 isl_set *AssumptionCtx = nullptr;
2118 if (InScop) {
2119 AssumptionCtx = isl_set_complement(isl_set_params(ConditionSets[1]));
2120 isl_set_free(ConditionSets[0]);
2121 } else {
2122 AssumptionCtx = isl_set_complement(ConditionSets[1]);
2123 AssumptionCtx = isl_set_intersect(AssumptionCtx, ConditionSets[0]);
2124 }
2126 // Project out newly introduced parameters as they are not otherwise useful.
2127 if (!NewParams.empty()) {
2128 for (unsigned u = 0; u < isl_set_n_param(AssumptionCtx); u++) {
2129 auto *Id = isl_set_get_dim_id(AssumptionCtx, isl_dim_param, u);
2130 auto *Param = static_cast<const SCEV *>(isl_id_get_user(Id));
2131 isl_id_free(Id);
2133 if (!NewParams.count(Param))
2134 continue;
2136 AssumptionCtx =
2137 isl_set_project_out(AssumptionCtx, isl_dim_param, u--, 1);
2138 }
2139 }
2140 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "UserAssumption", CI)
2141 << "Use user assumption: " << stringFromIslObj(AssumptionCtx));
2142 Context = isl_set_intersect(Context, AssumptionCtx);
2143 }
2144 }
2146 void Scop::addUserContext() {
2147 if (UserContextStr.empty())
2148 return;
2150 isl_set *UserContext =
2151 isl_set_read_from_str(getIslCtx(), UserContextStr.c_str());
2152 isl_space *Space = getParamSpace().release();
2153 if (isl_space_dim(Space, isl_dim_param) !=
2154 isl_set_dim(UserContext, isl_dim_param)) {
2155 auto SpaceStr = isl_space_to_str(Space);
2156 errs() << "Error: the context provided in -polly-context does not have "
2157 << "the same number of dimensions as the computed context. Due to "
2158 << "this mismatch, the -polly-context option is ignored. Please "
2159 << "provide the context in the parameter space: " << SpaceStr << ".\n";
2160 free(SpaceStr);
2161 isl_set_free(UserContext);
2162 isl_space_free(Space);
2163 return;
2164 }
2166 for (unsigned i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
2167 auto *NameContext = isl_set_get_dim_name(Context, isl_dim_param, i);
2168 auto *NameUserContext = isl_set_get_dim_name(UserContext, isl_dim_param, i);
2170 if (strcmp(NameContext, NameUserContext) != 0) {
2171 auto SpaceStr = isl_space_to_str(Space);
2172 errs() << "Error: the name of dimension " << i
2173 << " provided in -polly-context "
2174 << "is '" << NameUserContext << "', but the name in the computed "
2175 << "context is '" << NameContext
2176 << "'. Due to this name mismatch, "
2177 << "the -polly-context option is ignored. Please provide "
2178 << "the context in the parameter space: " << SpaceStr << ".\n";
2179 free(SpaceStr);
2180 isl_set_free(UserContext);
2181 isl_space_free(Space);
2182 return;
2183 }
2185 UserContext =
2186 isl_set_set_dim_id(UserContext, isl_dim_param, i,
2187 isl_space_get_dim_id(Space, isl_dim_param, i));
2188 }
2190 Context = isl_set_intersect(Context, UserContext);
2191 isl_space_free(Space);
2192 }
2194 void Scop::buildInvariantEquivalenceClasses() {
2195 DenseMap<std::pair<const SCEV *, Type *>, LoadInst *> EquivClasses;
2197 const InvariantLoadsSetTy &RIL = getRequiredInvariantLoads();
2198 for (LoadInst *LInst : RIL) {
2199 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
2201 Type *Ty = LInst->getType();
2202 LoadInst *&ClassRep = EquivClasses[std::make_pair(PointerSCEV, Ty)];
2203 if (ClassRep) {
2204 InvEquivClassVMap[LInst] = ClassRep;
2205 continue;
2206 }
2208 ClassRep = LInst;
2209 InvariantEquivClasses.emplace_back(
2210 InvariantEquivClassTy{PointerSCEV, MemoryAccessList(), nullptr, Ty});
2211 }
2212 }
2214 void Scop::buildContext() {
2215 isl_space *Space = isl_space_params_alloc(getIslCtx(), 0);
2216 Context = isl_set_universe(isl_space_copy(Space));
2217 InvalidContext = isl_set_empty(isl_space_copy(Space));
2218 AssumedContext = isl_set_universe(Space);
2219 }
2221 void Scop::addParameterBounds() {
2222 unsigned PDim = 0;
2223 for (auto *Parameter : Parameters) {
2224 ConstantRange SRange = SE->getSignedRange(Parameter);
2225 Context =
2226 addRangeBoundsToSet(give(Context), SRange, PDim++, isl::dim::param)
2227 .release();
2228 }
2229 }
2231 static std::vector<isl::id> getFortranArrayIds(Scop::array_range Arrays) {
2232 std::vector<isl::id> OutermostSizeIds;
2233 for (auto Array : Arrays) {
2234 // To check if an array is a Fortran array, we check if it has an isl_pw_aff
2235 // for its outermost dimension. Fortran arrays will have this since the
2236 // outermost dimension size can be picked up from their runtime description.
2237 // TODO: actually need to check if it has a FAD, but for now this works.
2238 if (Array->getNumberOfDimensions() > 0) {
2239 isl::pw_aff PwAff = Array->getDimensionSizePw(0);
2240 if (!PwAff)
2241 continue;
2243 isl::id Id =
2244 isl::manage(isl_pw_aff_get_dim_id(PwAff.get(), isl_dim_param, 0));
2245 assert(!Id.is_null() &&
2246 "Invalid Id for PwAff expression in Fortran array");
2248 OutermostSizeIds.push_back(Id);
2249 }
2250 }
2251 return OutermostSizeIds;
2252 }
2254 // The FORTRAN array size parameters are known to be non-negative.
2255 static isl_set *boundFortranArrayParams(__isl_give isl_set *Context,
2256 Scop::array_range Arrays) {
2257 std::vector<isl::id> OutermostSizeIds;
2258 OutermostSizeIds = getFortranArrayIds(Arrays);
2260 for (isl::id Id : OutermostSizeIds) {
2261 int dim = isl_set_find_dim_by_id(Context, isl_dim_param, Id.get());
2262 Context = isl_set_lower_bound_si(Context, isl_dim_param, dim, 0);
2263 }
2265 return Context;
2266 }
2268 void Scop::realignParams() {
2269 if (PollyIgnoreParamBounds)
2270 return;
2272 // Add all parameters into a common model.
2273 isl::space Space = getFullParamSpace();
2275 // Align the parameters of all data structures to the model.
2276 Context = isl_set_align_params(Context, Space.copy());
2278 // Bound the size of the Fortran array dimensions.
2279 Context = boundFortranArrayParams(Context, arrays());
2281 // As all parameters are known add bounds to them.
2282 addParameterBounds();
2284 for (ScopStmt &Stmt : *this)
2285 Stmt.realignParams();
2286 // Simplify the schedule according to the context too.
2287 Schedule = isl_schedule_gist_domain_params(Schedule, getContext().release());
2288 }
2290 static __isl_give isl_set *
2291 simplifyAssumptionContext(__isl_take isl_set *AssumptionContext,
2292 const Scop &S) {
2293 // If we have modeled all blocks in the SCoP that have side effects we can
2294 // simplify the context with the constraints that are needed for anything to
2295 // be executed at all. However, if we have error blocks in the SCoP we already
2296 // assumed some parameter combinations cannot occur and removed them from the
2297 // domains, thus we cannot use the remaining domain to simplify the
2298 // assumptions.
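// A worked example (parameter values chosen for illustration only): if the
// domain parameters imply N >= 1 and the assumption context is
// "N >= 1 and p >= 0", gisting against the domain parameters simplifies the
// assumption to "p >= 0".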
2299 if (!S.hasErrorBlock()) {
2300 isl_set *DomainParameters = isl_union_set_params(S.getDomains().release());
2301 AssumptionContext =
2302 isl_set_gist_params(AssumptionContext, DomainParameters);
2303 }
2305 AssumptionContext =
2306 isl_set_gist_params(AssumptionContext, S.getContext().release());
2307 return AssumptionContext;
2308 }
2310 void Scop::simplifyContexts() {
2311 // The parameter constraints of the iteration domains give us a set of
2312 // constraints that need to hold for all cases where at least a single
2313 // statement iteration is executed in the whole scop. We now simplify the
2314 // assumed context under the assumption that such constraints hold and at
2315 // least a single statement iteration is executed. For cases where no
2316 // statement instances are executed, the assumptions we have taken about
2317 // the executed code do not matter and can be changed.
2319 // WARNING: This only holds if the assumptions we have taken do not reduce
2320 // the set of statement instances that are executed. Otherwise we
2321 // may run into a case where the iteration domains suggest that
2322 // for a certain set of parameter constraints no code is executed,
2323 // but in the original program some computation would have been
2324 // performed. In such a case, modifying the run-time conditions and
2325 // possibly influencing the run-time check may cause certain scops
2326 // to not be executed.
2328 // Example:
2330 // When delinearizing the following code:
2332 // for (long i = 0; i < 100; i++)
2333 // for (long j = 0; j < m; j++)
2334 // A[i+p][j] = 1.0;
2336 // we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
2337 // otherwise we would access out of bound data. Now, knowing that code is
2338 // only executed for the case m >= 0, it is sufficient to assume p >= 0.
2339 AssumedContext = simplifyAssumptionContext(AssumedContext, *this);
2340 InvalidContext =
2341 isl_set_align_params(InvalidContext, getParamSpace().release());
2342 }
2344 /// Add the minimal/maximal access in @p Set to @p User.
2345 static isl::stat
2346 buildMinMaxAccess(isl::set Set, Scop::MinMaxVectorTy &MinMaxAccesses, Scop &S) {
2347 isl::pw_multi_aff MinPMA, MaxPMA;
2348 isl::pw_aff LastDimAff;
2349 isl::aff OneAff;
2350 unsigned Pos;
2351 isl::ctx Ctx = Set.get_ctx();
2353 Set = Set.remove_divs();
2355 if (isl_set_n_basic_set(Set.get()) >= MaxDisjunctsInDomain)
2356 return isl::stat::error;
2358 // Restrict the number of parameters involved in the access as the lexmin/
2359 // lexmax computation will take too long if this number is high.
2361 // Experiments with a simple test case using an i7 4800MQ:
2363 // #Parameters involved | Time (in sec)
2364 // 6 | 0.01
2365 // 7 | 0.04
2366 // 8 | 0.12
2367 // 9 | 0.40
2368 // 10 | 1.54
2369 // 11 | 6.78
2370 // 12 | 30.38
2372 if (isl_set_n_param(Set.get()) > RunTimeChecksMaxParameters) {
2373 unsigned InvolvedParams = 0;
2374 for (unsigned u = 0, e = isl_set_n_param(Set.get()); u < e; u++)
2375 if (Set.involves_dims(isl::dim::param, u, 1))
2376 InvolvedParams++;
2378 if (InvolvedParams > RunTimeChecksMaxParameters)
2379 return isl::stat::error;
2380 }
2382 if (isl_set_n_basic_set(Set.get()) > RunTimeChecksMaxAccessDisjuncts)
2383 return isl::stat::error;
2385 MinPMA = Set.lexmin_pw_multi_aff();
2386 MaxPMA = Set.lexmax_pw_multi_aff();
2388 if (isl_ctx_last_error(Ctx.get()) == isl_error_quota)
2389 return isl::stat::error;
2391 MinPMA = MinPMA.coalesce();
2392 MaxPMA = MaxPMA.coalesce();
2394 // Adjust the last dimension of the maximal access by one as we want to
2395 // enclose the accessed memory region by MinPMA and MaxPMA. The pointer
2396 // we test during code generation might now point after the end of the
2397 // allocated array but we will never dereference it anyway.
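// An illustrative example: if the accesses cover A[0] ... A[N-1], the
// enclosing pair becomes MinPMA = 0 and MaxPMA = N; the address of A[N] is
// only compared against in the generated run-time check and never
// dereferenced.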
2398 assert(MaxPMA.dim(isl::dim::out) && "Assumed at least one output dimension");
2399 Pos = MaxPMA.dim(isl::dim::out) - 1;
2400 LastDimAff = MaxPMA.get_pw_aff(Pos);
2401 OneAff = isl::aff(isl::local_space(LastDimAff.get_domain_space()));
2402 OneAff = OneAff.add_constant_si(1);
2403 LastDimAff = LastDimAff.add(OneAff);
2404 MaxPMA = MaxPMA.set_pw_aff(Pos, LastDimAff);
2406 MinMaxAccesses.push_back(std::make_pair(MinPMA.copy(), MaxPMA.copy()));
2408 return isl::stat::ok;
2409 }
2411 static __isl_give isl_set *getAccessDomain(MemoryAccess *MA) {
2412 isl_set *Domain = MA->getStatement()->getDomain().release();
2413 Domain = isl_set_project_out(Domain, isl_dim_set, 0, isl_set_n_dim(Domain));
2414 return isl_set_reset_tuple_id(Domain);
2415 }
2417 /// Wrapper function to calculate minimal/maximal accesses to each array.
2418 static bool calculateMinMaxAccess(Scop::AliasGroupTy AliasGroup, Scop &S,
2419 Scop::MinMaxVectorTy &MinMaxAccesses) {
2420 MinMaxAccesses.reserve(AliasGroup.size());
2422 isl::union_set Domains = S.getDomains();
2423 isl::union_map Accesses = isl::union_map::empty(S.getParamSpace());
2425 for (MemoryAccess *MA : AliasGroup)
2426 Accesses = Accesses.add_map(give(MA->getAccessRelation().release()));
2428 Accesses = Accesses.intersect_domain(Domains);
2429 isl::union_set Locations = Accesses.range();
2430 Locations = Locations.coalesce();
2431 Locations = Locations.detect_equalities();
2433 auto Lambda = [&MinMaxAccesses, &S](isl::set Set) -> isl::stat {
2434 return buildMinMaxAccess(Set, MinMaxAccesses, S);
2435 };
2436 return Locations.foreach_set(Lambda) == isl::stat::ok;
2437 }
2439 /// Helper to treat non-affine regions and basic blocks the same.
2441 ///{
2443 /// Return the block that is the representing block for @p RN.
2444 static inline BasicBlock *getRegionNodeBasicBlock(RegionNode *RN) {
2445 return RN->isSubRegion() ? RN->getNodeAs<Region>()->getEntry()
2446 : RN->getNodeAs<BasicBlock>();
2447 }
2449 /// Return the @p idx'th block that is executed after @p RN.
2450 static inline BasicBlock *
2451 getRegionNodeSuccessor(RegionNode *RN, TerminatorInst *TI, unsigned idx) {
2452 if (RN->isSubRegion()) {
2453 assert(idx == 0);
2454 return RN->getNodeAs<Region>()->getExit();
2455 }
2456 return TI->getSuccessor(idx);
2457 }
2459 /// Return the smallest loop surrounding @p RN.
2460 static inline Loop *getRegionNodeLoop(RegionNode *RN, LoopInfo &LI) {
2461 if (!RN->isSubRegion()) {
2462 BasicBlock *BB = RN->getNodeAs<BasicBlock>();
2463 Loop *L = LI.getLoopFor(BB);
2465 // Unreachable statements are not considered to belong to a LLVM loop, as
2466 // they are not part of an actual loop in the control flow graph.
2467 // Nevertheless, we handle certain unreachable statements that are common
2468 // when modeling run-time bounds checks as being part of the loop to be
2469 // able to model them and to later eliminate the run-time bounds checks.
2471 // Specifically, for basic blocks that terminate in an unreachable and
2472 // where the immediate predecessor is part of a loop, we assume these
2473 // basic blocks belong to the loop the predecessor belongs to. This
2474 // allows us to model the following code.
2476 // for (i = 0; i < N; i++) {
2477 // if (i > 1024)
2478 // abort(); <- this abort might be translated to an
2479 // unreachable
2481 // A[i] = ...
2482 // }
2483 if (!L && isa<UnreachableInst>(BB->getTerminator()) && BB->getPrevNode())
2484 L = LI.getLoopFor(BB->getPrevNode());
2485 return L;
2486 }
2488 Region *NonAffineSubRegion = RN->getNodeAs<Region>();
2489 Loop *L = LI.getLoopFor(NonAffineSubRegion->getEntry());
2490 while (L && NonAffineSubRegion->contains(L))
2491 L = L->getParentLoop();
2492 return L;
2493 }
2495 /// Get the number of blocks in @p L.
2497 /// The number of blocks in a loop is the number of basic blocks actually
2498 /// belonging to the loop, as well as all single basic blocks that the loop
2499 /// exits to and which terminate in an unreachable instruction. We do not
2500 /// allow such basic blocks in the exit of a scop, hence they belong to the
2501 /// scop and represent run-time conditions which we want to model and
2502 /// subsequently speculate away.
2504 /// @see getRegionNodeLoop for additional details.
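/// As an illustrative example: a two-block loop whose bound check exits to a
/// separate block that ends in an unreachable (e.g., after a call to abort())
/// is counted as having three blocks.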
2505 unsigned getNumBlocksInLoop(Loop *L) {
2506 unsigned NumBlocks = L->getNumBlocks();
2507 SmallVector<BasicBlock *, 4> ExitBlocks;
2508 L->getExitBlocks(ExitBlocks);
2510 for (auto ExitBlock : ExitBlocks) {
2511 if (isa<UnreachableInst>(ExitBlock->getTerminator()))
2512 NumBlocks++;
2513 }
2514 return NumBlocks;
2515 }
2517 static inline unsigned getNumBlocksInRegionNode(RegionNode *RN) {
2518 if (!RN->isSubRegion())
2519 return 1;
2521 Region *R = RN->getNodeAs<Region>();
2522 return std::distance(R->block_begin(), R->block_end());
2523 }
2525 static bool containsErrorBlock(RegionNode *RN, const Region &R, LoopInfo &LI,
2526 const DominatorTree &DT) {
2527 if (!RN->isSubRegion())
2528 return isErrorBlock(*RN->getNodeAs<BasicBlock>(), R, LI, DT);
2529 for (BasicBlock *BB : RN->getNodeAs<Region>()->blocks())
2530 if (isErrorBlock(*BB, R, LI, DT))
2531 return true;
2532 return false;
2533 }
2535 ///}
2537 static inline __isl_give isl_set *addDomainDimId(__isl_take isl_set *Domain,
2538 unsigned Dim, Loop *L) {
2539 Domain = isl_set_lower_bound_si(Domain, isl_dim_set, Dim, -1);
2540 isl_id *DimId =
2541 isl_id_alloc(isl_set_get_ctx(Domain), nullptr, static_cast<void *>(L));
2542 return isl_set_set_dim_id(Domain, isl_dim_set, Dim, DimId);
2543 }
2545 isl::set Scop::getDomainConditions(const ScopStmt *Stmt) const {
2546 return getDomainConditions(Stmt->getEntryBlock());
2547 }
2549 isl::set Scop::getDomainConditions(BasicBlock *BB) const {
2550 auto DIt = DomainMap.find(BB);
2551 if (DIt != DomainMap.end())
2552 return DIt->getSecond();
2554 auto &RI = *R.getRegionInfo();
2555 auto *BBR = RI.getRegionFor(BB);
2556 while (BBR->getEntry() == BB)
2557 BBR = BBR->getParent();
2558 return getDomainConditions(BBR->getEntry());
2559 }
2561 bool Scop::buildDomains(Region *R, DominatorTree &DT, LoopInfo &LI,
2562 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2563 bool IsOnlyNonAffineRegion = isNonAffineSubRegion(R);
2564 auto *EntryBB = R->getEntry();
2565 auto *L = IsOnlyNonAffineRegion ? nullptr : LI.getLoopFor(EntryBB);
2566 int LD = getRelativeLoopDepth(L);
2567 auto *S = isl_set_universe(isl_space_set_alloc(getIslCtx(), 0, LD + 1));
2569 while (LD-- >= 0) {
2570 S = addDomainDimId(S, LD + 1, L);
2571 L = L->getParentLoop();
2572 }
2574 InvalidDomainMap[EntryBB] = isl::manage(isl_set_empty(isl_set_get_space(S)));
2575 DomainMap[EntryBB] = isl::manage(S);
2577 if (IsOnlyNonAffineRegion)
2578 return !containsErrorBlock(R->getNode(), *R, LI, DT);
2580 if (!buildDomainsWithBranchConstraints(R, DT, LI, InvalidDomainMap))
2581 return false;
2583 if (!propagateDomainConstraints(R, DT, LI, InvalidDomainMap))
2584 return false;
2586 // Error blocks and blocks dominated by them have been assumed to never be
2587 // executed. Representing them in the Scop does not add any value. In fact,
2588 // it is likely to cause issues during construction of the ScopStmts. The
2589 // contents of error blocks have not been verified to be expressible and
2590 // will cause problems when building up a ScopStmt for them.
2591 // Furthermore, basic blocks dominated by error blocks may reference
2592 // instructions in the error block which, if the error block is not modeled,
2593 // can themselves not be constructed properly. To this end we will replace
2594 // the domains of error blocks and those only reachable via error blocks
2595 // with an empty set. Additionally, we will record for each block under which
2596 // parameter combination it would be reached via an error block in its
2597 // InvalidDomain. This information is needed during load hoisting.
2598 if (!propagateInvalidStmtDomains(R, DT, LI, InvalidDomainMap))
2599 return false;
2601 return true;
2602 }
2604 /// Adjust the dimensions of @p Dom that was constructed for @p OldL
2605 /// to be compatible to domains constructed for loop @p NewL.
2607 /// This function assumes @p NewL and @p OldL are equal or there is a CFG
2608 /// edge from @p OldL to @p NewL.
2609 static __isl_give isl_set *adjustDomainDimensions(Scop &S,
2610 __isl_take isl_set *Dom,
2611 Loop *OldL, Loop *NewL) {
2612 // If the loops are the same there is nothing to do.
2613 if (NewL == OldL)
2614 return Dom;
2616 int OldDepth = S.getRelativeLoopDepth(OldL);
2617 int NewDepth = S.getRelativeLoopDepth(NewL);
2618 // If both loops are non-affine loops there is nothing to do.
2619 if (OldDepth == -1 && NewDepth == -1)
2620 return Dom;
2622 // Distinguish three cases:
2623 // 1) The depth is the same but the loops are not.
2624 // => One loop was left one was entered.
2625 // 2) The depth increased from OldL to NewL.
2626 // => One loop was entered, none was left.
2627 // 3) The depth decreased from OldL to NewL.
2628 // => Loops were left; the difference of the depths defines how many.
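// An illustrative example: going from a loop at depth 2 to one at depth 1
// projects out the innermost dimension (case 3), while entering a loop one
// level deeper appends a fresh dimension tagged with the new loop (case 2).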
2629 if (OldDepth == NewDepth) {
2630 assert(OldL->getParentLoop() == NewL->getParentLoop());
2631 Dom = isl_set_project_out(Dom, isl_dim_set, NewDepth, 1);
2632 Dom = isl_set_add_dims(Dom, isl_dim_set, 1);
2633 Dom = addDomainDimId(Dom, NewDepth, NewL);
2634 } else if (OldDepth < NewDepth) {
2635 assert(OldDepth + 1 == NewDepth);
2636 auto &R = S.getRegion();
2637 (void)R;
2638 assert(NewL->getParentLoop() == OldL ||
2639 ((!OldL || !R.contains(OldL)) && R.contains(NewL)));
2640 Dom = isl_set_add_dims(Dom, isl_dim_set, 1);
2641 Dom = addDomainDimId(Dom, NewDepth, NewL);
2642 } else {
2643 assert(OldDepth > NewDepth);
2644 int Diff = OldDepth - NewDepth;
2645 int NumDim = isl_set_n_dim(Dom);
2646 assert(NumDim >= Diff);
2647 Dom = isl_set_project_out(Dom, isl_dim_set, NumDim - Diff, Diff);
2648 }
2650 return Dom;
2651 }
2653 bool Scop::propagateInvalidStmtDomains(
2654 Region *R, DominatorTree &DT, LoopInfo &LI,
2655 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2656 ReversePostOrderTraversal<Region *> RTraversal(R);
2657 for (auto *RN : RTraversal) {
2659 // Recurse for affine subregions but go on for basic blocks and non-affine
2660 // subregions.
2661 if (RN->isSubRegion()) {
2662 Region *SubRegion = RN->getNodeAs<Region>();
2663 if (!isNonAffineSubRegion(SubRegion)) {
2664 propagateInvalidStmtDomains(SubRegion, DT, LI, InvalidDomainMap);
2665 continue;
2666 }
2667 }
2669 bool ContainsErrorBlock = containsErrorBlock(RN, getRegion(), LI, DT);
2670 BasicBlock *BB = getRegionNodeBasicBlock(RN);
2671 isl::set &Domain = DomainMap[BB];
2672 assert(Domain && "Cannot propagate a nullptr");
2674 isl::set InvalidDomain = InvalidDomainMap[BB];
2676 bool IsInvalidBlock = ContainsErrorBlock || Domain.is_subset(InvalidDomain);
2678 if (!IsInvalidBlock) {
2679 InvalidDomain = InvalidDomain.intersect(Domain);
2680 } else {
2681 InvalidDomain = Domain;
2682 isl::set DomPar = Domain.params();
2683 recordAssumption(ERRORBLOCK, DomPar.release(),
2684 BB->getTerminator()->getDebugLoc(), AS_RESTRICTION);
2685 Domain = nullptr;
2686 }
2688 if (InvalidDomain.is_empty()) {
2689 InvalidDomainMap[BB] = InvalidDomain;
2690 continue;
2691 }
2693 auto *BBLoop = getRegionNodeLoop(RN, LI);
2694 auto *TI = BB->getTerminator();
2695 unsigned NumSuccs = RN->isSubRegion() ? 1 : TI->getNumSuccessors();
2696 for (unsigned u = 0; u < NumSuccs; u++) {
2697 auto *SuccBB = getRegionNodeSuccessor(RN, TI, u);
2699 // Skip successors outside the SCoP.
2700 if (!contains(SuccBB))
2701 continue;
2703 // Skip backedges.
2704 if (DT.dominates(SuccBB, BB))
2705 continue;
2707 Loop *SuccBBLoop = getFirstNonBoxedLoopFor(SuccBB, LI, getBoxedLoops());
2709 auto *AdjustedInvalidDomain = adjustDomainDimensions(
2710 *this, InvalidDomain.copy(), BBLoop, SuccBBLoop);
2712 auto *SuccInvalidDomain = InvalidDomainMap[SuccBB].copy();
2713 SuccInvalidDomain =
2714 isl_set_union(SuccInvalidDomain, AdjustedInvalidDomain);
2715 SuccInvalidDomain = isl_set_coalesce(SuccInvalidDomain);
2716 unsigned NumConjuncts = isl_set_n_basic_set(SuccInvalidDomain);
2718 InvalidDomainMap[SuccBB] = isl::manage(SuccInvalidDomain);
2720 // Check if the maximal number of domain disjunctions was reached.
2721 // In case this happens we will bail.
2722 if (NumConjuncts < MaxDisjunctsInDomain)
2723 continue;
2725 InvalidDomainMap.erase(BB);
2726 invalidate(COMPLEXITY, TI->getDebugLoc(), TI->getParent());
2727 return false;
2728 }
2730 InvalidDomainMap[BB] = InvalidDomain;
2731 }
2733 return true;
2734 }
2736 void Scop::propagateDomainConstraintsToRegionExit(
2737 BasicBlock *BB, Loop *BBLoop,
2738 SmallPtrSetImpl<BasicBlock *> &FinishedExitBlocks, LoopInfo &LI,
2739 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2740 // Check if the block @p BB is the entry of a region. If so we propagate its
2741 // domain to the exit block of the region. Otherwise we are done.
2742 auto *RI = R.getRegionInfo();
2743 auto *BBReg = RI ? RI->getRegionFor(BB) : nullptr;
2744 auto *ExitBB = BBReg ? BBReg->getExit() : nullptr;
2745 if (!BBReg || BBReg->getEntry() != BB || !contains(ExitBB))
2746 return;
2748 // Do not propagate the domain if there is a loop backedge inside the region
2749 // that would prevent the exit block from being executed.
2750 auto *L = BBLoop;
2751 while (L && contains(L)) {
2752 SmallVector<BasicBlock *, 4> LatchBBs;
2753 BBLoop->getLoopLatches(LatchBBs);
2754 for (auto *LatchBB : LatchBBs)
2755 if (BB != LatchBB && BBReg->contains(LatchBB))
2756 return;
2757 L = L->getParentLoop();
2758 }
2760 isl::set Domain = DomainMap[BB];
2761 assert(Domain && "Cannot propagate a nullptr");
2763 Loop *ExitBBLoop = getFirstNonBoxedLoopFor(ExitBB, LI, getBoxedLoops());
2765 // Since the dimensions of @p BB and @p ExitBB might be different we have to
2766 // adjust the domain before we can propagate it.
2767 isl::set AdjustedDomain = isl::manage(
2768 adjustDomainDimensions(*this, Domain.copy(), BBLoop, ExitBBLoop));
2769 isl::set &ExitDomain = DomainMap[ExitBB];
2771 // If the exit domain is not yet created we set it; otherwise we "add" the
2772 // current domain.
2773 ExitDomain = ExitDomain ? AdjustedDomain.unite(ExitDomain) : AdjustedDomain;
2775 // Initialize the invalid domain.
2776 InvalidDomainMap[ExitBB] = ExitDomain.empty(ExitDomain.get_space());
2778 FinishedExitBlocks.insert(ExitBB);
2779 }
2781 bool Scop::buildDomainsWithBranchConstraints(
2782 Region *R, DominatorTree &DT, LoopInfo &LI,
2783 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2784 // To create the domain for each block in R we iterate over all blocks and
2785 // subregions in R and propagate the conditions under which the current region
2786 // element is executed. To this end we iterate in reverse post order over R as
2787 // it ensures that we first visit all predecessors of a region node (either a
2788 // basic block or a subregion) before we visit the region node itself.
2789 // Initially, only the domain for the SCoP region entry block is set and from
2790 // there we propagate the current domain to all successors, however we add the
2791 // condition that the successor is actually executed next.
2792 // As we are only interested in non-loop carried constraints here we can
2793 // simply skip loop back edges.
2795 SmallPtrSet<BasicBlock *, 8> FinishedExitBlocks;
2796 ReversePostOrderTraversal<Region *> RTraversal(R);
2797 for (auto *RN : RTraversal) {
2798 // Recurse for affine subregions but go on for basic blocks and non-affine
2799 // subregions.
2800 if (RN->isSubRegion()) {
2801 Region *SubRegion = RN->getNodeAs<Region>();
2802 if (!isNonAffineSubRegion(SubRegion)) {
2803 if (!buildDomainsWithBranchConstraints(SubRegion, DT, LI,
2804 InvalidDomainMap))
2805 return false;
2806 continue;
2807 }
2808 }
2810 if (containsErrorBlock(RN, getRegion(), LI, DT))
2811 HasErrorBlock = true;
2813 BasicBlock *BB = getRegionNodeBasicBlock(RN);
2814 TerminatorInst *TI = BB->getTerminator();
2816 if (isa<UnreachableInst>(TI))
2817 continue;
2819 isl::set Domain = DomainMap.lookup(BB);
2820 if (!Domain)
2821 continue;
2822 MaxLoopDepth = std::max(MaxLoopDepth, isl_set_n_dim(Domain.get()));
2824 auto *BBLoop = getRegionNodeLoop(RN, LI);
2825 // Propagate the domain from BB directly to blocks that have a superset
2826 // domain, at the moment only region exit nodes of regions that start in BB.
2827 propagateDomainConstraintsToRegionExit(BB, BBLoop, FinishedExitBlocks, LI,
2828 InvalidDomainMap);
2830 // If all successors of BB have been set a domain through the propagation
2831 // above we do not need to build condition sets but can just skip this
2832 // block. However, it is important to note that this is a local property
2833 // with regards to the region @p R. To this end FinishedExitBlocks is a
2834 // local variable.
2835 auto IsFinishedRegionExit = [&FinishedExitBlocks](BasicBlock *SuccBB) {
2836 return FinishedExitBlocks.count(SuccBB);
2837 };
2838 if (std::all_of(succ_begin(BB), succ_end(BB), IsFinishedRegionExit))
2839 continue;
2841 // Build the condition sets for the successor nodes of the current region
2842 // node. If it is a non-affine subregion we will always execute the single
2843 // exit node, hence the single entry node domain is the condition set. For
2844 // basic blocks we use the helper function buildConditionSets.
2845 SmallVector<isl_set *, 8> ConditionSets;
2846 if (RN->isSubRegion())
2847 ConditionSets.push_back(Domain.copy());
2848 else if (!buildConditionSets(*this, BB, TI, BBLoop, Domain.get(),
2849 InvalidDomainMap, ConditionSets))
2850 return false;
2852 // Now iterate over the successors and set their initial domain based on
2853 // their condition set. We skip back edges here and have to be careful when
2854 // we leave a loop not to keep constraints over a dimension that doesn't
2855 // exist anymore.
2856 assert(RN->isSubRegion() || TI->getNumSuccessors() == ConditionSets.size());
2857 for (unsigned u = 0, e = ConditionSets.size(); u < e; u++) {
2858 isl::set CondSet = isl::manage(ConditionSets[u]);
2859 BasicBlock *SuccBB = getRegionNodeSuccessor(RN, TI, u);
2861 // Skip blocks outside the region.
2862 if (!contains(SuccBB))
2863 continue;
2865 // If we propagate the domain of some block to "SuccBB" we do not have to
2866 // adjust the domain.
2867 if (FinishedExitBlocks.count(SuccBB))
2868 continue;
2870 // Skip back edges.
2871 if (DT.dominates(SuccBB, BB))
2872 continue;
2874 Loop *SuccBBLoop = getFirstNonBoxedLoopFor(SuccBB, LI, getBoxedLoops());
2876 CondSet = isl::manage(
2877 adjustDomainDimensions(*this, CondSet.copy(), BBLoop, SuccBBLoop));
2879 // Set the domain for the successor or merge it with an existing domain in
2880 // case there are multiple paths (without loop back edges) to the
2881 // successor block.
2882 isl::set &SuccDomain = DomainMap[SuccBB];
2884 if (SuccDomain) {
2885 SuccDomain = SuccDomain.unite(CondSet).coalesce();
2886 } else {
2887 // Initialize the invalid domain.
2888 InvalidDomainMap[SuccBB] = CondSet.empty(CondSet.get_space());
2889 SuccDomain = CondSet;
2890 }
2892 SuccDomain = SuccDomain.detect_equalities();
2894 // Check if the maximal number of domain disjunctions was reached.
2895 // In case this happens we will clean up and bail.
2896 if (isl_set_n_basic_set(SuccDomain.get()) < MaxDisjunctsInDomain)
2897 continue;
2899 invalidate(COMPLEXITY, DebugLoc());
2900 while (++u < ConditionSets.size())
2901 isl_set_free(ConditionSets[u]);
2902 return false;
2903 }
2904 }
2906 return true;
2907 }
2909 isl::set Scop::getPredecessorDomainConstraints(BasicBlock *BB, isl::set Domain,
2910 DominatorTree &DT,
2911 LoopInfo &LI) {
2912 // If @p BB is the ScopEntry we are done
2913 if (R.getEntry() == BB)
2914 return isl::set::universe(Domain.get_space());
2916 // The region info of this function.
2917 auto &RI = *R.getRegionInfo();
2919 Loop *BBLoop = getFirstNonBoxedLoopFor(BB, LI, getBoxedLoops());
2921 // A domain to collect all predecessor domains, thus all conditions under
2922 // which the block is executed. To this end we start with the empty domain.
2923 isl::set PredDom = isl::set::empty(Domain.get_space());
2925 // Set of regions of which the entry block domain has been propagated to BB.
2926 // All predecessors inside any of the regions can be skipped.
2927 SmallSet<Region *, 8> PropagatedRegions;
2929 for (auto *PredBB : predecessors(BB)) {
2930 // Skip backedges.
2931 if (DT.dominates(BB, PredBB))
2932 continue;
2934 // If the predecessor is in a region we used for propagation we can skip it.
2935 auto PredBBInRegion = [PredBB](Region *PR) { return PR->contains(PredBB); };
2936 if (std::any_of(PropagatedRegions.begin(), PropagatedRegions.end(),
2937 PredBBInRegion)) {
2938 continue;
2939 }
2941 // Check if there is a valid region we can use for propagation, thus look
2942 // for a region that contains the predecessor and has @p BB as exit block.
2943 auto *PredR = RI.getRegionFor(PredBB);
2944 while (PredR->getExit() != BB && !PredR->contains(BB))
2945 PredR = PredR->getParent();
2947 // If a valid region for propagation was found use the entry of that region
2948 // for propagation, otherwise the PredBB directly.
2949 if (PredR->getExit() == BB) {
2950 PredBB = PredR->getEntry();
2951 PropagatedRegions.insert(PredR);
2952 }
2954 auto *PredBBDom = getDomainConditions(PredBB).release();
2955 Loop *PredBBLoop = getFirstNonBoxedLoopFor(PredBB, LI, getBoxedLoops());
2957 PredBBDom = adjustDomainDimensions(*this, PredBBDom, PredBBLoop, BBLoop);
2959 PredDom = PredDom.unite(isl::manage(PredBBDom));
2960 }
2962 return PredDom;
2963 }
2965 bool Scop::propagateDomainConstraints(
2966 Region *R, DominatorTree &DT, LoopInfo &LI,
2967 DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
2968 // Iterate over the region R and propagate the domain constraints from the
2969 // predecessors to the current node. In contrast to the
2970 // buildDomainsWithBranchConstraints function, this one will pull the domain
2971 // information from the predecessors instead of pushing it to the successors.
2972 // Additionally, we assume the domains to be already present in the domain
2973 // map here. However, we iterate again in reverse post order so we know all
2974 // predecessors have been visited before a block or non-affine subregion is
2975 // visited.
2977 ReversePostOrderTraversal<Region *> RTraversal(R);
2978 for (auto *RN : RTraversal) {
2979 // Recurse for affine subregions but go on for basic blocks and non-affine
2980 // subregions.
2981 if (RN->isSubRegion()) {
2982 Region *SubRegion = RN->getNodeAs<Region>();
2983 if (!isNonAffineSubRegion(SubRegion)) {
2984 if (!propagateDomainConstraints(SubRegion, DT, LI, InvalidDomainMap))
2985 return false;
2986 continue;
2987 }
2988 }
2990 BasicBlock *BB = getRegionNodeBasicBlock(RN);
2991 isl::set &Domain = DomainMap[BB];
2992 assert(Domain);
2994 // Under the union of all predecessor conditions we can reach this block.
2995 isl::set PredDom = getPredecessorDomainConstraints(BB, Domain, DT, LI);
2996 Domain = Domain.intersect(PredDom).coalesce();
2997 Domain = Domain.align_params(getParamSpace());
2999 Loop *BBLoop = getRegionNodeLoop(RN, LI);
3000 if (BBLoop && BBLoop->getHeader() == BB && contains(BBLoop))
3001 if (!addLoopBoundsToHeaderDomain(BBLoop, LI, InvalidDomainMap))
3002 return false;
3003 }
3005 return true;
3006 }
3008 /// Create a map to map from a given iteration to a subsequent iteration.
3010 /// This map maps from SetSpace -> SetSpace where the dimensions @p Dim
3011 /// is incremented by one and all other dimensions are equal, e.g.,
3012 /// [i0, i1, i2, i3] -> [i0, i1, i2 + 1, i3]
3014 /// if @p Dim is 2 and @p SetSpace has 4 dimensions.
3015 static __isl_give isl_map *
3016 createNextIterationMap(__isl_take isl_space *SetSpace, unsigned Dim) {
3017 auto *MapSpace = isl_space_map_from_set(SetSpace);
3018 auto *NextIterationMap = isl_map_universe(isl_space_copy(MapSpace));
3019 for (unsigned u = 0; u < isl_map_dim(NextIterationMap, isl_dim_in); u++)
3020 if (u != Dim)
3021 NextIterationMap =
3022 isl_map_equate(NextIterationMap, isl_dim_in, u, isl_dim_out, u);
3023 auto *C = isl_constraint_alloc_equality(isl_local_space_from_space(MapSpace));
3024 C = isl_constraint_set_constant_si(C, 1);
3025 C = isl_constraint_set_coefficient_si(C, isl_dim_in, Dim, 1);
3026 C = isl_constraint_set_coefficient_si(C, isl_dim_out, Dim, -1);
3027 NextIterationMap = isl_map_add_constraint(NextIterationMap, C);
3028 return NextIterationMap;
3029 }
3031 bool Scop::addLoopBoundsToHeaderDomain(
3032 Loop *L, LoopInfo &LI, DenseMap<BasicBlock *, isl::set> &InvalidDomainMap) {
3033 int LoopDepth = getRelativeLoopDepth(L);
3034 assert(LoopDepth >= 0 && "Loop in region should have at least depth one");
3036 BasicBlock *HeaderBB = L->getHeader();
3037 assert(DomainMap.count(HeaderBB));
3038 isl::set &HeaderBBDom = DomainMap[HeaderBB];
3040 isl::map NextIterationMap = isl::manage(
3041 createNextIterationMap(HeaderBBDom.get_space().release(), LoopDepth));
3043 isl::set UnionBackedgeCondition = HeaderBBDom.empty(HeaderBBDom.get_space());
3045 SmallVector<BasicBlock *, 4> LatchBlocks;
3046 L->getLoopLatches(LatchBlocks);
3048 for (BasicBlock *LatchBB : LatchBlocks) {
3049 // If the latch is only reachable via error statements we skip it.
3050 isl::set LatchBBDom = DomainMap.lookup(LatchBB);
3051 if (!LatchBBDom)
3052 continue;
3054 isl::set BackedgeCondition = nullptr;
3056 TerminatorInst *TI = LatchBB->getTerminator();
3057 BranchInst *BI = dyn_cast<BranchInst>(TI);
3058 assert(BI && "Only branch instructions allowed in loop latches");
3060 if (BI->isUnconditional())
3061 BackedgeCondition = LatchBBDom;
3062 else {
3063 SmallVector<isl_set *, 8> ConditionSets;
3064 int idx = BI->getSuccessor(0) != HeaderBB;
3065 if (!buildConditionSets(*this, LatchBB, TI, L, LatchBBDom.get(),
3066 InvalidDomainMap, ConditionSets))
3067 return false;
3069 // Free the non back edge condition set as we do not need it.
3070 isl_set_free(ConditionSets[1 - idx]);
3072 BackedgeCondition = isl::manage(ConditionSets[idx]);
3075 int LatchLoopDepth = getRelativeLoopDepth(LI.getLoopFor(LatchBB));
3076 assert(LatchLoopDepth >= LoopDepth);
3077 BackedgeCondition = BackedgeCondition.project_out(
3078 isl::dim::set, LoopDepth + 1, LatchLoopDepth - LoopDepth);
3079 UnionBackedgeCondition = UnionBackedgeCondition.unite(BackedgeCondition);
3082 isl::map ForwardMap = isl::map::lex_le(HeaderBBDom.get_space());
3083 for (int i = 0; i < LoopDepth; i++)
3084 ForwardMap = ForwardMap.equate(isl::dim::in, i, isl::dim::out, i);
3086 isl::set UnionBackedgeConditionComplement =
3087 UnionBackedgeCondition.complement();
3088 UnionBackedgeConditionComplement =
3089 UnionBackedgeConditionComplement.lower_bound_si(isl::dim::set, LoopDepth,
3090 0);
3091 UnionBackedgeConditionComplement =
3092 UnionBackedgeConditionComplement.apply(ForwardMap);
3093 HeaderBBDom = HeaderBBDom.subtract(UnionBackedgeConditionComplement);
3094 HeaderBBDom = HeaderBBDom.apply(NextIterationMap);
3096 auto Parts = partitionSetParts(HeaderBBDom.copy(), LoopDepth);
3097 HeaderBBDom = isl::manage(Parts.second);
3099 // Check if there is a <nsw> tagged AddRec for this loop and if so do not add
3100 // the boundedness assumption to the context as it is already implied by the
3101 // <nsw> tag.
3102 if (Affinator.hasNSWAddRecForLoop(L)) {
3103 isl_set_free(Parts.first);
3104 return true;
3107 isl_set *UnboundedCtx = isl_set_params(Parts.first);
3108 recordAssumption(INFINITELOOP, UnboundedCtx,
3109 HeaderBB->getTerminator()->getDebugLoc(), AS_RESTRICTION);
3110 return true;
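// Illustrative sketch (not from the original source): for a loop
//   for (int i = 0; i < N; i++)
// the header domain starts out as { [i] : i >= 0 }. The backedge is taken
// under { [i] : i < N }, so the complement/ForwardMap/NextIterationMap steps
// above tighten the domain to roughly { [i] : 0 <= i < N }, while the
// parameter values for which the loop would not terminate end up in
// Parts.first and feed the INFINITELOOP restriction recorded above.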
3113 MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
3114 Value *PointerBase = MA->getOriginalBaseAddr();
3116 auto *PointerBaseInst = dyn_cast<Instruction>(PointerBase);
3117 if (!PointerBaseInst)
3118 return nullptr;
3120 auto *BasePtrStmt = getStmtFor(PointerBaseInst);
3121 if (!BasePtrStmt)
3122 return nullptr;
3124 return BasePtrStmt->getArrayAccessOrNULLFor(PointerBaseInst);
3127 bool Scop::hasNonHoistableBasePtrInScop(MemoryAccess *MA,
3128 isl::union_map Writes) {
3129 if (auto *BasePtrMA = lookupBasePtrAccess(MA)) {
3130 return getNonHoistableCtx(BasePtrMA, Writes).is_null();
3133 Value *BaseAddr = MA->getOriginalBaseAddr();
3134 if (auto *BasePtrInst = dyn_cast<Instruction>(BaseAddr))
3135 if (!isa<LoadInst>(BasePtrInst))
3136 return contains(BasePtrInst);
3138 return false;
3141 bool Scop::buildAliasChecks(AliasAnalysis &AA) {
3142 if (!PollyUseRuntimeAliasChecks)
3143 return true;
3145 if (buildAliasGroups(AA)) {
3146 // Aliasing assumptions do not go through addAssumption but we still want to
3147 // collect statistics so we do it here explicitly.
3148 if (MinMaxAliasGroups.size())
3149 AssumptionsAliasing++;
3150 return true;
3153 // If a problem occurs while building the alias groups we need to delete
3154 // this SCoP and pretend it wasn't valid in the first place. To this end
3155 // we make the assumed context infeasible.
3156 invalidate(ALIASING, DebugLoc());
3158 DEBUG(dbgs() << "\n\nNOTE: Run time checks for " << getNameStr()
3159 << " could not be created as the number of parameters involved "
3160 "is too high. The SCoP will be "
3161 "dismissed.\nUse:\n\t--polly-rtc-max-parameters=X\nto adjust "
3162 "the maximal number of parameters but be advised that the "
3163 "compile time might increase exponentially.\n\n");
3164 return false;
3167 std::tuple<Scop::AliasGroupVectorTy, DenseSet<const ScopArrayInfo *>>
3168 Scop::buildAliasGroupsForAccesses(AliasAnalysis &AA) {
3169 AliasSetTracker AST(AA);
3171 DenseMap<Value *, MemoryAccess *> PtrToAcc;
3172 DenseSet<const ScopArrayInfo *> HasWriteAccess;
3173 for (ScopStmt &Stmt : *this) {
3175 isl_set *StmtDomain = Stmt.getDomain().release();
3176 bool StmtDomainEmpty = isl_set_is_empty(StmtDomain);
3177 isl_set_free(StmtDomain);
3179 // Statements with an empty domain will never be executed.
3180 if (StmtDomainEmpty)
3181 continue;
3183 for (MemoryAccess *MA : Stmt) {
3184 if (MA->isScalarKind())
3185 continue;
3186 if (!MA->isRead())
3187 HasWriteAccess.insert(MA->getScopArrayInfo());
3188 MemAccInst Acc(MA->getAccessInstruction());
3189 if (MA->isRead() && isa<MemTransferInst>(Acc))
3190 PtrToAcc[cast<MemTransferInst>(Acc)->getRawSource()] = MA;
3191 else
3192 PtrToAcc[Acc.getPointerOperand()] = MA;
3193 AST.add(Acc);
3197 AliasGroupVectorTy AliasGroups;
3198 for (AliasSet &AS : AST) {
3199 if (AS.isMustAlias() || AS.isForwardingAliasSet())
3200 continue;
3201 AliasGroupTy AG;
3202 for (auto &PR : AS)
3203 AG.push_back(PtrToAcc[PR.getValue()]);
3204 if (AG.size() < 2)
3205 continue;
3206 AliasGroups.push_back(std::move(AG));
3209 return std::make_tuple(AliasGroups, HasWriteAccess);
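/// Illustration of the split performed below (assumed example): given a
/// group {A, B, C, D} with access domains
///   Dom(A) = Dom(C) = { S[i] : i < 10 } and
///   Dom(B) = Dom(D) = { S[i] : i >= 10 },
/// A and C can never execute together with B or D, so the group is split
/// into {A, C} and {B, D} and no runtime alias check between the two halves
/// is emitted.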
3212 void Scop::splitAliasGroupsByDomain(AliasGroupVectorTy &AliasGroups) {
3213 for (unsigned u = 0; u < AliasGroups.size(); u++) {
3214 AliasGroupTy NewAG;
3215 AliasGroupTy &AG = AliasGroups[u];
3216 AliasGroupTy::iterator AGI = AG.begin();
3217 isl_set *AGDomain = getAccessDomain(*AGI);
3218 while (AGI != AG.end()) {
3219 MemoryAccess *MA = *AGI;
3220 isl_set *MADomain = getAccessDomain(MA);
3221 if (isl_set_is_disjoint(AGDomain, MADomain)) {
3222 NewAG.push_back(MA);
3223 AGI = AG.erase(AGI);
3224 isl_set_free(MADomain);
3225 } else {
3226 AGDomain = isl_set_union(AGDomain, MADomain);
3227 AGI++;
3230 if (NewAG.size() > 1)
3231 AliasGroups.push_back(std::move(NewAG));
3232 isl_set_free(AGDomain);
3236 bool Scop::buildAliasGroups(AliasAnalysis &AA) {
3237 // To create sound alias checks we perform the following steps:
3238 // o) We partition each group into read-only and non-read-only accesses.
3239 // o) For each group with more than one base pointer we then compute minimal
3240 // and maximal accesses to each array of a group in the read-only and
3241 // non-read-only partitions separately.
3242 AliasGroupVectorTy AliasGroups;
3243 DenseSet<const ScopArrayInfo *> HasWriteAccess;
3245 std::tie(AliasGroups, HasWriteAccess) = buildAliasGroupsForAccesses(AA);
3247 splitAliasGroupsByDomain(AliasGroups);
3249 for (AliasGroupTy &AG : AliasGroups) {
3250 if (!hasFeasibleRuntimeContext())
3251 return false;
3254 IslMaxOperationsGuard MaxOpGuard(getIslCtx(), OptComputeOut);
3255 bool Valid = buildAliasGroup(AG, HasWriteAccess);
3256 if (!Valid)
3257 return false;
3259 if (isl_ctx_last_error(getIslCtx()) == isl_error_quota) {
3260 invalidate(COMPLEXITY, DebugLoc());
3261 return false;
3265 return true;
3268 bool Scop::buildAliasGroup(Scop::AliasGroupTy &AliasGroup,
3269 DenseSet<const ScopArrayInfo *> HasWriteAccess) {
3270 AliasGroupTy ReadOnlyAccesses;
3271 AliasGroupTy ReadWriteAccesses;
3272 SmallPtrSet<const ScopArrayInfo *, 4> ReadWriteArrays;
3273 SmallPtrSet<const ScopArrayInfo *, 4> ReadOnlyArrays;
3275 if (AliasGroup.size() < 2)
3276 return true;
3278 for (MemoryAccess *Access : AliasGroup) {
3279 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "PossibleAlias",
3280 Access->getAccessInstruction())
3281 << "Possibly aliasing pointer, use restrict keyword.");
3282 const ScopArrayInfo *Array = Access->getScopArrayInfo();
3283 if (HasWriteAccess.count(Array)) {
3284 ReadWriteArrays.insert(Array);
3285 ReadWriteAccesses.push_back(Access);
3286 } else {
3287 ReadOnlyArrays.insert(Array);
3288 ReadOnlyAccesses.push_back(Access);
3292 // If there are no read-only pointers and fewer than two read-write
3293 // pointers, no alias check is needed.
3294 if (ReadOnlyAccesses.empty() && ReadWriteArrays.size() <= 1)
3295 return true;
3297 // If there is no read-write pointer, no alias check is needed.
3298 if (ReadWriteArrays.empty())
3299 return true;
3301 // For non-affine accesses, no alias check can be generated as we cannot
3302 // compute a sufficiently tight lower and upper bound: bail out.
3303 for (MemoryAccess *MA : AliasGroup) {
3304 if (!MA->isAffine()) {
3305 invalidate(ALIASING, MA->getAccessInstruction()->getDebugLoc(),
3306 MA->getAccessInstruction()->getParent());
3307 return false;
3311 // Ensure that for all memory accesses for which we generate alias checks,
3312 // their base pointers are available.
3313 for (MemoryAccess *MA : AliasGroup) {
3314 if (MemoryAccess *BasePtrMA = lookupBasePtrAccess(MA))
3315 addRequiredInvariantLoad(
3316 cast<LoadInst>(BasePtrMA->getAccessInstruction()));
3319 MinMaxAliasGroups.emplace_back();
3320 MinMaxVectorPairTy &pair = MinMaxAliasGroups.back();
3321 MinMaxVectorTy &MinMaxAccessesReadWrite = pair.first;
3322 MinMaxVectorTy &MinMaxAccessesReadOnly = pair.second;
3324 bool Valid;
3326 Valid =
3327 calculateMinMaxAccess(ReadWriteAccesses, *this, MinMaxAccessesReadWrite);
3329 if (!Valid)
3330 return false;
3332 // Bail out if the number of values we need to compare is too large.
3333 // This is important as the number of comparisons grows quadratically with
3334 // the number of values we need to compare.
3335 if (MinMaxAccessesReadWrite.size() + ReadOnlyArrays.size() >
3336 RunTimeChecksMaxArraysPerGroup)
3337 return false;
3339 Valid =
3340 calculateMinMaxAccess(ReadOnlyAccesses, *this, MinMaxAccessesReadOnly);
3342 if (!Valid)
3343 return false;
3345 return true;
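// Note (added summary): the MinMax pairs collected above are later turned
// into runtime non-overlap checks of roughly the form
//   max(RangeA) <= min(RangeB) || max(RangeB) <= min(RangeA)
// for each pair of arrays that must be checked, which is why their number
// is bounded via RunTimeChecksMaxArraysPerGroup.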
3348 /// Get the smallest loop that contains @p S but is not in @p S.
3349 static Loop *getLoopSurroundingScop(Scop &S, LoopInfo &LI) {
3350 // Start with the smallest loop containing the entry and expand that
3351 // loop until it contains all blocks in the region. If there is a loop
3352 // containing all blocks in the region, check if it is itself contained,
3353 // and if so take the parent loop, as it will be the smallest loop that
3354 // contains the region but is not contained by it.
3355 Loop *L = LI.getLoopFor(S.getEntry());
3356 while (L) {
3357 bool AllContained = true;
3358 for (auto *BB : S.blocks())
3359 AllContained &= L->contains(BB);
3360 if (AllContained)
3361 break;
3362 L = L->getParentLoop();
3365 return L ? (S.contains(L) ? L->getParentLoop() : L) : nullptr;
3368 int Scop::NextScopID = 0;
3370 std::string Scop::CurrentFunc;
3372 int Scop::getNextID(std::string ParentFunc) {
3373 if (ParentFunc != CurrentFunc) {
3374 CurrentFunc = ParentFunc;
3375 NextScopID = 0;
3377 return NextScopID++;
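// For example: the first two SCoPs detected in a function get IDs 0 and 1;
// once detection moves on to another function, numbering restarts at 0.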
3380 Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
3381 DominatorTree &DT, ScopDetection::DetectionContext &DC,
3382 OptimizationRemarkEmitter &ORE)
3383 : SE(&ScalarEvolution), DT(&DT), R(R), name(R.getNameStr()),
3384 HasSingleExitEdge(R.getExitingBlock()), DC(DC), ORE(ORE),
3385 IslCtx(isl_ctx_alloc(), isl_ctx_free), Affinator(this, LI),
3386 ID(getNextID((*R.getEntry()->getParent()).getName().str())) {
3387 if (IslOnErrorAbort)
3388 isl_options_set_on_error(getIslCtx(), ISL_ON_ERROR_ABORT);
3389 buildContext();
3392 Scop::~Scop() {
3393 isl_set_free(Context);
3394 isl_set_free(AssumedContext);
3395 isl_set_free(InvalidContext);
3396 isl_schedule_free(Schedule);
3398 ParameterIds.clear();
3400 for (auto &AS : RecordedAssumptions)
3401 isl_set_free(AS.Set);
3403 // Free the alias groups
3404 for (MinMaxVectorPairTy &MinMaxAccessPair : MinMaxAliasGroups) {
3405 for (MinMaxAccessTy &MMA : MinMaxAccessPair.first) {
3406 isl_pw_multi_aff_free(MMA.first);
3407 isl_pw_multi_aff_free(MMA.second);
3409 for (MinMaxAccessTy &MMA : MinMaxAccessPair.second) {
3410 isl_pw_multi_aff_free(MMA.first);
3411 isl_pw_multi_aff_free(MMA.second);
3415 for (const auto &IAClass : InvariantEquivClasses)
3416 isl_set_free(IAClass.ExecutionContext);
3418 // Explicitly release all Scop objects and the underlying isl objects before
3419 // we release the isl context.
3420 Stmts.clear();
3421 ScopArrayInfoSet.clear();
3422 ScopArrayInfoMap.clear();
3423 ScopArrayNameMap.clear();
3424 AccessFunctions.clear();
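/// Explanatory sketch for the folding below (added, not in the original
/// source): if a 2D array A of size n x m is only ever accessed as
/// A[4*i][j], the common stride 4 in the outer dimension is divided out of
/// all access functions, the accesses become A[i][j], and the innermost
/// dimension size grows to 4*m. The linearized offset is unchanged:
///   4*i*m + j == i*(4*m) + j.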
3427 void Scop::foldSizeConstantsToRight() {
3428 isl_union_set *Accessed = isl_union_map_range(getAccesses().release());
3430 for (auto Array : arrays()) {
3431 if (Array->getNumberOfDimensions() <= 1)
3432 continue;
3434 isl_space *Space = Array->getSpace().release();
3436 Space = isl_space_align_params(Space, isl_union_set_get_space(Accessed));
3438 if (!isl_union_set_contains(Accessed, Space)) {
3439 isl_space_free(Space);
3440 continue;
3443 isl_set *Elements = isl_union_set_extract_set(Accessed, Space);
3445 isl_map *Transform =
3446 isl_map_universe(isl_space_map_from_set(Array->getSpace().release()));
3448 std::vector<int> Int;
3450 int Dims = isl_set_dim(Elements, isl_dim_set);
3451 for (int i = 0; i < Dims; i++) {
3452 isl_set *DimOnly =
3453 isl_set_project_out(isl_set_copy(Elements), isl_dim_set, 0, i);
3454 DimOnly = isl_set_project_out(DimOnly, isl_dim_set, 1, Dims - i - 1);
3455 DimOnly = isl_set_lower_bound_si(DimOnly, isl_dim_set, 0, 0);
3457 isl_basic_set *DimHull = isl_set_affine_hull(DimOnly);
3459 if (i == Dims - 1) {
3460 Int.push_back(1);
3461 Transform = isl_map_equate(Transform, isl_dim_in, i, isl_dim_out, i);
3462 isl_basic_set_free(DimHull);
3463 continue;
3466 if (isl_basic_set_dim(DimHull, isl_dim_div) == 1) {
3467 isl_aff *Diff = isl_basic_set_get_div(DimHull, 0);
3468 isl_val *Val = isl_aff_get_denominator_val(Diff);
3469 isl_aff_free(Diff);
3471 int ValInt = 1;
3473 if (isl_val_is_int(Val))
3474 ValInt = isl_val_get_num_si(Val);
3475 isl_val_free(Val);
3477 Int.push_back(ValInt);
3479 isl_constraint *C = isl_constraint_alloc_equality(
3480 isl_local_space_from_space(isl_map_get_space(Transform)));
3481 C = isl_constraint_set_coefficient_si(C, isl_dim_out, i, ValInt);
3482 C = isl_constraint_set_coefficient_si(C, isl_dim_in, i, -1);
3483 Transform = isl_map_add_constraint(Transform, C);
3484 isl_basic_set_free(DimHull);
3485 continue;
3488 isl_basic_set *ZeroSet = isl_basic_set_copy(DimHull);
3489 ZeroSet = isl_basic_set_fix_si(ZeroSet, isl_dim_set, 0, 0);
3491 int ValInt = 1;
3492 if (isl_basic_set_is_equal(ZeroSet, DimHull)) {
3493 ValInt = 0;
3496 Int.push_back(ValInt);
3497 Transform = isl_map_equate(Transform, isl_dim_in, i, isl_dim_out, i);
3498 isl_basic_set_free(DimHull);
3499 isl_basic_set_free(ZeroSet);
3502 isl_set *MappedElements = isl_map_domain(isl_map_copy(Transform));
3504 if (!isl_set_is_subset(Elements, MappedElements)) {
3505 isl_set_free(Elements);
3506 isl_set_free(MappedElements);
3507 isl_map_free(Transform);
3508 continue;
3511 isl_set_free(MappedElements);
3513 bool CanFold = true;
3515 if (Int[0] <= 1)
3516 CanFold = false;
3518 unsigned NumDims = Array->getNumberOfDimensions();
3519 for (unsigned i = 1; i < NumDims - 1; i++)
3520 if (Int[0] != Int[i] && Int[i])
3521 CanFold = false;
3523 if (!CanFold) {
3524 isl_set_free(Elements);
3525 isl_map_free(Transform);
3526 continue;
3529 for (auto &Access : AccessFunctions)
3530 if (Access->getScopArrayInfo() == Array)
3531 Access->setAccessRelation(Access->getAccessRelation().apply_range(
3532 isl::manage(isl_map_copy(Transform))));
3534 isl_map_free(Transform);
3536 std::vector<const SCEV *> Sizes;
3537 for (unsigned i = 0; i < NumDims; i++) {
3538 auto Size = Array->getDimensionSize(i);
3540 if (i == NumDims - 1)
3541 Size = SE->getMulExpr(Size, SE->getConstant(Size->getType(), Int[0]));
3542 Sizes.push_back(Size);
3545 Array->updateSizes(Sizes, false /* CheckConsistency */);
3547 isl_set_free(Elements);
3549 isl_union_set_free(Accessed);
3552 void Scop::markFortranArrays() {
3553 for (ScopStmt &Stmt : Stmts) {
3554 for (MemoryAccess *MemAcc : Stmt) {
3555 Value *FAD = MemAcc->getFortranArrayDescriptor();
3556 if (!FAD)
3557 continue;
3559 // TODO: const_cast-ing to edit
3560 ScopArrayInfo *SAI =
3561 const_cast<ScopArrayInfo *>(MemAcc->getLatestScopArrayInfo());
3562 assert(SAI && "memory access into a Fortran array does not "
3563 "have an associated ScopArrayInfo");
3564 SAI->applyAndSetFAD(FAD);
3569 void Scop::finalizeAccesses() {
3570 updateAccessDimensionality();
3571 foldSizeConstantsToRight();
3572 foldAccessRelations();
3573 assumeNoOutOfBounds();
3574 markFortranArrays();
3577 void Scop::updateAccessDimensionality() {
3578 // Check all array accesses for each base pointer and find a (virtual) element
3579 // size for the base pointer that divides all access functions.
3580 for (ScopStmt &Stmt : *this)
3581 for (MemoryAccess *Access : Stmt) {
3582 if (!Access->isArrayKind())
3583 continue;
3584 ScopArrayInfo *Array =
3585 const_cast<ScopArrayInfo *>(Access->getScopArrayInfo());
3587 if (Array->getNumberOfDimensions() != 1)
3588 continue;
3589 unsigned DivisibleSize = Array->getElemSizeInBytes();
3590 const SCEV *Subscript = Access->getSubscript(0);
3591 while (!isDivisible(Subscript, DivisibleSize, *SE))
3592 DivisibleSize /= 2;
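// E.g. (added note): a float array (4-byte elements) that is also accessed
// at 2-byte granularity ends up with DivisibleSize == 2 and is remodeled
// with i16 elements, so every subscript remains an integral element index.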
3593 auto *Ty = IntegerType::get(SE->getContext(), DivisibleSize * 8);
3594 Array->updateElementType(Ty);
3597 for (auto &Stmt : *this)
3598 for (auto &Access : Stmt)
3599 Access->updateDimensionality();
3602 void Scop::foldAccessRelations() {
3603 for (auto &Stmt : *this)
3604 for (auto &Access : Stmt)
3605 Access->foldAccessRelation();
3608 void Scop::assumeNoOutOfBounds() {
3609 for (auto &Stmt : *this)
3610 for (auto &Access : Stmt)
3611 Access->assumeNoOutOfBound();
3614 void Scop::removeFromStmtMap(ScopStmt &Stmt) {
3615 for (Instruction *Inst : Stmt.getInstructions())
3616 InstStmtMap.erase(Inst);
3618 if (Stmt.isRegionStmt()) {
3619 for (BasicBlock *BB : Stmt.getRegion()->blocks()) {
3620 StmtMap.erase(BB);
3621 // Skip entry basic block, as its instructions are already deleted as
3622 // part of the statement's instruction list.
3623 if (BB == Stmt.getEntryBlock())
3624 continue;
3625 for (Instruction &Inst : *BB)
3626 InstStmtMap.erase(&Inst);
3628 } else {
3629 auto StmtMapIt = StmtMap.find(Stmt.getBasicBlock());
3630 if (StmtMapIt != StmtMap.end())
3631 StmtMapIt->second.erase(std::remove(StmtMapIt->second.begin(),
3632 StmtMapIt->second.end(), &Stmt),
3633 StmtMapIt->second.end());
3634 for (Instruction *Inst : Stmt.getInstructions())
3635 InstStmtMap.erase(Inst);
3639 void Scop::removeStmts(std::function<bool(ScopStmt &)> ShouldDelete) {
3640 for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
3641 if (!ShouldDelete(*StmtIt)) {
3642 StmtIt++;
3643 continue;
3646 removeFromStmtMap(*StmtIt);
3647 StmtIt = Stmts.erase(StmtIt);
3651 void Scop::removeStmtNotInDomainMap() {
3652 auto ShouldDelete = [this](ScopStmt &Stmt) -> bool {
3653 return !this->DomainMap.lookup(Stmt.getEntryBlock());
3655 removeStmts(ShouldDelete);
3658 void Scop::simplifySCoP(bool AfterHoisting) {
3659 auto ShouldDelete = [AfterHoisting](ScopStmt &Stmt) -> bool {
3660 bool RemoveStmt = Stmt.isEmpty();
3662 // Remove read only statements only after invariant load hoisting.
3663 if (!RemoveStmt && AfterHoisting) {
3664 bool OnlyRead = true;
3665 for (MemoryAccess *MA : Stmt) {
3666 if (MA->isRead())
3667 continue;
3669 OnlyRead = false;
3670 break;
3673 RemoveStmt = OnlyRead;
3675 return RemoveStmt;
3678 removeStmts(ShouldDelete);
3681 InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
3682 LoadInst *LInst = dyn_cast<LoadInst>(Val);
3683 if (!LInst)
3684 return nullptr;
3686 if (Value *Rep = InvEquivClassVMap.lookup(LInst))
3687 LInst = cast<LoadInst>(Rep);
3689 Type *Ty = LInst->getType();
3690 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
3691 for (auto &IAClass : InvariantEquivClasses) {
3692 if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
3693 continue;
3695 auto &MAs = IAClass.InvariantAccesses;
3696 for (auto *MA : MAs)
3697 if (MA->getAccessInstruction() == Val)
3698 return &IAClass;
3701 return nullptr;
3704 bool isAParameter(llvm::Value *maybeParam, const Function &F) {
3705 for (const llvm::Argument &Arg : F.args())
3706 if (&Arg == maybeParam)
3707 return true;
3709 return false;
3712 bool Scop::canAlwaysBeHoisted(MemoryAccess *MA, bool StmtInvalidCtxIsEmpty,
3713 bool MAInvalidCtxIsEmpty,
3714 bool NonHoistableCtxIsEmpty) {
3715 LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
3716 const DataLayout &DL = LInst->getParent()->getModule()->getDataLayout();
3717 if (PollyAllowDereferenceOfAllFunctionParams &&
3718 isAParameter(LInst->getPointerOperand(), getFunction()))
3719 return true;
3721 // TODO: We can provide more information for better but more expensive
3722 // results.
3723 if (!isDereferenceableAndAlignedPointer(LInst->getPointerOperand(),
3724 LInst->getAlignment(), DL))
3725 return false;
3727 // If the location might be overwritten we do not hoist it unconditionally.
3729 // TODO: This is probably too conservative.
3730 if (!NonHoistableCtxIsEmpty)
3731 return false;
3733 // If a dereferenceable load is in a statement that is modeled precisely we
3734 // can hoist it.
3735 if (StmtInvalidCtxIsEmpty && MAInvalidCtxIsEmpty)
3736 return true;
3738 // Even if the statement is not modeled precisely we can hoist the load if it
3739 // does not involve any parameters that might have been specialized by the
3740 // statement domain.
3741 for (unsigned u = 0, e = MA->getNumSubscripts(); u < e; u++)
3742 if (!isa<SCEVConstant>(MA->getSubscript(u)))
3743 return false;
3744 return true;
3747 void Scop::addInvariantLoads(ScopStmt &Stmt, InvariantAccessesTy &InvMAs) {
3748 if (InvMAs.empty())
3749 return;
3751 isl::set StmtInvalidCtx = Stmt.getInvalidContext();
3752 bool StmtInvalidCtxIsEmpty = StmtInvalidCtx.is_empty();
3754 // Get the context under which the statement is executed but remove the error
3755 // context under which this statement is reached.
3756 isl::set DomainCtx = Stmt.getDomain().params();
3757 DomainCtx = DomainCtx.subtract(StmtInvalidCtx);
3759 if (isl_set_n_basic_set(DomainCtx.get()) >= MaxDisjunctsInDomain) {
3760 auto *AccInst = InvMAs.front().MA->getAccessInstruction();
3761 invalidate(COMPLEXITY, AccInst->getDebugLoc(), AccInst->getParent());
3762 return;
3765 // Project out all parameters that relate to loads in the statement. Otherwise
3766 // we could have cyclic dependences on the constraints under which the
3767 // hoisted loads are executed and we could not determine an order in which to
3768 // pre-load them. This happens because not only lower bounds are part of the
3769 // domain but also upper bounds.
3770 for (auto &InvMA : InvMAs) {
3771 auto *MA = InvMA.MA;
3772 Instruction *AccInst = MA->getAccessInstruction();
3773 if (SE->isSCEVable(AccInst->getType())) {
3774 SetVector<Value *> Values;
3775 for (const SCEV *Parameter : Parameters) {
3776 Values.clear();
3777 findValues(Parameter, *SE, Values);
3778 if (!Values.count(AccInst))
3779 continue;
3781 if (isl::id ParamId = getIdForParam(Parameter)) {
3782 int Dim = DomainCtx.find_dim_by_id(isl::dim::param, ParamId);
3783 if (Dim >= 0)
3784 DomainCtx = DomainCtx.eliminate(isl::dim::param, Dim, 1);
3790 for (auto &InvMA : InvMAs) {
3791 auto *MA = InvMA.MA;
3792 isl::set NHCtx = InvMA.NonHoistableCtx;
3794 // Check for another invariant access that accesses the same location as
3795 // MA and if found consolidate them. Otherwise create a new equivalence
3796 // class at the end of InvariantEquivClasses.
3797 LoadInst *LInst = cast<LoadInst>(MA->getAccessInstruction());
3798 Type *Ty = LInst->getType();
3799 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
3801 isl::set MAInvalidCtx = MA->getInvalidContext();
3802 bool NonHoistableCtxIsEmpty = NHCtx.is_empty();
3803 bool MAInvalidCtxIsEmpty = MAInvalidCtx.is_empty();
3805 isl::set MACtx;
3806 // Check if we know that this pointer can be speculatively accessed.
3807 if (canAlwaysBeHoisted(MA, StmtInvalidCtxIsEmpty, MAInvalidCtxIsEmpty,
3808 NonHoistableCtxIsEmpty)) {
3809 MACtx = isl::set::universe(DomainCtx.get_space());
3810 } else {
3811 MACtx = DomainCtx;
3812 MACtx = MACtx.subtract(MAInvalidCtx.unite(NHCtx));
3813 MACtx = MACtx.gist_params(getContext());
3816 bool Consolidated = false;
3817 for (auto &IAClass : InvariantEquivClasses) {
3818 if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
3819 continue;
3821 // If the pointer and the type are equal, check if the access function
3822 // w.r.t. the domain is equal too. It can happen that the domain fixes
3823 // parameter values and that these differ for distinct parts of the
3824 // SCoP. If this happens we cannot consolidate the loads but need to
3825 // create a new invariant load equivalence class.
3826 auto &MAs = IAClass.InvariantAccesses;
3827 if (!MAs.empty()) {
3828 auto *LastMA = MAs.front();
3830 isl::set AR = MA->getAccessRelation().range();
3831 isl::set LastAR = LastMA->getAccessRelation().range();
3832 bool SameAR = AR.is_equal(LastAR);
3834 if (!SameAR)
3835 continue;
3838 // Add MA to the list of accesses that are in this class.
3839 MAs.push_front(MA);
3841 Consolidated = true;
3843 // Unify the execution context of the class and this statement.
3844 isl::set IAClassDomainCtx = isl::manage(IAClass.ExecutionContext);
3845 if (IAClassDomainCtx)
3846 IAClassDomainCtx = IAClassDomainCtx.unite(MACtx).coalesce();
3847 else
3848 IAClassDomainCtx = MACtx;
3849 IAClass.ExecutionContext = IAClassDomainCtx.release();
3850 break;
3853 if (Consolidated)
3854 continue;
3856 // If we did not consolidate MA, thus did not find an equivalence class
3857 // for it, we create a new one.
3858 InvariantEquivClasses.emplace_back(InvariantEquivClassTy{
3859 PointerSCEV, MemoryAccessList{MA}, MACtx.release(), Ty});
3863 /// Check if an access range is too complex.
3865 /// An access range is too complex if it contains either many disjuncts or
3866 /// very complex expressions. As a simple heuristic, we assume a set to
3867 /// be too complex if the sum of existentially quantified dimensions and
3868 /// set dimensions is larger than a threshold. This reliably detects both
3869 /// sets with many disjuncts as well as sets with many divisions as they
3870 /// arise in h264.
3872 /// @param AccessRange The range to check for complexity.
3874 /// @returns True if the access range is too complex.
3875 static bool isAccessRangeTooComplex(isl::set AccessRange) {
3876 unsigned NumTotalDims = 0;
3878 auto CountDimensions = [&NumTotalDims](isl::basic_set BSet) -> isl::stat {
3879 NumTotalDims += BSet.dim(isl::dim::div);
3880 NumTotalDims += BSet.dim(isl::dim::set);
3881 return isl::stat::ok;
3884 AccessRange.foreach_basic_set(CountDimensions);
3886 if (NumTotalDims > MaxDimensionsInAccessRange)
3887 return true;
3889 return false;
3892 isl::set Scop::getNonHoistableCtx(MemoryAccess *Access, isl::union_map Writes) {
3893 // TODO: Loads that are not loop carried, hence are in a statement with
3894 // zero iterators, are by construction invariant, though we
3895 // currently "hoist" them anyway. This is necessary because we allow
3896 // them to be treated as parameters (e.g., in conditions) and our code
3897 // generation would otherwise use the old value.
3899 auto &Stmt = *Access->getStatement();
3900 BasicBlock *BB = Stmt.getEntryBlock();
3902 if (Access->isScalarKind() || Access->isWrite() || !Access->isAffine() ||
3903 Access->isMemoryIntrinsic())
3904 return nullptr;
3906 // Skip accesses that have an invariant base pointer which is defined but
3907 // not loaded inside the SCoP. This can happen, e.g., if a readnone call
3908 // returns a pointer that is used as a base address. However, as we want
3909 // to hoist indirect pointers, we allow the base pointer to be defined in
3910 // the region if it is also a memory access. Each ScopArrayInfo object
3911 // that has a base pointer origin has a base pointer that is loaded and
3912 // invariant, thus it will be hoisted too. However, if there is
3913 // no base pointer origin we check that the base pointer is defined
3914 // outside the region.
3915 auto *LI = cast<LoadInst>(Access->getAccessInstruction());
3916 if (hasNonHoistableBasePtrInScop(Access, Writes))
3917 return nullptr;
3919 isl::map AccessRelation = Access->getAccessRelation();
3920 assert(!AccessRelation.is_empty());
3922 if (AccessRelation.involves_dims(isl::dim::in, 0, Stmt.getNumIterators()))
3923 return nullptr;
3925 AccessRelation = AccessRelation.intersect_domain(Stmt.getDomain());
3926 isl::set SafeToLoad;
3928 auto &DL = getFunction().getParent()->getDataLayout();
3929 if (isSafeToLoadUnconditionally(LI->getPointerOperand(), LI->getAlignment(),
3930 DL)) {
3931 SafeToLoad = isl::set::universe(AccessRelation.get_space().range());
3932 } else if (BB != LI->getParent()) {
3933 // Skip accesses in non-affine subregions as they might not be executed
3934 // under the same condition as the entry of the non-affine subregion.
3935 return nullptr;
3936 } else {
3937 SafeToLoad = AccessRelation.range();
3940 if (isAccessRangeTooComplex(AccessRelation.range()))
3941 return nullptr;
3943 isl::union_map Written = Writes.intersect_range(SafeToLoad);
3944 isl::set WrittenCtx = Written.params();
3945 bool IsWritten = !WrittenCtx.is_empty();
3947 if (!IsWritten)
3948 return WrittenCtx;
3950 WrittenCtx = WrittenCtx.remove_divs();
3951 bool TooComplex =
3952 isl_set_n_basic_set(WrittenCtx.get()) >= MaxDisjunctsInDomain;
3953 if (TooComplex || !isRequiredInvariantLoad(LI))
3954 return nullptr;
3956 addAssumption(INVARIANTLOAD, WrittenCtx.copy(), LI->getDebugLoc(),
3957 AS_RESTRICTION, LI->getParent());
3958 return WrittenCtx;
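// Note on the return value (added summary): a null set means the access
// cannot be hoisted at all, an empty set means it can be hoisted
// unconditionally, and a non-empty parameter set describes the parameter
// values under which the location would be overwritten, which was recorded
// above as an INVARIANTLOAD restriction.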
3961 void Scop::verifyInvariantLoads() {
3962 auto &RIL = getRequiredInvariantLoads();
3963 for (LoadInst *LI : RIL) {
3964 assert(LI && contains(LI));
3965 // If there exists a statement in the scop which has a memory access for
3966 // @p LI, then mark this scop as infeasible for optimization.
3967 for (ScopStmt &Stmt : Stmts)
3968 if (Stmt.getArrayAccessOrNULLFor(LI)) {
3969 invalidate(INVARIANTLOAD, LI->getDebugLoc(), LI->getParent());
3970 return;
3975 void Scop::hoistInvariantLoads() {
3976 if (!PollyInvariantLoadHoisting)
3977 return;
3979 isl::union_map Writes = getWrites();
3980 for (ScopStmt &Stmt : *this) {
3981 InvariantAccessesTy InvariantAccesses;
3983 for (MemoryAccess *Access : Stmt)
3984 if (isl::set NHCtx = getNonHoistableCtx(Access, Writes))
3985 InvariantAccesses.push_back({Access, NHCtx});
3987 // Transfer the memory access from the statement to the SCoP.
3988 for (auto InvMA : InvariantAccesses)
3989 Stmt.removeMemoryAccess(InvMA.MA);
3990 addInvariantLoads(Stmt, InvariantAccesses);
3994 /// Find the canonical ScopArrayInfo object for a set of hoisted invariant
3995 /// loads. The canonical array is the one that corresponds to the
3996 /// first load in the list of accesses which is used as base pointer of a
3997 /// scop array.
3998 static const ScopArrayInfo *findCanonicalArray(Scop *S,
3999 MemoryAccessList &Accesses) {
4000 for (MemoryAccess *Access : Accesses) {
4001 const ScopArrayInfo *CanonicalArray = S->getScopArrayInfoOrNull(
4002 Access->getAccessInstruction(), MemoryKind::Array);
4003 if (CanonicalArray)
4004 return CanonicalArray;
4006 return nullptr;
4009 /// Check if @p Array serves as the base array in an invariant load.
4010 static bool isUsedForIndirectHoistedLoad(Scop *S, const ScopArrayInfo *Array) {
4011 for (InvariantEquivClassTy &EqClass2 : S->getInvariantAccesses())
4012 for (MemoryAccess *Access2 : EqClass2.InvariantAccesses)
4013 if (Access2->getScopArrayInfo() == Array)
4014 return true;
4015 return false;
4018 /// Replace the base pointer arrays in all memory accesses referencing @p Old,
4019 /// with a reference to @p New.
4020 static void replaceBasePtrArrays(Scop *S, const ScopArrayInfo *Old,
4021 const ScopArrayInfo *New) {
4022 for (ScopStmt &Stmt : *S)
4023 for (MemoryAccess *Access : Stmt) {
4024 if (Access->getLatestScopArrayInfo() != Old)
4025 continue;
4027 isl::id Id = New->getBasePtrId();
4028 isl::map Map = Access->getAccessRelation();
4029 Map = Map.set_tuple_id(isl::dim::out, Id);
4030 Access->setAccessRelation(Map);
4034 void Scop::canonicalizeDynamicBasePtrs() {
4035 for (InvariantEquivClassTy &EqClass : InvariantEquivClasses) {
4036 MemoryAccessList &BasePtrAccesses = EqClass.InvariantAccesses;
4038 const ScopArrayInfo *CanonicalBasePtrSAI =
4039 findCanonicalArray(this, BasePtrAccesses);
4041 if (!CanonicalBasePtrSAI)
4042 continue;
4044 for (MemoryAccess *BasePtrAccess : BasePtrAccesses) {
4045 const ScopArrayInfo *BasePtrSAI = getScopArrayInfoOrNull(
4046 BasePtrAccess->getAccessInstruction(), MemoryKind::Array);
4047 if (!BasePtrSAI || BasePtrSAI == CanonicalBasePtrSAI ||
4048 !BasePtrSAI->isCompatibleWith(CanonicalBasePtrSAI))
4049 continue;
4051 // We currently do not canonicalize arrays where some accesses are
4052 // hoisted as invariant loads. If we did, we would need to update the
4053 // access functions of the invariant loads as well. However, as this is
4054 // not a very common situation, we leave this for now to avoid further
4055 // complexity increases.
4056 if (isUsedForIndirectHoistedLoad(this, BasePtrSAI))
4057 continue;
4059 replaceBasePtrArrays(this, BasePtrSAI, CanonicalBasePtrSAI);
4064 ScopArrayInfo *Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
4065 ArrayRef<const SCEV *> Sizes,
4066 MemoryKind Kind,
4067 const char *BaseName) {
4068 assert((BasePtr || BaseName) &&
4069 "BasePtr and BaseName can not be nullptr at the same time.");
4070 assert(!(BasePtr && BaseName) && "BaseName is redundant.");
4071 auto &SAI = BasePtr ? ScopArrayInfoMap[std::make_pair(BasePtr, Kind)]
4072 : ScopArrayNameMap[BaseName];
4073 if (!SAI) {
4074 auto &DL = getFunction().getParent()->getDataLayout();
4075 SAI.reset(new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
4076 DL, this, BaseName));
4077 ScopArrayInfoSet.insert(SAI.get());
4078 } else {
4079 SAI->updateElementType(ElementType);
4080 // In case of mismatching array sizes, we bail out by setting the run-time
4081 // context to false.
4082 if (!SAI->updateSizes(Sizes))
4083 invalidate(DELINEARIZATION, DebugLoc());
4085 return SAI.get();
4088 ScopArrayInfo *Scop::createScopArrayInfo(Type *ElementType,
4089 const std::string &BaseName,
4090 const std::vector<unsigned> &Sizes) {
4091 auto *DimSizeType = Type::getInt64Ty(getSE()->getContext());
4092 std::vector<const SCEV *> SCEVSizes;
4094 for (auto size : Sizes)
4095 if (size)
4096 SCEVSizes.push_back(getSE()->getConstant(DimSizeType, size, false));
4097 else
4098 SCEVSizes.push_back(nullptr);
4100 auto *SAI = getOrCreateScopArrayInfo(nullptr, ElementType, SCEVSizes,
4101 MemoryKind::Array, BaseName.c_str());
4102 return SAI;
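// Hypothetical usage sketch (added): createScopArrayInfo(Int64Ty, "Tmp",
// {32, 0}) creates (or reuses) the name-keyed array "Tmp" with a constant
// outer dimension of 32 and an unknown inner dimension (a size of 0 is
// translated to a nullptr SCEV above).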
4105 const ScopArrayInfo *Scop::getScopArrayInfoOrNull(Value *BasePtr,
4106 MemoryKind Kind) {
4107 auto *SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)].get();
4108 return SAI;
4111 const ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr, MemoryKind Kind) {
4112 auto *SAI = getScopArrayInfoOrNull(BasePtr, Kind);
4113 assert(SAI && "No ScopArrayInfo available for this base pointer");
4114 return SAI;
4117 std::string Scop::getContextStr() const { return getContext().to_str(); }
4119 std::string Scop::getAssumedContextStr() const {
4120 assert(AssumedContext && "Assumed context not yet built");
4121 return stringFromIslObj(AssumedContext);
4124 std::string Scop::getInvalidContextStr() const {
4125 return stringFromIslObj(InvalidContext);
4128 std::string Scop::getNameStr() const {
4129 std::string ExitName, EntryName;
4130 std::tie(EntryName, ExitName) = getEntryExitStr();
4131 return EntryName + "---" + ExitName;
4134 std::pair<std::string, std::string> Scop::getEntryExitStr() const {
4135 std::string ExitName, EntryName;
4136 raw_string_ostream ExitStr(ExitName);
4137 raw_string_ostream EntryStr(EntryName);
4139 R.getEntry()->printAsOperand(EntryStr, false);
4140 EntryStr.str();
4142 if (R.getExit()) {
4143 R.getExit()->printAsOperand(ExitStr, false);
4144 ExitStr.str();
4145 } else
4146 ExitName = "FunctionExit";
4148 return std::make_pair(EntryName, ExitName);
4151 isl::set Scop::getContext() const { return isl::manage(isl_set_copy(Context)); }
4152 isl::space Scop::getParamSpace() const { return getContext().get_space(); }
4154 isl::space Scop::getFullParamSpace() const {
4155 std::vector<isl::id> FortranIDs;
4156 FortranIDs = getFortranArrayIds(arrays());
4158 isl::space Space = isl::space::params_alloc(
4159 getIslCtx(), ParameterIds.size() + FortranIDs.size());
4161 unsigned PDim = 0;
4162 for (const SCEV *Parameter : Parameters) {
4163 isl::id Id = getIdForParam(Parameter);
4164 Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
4167 for (isl::id Id : FortranIDs)
4168 Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
4170 return Space;
4173 isl::set Scop::getAssumedContext() const {
4174 assert(AssumedContext && "Assumed context not yet built");
4175 return isl::manage(isl_set_copy(AssumedContext));
4178 bool Scop::isProfitable(bool ScalarsAreUnprofitable) const {
4179 if (PollyProcessUnprofitable)
4180 return true;
4182 if (isEmpty())
4183 return false;
4185 unsigned OptimizableStmtsOrLoops = 0;
4186 for (auto &Stmt : *this) {
4187 if (Stmt.getNumIterators() == 0)
4188 continue;
4190 bool ContainsArrayAccs = false;
4191 bool ContainsScalarAccs = false;
4192 for (auto *MA : Stmt) {
4193 if (MA->isRead())
4194 continue;
4195 ContainsArrayAccs |= MA->isLatestArrayKind();
4196 ContainsScalarAccs |= MA->isLatestScalarKind();
4199 if (!ScalarsAreUnprofitable || (ContainsArrayAccs && !ContainsScalarAccs))
4200 OptimizableStmtsOrLoops += Stmt.getNumIterators();
4203 return OptimizableStmtsOrLoops > 1;
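// Note (added): a SCoP whose only qualifying statement sits in a single
// loop contributes exactly 1 to the count above and is thus still rejected;
// at least two loop dimensions (or two statements inside loops) are needed.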
4206 bool Scop::hasFeasibleRuntimeContext() const {
4207 auto *PositiveContext = getAssumedContext().release();
4208 auto *NegativeContext = getInvalidContext().release();
4209 PositiveContext =
4210 addNonEmptyDomainConstraints(isl::manage(PositiveContext)).release();
4211 bool IsFeasible = !(isl_set_is_empty(PositiveContext) ||
4212 isl_set_is_subset(PositiveContext, NegativeContext));
4213 isl_set_free(PositiveContext);
4214 if (!IsFeasible) {
4215 isl_set_free(NegativeContext);
4216 return false;
4219 auto *DomainContext = isl_union_set_params(getDomains().release());
4220 IsFeasible = !isl_set_is_subset(DomainContext, NegativeContext);
4221 IsFeasible &= !isl_set_is_subset(Context, NegativeContext);
4222 isl_set_free(NegativeContext);
4223 isl_set_free(DomainContext);
4225 return IsFeasible;
4228 static std::string toString(AssumptionKind Kind) {
4229 switch (Kind) {
4230 case ALIASING:
4231 return "No-aliasing";
4232 case INBOUNDS:
4233 return "Inbounds";
4234 case WRAPPING:
4235 return "No-overflows";
4236 case UNSIGNED:
4237 return "Signed-unsigned";
4238 case COMPLEXITY:
4239 return "Low complexity";
4240 case PROFITABLE:
4241 return "Profitable";
4242 case ERRORBLOCK:
4243 return "No-error";
4244 case INFINITELOOP:
4245 return "Finite loop";
4246 case INVARIANTLOAD:
4247 return "Invariant load";
4248 case DELINEARIZATION:
4249 return "Delinearization";
4251 llvm_unreachable("Unknown AssumptionKind!");
4254 bool Scop::isEffectiveAssumption(__isl_keep isl_set *Set, AssumptionSign Sign) {
4255 if (Sign == AS_ASSUMPTION) {
4256 if (isl_set_is_subset(Context, Set))
4257 return false;
4259 if (isl_set_is_subset(AssumedContext, Set))
4260 return false;
4261 } else {
4262 if (isl_set_is_disjoint(Set, Context))
4263 return false;
4265 if (isl_set_is_subset(Set, InvalidContext))
4266 return false;
4268 return true;
4271 bool Scop::trackAssumption(AssumptionKind Kind, __isl_keep isl_set *Set,
4272 DebugLoc Loc, AssumptionSign Sign, BasicBlock *BB) {
4273 if (PollyRemarksMinimal && !isEffectiveAssumption(Set, Sign))
4274 return false;
4276 // Never emit trivial assumptions, as they only clutter the output.
4277 if (!PollyRemarksMinimal) {
4278 isl_set *Univ = nullptr;
4279 if (Sign == AS_ASSUMPTION)
4280 Univ = isl_set_universe(isl_set_get_space(Set));
4282 bool IsTrivial = (Sign == AS_RESTRICTION && isl_set_is_empty(Set)) ||
4283 (Sign == AS_ASSUMPTION && isl_set_is_equal(Univ, Set));
4284 isl_set_free(Univ);
4286 if (IsTrivial)
4287 return false;
4290 switch (Kind) {
4291 case ALIASING:
4292 AssumptionsAliasing++;
4293 break;
4294 case INBOUNDS:
4295 AssumptionsInbounds++;
4296 break;
4297 case WRAPPING:
4298 AssumptionsWrapping++;
4299 break;
4300 case UNSIGNED:
4301 AssumptionsUnsigned++;
4302 break;
4303 case COMPLEXITY:
4304 AssumptionsComplexity++;
4305 break;
4306 case PROFITABLE:
4307 AssumptionsUnprofitable++;
4308 break;
4309 case ERRORBLOCK:
4310 AssumptionsErrorBlock++;
4311 break;
4312 case INFINITELOOP:
4313 AssumptionsInfiniteLoop++;
4314 break;
4315 case INVARIANTLOAD:
4316 AssumptionsInvariantLoad++;
4317 break;
4318 case DELINEARIZATION:
4319 AssumptionsDelinearization++;
4320 break;
4323 auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
4324 std::string Msg = toString(Kind) + Suffix + stringFromIslObj(Set);
4325 if (BB)
4326 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc, BB)
4327 << Msg);
4328 else
4329 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc,
4330 R.getEntry())
4331 << Msg);
4332 return true;
4335 void Scop::addAssumption(AssumptionKind Kind, __isl_take isl_set *Set,
4336 DebugLoc Loc, AssumptionSign Sign, BasicBlock *BB) {
4337 // Simplify the assumptions/restrictions first.
4338 Set = isl_set_gist_params(Set, getContext().release());
4340 if (!trackAssumption(Kind, Set, Loc, Sign, BB)) {
4341 isl_set_free(Set);
4342 return;
4345 if (Sign == AS_ASSUMPTION) {
4346 AssumedContext = isl_set_intersect(AssumedContext, Set);
4347 AssumedContext = isl_set_coalesce(AssumedContext);
4348 } else {
4349 InvalidContext = isl_set_union(InvalidContext, Set);
4350 InvalidContext = isl_set_coalesce(InvalidContext);
4354 void Scop::recordAssumption(AssumptionKind Kind, __isl_take isl_set *Set,
4355 DebugLoc Loc, AssumptionSign Sign, BasicBlock *BB) {
4356 assert((isl_set_is_params(Set) || BB) &&
4357 "Assumptions without a basic block must be parameter sets");
4358 RecordedAssumptions.push_back({Kind, Sign, Set, Loc, BB});
4361 void Scop::addRecordedAssumptions() {
4362 while (!RecordedAssumptions.empty()) {
4363 const Assumption &AS = RecordedAssumptions.pop_back_val();
4365 if (!AS.BB) {
4366 addAssumption(AS.Kind, AS.Set, AS.Loc, AS.Sign, nullptr /* BasicBlock */);
4367 continue;
4370 // If the domain was deleted the assumptions are void.
4371 isl_set *Dom = getDomainConditions(AS.BB).release();
4372 if (!Dom) {
4373 isl_set_free(AS.Set);
4374 continue;
4377 // If a basic block was given use its domain to simplify the assumption.
4378 // In case of restrictions we know they only have to hold on the domain,
4379 // thus we can intersect them with the domain of the block. However, for
4380 // assumptions the domain has to imply them, thus:
4381 //
4382 //   Dom => S   <==>   !Dom v S   <==>   !(Dom - S)
4384 // To avoid the complement we will register A - B as a restriction not an
4385 // assumption.
4386 isl_set *S = AS.Set;
4387 if (AS.Sign == AS_RESTRICTION)
4388 S = isl_set_params(isl_set_intersect(S, Dom));
4389 else /* (AS.Sign == AS_ASSUMPTION) */
4390 S = isl_set_params(isl_set_subtract(Dom, S));
4392 addAssumption(AS.Kind, S, AS.Loc, AS_RESTRICTION, AS.BB);
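// Small worked example (added): with Dom = { [i] : 0 <= i < N } and an
// assumption S = { [i] : i < 100 }, Dom - S = { [i] : 100 <= i < N }, whose
// parameter projection is { : N > 100 }: exactly the parameter values for
// which the domain would not imply the assumption, registered here as a
// restriction.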
4396 void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
4397 DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
4398 addAssumption(Kind, isl_set_empty(getParamSpace().release()), Loc,
4399 AS_ASSUMPTION, BB);
4402 isl::set Scop::getInvalidContext() const {
4403 return isl::manage(isl_set_copy(InvalidContext));
4406 void Scop::printContext(raw_ostream &OS) const {
4407 OS << "Context:\n";
4408 OS.indent(4) << Context << "\n";
4410 OS.indent(4) << "Assumed Context:\n";
4411 OS.indent(4) << AssumedContext << "\n";
4413 OS.indent(4) << "Invalid Context:\n";
4414 OS.indent(4) << InvalidContext << "\n";
4416 unsigned Dim = 0;
4417 for (const SCEV *Parameter : Parameters)
4418 OS.indent(4) << "p" << Dim++ << ": " << *Parameter << "\n";
4421 void Scop::printAliasAssumptions(raw_ostream &OS) const {
4422 int noOfGroups = 0;
4423 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
4424 if (Pair.second.size() == 0)
4425 noOfGroups += 1;
4426 else
4427 noOfGroups += Pair.second.size();
4430 OS.indent(4) << "Alias Groups (" << noOfGroups << "):\n";
4431 if (MinMaxAliasGroups.empty()) {
4432 OS.indent(8) << "n/a\n";
4433 return;
4436 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
4438 // If the group has no read only accesses print the write accesses.
4439 if (Pair.second.empty()) {
4440 OS.indent(8) << "[[";
4441 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
4442 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
4443 << ">";
4445 OS << " ]]\n";
4448 for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
4449 OS.indent(8) << "[[";
4450 OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
4451 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
4452 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
4453 << ">";
4455 OS << " ]]\n";
4460 void Scop::printStatements(raw_ostream &OS, bool PrintInstructions) const {
4461 OS << "Statements {\n";
4463 for (const ScopStmt &Stmt : *this) {
4464 OS.indent(4);
4465 Stmt.print(OS, PrintInstructions);
4468 OS.indent(4) << "}\n";
4471 void Scop::printArrayInfo(raw_ostream &OS) const {
4472 OS << "Arrays {\n";
4474 for (auto &Array : arrays())
4475 Array->print(OS);
4477 OS.indent(4) << "}\n";
4479 OS.indent(4) << "Arrays (Bounds as pw_affs) {\n";
4481 for (auto &Array : arrays())
4482 Array->print(OS, /* SizeAsPwAff */ true);
4484 OS.indent(4) << "}\n";
4487 void Scop::print(raw_ostream &OS, bool PrintInstructions) const {
4488 OS.indent(4) << "Function: " << getFunction().getName() << "\n";
4489 OS.indent(4) << "Region: " << getNameStr() << "\n";
4490 OS.indent(4) << "Max Loop Depth: " << getMaxLoopDepth() << "\n";
4491 OS.indent(4) << "Invariant Accesses: {\n";
4492 for (const auto &IAClass : InvariantEquivClasses) {
4493 const auto &MAs = IAClass.InvariantAccesses;
4494 if (MAs.empty()) {
4495 OS.indent(12) << "Class Pointer: " << *IAClass.IdentifyingPointer << "\n";
4496 } else {
4497 MAs.front()->print(OS);
4498 OS.indent(12) << "Execution Context: " << IAClass.ExecutionContext
4499 << "\n";
4502 OS.indent(4) << "}\n";
4503 printContext(OS.indent(4));
4504 printArrayInfo(OS.indent(4));
4505 printAliasAssumptions(OS);
4506 printStatements(OS.indent(4), PrintInstructions);
4509 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4510 LLVM_DUMP_METHOD void Scop::dump() const { print(dbgs(), true); }
4511 #endif
4513 isl_ctx *Scop::getIslCtx() const { return IslCtx.get(); }
4515 __isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
4516 bool NonNegative) {
4517 // First try to use the SCEVAffinator to generate a piecewise defined
4518 // affine function from @p E in the context of @p BB. If that task becomes
4519 // too complex the affinator might return a nullptr. In such a case we invalidate
4520 // the SCoP and return a dummy value. This way we do not need to add error
4521 // handling code to all users of this function.
4522 auto PWAC = Affinator.getPwAff(E, BB);
4523 if (PWAC.first) {
4524 // TODO: We could use a heuristic and either use:
4525 // SCEVAffinator::takeNonNegativeAssumption
4526 // or
4527 // SCEVAffinator::interpretAsUnsigned
4528 // to deal with unsigned or "NonNegative" SCEVs.
4529 if (NonNegative)
4530 Affinator.takeNonNegativeAssumption(PWAC);
4531 return PWAC;
4534 auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
4535 invalidate(COMPLEXITY, DL, BB);
4536 return Affinator.getPwAff(SE->getZero(E->getType()), BB);
4539 isl::union_set Scop::getDomains() const {
4540 isl_space *EmptySpace = isl_space_params_alloc(getIslCtx(), 0);
4541 isl_union_set *Domain = isl_union_set_empty(EmptySpace);
4543 for (const ScopStmt &Stmt : *this)
4544 Domain = isl_union_set_add_set(Domain, Stmt.getDomain().release());
4546 return isl::manage(Domain);
4549 isl::pw_aff Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB) {
4550 PWACtx PWAC = getPwAff(E, BB);
4551 isl_set_free(PWAC.second);
4552 return isl::manage(PWAC.first);
4555 isl::union_map
4556 Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
4557 isl::union_map Accesses = isl::union_map::empty(getParamSpace());
4559 for (ScopStmt &Stmt : *this) {
4560 for (MemoryAccess *MA : Stmt) {
4561 if (!Predicate(*MA))
4562 continue;
4564 isl::set Domain = Stmt.getDomain();
4565 isl::map AccessDomain = MA->getAccessRelation();
4566 AccessDomain = AccessDomain.intersect_domain(Domain);
4567 Accesses = Accesses.add_map(AccessDomain);
4571 return Accesses.coalesce();
4574 isl::union_map Scop::getMustWrites() {
4575 return getAccessesOfType([](MemoryAccess &MA) { return MA.isMustWrite(); });
4578 isl::union_map Scop::getMayWrites() {
4579 return getAccessesOfType([](MemoryAccess &MA) { return MA.isMayWrite(); });
4582 isl::union_map Scop::getWrites() {
4583 return getAccessesOfType([](MemoryAccess &MA) { return MA.isWrite(); });
4586 isl::union_map Scop::getReads() {
4587 return getAccessesOfType([](MemoryAccess &MA) { return MA.isRead(); });
4590 isl::union_map Scop::getAccesses() {
4591 return getAccessesOfType([](MemoryAccess &MA) { return true; });
4594 isl::union_map Scop::getAccesses(ScopArrayInfo *Array) {
4595 return getAccessesOfType(
4596 [Array](MemoryAccess &MA) { return MA.getScopArrayInfo() == Array; });
4599 // Check whether @p Node is an extension node.
4601 // @return isl_bool_error if @p Node is an extension node, isl_bool_true otherwise.
4602 isl_bool isNotExtNode(__isl_keep isl_schedule_node *Node, void *User) {
4603 if (isl_schedule_node_get_type(Node) == isl_schedule_node_extension)
4604 return isl_bool_error;
4605 else
4606 return isl_bool_true;
4609 bool Scop::containsExtensionNode(__isl_keep isl_schedule *Schedule) {
4610 return isl_schedule_foreach_schedule_node_top_down(Schedule, isNotExtNode,
4611 nullptr) == isl_stat_error;
4614 isl::union_map Scop::getSchedule() const {
4615 auto *Tree = getScheduleTree().release();
4616 if (containsExtensionNode(Tree)) {
4617 isl_schedule_free(Tree);
4618 return nullptr;
4620 auto *S = isl_schedule_get_map(Tree);
4621 isl_schedule_free(Tree);
4622 return isl::manage(S);
4625 isl::schedule Scop::getScheduleTree() const {
4626 return isl::manage(isl_schedule_intersect_domain(isl_schedule_copy(Schedule),
4627 getDomains().release()));
4630 void Scop::setSchedule(__isl_take isl_union_map *NewSchedule) {
4631 auto *S = isl_schedule_from_domain(getDomains().release());
4632 S = isl_schedule_insert_partial_schedule(
4633 S, isl_multi_union_pw_aff_from_union_map(NewSchedule));
4634 isl_schedule_free(Schedule);
4635 Schedule = S;
4638 void Scop::setScheduleTree(__isl_take isl_schedule *NewSchedule) {
4639 isl_schedule_free(Schedule);
4640 Schedule = NewSchedule;
4643 bool Scop::restrictDomains(isl::union_set Domain) {
4644 bool Changed = false;
4645 for (ScopStmt &Stmt : *this) {
4646 isl::union_set StmtDomain = isl::union_set(Stmt.getDomain());
4647 isl::union_set NewStmtDomain = StmtDomain.intersect(Domain);
4649 if (StmtDomain.is_subset(NewStmtDomain))
4650 continue;
4652 Changed = true;
4654 NewStmtDomain = NewStmtDomain.coalesce();
4656 if (NewStmtDomain.is_empty())
4657 Stmt.restrictDomain(isl::set::empty(Stmt.getDomainSpace()));
4658 else
4659 Stmt.restrictDomain(isl::set(NewStmtDomain));
4661 return Changed;
4664 ScalarEvolution *Scop::getSE() const { return SE; }
4666 // Create an isl_multi_union_aff that defines an identity mapping from the
4667 // elements of USet to their N-th dimension.
4669 // # Example:
4671 // Domain: { A[i,j]; B[i,j,k] }
4672 // N: 1
4674 // Resulting Mapping: { A[i,j] -> [(j)]; B[i,j,k] -> [(j)] }
4676 // @param USet A union set describing the elements for which to generate a
4677 // mapping.
4678 // @param N The dimension to map to.
4679 // @returns A mapping from USet to its N-th dimension.
4680 static isl::multi_union_pw_aff mapToDimension(isl::union_set USet, int N) {
4681 assert(N >= 0);
4682 assert(USet);
4683 assert(!USet.is_empty());
4685 auto Result = isl::union_pw_multi_aff::empty(USet.get_space());
4687 auto Lambda = [&Result, N](isl::set S) -> isl::stat {
4688 int Dim = S.dim(isl::dim::set);
4689 auto PMA = isl::pw_multi_aff::project_out_map(S.get_space(), isl::dim::set,
4690 N, Dim - N);
4691 if (N > 1)
4692 PMA = PMA.drop_dims(isl::dim::out, 0, N - 1);
4694 Result = Result.add_pw_multi_aff(PMA);
4695 return isl::stat::ok;
4698 isl::stat Res = USet.foreach_set(Lambda);
4699 (void)Res;
4701 assert(Res == isl::stat::ok);
4703 return isl::multi_union_pw_aff(isl::union_pw_multi_aff(Result));
4706 void Scop::addScopStmt(BasicBlock *BB, Loop *SurroundingLoop,
4707 std::vector<Instruction *> Instructions, int Count) {
4708 assert(BB && "Unexpected nullptr!");
4709 Stmts.emplace_back(*this, *BB, SurroundingLoop, Instructions, Count);
4710 auto *Stmt = &Stmts.back();
4711 StmtMap[BB].push_back(Stmt);
4712 for (Instruction *Inst : Instructions) {
4713 assert(!InstStmtMap.count(Inst) &&
4714 "Unexpected statement corresponding to the instruction.");
4715 InstStmtMap[Inst] = Stmt;
4719 void Scop::addScopStmt(Region *R, Loop *SurroundingLoop,
4720 std::vector<Instruction *> Instructions) {
4721 assert(R && "Unexpected nullptr!");
4722 Stmts.emplace_back(*this, *R, SurroundingLoop, Instructions);
4723 auto *Stmt = &Stmts.back();
4725 for (Instruction *Inst : Instructions) {
4726 assert(!InstStmtMap.count(Inst) &&
4727 "Unexpected statement corresponding to the instruction.");
4728 InstStmtMap[Inst] = Stmt;
4731 for (BasicBlock *BB : R->blocks()) {
4732 StmtMap[BB].push_back(Stmt);
4733 if (BB == R->getEntry())
4734 continue;
4735 for (Instruction &Inst : *BB) {
4736 assert(!InstStmtMap.count(&Inst) &&
4737 "Unexpected statement corresponding to the instruction.");
4738 InstStmtMap[&Inst] = Stmt;
4743 ScopStmt *Scop::addScopStmt(isl::map SourceRel, isl::map TargetRel,
4744 isl::set Domain) {
4745 #ifndef NDEBUG
4746 isl::set SourceDomain = SourceRel.domain();
4747 isl::set TargetDomain = TargetRel.domain();
4748 assert(Domain.is_subset(TargetDomain) &&
4749 "Target access not defined for complete statement domain");
4750 assert(Domain.is_subset(SourceDomain) &&
4751 "Source access not defined for complete statement domain");
4752 #endif
4753 Stmts.emplace_back(*this, SourceRel, TargetRel, Domain);
4754 CopyStmtsNum++;
4755 return &(Stmts.back());
4758 void Scop::buildSchedule(LoopInfo &LI) {
4759 Loop *L = getLoopSurroundingScop(*this, LI);
4760 LoopStackTy LoopStack({LoopStackElementTy(L, nullptr, 0)});
4761 buildSchedule(getRegion().getNode(), LoopStack, LI);
4762 assert(LoopStack.size() == 1 && LoopStack.back().L == L);
4763 Schedule = LoopStack[0].Schedule;
4766 /// To generate a schedule for the elements in a Region we traverse the Region
4767 /// in reverse-post-order and add the contained RegionNodes in traversal order
4768 /// to the schedule of the loop that is currently at the top of the LoopStack.
4769 /// For loop-free codes, this results in a correct sequential ordering.
4771 /// Example:
4772 /// bb1(0)
4773 /// / \.
4774 /// bb2(1) bb3(2)
4775 /// \ / \.
4776 /// bb4(3) bb5(4)
4777 /// \ /
4778 /// bb6(5)
4780 /// Including loops requires additional processing. Whenever a loop header is
4781 /// encountered, the corresponding loop is added to the @p LoopStack. Starting
4782 /// from an empty schedule, we first process all RegionNodes that are within
4783 /// this loop and complete the sequential schedule at this loop-level before
4784 /// processing any other nodes. To implement this
4785 /// loop-nodes-first-processing, the reverse post-order traversal is
4786 /// insufficient. Hence, we additionally check if the traversal yields
4787 /// sub-regions or blocks that are outside the last loop on the @p LoopStack.
4788 /// These region-nodes are then queued and only traversed after all nodes
4789 /// within the current loop have been processed.
void Scop::buildSchedule(Region *R, LoopStackTy &LoopStack, LoopInfo &LI) {
  Loop *OuterScopLoop = getLoopSurroundingScop(*this, LI);

  ReversePostOrderTraversal<Region *> RTraversal(R);
  std::deque<RegionNode *> WorkList(RTraversal.begin(), RTraversal.end());
  std::deque<RegionNode *> DelayList;
  bool LastRNWaiting = false;

  // Iterate over the region @p R in reverse post-order, but queue
  // sub-regions/blocks if they are not part of the last encountered, but not
  // yet completely traversed, loop. The variable LastRNWaiting is a flag that
  // indicates that we queued the last sub-region/block from the reverse
  // post-order iterator. If it is set we have to explore the next
  // sub-region/block from the iterator (if any) to guarantee progress. If it
  // is not set we first try the next queued sub-region/block.
  while (!WorkList.empty() || !DelayList.empty()) {
    RegionNode *RN;

    if ((LastRNWaiting && !WorkList.empty()) || DelayList.empty()) {
      RN = WorkList.front();
      WorkList.pop_front();
      LastRNWaiting = false;
    } else {
      RN = DelayList.front();
      DelayList.pop_front();
    }

    Loop *L = getRegionNodeLoop(RN, LI);
    if (!contains(L))
      L = OuterScopLoop;

    Loop *LastLoop = LoopStack.back().L;
    if (LastLoop != L) {
      if (LastLoop && !LastLoop->contains(L)) {
        LastRNWaiting = true;
        DelayList.push_back(RN);
        continue;
      }
      LoopStack.push_back({L, nullptr, 0});
    }
    buildSchedule(RN, LoopStack, LI);
  }
}

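// Example (illustrative, hypothetical CFG): assume bb1 -> bb2 -> {bb3, bb4}
// and bb3 -> bb2, where bb2 is the header of a loop L containing bb3 and bb4
// is the loop exit. A reverse post-order may visit bb1, bb2, bb4, bb3. When
// bb4 is reached, L (on top of the LoopStack) is not fully processed yet, so
// bb4 is moved to the DelayList. bb3 is scheduled next, completing L, and
// only then is bb4 taken from the DelayList and scheduled after the loop.
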
void Scop::buildSchedule(RegionNode *RN, LoopStackTy &LoopStack, LoopInfo &LI) {
  if (RN->isSubRegion()) {
    auto *LocalRegion = RN->getNodeAs<Region>();
    if (!isNonAffineSubRegion(LocalRegion)) {
      buildSchedule(LocalRegion, LoopStack, LI);
      return;
    }
  }

  assert(LoopStack.rbegin() != LoopStack.rend());
  auto LoopData = LoopStack.rbegin();
  LoopData->NumBlocksProcessed += getNumBlocksInRegionNode(RN);

  for (auto *Stmt : getStmtListFor(RN)) {
    auto *UDomain = isl_union_set_from_set(Stmt->getDomain().release());
    auto *StmtSchedule = isl_schedule_from_domain(UDomain);
    LoopData->Schedule = combineInSequence(LoopData->Schedule, StmtSchedule);
  }

  // Check if we just processed the last node in this loop. If we did, finalize
  // the loop by:
  //
  //   - adding new schedule dimensions
  //   - folding the resulting schedule into the parent loop schedule
  //   - dropping the loop schedule from the LoopStack.
  //
  // Then continue to check surrounding loops, which might also have been
  // completed by this node.
  size_t Dimension = LoopStack.size();
  while (LoopData->L &&
         LoopData->NumBlocksProcessed == getNumBlocksInLoop(LoopData->L)) {
    auto *Schedule = LoopData->Schedule;
    auto NumBlocksProcessed = LoopData->NumBlocksProcessed;

    assert(std::next(LoopData) != LoopStack.rend());
    ++LoopData;
    --Dimension;

    if (Schedule) {
      isl::union_set Domain = give(isl_schedule_get_domain(Schedule));
      isl::multi_union_pw_aff MUPA = mapToDimension(Domain, Dimension);
      Schedule = isl_schedule_insert_partial_schedule(Schedule, MUPA.release());
      LoopData->Schedule = combineInSequence(LoopData->Schedule, Schedule);
    }

    LoopData->NumBlocksProcessed += NumBlocksProcessed;
  }

  // Now pop all loops processed above from the LoopStack.
  LoopStack.erase(LoopStack.begin() + Dimension, LoopStack.end());
}

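// Example (illustrative): for a statement Stmt in a loop nest with induction
// variables i (outer) and j (inner), finalizing the inner loop inserts the
// partial schedule { Stmt[i, j] -> [j] }, and folding into the outer loop
// later inserts { Stmt[i, j] -> [i] } above it, so the resulting schedule
// tree orders iterations lexicographically by [i, j]. The names Stmt, i and
// j are hypothetical.
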
ArrayRef<ScopStmt *> Scop::getStmtListFor(BasicBlock *BB) const {
  auto StmtMapIt = StmtMap.find(BB);
  if (StmtMapIt == StmtMap.end())
    return {};
  return StmtMapIt->second;
}

ScopStmt *Scop::getLastStmtFor(BasicBlock *BB) const {
  ArrayRef<ScopStmt *> StmtList = getStmtListFor(BB);
  if (!StmtList.empty())
    return StmtList.back();
  return nullptr;
}

ArrayRef<ScopStmt *> Scop::getStmtListFor(RegionNode *RN) const {
  if (RN->isSubRegion())
    return getStmtListFor(RN->getNodeAs<Region>());
  return getStmtListFor(RN->getNodeAs<BasicBlock>());
}

ArrayRef<ScopStmt *> Scop::getStmtListFor(Region *R) const {
  return getStmtListFor(R->getEntry());
}

int Scop::getRelativeLoopDepth(const Loop *L) const {
  if (!L || !R.contains(L))
    return -1;
  // outermostLoopInRegion always returns nullptr for top-level regions.
  if (R.isTopLevelRegion()) {
    // LoopInfo's depths start at 1, we start at 0.
    return L->getLoopDepth() - 1;
  } else {
    Loop *OuterLoop = R.outermostLoopInRegion(const_cast<Loop *>(L));
    assert(OuterLoop);
    return L->getLoopDepth() - OuterLoop->getLoopDepth();
  }
}

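// Example (illustrative): if the SCoP's region spans a loop nest
//   for (i ...)       // outermost loop inside the region
//     for (j ...)
// then getRelativeLoopDepth returns 0 for the i-loop and 1 for the j-loop,
// regardless of how deeply the region itself is nested in surrounding loops.
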
ScopArrayInfo *Scop::getArrayInfoByName(const std::string BaseName) {
  for (auto &SAI : arrays()) {
    if (SAI->getName() == BaseName)
      return SAI;
  }
  return nullptr;
}

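// Example (illustrative): Polly derives array names from the base pointer, so
// looking up the array for a pointer %A would typically be
//   ScopArrayInfo *SAI = S.getArrayInfoByName("MemRef_A");
// The "MemRef_" prefix is Polly's naming convention; the lookup returns
// nullptr if no array with that name exists in this SCoP.
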
void Scop::addAccessData(MemoryAccess *Access) {
  const ScopArrayInfo *SAI = Access->getOriginalScopArrayInfo();
  assert(SAI && "can only use after access relations have been constructed");

  if (Access->isOriginalValueKind() && Access->isRead())
    ValueUseAccs[SAI].push_back(Access);
  else if (Access->isOriginalAnyPHIKind() && Access->isWrite())
    PHIIncomingAccs[SAI].push_back(Access);
}

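// Example (illustrative): for a PHI node
//   %phi = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
// each incoming edge gives rise to a PHI write access in the statement of the
// corresponding predecessor block, while the statement containing the PHI has
// a single PHI read access; the writes are what PHIIncomingAccs collects
// above.
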
void Scop::removeAccessData(MemoryAccess *Access) {
  if (Access->isOriginalValueKind() && Access->isWrite()) {
    ValueDefAccs.erase(Access->getAccessValue());
  } else if (Access->isOriginalValueKind() && Access->isRead()) {
    auto &Uses = ValueUseAccs[Access->getScopArrayInfo()];
    // std::remove alone only shifts the remaining elements to the front; the
    // erase-remove idiom is required to actually shrink the container.
    Uses.erase(std::remove(Uses.begin(), Uses.end(), Access), Uses.end());
  } else if (Access->isOriginalPHIKind() && Access->isRead()) {
    PHINode *PHI = cast<PHINode>(Access->getAccessInstruction());
    PHIReadAccs.erase(PHI);
  } else if (Access->isOriginalAnyPHIKind() && Access->isWrite()) {
    auto &Incomings = PHIIncomingAccs[Access->getScopArrayInfo()];
    Incomings.erase(std::remove(Incomings.begin(), Incomings.end(), Access),
                    Incomings.end());
  }
}

MemoryAccess *Scop::getValueDef(const ScopArrayInfo *SAI) const {
  assert(SAI->isValueKind());

  Instruction *Val = dyn_cast<Instruction>(SAI->getBasePtr());
  if (!Val)
    return nullptr;

  return ValueDefAccs.lookup(Val);
}

ArrayRef<MemoryAccess *> Scop::getValueUses(const ScopArrayInfo *SAI) const {
  assert(SAI->isValueKind());
  auto It = ValueUseAccs.find(SAI);
  if (It == ValueUseAccs.end())
    return {};
  return It->second;
}

MemoryAccess *Scop::getPHIRead(const ScopArrayInfo *SAI) const {
  assert(SAI->isPHIKind() || SAI->isExitPHIKind());

  if (SAI->isExitPHIKind())
    return nullptr;

  PHINode *PHI = cast<PHINode>(SAI->getBasePtr());
  return PHIReadAccs.lookup(PHI);
}

ArrayRef<MemoryAccess *> Scop::getPHIIncomings(const ScopArrayInfo *SAI) const {
  assert(SAI->isPHIKind() || SAI->isExitPHIKind());
  auto It = PHIIncomingAccs.find(SAI);
  if (It == PHIIncomingAccs.end())
    return {};
  return It->second;
}

bool Scop::isEscaping(Instruction *Inst) {
  assert(contains(Inst) && "The concept of escaping only makes sense for "
                           "values defined inside the SCoP");

  for (Use &Use : Inst->uses()) {
    BasicBlock *UserBB = getUseBlock(Use);
    if (!contains(UserBB))
      return true;

    // When the SCoP region exit needs to be simplified, PHIs in the region
    // exit move to a new basic block such that their incoming blocks are not
    // in the SCoP anymore.
    if (hasSingleExitEdge() && isa<PHINode>(Use.getUser()) &&
        isExit(cast<PHINode>(Use.getUser())->getParent()))
      return true;
  }
  return false;
}

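// Example (illustrative): in
//   for (int i = 0; i < N; i++)
//     x = A[i];          // x is defined inside the SCoP
//   use(x);              // use after the loop, outside the SCoP
// the value of x escapes, so code generation has to make its final value
// available after the SCoP.
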
Scop::ScopStatistics Scop::getStatistics() const {
  ScopStatistics Result;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
  auto LoopStat = ScopDetection::countBeneficialLoops(&R, *SE, *getLI(), 0);

  int NumTotalLoops = LoopStat.NumLoops;
  Result.NumBoxedLoops = getBoxedLoops().size();
  Result.NumAffineLoops = NumTotalLoops - Result.NumBoxedLoops;

  for (const ScopStmt &Stmt : *this) {
    isl::set Domain = Stmt.getDomain().intersect_params(getContext());
    bool IsInLoop = Stmt.getNumIterators() >= 1;
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isWrite())
        continue;

      if (MA->isLatestValueKind()) {
        Result.NumValueWrites += 1;
        if (IsInLoop)
          Result.NumValueWritesInLoops += 1;
      }

      if (MA->isLatestAnyPHIKind()) {
        Result.NumPHIWrites += 1;
        if (IsInLoop)
          Result.NumPHIWritesInLoops += 1;
      }

      isl::set AccSet =
          MA->getAccessRelation().intersect_domain(Domain).range();
      if (AccSet.is_singleton()) {
        Result.NumSingletonWrites += 1;
        if (IsInLoop)
          Result.NumSingletonWritesInLoops += 1;
      }
    }
  }
#endif
  return Result;
}

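// Example (illustrative): a write such as A[0] = ...; inside a loop touches
// the single element { MemRef_A[0] }, so its access range is a singleton set
// and it is counted in NumSingletonWrites (and NumSingletonWritesInLoops),
// whereas A[i] = ...; over 0 <= i < N writes a non-singleton range for N > 1.
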
raw_ostream &polly::operator<<(raw_ostream &OS, const Scop &scop) {
  scop.print(OS, PollyPrintInstructions);
  return OS;
}

//===----------------------------------------------------------------------===//

void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}

void updateLoopCountStatistic(ScopDetection::LoopStats Stats,
                              Scop::ScopStatistics ScopStats) {
  assert(Stats.NumLoops == ScopStats.NumAffineLoops + ScopStats.NumBoxedLoops);

  NumScops++;
  NumLoopsInScop += Stats.NumLoops;
  MaxNumLoopsInScop =
      std::max(MaxNumLoopsInScop.getValue(), (unsigned)Stats.NumLoops);

  if (Stats.MaxDepth == 1)
    NumScopsDepthOne++;
  else if (Stats.MaxDepth == 2)
    NumScopsDepthTwo++;
  else if (Stats.MaxDepth == 3)
    NumScopsDepthThree++;
  else if (Stats.MaxDepth == 4)
    NumScopsDepthFour++;
  else if (Stats.MaxDepth == 5)
    NumScopsDepthFive++;
  else
    NumScopsDepthLarger++;

  NumAffineLoops += ScopStats.NumAffineLoops;
  NumBoxedLoops += ScopStats.NumBoxedLoops;

  NumValueWrites += ScopStats.NumValueWrites;
  NumValueWritesInLoops += ScopStats.NumValueWritesInLoops;
  NumPHIWrites += ScopStats.NumPHIWrites;
  NumPHIWritesInLoops += ScopStats.NumPHIWritesInLoops;
  NumSingletonWrites += ScopStats.NumSingletonWrites;
  NumSingletonWritesInLoops += ScopStats.NumSingletonWritesInLoops;
}

bool ScopInfoRegionPass::runOnRegion(Region *R, RGPassManager &RGM) {
  auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();

  if (!SD.isMaxRegionInScop(*R))
    return false;

  Function *F = R->getEntry()->getParent();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto const &DL = F->getParent()->getDataLayout();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F);
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
  S = SB.getScop(); // Take ownership of the Scop object.

#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
  if (S) {
    ScopDetection::LoopStats Stats =
        ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
    updateLoopCountStatistic(Stats, S->getStatistics());
  }
#endif

  return false;
}

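// Example (illustrative, assuming a Polly-enabled build): with the legacy
// pass manager, the analysis registered below can be exercised from the
// command line roughly like
//   opt -load LLVMPolly.so -polly-scops -analyze input.ll
// which prints the polyhedral description of each detected SCoP via print().
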
void ScopInfoRegionPass::print(raw_ostream &OS, const Module *) const {
  if (S)
    S->print(OS, PollyPrintInstructions);
  else
    OS << "Invalid Scop!\n";
}

char ScopInfoRegionPass::ID = 0;

Pass *polly::createScopInfoRegionPassPass() { return new ScopInfoRegionPass(); }

INITIALIZE_PASS_BEGIN(ScopInfoRegionPass, "polly-scops",
                      "Polly - Create polyhedral description of Scops", false,
                      false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(ScopInfoRegionPass, "polly-scops",
                    "Polly - Create polyhedral description of Scops", false,
                    false)

//===----------------------------------------------------------------------===//

ScopInfo::ScopInfo(const DataLayout &DL, ScopDetection &SD, ScalarEvolution &SE,
                   LoopInfo &LI, AliasAnalysis &AA, DominatorTree &DT,
                   AssumptionCache &AC, OptimizationRemarkEmitter &ORE)
    : DL(DL), SD(SD), SE(SE), LI(LI), AA(AA), DT(DT), AC(AC), ORE(ORE) {
  recompute();
}

void ScopInfo::recompute() {
  RegionToScopMap.clear();
  // Create a polyhedral description of the SCoPs for all the valid regions
  // of a function.
  for (auto &It : SD) {
    Region *R = const_cast<Region *>(It);
    if (!SD.isMaxRegionInScop(*R))
      continue;

    ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
    std::unique_ptr<Scop> S = SB.getScop();
    if (!S)
      continue;
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
    ScopDetection::LoopStats Stats =
        ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
    updateLoopCountStatistic(Stats, S->getStatistics());
#endif
    bool Inserted = RegionToScopMap.insert({R, std::move(S)}).second;
    assert(Inserted && "Building Scop for the same region twice!");
    (void)Inserted;
  }
}

bool ScopInfo::invalidate(Function &F, const PreservedAnalyses &PA,
                          FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether this analysis or all analyses on functions have been
  // preserved, or whether any of the analyses we hold references to is being
  // invalidated.
  auto PAC = PA.getChecker<ScopInfoAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<ScopAnalysis>(F, PA) ||
         Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
         Inv.invalidate<LoopAnalysis>(F, PA) ||
         Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
         Inv.invalidate<AssumptionAnalysis>(F, PA);
}

AnalysisKey ScopInfoAnalysis::Key;

ScopInfoAnalysis::Result ScopInfoAnalysis::run(Function &F,
                                               FunctionAnalysisManager &FAM) {
  auto &SD = FAM.getResult<ScopAnalysis>(F);
  auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = FAM.getResult<LoopAnalysis>(F);
  auto &AA = FAM.getResult<AAManager>(F);
  auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = FAM.getResult<AssumptionAnalysis>(F);
  auto &DL = F.getParent()->getDataLayout();
  auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  return {DL, SD, SE, LI, AA, DT, AC, ORE};
}

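// Example (illustrative): with the new pass manager, a pass that needs the
// SCoPs of a function queries the analysis manager like
//   ScopInfo &SI = FAM.getResult<ScopInfoAnalysis>(F);
//   for (auto &It : SI) { ... }  // iterate over (Region, Scop) pairs
// as done by ScopInfoPrinterPass below.
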
PreservedAnalyses ScopInfoPrinterPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  auto &SI = FAM.getResult<ScopInfoAnalysis>(F);
  // Since the legacy PM processes SCoPs in bottom-up order, we print them in
  // reverse order here to keep the output consistent with it.
  for (auto &It : reverse(SI)) {
    if (It.second)
      It.second->print(Stream, PollyPrintInstructions);
    else
      Stream << "Invalid Scop!\n";
  }
  return PreservedAnalyses::all();
}

void ScopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<ScopDetectionWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.setPreservesAll();
}

bool ScopInfoWrapperPass::runOnFunction(Function &F) {
  auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto const &DL = F.getParent()->getDataLayout();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  Result.reset(new ScopInfo{DL, SD, SE, LI, AA, DT, AC, ORE});
  return false;
}

void ScopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
  for (auto &It : *Result) {
    if (It.second)
      It.second->print(OS, PollyPrintInstructions);
    else
      OS << "Invalid Scop!\n";
  }
}

char ScopInfoWrapperPass::ID = 0;

Pass *polly::createScopInfoWrapperPassPass() {
  return new ScopInfoWrapperPass();
}

INITIALIZE_PASS_BEGIN(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false);
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_END(
    ScopInfoWrapperPass, "polly-function-scops",
    "Polly - Create polyhedral description of all Scops of a function", false,
    false)