//===------ Simplify.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Simplify a SCoP by removing unnecessary statements and accesses.
//
//===----------------------------------------------------------------------===//

#include "polly/Simplify.h"
#include "polly/ScopInfo.h"
#include "polly/ScopPass.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLOStream.h"
#include "polly/Support/ISLTools.h"
#include "polly/Support/VirtualInstruction.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "polly-simplify"

using namespace llvm;
using namespace polly;

namespace {
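// TWO_STATISTICS creates a pair of statistic counters, one per possible
// invocation of this pass in the pass pipeline; the pass instance's CallNo
// (see below) selects which of the two counters is updated.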
#define TWO_STATISTICS(VARNAME, DESC)                                          \
  static llvm::Statistic VARNAME[2] = {                                        \
      {DEBUG_TYPE, #VARNAME "0", DESC " (first)", {0}, {false}},               \
      {DEBUG_TYPE, #VARNAME "1", DESC " (second)", {0}, {false}}}

/// Maximum number of disjuncts we allow in removeOverwrites(). This limit
/// prevents the analysis of a statement's accesses from becoming too complex.
/// It is chosen to be relatively small because the common cases should access
/// only a few array elements per statement.
static int const SimplifyMaxDisjuncts = 4;

TWO_STATISTICS(ScopsProcessed, "Number of SCoPs processed");
TWO_STATISTICS(ScopsModified, "Number of SCoPs simplified");

TWO_STATISTICS(TotalOverwritesRemoved, "Number of removed overwritten writes");
TWO_STATISTICS(TotalWritesCoalesced, "Number of writes coalesced with another");
TWO_STATISTICS(TotalRedundantWritesRemoved,
               "Number of writes of same value removed in any SCoP");
TWO_STATISTICS(TotalEmptyPartialAccessesRemoved,
               "Number of empty partial accesses removed");
TWO_STATISTICS(TotalDeadAccessesRemoved, "Number of dead accesses removed");
TWO_STATISTICS(TotalDeadInstructionsRemoved,
               "Number of unused instructions removed");
TWO_STATISTICS(TotalStmtsRemoved, "Number of statements removed in any SCoP");

TWO_STATISTICS(NumValueWrites, "Number of scalar value writes after Simplify");
TWO_STATISTICS(
    NumValueWritesInLoops,
    "Number of scalar value writes nested in affine loops after Simplify");
TWO_STATISTICS(NumPHIWrites,
               "Number of scalar phi writes after the first simplification");
TWO_STATISTICS(
    NumPHIWritesInLoops,
    "Number of scalar phi writes nested in affine loops after Simplify");
TWO_STATISTICS(NumSingletonWrites, "Number of singleton writes after Simplify");
TWO_STATISTICS(
    NumSingletonWritesInLoops,
    "Number of singleton writes nested in affine loops after Simplify");

static bool isImplicitRead(MemoryAccess *MA) {
  return MA->isRead() && MA->isOriginalScalarKind();
}

static bool isExplicitAccess(MemoryAccess *MA) {
  return MA->isOriginalArrayKind();
}

static bool isImplicitWrite(MemoryAccess *MA) {
  return MA->isWrite() && MA->isOriginalScalarKind();
}

/// Like isl::union_map::add_map, but may also return an underapproximated
/// result if the union would become too complex.
///
/// This is implemented by adding disjuncts to the result until the limit is
/// reached.
static isl::union_map underapproximatedAddMap(isl::union_map UMap,
                                              isl::map Map) {
  if (UMap.is_null() || Map.is_null())
    return {};

  isl::map PrevMap = UMap.extract_map(Map.get_space());

  // Fast path: if we know that the disjunct limit cannot be exceeded, just add
  // the map.
  if (isl_map_n_basic_map(PrevMap.get()) + isl_map_n_basic_map(Map.get()) <=
      SimplifyMaxDisjuncts)
    return UMap.add_map(Map);

  isl::map Result = isl::map::empty(PrevMap.get_space());
  PrevMap.foreach_basic_map([&Result](isl::basic_map BMap) -> isl::stat {
    if (isl_map_n_basic_map(Result.get()) > SimplifyMaxDisjuncts)
      return isl::stat::error;
    Result = Result.unite(BMap);
    return isl::stat::ok;
  });
  Map.foreach_basic_map([&Result](isl::basic_map BMap) -> isl::stat {
    if (isl_map_n_basic_map(Result.get()) > SimplifyMaxDisjuncts)
      return isl::stat::error;
    Result = Result.unite(BMap);
    return isl::stat::ok;
  });

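  // Disjuncts beyond the limit are dropped by aborting the enumerations above,
  // so Result covers at most a subset of PrevMap united with Map; this is what
  // makes the returned map an underapproximation.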
  isl::union_map UResult =
      UMap.subtract(isl::map::universe(PrevMap.get_space()));
  UResult = UResult.add_map(Result);

  return UResult;
}

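/// Pass that simplifies a SCoP: it removes overwritten, coalescable, and
/// redundant writes, accesses with empty domains, dead accesses and
/// instructions, and statements without side effects.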
class Simplify : public ScopPass {
private:
  /// The invocation id (if there are multiple instances in the pass manager's
  /// pipeline) to determine which statistics to update.
  int CallNo;

  /// The last/current SCoP that is/has been processed.
  Scop *S;

  /// Number of writes that are overwritten anyway.
  int OverwritesRemoved = 0;

  /// Number of combined writes.
  int WritesCoalesced = 0;

  /// Number of redundant writes removed from this SCoP.
  int RedundantWritesRemoved = 0;

  /// Number of writes with empty access domain removed.
  int EmptyPartialAccessesRemoved = 0;

  /// Number of unused accesses removed from this SCoP.
  int DeadAccessesRemoved = 0;

  /// Number of unused instructions removed from this SCoP.
  int DeadInstructionsRemoved = 0;

  /// Number of unnecessary statements removed from the SCoP.
  int StmtsRemoved = 0;

  /// Return whether at least one simplification has been applied.
  bool isModified() const {
    return OverwritesRemoved > 0 || WritesCoalesced > 0 ||
           RedundantWritesRemoved > 0 || EmptyPartialAccessesRemoved > 0 ||
           DeadAccessesRemoved > 0 || DeadInstructionsRemoved > 0 ||
           StmtsRemoved > 0;
  }

  /// Remove writes that are overwritten unconditionally later in the same
  /// statement.
  ///
  /// There must be no read of the same value between the write (that is to be
  /// removed) and the overwrite.
  void removeOverwrites() {
    for (auto &Stmt : *S) {
      isl::set Domain = Stmt.getDomain();
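      // { Domain[] -> Element[] }
      // Maps statement instances to the array elements that are overwritten by
      // a later (unconditional) write in this statement.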
      isl::union_map WillBeOverwritten =
          isl::union_map::empty(S->getParamSpace());

      SmallVector<MemoryAccess *, 32> Accesses(getAccessesInOrder(Stmt));

      // Iterate in reverse order, so the overwrite comes before the write that
      // is to be removed.
      for (auto *MA : reverse(Accesses)) {

        // In region statements, the explicit accesses can be in blocks that
        // can be executed in any order. We therefore process only the implicit
        // writes and stop after that.
        if (Stmt.isRegionStmt() && isExplicitAccess(MA))
          break;

        auto AccRel = MA->getAccessRelation();
        AccRel = AccRel.intersect_domain(Domain);
        AccRel = AccRel.intersect_params(S->getContext());

        // If a value is read in-between, do not consider it as overwritten.
        if (MA->isRead()) {
          // Invalidate all overwrites for the array it accesses to avoid too
          // complex isl sets.
          isl::map AccRelUniv = isl::map::universe(AccRel.get_space());
          WillBeOverwritten = WillBeOverwritten.subtract(AccRelUniv);
          continue;
        }

        // If all of a write's elements are overwritten, remove it.
        isl::union_map AccRelUnion = AccRel;
        if (AccRelUnion.is_subset(WillBeOverwritten)) {
          DEBUG(dbgs() << "Removing " << MA
                       << " which will be overwritten anyway\n");

          Stmt.removeSingleMemoryAccess(MA);
          OverwritesRemoved++;
          TotalOverwritesRemoved[CallNo]++;
        }

        // Unconditional writes overwrite other values.
        if (MA->isMustWrite()) {
          // Avoid too complex isl sets. If necessary, throw away some of the
          // knowledge.
          WillBeOverwritten =
              underapproximatedAddMap(WillBeOverwritten, AccRel);
        }
      }
    }
  }

  /// Combine writes that write the same value if possible.
  ///
  /// This function is able to combine:
  /// - Partial writes with disjoint domain.
  /// - Writes that write to the same array element.
  ///
  /// In all cases, both writes must write the same values.
  void coalesceWrites() {
    for (auto &Stmt : *S) {
      isl::set Domain = Stmt.getDomain().intersect_params(S->getContext());

      // We let isl do the lookup for the same-value condition. For this, we
      // wrap llvm::Value into an isl::set such that isl can do the lookup in
      // its hashtable implementation. llvm::Values are only compared within a
      // ScopStmt, so the map can be local to this scope. TODO: Refactor with
      // ZoneAlgorithm::makeValueSet()
      SmallDenseMap<Value *, isl::set> ValueSets;
      auto makeValueSet = [&ValueSets, this](Value *V) -> isl::set {
        assert(V);
        isl::set &Result = ValueSets[V];
        if (Result.is_null()) {
          isl::ctx Ctx = S->getIslCtx();
          std::string Name =
              getIslCompatibleName("Val", V, ValueSets.size() - 1,
                                   std::string(), UseInstructionNames);
          isl::id Id = isl::id::alloc(Ctx, Name, V);
          Result = isl::set::universe(
              isl::space(Ctx, 0, 0).set_tuple_id(isl::dim::set, Id));
        }
        return Result;
      };

      // List of all future writes that are eligible for coalescing.
      // { [Domain[] -> Element[]] -> [Value[] -> MemoryAccess[]] }
      isl::union_map FutureWrites = isl::union_map::empty(S->getParamSpace());

      // Iterate over accesses from the last to the first.
      SmallVector<MemoryAccess *, 32> Accesses(getAccessesInOrder(Stmt));
      for (MemoryAccess *MA : reverse(Accesses)) {
        // In region statements, the explicit accesses can be in blocks that
        // can be executed in any order. We therefore process only the implicit
        // writes and stop after that.
        if (Stmt.isRegionStmt() && isExplicitAccess(MA))
          break;

        // { Domain[] -> Element[] }
        isl::map AccRel =
            MA->getLatestAccessRelation().intersect_domain(Domain);

        // { [Domain[] -> Element[]] }
        isl::set AccRelWrapped = AccRel.wrap();

        // { Value[] }
        isl::set ValSet;

        if (MA->isMustWrite() && (MA->isOriginalScalarKind() ||
                                  isa<StoreInst>(MA->getAccessInstruction()))) {
          // Normally, tryGetValueStored() should be used to determine which
          // value is written, but it can return nullptr; for PHI accesses,
          // getAccessValue() returns the PHI instead of the PHI's incoming
          // value. In this case, where we only compare values of a single
          // statement, this is fine, because within a statement, a PHI in a
          // successor block always has the same value as the incoming write.
          // We still prefer to use the incoming value directly so we also
          // catch direct uses of it.
          Value *StoredVal = MA->tryGetValueStored();
          if (!StoredVal)
            StoredVal = MA->getAccessValue();
          ValSet = makeValueSet(StoredVal);

          // { Domain[] }
          isl::set AccDomain = AccRel.domain();

          // Parts of the statement's domain that are not written by this
          // access.
          isl::set UndefDomain = Domain.subtract(AccDomain);

          // { Element[] }
          isl::set ElementUniverse =
              isl::set::universe(AccRel.get_space().range());

          // { Domain[] -> Element[] }
          isl::map UndefAnything =
              isl::map::from_domain_and_range(UndefDomain, ElementUniverse);

          // We are looking for a compatible write access. The other write can
          // access these elements...
          isl::map AllowedAccesses = AccRel.unite(UndefAnything);

          // ... and must write the same value.
          // { [Domain[] -> Element[]] -> Value[] }
          isl::map Filter =
              isl::map::from_domain_and_range(AllowedAccesses.wrap(), ValSet);

          // Lookup future writes that fulfill these conditions.
          // { [[Domain[] -> Element[]] -> Value[]] -> MemoryAccess[] }
          isl::union_map Filtered =
              FutureWrites.uncurry().intersect_domain(Filter.wrap());

          // Iterate through the candidates.
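          // The out-tuple id of each candidate map is the isl id of the
          // corresponding MemoryAccess (MemoryAccess::getId()), whose user
          // pointer is the access itself, so the candidate write can be
          // recovered directly from the map's space.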
          Filtered.foreach_map([&, this](isl::map Map) -> isl::stat {
            MemoryAccess *OtherMA = (MemoryAccess *)Map.get_space()
                                        .get_tuple_id(isl::dim::out)
                                        .get_user();

            isl::map OtherAccRel =
                OtherMA->getLatestAccessRelation().intersect_domain(Domain);

            // The filter only guaranteed that some of OtherMA's accessed
            // elements are allowed. Verify that it only accesses allowed
            // elements. Otherwise, continue with the next candidate.
            if (!OtherAccRel.is_subset(AllowedAccesses).is_true())
              return isl::stat::ok;

            // The combined access relation.
            // { Domain[] -> Element[] }
            isl::map NewAccRel = AccRel.unite(OtherAccRel);
            simplify(NewAccRel);

            // Carry out the coalescing.
            Stmt.removeSingleMemoryAccess(MA);
            OtherMA->setNewAccessRelation(NewAccRel);

            // We removed MA, OtherMA takes its role.
            MA = OtherMA;

            TotalWritesCoalesced[CallNo]++;
            WritesCoalesced++;

            // Don't look for more candidates.
            return isl::stat::error;
          });
        }

        // Two writes cannot be coalesced if there is another access (to some
        // of the written elements) between them. Remove all visited write
        // accesses from the list of eligible writes. Don't just remove the
        // accessed elements, but any MemoryAccess that touches any of the
        // invalidated elements.
        SmallPtrSet<MemoryAccess *, 2> TouchedAccesses;
        FutureWrites.intersect_domain(AccRelWrapped)
            .foreach_map([&TouchedAccesses](isl::map Map) -> isl::stat {
              MemoryAccess *MA = (MemoryAccess *)Map.get_space()
                                     .range()
                                     .unwrap()
                                     .get_tuple_id(isl::dim::out)
                                     .get_user();
              TouchedAccesses.insert(MA);
              return isl::stat::ok;
            });
        isl::union_map NewFutureWrites =
            isl::union_map::empty(FutureWrites.get_space());
        FutureWrites.foreach_map([&TouchedAccesses, &NewFutureWrites](
                                     isl::map FutureWrite) -> isl::stat {
          MemoryAccess *MA = (MemoryAccess *)FutureWrite.get_space()
                                 .range()
                                 .unwrap()
                                 .get_tuple_id(isl::dim::out)
                                 .get_user();
          if (!TouchedAccesses.count(MA))
            NewFutureWrites = NewFutureWrites.add_map(FutureWrite);
          return isl::stat::ok;
        });
        FutureWrites = NewFutureWrites;

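        // If this is an unconditional write of a known value, remember it as a
        // candidate that an earlier write (seen later in this reverse
        // iteration) may be coalesced with.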
        if (MA->isMustWrite() && !ValSet.is_null()) {
          // { MemoryAccess[] }
          auto AccSet =
              isl::set::universe(isl::space(S->getIslCtx(), 0, 0)
                                     .set_tuple_id(isl::dim::set, MA->getId()));

          // { Val[] -> MemoryAccess[] }
          isl::map ValAccSet = isl::map::from_domain_and_range(ValSet, AccSet);

          // { [Domain[] -> Element[]] -> [Value[] -> MemoryAccess[]] }
          isl::map AccRelValAcc =
              isl::map::from_domain_and_range(AccRelWrapped, ValAccSet.wrap());
          FutureWrites = FutureWrites.add_map(AccRelValAcc);
        }
      }
    }
  }

  /// Remove writes that just write the same value already stored in the
  /// element.
  void removeRedundantWrites() {
    for (auto &Stmt : *S) {
      SmallDenseMap<Value *, isl::set> ValueSets;
      auto makeValueSet = [&ValueSets, this](Value *V) -> isl::set {
        assert(V);
        isl::set &Result = ValueSets[V];
        if (Result.is_null()) {
          isl_ctx *Ctx = S->getIslCtx().get();
          std::string Name =
              getIslCompatibleName("Val", V, ValueSets.size() - 1,
                                   std::string(), UseInstructionNames);
          isl::id Id = give(isl_id_alloc(Ctx, Name.c_str(), V));
          Result = isl::set::universe(
              isl::space(Ctx, 0, 0).set_tuple_id(isl::dim::set, Id));
        }
        return Result;
      };

      isl::set Domain = Stmt.getDomain();
      Domain = Domain.intersect_params(S->getContext());

      // List of element reads that still have the same value while iterating
      // through the MemoryAccesses.
      // { [Domain[] -> Element[]] -> Val[] }
      isl::union_map Known = isl::union_map::empty(S->getParamSpace());

      SmallVector<MemoryAccess *, 32> Accesses(getAccessesInOrder(Stmt));
      for (MemoryAccess *MA : Accesses) {
        // Is the memory access in a defined order relative to the other
        // accesses? In region statements, only the first and the last accesses
        // have defined order. Execution of those in the middle may depend on
        // runtime conditions and therefore cannot be modified.
        bool IsOrdered =
            Stmt.isBlockStmt() || MA->isOriginalScalarKind() ||
            (!S->getBoxedLoops().size() && MA->getAccessInstruction() &&
             Stmt.getEntryBlock() == MA->getAccessInstruction()->getParent());

        isl::map AccRel = MA->getAccessRelation();
        AccRel = AccRel.intersect_domain(Domain);
        isl::set AccRelWrapped = AccRel.wrap();

        // Determine whether a write is redundant (stores only values that are
        // already present in the written array elements) and remove it if this
        // is the case.
        if (IsOrdered && MA->isMustWrite() &&
            (isa<StoreInst>(MA->getAccessInstruction()) ||
             MA->isOriginalScalarKind())) {
          Value *StoredVal = MA->tryGetValueStored();
          if (!StoredVal)
            StoredVal = MA->getAccessValue();

          if (StoredVal) {
            // Lookup in the set of known values.
            isl::map AccRelStoredVal = isl::map::from_domain_and_range(
                AccRelWrapped, makeValueSet(StoredVal));
            if (isl::union_map(AccRelStoredVal).is_subset(Known)) {
              DEBUG(dbgs() << "Cleanup of " << MA << ":\n");
              DEBUG(dbgs() << " Scalar: " << *StoredVal << "\n");
              DEBUG(dbgs() << " AccRel: " << AccRel << "\n");

              Stmt.removeSingleMemoryAccess(MA);

              RedundantWritesRemoved++;
              TotalRedundantWritesRemoved[CallNo]++;
            }
          }
        }

        // Update the known values set.
        if (MA->isRead()) {
          // A loaded value is the currently known value of the array element
          // it was loaded from.
          Value *LoadedVal = MA->getAccessValue();
          if (LoadedVal && IsOrdered) {
            isl::map AccRelVal = isl::map::from_domain_and_range(
                AccRelWrapped, makeValueSet(LoadedVal));

            Known = Known.add_map(AccRelVal);
          }
        } else if (MA->isWrite()) {
          // Remove (possibly) overwritten values from the known elements set.
          // We remove all elements of the accessed array to avoid too complex
          // isl sets.
          isl::set AccRelUniv = isl::set::universe(AccRelWrapped.get_space());
          Known = Known.subtract_domain(AccRelUniv);

          // At this point, we could add the written value of must-writes.
          // However, writing the same value is already handled by
          // coalesceWrites().
        }
      }
    }
  }

  /// Remove statements without side effects.
  void removeUnnecessaryStmts() {
    auto NumStmtsBefore = S->getSize();
    S->simplifySCoP(true);
    assert(NumStmtsBefore >= S->getSize());
    StmtsRemoved = NumStmtsBefore - S->getSize();
    DEBUG(dbgs() << "Removed " << StmtsRemoved << " (of " << NumStmtsBefore
                 << ") statements\n");
    TotalStmtsRemoved[CallNo] += StmtsRemoved;
  }

  /// Remove accesses that have an empty domain.
  void removeEmptyPartialAccesses() {
    for (ScopStmt &Stmt : *S) {
      // Defer the actual removal to not invalidate iterators.
      SmallVector<MemoryAccess *, 8> DeferredRemove;

      for (MemoryAccess *MA : Stmt) {
        if (!MA->isWrite())
          continue;

        isl::map AccRel = MA->getAccessRelation();
        if (!AccRel.is_empty().is_true())
          continue;

        DEBUG(dbgs() << "Removing " << MA
                     << " because it's a partial access that never occurs\n");
        DeferredRemove.push_back(MA);
      }

      for (MemoryAccess *MA : DeferredRemove) {
        Stmt.removeSingleMemoryAccess(MA);
        EmptyPartialAccessesRemoved++;
        TotalEmptyPartialAccessesRemoved[CallNo]++;
      }
    }
  }

  /// Mark all reachable instructions and accesses, and sweep those that are
  /// not reachable.
  void markAndSweep(LoopInfo *LI) {
    DenseSet<MemoryAccess *> UsedMA;
    DenseSet<VirtualInstruction> UsedInsts;

    // Get all reachable instructions and accesses.
    markReachable(S, LI, UsedInsts, UsedMA);

    // Remove all non-reachable accesses.
    // We need to get all MemoryAccesses first, in order to not invalidate the
    // iterators when removing them.
    SmallVector<MemoryAccess *, 64> AllMAs;
    for (ScopStmt &Stmt : *S)
      AllMAs.append(Stmt.begin(), Stmt.end());

    for (MemoryAccess *MA : AllMAs) {
      if (UsedMA.count(MA))
        continue;
      DEBUG(dbgs() << "Removing " << MA << " because its value is not used\n");
      ScopStmt *Stmt = MA->getStatement();
      Stmt->removeSingleMemoryAccess(MA);

      DeadAccessesRemoved++;
      TotalDeadAccessesRemoved[CallNo]++;
    }

    // Remove all non-reachable instructions.
    for (ScopStmt &Stmt : *S) {
      // Note that for region statements, we can only remove the
      // non-terminator instructions of the entry block. All other instructions
      // are not in the instructions list, but implicitly always part of the
      // statement.

      SmallVector<Instruction *, 32> AllInsts(Stmt.insts_begin(),
                                              Stmt.insts_end());
      SmallVector<Instruction *, 32> RemainInsts;

      for (Instruction *Inst : AllInsts) {
        auto It = UsedInsts.find({&Stmt, Inst});
        if (It == UsedInsts.end()) {
          DEBUG(dbgs() << "Removing "; Inst->print(dbgs());
                dbgs() << " because it is not used\n");
          DeadInstructionsRemoved++;
          TotalDeadInstructionsRemoved[CallNo]++;
          continue;
        }

        RemainInsts.push_back(Inst);

        // If instructions appear multiple times, keep only the first.
        UsedInsts.erase(It);
      }

      // Set the new instruction list to be only those we did not remove.
      Stmt.setInstructions(RemainInsts);
    }
  }

  /// Print simplification statistics to @p OS.
  void printStatistics(llvm::raw_ostream &OS, int Indent = 0) const {
    OS.indent(Indent) << "Statistics {\n";
    OS.indent(Indent + 4) << "Overwrites removed: " << OverwritesRemoved
                          << '\n';
    OS.indent(Indent + 4) << "Partial writes coalesced: " << WritesCoalesced
                          << "\n";
    OS.indent(Indent + 4) << "Redundant writes removed: "
                          << RedundantWritesRemoved << "\n";
    OS.indent(Indent + 4) << "Accesses with empty domains removed: "
                          << EmptyPartialAccessesRemoved << "\n";
    OS.indent(Indent + 4) << "Dead accesses removed: " << DeadAccessesRemoved
                          << '\n';
    OS.indent(Indent + 4) << "Dead instructions removed: "
                          << DeadInstructionsRemoved << '\n';
    OS.indent(Indent + 4) << "Stmts removed: " << StmtsRemoved << "\n";
    OS.indent(Indent) << "}\n";
  }

  /// Print the current state of all MemoryAccesses to @p OS.
  void printAccesses(llvm::raw_ostream &OS, int Indent = 0) const {
    OS.indent(Indent) << "After accesses {\n";
    for (auto &Stmt : *S) {
      OS.indent(Indent + 4) << Stmt.getBaseName() << "\n";
      for (auto *MA : Stmt)
        MA->print(OS);
    }
    OS.indent(Indent) << "}\n";
  }

public:
  static char ID;
  explicit Simplify(int CallNo = 0) : ScopPass(ID), CallNo(CallNo) {}

  virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequiredTransitive<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesAll();
  }

  virtual bool runOnScop(Scop &S) override {
    // Reset statistics of last processed SCoP.
    releaseMemory();
    assert(!isModified());

    // Prepare processing of this SCoP.
    this->S = &S;
    ScopsProcessed[CallNo]++;

    DEBUG(dbgs() << "Removing partial writes that never happen...\n");
    removeEmptyPartialAccesses();

    DEBUG(dbgs() << "Removing overwrites...\n");
    removeOverwrites();

    DEBUG(dbgs() << "Coalesce partial writes...\n");
    coalesceWrites();

    DEBUG(dbgs() << "Removing redundant writes...\n");
    removeRedundantWrites();

    DEBUG(dbgs() << "Cleanup unused accesses...\n");
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    markAndSweep(LI);

    DEBUG(dbgs() << "Removing statements without side effects...\n");
    removeUnnecessaryStmts();

    if (isModified())
      ScopsModified[CallNo]++;
    DEBUG(dbgs() << "\nFinal Scop:\n");
    DEBUG(dbgs() << S);

    auto ScopStats = S.getStatistics();
    NumValueWrites[CallNo] += ScopStats.NumValueWrites;
    NumValueWritesInLoops[CallNo] += ScopStats.NumValueWritesInLoops;
    NumPHIWrites[CallNo] += ScopStats.NumPHIWrites;
    NumPHIWritesInLoops[CallNo] += ScopStats.NumPHIWritesInLoops;
    NumSingletonWrites[CallNo] += ScopStats.NumSingletonWrites;
    NumSingletonWritesInLoops[CallNo] += ScopStats.NumSingletonWritesInLoops;

    return false;
  }

  virtual void printScop(raw_ostream &OS, Scop &S) const override {
    assert(&S == this->S &&
           "Can only print analysis for the last processed SCoP");
    printStatistics(OS);

    if (!isModified()) {
      OS << "SCoP could not be simplified\n";
      return;
    }
    printAccesses(OS);
  }

  virtual void releaseMemory() override {
    S = nullptr;

    OverwritesRemoved = 0;
    WritesCoalesced = 0;
    RedundantWritesRemoved = 0;
    EmptyPartialAccessesRemoved = 0;
    DeadAccessesRemoved = 0;
    DeadInstructionsRemoved = 0;
    StmtsRemoved = 0;
  }
};

char Simplify::ID;
} // anonymous namespace

namespace polly {
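/// Return the memory accesses of @p Stmt in the order in which they take
/// effect within the statement: implicit (scalar) reads first, then explicit
/// (array) accesses, and finally implicit (scalar) writes.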
SmallVector<MemoryAccess *, 32> getAccessesInOrder(ScopStmt &Stmt) {

  SmallVector<MemoryAccess *, 32> Accesses;

  for (MemoryAccess *MemAcc : Stmt)
    if (isImplicitRead(MemAcc))
      Accesses.push_back(MemAcc);

  for (MemoryAccess *MemAcc : Stmt)
    if (isExplicitAccess(MemAcc))
      Accesses.push_back(MemAcc);

  for (MemoryAccess *MemAcc : Stmt)
    if (isImplicitWrite(MemAcc))
      Accesses.push_back(MemAcc);

  return Accesses;
}
} // namespace polly

Pass *polly::createSimplifyPass(int CallNo) { return new Simplify(CallNo); }

INITIALIZE_PASS_BEGIN(Simplify, "polly-simplify", "Polly - Simplify", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(Simplify, "polly-simplify", "Polly - Simplify", false,
                    false)