[Simplify] Improve scalability.
[polly-mirror.git] / lib / Transform / Simplify.cpp
blob d62686c65852b1b1c6e1e4b498814776fe8def98
1 //===------ Simplify.cpp ----------------------------------------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // Simplify a SCoP by removing unnecessary statements and accesses.
12 //===----------------------------------------------------------------------===//
14 #include "polly/Simplify.h"
15 #include "polly/ScopInfo.h"
16 #include "polly/ScopPass.h"
17 #include "polly/Support/GICHelper.h"
18 #include "polly/Support/ISLOStream.h"
19 #include "polly/Support/ISLTools.h"
20 #include "polly/Support/VirtualInstruction.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Support/Debug.h"
23 #define DEBUG_TYPE "polly-simplify"
25 using namespace llvm;
26 using namespace polly;
28 namespace {
/// Number of max disjuncts we allow in removeOverwrites(). This is to avoid
/// that the analysis of accesses in a statement is becoming too complex. Chosen
/// to be relatively small because all the common cases should access only few
/// array elements per statement.
/// The limit is enforced by underapproximatedAddMap(), which removeOverwrites()
/// uses to accumulate the set of overwritten elements.
static int const SimplifyMaxDisjuncts = 4;
// Aggregate counters over all SCoPs processed in this compilation (LLVM
// STATISTIC counters, reported with -stats). The per-SCoP counterparts live
// as members of the Simplify pass below.
STATISTIC(ScopsProcessed, "Number of SCoPs processed");
STATISTIC(ScopsModified, "Number of SCoPs simplified");

STATISTIC(PairUnequalAccRels, "Number of Load-Store pairs NOT removed because "
                              "of different access relations");
STATISTIC(InBetweenStore, "Number of Load-Store pairs NOT removed because "
                          "there is another store between them");
STATISTIC(TotalOverwritesRemoved, "Number of removed overwritten writes");
STATISTIC(TotalWritesCoalesced, "Number of writes coalesced with another");
STATISTIC(TotalRedundantWritesRemoved,
          "Number of writes of same value removed in any SCoP");
STATISTIC(TotalEmptyPartialAccessesRemoved,
          "Number of empty partial accesses removed");
STATISTIC(TotalDeadAccessesRemoved, "Number of dead accesses removed");
STATISTIC(TotalDeadInstructionsRemoved,
          "Number of unused instructions removed");
STATISTIC(TotalStmtsRemoved, "Number of statements removed in any SCoP");
54 static bool isImplicitRead(MemoryAccess *MA) {
55 return MA->isRead() && MA->isOriginalScalarKind();
58 static bool isExplicitAccess(MemoryAccess *MA) {
59 return MA->isOriginalArrayKind();
62 static bool isImplicitWrite(MemoryAccess *MA) {
63 return MA->isWrite() && MA->isOriginalScalarKind();
66 /// Like isl::union_map::add_map, but may also return an underapproximated
67 /// result if getting too complex.
68 ///
69 /// This is implemented by adding disjuncts to the results until the limit is
70 /// reached.
71 static isl::union_map underapproximatedAddMap(isl::union_map UMap,
72 isl::map Map) {
73 if (UMap.is_null() || Map.is_null())
74 return {};
76 isl::map PrevMap = UMap.extract_map(Map.get_space());
78 // Fast path: If known that we cannot exceed the disjunct limit, just add
79 // them.
80 if (isl_map_n_basic_map(PrevMap.get()) + isl_map_n_basic_map(Map.get()) <=
81 SimplifyMaxDisjuncts)
82 return UMap.add_map(Map);
84 isl::map Result = isl::map::empty(PrevMap.get_space());
85 PrevMap.foreach_basic_map([&Result](isl::basic_map BMap) -> isl::stat {
86 if (isl_map_n_basic_map(Result.get()) > SimplifyMaxDisjuncts)
87 return isl::stat::error;
88 Result = Result.unite(BMap);
89 return isl::stat::ok;
90 });
91 Map.foreach_basic_map([&Result](isl::basic_map BMap) -> isl::stat {
92 if (isl_map_n_basic_map(Result.get()) > SimplifyMaxDisjuncts)
93 return isl::stat::error;
94 Result = Result.unite(BMap);
95 return isl::stat::ok;
96 });
98 isl::union_map UResult =
99 UMap.subtract(isl::map::universe(PrevMap.get_space()));
100 UResult.add_map(Result);
102 return UResult;
105 /// Return a vector that contains MemoryAccesses in the order in
106 /// which they are executed.
108 /// The order is:
109 /// - Implicit reads (BlockGenerator::generateScalarLoads)
110 /// - Explicit reads and writes (BlockGenerator::generateArrayLoad,
111 /// BlockGenerator::generateArrayStore)
112 /// - In block statements, the accesses are in order in which their
113 /// instructions are executed.
114 /// - In region statements, that order of execution is not predictable at
115 /// compile-time.
116 /// - Implicit writes (BlockGenerator::generateScalarStores)
117 /// The order in which implicit writes are executed relative to each other is
118 /// undefined.
119 static SmallVector<MemoryAccess *, 32> getAccessesInOrder(ScopStmt &Stmt) {
121 SmallVector<MemoryAccess *, 32> Accesses;
123 for (MemoryAccess *MemAcc : Stmt)
124 if (isImplicitRead(MemAcc))
125 Accesses.push_back(MemAcc);
127 for (MemoryAccess *MemAcc : Stmt)
128 if (isExplicitAccess(MemAcc))
129 Accesses.push_back(MemAcc);
131 for (MemoryAccess *MemAcc : Stmt)
132 if (isImplicitWrite(MemAcc))
133 Accesses.push_back(MemAcc);
135 return Accesses;
/// Pass that simplifies a SCoP by removing unnecessary memory accesses,
/// instructions and statements: writes that are overwritten anyway,
/// coalescable partial writes, writes of values already stored, accesses with
/// empty domains, unused (dead) accesses/instructions, and statements without
/// side effects. Per-SCoP counters are kept as members (printed by
/// printScop()) and folded into the global STATISTIC counters as well.
class Simplify : public ScopPass {
private:
  /// The last/current SCoP that is/has been processed.
  Scop *S;

  /// Number of writes that are overwritten anyway.
  int OverwritesRemoved = 0;

  /// Number of combined writes.
  int WritesCoalesced = 0;

  /// Number of redundant writes removed from this SCoP.
  int RedundantWritesRemoved = 0;

  /// Number of writes with empty access domain removed.
  int EmptyPartialAccessesRemoved = 0;

  /// Number of unused accesses removed from this SCoP.
  int DeadAccessesRemoved = 0;

  /// Number of unused instructions removed from this SCoP.
  int DeadInstructionsRemoved = 0;

  /// Number of unnecessary statements removed from the SCoP.
  int StmtsRemoved = 0;

  /// Return whether at least one simplification has been applied.
  bool isModified() const {
    return OverwritesRemoved > 0 || WritesCoalesced > 0 ||
           RedundantWritesRemoved > 0 || EmptyPartialAccessesRemoved > 0 ||
           DeadAccessesRemoved > 0 || DeadInstructionsRemoved > 0 ||
           StmtsRemoved > 0;
  }

  /// Return the first read access in @p Stmt whose accessed value is @p Val,
  /// or nullptr if there is none (or @p Val is not an Instruction).
  MemoryAccess *getReadAccessForValue(ScopStmt *Stmt, llvm::Value *Val) {
    if (!isa<Instruction>(Val))
      return nullptr;

    for (auto *MA : *Stmt) {
      if (!MA->isRead())
        continue;
      if (MA->getAccessValue() != Val)
        continue;

      return MA;
    }

    return nullptr;
  }

  /// Return a write access that occurs between @p From and @p To.
  ///
  /// In region statements the order is ignored because we cannot predict it.
  ///
  /// @param Stmt    Statement of both writes.
  /// @param From    Start looking after this access.
  /// @param To      Stop looking at this access, with the access itself.
  /// @param Targets Look for an access that may wrote to one of these elements.
  ///
  /// @return A write access between @p From and @p To that writes to at least
  ///         one element in @p Targets.
  MemoryAccess *hasWriteBetween(ScopStmt *Stmt, MemoryAccess *From,
                                MemoryAccess *To, isl::map Targets) {
    auto TargetsSpace = Targets.get_space();

    // In region statements the execution order of explicit accesses is
    // unknown, so every access counts as "in between": start immediately.
    bool Started = Stmt->isRegionStmt();
    auto Accesses = getAccessesInOrder(*Stmt);
    for (auto *Acc : Accesses) {
      if (Acc->isLatestScalarKind())
        continue;

      if (Stmt->isBlockStmt() && From == Acc) {
        assert(!Started);
        Started = true;
        continue;
      }
      if (Stmt->isBlockStmt() && To == Acc) {
        assert(Started);
        return nullptr;
      }
      if (!Started)
        continue;

      if (!Acc->isWrite())
        continue;

      isl::map AccRel = Acc->getAccessRelation();
      auto AccRelSpace = AccRel.get_space();

      // Spaces being different means that they access different arrays.
      if (!TargetsSpace.has_equal_tuples(AccRelSpace))
        continue;

      AccRel = AccRel.intersect_domain(give(Acc->getStatement()->getDomain()));
      AccRel = AccRel.intersect_params(give(S->getContext()));
      auto CommonElt = Targets.intersect(AccRel);
      if (!CommonElt.is_empty())
        return Acc;
    }
    assert(Stmt->isRegionStmt() &&
           "To must be encountered in block statements");
    return nullptr;
  }

  /// Remove writes that are overwritten unconditionally later in the same
  /// statement.
  ///
  /// There must be no read of the same value between the write (that is to be
  /// removed) and the overwrite.
  void removeOverwrites() {
    for (auto &Stmt : *S) {
      auto Domain = give(Stmt.getDomain());
      // Elements that some later must-write of this statement overwrites.
      isl::union_map WillBeOverwritten =
          isl::union_map::empty(give(S->getParamSpace()));

      SmallVector<MemoryAccess *, 32> Accesses(getAccessesInOrder(Stmt));

      // Iterate in reverse order, so the overwrite comes before the write that
      // is to be removed.
      for (auto *MA : reverse(Accesses)) {

        // In region statements, the explicit accesses can be in blocks that are
        // can be executed in any order. We therefore process only the implicit
        // writes and stop after that.
        if (Stmt.isRegionStmt() && isExplicitAccess(MA))
          break;

        auto AccRel = MA->getAccessRelation();
        AccRel = AccRel.intersect_domain(Domain);
        AccRel = AccRel.intersect_params(give(S->getContext()));

        // If a value is read in-between, do not consider it as overwritten.
        if (MA->isRead()) {
          // Invalidate all overwrites for the array it accesses to avoid too
          // complex isl sets.
          isl::map AccRelUniv = isl::map::universe(AccRel.get_space());
          WillBeOverwritten = WillBeOverwritten.subtract(AccRelUniv);
          continue;
        }

        // If all of a write's elements are overwritten, remove it.
        isl::union_map AccRelUnion = AccRel;
        if (AccRelUnion.is_subset(WillBeOverwritten)) {
          DEBUG(dbgs() << "Removing " << MA
                       << " which will be overwritten anyway\n");

          Stmt.removeSingleMemoryAccess(MA);
          OverwritesRemoved++;
          TotalOverwritesRemoved++;
        }
        // NOTE(review): MA is still dereferenced below even after it may have
        // been passed to removeSingleMemoryAccess(); this relies on the
        // MemoryAccess object outliving its removal from the statement --
        // confirm.

        // Unconditional writes overwrite other values.
        if (MA->isMustWrite()) {
          // Avoid too complex isl sets. If necessary, throw away some of the
          // knowledge.
          WillBeOverwritten =
              underapproximatedAddMap(WillBeOverwritten, AccRel);
        }
      }
    }
  }

  /// Combine writes that write the same value if possible.
  ///
  /// This function is able to combine:
  /// - Partial writes with disjoint domain.
  /// - Writes that write to the same array element.
  ///
  /// In all cases, both writes must write the same values.
  void coalesceWrites() {
    for (auto &Stmt : *S) {
      isl::set Domain =
          give(Stmt.getDomain()).intersect_params(give(S->getContext()));

      // We let isl do the lookup for the same-value condition. For this, we
      // wrap llvm::Value into an isl::set such that isl can do the lookup in
      // its hashtable implementation. llvm::Values are only compared within a
      // ScopStmt, so the map can be local to this scope. TODO: Refactor with
      // ZoneAlgorithm::makeValueSet()
      SmallDenseMap<Value *, isl::set> ValueSets;
      auto makeValueSet = [&ValueSets, this](Value *V) -> isl::set {
        assert(V);
        isl::set &Result = ValueSets[V];
        if (Result.is_null()) {
          isl_ctx *Ctx = S->getIslCtx();
          std::string Name =
              getIslCompatibleName("Val", V, ValueSets.size() - 1,
                                   std::string(), UseInstructionNames);
          isl::id Id = give(isl_id_alloc(Ctx, Name.c_str(), V));
          Result = isl::set::universe(
              isl::space(Ctx, 0, 0).set_tuple_id(isl::dim::set, Id));
        }
        return Result;
      };

      // List of all eligible (for coalescing) writes of the future.
      // { [Domain[] -> Element[]] -> [Value[] -> MemoryAccess[]] }
      isl::union_map FutureWrites =
          isl::union_map::empty(give(S->getParamSpace()));

      // Iterate over accesses from the last to the first.
      SmallVector<MemoryAccess *, 32> Accesses(getAccessesInOrder(Stmt));
      for (MemoryAccess *MA : reverse(Accesses)) {
        // In region statements, the explicit accesses can be in blocks that can
        // be executed in any order. We therefore process only the implicit
        // writes and stop after that.
        if (Stmt.isRegionStmt() && isExplicitAccess(MA))
          break;

        // { Domain[] -> Element[] }
        isl::map AccRel =
            MA->getLatestAccessRelation().intersect_domain(Domain);

        // { [Domain[] -> Element[]] }
        isl::set AccRelWrapped = AccRel.wrap();

        // { Value[] }
        isl::set ValSet;

        if (MA->isMustWrite() && (MA->isOriginalScalarKind() ||
                                  isa<StoreInst>(MA->getAccessInstruction()))) {
          // Normally, tryGetValueStored() should be used to determine which
          // element is written, but it can return nullptr; For PHI accesses,
          // getAccessValue() returns the PHI instead of the PHI's incoming
          // value. In this case, where we only compare values of a single
          // statement, this is fine, because within a statement, a PHI in a
          // successor block has always the same value as the incoming write. We
          // still preferably use the incoming value directly so we also catch
          // direct uses of that.
          Value *StoredVal = MA->tryGetValueStored();
          if (!StoredVal)
            StoredVal = MA->getAccessValue();
          ValSet = makeValueSet(StoredVal);

          // { Domain[] }
          isl::set AccDomain = AccRel.domain();

          // Parts of the statement's domain that is not written by this access.
          isl::set UndefDomain = Domain.subtract(AccDomain);

          // { Element[] }
          isl::set ElementUniverse =
              isl::set::universe(AccRel.get_space().range());

          // { Domain[] -> Element[] }
          isl::map UndefAnything =
              isl::map::from_domain_and_range(UndefDomain, ElementUniverse);

          // We are looking a compatible write access. The other write can
          // access these elements...
          isl::map AllowedAccesses = AccRel.unite(UndefAnything);

          // ... and must write the same value.
          // { [Domain[] -> Element[]] -> Value[] }
          isl::map Filter =
              isl::map::from_domain_and_range(AllowedAccesses.wrap(), ValSet);

          // Lookup future write that fulfills these conditions.
          // { [[Domain[] -> Element[]] -> Value[]] -> MemoryAccess[] }
          isl::union_map Filtered =
              FutureWrites.uncurry().intersect_domain(Filter.wrap());

          // Iterate through the candidates.
          Filtered.foreach_map([&, this](isl::map Map) -> isl::stat {
            // The candidate MemoryAccess was stashed as the user pointer of
            // the output tuple's id when it was added to FutureWrites.
            MemoryAccess *OtherMA = (MemoryAccess *)Map.get_space()
                                        .get_tuple_id(isl::dim::out)
                                        .get_user();

            isl::map OtherAccRel =
                OtherMA->getLatestAccessRelation().intersect_domain(Domain);

            // The filter only guaranteed that some of OtherMA's accessed
            // elements are allowed. Verify that it only accesses allowed
            // elements. Otherwise, continue with the next candidate.
            if (!OtherAccRel.is_subset(AllowedAccesses).is_true())
              return isl::stat::ok;

            // The combined access relation.
            // { Domain[] -> Element[] }
            isl::map NewAccRel = AccRel.unite(OtherAccRel);
            simplify(NewAccRel);

            // Carry out the coalescing.
            Stmt.removeSingleMemoryAccess(MA);
            OtherMA->setNewAccessRelation(NewAccRel.copy());

            // We removed MA, OtherMA takes its role.
            MA = OtherMA;

            TotalWritesCoalesced++;
            WritesCoalesced++;

            // Don't look for more candidates.
            return isl::stat::error;
          });
        }

        // Two writes cannot be coalesced if there is another access (to some of
        // the written elements) between them. Remove all visited write accesses
        // from the list of eligible writes. Don't just remove the accessed
        // elements, but any MemoryAccess that touches any of the invalidated
        // elements.
        SmallPtrSet<MemoryAccess *, 2> TouchedAccesses;
        FutureWrites.intersect_domain(AccRelWrapped)
            .foreach_map([&TouchedAccesses](isl::map Map) -> isl::stat {
              MemoryAccess *MA = (MemoryAccess *)Map.get_space()
                                     .range()
                                     .unwrap()
                                     .get_tuple_id(isl::dim::out)
                                     .get_user();
              TouchedAccesses.insert(MA);
              return isl::stat::ok;
            });
        isl::union_map NewFutureWrites =
            isl::union_map::empty(FutureWrites.get_space());
        FutureWrites.foreach_map([&TouchedAccesses, &NewFutureWrites](
                                     isl::map FutureWrite) -> isl::stat {
          MemoryAccess *MA = (MemoryAccess *)FutureWrite.get_space()
                                 .range()
                                 .unwrap()
                                 .get_tuple_id(isl::dim::out)
                                 .get_user();
          if (!TouchedAccesses.count(MA))
            NewFutureWrites = NewFutureWrites.add_map(FutureWrite);
          return isl::stat::ok;
        });
        FutureWrites = NewFutureWrites;

        // Register this write (possibly already merged with a future one) as
        // a coalescing candidate for earlier writes of the same value.
        if (MA->isMustWrite() && !ValSet.is_null()) {
          // { MemoryAccess[] }
          auto AccSet =
              isl::set::universe(isl::space(S->getIslCtx(), 0, 0)
                                     .set_tuple_id(isl::dim::set, MA->getId()));

          // { Val[] -> MemoryAccess[] }
          isl::map ValAccSet = isl::map::from_domain_and_range(ValSet, AccSet);

          // { [Domain[] -> Element[]] -> [Value[] -> MemoryAccess[]] }
          isl::map AccRelValAcc =
              isl::map::from_domain_and_range(AccRelWrapped, ValAccSet.wrap());
          FutureWrites = FutureWrites.add_map(AccRelValAcc);
        }
      }
    }
  }

  /// Remove writes that just write the same value already stored in the
  /// element.
  void removeRedundantWrites() {
    // Delay actual removal to not invalidate iterators.
    SmallVector<MemoryAccess *, 8> StoresToRemove;

    for (auto &Stmt : *S) {
      for (auto *WA : Stmt) {
        if (!WA->isMustWrite())
          continue;
        if (!WA->isLatestArrayKind())
          continue;
        if (!isa<StoreInst>(WA->getAccessInstruction()) &&
            !WA->isOriginalScalarKind())
          continue;

        llvm::Value *ReadingValue = WA->tryGetValueStored();

        if (!ReadingValue)
          continue;

        auto RA = getReadAccessForValue(&Stmt, ReadingValue);
        if (!RA)
          continue;
        if (!RA->isLatestArrayKind())
          continue;

        // The load and the store must access exactly the same elements
        // (restricted to the statement domain and context) to be redundant.
        auto WARel = WA->getLatestAccessRelation();
        WARel = WARel.intersect_domain(give(WA->getStatement()->getDomain()));
        WARel = WARel.intersect_params(give(S->getContext()));
        auto RARel = RA->getLatestAccessRelation();
        RARel = RARel.intersect_domain(give(RA->getStatement()->getDomain()));
        RARel = RARel.intersect_params(give(S->getContext()));

        if (!RARel.is_equal(WARel)) {
          PairUnequalAccRels++;
          DEBUG(dbgs() << "Not cleaning up " << WA
                       << " because of unequal access relations:\n");
          DEBUG(dbgs() << "      RA: " << RARel << "\n");
          DEBUG(dbgs() << "      WA: " << WARel << "\n");
          continue;
        }

        if (auto *Conflicting = hasWriteBetween(&Stmt, RA, WA, WARel)) {
          (void)Conflicting;
          InBetweenStore++;
          DEBUG(dbgs() << "Not cleaning up " << WA
                       << " because there is another store to the same element "
                          "between\n");
          DEBUG(Conflicting->print(dbgs()));
          continue;
        }

        StoresToRemove.push_back(WA);
      }
    }

    for (auto *WA : StoresToRemove) {
      auto Stmt = WA->getStatement();
      auto AccRel = WA->getAccessRelation();
      auto AccVal = WA->getAccessValue();

      DEBUG(dbgs() << "Cleanup of " << WA << ":\n");
      DEBUG(dbgs() << "      Scalar: " << *AccVal << "\n");
      DEBUG(dbgs() << "      AccRel: " << AccRel << "\n");
      (void)AccVal;
      (void)AccRel;

      Stmt->removeSingleMemoryAccess(WA);

      RedundantWritesRemoved++;
      TotalRedundantWritesRemoved++;
    }
  }

  /// Remove statements without side effects.
  void removeUnnecessaryStmts() {
    auto NumStmtsBefore = S->getSize();
    S->simplifySCoP(true);
    assert(NumStmtsBefore >= S->getSize());
    StmtsRemoved = NumStmtsBefore - S->getSize();
    DEBUG(dbgs() << "Removed " << StmtsRemoved << " (of " << NumStmtsBefore
                 << ") statements\n");
    TotalStmtsRemoved += StmtsRemoved;
  }

  /// Remove accesses that have an empty domain.
  void removeEmptyPartialAccesses() {
    for (ScopStmt &Stmt : *S) {
      // Defer the actual removal to not invalidate iterators.
      SmallVector<MemoryAccess *, 8> DeferredRemove;

      for (MemoryAccess *MA : Stmt) {
        if (!MA->isWrite())
          continue;

        isl::map AccRel = MA->getAccessRelation();
        if (!AccRel.is_empty().is_true())
          continue;

        DEBUG(dbgs() << "Removing " << MA
                     << " because it's a partial access that never occurs\n");
        DeferredRemove.push_back(MA);
      }

      for (MemoryAccess *MA : DeferredRemove) {
        Stmt.removeSingleMemoryAccess(MA);
        EmptyPartialAccessesRemoved++;
        TotalEmptyPartialAccessesRemoved++;
      }
    }
  }

  /// Mark all reachable instructions and access, and sweep those that are not
  /// reachable.
  void markAndSweep(LoopInfo *LI) {
    DenseSet<MemoryAccess *> UsedMA;
    DenseSet<VirtualInstruction> UsedInsts;

    // Get all reachable instructions and accesses.
    markReachable(S, LI, UsedInsts, UsedMA);

    // Remove all non-reachable accesses.
    // We need get all MemoryAccesses first, in order to not invalidate the
    // iterators when removing them.
    SmallVector<MemoryAccess *, 64> AllMAs;
    for (ScopStmt &Stmt : *S)
      AllMAs.append(Stmt.begin(), Stmt.end());

    for (MemoryAccess *MA : AllMAs) {
      if (UsedMA.count(MA))
        continue;
      DEBUG(dbgs() << "Removing " << MA << " because its value is not used\n");
      ScopStmt *Stmt = MA->getStatement();
      Stmt->removeSingleMemoryAccess(MA);

      DeadAccessesRemoved++;
      TotalDeadAccessesRemoved++;
    }

    // Remove all non-reachable instructions.
    for (ScopStmt &Stmt : *S) {
      if (!Stmt.isBlockStmt())
        continue;

      SmallVector<Instruction *, 32> AllInsts(Stmt.insts_begin(),
                                              Stmt.insts_end());
      SmallVector<Instruction *, 32> RemainInsts;

      for (Instruction *Inst : AllInsts) {
        auto It = UsedInsts.find({&Stmt, Inst});
        if (It == UsedInsts.end()) {
          DEBUG(dbgs() << "Removing "; Inst->print(dbgs());
                dbgs() << " because it is not used\n");
          DeadInstructionsRemoved++;
          TotalDeadInstructionsRemoved++;
          continue;
        }

        RemainInsts.push_back(Inst);

        // If instructions appear multiple times, keep only the first.
        UsedInsts.erase(It);
      }

      // Set the new instruction list to be only those we did not remove.
      Stmt.setInstructions(RemainInsts);
    }
  }

  /// Print simplification statistics to @p OS.
  void printStatistics(llvm::raw_ostream &OS, int Indent = 0) const {
    OS.indent(Indent) << "Statistics {\n";
    OS.indent(Indent + 4) << "Overwrites removed: " << OverwritesRemoved
                          << '\n';
    OS.indent(Indent + 4) << "Partial writes coalesced: " << WritesCoalesced
                          << "\n";
    OS.indent(Indent + 4) << "Redundant writes removed: "
                          << RedundantWritesRemoved << "\n";
    OS.indent(Indent + 4) << "Accesses with empty domains removed: "
                          << EmptyPartialAccessesRemoved << "\n";
    OS.indent(Indent + 4) << "Dead accesses removed: " << DeadAccessesRemoved
                          << '\n';
    OS.indent(Indent + 4) << "Dead instructions removed: "
                          << DeadInstructionsRemoved << '\n';
    OS.indent(Indent + 4) << "Stmts removed: " << StmtsRemoved << "\n";
    OS.indent(Indent) << "}\n";
  }

  /// Print the current state of all MemoryAccesses to @p OS.
  void printAccesses(llvm::raw_ostream &OS, int Indent = 0) const {
    OS.indent(Indent) << "After accesses {\n";
    for (auto &Stmt : *S) {
      OS.indent(Indent + 4) << Stmt.getBaseName() << "\n";
      for (auto *MA : Stmt)
        MA->print(OS);
    }
    OS.indent(Indent) << "}\n";
  }

public:
  static char ID;
  explicit Simplify() : ScopPass(ID) {}

  virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequiredTransitive<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesAll();
  }

  virtual bool runOnScop(Scop &S) override {
    // Reset statistics of last processed SCoP.
    releaseMemory();
    assert(!isModified());

    // Prepare processing of this SCoP.
    this->S = &S;
    ScopsProcessed++;

    DEBUG(dbgs() << "Removing partial writes that never happen...\n");
    removeEmptyPartialAccesses();

    DEBUG(dbgs() << "Removing overwrites...\n");
    removeOverwrites();

    DEBUG(dbgs() << "Coalesce partial writes...\n");
    coalesceWrites();

    DEBUG(dbgs() << "Removing redundant writes...\n");
    removeRedundantWrites();

    DEBUG(dbgs() << "Cleanup unused accesses...\n");
    LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    markAndSweep(LI);

    DEBUG(dbgs() << "Removing statements without side effects...\n");
    removeUnnecessaryStmts();

    if (isModified())
      ScopsModified++;
    DEBUG(dbgs() << "\nFinal Scop:\n");
    DEBUG(dbgs() << S);

    // The SCoP representation was changed in place; nothing outside it.
    return false;
  }

  virtual void printScop(raw_ostream &OS, Scop &S) const override {
    assert(&S == this->S &&
           "Can only print analysis for the last processed SCoP");
    printStatistics(OS);

    if (!isModified()) {
      OS << "SCoP could not be simplified\n";
      return;
    }
    printAccesses(OS);
  }

  virtual void releaseMemory() override {
    S = nullptr;

    OverwritesRemoved = 0;
    WritesCoalesced = 0;
    RedundantWritesRemoved = 0;
    EmptyPartialAccessesRemoved = 0;
    DeadAccessesRemoved = 0;
    DeadInstructionsRemoved = 0;
    StmtsRemoved = 0;
  }
};
// Pass identification; its address (not value) is what LLVM uses as the ID.
char Simplify::ID;
} // anonymous namespace
758 Pass *polly::createSimplifyPass() { return new Simplify(); }
// Register the pass with LLVM's pass registry under the command-line name
// "polly-simplify", declaring its dependency on LoopInfo.
INITIALIZE_PASS_BEGIN(Simplify, "polly-simplify", "Polly - Simplify", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(Simplify, "polly-simplify", "Polly - Simplify", false,
                    false)