1 //===------ Simplify.cpp ----------------------------------------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Simplify a SCoP by removing unnecessary statements and accesses.
12 //===----------------------------------------------------------------------===//
14 #include "polly/Simplify.h"
15 #include "polly/ScopInfo.h"
16 #include "polly/ScopPass.h"
17 #include "polly/Support/GICHelper.h"
18 #include "polly/Support/ISLOStream.h"
19 #include "polly/Support/ISLTools.h"
20 #include "polly/Support/VirtualInstruction.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Support/Debug.h"
23 #define DEBUG_TYPE "polly-simplify"
26 using namespace polly
;
/// Number of max disjuncts we allow in removeOverwrites(). This is to avoid
/// that the analysis of accesses in a statement is becoming too complex. Chosen
/// to be relatively small because all the common cases should access only few
/// array elements per statement.
static int const SimplifyMaxDisjuncts = 4;
36 STATISTIC(ScopsProcessed
, "Number of SCoPs processed");
37 STATISTIC(ScopsModified
, "Number of SCoPs simplified");
39 STATISTIC(PairUnequalAccRels
, "Number of Load-Store pairs NOT removed because "
40 "of different access relations");
41 STATISTIC(InBetweenStore
, "Number of Load-Store pairs NOT removed because "
42 "there is another store between them");
43 STATISTIC(TotalOverwritesRemoved
, "Number of removed overwritten writes");
44 STATISTIC(TotalWritesCoalesced
, "Number of writes coalesced with another");
45 STATISTIC(TotalRedundantWritesRemoved
,
46 "Number of writes of same value removed in any SCoP");
47 STATISTIC(TotalEmptyPartialAccessesRemoved
,
48 "Number of empty partial accesses removed");
49 STATISTIC(TotalDeadAccessesRemoved
, "Number of dead accesses removed");
50 STATISTIC(TotalDeadInstructionsRemoved
,
51 "Number of unused instructions removed");
52 STATISTIC(TotalStmtsRemoved
, "Number of statements removed in any SCoP");
54 static bool isImplicitRead(MemoryAccess
*MA
) {
55 return MA
->isRead() && MA
->isOriginalScalarKind();
58 static bool isExplicitAccess(MemoryAccess
*MA
) {
59 return MA
->isOriginalArrayKind();
62 static bool isImplicitWrite(MemoryAccess
*MA
) {
63 return MA
->isWrite() && MA
->isOriginalScalarKind();
66 /// Like isl::union_map::add_map, but may also return an underapproximated
67 /// result if getting too complex.
69 /// This is implemented by adding disjuncts to the results until the limit is
71 static isl::union_map
underapproximatedAddMap(isl::union_map UMap
,
73 if (UMap
.is_null() || Map
.is_null())
76 isl::map PrevMap
= UMap
.extract_map(Map
.get_space());
78 // Fast path: If known that we cannot exceed the disjunct limit, just add
80 if (isl_map_n_basic_map(PrevMap
.get()) + isl_map_n_basic_map(Map
.get()) <=
82 return UMap
.add_map(Map
);
84 isl::map Result
= isl::map::empty(PrevMap
.get_space());
85 PrevMap
.foreach_basic_map([&Result
](isl::basic_map BMap
) -> isl::stat
{
86 if (isl_map_n_basic_map(Result
.get()) > SimplifyMaxDisjuncts
)
87 return isl::stat::error
;
88 Result
= Result
.unite(BMap
);
91 Map
.foreach_basic_map([&Result
](isl::basic_map BMap
) -> isl::stat
{
92 if (isl_map_n_basic_map(Result
.get()) > SimplifyMaxDisjuncts
)
93 return isl::stat::error
;
94 Result
= Result
.unite(BMap
);
98 isl::union_map UResult
=
99 UMap
.subtract(isl::map::universe(PrevMap
.get_space()));
100 UResult
.add_map(Result
);
105 /// Return a vector that contains MemoryAccesses in the order in
106 /// which they are executed.
109 /// - Implicit reads (BlockGenerator::generateScalarLoads)
110 /// - Explicit reads and writes (BlockGenerator::generateArrayLoad,
111 /// BlockGenerator::generateArrayStore)
112 /// - In block statements, the accesses are in order in which their
113 /// instructions are executed.
114 /// - In region statements, that order of execution is not predictable at
116 /// - Implicit writes (BlockGenerator::generateScalarStores)
117 /// The order in which implicit writes are executed relative to each other is
119 static SmallVector
<MemoryAccess
*, 32> getAccessesInOrder(ScopStmt
&Stmt
) {
121 SmallVector
<MemoryAccess
*, 32> Accesses
;
123 for (MemoryAccess
*MemAcc
: Stmt
)
124 if (isImplicitRead(MemAcc
))
125 Accesses
.push_back(MemAcc
);
127 for (MemoryAccess
*MemAcc
: Stmt
)
128 if (isExplicitAccess(MemAcc
))
129 Accesses
.push_back(MemAcc
);
131 for (MemoryAccess
*MemAcc
: Stmt
)
132 if (isImplicitWrite(MemAcc
))
133 Accesses
.push_back(MemAcc
);
138 class Simplify
: public ScopPass
{
140 /// The last/current SCoP that is/has been processed.
143 /// Number of writes that are overwritten anyway.
144 int OverwritesRemoved
= 0;
146 /// Number of combined writes.
147 int WritesCoalesced
= 0;
149 /// Number of redundant writes removed from this SCoP.
150 int RedundantWritesRemoved
= 0;
152 /// Number of writes with empty access domain removed.
153 int EmptyPartialAccessesRemoved
= 0;
155 /// Number of unused accesses removed from this SCoP.
156 int DeadAccessesRemoved
= 0;
158 /// Number of unused instructions removed from this SCoP.
159 int DeadInstructionsRemoved
= 0;
161 /// Number of unnecessary statements removed from the SCoP.
162 int StmtsRemoved
= 0;
164 /// Return whether at least one simplification has been applied.
165 bool isModified() const {
166 return OverwritesRemoved
> 0 || WritesCoalesced
> 0 ||
167 RedundantWritesRemoved
> 0 || EmptyPartialAccessesRemoved
> 0 ||
168 DeadAccessesRemoved
> 0 || DeadInstructionsRemoved
> 0 ||
172 MemoryAccess
*getReadAccessForValue(ScopStmt
*Stmt
, llvm::Value
*Val
) {
173 if (!isa
<Instruction
>(Val
))
176 for (auto *MA
: *Stmt
) {
179 if (MA
->getAccessValue() != Val
)
188 /// Return a write access that occurs between @p From and @p To.
190 /// In region statements the order is ignored because we cannot predict it.
192 /// @param Stmt Statement of both writes.
193 /// @param From Start looking after this access.
194 /// @param To Stop looking at this access, with the access itself.
195 /// @param Targets Look for an access that may wrote to one of these elements.
197 /// @return A write access between @p From and @p To that writes to at least
198 /// one element in @p Targets.
199 MemoryAccess
*hasWriteBetween(ScopStmt
*Stmt
, MemoryAccess
*From
,
200 MemoryAccess
*To
, isl::map Targets
) {
201 auto TargetsSpace
= Targets
.get_space();
203 bool Started
= Stmt
->isRegionStmt();
204 auto Accesses
= getAccessesInOrder(*Stmt
);
205 for (auto *Acc
: Accesses
) {
206 if (Acc
->isLatestScalarKind())
209 if (Stmt
->isBlockStmt() && From
== Acc
) {
214 if (Stmt
->isBlockStmt() && To
== Acc
) {
224 isl::map AccRel
= Acc
->getAccessRelation();
225 auto AccRelSpace
= AccRel
.get_space();
227 // Spaces being different means that they access different arrays.
228 if (!TargetsSpace
.has_equal_tuples(AccRelSpace
))
231 AccRel
= AccRel
.intersect_domain(give(Acc
->getStatement()->getDomain()));
232 AccRel
= AccRel
.intersect_params(give(S
->getContext()));
233 auto CommonElt
= Targets
.intersect(AccRel
);
234 if (!CommonElt
.is_empty())
237 assert(Stmt
->isRegionStmt() &&
238 "To must be encountered in block statements");
242 /// Remove writes that are overwritten unconditionally later in the same
245 /// There must be no read of the same value between the write (that is to be
246 /// removed) and the overwrite.
247 void removeOverwrites() {
248 for (auto &Stmt
: *S
) {
249 auto Domain
= give(Stmt
.getDomain());
250 isl::union_map WillBeOverwritten
=
251 isl::union_map::empty(give(S
->getParamSpace()));
253 SmallVector
<MemoryAccess
*, 32> Accesses(getAccessesInOrder(Stmt
));
255 // Iterate in reverse order, so the overwrite comes before the write that
257 for (auto *MA
: reverse(Accesses
)) {
259 // In region statements, the explicit accesses can be in blocks that are
260 // can be executed in any order. We therefore process only the implicit
261 // writes and stop after that.
262 if (Stmt
.isRegionStmt() && isExplicitAccess(MA
))
265 auto AccRel
= MA
->getAccessRelation();
266 AccRel
= AccRel
.intersect_domain(Domain
);
267 AccRel
= AccRel
.intersect_params(give(S
->getContext()));
269 // If a value is read in-between, do not consider it as overwritten.
271 // Invalidate all overwrites for the array it accesses to avoid too
273 isl::map AccRelUniv
= isl::map::universe(AccRel
.get_space());
274 WillBeOverwritten
= WillBeOverwritten
.subtract(AccRelUniv
);
278 // If all of a write's elements are overwritten, remove it.
279 isl::union_map AccRelUnion
= AccRel
;
280 if (AccRelUnion
.is_subset(WillBeOverwritten
)) {
281 DEBUG(dbgs() << "Removing " << MA
282 << " which will be overwritten anyway\n");
284 Stmt
.removeSingleMemoryAccess(MA
);
286 TotalOverwritesRemoved
++;
289 // Unconditional writes overwrite other values.
290 if (MA
->isMustWrite()) {
291 // Avoid too complex isl sets. If necessary, throw away some of the
294 underapproximatedAddMap(WillBeOverwritten
, AccRel
);
300 /// Combine writes that write the same value if possible.
302 /// This function is able to combine:
303 /// - Partial writes with disjoint domain.
304 /// - Writes that write to the same array element.
306 /// In all cases, both writes must write the same values.
307 void coalesceWrites() {
308 for (auto &Stmt
: *S
) {
310 give(Stmt
.getDomain()).intersect_params(give(S
->getContext()));
312 // We let isl do the lookup for the same-value condition. For this, we
313 // wrap llvm::Value into an isl::set such that isl can do the lookup in
314 // its hashtable implementation. llvm::Values are only compared within a
315 // ScopStmt, so the map can be local to this scope. TODO: Refactor with
316 // ZoneAlgorithm::makeValueSet()
317 SmallDenseMap
<Value
*, isl::set
> ValueSets
;
318 auto makeValueSet
= [&ValueSets
, this](Value
*V
) -> isl::set
{
320 isl::set
&Result
= ValueSets
[V
];
321 if (Result
.is_null()) {
322 isl_ctx
*Ctx
= S
->getIslCtx();
324 getIslCompatibleName("Val", V
, ValueSets
.size() - 1,
325 std::string(), UseInstructionNames
);
326 isl::id Id
= give(isl_id_alloc(Ctx
, Name
.c_str(), V
));
327 Result
= isl::set::universe(
328 isl::space(Ctx
, 0, 0).set_tuple_id(isl::dim::set
, Id
));
333 // List of all eligible (for coalescing) writes of the future.
334 // { [Domain[] -> Element[]] -> [Value[] -> MemoryAccess[]] }
335 isl::union_map FutureWrites
=
336 isl::union_map::empty(give(S
->getParamSpace()));
338 // Iterate over accesses from the last to the first.
339 SmallVector
<MemoryAccess
*, 32> Accesses(getAccessesInOrder(Stmt
));
340 for (MemoryAccess
*MA
: reverse(Accesses
)) {
341 // In region statements, the explicit accesses can be in blocks that can
342 // be executed in any order. We therefore process only the implicit
343 // writes and stop after that.
344 if (Stmt
.isRegionStmt() && isExplicitAccess(MA
))
347 // { Domain[] -> Element[] }
349 MA
->getLatestAccessRelation().intersect_domain(Domain
);
351 // { [Domain[] -> Element[]] }
352 isl::set AccRelWrapped
= AccRel
.wrap();
357 if (MA
->isMustWrite() && (MA
->isOriginalScalarKind() ||
358 isa
<StoreInst
>(MA
->getAccessInstruction()))) {
359 // Normally, tryGetValueStored() should be used to determine which
360 // element is written, but it can return nullptr; For PHI accesses,
361 // getAccessValue() returns the PHI instead of the PHI's incoming
362 // value. In this case, where we only compare values of a single
363 // statement, this is fine, because within a statement, a PHI in a
364 // successor block has always the same value as the incoming write. We
365 // still preferably use the incoming value directly so we also catch
366 // direct uses of that.
367 Value
*StoredVal
= MA
->tryGetValueStored();
369 StoredVal
= MA
->getAccessValue();
370 ValSet
= makeValueSet(StoredVal
);
373 isl::set AccDomain
= AccRel
.domain();
375 // Parts of the statement's domain that is not written by this access.
376 isl::set UndefDomain
= Domain
.subtract(AccDomain
);
379 isl::set ElementUniverse
=
380 isl::set::universe(AccRel
.get_space().range());
382 // { Domain[] -> Element[] }
383 isl::map UndefAnything
=
384 isl::map::from_domain_and_range(UndefDomain
, ElementUniverse
);
386 // We are looking a compatible write access. The other write can
387 // access these elements...
388 isl::map AllowedAccesses
= AccRel
.unite(UndefAnything
);
390 // ... and must write the same value.
391 // { [Domain[] -> Element[]] -> Value[] }
393 isl::map::from_domain_and_range(AllowedAccesses
.wrap(), ValSet
);
395 // Lookup future write that fulfills these conditions.
396 // { [[Domain[] -> Element[]] -> Value[]] -> MemoryAccess[] }
397 isl::union_map Filtered
=
398 FutureWrites
.uncurry().intersect_domain(Filter
.wrap());
400 // Iterate through the candidates.
401 Filtered
.foreach_map([&, this](isl::map Map
) -> isl::stat
{
402 MemoryAccess
*OtherMA
= (MemoryAccess
*)Map
.get_space()
403 .get_tuple_id(isl::dim::out
)
406 isl::map OtherAccRel
=
407 OtherMA
->getLatestAccessRelation().intersect_domain(Domain
);
409 // The filter only guaranteed that some of OtherMA's accessed
410 // elements are allowed. Verify that it only accesses allowed
411 // elements. Otherwise, continue with the next candidate.
412 if (!OtherAccRel
.is_subset(AllowedAccesses
).is_true())
413 return isl::stat::ok
;
415 // The combined access relation.
416 // { Domain[] -> Element[] }
417 isl::map NewAccRel
= AccRel
.unite(OtherAccRel
);
420 // Carry out the coalescing.
421 Stmt
.removeSingleMemoryAccess(MA
);
422 OtherMA
->setNewAccessRelation(NewAccRel
.copy());
424 // We removed MA, OtherMA takes its role.
427 TotalWritesCoalesced
++;
430 // Don't look for more candidates.
431 return isl::stat::error
;
435 // Two writes cannot be coalesced if there is another access (to some of
436 // the written elements) between them. Remove all visited write accesses
437 // from the list of eligible writes. Don't just remove the accessed
438 // elements, but any MemoryAccess that touches any of the invalidated
440 SmallPtrSet
<MemoryAccess
*, 2> TouchedAccesses
;
441 FutureWrites
.intersect_domain(AccRelWrapped
)
442 .foreach_map([&TouchedAccesses
](isl::map Map
) -> isl::stat
{
443 MemoryAccess
*MA
= (MemoryAccess
*)Map
.get_space()
446 .get_tuple_id(isl::dim::out
)
448 TouchedAccesses
.insert(MA
);
449 return isl::stat::ok
;
451 isl::union_map NewFutureWrites
=
452 isl::union_map::empty(FutureWrites
.get_space());
453 FutureWrites
.foreach_map([&TouchedAccesses
, &NewFutureWrites
](
454 isl::map FutureWrite
) -> isl::stat
{
455 MemoryAccess
*MA
= (MemoryAccess
*)FutureWrite
.get_space()
458 .get_tuple_id(isl::dim::out
)
460 if (!TouchedAccesses
.count(MA
))
461 NewFutureWrites
= NewFutureWrites
.add_map(FutureWrite
);
462 return isl::stat::ok
;
464 FutureWrites
= NewFutureWrites
;
466 if (MA
->isMustWrite() && !ValSet
.is_null()) {
467 // { MemoryAccess[] }
469 isl::set::universe(isl::space(S
->getIslCtx(), 0, 0)
470 .set_tuple_id(isl::dim::set
, MA
->getId()));
472 // { Val[] -> MemoryAccess[] }
473 isl::map ValAccSet
= isl::map::from_domain_and_range(ValSet
, AccSet
);
475 // { [Domain[] -> Element[]] -> [Value[] -> MemoryAccess[]] }
476 isl::map AccRelValAcc
=
477 isl::map::from_domain_and_range(AccRelWrapped
, ValAccSet
.wrap());
478 FutureWrites
= FutureWrites
.add_map(AccRelValAcc
);
484 /// Remove writes that just write the same value already stored in the
486 void removeRedundantWrites() {
487 // Delay actual removal to not invalidate iterators.
488 SmallVector
<MemoryAccess
*, 8> StoresToRemove
;
490 for (auto &Stmt
: *S
) {
491 for (auto *WA
: Stmt
) {
492 if (!WA
->isMustWrite())
494 if (!WA
->isLatestArrayKind())
496 if (!isa
<StoreInst
>(WA
->getAccessInstruction()) &&
497 !WA
->isOriginalScalarKind())
500 llvm::Value
*ReadingValue
= WA
->tryGetValueStored();
505 auto RA
= getReadAccessForValue(&Stmt
, ReadingValue
);
508 if (!RA
->isLatestArrayKind())
511 auto WARel
= WA
->getLatestAccessRelation();
512 WARel
= WARel
.intersect_domain(give(WA
->getStatement()->getDomain()));
513 WARel
= WARel
.intersect_params(give(S
->getContext()));
514 auto RARel
= RA
->getLatestAccessRelation();
515 RARel
= RARel
.intersect_domain(give(RA
->getStatement()->getDomain()));
516 RARel
= RARel
.intersect_params(give(S
->getContext()));
518 if (!RARel
.is_equal(WARel
)) {
519 PairUnequalAccRels
++;
520 DEBUG(dbgs() << "Not cleaning up " << WA
521 << " because of unequal access relations:\n");
522 DEBUG(dbgs() << " RA: " << RARel
<< "\n");
523 DEBUG(dbgs() << " WA: " << WARel
<< "\n");
527 if (auto *Conflicting
= hasWriteBetween(&Stmt
, RA
, WA
, WARel
)) {
530 DEBUG(dbgs() << "Not cleaning up " << WA
531 << " because there is another store to the same element "
533 DEBUG(Conflicting
->print(dbgs()));
537 StoresToRemove
.push_back(WA
);
541 for (auto *WA
: StoresToRemove
) {
542 auto Stmt
= WA
->getStatement();
543 auto AccRel
= WA
->getAccessRelation();
544 auto AccVal
= WA
->getAccessValue();
546 DEBUG(dbgs() << "Cleanup of " << WA
<< ":\n");
547 DEBUG(dbgs() << " Scalar: " << *AccVal
<< "\n");
548 DEBUG(dbgs() << " AccRel: " << AccRel
<< "\n");
552 Stmt
->removeSingleMemoryAccess(WA
);
554 RedundantWritesRemoved
++;
555 TotalRedundantWritesRemoved
++;
559 /// Remove statements without side effects.
560 void removeUnnecessaryStmts() {
561 auto NumStmtsBefore
= S
->getSize();
562 S
->simplifySCoP(true);
563 assert(NumStmtsBefore
>= S
->getSize());
564 StmtsRemoved
= NumStmtsBefore
- S
->getSize();
565 DEBUG(dbgs() << "Removed " << StmtsRemoved
<< " (of " << NumStmtsBefore
566 << ") statements\n");
567 TotalStmtsRemoved
+= StmtsRemoved
;
570 /// Remove accesses that have an empty domain.
571 void removeEmptyPartialAccesses() {
572 for (ScopStmt
&Stmt
: *S
) {
573 // Defer the actual removal to not invalidate iterators.
574 SmallVector
<MemoryAccess
*, 8> DeferredRemove
;
576 for (MemoryAccess
*MA
: Stmt
) {
580 isl::map AccRel
= MA
->getAccessRelation();
581 if (!AccRel
.is_empty().is_true())
584 DEBUG(dbgs() << "Removing " << MA
585 << " because it's a partial access that never occurs\n");
586 DeferredRemove
.push_back(MA
);
589 for (MemoryAccess
*MA
: DeferredRemove
) {
590 Stmt
.removeSingleMemoryAccess(MA
);
591 EmptyPartialAccessesRemoved
++;
592 TotalEmptyPartialAccessesRemoved
++;
597 /// Mark all reachable instructions and access, and sweep those that are not
599 void markAndSweep(LoopInfo
*LI
) {
600 DenseSet
<MemoryAccess
*> UsedMA
;
601 DenseSet
<VirtualInstruction
> UsedInsts
;
603 // Get all reachable instructions and accesses.
604 markReachable(S
, LI
, UsedInsts
, UsedMA
);
606 // Remove all non-reachable accesses.
607 // We need get all MemoryAccesses first, in order to not invalidate the
608 // iterators when removing them.
609 SmallVector
<MemoryAccess
*, 64> AllMAs
;
610 for (ScopStmt
&Stmt
: *S
)
611 AllMAs
.append(Stmt
.begin(), Stmt
.end());
613 for (MemoryAccess
*MA
: AllMAs
) {
614 if (UsedMA
.count(MA
))
616 DEBUG(dbgs() << "Removing " << MA
<< " because its value is not used\n");
617 ScopStmt
*Stmt
= MA
->getStatement();
618 Stmt
->removeSingleMemoryAccess(MA
);
620 DeadAccessesRemoved
++;
621 TotalDeadAccessesRemoved
++;
624 // Remove all non-reachable instructions.
625 for (ScopStmt
&Stmt
: *S
) {
626 if (!Stmt
.isBlockStmt())
629 SmallVector
<Instruction
*, 32> AllInsts(Stmt
.insts_begin(),
631 SmallVector
<Instruction
*, 32> RemainInsts
;
633 for (Instruction
*Inst
: AllInsts
) {
634 auto It
= UsedInsts
.find({&Stmt
, Inst
});
635 if (It
== UsedInsts
.end()) {
636 DEBUG(dbgs() << "Removing "; Inst
->print(dbgs());
637 dbgs() << " because it is not used\n");
638 DeadInstructionsRemoved
++;
639 TotalDeadInstructionsRemoved
++;
643 RemainInsts
.push_back(Inst
);
645 // If instructions appear multiple times, keep only the first.
649 // Set the new instruction list to be only those we did not remove.
650 Stmt
.setInstructions(RemainInsts
);
654 /// Print simplification statistics to @p OS.
655 void printStatistics(llvm::raw_ostream
&OS
, int Indent
= 0) const {
656 OS
.indent(Indent
) << "Statistics {\n";
657 OS
.indent(Indent
+ 4) << "Overwrites removed: " << OverwritesRemoved
659 OS
.indent(Indent
+ 4) << "Partial writes coalesced: " << WritesCoalesced
661 OS
.indent(Indent
+ 4) << "Redundant writes removed: "
662 << RedundantWritesRemoved
<< "\n";
663 OS
.indent(Indent
+ 4) << "Accesses with empty domains removed: "
664 << EmptyPartialAccessesRemoved
<< "\n";
665 OS
.indent(Indent
+ 4) << "Dead accesses removed: " << DeadAccessesRemoved
667 OS
.indent(Indent
+ 4) << "Dead instructions removed: "
668 << DeadInstructionsRemoved
<< '\n';
669 OS
.indent(Indent
+ 4) << "Stmts removed: " << StmtsRemoved
<< "\n";
670 OS
.indent(Indent
) << "}\n";
673 /// Print the current state of all MemoryAccesses to @p OS.
674 void printAccesses(llvm::raw_ostream
&OS
, int Indent
= 0) const {
675 OS
.indent(Indent
) << "After accesses {\n";
676 for (auto &Stmt
: *S
) {
677 OS
.indent(Indent
+ 4) << Stmt
.getBaseName() << "\n";
678 for (auto *MA
: Stmt
)
681 OS
.indent(Indent
) << "}\n";
686 explicit Simplify() : ScopPass(ID
) {}
688 virtual void getAnalysisUsage(AnalysisUsage
&AU
) const override
{
689 AU
.addRequiredTransitive
<ScopInfoRegionPass
>();
690 AU
.addRequired
<LoopInfoWrapperPass
>();
691 AU
.setPreservesAll();
694 virtual bool runOnScop(Scop
&S
) override
{
695 // Reset statistics of last processed SCoP.
697 assert(!isModified());
699 // Prepare processing of this SCoP.
703 DEBUG(dbgs() << "Removing partial writes that never happen...\n");
704 removeEmptyPartialAccesses();
706 DEBUG(dbgs() << "Removing overwrites...\n");
709 DEBUG(dbgs() << "Coalesce partial writes...\n");
712 DEBUG(dbgs() << "Removing redundant writes...\n");
713 removeRedundantWrites();
715 DEBUG(dbgs() << "Cleanup unused accesses...\n");
716 LoopInfo
*LI
= &getAnalysis
<LoopInfoWrapperPass
>().getLoopInfo();
719 DEBUG(dbgs() << "Removing statements without side effects...\n");
720 removeUnnecessaryStmts();
724 DEBUG(dbgs() << "\nFinal Scop:\n");
730 virtual void printScop(raw_ostream
&OS
, Scop
&S
) const override
{
731 assert(&S
== this->S
&&
732 "Can only print analysis for the last processed SCoP");
736 OS
<< "SCoP could not be simplified\n";
742 virtual void releaseMemory() override
{
745 OverwritesRemoved
= 0;
747 RedundantWritesRemoved
= 0;
748 EmptyPartialAccessesRemoved
= 0;
749 DeadAccessesRemoved
= 0;
750 DeadInstructionsRemoved
= 0;
756 } // anonymous namespace
758 Pass
*polly::createSimplifyPass() { return new Simplify(); }
760 INITIALIZE_PASS_BEGIN(Simplify
, "polly-simplify", "Polly - Simplify", false,
762 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass
)
763 INITIALIZE_PASS_END(Simplify
, "polly-simplify", "Polly - Simplify", false,