1 //===------ Support/ScopHelper.h -- Some Helper Functions for Scop. -------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Small functions that help with LLVM-IR.
12 //===----------------------------------------------------------------------===//
14 #ifndef POLLY_SUPPORT_IRHELPER_H
15 #define POLLY_SUPPORT_IRHELPER_H
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/IR/Instructions.h"
20 #include "llvm/IR/IntrinsicInst.h"
21 #include "llvm/IR/ValueHandle.h"
28 class ScalarEvolution
;
34 class GetElementPtrInst
;
/// Type to remap values.
///
/// Maps an original llvm::Value to the llvm::Value that replaces it. Both key
/// and mapped value are held as AssertingVH, so deleting a still-mapped value
/// triggers an assertion instead of a silent dangling entry.
using ValueMapT = llvm::DenseMap<llvm::AssertingVH<llvm::Value>,
                                 llvm::AssertingVH<llvm::Value>>;
/// Type for a set of invariant loads.
///
/// SetVector gives set semantics with deterministic (insertion-order)
/// iteration; AssertingVH guards against use of deleted loads.
using InvariantLoadsSetTy = llvm::SetVector<llvm::AssertingVH<llvm::LoadInst>>;
/// Set type for parameters.
///
/// Parameters are represented as SCEV expressions; insertion order is
/// preserved by SetVector.
using ParameterSetTy = llvm::SetVector<const llvm::SCEV *>;
/// Set of loops (used to remember loops in non-affine subregions).
using BoxedLoopsSetTy = llvm::SetVector<const llvm::Loop *>;
/// Utility proxy to wrap the common members of LoadInst and StoreInst.
///
/// This works like the LLVM utility class CallSite, ie. it forwards all calls
/// to either a LoadInst, StoreInst, MemIntrinsic or MemTransferInst.
/// It is similar to LLVM's utility classes IntrinsicInst, MemIntrinsic,
/// MemTransferInst, etc. in that it offers a common interface, but does not act
/// as a fake base class.
/// It is similar to StringRef and ArrayRef in that it holds a pointer to the
/// referenced object and should be passed by-value as it is small enough.
///
/// This proxy can either represent a LoadInst instance, a StoreInst instance,
/// a MemIntrinsic instance (memset, memmove, memcpy), a CallInst instance or a
/// nullptr (only creatable using the default constructor); never an Instruction
/// that is neither of the above mentioned. When representing a nullptr, only
/// the following methods are defined:
/// isNull(), isInstruction(), isLoad(), isStore(), ..., isMemTransferInst(),
/// operator bool(), operator!()
///
/// The functions isa, cast, cast_or_null, dyn_cast are modeled to resemble
/// those from llvm/Support/Casting.h. Partial template function specialization
/// is currently not supported in C++ such that those cannot be used directly.
/// (llvm::isa could, but then llvm:cast etc. would not have the expected
// NOTE(review): this chunk has dropped lines here — the end of the doc
// comment, the 'class MemAccInst {' header, and the declaration of the wrapped
// member 'I' (referenced throughout as an llvm::Instruction*) are not visible.
// Restore them from the original file before compiling.
  // -- Constructors: wrap a specific memory-access instruction kind. The
  //    Instruction overloads assert that the wrapped value is one of the
  //    supported kinds (see isa() below).
  MemAccInst() : I(nullptr) {}
  MemAccInst(const MemAccInst &Inst) : I(Inst.I) {}
  /* implicit */ MemAccInst(llvm::LoadInst &LI) : I(&LI) {}
  /* implicit */ MemAccInst(llvm::LoadInst *LI) : I(LI) {}
  /* implicit */ MemAccInst(llvm::StoreInst &SI) : I(&SI) {}
  /* implicit */ MemAccInst(llvm::StoreInst *SI) : I(SI) {}
  /* implicit */ MemAccInst(llvm::MemIntrinsic *MI) : I(MI) {}
  /* implicit */ MemAccInst(llvm::CallInst *CI) : I(CI) {}
  explicit MemAccInst(llvm::Instruction &I) : I(&I) { assert(isa(I)); }
  explicit MemAccInst(llvm::Instruction *I) : I(I) { assert(isa(I)); }
  // True iff @p V is one of the instruction kinds this proxy can represent.
  // NOTE(review): the closing braces of several methods below (and some body
  // lines) were dropped by extraction; only the visible tokens are kept.
  static bool isa(const llvm::Value &V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  static bool isa(const llvm::Value *V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  // Casting helpers modeled after llvm/Support/Casting.h (see class comment).
  static MemAccInst cast(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  static MemAccInst cast(llvm::Value *V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  static MemAccInst cast_or_null(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  // NOTE(review): cast_or_null(Value*) and both dyn_cast overloads appear to
  // have lost their guard lines (null check / isa() check) in this chunk; as
  // shown they are indistinguishable from cast(). Confirm against upstream.
  static MemAccInst cast_or_null(llvm::Value *V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  static MemAccInst dyn_cast(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  static MemAccInst dyn_cast(llvm::Value *V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  // -- Assignment operators: rebind the proxy to another instruction.
  // NOTE(review): all operator= bodies (presumably assigning to I and
  // returning *this) are missing from this chunk; only signatures remain.
  MemAccInst &operator=(const MemAccInst &Inst) {
  MemAccInst &operator=(llvm::LoadInst &LI) {
  MemAccInst &operator=(llvm::LoadInst *LI) {
  MemAccInst &operator=(llvm::StoreInst &SI) {
  MemAccInst &operator=(llvm::StoreInst *SI) {
  MemAccInst &operator=(llvm::MemIntrinsic &MI) {
  MemAccInst &operator=(llvm::MemIntrinsic *MI) {
  MemAccInst &operator=(llvm::CallInst &CI) {
  MemAccInst &operator=(llvm::CallInst *CI) {
  /// Return the wrapped instruction; asserts that the proxy is non-null.
  llvm::Instruction *get() const {
    assert(I && "Unexpected nullptr!");
  operator llvm::Instruction *() const { return asInstruction(); }
  llvm::Instruction *operator->() const { return get(); }
  explicit operator bool() const { return isInstruction(); }
  bool operator!() const { return isNull(); }
  /// Forward to the stored value operand where the wrapped kind has one.
  // NOTE(review): the isLoad()/isStore() guard lines and part of the
  // MemIntrinsic/CallInst handling are missing from this chunk.
  llvm::Value *getValueOperand() const {
    return asStore()->getValueOperand();
    if (isMemIntrinsic())
    llvm_unreachable("Operation not supported on nullptr");
  /// Forward to the accessed pointer (raw destination for mem intrinsics).
  llvm::Value *getPointerOperand() const {
    return asLoad()->getPointerOperand();
    return asStore()->getPointerOperand();
    if (isMemIntrinsic())
      return asMemIntrinsic()->getRawDest();
    llvm_unreachable("Operation not supported on nullptr");
  /// Forward to the wrapped instruction's alignment.
  unsigned getAlignment() const {
    return asLoad()->getAlignment();
    return asStore()->getAlignment();
    if (isMemIntrinsic())
      return asMemIntrinsic()->getAlignment();
    llvm_unreachable("Operation not supported on nullptr");
  bool isVolatile() const {
    return asLoad()->isVolatile();
    return asStore()->isVolatile();
    if (isMemIntrinsic())
      return asMemIntrinsic()->isVolatile();
    llvm_unreachable("Operation not supported on nullptr");
  // For mem intrinsics "simple" is approximated as non-volatile.
  bool isSimple() const {
    return asLoad()->isSimple();
    return asStore()->isSimple();
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    llvm_unreachable("Operation not supported on nullptr");
  // Mem intrinsics and calls carry no ordering; report NotAtomic.
  // NOTE(review): a guard line (likely 'if (isCallInst())') seems to be
  // missing between the two NotAtomic returns below.
  llvm::AtomicOrdering getOrdering() const {
    return asLoad()->getOrdering();
    return asStore()->getOrdering();
    if (isMemIntrinsic())
      return llvm::AtomicOrdering::NotAtomic;
    return llvm::AtomicOrdering::NotAtomic;
    llvm_unreachable("Operation not supported on nullptr");
  bool isUnordered() const {
    return asLoad()->isUnordered();
    return asStore()->isUnordered();
    // Copied from the Load/Store implementation of isUnordered:
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    llvm_unreachable("Operation not supported on nullptr");
  // -- Null / kind queries: safe to call even on a null proxy.
  bool isNull() const { return !I; }
  bool isInstruction() const { return I; }
  llvm::Instruction *asInstruction() const { return I; }
  bool isLoad() const { return I && llvm::isa<llvm::LoadInst>(I); }
  bool isStore() const { return I && llvm::isa<llvm::StoreInst>(I); }
  bool isCallInst() const { return I && llvm::isa<llvm::CallInst>(I); }
  bool isMemIntrinsic() const { return I && llvm::isa<llvm::MemIntrinsic>(I); }
  bool isMemSetInst() const { return I && llvm::isa<llvm::MemSetInst>(I); }
  bool isMemTransferInst() const {
    return I && llvm::isa<llvm::MemTransferInst>(I);
  // -- Checked downcasts to the concrete instruction type (llvm::cast asserts
  //    on kind mismatch; callers must check the is*() predicate first).
  llvm::LoadInst *asLoad() const { return llvm::cast<llvm::LoadInst>(I); }
  llvm::StoreInst *asStore() const { return llvm::cast<llvm::StoreInst>(I); }
  llvm::CallInst *asCallInst() const { return llvm::cast<llvm::CallInst>(I); }
  llvm::MemIntrinsic *asMemIntrinsic() const {
    return llvm::cast<llvm::MemIntrinsic>(I);
  llvm::MemSetInst *asMemSetInst() const {
    return llvm::cast<llvm::MemSetInst>(I);
  llvm::MemTransferInst *asMemTransferInst() const {
    return llvm::cast<llvm::MemTransferInst>(I);
/// Specialize simplify_type for MemAccInst to enable dyn_cast and cast
/// from a MemAccInst object.
// NOTE(review): the enclosing 'namespace llvm {' opener and the closing
// braces of this specialization are not visible in this chunk.
template <> struct simplify_type<polly::MemAccInst> {
  typedef Instruction *SimpleType;
  // Unwrap the proxy to the underlying Instruction pointer (may be null).
  static SimpleType getSimplifiedValue(polly::MemAccInst &I) {
    return I.asInstruction();
/// Simplify the region to have a single unconditional entry edge and a
/// single exit edge.
///
/// Although this function allows DT and RI to be null, regions only work
/// properly if the DominatorTree (for Region::contains) and RegionInfo are
/// kept up-to-date.
///
/// @param R  The region to be simplified.
/// @param DT DominatorTree to be updated.
/// @param LI LoopInfo to be updated.
/// @param RI RegionInfo to be updated.
void simplifyRegion(llvm::Region *R, llvm::DominatorTree *DT,
                    llvm::LoopInfo *LI, llvm::RegionInfo *RI);
/// Split the entry block of a function to store the newly inserted
/// allocations outside of all Scops.
///
/// @param EntryBlock The entry block of the current function.
/// @param P          The pass that is currently running.
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock, llvm::Pass *P);
/// Wrapper for SCEVExpander extended to all Polly features.
///
/// This wrapper will internally call the SCEVExpander but also makes sure that
/// all additional features not represented in SCEV (e.g., SDiv/SRem are not
/// black boxes but can be part of the function) will be expanded correctly.
///
/// The parameters are the same as for the creation of a SCEVExpander as well
/// as the call to SCEVExpander::expandCodeFor:
///
/// @param S     The current Scop.
/// @param SE    The Scalar Evolution pass.
/// @param DL    The module data layout.
/// @param Name  The suffix added to the new instruction names.
/// @param E     The expression for which code is actually generated.
/// @param Ty    The type of the resulting code.
/// @param IP    The insertion point for the new code.
/// @param VMap  A remapping of values used in @p E.
/// @param RTCBB The last block of the RTC. Used to insert loop-invariant
///              instructions in rare cases.
llvm::Value *expandCodeFor(Scop &S, llvm::ScalarEvolution &SE,
                           const llvm::DataLayout &DL, const char *Name,
                           const llvm::SCEV *E, llvm::Type *Ty,
                           llvm::Instruction *IP, ValueMapT *VMap,
                           llvm::BasicBlock *RTCBB);
/// Check if the block is an error block.
///
/// An error block is currently any block that fulfills at least one of
/// the following conditions:
///
///  - It is terminated by an unreachable instruction
///  - It contains a call to a non-pure function that is not immediately
///    dominated by a loop header and that does not dominate the region exit.
///    This is a heuristic to pick only error blocks that are conditionally
///    executed and can be assumed to be not executed at all without the
///    domains being available.
///
/// @param BB The block to check.
/// @param R  The analyzed region.
/// @param LI The loop info analysis.
/// @param DT The dominator tree of the function.
///
/// @return True if the block is an error block, false otherwise.
bool isErrorBlock(llvm::BasicBlock &BB, const llvm::Region &R,
                  llvm::LoopInfo &LI, const llvm::DominatorTree &DT);
/// Return the condition for the terminator @p TI.
///
/// For unconditional branches the "i1 true" condition will be returned.
///
/// @param TI The terminator to get the condition from.
///
/// @return The condition of @p TI and nullptr if none could be extracted.
llvm::Value *getConditionFromTerminator(llvm::TerminatorInst *TI);
/// Check if @p LInst can be hoisted in @p R.
///
/// @param LInst The load to check.
/// @param R     The analyzed region.
/// @param LI    The loop info.
/// @param SE    The scalar evolution analysis.
/// @param DT    The dominator tree of the function.
///
/// @return True if @p LInst can be hoisted in @p R.
bool isHoistableLoad(llvm::LoadInst *LInst, llvm::Region &R, llvm::LoopInfo &LI,
                     llvm::ScalarEvolution &SE, const llvm::DominatorTree &DT);
/// Return true iff @p V is an intrinsic that we ignore during code
/// generation.
bool isIgnoredIntrinsic(const llvm::Value *V);
/// Check whether a value can be synthesized by the code generator.
///
/// Some values will be recalculated only from information that is code
/// generated from the polyhedral representation. For such instructions we do
/// not need to ensure that their operands are available during code
/// generation.
///
/// @param V     The value to check.
/// @param S     The current SCoP.
/// @param SE    The scalar evolution database.
/// @param Scope Location where the value would be synthesized.
///
/// @return If the instruction I can be regenerated from its
///         scalar evolution representation, return true,
///         otherwise return false.
bool canSynthesize(const llvm::Value *V, const Scop &S,
                   llvm::ScalarEvolution *SE, llvm::Loop *Scope);
/// Return the block in which a value is used.
///
/// For normal instructions, this is the instruction's parent block. For PHI
/// nodes, this is the incoming block of that use, because this is where the
/// operand must be defined (i.e. its definition dominates this block).
/// Non-instructions do not use operands at a specific point such that in this
/// case this function returns nullptr.
llvm::BasicBlock *getUseBlock(const llvm::Use &U);
/// Derive the individual index expressions from a GEP instruction.
///
/// This function optimistically assumes the GEP references into a fixed size
/// array. If this is actually true, this function returns a list of array
/// subscript expressions as SCEV as well as a list of integers describing
/// the size of the individual array dimensions. Both lists have either equal
/// length or the size list is one element shorter in case there is no known
/// size available for the outermost array dimension.
///
/// @param GEP The GetElementPtr instruction to analyze.
/// @param SE  The ScalarEvolution analysis used to build the subscripts.
///
/// @return A tuple with the subscript expressions and the dimension sizes.
std::tuple<std::vector<const llvm::SCEV *>, std::vector<int>>
getIndexExpressionsFromGEP(llvm::GetElementPtrInst *GEP,
                           llvm::ScalarEvolution &SE);
// If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
// for Polly. If the loop is affine, return the loop itself.
//
// @param L          Pointer to the Loop object to analyze.
// @param LI         Reference to the LoopInfo.
// @param BoxedLoops Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                    const BoxedLoopsSetTy &BoxedLoops);
// If the Basic Block belongs to a loop that is nonaffine/boxed, return the
// first non-boxed surrounding loop for Polly. If the loop is affine, return
// that loop itself.
//
// @param BB         Pointer to the Basic Block to analyze.
// @param LI         Reference to the LoopInfo.
// @param BoxedLoops Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::BasicBlock *BB, llvm::LoopInfo &LI,
                                    const BoxedLoopsSetTy &BoxedLoops);