//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#include "isl/union_map.h"

#include "ppcg/cuda.h"
#include "ppcg/gpu_print.h"
#include "ppcg/ppcg.h"
#include "ppcg/schedule.h"

#include "llvm/Support/Debug.h"

using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"

static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));

static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

bool polly::PollyManagedMemory;
static cl::opt<bool, true>
    XManagedMemory("polly-acc-codegen-managed-memory",
                   cl::desc("Generate Host kernel code assuming"
                            " that all memory has been"
                            " declared as managed memory"),
                   cl::location(PollyManagedMemory), cl::Hidden,
                   cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool>
    FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
                              cl::desc("Fail and generate a backtrace if"
                                       " verifyModule fails on the GPU "
                                       " module"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));

static cl::opt<std::string> CUDALibDevice(
    "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
    cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));

/// Return a unique name for a Scop, which is the scop region with the
/// function name.
std::string getUniqueScopName(const Scop *S) {
  return "Scop Region: " + S->getNameStr() +
         " | Function: " + std::string(S->getFunction().getName());
}
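
// Illustrative example (hypothetical names): for a scop region "%bb3---%bb9"
// inside a function "foo", this yields
// "Scop Region: %bb3---%bb9 | Function: foo".
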
/// Used to store information PPCG wants for kills. This information is
/// used by live range reordering.
///
/// @see computeLiveRangeReordering
/// @see GPUNodeBuilder::createPPCGScop
/// @see GPUNodeBuilder::createPPCGProg
struct MustKillsInfo {
  /// Collection of all kill statements that will be sequenced at the end of
  /// PPCGScop->schedule.
  ///
  /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`
  /// which merges schedules in *arbitrary* order.
  /// (we don't care about the order of the kills anyway).
  isl::schedule KillsSchedule;

  /// Map from kill statement instances to scalars that need to be
  /// killed.
  ///
  /// We currently derive kill information for:
  ///  1. phi nodes. PHI nodes are not alive outside the scop and can
  ///     consequently all be killed.
  ///  2. Scalar arrays that are not used outside the Scop. This is
  ///     checked by `isScalarUsesContainedInScop`.
  /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
  isl::union_map TaggedMustKills;

  /// Tagged must kills stripped of the tags.
  /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] }
  isl::union_map MustKills;

  MustKillsInfo() : KillsSchedule(nullptr) {}
};
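
// For intuition, with a hypothetical scalar MemRef_val and parameter n, a
// single TaggedMustKills entry would look like:
//   [n] -> { [SKill_phantom_MemRef_val[] -> ref_phantomMemRef_val[]]
//            -> MemRef_val[] }
// while the corresponding MustKills entry, with the reference tag stripped, is:
//   [n] -> { SKill_phantom_MemRef_val[] -> MemRef_val[] }
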
/// Check if SAI's uses are entirely contained within Scop S.
/// If a scalar is used only within a Scop, we are free to kill it, as no data
/// can flow in/out of the value any more.
/// @see computeMustKillsInfo
static bool isScalarUsesContainedInScop(const Scop &S,
                                        const ScopArrayInfo *SAI) {
  assert(SAI->isValueKind() && "this function only deals with scalars."
                               " Dealing with arrays requires alias analysis");

  const Region &R = S.getRegion();
  for (User *U : SAI->getBasePtr()->users()) {
    Instruction *I = dyn_cast<Instruction>(U);
    assert(I && "invalid user of scop array info");
    // A user outside the region means data can flow out of the scop, so the
    // scalar must not be killed.
    if (!R.contains(I))
      return false;
  }
  return true;
}

/// Compute must-kills needed to enable live range reordering with PPCG.
///
/// @param S The Scop to compute live range reordering information for.
/// @returns live range reordering information that can be used to setup
///          PPCG.
static MustKillsInfo computeMustKillsInfo(const Scop &S) {
  const isl::space ParamSpace = S.getParamSpace();
  MustKillsInfo Info;
  // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
  //      1.1 phi nodes in scop.
  //      1.2 scalars that are only used within the scop.
  SmallVector<isl::id, 4> KillMemIds;
  for (ScopArrayInfo *SAI : S.arrays()) {
    if (SAI->isPHIKind() ||
        (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
      KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
  }

  Info.TaggedMustKills = isl::union_map::empty(ParamSpace);
  Info.MustKills = isl::union_map::empty(ParamSpace);

  // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the
  // schedule:
  //     - filter: "[control] -> { }"
  // So, we choose to not create this to keep the output a little nicer,
  // at the cost of some code complexity.
  Info.KillsSchedule = nullptr;
  for (isl::id &ToKillId : KillMemIds) {
    isl::id KillStmtId = isl::id::alloc(
        S.getIslCtx(),
        std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);

    // NOTE: construction of tagged_must_kill:
    // 2. We need to construct a map:
    //     [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
    // To construct this, we use `isl_map_domain_product` on 2 maps:
    // 2a. StmtToScalar:
    //         [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
    // 2b. PhantomRefToScalar:
    //         [param] -> { ref_phantom[] -> scalar_to_kill[] }
    //
    // Combining these with `isl_map_domain_product` gives us
    // TaggedMustKill:
    //     [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }

    // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
    isl::map StmtToScalar = isl::map::universe(ParamSpace);
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));

    isl::id PhantomRefId = isl::id::alloc(
        S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
        nullptr);

    // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
    isl::map PhantomRefToScalar = isl::map::universe(ParamSpace);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);

    // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
    isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
    Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);

    // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
    Info.MustKills = Info.TaggedMustKills.domain_factor_domain();
    // 3. Create the kill schedule of the form:
    //     "[param] -> { Stmt_phantom[] }"
    // Then add this to Info.KillsSchedule.
    isl::space KillStmtSpace = ParamSpace;
    KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
    isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);

    isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
    if (Info.KillsSchedule)
      Info.KillsSchedule = Info.KillsSchedule.set(KillSchedule);
    else
      Info.KillsSchedule = KillSchedule;
  }

  return Info;
}

/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback used to generate the ast expressions for each
/// of the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, __isl_take isl_ast_build *Build_C,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  if (!Stmt || !Build_C)
    return NULL;

  isl::ast_build Build = isl::manage(isl_ast_build_copy(Build_C));
  isl::ctx Ctx = Build.get_ctx();
  isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);

  Stmt->setAstBuild(Build);

  for (MemoryAccess *Acc : *Stmt) {
    isl::map AddrFunc = Acc->getAddressFunction();
    AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain());

    isl::id RefId = Acc->getId();
    isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);

    isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
    MPA = MPA.coalesce();
    MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));

    isl::ast_expr Access = Build.access_from(MPA);
    Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
    RefToExpr = RefToExpr.set(RefId, Access);
  }

  return RefToExpr.release();
}

/// Given a LLVM Type, compute its size in bytes.
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}
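
// For example, an i32 yields 4 and a double yields 8; the fallback to
// getScalarSizeInBits() covers types whose primitive size query returns 0.
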
/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }
  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  virtual void finalize();

  /// Track if the full build process was successful.
  ///
  /// This value is set to false, if throughout the build process an error
  /// occurred which prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

  /// Return the name to set for the ptx_kernel.
  std::string getKernelFuncName(int Kernel_id);

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed any
  /// more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel.
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  struct IslIdDeleter {
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;
  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  virtual void createUser(__isl_take isl_ast_node *UserStmt);

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
  /// @param TransferStmt The data transfer statement.
  /// @param Direction    The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);
  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values.
  ///
  /// @returns A tuple, whose:
  ///          - First element contains the set of values referenced by the
  ///            kernel.
  ///          - Second element contains the set of functions referenced by the
  ///            kernel. All functions in the set satisfy
  ///            `isValidFunctionInKernel`.
  ///          - Third element contains loops that have induction variables
  ///            which are used in the kernel, *and* these loops are *neither*
  ///            in the scop, nor do they immediately surround the Scop.
  ///            See [Code generation of induction variables of loops outside
  ///            Scops].
  std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>>
  getReferencesInKernel(ppcg_kernel *Kernel);
  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for X and Y dimension.
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);

  /// Get the managed array pointer for sending host pointers to the device.
  ///
  /// This is to be used only with managed memory.
  Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);
  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);
  /// Create kernel launch parameters.
  ///
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variables.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel The kernel definition to create variables for.
  /// @param FN     The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);
  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);
  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Generate code that computes the size of an array.
  ///
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);

  /// Generate code to compute the minimal offset at which an array is accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42] += ...
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);
  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel           The kernel to generate code for.
  /// @param SubtreeValues    The set of llvm::Values referenced by this kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel        The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);
  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Insert function calls to retrieve the SPIR group/local ids.
  ///
  /// @param Kernel The kernel to generate the function calls for.
  void insertKernelCallsSPIR(ppcg_kernel *Kernel);

  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  /// SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  /// old functions (that come from the original module) to new functions
  /// (that are created within GPUModule). That way, we generate references
  /// to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         this kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);
  /// Create a global-to-shared or shared-to-global copy statement.
  ///
  /// @param CopyStmt The copy statement to generate code for.
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr       The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();

  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Check if the scop requires to be linked with CUDA's libdevice.
  bool requiresCUDALibDevice();

  /// Link with the NVIDIA libdevice library (if needed and available).
  void addCUDALibDevice();

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
  /// dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();
  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @param Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);

  /// Create code that allocates memory to store arrays on device.
  void allocateDeviceArrays();

  /// Create code to prepare the managed device pointers.
  void prepareManagedDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();
  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();

  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation.
  ///
  /// @returns The device parameter corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate.
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);

  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr   A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  /// @param Size      The number of bytes to copy.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr   A host pointer specifying the location to copy to.
  /// @param Size      The number of bytes to copy.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  ///
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object.
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);
  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param BlockDimX  The size of the first block dimension.
  /// @param BlockDimY  The size of the second block dimension.
  /// @param BlockDimZ  The size of the third block dimension.
  /// @param Parameters A pointer to an array that contains itself pointers to
  ///                   the parameter values passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};

std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
  return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
         std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
}
void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!PollyManagedMemory)
    allocateDeviceArrays();
  else
    prepareManagedDeviceArrays();
}

void GPUNodeBuilder::finalize() {
  if (!PollyManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}

void GPUNodeBuilder::allocateDeviceArrays() {
  assert(!PollyManagedMemory &&
         "Managed memory will directly send host pointers "
         "to the kernel. There is no need for device arrays");
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release());

  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    auto *ScopArray = (ScopArrayInfo *)Array->user;
    std::string DevArrayName("p_dev_array_");
    DevArrayName.append(Array->name);

    Value *ArraySize = getArraySize(Array);
    Value *Offset = getArrayOffset(Array);
    if (Offset)
      ArraySize = Builder.CreateSub(
          ArraySize,
          Builder.CreateMul(Offset,
                            Builder.getInt64(ScopArray->getElemSizeInBytes())));
    const SCEV *SizeSCEV = SE.getSCEV(ArraySize);
    // It makes no sense to have an array of size 0. The CUDA API will
    // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We
    // choose to be defensive and catch this at the compile phase. It is
    // most likely that we are doing something wrong with size computation.
    if (SizeSCEV->isZero()) {
      errs() << getUniqueScopName(&S)
             << " has computed array size 0: " << *ArraySize
             << " | for array: " << *(ScopArray->getBasePtr())
             << ". This is illegal, exiting.\n";
      report_fatal_error("array size was computed to be 0");
    }

    Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
    DevArray->setName(DevArrayName);
    DeviceAllocations[ScopArray] = DevArray;
  }

  isl_ast_build_free(Build);
}
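
// Sketch of the host-side IR emitted per array (illustrative names):
//   %size = ...   ; getArraySize() result, corrected by the array offset
//   %p_dev_array_MemRef_A = call i8* @polly_allocateMemoryForDevice(i64 %size)
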
void GPUNodeBuilder::prepareManagedDeviceArrays() {
  assert(PollyManagedMemory &&
         "Device arrays must only be prepared in managed-memory mode");
  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user;
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
    else
      HostPtr = ScopArray->getBasePtr();
    HostPtr = getLatestValue(HostPtr);

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ScopArray->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ScopArray] = HostPtr;
  }
}

void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (Function &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

    Metadata *Elements[] = {
        ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };

    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}
void GPUNodeBuilder::freeDeviceArrays() {
  assert(!PollyManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}

Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}

Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}

void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}
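
// The GPU runtime library is expected to provide a matching C entry point,
// roughly (a sketch, not the verbatim declaration):
//   void polly_launchKernel(void *Kernel, int GridDimX, int GridDimY,
//                           int BlockDimX, int BlockDimY, int BlockDimZ,
//                           void *Parameters);
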
void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}

void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for the device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}

Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for the device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}

void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}

void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}

void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(PollyManagedMemory && "explicit synchronization is only necessary for "
                               "managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}

Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}

void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}

/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}
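
// For example, isPrefix("to_device_MemRef_A", "to_device") is true, while
// isPrefix("from_device_MemRef_A", "to_device") is false.
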
Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl::ast_build Build = isl::ast_build::from_context(S.getContext());
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    isl::multi_pw_aff ArrayBound =
        isl::manage(isl_multi_pw_aff_copy(Array->bound));

    isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
    isl::ast_expr Res = Build.expr_from(OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
      isl::ast_expr Expr = Build.expr_from(Bound_I);
      Res = Res.mul(Expr);
    }

    Value *NumElements = ExprBuilder.create(Res.release());
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }

  return ArraySize;
}
*GPUNodeBuilder::getArrayOffset(gpu_array_info
*Array
) {
1121 if (gpu_array_is_scalar(Array
))
1124 isl::ast_build Build
= isl::ast_build::from_context(S
.getContext());
1126 isl::set Min
= isl::manage(isl_set_copy(Array
->extent
)).lexmin();
1128 isl::set ZeroSet
= isl::set::universe(Min
.get_space());
1130 for (long i
= 0; i
< Min
.dim(isl::dim::set
); i
++)
1131 ZeroSet
= ZeroSet
.fix_si(isl::dim::set
, i
, 0);
1133 if (Min
.is_subset(ZeroSet
)) {
1137 isl::ast_expr Result
= isl::ast_expr::from_val(isl::val(Min
.get_ctx(), 0));
1139 for (long i
= 0; i
< Min
.dim(isl::dim::set
); i
++) {
1141 isl::pw_aff Bound_I
=
1142 isl::manage(isl_multi_pw_aff_get_pw_aff(Array
->bound
, i
- 1));
1143 isl::ast_expr BExpr
= Build
.expr_from(Bound_I
);
1144 Result
= Result
.mul(BExpr
);
1146 isl::pw_aff DimMin
= Min
.dim_min(i
);
1147 isl::ast_expr MExpr
= Build
.expr_from(DimMin
);
1148 Result
= Result
.add(MExpr
);
1151 return ExprBuilder
.create(Result
.release());
Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array,
                                             ScopArrayInfo *ArrayInfo) {
  assert(PollyManagedMemory && "Only used when you wish to get a host "
                               "pointer for sending data to the kernel, "
                               "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  it = DeviceAllocations.find(ArrayInfo);
  assert(it != DeviceAllocations.end() &&
         "Device array expected to be available");
  return it->second;
}

void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!PollyManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();
  HostPtr = getLatestValue(HostPtr);

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset)
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}

void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);

  const char *Str = isl_id_get_name(Id);
  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "init_device")) {
    initializeAfterRTH();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "clear_device")) {
    finalize();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (isPrefix(Str, "to_device")) {
    if (!PollyManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!PollyManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      createCallSynchronizeDevice();
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
}

void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}

*Expr
,
1302 ppcg_kernel_stmt
*KernelStmt
) {
1303 auto Stmt
= (ScopStmt
*)KernelStmt
->u
.d
.stmt
->stmt
;
1304 isl_id_to_ast_expr
*Indexes
= KernelStmt
->u
.d
.ref2expr
;
1307 LTS
.insert(OutsideLoopIterations
.begin(), OutsideLoopIterations
.end());
1309 createSubstitutions(Expr
, Stmt
, LTS
);
1311 if (Stmt
->isBlockStmt())
1312 BlockGen
.copyStmt(*Stmt
, LTS
, Indexes
);
1314 RegionGen
.copyStmt(*Stmt
, LTS
, Indexes
);
void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  const char *SpirName = "__gen_ocl_barrier_global";

  Function *Sync;

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    Sync = M->getFunction(SpirName);

    // If Sync is not available, declare it.
    if (!Sync) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
      Sync = Function::Create(Ty, Linkage, SpirName, M);
      Sync->setCallingConv(CallingConv::SPIR_FUNC);
    }
    break;
  case GPUArch::NVPTX64:
    Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
    break;
  }

  Builder.CreateCall(Sync, {});
}

/// Collect llvm::Values referenced from @p Node.
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}

/// A list of functions that are available in NVIDIA's libdevice.
const std::set<std::string> CUDALibDeviceFunctions = {
    "exp",  "expf",  "expl",     "cos",       "cosf",
    "sqrt", "sqrtf", "copysign", "copysignf", "copysignl"};

/// Return the corresponding CUDA libdevice function name for @p F.
///
/// Return "" if we are not compiling for CUDA.
std::string getCUDALibDeviceFunction(Function *F) {
  if (CUDALibDeviceFunctions.count(F->getName()))
    return std::string("__nv_") + std::string(F->getName());

  return "";
}
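
// For example, a call to "expf" is redirected to libdevice's "__nv_expf".
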
/// Check if F is a function that we can code-generate in a GPU kernel.
static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
  assert(F && "F is an invalid pointer");
  // We string compare against the name of the function to allow
  // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
  // "llvm.copysign".
  const StringRef Name = F->getName();

  if (AllowLibDevice && getCUDALibDeviceFunction(F).length() > 0)
    return true;

  return F->isIntrinsic() &&
         (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
          Name.startswith("llvm.copysign"));
}

/// Do not take `Function` as a subtree value.
///
/// We try to take the reference of all subtree values and pass them along
/// to the kernel from the host. Taking an address of any function and
/// trying to pass along is nonsensical. Only allow `Value`s that are not
/// `Function`s.
static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }

/// Return `Function`s from `RawSubtreeValues`.
static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
                                 bool AllowCUDALibDevice) {
  SetVector<Function *> SubtreeFunctions;
  for (Value *It : RawSubtreeValues) {
    Function *F = dyn_cast<Function>(It);
    if (F) {
      assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
             "Code should have bailed out by "
             "this point if an invalid function "
             "were present in a kernel.");
      SubtreeFunctions.insert(F);
    }
  }
  return SubtreeFunctions;
}

std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>>
GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator()};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  // NOTE: this is populated in IslNodeBuilder::addParameters
  // See [Code generation of induction variables of loops outside Scops].
  for (const auto &I : OutsideLoopIterations)
    SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue());

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);

  for (const SCEV *Expr : SCEVs) {
    findValues(Expr, SE, SubtreeValues);
    findLoops(Expr, Loops);
  }

  Loops.remove_if([this](const Loop *L) {
    return S.contains(L) || L->contains(S.getEntry());
  });

  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI->getBasePtr());

  isl_space *Space = S.getParamSpace().release();
  for (long i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }
  isl_space_free(Space);

  for (long i = 0; i < isl_space_dim(Kernel->space, isl_dim_set); i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
  // SubtreeValues. This is important, because we should not lose any
  // SubtreeValues in the process of constructing the
  // "ValidSubtree{Values, Functions}" sets. Nor should the set
  // ValidSubtree{Values, Functions} have any common element.
  auto ValidSubtreeValuesIt =
      make_filter_range(SubtreeValues, isValidSubtreeValue);
  SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
                                        ValidSubtreeValuesIt.end());

  bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;

  SetVector<Function *> ValidSubtreeFunctions(
      getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));

  // @see IslNodeBuilder::getReferencesInSubtree
  SetVector<Value *> ReplacedValues;
  for (Value *V : ValidSubtreeValues) {
    auto It = ValueMap.find(V);
    if (It == ValueMap.end())
      ReplacedValues.insert(V);
    else
      ReplacedValues.insert(It->second);
  }
  return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops);
}

void GPUNodeBuilder::clearDominators(Function *F) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock *> Nodes;
  for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (BasicBlock *BB : Nodes)
    DT.eraseNode(BB);
}

void GPUNodeBuilder::clearScalarEvolution(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
  }
}

void GPUNodeBuilder::clearLoops(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
    LI.removeBlock(&BB);
  }
}

std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;
  isl::ast_build Context = isl::ast_build::from_context(S.getContext());

  isl::multi_pw_aff GridSizePwAffs =
      isl::manage(isl_multi_pw_aff_copy(Kernel->grid_size));
  for (long i = 0; i < Kernel->n_grid; i++) {
    isl::pw_aff Size = GridSizePwAffs.get_pw_aff(i);
    isl::ast_expr GridSize = Context.expr_from(Size);
    Value *Res = ExprBuilder.create(GridSize.release());
    Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_grid; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1]);
}

std::tuple<Value *, Value *, Value *>
GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;

  for (long i = 0; i < Kernel->n_block; i++) {
    Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_block; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
}

void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
                                          Instruction *Param, int Index) {
  Value *Slot = Builder.CreateGEP(
      Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
  Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
  Builder.CreateStore(ParamTyped, Slot);
}

Value *
GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                       SetVector<Value *> SubtreeValues) {
  const int NumArgs = F->arg_size();
  std::vector<int> ArgSizes(NumArgs);

  Type *ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);

  BasicBlock *EntryBlock =
      &Builder.GetInsertBlock()->getParent()->getEntryBlock();
  auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
  std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
  Instruction *Parameters = new AllocaInst(
      ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());

  int Index = 0;
  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));

    ArgSizes[Index] = SAI->getElemSizeInBytes();

    Value *DevArray = nullptr;
    if (PollyManagedMemory) {
      DevArray = getManagedDeviceArray(&Prog->array[i],
                                       const_cast<ScopArrayInfo *>(SAI));
    } else {
      DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
      DevArray = createCallGetDevicePtr(DevArray);
    }
    assert(DevArray != nullptr && "Array to be offloaded to device not "
                                  "initialized");
    Value *Offset = getArrayOffset(&Prog->array[i]);

    if (Offset) {
      DevArray = Builder.CreatePointerCast(
          DevArray, SAI->getElementType()->getPointerTo());
      DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset));
      DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
    }
    Value *Slot = Builder.CreateGEP(
        Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Value *ValPtr = nullptr;
      if (PollyManagedMemory)
        ValPtr = DevArray;
      else
        ValPtr = BlockGen.getOrCreateAlloca(SAI);

      assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
                                  " to be stored into Parameters");
      Value *ValPtrCast =
          Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
      Builder.CreateStore(ValPtrCast, Slot);
    } else {
      Instruction *Param =
          new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
                         Launch + "_param_" + std::to_string(Index),
                         EntryBlock->getTerminator());
      Builder.CreateStore(DevArray, Param);
      Value *ParamTyped =
          Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
      Builder.CreateStore(ParamTyped, Slot);
    }
    Index++;
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);

    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    if (ValueMap.count(Val))
      Val = ValueMap[Val];
    isl_id_free(Id);

    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (auto Val : SubtreeValues) {
    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (int i = 0; i < NumArgs; i++) {
    Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
    Instruction *Param =
        new AllocaInst(Builder.getInt32Ty(), AddressSpace,
                       Launch + "_param_size_" + std::to_string(i),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  auto Location = EntryBlock->getTerminator();
  return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
                         Launch + "_params_i8ptr", Location);
}
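// Note: the trailing "_param_size_" slots record the byte size of each kernel
// argument, which is presumably why the parameter array above reserves
// 2 * NumArgs slots; a runtime such as the OpenCL backend of Polly's GPU
// runtime can then query argument sizes when setting kernel arguments.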
void GPUNodeBuilder::setupKernelSubtreeFunctions(
    SetVector<Function *> SubtreeFunctions) {
  for (auto Fn : SubtreeFunctions) {
    const std::string ClonedFnName = Fn->getName();
    Function *Clone = GPUModule->getFunction(ClonedFnName);
    if (!Clone)
      Clone =
          Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
                           ClonedFnName, GPUModule.get());
    assert(Clone && "Expected cloned function to be initialized.");
    assert(ValueMap.find(Fn) == ValueMap.end() &&
           "Fn already present in ValueMap");
    ValueMap[Fn] = Clone;
  }
}
void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
  isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
  ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
  isl_id_free(Id);
  isl_ast_node_free(KernelStmt);

  if (Kernel->n_grid > 1)
    DeepestParallel =
        std::max(DeepestParallel, isl_space_dim(Kernel->space, isl_dim_set));
  else
    DeepestSequential =
        std::max(DeepestSequential, isl_space_dim(Kernel->space, isl_dim_set));

  Value *BlockDimX, *BlockDimY, *BlockDimZ;
  std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);

  SetVector<Value *> SubtreeValues;
  SetVector<Function *> SubtreeFunctions;
  SetVector<const Loop *> Loops;
  std::tie(SubtreeValues, SubtreeFunctions, Loops) =
      getReferencesInKernel(Kernel);

  assert(Kernel->tree && "Device AST of kernel node is empty");

  Instruction &HostInsertPoint = *Builder.GetInsertPoint();
  IslExprBuilder::IDToValueTy HostIDs = IDToValue;
  ValueMapT HostValueMap = ValueMap;
  BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;
  ScalarMap.clear();

  // Create for all loops we depend on values that contain the current loop
  // iteration. These values are necessary to generate code for SCEVs that
  // depend on such loops. As a result we need to pass them to the subfunction.
  for (const Loop *L : Loops) {
    const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
                                            SE.getUnknown(Builder.getInt64(1)),
                                            L, SCEV::FlagAnyWrap);
    Value *V = generateSCEV(OuterLIV);
    OutsideLoopIterations[L] = SE.getUnknown(V);
    SubtreeValues.insert(V);
  }

  createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
  setupKernelSubtreeFunctions(SubtreeFunctions);

  create(isl_ast_node_copy(Kernel->tree));

  finalizeKernelArguments(Kernel);
  Function *F = Builder.GetInsertBlock()->getParent();
  if (Arch == GPUArch::NVPTX64)
    addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
  clearDominators(F);
  clearScalarEvolution(F);
  clearLoops(F);

  IDToValue = HostIDs;

  ValueMap = std::move(HostValueMap);
  ScalarMap = std::move(HostScalarMap);
  IDToSAI.clear();
  Annotator.resetAlternativeAliasBases();
  for (auto &BasePtr : LocalArrays)
    S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
  LocalArrays.clear();

  std::string ASMString = finalizeKernelFunction();
  Builder.SetInsertPoint(&HostInsertPoint);
  Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);

  std::string Name = getKernelFuncName(Kernel->id);
  Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
  Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
  Value *GPUKernel = createCallGetKernel(KernelString, NameString);

  Value *GridDimX, *GridDimY;
  std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);

  createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters);
  createCallFreeKernel(GPUKernel);

  for (auto Id : KernelIds)
    isl_id_free(Id);
  KernelIds.clear();
}
/// Compute the DataLayout string for the NVPTX backend.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeNVPTXDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  }

  return Ret;
}

/// Compute the DataLayout string for a SPIR kernel.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeSPIRDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  }

  return Ret;
}
Function *
GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
                                         SetVector<Value *> &SubtreeValues) {
  std::vector<Type *> Args;
  std::string Identifier = getKernelFuncName(Kernel->id);

  std::vector<Metadata *> MemoryType;

  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
      const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
      Args.push_back(SAI->getElementType());
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
    } else {
      static const int UseGlobalMemory = 1;
      Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1)));
    }
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    Args.push_back(Builder.getInt64Ty());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);
    Args.push_back(Val->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  for (auto *V : SubtreeValues) {
    Args.push_back(V->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
  auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
                              GPUModule.get());

  std::vector<Metadata *> EmptyStrings;

  for (unsigned int i = 0; i < MemoryType.size(); i++) {
    EmptyStrings.push_back(MDString::get(FN->getContext(), ""));
  }

  if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) {
    FN->setMetadata("kernel_arg_addr_space",
                    MDNode::get(FN->getContext(), MemoryType));
    FN->setMetadata("kernel_arg_name",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_access_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_base_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
  }

  switch (Arch) {
  case GPUArch::NVPTX64:
    FN->setCallingConv(CallingConv::PTX_Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    FN->setCallingConv(CallingConv::SPIR_KERNEL);
    break;
  }

  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    Arg->setName(Kernel->array[i].array->name);

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    Type *EleTy = SAI->getElementType();
    Value *Val = &*Arg;

    SmallVector<const SCEV *, 4> Sizes;
    isl_ast_build *Build =
        isl_ast_build_from_context(isl_set_copy(Prog->context));
    Sizes.push_back(nullptr);
    for (long j = 1; j < Kernel->array[i].array->n_index; j++) {
      isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
          Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j));
      auto V = ExprBuilder.create(DimSize);
      Sizes.push_back(SE.getSCEV(V));
    }
    const ScopArrayInfo *SAIRep =
        S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
    LocalArrays.push_back(Val);

    isl_ast_build_free(Build);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAIRep;
    Arg++;
  }

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Arg->setName(isl_id_get_name(Id));
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Arg->setName(isl_id_get_name(Id));
    Value *Val = IDToValue[Id];
    ValueMap[Val] = &*Arg;
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (auto *V : SubtreeValues) {
    Arg->setName(V->getName());
    ValueMap[V] = &*Arg;
    Arg++;
  }

  return FN;
}
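// Note: the argument order established above (array arguments, host loop
// iterators, isl parameters, then referenced subtree values) must stay in
// sync with the order in which createLaunchParameters fills the
// launch-parameter array on the host side.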
void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
  Intrinsic::ID IntrinsicsBID[2];
  Intrinsic::ID IntrinsicsTID[3];

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
  case GPUArch::NVPTX64:
    IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
    IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;

    IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
    IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
    IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
    break;
  }

  auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
    std::string Name = isl_id_get_name(Id);
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
    Value *Val = Builder.CreateCall(IntrinsicFn, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
    addId(Id, IntrinsicsBID[i]);
  }

  for (int i = 0; i < Kernel->n_block; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
    addId(Id, IntrinsicsTID[i]);
  }
}
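// For example, in a kernel mapped onto a 2D grid with 3D blocks, ppcg's block
// ids b0/b1 are bound to ctaid.x/ctaid.y and its thread ids t0/t1/t2 to
// tid.x/tid.y/tid.z, each zero-extended to i64 for use in index expressions.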
void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel) {
  const char *GroupName[3] = {"__gen_ocl_get_group_id0",
                              "__gen_ocl_get_group_id1",
                              "__gen_ocl_get_group_id2"};

  const char *LocalName[3] = {"__gen_ocl_get_local_id0",
                              "__gen_ocl_get_local_id1",
                              "__gen_ocl_get_local_id2"};

  auto createFunc = [this](const char *Name, __isl_take isl_id *Id) mutable {
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *FN = M->getFunction(Name);

    // If FN is not available, declare it.
    if (!FN) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getInt32Ty(), Args, false);
      FN = Function::Create(Ty, Linkage, Name, M);
      FN->setCallingConv(CallingConv::SPIR_FUNC);
    }

    Value *Val = Builder.CreateCall(FN, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i)
    createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i));

  for (int i = 0; i < Kernel->n_block; ++i)
    createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i));
}
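// Note: the __gen_ocl_get_group_id*/__gen_ocl_get_local_id* symbols used
// above appear to be the work-item builtins of Beignet-style OpenCL
// implementations; they are declared on demand here and resolved by the
// OpenCL driver when the kernel is built.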
void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) {
  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    Value *Val = &*Arg;

    if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Type *TypePtr = SAI->getElementType()->getPointerTo();
      Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
      Val = Builder.CreateLoad(TypedArgPtr);
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Builder.CreateStore(Val, Alloca);

    Arg++;
  }
}
void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
  auto *FN = Builder.GetInsertBlock()->getParent();
  auto Arg = FN->arg_begin();

  bool StoredScalar = false;
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Arg++;
      continue;
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Value *ArgPtr = &*Arg;
    Type *TypePtr = SAI->getElementType()->getPointerTo();
    Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
    Value *Val = Builder.CreateLoad(Alloca);
    Builder.CreateStore(Val, TypedArgPtr);
    StoredScalar = true;

    Arg++;
  }

  if (StoredScalar) {
    /// In case more than one thread contains scalar stores, the generated
    /// code might be incorrect, if we only store at the end of the kernel.
    /// To support this case we need to store these scalars back at each
    /// memory store or at least before each kernel barrier.
    if (Kernel->n_block != 0 || Kernel->n_grid != 0) {
      BuildSuccessful = 0;
      DEBUG(
          dbgs() << getUniqueScopName(&S)
                 << " has a store to a scalar value that"
                    " would be undefined to run in parallel. Bailing out.\n";);
    }
  }
}
void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  for (int i = 0; i < Kernel->n_var; ++i) {
    struct ppcg_kernel_var &Var = Kernel->var[i];
    isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
    Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType();

    Type *ArrayTy = EleTy;
    SmallVector<const SCEV *, 4> Sizes;

    Sizes.push_back(nullptr);
    for (unsigned int j = 1; j < Var.array->n_index; ++j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
    }

    for (int j = Var.array->n_index - 1; j >= 0; --j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      ArrayTy = ArrayType::get(ArrayTy, Bound);
    }

    const ScopArrayInfo *SAI;
    Value *Allocation;
    if (Var.type == ppcg_access_shared) {
      auto GlobalVar = new GlobalVariable(
          *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
          nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
      GlobalVar->setAlignment(EleTy->getPrimitiveSizeInBits() / 8);
      GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));

      Allocation = GlobalVar;
    } else if (Var.type == ppcg_access_private) {
      Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
    } else {
      llvm_unreachable("unknown variable type");
    }
    SAI =
        S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
    Id = isl_id_alloc(S.getIslCtx(), Var.name, nullptr);
    IDToValue[Id] = Allocation;
    LocalArrays.push_back(Allocation);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAI;
  }
}
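// Note: address space 3 in the GlobalVariable constructor above is the NVPTX
// shared-memory address space, so ppcg_access_shared variables end up in
// on-chip shared memory, while ppcg_access_private variables become
// per-thread stack allocations.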
void GPUNodeBuilder::createKernelFunction(
    ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
    SetVector<Function *> &SubtreeFunctions) {
  std::string Identifier = getKernelFuncName(Kernel->id);
  GPUModule.reset(new Module(Identifier, Builder.getContext()));

  switch (Arch) {
  case GPUArch::NVPTX64:
    if (Runtime == GPURuntime::CUDA)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    else if (Runtime == GPURuntime::OpenCL)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
    GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
    break;
  case GPUArch::SPIR32:
    GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
    break;
  case GPUArch::SPIR64:
    GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
    break;
  }

  Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);

  BasicBlock *PrevBlock = Builder.GetInsertBlock();
  auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);

  DT.addNewBlock(EntryBlock, PrevBlock);

  Builder.SetInsertPoint(EntryBlock);
  Builder.CreateRetVoid();
  Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());

  ScopDetection::markFunctionAsInvalid(FN);

  prepareKernelArguments(Kernel, FN);
  createKernelVariables(Kernel, FN);

  switch (Arch) {
  case GPUArch::NVPTX64:
    insertKernelIntrinsics(Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    insertKernelCallsSPIR(Kernel);
    break;
  }
}
std::string GPUNodeBuilder::createKernelASM() {
  llvm::Triple GPUTriple;

  switch (Arch) {
  case GPUArch::NVPTX64:
    switch (Runtime) {
    case GPURuntime::CUDA:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
      break;
    case GPURuntime::OpenCL:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
      break;
    }
    break;
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    std::string SPIRAssembly;
    raw_string_ostream IROstream(SPIRAssembly);
    IROstream << *GPUModule;
    IROstream.flush();
    return SPIRAssembly;
  }

  std::string ErrMsg;
  auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);

  if (!GPUTarget) {
    errs() << ErrMsg << "\n";
    return "";
  }

  TargetOptions Options;
  Options.UnsafeFPMath = FastMath;

  std::string subtarget;

  switch (Arch) {
  case GPUArch::NVPTX64:
    subtarget = CudaVersion;
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    llvm_unreachable("No subtarget for SPIR architecture");
  }

  std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
      GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>()));

  SmallString<0> ASMString;
  raw_svector_ostream ASMStream(ASMString);
  llvm::legacy::PassManager PM;

  PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));

  if (TargetM->addPassesToEmitFile(
          PM, ASMStream, TargetMachine::CGFT_AssemblyFile, true /* verify */)) {
    errs() << "The target does not support generation of this file type!\n";
    return "";
  }

  PM.run(*GPUModule);

  return ASMStream.str();
}
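// Note: for SPIR targets the "assembly" returned above is simply the textual
// LLVM-IR of the kernel module; unlike the NVPTX path there is no
// TargetMachine lowering, and the consuming OpenCL runtime is expected to
// finalize the kernel itself.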
bool GPUNodeBuilder::requiresCUDALibDevice() {
  bool RequiresLibDevice = false;
  for (Function &F : GPUModule->functions()) {
    if (!F.isDeclaration())
      continue;

    const std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(&F);
    if (CUDALibDeviceFunc.length() != 0) {
      F.setName(CUDALibDeviceFunc);
      RequiresLibDevice = true;
    }
  }

  return RequiresLibDevice;
}
void GPUNodeBuilder::addCUDALibDevice() {
  if (Arch != GPUArch::NVPTX64)
    return;

  if (requiresCUDALibDevice()) {
    SMDiagnostic Error;

    errs() << CUDALibDevice << "\n";
    auto LibDeviceModule =
        parseIRFile(CUDALibDevice, Error, GPUModule->getContext());

    if (!LibDeviceModule) {
      BuildSuccessful = false;
      report_fatal_error("Could not find or load libdevice. Skipping GPU "
                         "kernel generation. Please set -polly-acc-libdevice "
                         "accordingly.\n");
    }

    Linker L(*GPUModule);

    // Set an nvptx64 target triple to avoid linker warnings. The original
    // triple of the libdevice files is nvptx-unknown-unknown.
    LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
  }
}
std::string GPUNodeBuilder::finalizeKernelFunction() {

  if (verifyModule(*GPUModule)) {
    DEBUG(dbgs() << "verifyModule failed on module:\n";
          GPUModule->print(dbgs(), nullptr); dbgs() << "\n";);
    DEBUG(dbgs() << "verifyModule Error:\n";
          verifyModule(*GPUModule, &dbgs()););

    if (FailOnVerifyModuleFailure)
      llvm_unreachable("VerifyModule failed.");

    BuildSuccessful = false;
    return "";
  }

  addCUDALibDevice();

  if (DumpKernelIR)
    outs() << *GPUModule << "\n";

  if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) {
    // Optimize module.
    llvm::legacy::PassManager OptPasses;
    PassManagerBuilder PassBuilder;
    PassBuilder.OptLevel = 3;
    PassBuilder.SizeLevel = 0;
    PassBuilder.populateModulePassManager(OptPasses);
    OptPasses.run(*GPUModule);
  }

  std::string Assembly = createKernelASM();

  if (DumpKernelASM)
    outs() << Assembly << "\n";

  GPUModule.release();
  KernelIDs.clear();

  return Assembly;
}
/// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff`
/// @param PwAffs The list of piecewise affine functions to create an
///               `isl_pw_aff_list` from. We expect an rvalue ref because
///               all the isl_pw_aff are used up by this function.
///
/// @returns The `isl_pw_aff_list`.
static __isl_give isl_pw_aff_list *
createPwAffList(isl_ctx *Context,
                const std::vector<__isl_take isl_pw_aff *> &&PwAffs) {
  isl_pw_aff_list *List = isl_pw_aff_list_alloc(Context, PwAffs.size());

  for (unsigned i = 0; i < PwAffs.size(); i++) {
    List = isl_pw_aff_list_insert(List, i, PwAffs[i]);
  }
  return List;
}
/// Align all the `PwAffs` such that they have the same parameter dimensions.
///
/// We loop over all `pw_aff` and align all of their spaces together to
/// create a common space for all the `pw_aff`. This common space is the
/// `AlignSpace`. We then align all the `pw_aff` to this space. We start
/// with the given `SeedSpace`.
/// @param PwAffs    The list of piecewise affine functions we want to align.
///                  This is an rvalue reference because the entire vector is
///                  used up by the end of the operation.
/// @param SeedSpace The space to start the alignment process with.
/// @returns         A std::pair, whose first element is the aligned space,
///                  whose second element is the vector of aligned piecewise
///                  affine functions.
static std::pair<__isl_give isl_space *, std::vector<__isl_give isl_pw_aff *>>
alignPwAffs(const std::vector<__isl_take isl_pw_aff *> &&PwAffs,
            __isl_take isl_space *SeedSpace) {
  assert(SeedSpace && "Invalid seed space given.");

  isl_space *AlignSpace = SeedSpace;
  for (isl_pw_aff *PwAff : PwAffs) {
    isl_space *PwAffSpace = isl_pw_aff_get_domain_space(PwAff);
    AlignSpace = isl_space_align_params(AlignSpace, PwAffSpace);
  }
  std::vector<isl_pw_aff *> AdjustedPwAffs;

  for (unsigned i = 0; i < PwAffs.size(); i++) {
    isl_pw_aff *Adjusted = PwAffs[i];
    assert(Adjusted && "Invalid pw_aff given.");
    Adjusted = isl_pw_aff_align_params(Adjusted, isl_space_copy(AlignSpace));
    AdjustedPwAffs.push_back(Adjusted);
  }
  return std::make_pair(AlignSpace, AdjustedPwAffs);
}
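// Illustrative example (not from the code above): aligning a pw_aff with
// parameter space [N] and one with parameter space [M] yields the common
// space [N, M]; both functions are then rewritten over [N, M] so that they
// can later be combined into a single isl_multi_pw_aff.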
class PPCGCodeGeneration : public ScopPass {
public:
  static char ID;

  GPURuntime Runtime = GPURuntime::CUDA;

  GPUArch Architecture = GPUArch::NVPTX64;

  /// The scop that is currently processed.
  Scop *S;

  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const DataLayout *DL;
  RegionInfo *RI;

  PPCGCodeGeneration() : ScopPass(ID) {}
  /// Construct compilation options for PPCG.
  ///
  /// @returns The compilation options.
  ppcg_options *createPPCGOptions() {
    auto DebugOptions =
        (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
    auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));

    DebugOptions->dump_schedule_constraints = false;
    DebugOptions->dump_schedule = false;
    DebugOptions->dump_final_schedule = false;
    DebugOptions->dump_sizes = false;
    DebugOptions->verbose = false;

    Options->debug = DebugOptions;

    Options->group_chains = false;
    Options->reschedule = true;
    Options->scale_tile_loops = false;
    Options->wrap = false;

    Options->non_negative_parameters = false;
    Options->ctx = nullptr;
    Options->sizes = nullptr;

    Options->tile = true;
    Options->tile_size = 32;

    Options->isolate_full_tiles = false;

    Options->use_private_memory = PrivateMemory;
    Options->use_shared_memory = SharedMemory;
    Options->max_shared_memory = 48 * 1024;

    Options->target = PPCG_TARGET_CUDA;
    Options->openmp = false;
    Options->linearize_device_arrays = true;
    Options->allow_gnu_extensions = false;

    Options->unroll_copy_shared = false;
    Options->unroll_gpu_tile = false;
    Options->live_range_reordering = true;

    Options->hybrid = false;
    Options->opencl_compiler_options = nullptr;
    Options->opencl_use_gpu = false;
    Options->opencl_n_include_file = 0;
    Options->opencl_include_files = nullptr;
    Options->opencl_print_kernel_types = false;
    Options->opencl_embed_kernel_code = false;

    Options->save_schedule_file = nullptr;
    Options->load_schedule_file = nullptr;

    return Options;
  }
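  // Note: 48 * 1024 bytes matches the shared memory available per block on
  // most CUDA-capable devices, which is presumably why it serves as the
  // default ceiling above; it is not queried from the actual target device.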
  /// Get a tagged access relation containing all accesses of type @p AccessTy.
  ///
  /// Instead of a normal access of the form:
  ///
  ///   Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// a tagged access has the form
  ///
  ///   [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// where 'id' is an additional space that references the memory access that
  /// triggered the access.
  ///
  /// @param AccessTy The type of the memory accesses to collect.
  ///
  /// @return The relation describing all tagged memory accesses.
  isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
    isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace().release());

    for (auto &Stmt : *S)
      for (auto &Acc : Stmt)
        if (Acc->getType() == AccessTy) {
          isl_map *Relation = Acc->getAccessRelation().release();
          Relation =
              isl_map_intersect_domain(Relation, Stmt.getDomain().release());

          isl_space *Space = isl_map_get_space(Relation);
          Space = isl_space_range(Space);
          Space = isl_space_from_range(Space);
          Space = isl_space_set_tuple_id(Space, isl_dim_in,
                                         Acc->getId().release());
          isl_map *Universe = isl_map_universe(Space);
          Relation = isl_map_domain_product(Relation, Universe);
          Accesses = isl_union_map_add_map(Accesses, Relation);
        }

    return Accesses;
  }
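  // Illustrative example (hypothetical names): for a statement Stmt reading
  // A[i] through an access with id MemRef_A__ref0, the tagged relation is
  //   { [Stmt[i] -> MemRef_A__ref0[]] -> MemRef_A[i] }
  // which lets ppcg distinguish individual references to the same array.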
  /// Get the set of all read accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedReads() {
    return getTaggedAccesses(MemoryAccess::READ);
  }

  /// Get the set of all may (and must) writes, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMayWrites() {
    return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
                               getTaggedAccesses(MemoryAccess::MUST_WRITE));
  }

  /// Get the set of all must writes, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMustWrites() {
    return getTaggedAccesses(MemoryAccess::MUST_WRITE);
  }
  /// Collect parameter and array names as isl_ids.
  ///
  /// To reason about the different parameters and arrays used, ppcg requires
  /// a list of all isl_ids in use. As PPCG traditionally performs
  /// source-to-source compilation, each of these isl_ids is mapped to the
  /// expression that represents it. As we do not have a corresponding
  /// expression in Polly, we just map each id to a 'zero' expression to match
  /// the data format that ppcg expects.
  ///
  /// @returns Return a map from collected ids to 'zero' ast expressions.
  __isl_give isl_id_to_ast_expr *getNames() {
    auto *Names = isl_id_to_ast_expr_alloc(
        S->getIslCtx(),
        S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
    auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx()));

    for (const SCEV *P : S->parameters()) {
      isl_id *Id = S->getIdForParam(P).release();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    for (auto &Array : S->arrays()) {
      auto Id = Array->getBasePtrId().release();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    isl_ast_expr_free(Zero);

    return Names;
  }
  /// Create a new PPCG scop from the current scop.
  ///
  /// The PPCG scop is initialized with data from the current polly::Scop. From
  /// this initial data, the data-dependences in the PPCG scop are initialized.
  /// We do not use Polly's dependence analysis for now, to ensure we match
  /// the PPCG default behaviour more closely.
  ///
  /// @returns A new ppcg scop.
  ppcg_scop *createPPCGScop() {
    MustKillsInfo KillsInfo = computeMustKillsInfo(*S);

    auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));

    PPCGScop->options = createPPCGOptions();
    // enable live range reordering
    PPCGScop->options->live_range_reordering = 1;

    PPCGScop->start = 0;
    PPCGScop->end = 0;

    PPCGScop->context = S->getContext().release();
    PPCGScop->domain = S->getDomains().release();
    // TODO: investigate this further. PPCG calls collect_call_domains.
    PPCGScop->call = isl_union_set_from_set(S->getContext().release());
    PPCGScop->tagged_reads = getTaggedReads();
    PPCGScop->reads = S->getReads().release();
    PPCGScop->live_in = nullptr;
    PPCGScop->tagged_may_writes = getTaggedMayWrites();
    PPCGScop->may_writes = S->getWrites().release();
    PPCGScop->tagged_must_writes = getTaggedMustWrites();
    PPCGScop->must_writes = S->getMustWrites().release();
    PPCGScop->live_out = nullptr;
    PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.take();
    PPCGScop->must_kills = KillsInfo.MustKills.take();

    PPCGScop->tagger = nullptr;
    PPCGScop->independence =
        isl_union_map_empty(isl_set_get_space(PPCGScop->context));
    PPCGScop->dep_flow = nullptr;
    PPCGScop->tagged_dep_flow = nullptr;
    PPCGScop->dep_false = nullptr;
    PPCGScop->dep_forced = nullptr;
    PPCGScop->dep_order = nullptr;
    PPCGScop->tagged_dep_order = nullptr;

    PPCGScop->schedule = S->getScheduleTree().release();
    // If we have something non-trivial to kill, add it to the schedule.
    if (KillsInfo.KillsSchedule.get())
      PPCGScop->schedule = isl_schedule_sequence(
          PPCGScop->schedule, KillsInfo.KillsSchedule.take());

    PPCGScop->names = getNames();
    PPCGScop->pet = nullptr;

    compute_tagger(PPCGScop);
    compute_dependences(PPCGScop);
    eliminate_dead_code(PPCGScop);

    return PPCGScop;
  }
  /// Collect the array accesses in a statement.
  ///
  /// @param Stmt The statement for which to collect the accesses.
  ///
  /// @returns A list of array accesses.
  gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
    gpu_stmt_access *Accesses = nullptr;

    for (MemoryAccess *Acc : Stmt) {
      auto Access = isl_alloc_type(S->getIslCtx(), struct gpu_stmt_access);
      Access->read = Acc->isRead();
      Access->write = Acc->isWrite();
      Access->access = Acc->getAccessRelation().release();
      isl_space *Space = isl_map_get_space(Access->access);
      Space = isl_space_range(Space);
      Space = isl_space_from_range(Space);
      Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
      isl_map *Universe = isl_map_universe(Space);
      Access->tagged_access =
          isl_map_domain_product(Acc->getAccessRelation().release(), Universe);
      Access->exact_write = !Acc->isMayWrite();
      Access->ref_id = Acc->getId().release();
      Access->next = Accesses;
      Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
      Accesses = Access;
    }

    return Accesses;
  }
  /// Collect the list of GPU statements.
  ///
  /// Each statement has an id, a pointer to the underlying data structure,
  /// as well as a list with all memory accesses.
  ///
  /// TODO: Initialize the list of memory accesses.
  ///
  /// @returns A linked-list of statements.
  gpu_stmt *getStatements() {
    gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx(), struct gpu_stmt,
                                       std::distance(S->begin(), S->end()));

    int i = 0;
    for (auto &Stmt : *S) {
      gpu_stmt *GPUStmt = &Stmts[i];

      GPUStmt->id = Stmt.getDomainId().release();

      // We use the pet stmt pointer to keep track of the Polly statements.
      GPUStmt->stmt = (pet_stmt *)&Stmt;
      GPUStmt->accesses = getStmtAccesses(Stmt);
      i++;
    }

    return Stmts;
  }
  /// Derive the extent of an array.
  ///
  /// The extent of an array is the set of elements that are within the
  /// accessed array. For the inner dimensions, the extent constraints are
  /// 0 and the size of the corresponding array dimension. For the first
  /// (outermost) dimension, the extent constraints are the minimal and maximal
  /// subscript value for the first dimension.
  ///
  /// @param Array The array to derive the extent for.
  ///
  /// @returns An isl_set describing the extent of the array.
  __isl_give isl_set *getExtent(ScopArrayInfo *Array) {
    unsigned NumDims = Array->getNumberOfDimensions();
    isl_union_map *Accesses = S->getAccesses().release();
    Accesses =
        isl_union_map_intersect_domain(Accesses, S->getDomains().release());
    Accesses = isl_union_map_detect_equalities(Accesses);
    isl_union_set *AccessUSet = isl_union_map_range(Accesses);
    AccessUSet = isl_union_set_coalesce(AccessUSet);
    AccessUSet = isl_union_set_detect_equalities(AccessUSet);
    AccessUSet = isl_union_set_coalesce(AccessUSet);

    if (isl_union_set_is_empty(AccessUSet)) {
      isl_union_set_free(AccessUSet);
      return isl_set_empty(Array->getSpace().release());
    }

    if (Array->getNumberOfDimensions() == 0) {
      isl_union_set_free(AccessUSet);
      return isl_set_universe(Array->getSpace().release());
    }

    isl_set *AccessSet =
        isl_union_set_extract_set(AccessUSet, Array->getSpace().release());

    isl_union_set_free(AccessUSet);
    isl_local_space *LS =
        isl_local_space_from_space(Array->getSpace().release());

    isl_pw_aff *Val =
        isl_pw_aff_from_aff(isl_aff_var_on_domain(LS, isl_dim_set, 0));

    isl_pw_aff *OuterMin = isl_set_dim_min(isl_set_copy(AccessSet), 0);
    isl_pw_aff *OuterMax = isl_set_dim_max(AccessSet, 0);
    OuterMin = isl_pw_aff_add_dims(OuterMin, isl_dim_in,
                                   isl_pw_aff_dim(Val, isl_dim_in));
    OuterMax = isl_pw_aff_add_dims(OuterMax, isl_dim_in,
                                   isl_pw_aff_dim(Val, isl_dim_in));
    OuterMin = isl_pw_aff_set_tuple_id(OuterMin, isl_dim_in,
                                       Array->getBasePtrId().release());
    OuterMax = isl_pw_aff_set_tuple_id(OuterMax, isl_dim_in,
                                       Array->getBasePtrId().release());

    isl_set *Extent = isl_set_universe(Array->getSpace().release());

    Extent = isl_set_intersect(
        Extent, isl_pw_aff_le_set(OuterMin, isl_pw_aff_copy(Val)));
    Extent = isl_set_intersect(Extent, isl_pw_aff_ge_set(OuterMax, Val));

    for (unsigned i = 1; i < NumDims; ++i)
      Extent = isl_set_lower_bound_si(Extent, isl_dim_set, i, 0);

    for (unsigned i = 0; i < NumDims; ++i) {
      isl_pw_aff *PwAff =
          const_cast<isl_pw_aff *>(Array->getDimensionSizePw(i).release());

      // isl_pw_aff can be NULL for zero dimension. Only in the case of a
      // Fortran array will we have a legitimate dimension.
      if (!PwAff) {
        assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
        continue;
      }

      isl_pw_aff *Val = isl_pw_aff_from_aff(isl_aff_var_on_domain(
          isl_local_space_from_space(Array->getSpace().release()), isl_dim_set,
          i));
      PwAff = isl_pw_aff_add_dims(PwAff, isl_dim_in,
                                  isl_pw_aff_dim(Val, isl_dim_in));
      PwAff = isl_pw_aff_set_tuple_id(PwAff, isl_dim_in,
                                      isl_pw_aff_get_tuple_id(Val, isl_dim_in));
      auto *Set = isl_pw_aff_gt_set(PwAff, Val);
      Extent = isl_set_intersect(Set, Extent);
    }

    return Extent;
  }
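  // Illustrative example (hypothetical array): for accesses A[i][j] with
  // 5 <= i <= N and a second dimension of size 1024, the derived extent is
  //   { MemRef_A[o0, o1] : 5 <= o0 <= N and 0 <= o1 < 1024 }
  // i.e. the outermost dimension is bounded by the observed min/max
  // subscripts, while inner dimensions use [0, size).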
  /// Derive the bounds of an array.
  ///
  /// For the first dimension we derive the bound of the array from the extent
  /// of this dimension. For inner dimensions we obtain their size directly
  /// from ScopArrayInfo.
  ///
  /// @param PPCGArray The array to compute bounds for.
  /// @param Array The polly array from which to take the information.
  void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
    std::vector<isl_pw_aff *> Bounds;

    if (PPCGArray.n_index > 0) {
      if (isl_set_is_empty(PPCGArray.extent)) {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        isl_local_space *LS = isl_local_space_from_space(
            isl_space_params(isl_set_get_space(Dom)));
        isl_set_free(Dom);
        isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS));
        Bounds.push_back(Zero);
      } else {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
        isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
        isl_set_free(Dom);
        Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
        isl_local_space *LS =
            isl_local_space_from_space(isl_set_get_space(Dom));
        isl_aff *One = isl_aff_zero_on_domain(LS);
        One = isl_aff_add_constant_si(One, 1);
        Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
        Bound = isl_pw_aff_gist(Bound, S->getContext().release());
        Bounds.push_back(Bound);
      }
    }

    for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
      isl_pw_aff *Bound = Array->getDimensionSizePw(i).release();
      auto LS = isl_pw_aff_get_domain_space(Bound);
      auto Aff = isl_multi_aff_zero(LS);
      Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
      Bounds.push_back(Bound);
    }

    /// To construct an `isl_multi_pw_aff`, we need all the individual
    /// `pw_aff` to have the same parameter dimensions. So, we need to align
    /// them to an appropriate space.
    /// Scop::Context is _not_ an appropriate space, because when we have
    /// `-polly-ignore-parameter-bounds` enabled, the Scop::Context does not
    /// contain all parameter dimensions.
    /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff` together.
    isl_space *SeedAlignSpace = S->getParamSpace().release();
    SeedAlignSpace = isl_space_add_dims(SeedAlignSpace, isl_dim_set, 1);

    isl_space *AlignSpace = nullptr;
    std::vector<isl_pw_aff *> AlignedBounds;
    std::tie(AlignSpace, AlignedBounds) =
        alignPwAffs(std::move(Bounds), SeedAlignSpace);

    assert(AlignSpace && "alignPwAffs did not initialise AlignSpace");

    isl_pw_aff_list *BoundsList =
        createPwAffList(S->getIslCtx(), std::move(AlignedBounds));

    isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent);
    BoundsSpace = isl_space_align_params(BoundsSpace, AlignSpace);

    assert(BoundsSpace && "Unable to access space of array.");
    assert(BoundsList && "Unable to access list of bounds.");

    PPCGArray.bound =
        isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList);
    assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly.");
  }
  /// Create the arrays for @p PPCGProg.
  ///
  /// @param PPCGProg The program to compute the arrays for.
  void createArrays(gpu_prog *PPCGProg,
                    const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) {
    int i = 0;
    for (auto &Array : ValidSAIs) {
      std::string TypeName;
      raw_string_ostream OS(TypeName);

      OS << *Array->getElementType();
      TypeName = OS.str();

      gpu_array_info &PPCGArray = PPCGProg->array[i];

      PPCGArray.space = Array->getSpace().release();
      PPCGArray.type = strdup(TypeName.c_str());
      PPCGArray.size = DL->getTypeAllocSize(Array->getElementType());
      PPCGArray.name = strdup(Array->getName().c_str());
      PPCGArray.extent = nullptr;
      PPCGArray.n_index = Array->getNumberOfDimensions();
      PPCGArray.extent = getExtent(Array);
      PPCGArray.n_ref = 0;
      PPCGArray.refs = nullptr;
      PPCGArray.accessed = true;
      PPCGArray.read_only_scalar =
          Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
      PPCGArray.has_compound_element = false;
      PPCGArray.local = false;
      PPCGArray.declare_local = false;
      PPCGArray.global = false;
      PPCGArray.linearize = false;
      PPCGArray.dep_order = nullptr;
      PPCGArray.user = Array;

      PPCGArray.bound = nullptr;
      setArrayBounds(PPCGArray, Array);
      i++;

      collect_references(PPCGProg, &PPCGArray);
    }
  }
  /// Create an identity map between the arrays in the scop.
  ///
  /// @returns An identity map between the arrays in the scop.
  isl_union_map *getArrayIdentity() {
    isl_union_map *Maps = isl_union_map_empty(S->getParamSpace().release());

    for (auto &Array : S->arrays()) {
      isl_space *Space = Array->getSpace().release();
      Space = isl_space_map_from_set(Space);
      isl_map *Identity = isl_map_identity(Space);
      Maps = isl_union_map_add_map(Maps, Identity);
    }

    return Maps;
  }
  /// Create a default-initialized PPCG GPU program.
  ///
  /// @returns A new gpu program description.
  gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {

    if (!PPCGScop)
      return nullptr;

    auto PPCGProg = isl_calloc_type(S->getIslCtx(), struct gpu_prog);

    PPCGProg->ctx = S->getIslCtx();
    PPCGProg->scop = PPCGScop;
    PPCGProg->context = isl_set_copy(PPCGScop->context);
    PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
    PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
    PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
    PPCGProg->tagged_must_kill =
        isl_union_map_copy(PPCGScop->tagged_must_kills);
    PPCGProg->to_inner = getArrayIdentity();
    PPCGProg->to_outer = getArrayIdentity();
    // TODO: verify that this assignment is correct.
    PPCGProg->any_to_outer = nullptr;

    // this needs to be set when live range reordering is enabled.
    // NOTE: I believe that is conservatively correct. I'm not sure
    // what the semantics of this is.
    // Quoting PPCG/gpu.h: "Order dependences on non-scalars."
    PPCGProg->array_order =
        isl_union_map_empty(isl_set_get_space(PPCGScop->context));
    PPCGProg->n_stmts = std::distance(S->begin(), S->end());
    PPCGProg->stmts = getStatements();

    // Only consider arrays that have a non-empty extent.
    // Otherwise, this will cause us to consider the following kinds of
    // arrays:
    // 1. Invariant loads that are represented by SAI objects.
    // 2. Arrays with statically known zero size.
    auto ValidSAIsRange =
        make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool {
          return !isl::manage(getExtent(SAI)).is_empty();
        });
    SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(),
                                              ValidSAIsRange.end());

    PPCGProg->n_array =
        ValidSAIs.size(); // std::distance(S->array_begin(), S->array_end());
    PPCGProg->array = isl_calloc_array(S->getIslCtx(), struct gpu_array_info,
                                       PPCGProg->n_array);

    createArrays(PPCGProg, ValidSAIs);

    PPCGProg->may_persist = compute_may_persist(PPCGProg);

    return PPCGProg;
  }
  struct PrintGPUUserData {
    struct cuda_info *CudaInfo;
    struct gpu_prog *PPCGProg;
    std::vector<ppcg_kernel *> Kernels;
  };

  /// Print a user statement node in the host code.
  ///
  /// We use ppcg's printing facilities to print the actual statement and
  /// additionally build up a list of all kernels that are encountered in the
  /// host ast.
  ///
  /// @param P The printer to print to
  /// @param Options The printing options to use
  /// @param Node The node to print
  /// @param User A user pointer to carry additional data. This pointer is
  ///             expected to be of type PrintGPUUserData.
  ///
  /// @returns A printer to which the output has been printed.
  static __isl_give isl_printer *
  printHostUser(__isl_take isl_printer *P,
                __isl_take isl_ast_print_options *Options,
                __isl_take isl_ast_node *Node, void *User) {
    auto Data = (struct PrintGPUUserData *)User;
    auto Id = isl_ast_node_get_annotation(Node);

    if (Id) {
      bool IsUser = !strcmp(isl_id_get_name(Id), "user");

      // If this is a user statement, format it ourselves as ppcg would
      // otherwise try to call pet functionality that is not available in
      // Polly.
      if (IsUser) {
        P = isl_printer_start_line(P);
        P = isl_printer_print_ast_node(P, Node);
        P = isl_printer_end_line(P);
        isl_id_free(Id);
        isl_ast_print_options_free(Options);
        return P;
      }

      auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
      isl_id_free(Id);
      Data->Kernels.push_back(Kernel);
    }

    return print_host_user(P, Options, Node, User);
  }
  /// Print C code corresponding to the control flow in @p Kernel.
  ///
  /// @param Kernel The kernel to print
  void printKernel(ppcg_kernel *Kernel) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);
    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    P = isl_ast_node_print(Kernel->tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);
  }
  /// Print C code corresponding to the GPU code described by @p Tree.
  ///
  /// @param Tree An AST describing GPU code
  /// @param PPCGProg The PPCG program from which @p Tree has been constructed.
  void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);

    PrintGPUUserData Data;
    Data.PPCGProg = PPCGProg;

    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    Options =
        isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
    P = isl_ast_node_print(Tree, P, Options);
    char *String = isl_printer_get_str(P);

    printf("%s\n", String);

    free(String);

    isl_printer_free(P);

    for (auto Kernel : Data.Kernels) {
      printf("# kernel%d\n", Kernel->id);
      printKernel(Kernel);
    }
  }
  // Generate a GPU program using PPCG.
  //
  // GPU mapping consists of multiple steps:
  //
  //  1) Compute new schedule for the program.
  //  2) Map schedule to GPU (TODO)
  //  3) Generate code for new schedule (TODO)
  //
  // We do not use the Polly ScheduleOptimizer here, as the schedule optimizer
  // is mostly CPU specific. Instead, we use PPCG's GPU code generation
  // strategy directly from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set scheduling strategy to same strategy PPCG is using.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);

    isl_schedule *Schedule = get_schedule(PPCGGen);

    int has_permutable = has_any_permutable_node(Schedule);

    Schedule =
        isl_schedule_align_params(Schedule, S->getFullParamSpace().release());

    if (!has_permutable || has_permutable < 0) {
      Schedule = isl_schedule_free(Schedule);
      DEBUG(dbgs() << getUniqueScopName(S)
                   << " does not have permutable bands. Bailing out\n";);
    } else {
      Schedule = map_to_device(PPCGGen, Schedule);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      printf("%s\n", isl_printer_get_str(P));
      isl_printer_free(P);
    }

    if (DumpCode) {
      printf("Code\n");
      printf("====\n");
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        printf("No code generated\n");
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }
  /// Free gpu_gen structure.
  ///
  /// @param PPCGGen The ppcg_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }
  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg is not freeing these options for us. To avoid leaks we do this
  /// manually.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }
  /// Approximate the number of points in the set.
  ///
  /// This function returns an ast expression that overapproximates the number
  /// of points in an isl set through the rectangular hull surrounding this
  /// set.
  ///
  /// @param Set   The set to count.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of points in the set.
  __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
                                             __isl_keep isl_ast_build *Build) {

    isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
    auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));

    isl_space *Space = isl_set_get_space(Set);
    Space = isl_space_params(Space);
    auto *Univ = isl_set_universe(Space);
    isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);

    for (long i = 0; i < isl_set_dim(Set, isl_dim_set); i++) {
      isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
      isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
      isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
      DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
      auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
      Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
    }

    isl_set_free(Set);
    isl_pw_aff_free(OneAff);

    return Expr;
  }
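  // Worked example: for the set { [i,j] : 0 <= i < N and 0 <= j < M } the
  // loop above builds ((N-1) - 0 + 1) * ((M-1) - 0 + 1) = N * M, which is
  // exact here and an overapproximation for non-rectangular sets.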
  /// Approximate a number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt  The statement for which to compute the number of dynamic
  ///              instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
    auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }
  /// Approximate dynamic instructions executed in scop.
  ///
  /// @param S     The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }
  /// Create a check that ensures sufficient compute in scop.
  ///
  /// @param S     The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE, otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }
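  // Note: MinCompute is the user-configurable threshold (set via the
  // -polly-acc-mincompute option defined earlier in this file); scops whose
  // approximated dynamic instruction count stays below it fall back to the
  // host at run time, since kernel-launch overhead would dominate.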
  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
                                            bool AllowCUDALibDevice) {
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
                                          AllowCUDALibDevice)) {
        continue;
      }

      for (Value *SrcVal : Inst.operands()) {
        PointerType *p = dyn_cast<PointerType>(SrcVal->getType());
        if (!p)
          continue;
        if (isa<FunctionType>(p->getElementType()))
          return true;
      }
    }
    return false;
  }
  /// Return whether the Scop S uses functions in a way that we do not support.
  bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
                                                 AllowCUDALibDevice))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
            return true;
      }
    }
    return false;
  }

  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder = createPollyIRBuilder(EnteringBB, Annotator);

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important as the conditional
    // branch will guard the original scop from new induction variables that
    // the SCEVExpander may introduce while code generating the parameters and
    // which may introduce scalar dependences that prevent us from correctly
    // code generating this scop.
    BBPair StartExitBlocks;
    BranchInst *CondBr = nullptr;
    std::tie(StartExitBlocks, CondBr) =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    assert(CondBr && "CondBr not initialized by executeScopConditionally");

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx());
    isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition = isl_ast_expr_and(Condition, SufficientCompute);
    isl_ast_build_free(Build);
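
    // At this point Condition combines the original run-time check with the
    // sufficient-compute check; schematically, for hypothetical parameters:
    //   (n >= 1) && (8 * n * m >= MinCompute)
    // createRTC below materializes it as a single i1 value.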

    // Preload invariant loads. Note: This should happen before the RTC
    // because the RTC may depend on values that are invariant load hoisted.
    if (!NodeBuilder.preloadInvariantLoads()) {
      DEBUG(dbgs() << "preloading invariant loads failed in function: " +
                          S->getFunction().getName() +
                          " | Scop Region: " + S->getNameStr());
      // Adjust the dominator tree accordingly.
      auto *ExitingBlock = StartBlock->getUniqueSuccessor();
      assert(ExitingBlock);
      auto *MergeBlock = ExitingBlock->getUniqueSuccessor();
      assert(MergeBlock);
      polly::markBlockUnreachable(*StartBlock, Builder);
      polly::markBlockUnreachable(*ExitingBlock, Builder);
      auto *ExitingBB = S->getExitingBlock();
      assert(ExitingBB);

      DT->changeImmediateDominator(MergeBlock, ExitingBB);
      DT->eraseNode(ExitingBlock);
      isl_ast_expr_free(Condition);
      isl_ast_node_free(Root);
    } else {
      NodeBuilder.addParameters(S->getContext().release());
      Value *RTC = NodeBuilder.createRTC(Condition);
      Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

      Builder.SetInsertPoint(&*StartBlock->begin());

      NodeBuilder.create(Root);
    }

    /// In case a sequential kernel has more surrounding loops than any
    /// parallel kernel, the SCoP is probably mostly sequential. Hence, there
    /// is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      CondBr->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      CondBr->setOperand(0, Builder.getFalse());
  }
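
  // Resulting control flow (schematic): the conditional branch created by
  // executeScopConditionally chooses between the generated GPU path and the
  // original scop, so forcing its operand to `false` above statically
  // disables the GPU path while leaving the emitted code dead:
  //
  //   SplitBlock --cond---> StartBlock (GPU code) --> ... --> MergeBlock
  //       \------!cond----> original scop (CPU) ------------>/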

  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation will need to offload function calls to the
    // kernel. This may lead to a kernel trying to call a function on the host.
    // This also allows us to prevent codegen from trying to take the
    // address of an intrinsic function to send to the kernel.
    if (containsInvalidKernelFunction(CurrentScop,
                                      Architecture == GPUArch::NVPTX64)) {
      DEBUG(
          dbgs() << getUniqueScopName(S)
                 << " contains function which cannot be materialised in a GPU "
                    "kernel. Bailing out.\n";);
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree) {
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
      CurrentScop.markAsToBeSkipped();
    } else {
      DEBUG(dbgs() << getUniqueScopName(S)
                   << " has empty PPCGGen->tree. Bailing out.\n");
    }

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);
    return true;
  }

  void printScop(raw_ostream &, Scop &) const override {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<ScopDetectionWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<SCEVAAWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    //        region tree.
    AU.addPreserved<RegionInfoPass>();
    AU.addPreserved<ScopInfoRegionPass>();
  }
};
} // namespace

char PPCGCodeGeneration::ID = 1;

Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}
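
// Usage sketch (illustrative): clients construct the pass through this
// factory, choosing architecture and runtime explicitly. GPUArch and
// GPURuntime are the enums used in the signature above; the exact
// enumerators shown here are an assumption.
//
//   legacy::PassManager PM;
//   PM.add(polly::createPPCGCodeGenerationPass(GPUArch::NVPTX64,
//                                              GPURuntime::CUDA));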

INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)