//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//
#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#include "isl/union_map.h"
#include "ppcg/cuda.h"
#include "ppcg/gpu.h"
#include "ppcg/gpu_print.h"
#include "ppcg/ppcg.h"
#include "ppcg/schedule.h"

#include "llvm/Support/Debug.h"
using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"
static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));
static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));
bool polly::PollyManagedMemory;
static cl::opt<bool, true>
    XManagedMemory("polly-acc-codegen-managed-memory",
                   cl::desc("Generate Host kernel code assuming"
                            " that all memory has been"
                            " declared as managed memory"),
                   cl::location(PollyManagedMemory), cl::Hidden,
                   cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
static cl::opt<bool>
    FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
                              cl::desc("Fail and generate a backtrace if"
                                       " verifyModule fails on the GPU "
                                       " module"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<std::string> CUDALibDevice(
    "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
    cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
    cl::ZeroOrMore, cl::cat(PollyCategory));
static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));
static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));
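
// As an illustration (not a complete invocation), the dump options above are
// typically combined with Polly's GPU code generation along the lines of:
//   opt -polly-target=gpu -polly-acc-dump-schedule \
//       -polly-acc-dump-kernel-ir ... input.ll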
/// Return a unique name for a Scop, which is the scop region with the
/// function name.
std::string getUniqueScopName(const Scop *S) {
  return "Scop Region: " + S->getNameStr() +
         " | Function: " + std::string(S->getFunction().getName());
}
/// Used to store information PPCG wants for kills. This information is
/// used by live range reordering.
///
/// @see computeLiveRangeReordering
/// @see GPUNodeBuilder::createPPCGScop
/// @see GPUNodeBuilder::createPPCGProg
struct MustKillsInfo {
  /// Collection of all kill statements that will be sequenced at the end of
  /// PPCGScop->schedule.
  ///
  /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`
  /// which merges schedules in *arbitrary* order.
  /// (we don't care about the order of the kills anyway).
  isl::schedule KillsSchedule;

  /// Map from kill statement instances to scalars that need to be
  /// killed.
  ///
  /// We currently derive kill information for:
  ///  1. phi nodes. PHI nodes are not alive outside the scop and can
  ///     consequently all be killed.
  ///  2. Scalar arrays that are not used outside the Scop. This is
  ///     checked by `isScalarUsesContainedInScop`.
  ///  [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
  isl::union_map TaggedMustKills;

  /// Tagged must kills stripped of the tags.
  /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] }
  isl::union_map MustKills;

  MustKillsInfo() : KillsSchedule(nullptr) {}
};
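
// For a scalar backed by MemRef_tmp, the maps built in computeMustKillsInfo
// below take roughly this shape (an illustrative instance with a single
// parameter n):
//   TaggedMustKills: [n] -> { [SKill_phantom_MemRef_tmp[]
//                               -> ref_phantomMemRef_tmp[]] -> MemRef_tmp[] }
//   MustKills:       [n] -> { SKill_phantom_MemRef_tmp[] -> MemRef_tmp[] }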
/// Check if SAI's uses are entirely contained within Scop S.
/// If a scalar is used only within a Scop, we are free to kill it, as no data
/// can flow in/out of the value any more.
/// @see computeMustKillsInfo
static bool isScalarUsesContainedInScop(const Scop &S,
                                        const ScopArrayInfo *SAI) {
  assert(SAI->isValueKind() && "this function only deals with scalars."
                               " Dealing with arrays requires alias analysis");

  const Region &R = S.getRegion();
  for (User *U : SAI->getBasePtr()->users()) {
    Instruction *I = dyn_cast<Instruction>(U);
    assert(I && "invalid user of scop array info");
    if (!R.contains(I))
      return false;
  }
  return true;
}
/// Compute must-kills needed to enable live range reordering with PPCG.
///
/// @params S The Scop to compute live range reordering information
/// @returns live range reordering information that can be used to setup
///          PPCG.
static MustKillsInfo computeMustKillsInfo(const Scop &S) {
  const isl::space ParamSpace = S.getParamSpace();
  MustKillsInfo Info;
  // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
  //      1.1 phi nodes in scop.
  //      1.2 scalars that are only used within the scop
  SmallVector<isl::id, 4> KillMemIds;
  for (ScopArrayInfo *SAI : S.arrays()) {
    if (SAI->isPHIKind() ||
        (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
      KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
  }

  Info.TaggedMustKills = isl::union_map::empty(ParamSpace);
  Info.MustKills = isl::union_map::empty(ParamSpace);
  // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the
  // schedule:
  //     - filter: "[control] -> { }"
  // So, we choose to not create this to keep the output a little nicer,
  // at the cost of some code complexity.
  Info.KillsSchedule = nullptr;

  for (isl::id &ToKillId : KillMemIds) {
    isl::id KillStmtId = isl::id::alloc(
        S.getIslCtx(),
        std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);
    // NOTE: construction of tagged_must_kill:
    // 2. We need to construct a map:
    //     [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
    // To construct this, we use `isl_map_domain_product` on 2 maps:
    // 2a. StmtToScalar:
    //     [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
    // 2b. PhantomRefToScalar:
    //     [param] -> { ref_phantom[] -> scalar_to_kill[] }
    //
    // Combining these with `isl_map_domain_product` gives us
    // TaggedMustKill:
    //     [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }

    // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
    isl::map StmtToScalar = isl::map::universe(ParamSpace);
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));
    isl::id PhantomRefId = isl::id::alloc(
        S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
        nullptr);

    // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
    isl::map PhantomRefToScalar = isl::map::universe(ParamSpace);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);

    // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
    isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
    Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);

    // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
    Info.MustKills = Info.TaggedMustKills.domain_factor_domain();
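
    // `domain_factor_domain` strips the access tag from the wrapped domain,
    // e.g. (illustrative): [n] -> { [S[] -> ref[]] -> A[] } becomes
    // [n] -> { S[] -> A[] }.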
    // 3. Create the kill schedule of the form:
    //     "[param] -> { Stmt_phantom[] }"
    // Then add this to Info.KillsSchedule.
    isl::space KillStmtSpace = ParamSpace;
    KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
    isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);

    isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
    if (Info.KillsSchedule)
      Info.KillsSchedule = Info.KillsSchedule.set(KillSchedule);
    else
      Info.KillsSchedule = KillSchedule;
  }

  return Info;
}
/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback used to generate the ast expressions for each
/// of the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, __isl_take isl_ast_build *Build_C,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  if (!Stmt || !Build_C)
    return NULL;
  isl::ast_build Build = isl::manage(isl_ast_build_copy(Build_C));
  isl::ctx Ctx = Build.get_ctx();
  isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);

  Stmt->setAstBuild(Build);

  for (MemoryAccess *Acc : *Stmt) {
    isl::map AddrFunc = Acc->getAddressFunction();
    AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain());

    isl::id RefId = Acc->getId();
    isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);

    isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
    MPA = MPA.coalesce();
    MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));

    isl::ast_expr Access = Build.access_from(MPA);
    Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
    RefToExpr = RefToExpr.set(RefId, Access);
  }

  return RefToExpr.release();
}
/// Given a LLVM Type, compute its size in bytes.
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}
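
// For example, an i32 yields 4 and a double yields 8; for types whose
// primitive size is reported as 0 bits, the scalar size in bits is used as
// a fallback.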
/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }

  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  virtual void finalize();
  /// Track if the full build process was successful.
  ///
  /// This value is set to false, if throughout the build process an error
  /// occurred which prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

  /// Return the name to set for the ptx_kernel.
  std::string getKernelFuncName(int Kernel_id);

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed any
  /// more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  struct IslIdDeleter {
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;
  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  virtual void createUser(__isl_take isl_ast_node *UserStmt);

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
  /// @param TransferStmt The data transfer statement.
  /// @param Direction    The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);
  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values
  ///
  /// @returns A tuple, whose:
  ///          - First element contains the set of values referenced by the
  ///            kernel
  ///          - Second element contains the set of functions referenced by the
  ///            kernel. All functions in the set satisfy
  ///            `isValidFunctionInKernel`.
  ///          - Third element contains loops that have induction variables
  ///            which are used in the kernel, *and* these loops are *neither*
  ///            in the scop, nor do they immediately surround the Scop.
  ///            See [Code generation of induction variables of loops outside
  ///            Scops]
  std::tuple<SetVector<Value *>, SetVector<Function *>,
             SetVector<const Loop *>, isl::space>
  getReferencesInKernel(ppcg_kernel *Kernel);
  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for X and Y dimension
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);

  /// Get the managed array pointer for sending host pointers to the device.
  ///
  /// This is to be used only with managed memory
  Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);
  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);

  /// Create kernel launch parameters.
  ///
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variable.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel The kernel definition to create variables for.
  /// @param FN     The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);
  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);

  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);
  /// Generate code that computes the size of an array.
  ///
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);

  /// Generate code to compute the minimal offset at which an array is accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42];
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);
  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel           The kernel to generate code for.
  /// @param SubtreeValues    The set of llvm::Values referenced by this kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel        The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);
  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Insert function calls to retrieve the SPIR group/local ids.
  ///
  /// @param The kernel to generate the function calls for.
  void insertKernelCallsSPIR(ppcg_kernel *Kernel);

  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  /// SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  /// old functions (that come from the original module) to new functions
  /// (that are created within GPUModule). That way, we generate references
  /// to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         the kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);
  /// Create a global-to-shared or shared-to-global copy statement.
  ///
  /// @param CopyStmt The copy statement to generate code for
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr       The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();

  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Check if the scop requires to be linked with CUDA's libdevice.
  bool requiresCUDALibDevice();

  /// Link with the NVIDIA libdevice library (if needed and available).
  void addCUDALibDevice();

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
  /// dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();
  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @params Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);

  /// Create code that allocates memory to store arrays on device.
  void allocateDeviceArrays();

  /// Create code to prepare the managed device pointers.
  void prepareManagedDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();

  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();
  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation
  ///
  /// @returns The device parameter corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);

  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr   A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr   A host pointer specifying the location to copy to.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  ///
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);
  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param GridBlockX The size of the first block dimension.
  /// @param GridBlockY The size of the second block dimension.
  /// @param GridBlockZ The size of the third block dimension.
  /// @param Parameters A pointer to an array that contains itself pointers to
  ///                   the parameter values passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};
std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
  return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
         std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
}
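
// For illustration, kernel 0 inside a scop of the function `foo` is named
// along the lines of "FUNC_foo_SCOP_0_KERNEL_0", where the middle component
// depends on the id returned by S.getID().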
void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!PollyManagedMemory)
    allocateDeviceArrays();
  else
    prepareManagedDeviceArrays();
}
void GPUNodeBuilder::finalize() {
  if (!PollyManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}
void GPUNodeBuilder::allocateDeviceArrays() {
  assert(!PollyManagedMemory &&
         "Managed memory will directly send host pointers "
         "to the kernel. There is no need for device arrays");
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release());

  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    auto *ScopArray = (ScopArrayInfo *)Array->user;
    std::string DevArrayName("p_dev_array_");
    DevArrayName.append(Array->name);

    Value *ArraySize = getArraySize(Array);
    Value *Offset = getArrayOffset(Array);
    if (Offset)
      ArraySize = Builder.CreateSub(
          ArraySize,
          Builder.CreateMul(Offset,
                            Builder.getInt64(ScopArray->getElemSizeInBytes())));
    const SCEV *SizeSCEV = SE.getSCEV(ArraySize);
    // It makes no sense to have an array of size 0. The CUDA API will
    // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We
    // choose to be defensive and catch this at the compile phase. It is
    // most likely that we are doing something wrong with size computation.
    if (SizeSCEV->isZero()) {
      errs() << getUniqueScopName(&S)
             << " has computed array size 0: " << *ArraySize
             << " | for array: " << *(ScopArray->getBasePtr())
             << ". This is illegal, exiting.\n";
      report_fatal_error("array size was computed to be 0");
    }

    Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
    DevArray->setName(DevArrayName);
    DeviceAllocations[ScopArray] = DevArray;
  }

  isl_ast_build_free(Build);
}
void GPUNodeBuilder::prepareManagedDeviceArrays() {
  assert(PollyManagedMemory &&
         "Device arrays must only be prepared in managed-memory mode");
  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user;
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
    else
      HostPtr = ScopArray->getBasePtr();
    HostPtr = getLatestValue(HostPtr);

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ScopArray->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ScopArray] = HostPtr;
  }
}
void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (Function &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

    Metadata *Elements[] = {
        ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };

    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}
void GPUNodeBuilder::freeDeviceArrays() {
  assert(!PollyManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}
Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}
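
// All other `polly_*` runtime entry points below are emitted with the same
// declare-on-first-use pattern as above. Their definitions are expected to
// be provided at link time by Polly's GPURuntime support library.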
Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}
void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}
void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}
void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "on the device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}
Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "on the device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}
void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}
void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}
void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(PollyManagedMemory && "explicit synchronization is only necessary for "
                               "managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}
Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}
void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}
/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}
Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl::ast_build Build = isl::ast_build::from_context(S.getContext());
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    isl::multi_pw_aff ArrayBound =
        isl::manage(isl_multi_pw_aff_copy(Array->bound));

    isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
    isl::ast_expr Res = Build.expr_from(OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
      isl::ast_expr Expr = Build.expr_from(Bound_I);
      Res = Res.mul(Expr);
    }

    Value *NumElements = ExprBuilder.create(Res.release());
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }

  return ArraySize;
}
*GPUNodeBuilder::getArrayOffset(gpu_array_info
*Array
) {
1122 if (gpu_array_is_scalar(Array
))
1125 isl::ast_build Build
= isl::ast_build::from_context(S
.getContext());
1127 isl::set Min
= isl::manage(isl_set_copy(Array
->extent
)).lexmin();
1129 isl::set ZeroSet
= isl::set::universe(Min
.get_space());
1131 for (long i
= 0; i
< Min
.dim(isl::dim::set
); i
++)
1132 ZeroSet
= ZeroSet
.fix_si(isl::dim::set
, i
, 0);
1134 if (Min
.is_subset(ZeroSet
)) {
1138 isl::ast_expr Result
= isl::ast_expr::from_val(isl::val(Min
.get_ctx(), 0));
1140 for (long i
= 0; i
< Min
.dim(isl::dim::set
); i
++) {
1142 isl::pw_aff Bound_I
=
1143 isl::manage(isl_multi_pw_aff_get_pw_aff(Array
->bound
, i
- 1));
1144 isl::ast_expr BExpr
= Build
.expr_from(Bound_I
);
1145 Result
= Result
.mul(BExpr
);
1147 isl::pw_aff DimMin
= Min
.dim_min(i
);
1148 isl::ast_expr MExpr
= Build
.expr_from(DimMin
);
1149 Result
= Result
.add(MExpr
);
1152 return ExprBuilder
.create(Result
.release());
Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array,
                                             ScopArrayInfo *ArrayInfo) {
  assert(PollyManagedMemory && "Only used when you wish to get a host "
                               "pointer for sending data to the kernel, "
                               "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  it = DeviceAllocations.find(ArrayInfo);
  assert(it != DeviceAllocations.end() &&
         "Device array expected to be available");
  return it->second;
}
void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!PollyManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();
  HostPtr = getLatestValue(HostPtr);

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset) {
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
  }

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}
void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);

  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    if (PollyManagedMemory)
      createCallSynchronizeDevice();
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "init_device")) {
    initializeAfterRTH();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "clear_device")) {
    finalize();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (isPrefix(Str, "to_device")) {
    if (!PollyManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!PollyManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
}
void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}
*Expr
,
1304 ppcg_kernel_stmt
*KernelStmt
) {
1305 auto Stmt
= (ScopStmt
*)KernelStmt
->u
.d
.stmt
->stmt
;
1306 isl_id_to_ast_expr
*Indexes
= KernelStmt
->u
.d
.ref2expr
;
1309 LTS
.insert(OutsideLoopIterations
.begin(), OutsideLoopIterations
.end());
1311 createSubstitutions(Expr
, Stmt
, LTS
);
1313 if (Stmt
->isBlockStmt())
1314 BlockGen
.copyStmt(*Stmt
, LTS
, Indexes
);
1316 RegionGen
.copyStmt(*Stmt
, LTS
, Indexes
);
1319 void GPUNodeBuilder::createKernelSync() {
1320 Module
*M
= Builder
.GetInsertBlock()->getParent()->getParent();
1321 const char *SpirName
= "__gen_ocl_barrier_global";
1326 case GPUArch::SPIR64
:
1327 case GPUArch::SPIR32
:
1328 Sync
= M
->getFunction(SpirName
);
1330 // If Sync is not available, declare it.
1332 GlobalValue::LinkageTypes Linkage
= Function::ExternalLinkage
;
1333 std::vector
<Type
*> Args
;
1334 FunctionType
*Ty
= FunctionType::get(Builder
.getVoidTy(), Args
, false);
1335 Sync
= Function::Create(Ty
, Linkage
, SpirName
, M
);
1336 Sync
->setCallingConv(CallingConv::SPIR_FUNC
);
1339 case GPUArch::NVPTX64
:
1340 Sync
= Intrinsic::getDeclaration(M
, Intrinsic::nvvm_barrier0
);
1344 Builder
.CreateCall(Sync
, {});
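
// On NVPTX the synchronization above lowers to the `llvm.nvvm.barrier0`
// intrinsic, the equivalent of CUDA's `__syncthreads()` block-level barrier.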
/// Collect llvm::Values referenced from @p Node
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}
/// A list of functions that are available in NVIDIA's libdevice.
const std::set<std::string> CUDALibDeviceFunctions = {
    "exp",   "expf",     "expl",      "cos",       "cosf", "sqrt",
    "sqrtf", "copysign", "copysignf", "copysignl", "log",  "logf"};
/// Return the corresponding CUDA libdevice function name for @p F.
///
/// Return "" if we are not compiling for CUDA.
std::string getCUDALibDeviceFuntion(Function *F) {
  if (CUDALibDeviceFunctions.count(F->getName()))
    return std::string("__nv_") + std::string(F->getName());

  return "";
}
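
// For example, a call to `expf` in a kernel is redirected to libdevice's
// `__nv_expf`; any function not in the list above yields "" and is left
// untouched.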
/// Check if F is a function that we can code-generate in a GPU kernel.
static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
  assert(F && "F is an invalid pointer");
  // We string compare against the name of the function to allow
  // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
  // "llvm.copysign".
  const StringRef Name = F->getName();

  if (AllowLibDevice && getCUDALibDeviceFuntion(F).length() > 0)
    return true;

  return F->isIntrinsic() &&
         (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
          Name.startswith("llvm.copysign"));
}

/// Do not take `Function` as a subtree value.
///
/// We try to take the reference of all subtree values and pass them along
/// to the kernel from the host. Taking an address of any function and
/// trying to pass along is nonsensical. Only allow `Value`s that are not
/// `Function`s.
static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }
/// Return `Function`s from `RawSubtreeValues`.
static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
                                 bool AllowCUDALibDevice) {
  SetVector<Function *> SubtreeFunctions;
  for (Value *It : RawSubtreeValues) {
    Function *F = dyn_cast<Function>(It);
    if (F) {
      assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
             "Code should have bailed out by "
             "this point if an invalid function "
             "were present in a kernel.");
      SubtreeFunctions.insert(F);
    }
  }
  return SubtreeFunctions;
}
std::tuple<SetVector<Value *>, SetVector<Function *>,
           SetVector<const Loop *>, isl::space>
GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  isl::space ParamSpace = isl::space(S.getIslCtx(), 0, 0).params();
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator(),
      &ParamSpace};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  // NOTE: this is populated in IslNodeBuilder::addParameters
  // See [Code generation of induction variables of loops outside Scops].
  for (const auto &I : OutsideLoopIterations)
    SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue());

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);
  for (const SCEV *Expr : SCEVs) {
    findValues(Expr, SE, SubtreeValues);
    findLoops(Expr, Loops);
  }

  Loops.remove_if([this](const Loop *L) {
    return S.contains(L) || L->contains(S.getEntry());
  });
  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI->getBasePtr());

  isl_space *Space = S.getParamSpace().release();
  for (long i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  isl_space_free(Space);

  for (long i = 0; i < isl_space_dim(Kernel->space, isl_dim_set); i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }
  // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
  // SubtreeValues. This is important, because we should not lose any
  // SubtreeValues in the process of constructing the
  // "ValidSubtree{Values, Functions} sets. Nor should the set
  // ValidSubtree{Values, Functions} have any common element.
  auto ValidSubtreeValuesIt =
      make_filter_range(SubtreeValues, isValidSubtreeValue);
  SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
                                        ValidSubtreeValuesIt.end());

  bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;

  SetVector<Function *> ValidSubtreeFunctions(
      getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));
  // @see IslNodeBuilder::getReferencesInSubtree
  SetVector<Value *> ReplacedValues;
  for (Value *V : ValidSubtreeValues) {
    auto It = ValueMap.find(V);
    if (It == ValueMap.end())
      ReplacedValues.insert(V);
    else
      ReplacedValues.insert(It->second);
  }
  return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops,
                         ParamSpace);
}
*F
) {
1519 DomTreeNode
*N
= DT
.getNode(&F
->getEntryBlock());
1520 std::vector
<BasicBlock
*> Nodes
;
1521 for (po_iterator
<DomTreeNode
*> I
= po_begin(N
), E
= po_end(N
); I
!= E
; ++I
)
1522 Nodes
.push_back(I
->getBlock());
1524 for (BasicBlock
*BB
: Nodes
)
1528 void GPUNodeBuilder::clearScalarEvolution(Function
*F
) {
1529 for (BasicBlock
&BB
: *F
) {
1530 Loop
*L
= LI
.getLoopFor(&BB
);
1536 void GPUNodeBuilder::clearLoops(Function
*F
) {
1537 for (BasicBlock
&BB
: *F
) {
1538 Loop
*L
= LI
.getLoopFor(&BB
);
1541 LI
.removeBlock(&BB
);
1545 std::tuple
<Value
*, Value
*> GPUNodeBuilder::getGridSizes(ppcg_kernel
*Kernel
) {
1546 std::vector
<Value
*> Sizes
;
1547 isl::ast_build Context
= isl::ast_build::from_context(S
.getContext());
1549 isl::multi_pw_aff GridSizePwAffs
=
1550 isl::manage(isl_multi_pw_aff_copy(Kernel
->grid_size
));
1551 for (long i
= 0; i
< Kernel
->n_grid
; i
++) {
1552 isl::pw_aff Size
= GridSizePwAffs
.get_pw_aff(i
);
1553 isl::ast_expr GridSize
= Context
.expr_from(Size
);
1554 Value
*Res
= ExprBuilder
.create(GridSize
.release());
1555 Res
= Builder
.CreateTrunc(Res
, Builder
.getInt32Ty());
1556 Sizes
.push_back(Res
);
1559 for (long i
= Kernel
->n_grid
; i
< 3; i
++)
1560 Sizes
.push_back(ConstantInt::get(Builder
.getInt32Ty(), 1));
1562 return std::make_tuple(Sizes
[0], Sizes
[1]);
1565 std::tuple
<Value
*, Value
*, Value
*>
1566 GPUNodeBuilder::getBlockSizes(ppcg_kernel
*Kernel
) {
1567 std::vector
<Value
*> Sizes
;
1569 for (long i
= 0; i
< Kernel
->n_block
; i
++) {
1570 Value
*Res
= ConstantInt::get(Builder
.getInt32Ty(), Kernel
->block_dim
[i
]);
1571 Sizes
.push_back(Res
);
1574 for (long i
= Kernel
->n_block
; i
< 3; i
++)
1575 Sizes
.push_back(ConstantInt::get(Builder
.getInt32Ty(), 1));
1577 return std::make_tuple(Sizes
[0], Sizes
[1], Sizes
[2]);
1580 void GPUNodeBuilder::insertStoreParameter(Instruction
*Parameters
,
1581 Instruction
*Param
, int Index
) {
1582 Value
*Slot
= Builder
.CreateGEP(
1583 Parameters
, {Builder
.getInt64(0), Builder
.getInt64(Index
)});
1584 Value
*ParamTyped
= Builder
.CreatePointerCast(Param
, Builder
.getInt8PtrTy());
1585 Builder
.CreateStore(ParamTyped
, Slot
);
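
// Conceptually, for Index == 2 the helper above emits IR along these lines
// (illustrative values and names):
//   %slot = getelementptr [8 x i8*], [8 x i8*]* %polly_launch_0_params,
//           i64 0, i64 2
//   %cast = bitcast i32* %polly_launch_0_param_2 to i8*
//   store i8* %cast, i8** %slot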
Value *
GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                       SetVector<Value *> SubtreeValues) {
  const int NumArgs = F->arg_size();
  std::vector<int> ArgSizes(NumArgs);

  // If we are using the OpenCL Runtime, we need to add the kernel argument
  // sizes to the end of the launch-parameter list, so OpenCL can determine
  // how big the respective kernel arguments are.
  // Here we need to reserve adequate space for that.
  Type *ArrayTy;
  if (Runtime == GPURuntime::OpenCL)
    ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);
  else
    ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs);

  BasicBlock *EntryBlock =
      &Builder.GetInsertBlock()->getParent()->getEntryBlock();
  auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
  std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
  Instruction *Parameters = new AllocaInst(
      ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());

  int Index = 0;
  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = SAI->getElemSizeInBytes();

    Value *DevArray = nullptr;
    if (PollyManagedMemory) {
      DevArray = getManagedDeviceArray(&Prog->array[i],
                                       const_cast<ScopArrayInfo *>(SAI));
    } else {
      DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
      DevArray = createCallGetDevicePtr(DevArray);
    }
    assert(DevArray != nullptr && "Array to be offloaded to device not "
                                  "initialized");
    Value *Offset = getArrayOffset(&Prog->array[i]);

    if (Offset) {
      DevArray = Builder.CreatePointerCast(
          DevArray, SAI->getElementType()->getPointerTo());
      DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset));
      DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
    }
    Value *Slot = Builder.CreateGEP(
        Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Value *ValPtr = nullptr;
      if (PollyManagedMemory)
        ValPtr = DevArray;
      else
        ValPtr = BlockGen.getOrCreateAlloca(SAI);

      assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
                                  " to be stored into Parameters");
      Value *ValPtrCast =
          Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
      Builder.CreateStore(ValPtrCast, Slot);
    } else {
      Instruction *Param =
          new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
                         Launch + "_param_" + std::to_string(Index),
                         EntryBlock->getTerminator());
      Builder.CreateStore(DevArray, Param);
      Value *ParamTyped =
          Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
      Builder.CreateStore(ParamTyped, Slot);
    }
    Index++;
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    if (ValueMap.count(Val))
      Val = ValueMap[Val];
    isl_id_free(Id);

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (auto Val : SubtreeValues) {
    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  if (Runtime == GPURuntime::OpenCL) {
    for (int i = 0; i < NumArgs; i++) {
      Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
      Instruction *Param =
          new AllocaInst(Builder.getInt32Ty(), AddressSpace,
                         Launch + "_param_size_" + std::to_string(i),
                         EntryBlock->getTerminator());
      Builder.CreateStore(Val, Param);
      insertStoreParameter(Parameters, Param, Index);
      Index++;
    }
  }

  auto Location = EntryBlock->getTerminator();
  return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
                         Launch + "_params_i8ptr", Location);
}
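
// The resulting parameter array holds one i8* slot per kernel argument, in
// the order: device arrays, host loop iterators, isl parameters, referenced
// host values. With the OpenCL runtime the array is twice as long and the
// second half carries an i32 size per argument. A sketch of the layout
// (slot names invented):
//
//   polly_launch_<id>_params: [ &devptr_A, &i, &n, ..., &size_0, &size_1, ... ]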
void GPUNodeBuilder::setupKernelSubtreeFunctions(
    SetVector<Function *> SubtreeFunctions) {
  for (auto Fn : SubtreeFunctions) {
    const std::string ClonedFnName = Fn->getName();
    Function *Clone = GPUModule->getFunction(ClonedFnName);
    if (!Clone)
      Clone =
          Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
                           ClonedFnName, GPUModule.get());
    assert(Clone && "Expected cloned function to be initialized.");
    assert(ValueMap.find(Fn) == ValueMap.end() &&
           "Fn already present in ValueMap");
    ValueMap[Fn] = Clone;
  }
}
void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
  isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
  ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
  isl_id_free(Id);
  isl_ast_node_free(KernelStmt);

  if (Kernel->n_grid > 1)
    DeepestParallel =
        std::max(DeepestParallel, isl_space_dim(Kernel->space, isl_dim_set));
  else
    DeepestSequential =
        std::max(DeepestSequential, isl_space_dim(Kernel->space, isl_dim_set));

  Value *BlockDimX, *BlockDimY, *BlockDimZ;
  std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);

  SetVector<Value *> SubtreeValues;
  SetVector<Function *> SubtreeFunctions;
  SetVector<const Loop *> Loops;
  isl::space ParamSpace;
  std::tie(SubtreeValues, SubtreeFunctions, Loops, ParamSpace) =
      getReferencesInKernel(Kernel);

  // Add parameters that appear only in the access function to the kernel
  // space. This is important to make sure that all isl_ids are passed as
  // parameters to the kernel, even though we may not have all parameters
  // in the context to improve compile time.
  Kernel->space = isl_space_align_params(Kernel->space, ParamSpace.release());

  assert(Kernel->tree && "Device AST of kernel node is empty");

  Instruction &HostInsertPoint = *Builder.GetInsertPoint();
  IslExprBuilder::IDToValueTy HostIDs = IDToValue;
  ValueMapT HostValueMap = ValueMap;
  BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;
  ScalarMap.clear();

  // Create for all loops we depend on values that contain the current loop
  // iteration. These values are necessary to generate code for SCEVs that
  // depend on such loops. As a result we need to pass them to the subfunction.
  for (const Loop *L : Loops) {
    const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
                                            SE.getUnknown(Builder.getInt64(1)),
                                            L, SCEV::FlagAnyWrap);
    Value *V = generateSCEV(OuterLIV);
    OutsideLoopIterations[L] = SE.getUnknown(V);
    SubtreeValues.insert(V);
  }

  createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
  setupKernelSubtreeFunctions(SubtreeFunctions);

  create(isl_ast_node_copy(Kernel->tree));

  finalizeKernelArguments(Kernel);
  Function *F = Builder.GetInsertBlock()->getParent();
  if (Arch == GPUArch::NVPTX64)
    addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
  clearDominators(F);
  clearScalarEvolution(F);
  clearLoops(F);

  IDToValue = HostIDs;

  ValueMap = std::move(HostValueMap);
  ScalarMap = std::move(HostScalarMap);
  EscapeMap.clear();
  IDToSAI.clear();
  Annotator.resetAlternativeAliasBases();
  for (auto &BasePtr : LocalArrays)
    S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
  LocalArrays.clear();

  std::string ASMString = finalizeKernelFunction();
  Builder.SetInsertPoint(&HostInsertPoint);
  Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);

  std::string Name = getKernelFuncName(Kernel->id);
  Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
  Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
  Value *GPUKernel = createCallGetKernel(KernelString, NameString);

  Value *GridDimX, *GridDimY;
  std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);

  createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters);
  createCallFreeKernel(GPUKernel);

  for (auto Id : KernelIds)
    isl_id_free(Id);

  KernelIds.clear();
}
/// Compute the DataLayout string for the NVPTX backend.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeNVPTXDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  }

  return Ret;
}

/// Compute the DataLayout string for a SPIR kernel.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeSPIRDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  }

  return Ret;
}
Function *
GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
                                         SetVector<Value *> &SubtreeValues) {
  std::vector<Type *> Args;
  std::string Identifier = getKernelFuncName(Kernel->id);

  std::vector<Metadata *> MemoryType;

  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
      const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
      Args.push_back(SAI->getElementType());
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
    } else {
      static const int UseGlobalMemory = 1;
      Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1)));
    }
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    Args.push_back(Builder.getInt64Ty());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);
    Args.push_back(Val->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  for (auto *V : SubtreeValues) {
    Args.push_back(V->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
  auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
                              GPUModule.get());

  std::vector<Metadata *> EmptyStrings;

  for (unsigned int i = 0; i < MemoryType.size(); i++) {
    EmptyStrings.push_back(MDString::get(FN->getContext(), ""));
  }

  if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) {
    FN->setMetadata("kernel_arg_addr_space",
                    MDNode::get(FN->getContext(), MemoryType));
    FN->setMetadata("kernel_arg_name",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_access_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_base_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
  }

  switch (Arch) {
  case GPUArch::NVPTX64:
    FN->setCallingConv(CallingConv::PTX_Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    FN->setCallingConv(CallingConv::SPIR_KERNEL);
    break;
  }

  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    Arg->setName(Kernel->array[i].array->name);

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    Type *EleTy = SAI->getElementType();
    Value *Val = &*Arg;

    SmallVector<const SCEV *, 4> Sizes;
    isl_ast_build *Build =
        isl_ast_build_from_context(isl_set_copy(Prog->context));
    Sizes.push_back(nullptr);
    for (long j = 1; j < Kernel->array[i].array->n_index; j++) {
      isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
          Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j));
      auto V = ExprBuilder.create(DimSize);
      Sizes.push_back(SE.getSCEV(V));
    }
    const ScopArrayInfo *SAIRep =
        S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
    LocalArrays.push_back(Val);

    isl_ast_build_free(Build);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAIRep;
    Arg++;
  }

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Arg->setName(isl_id_get_name(Id));
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Arg->setName(isl_id_get_name(Id));
    Value *Val = IDToValue[Id];
    ValueMap[Val] = &*Arg;
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (auto *V : SubtreeValues) {
    Arg->setName(V->getName());
    ValueMap[V] = &*Arg;
    Arg++;
  }

  return FN;
}
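
// As a sketch, a kernel that accesses one array A, sits below one host loop i,
// and references one parameter n would receive a declaration along these
// lines on NVPTX (all names invented for illustration):
//
//   define ptx_kernel void @kernel_0(i8 addrspace(1)* %MemRef_A,
//                                    i64 %i, i64 %n)
//
// Array arguments that are not read-only scalars are passed as i8* in the
// global address space; everything else is passed by value.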
void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
  Intrinsic::ID IntrinsicsBID[2];
  Intrinsic::ID IntrinsicsTID[3];

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
  case GPUArch::NVPTX64:
    IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
    IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;

    IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
    IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
    IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
    break;
  }

  auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
    std::string Name = isl_id_get_name(Id);
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
    Value *Val = Builder.CreateCall(IntrinsicFn, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
    addId(Id, IntrinsicsBID[i]);
  }

  for (int i = 0; i < Kernel->n_block; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
    addId(Id, IntrinsicsTID[i]);
  }
}
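
// Each isl block/thread id is thus bound to a zero-extended intrinsic call,
// e.g. (illustrative IR, names invented):
//
//   %tid.x.raw = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
//   %tid.x     = zext i32 %tid.x.raw to i64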
void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel) {
  const char *GroupName[3] = {"__gen_ocl_get_group_id0",
                              "__gen_ocl_get_group_id1",
                              "__gen_ocl_get_group_id2"};

  const char *LocalName[3] = {"__gen_ocl_get_local_id0",
                              "__gen_ocl_get_local_id1",
                              "__gen_ocl_get_local_id2"};

  auto createFunc = [this](const char *Name, __isl_take isl_id *Id) mutable {
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *FN = M->getFunction(Name);

    // If FN is not available, declare it.
    if (!FN) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getInt32Ty(), Args, false);
      FN = Function::Create(Ty, Linkage, Name, M);
      FN->setCallingConv(CallingConv::SPIR_FUNC);
    }

    Value *Val = Builder.CreateCall(FN, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i)
    createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i));

  for (int i = 0; i < Kernel->n_block; ++i)
    createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i));
}
void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel, Function *FN) {
  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    Value *Val = &*Arg;

    if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Type *TypePtr = SAI->getElementType()->getPointerTo();
      Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
      Val = Builder.CreateLoad(TypedArgPtr);
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Builder.CreateStore(Val, Alloca);

    Arg++;
  }
}
void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
  auto *FN = Builder.GetInsertBlock()->getParent();
  auto Arg = FN->arg_begin();

  bool StoredScalar = false;
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Arg++;
      continue;
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Value *ArgPtr = &*Arg;
    Type *TypePtr = SAI->getElementType()->getPointerTo();
    Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
    Value *Val = Builder.CreateLoad(Alloca);
    Builder.CreateStore(Val, TypedArgPtr);
    StoredScalar = true;

    Arg++;
  }

  if (StoredScalar) {
    /// In case more than one thread contains scalar stores, the generated
    /// code might be incorrect, if we only store at the end of the kernel.
    /// To support this case we need to store these scalars back at each
    /// memory store or at least before each kernel barrier.
    if (Kernel->n_block != 0 || Kernel->n_grid != 0) {
      BuildSuccessful = 0;
      DEBUG(
          dbgs() << getUniqueScopName(&S)
                 << " has a store to a scalar value that"
                    " would be undefined to run in parallel. Bailing out.\n";);
    }
  }
}
void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  for (int i = 0; i < Kernel->n_var; ++i) {
    struct ppcg_kernel_var &Var = Kernel->var[i];
    isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
    Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType();

    Type *ArrayTy = EleTy;
    SmallVector<const SCEV *, 4> Sizes;

    Sizes.push_back(nullptr);
    for (unsigned int j = 1; j < Var.array->n_index; ++j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
    }

    for (int j = Var.array->n_index - 1; j >= 0; --j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      ArrayTy = ArrayType::get(ArrayTy, Bound);
    }

    const ScopArrayInfo *SAI;
    Value *Allocation;
    if (Var.type == ppcg_access_shared) {
      auto GlobalVar = new GlobalVariable(
          *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
          nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
      GlobalVar->setAlignment(EleTy->getPrimitiveSizeInBits() / 8);
      GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));

      Allocation = GlobalVar;
    } else if (Var.type == ppcg_access_private) {
      Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
    } else {
      llvm_unreachable("unknown variable type");
    }
    SAI =
        S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
    Id = isl_id_alloc(S.getIslCtx(), Var.name, nullptr);
    IDToValue[Id] = Allocation;
    LocalArrays.push_back(Allocation);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAI;
  }
}
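
// For example (illustrative; the variable name comes from ppcg and is
// invented here), a 32x32 float tile mapped to shared memory becomes a
// zero-initialized module-level global in address space 3:
//
//   @shared_A = internal addrspace(3) global [32 x [32 x float]]
//               zeroinitializer, align 4
//
// while a private variable becomes a plain alloca named "private_array".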
void GPUNodeBuilder::createKernelFunction(
    ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
    SetVector<Function *> &SubtreeFunctions) {
  std::string Identifier = getKernelFuncName(Kernel->id);
  GPUModule.reset(new Module(Identifier, Builder.getContext()));

  switch (Arch) {
  case GPUArch::NVPTX64:
    if (Runtime == GPURuntime::CUDA)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    else if (Runtime == GPURuntime::OpenCL)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
    GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
    break;
  case GPUArch::SPIR32:
    GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
    break;
  case GPUArch::SPIR64:
    GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
    break;
  }

  Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);

  BasicBlock *PrevBlock = Builder.GetInsertBlock();
  auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);

  DT.addNewBlock(EntryBlock, PrevBlock);

  Builder.SetInsertPoint(EntryBlock);
  Builder.CreateRetVoid();
  Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());

  ScopDetection::markFunctionAsInvalid(FN);

  prepareKernelArguments(Kernel, FN);
  createKernelVariables(Kernel, FN);

  switch (Arch) {
  case GPUArch::NVPTX64:
    insertKernelIntrinsics(Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    insertKernelCallsSPIR(Kernel);
    break;
  }
}
std::string GPUNodeBuilder::createKernelASM() {
  llvm::Triple GPUTriple;

  switch (Arch) {
  case GPUArch::NVPTX64:
    switch (Runtime) {
    case GPURuntime::CUDA:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
      break;
    case GPURuntime::OpenCL:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
      break;
    }
    break;
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    std::string SPIRAssembly;
    raw_string_ostream IROstream(SPIRAssembly);
    IROstream << *GPUModule;
    IROstream.flush();
    return SPIRAssembly;
  }

  std::string ErrMsg;
  auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);

  if (!GPUTarget) {
    errs() << ErrMsg << "\n";
    return "";
  }

  TargetOptions Options;
  Options.UnsafeFPMath = FastMath;

  std::string subtarget;

  switch (Arch) {
  case GPUArch::NVPTX64:
    subtarget = CudaVersion;
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    llvm_unreachable("No subtarget for SPIR architecture");
  }

  std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
      GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>()));

  SmallString<0> ASMString;
  raw_svector_ostream ASMStream(ASMString);
  llvm::legacy::PassManager PM;

  PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));

  if (TargetM->addPassesToEmitFile(
          PM, ASMStream, TargetMachine::CGFT_AssemblyFile, true /* verify */)) {
    errs() << "The target does not support generation of this file type!\n";
    return "";
  }

  PM.run(*GPUModule);

  return ASMStream.str();
}
bool GPUNodeBuilder::requiresCUDALibDevice() {
  bool RequiresLibDevice = false;
  for (Function &F : GPUModule->functions()) {
    if (!F.isDeclaration())
      continue;

    std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(&F);
    if (CUDALibDeviceFunc.length() != 0) {
      F.setName(CUDALibDeviceFunc);
      RequiresLibDevice = true;
    }
  }

  return RequiresLibDevice;
}
void GPUNodeBuilder::addCUDALibDevice() {
  if (Arch != GPUArch::NVPTX64)
    return;

  if (requiresCUDALibDevice()) {
    SMDiagnostic Error;

    errs() << CUDALibDevice << "\n";
    auto LibDeviceModule =
        parseIRFile(CUDALibDevice, Error, GPUModule->getContext());

    if (!LibDeviceModule) {
      BuildSuccessful = false;
      report_fatal_error("Could not find or load libdevice. Skipping GPU "
                         "kernel generation. Please set -polly-acc-libdevice "
                         "accordingly.\n");
      return;
    }

    Linker L(*GPUModule);

    // Set an nvptx64 target triple to avoid linker warnings. The original
    // triples of the libdevice files are nvptx-unknown-unknown.
    LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
  }
}
std::string GPUNodeBuilder::finalizeKernelFunction() {

  if (verifyModule(*GPUModule)) {
    DEBUG(dbgs() << "verifyModule failed on module:\n";
          GPUModule->print(dbgs(), nullptr); dbgs() << "\n";);
    DEBUG(dbgs() << "verifyModule Error:\n";
          verifyModule(*GPUModule, &dbgs()););

    if (FailOnVerifyModuleFailure)
      llvm_unreachable("VerifyModule failed.");

    BuildSuccessful = false;
    return "";
  }

  addCUDALibDevice();

  if (DumpKernelIR)
    outs() << *GPUModule << "\n";

  if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) {
    // Optimize the kernel module at -O3 before emitting assembly.
    llvm::legacy::PassManager OptPasses;
    PassManagerBuilder PassBuilder;
    PassBuilder.OptLevel = 3;
    PassBuilder.SizeLevel = 0;
    PassBuilder.populateModulePassManager(OptPasses);
    OptPasses.run(*GPUModule);
  }

  std::string Assembly = createKernelASM();

  if (DumpKernelASM)
    outs() << Assembly << "\n";

  GPUModule.release();
  KernelIDs.clear();

  return Assembly;
}
/// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff`.
///
/// @param PwAffs The list of piecewise affine functions to create an
///               `isl_pw_aff_list` from. We expect an rvalue ref because
///               all the isl_pw_aff are used up by this function.
///
/// @returns The `isl_pw_aff_list`.
__isl_give isl_pw_aff_list *
createPwAffList(isl_ctx *Context,
                const std::vector<__isl_take isl_pw_aff *> &&PwAffs) {
  isl_pw_aff_list *List = isl_pw_aff_list_alloc(Context, PwAffs.size());

  for (unsigned i = 0; i < PwAffs.size(); i++) {
    List = isl_pw_aff_list_insert(List, i, PwAffs[i]);
  }

  return List;
}
/// Align all the `PwAffs` such that they have the same parameter dimensions.
///
/// We loop over all `pw_aff` and align all of their spaces together to
/// create a common space for all the `pw_aff`. This common space is the
/// `AlignSpace`. We then align all the `pw_aff` to this space. We start
/// with the given `SeedSpace`.
/// @param PwAffs    The list of piecewise affine functions we want to align.
///                  This is an rvalue reference because the entire vector is
///                  used up by the end of the operation.
/// @param SeedSpace The space to start the alignment process with.
/// @returns         A std::pair, whose first element is the aligned space,
///                  whose second element is the vector of aligned piecewise
///                  affine functions.
static std::pair<__isl_give isl_space *, std::vector<__isl_give isl_pw_aff *>>
alignPwAffs(const std::vector<__isl_take isl_pw_aff *> &&PwAffs,
            __isl_take isl_space *SeedSpace) {
  assert(SeedSpace && "Invalid seed space given.");

  isl_space *AlignSpace = SeedSpace;
  for (isl_pw_aff *PwAff : PwAffs) {
    isl_space *PwAffSpace = isl_pw_aff_get_domain_space(PwAff);
    AlignSpace = isl_space_align_params(AlignSpace, PwAffSpace);
  }
  std::vector<isl_pw_aff *> AdjustedPwAffs;

  for (unsigned i = 0; i < PwAffs.size(); i++) {
    isl_pw_aff *Adjusted = PwAffs[i];
    assert(Adjusted && "Invalid pw_aff given.");
    Adjusted = isl_pw_aff_align_params(Adjusted, isl_space_copy(AlignSpace));
    AdjustedPwAffs.push_back(Adjusted);
  }

  return std::make_pair(AlignSpace, AdjustedPwAffs);
}
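
// As a usage sketch: aligning two bounds [n] -> { [(n)] } and
// [m] -> { [(m)] } with an empty seed space yields functions that are both
// defined over the combined parameter list [n, m], which is what the
// isl_multi_pw_aff construction in setArrayBounds below requires.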
class PPCGCodeGeneration : public ScopPass {
public:
  static char ID;

  GPURuntime Runtime = GPURuntime::CUDA;

  GPUArch Architecture = GPUArch::NVPTX64;

  /// The scop that is currently processed.
  Scop *S;

  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const DataLayout *DL;
  RegionInfo *RI;

  PPCGCodeGeneration() : ScopPass(ID) {}
  /// Construct compilation options for PPCG.
  ///
  /// @returns The compilation options.
  ppcg_options *createPPCGOptions() {
    auto DebugOptions =
        (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
    auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));

    DebugOptions->dump_schedule_constraints = false;
    DebugOptions->dump_schedule = false;
    DebugOptions->dump_final_schedule = false;
    DebugOptions->dump_sizes = false;
    DebugOptions->verbose = false;

    Options->debug = DebugOptions;

    Options->group_chains = false;
    Options->reschedule = true;
    Options->scale_tile_loops = false;
    Options->wrap = false;

    Options->non_negative_parameters = false;
    Options->ctx = nullptr;
    Options->sizes = nullptr;

    Options->tile = true;
    Options->tile_size = 32;

    Options->isolate_full_tiles = false;

    Options->use_private_memory = PrivateMemory;
    Options->use_shared_memory = SharedMemory;
    Options->max_shared_memory = 48 * 1024;

    Options->target = PPCG_TARGET_CUDA;
    Options->openmp = false;
    Options->linearize_device_arrays = true;
    Options->allow_gnu_extensions = false;

    Options->unroll_copy_shared = false;
    Options->unroll_gpu_tile = false;
    Options->live_range_reordering = true;

    Options->hybrid = false;
    Options->opencl_compiler_options = nullptr;
    Options->opencl_use_gpu = false;
    Options->opencl_n_include_file = 0;
    Options->opencl_include_files = nullptr;
    Options->opencl_print_kernel_types = false;
    Options->opencl_embed_kernel_code = false;

    Options->save_schedule_file = nullptr;
    Options->load_schedule_file = nullptr;

    return Options;
  }
  /// Get a tagged access relation containing all accesses of type @p AccessTy.
  ///
  /// Instead of a normal access of the form:
  ///
  ///   Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// a tagged access has the form
  ///
  ///   [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// where 'id' is an additional space that references the memory access that
  /// triggered the access.
  ///
  /// @param AccessTy The type of the memory accesses to collect.
  ///
  /// @return The relation describing all tagged memory accesses.
  isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
    isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace().release());

    for (auto &Stmt : *S)
      for (auto &Acc : Stmt)
        if (Acc->getType() == AccessTy) {
          isl_map *Relation = Acc->getAccessRelation().release();
          Relation =
              isl_map_intersect_domain(Relation, Stmt.getDomain().release());

          isl_space *Space = isl_map_get_space(Relation);
          Space = isl_space_range(Space);
          Space = isl_space_from_range(Space);
          Space =
              isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
          isl_map *Universe = isl_map_universe(Space);
          Relation = isl_map_domain_product(Relation, Universe);
          Accesses = isl_union_map_add_map(Accesses, Relation);
        }

    return Accesses;
  }

  /// Get the set of all read accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedReads() {
    return getTaggedAccesses(MemoryAccess::READ);
  }

  /// Get the set of all may (and must) write accesses, tagged with the
  /// access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMayWrites() {
    return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
                               getTaggedAccesses(MemoryAccess::MUST_WRITE));
  }

  /// Get the set of all must write accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMustWrites() {
    return getTaggedAccesses(MemoryAccess::MUST_WRITE);
  }
  /// Collect parameter and array names as isl_ids.
  ///
  /// To reason about the different parameters and arrays used, ppcg requires
  /// a list of all isl_ids in use. As PPCG traditionally performs
  /// source-to-source compilation each of these isl_ids is mapped to the
  /// expression that represents it. As we do not have a corresponding
  /// expression in Polly, we just map each id to a 'zero' expression to match
  /// the data format that ppcg expects.
  ///
  /// @returns Return a map from collected ids to 'zero' ast expressions.
  __isl_give isl_id_to_ast_expr *getNames() {
    auto *Names = isl_id_to_ast_expr_alloc(
        S->getIslCtx(),
        S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
    auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx()));

    for (const SCEV *P : S->parameters()) {
      isl_id *Id = S->getIdForParam(P).release();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    for (auto &Array : S->arrays()) {
      auto Id = Array->getBasePtrId().release();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    isl_ast_expr_free(Zero);

    return Names;
  }
  /// Create a new PPCG scop from the current scop.
  ///
  /// The PPCG scop is initialized with data from the current polly::Scop. From
  /// this initial data, the data-dependences in the PPCG scop are initialized.
  /// We do not use Polly's dependence analysis for now, to ensure we match
  /// the PPCG default behaviour more closely.
  ///
  /// @returns A new ppcg scop.
  ppcg_scop *createPPCGScop() {
    MustKillsInfo KillsInfo = computeMustKillsInfo(*S);

    auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));

    PPCGScop->options = createPPCGOptions();
    // Enable live range reordering.
    PPCGScop->options->live_range_reordering = 1;

    PPCGScop->start = 0;
    PPCGScop->end = 0;

    PPCGScop->context = S->getContext().release();
    PPCGScop->domain = S->getDomains().release();
    // TODO: investigate this further. PPCG calls collect_call_domains.
    PPCGScop->call = isl_union_set_from_set(S->getContext().release());
    PPCGScop->tagged_reads = getTaggedReads();
    PPCGScop->reads = S->getReads().release();
    PPCGScop->live_in = nullptr;
    PPCGScop->tagged_may_writes = getTaggedMayWrites();
    PPCGScop->may_writes = S->getWrites().release();
    PPCGScop->tagged_must_writes = getTaggedMustWrites();
    PPCGScop->must_writes = S->getMustWrites().release();
    PPCGScop->live_out = nullptr;
    PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.take();
    PPCGScop->must_kills = KillsInfo.MustKills.take();

    PPCGScop->tagger = nullptr;
    PPCGScop->independence =
        isl_union_map_empty(isl_set_get_space(PPCGScop->context));
    PPCGScop->dep_flow = nullptr;
    PPCGScop->tagged_dep_flow = nullptr;
    PPCGScop->dep_false = nullptr;
    PPCGScop->dep_forced = nullptr;
    PPCGScop->dep_order = nullptr;
    PPCGScop->tagged_dep_order = nullptr;

    PPCGScop->schedule = S->getScheduleTree().release();
    // If we have something non-trivial to kill, add it to the schedule.
    if (KillsInfo.KillsSchedule.get())
      PPCGScop->schedule = isl_schedule_sequence(
          PPCGScop->schedule, KillsInfo.KillsSchedule.take());

    PPCGScop->names = getNames();
    PPCGScop->pet = nullptr;

    compute_tagger(PPCGScop);
    compute_dependences(PPCGScop);
    eliminate_dead_code(PPCGScop);

    return PPCGScop;
  }
  /// Collect the array accesses in a statement.
  ///
  /// @param Stmt The statement for which to collect the accesses.
  ///
  /// @returns A list of array accesses.
  gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
    gpu_stmt_access *Accesses = nullptr;

    for (MemoryAccess *Acc : Stmt) {
      auto Access = isl_alloc_type(S->getIslCtx(), struct gpu_stmt_access);
      Access->read = Acc->isRead();
      Access->write = Acc->isWrite();
      Access->access = Acc->getAccessRelation().release();
      isl_space *Space = isl_map_get_space(Access->access);
      Space = isl_space_range(Space);
      Space = isl_space_from_range(Space);
      Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
      isl_map *Universe = isl_map_universe(Space);
      Access->tagged_access =
          isl_map_domain_product(Acc->getAccessRelation().release(), Universe);
      Access->exact_write = !Acc->isMayWrite();
      Access->ref_id = Acc->getId().release();
      Access->next = Accesses;
      Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
      // TODO: Also mark one-element accesses to arrays as fixed-element.
      Access->fixed_element =
          Acc->isLatestScalarKind() ? isl_bool_true : isl_bool_false;
      Accesses = Access;
    }

    return Accesses;
  }
  /// Collect the list of GPU statements.
  ///
  /// Each statement has an id, a pointer to the underlying data structure,
  /// as well as a list with all memory accesses.
  ///
  /// TODO: Initialize the list of memory accesses.
  ///
  /// @returns A linked-list of statements.
  gpu_stmt *getStatements() {
    gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx(), struct gpu_stmt,
                                       std::distance(S->begin(), S->end()));

    int i = 0;
    for (auto &Stmt : *S) {
      gpu_stmt *GPUStmt = &Stmts[i];

      GPUStmt->id = Stmt.getDomainId().release();

      // We use the pet stmt pointer to keep track of the Polly statements.
      GPUStmt->stmt = (pet_stmt *)&Stmt;
      GPUStmt->accesses = getStmtAccesses(Stmt);
      i++;
    }

    return Stmts;
  }
  /// Derive the extent of an array.
  ///
  /// The extent of an array is the set of elements that are within the
  /// accessed array. For the inner dimensions, the extent constraints are
  /// 0 and the size of the corresponding array dimension. For the first
  /// (outermost) dimension, the extent constraints are the minimal and maximal
  /// subscript value for the first dimension.
  ///
  /// @param Array The array to derive the extent for.
  ///
  /// @returns An isl_set describing the extent of the array.
  isl::set getExtent(ScopArrayInfo *Array) {
    unsigned NumDims = Array->getNumberOfDimensions();

    if (Array->getNumberOfDimensions() == 0)
      return isl::set::universe(Array->getSpace());

    isl::union_map Accesses = S->getAccesses(Array);
    isl::union_set AccessUSet = Accesses.range();
    AccessUSet = AccessUSet.coalesce();
    AccessUSet = AccessUSet.detect_equalities();
    AccessUSet = AccessUSet.coalesce();

    if (AccessUSet.is_empty())
      return isl::set::empty(Array->getSpace());

    isl::set AccessSet = AccessUSet.extract_set(Array->getSpace());

    isl::local_space LS = isl::local_space(Array->getSpace());

    isl::pw_aff Val = isl::aff::var_on_domain(LS, isl::dim::set, 0);
    isl::pw_aff OuterMin = AccessSet.dim_min(0);
    isl::pw_aff OuterMax = AccessSet.dim_max(0);
    OuterMin = OuterMin.add_dims(isl::dim::in, Val.dim(isl::dim::in));
    OuterMax = OuterMax.add_dims(isl::dim::in, Val.dim(isl::dim::in));
    OuterMin = OuterMin.set_tuple_id(isl::dim::in, Array->getBasePtrId());
    OuterMax = OuterMax.set_tuple_id(isl::dim::in, Array->getBasePtrId());

    isl::set Extent = isl::set::universe(Array->getSpace());

    Extent = Extent.intersect(OuterMin.le_set(Val));
    Extent = Extent.intersect(OuterMax.ge_set(Val));

    for (unsigned i = 1; i < NumDims; ++i)
      Extent = Extent.lower_bound_si(isl::dim::set, i, 0);

    for (unsigned i = 0; i < NumDims; ++i) {
      isl::pw_aff PwAff = Array->getDimensionSizePw(i);

      // isl_pw_aff can be NULL for zero dimension. Only in the case of a
      // Fortran array will we have a legitimate dimension.
      if (PwAff.is_null()) {
        assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
        continue;
      }

      isl::pw_aff Val = isl::aff::var_on_domain(
          isl::local_space(Array->getSpace()), isl::dim::set, i);
      PwAff = PwAff.add_dims(isl::dim::in, Val.dim(isl::dim::in));
      PwAff = PwAff.set_tuple_id(isl::dim::in, Val.get_tuple_id(isl::dim::in));
      isl::set Set = PwAff.gt_set(Val);
      Extent = Set.intersect(Extent);
    }

    return Extent;
  }
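
  // Example (illustrative): for a 2D array A accessed as A[i][j] with
  // 0 <= j < 64 and a size of 64 in the inner dimension, the extent is
  //
  //   { A[i, j] : min_i <= i <= max_i and 0 <= j < 64 }
  //
  // where min_i/max_i are the minimal and maximal first subscripts that
  // actually occur in the scop's accesses.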
  /// Derive the bounds of an array.
  ///
  /// For the first dimension we derive the bound of the array from the extent
  /// of this dimension. For inner dimensions we obtain their size directly
  /// from ScopArrayInfo.
  ///
  /// @param PPCGArray The array to compute bounds for.
  /// @param Array The polly array from which to take the information.
  void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
    std::vector<isl_pw_aff *> Bounds;

    if (PPCGArray.n_index > 0) {
      if (isl_set_is_empty(PPCGArray.extent)) {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        isl_local_space *LS = isl_local_space_from_space(
            isl_space_params(isl_set_get_space(Dom)));
        isl_set_free(Dom);
        isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS));
        Bounds.push_back(Zero);
      } else {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
        isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
        isl_set_free(Dom);
        Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
        isl_local_space *LS =
            isl_local_space_from_space(isl_set_get_space(Dom));
        isl_aff *One = isl_aff_zero_on_domain(LS);
        One = isl_aff_add_constant_si(One, 1);
        Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
        Bound = isl_pw_aff_gist(Bound, S->getContext().release());
        Bounds.push_back(Bound);
      }
    }

    for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
      isl_pw_aff *Bound = Array->getDimensionSizePw(i).release();
      auto LS = isl_pw_aff_get_domain_space(Bound);
      auto Aff = isl_multi_aff_zero(LS);
      Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
      Bounds.push_back(Bound);
    }

    /// To construct an `isl_multi_pw_aff`, we need all the individual
    /// `pw_aff` to have the same parameter dimensions. So, we need to align
    /// them to an appropriate space.
    /// Scop::Context is _not_ an appropriate space, because when we have
    /// `-polly-ignore-parameter-bounds` enabled, the Scop::Context does not
    /// contain all parameter dimensions.
    /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff`
    /// together.
    isl_space *SeedAlignSpace = S->getParamSpace().release();
    SeedAlignSpace = isl_space_add_dims(SeedAlignSpace, isl_dim_set, 1);

    isl_space *AlignSpace = nullptr;
    std::vector<isl_pw_aff *> AlignedBounds;
    std::tie(AlignSpace, AlignedBounds) =
        alignPwAffs(std::move(Bounds), SeedAlignSpace);

    assert(AlignSpace && "alignPwAffs did not initialise AlignSpace");

    isl_pw_aff_list *BoundsList =
        createPwAffList(S->getIslCtx(), std::move(AlignedBounds));

    isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent);
    BoundsSpace = isl_space_align_params(BoundsSpace, AlignSpace);

    assert(BoundsSpace && "Unable to access space of array.");
    assert(BoundsList && "Unable to access list of bounds.");

    PPCGArray.bound =
        isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList);
    assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly.");
  }
  /// Create the arrays for @p PPCGProg.
  ///
  /// @param PPCGProg The program to compute the arrays for.
  void createArrays(gpu_prog *PPCGProg,
                    const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) {
    int i = 0;
    for (auto &Array : ValidSAIs) {
      std::string TypeName;
      raw_string_ostream OS(TypeName);

      OS << *Array->getElementType();
      TypeName = OS.str();

      gpu_array_info &PPCGArray = PPCGProg->array[i];

      PPCGArray.space = Array->getSpace().release();
      PPCGArray.type = strdup(TypeName.c_str());
      PPCGArray.size = DL->getTypeAllocSize(Array->getElementType());
      PPCGArray.name = strdup(Array->getName().c_str());
      PPCGArray.extent = nullptr;
      PPCGArray.n_index = Array->getNumberOfDimensions();
      PPCGArray.extent = getExtent(Array).release();
      PPCGArray.n_ref = 0;
      PPCGArray.refs = nullptr;
      PPCGArray.accessed = true;
      PPCGArray.read_only_scalar =
          Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
      PPCGArray.has_compound_element = false;
      PPCGArray.local = false;
      PPCGArray.declare_local = false;
      PPCGArray.global = false;
      PPCGArray.linearize = false;
      PPCGArray.dep_order = nullptr;
      PPCGArray.user = Array;

      PPCGArray.bound = nullptr;
      setArrayBounds(PPCGArray, Array);
      i++;

      collect_references(PPCGProg, &PPCGArray);
      PPCGArray.only_fixed_element = only_fixed_element_accessed(&PPCGArray);
    }
  }
  /// Create an identity map between the arrays in the scop.
  ///
  /// @returns An identity map between the arrays in the scop.
  isl_union_map *getArrayIdentity() {
    isl_union_map *Maps = isl_union_map_empty(S->getParamSpace().release());

    for (auto &Array : S->arrays()) {
      isl_space *Space = Array->getSpace().release();
      Space = isl_space_map_from_set(Space);
      isl_map *Identity = isl_map_identity(Space);
      Maps = isl_union_map_add_map(Maps, Identity);
    }

    return Maps;
  }
  /// Create a default-initialized PPCG GPU program.
  ///
  /// @returns A new gpu program description.
  gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {

    if (!PPCGScop)
      return nullptr;

    auto PPCGProg = isl_calloc_type(S->getIslCtx(), struct gpu_prog);

    PPCGProg->ctx = S->getIslCtx();
    PPCGProg->scop = PPCGScop;
    PPCGProg->context = isl_set_copy(PPCGScop->context);
    PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
    PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
    PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
    PPCGProg->tagged_must_kill =
        isl_union_map_copy(PPCGScop->tagged_must_kills);
    PPCGProg->to_inner = getArrayIdentity();
    PPCGProg->to_outer = getArrayIdentity();
    // TODO: verify that this assignment is correct.
    PPCGProg->any_to_outer = nullptr;
    PPCGProg->n_stmts = std::distance(S->begin(), S->end());
    PPCGProg->stmts = getStatements();

    // Only consider arrays that have a non-empty extent.
    // Otherwise, this will cause us to consider the following kinds of
    // arrays:
    // 1. Invariant loads that are represented by SAI objects.
    // 2. Arrays with statically known zero size.
    auto ValidSAIsRange =
        make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool {
          return !getExtent(SAI).is_empty();
        });
    SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(),
                                              ValidSAIsRange.end());

    PPCGProg->n_array =
        ValidSAIs.size(); // std::distance(S->array_begin(), S->array_end());
    PPCGProg->array = isl_calloc_array(S->getIslCtx(), struct gpu_array_info,
                                       PPCGProg->n_array);

    createArrays(PPCGProg, ValidSAIs);

    PPCGProg->array_order = nullptr;
    collect_order_dependences(PPCGProg);

    PPCGProg->may_persist = compute_may_persist(PPCGProg);

    return PPCGProg;
  }
  struct PrintGPUUserData {
    struct cuda_info *CudaInfo;
    struct gpu_prog *PPCGProg;
    std::vector<ppcg_kernel *> Kernels;
  };

  /// Print a user statement node in the host code.
  ///
  /// We use ppcg's printing facilities to print the actual statement and
  /// additionally build up a list of all kernels that are encountered in the
  /// host ast.
  ///
  /// @param P The printer to print to
  /// @param Options The printing options to use
  /// @param Node The node to print
  /// @param User A user pointer to carry additional data. This pointer is
  ///             expected to be of type PrintGPUUserData.
  ///
  /// @returns A printer to which the output has been printed.
  static __isl_give isl_printer *
  printHostUser(__isl_take isl_printer *P,
                __isl_take isl_ast_print_options *Options,
                __isl_take isl_ast_node *Node, void *User) {
    auto Data = (struct PrintGPUUserData *)User;
    auto Id = isl_ast_node_get_annotation(Node);

    if (Id) {
      bool IsUser = !strcmp(isl_id_get_name(Id), "user");

      // If this is a user statement, format it ourselves as ppcg would
      // otherwise try to call pet functionality that is not available in
      // Polly.
      if (IsUser) {
        P = isl_printer_start_line(P);
        P = isl_printer_print_ast_node(P, Node);
        P = isl_printer_end_line(P);
        isl_id_free(Id);
        isl_ast_print_options_free(Options);
        return P;
      }

      auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
      isl_id_free(Id);
      Data->Kernels.push_back(Kernel);
    }

    return print_host_user(P, Options, Node, User);
  }
  /// Print C code corresponding to the control flow in @p Kernel.
  ///
  /// @param Kernel The kernel to print
  void printKernel(ppcg_kernel *Kernel) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);
    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    P = isl_ast_node_print(Kernel->tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);
  }
  /// Print C code corresponding to the GPU code described by @p Tree.
  ///
  /// @param Tree An AST describing GPU code
  /// @param PPCGProg The PPCG program from which @p Tree has been constructed.
  void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);

    PrintGPUUserData Data;
    Data.PPCGProg = PPCGProg;

    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    Options =
        isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
    P = isl_ast_node_print(Tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("%s\n", String);
    free(String);

    isl_printer_free(P);

    for (auto Kernel : Data.Kernels) {
      printf("# kernel%d\n", Kernel->id);
      printKernel(Kernel);
    }
  }
  // Generate a GPU program using PPCG.
  //
  // GPU mapping consists of multiple steps:
  //
  //  1) Compute new schedule for the program.
  //  2) Map schedule to GPU (TODO)
  //  3) Generate code for new schedule (TODO)
  //
  // We do not use here the Polly ScheduleOptimizer, as the schedule optimizer
  // is mostly CPU specific. Instead, we use PPCG's GPU code generation
  // strategy directly from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set scheduling strategy to same strategy PPCG is using.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);

    isl_schedule *Schedule = get_schedule(PPCGGen);

    int has_permutable = has_any_permutable_node(Schedule);

    Schedule =
        isl_schedule_align_params(Schedule, S->getFullParamSpace().release());

    if (!has_permutable || has_permutable < 0) {
      Schedule = isl_schedule_free(Schedule);
      DEBUG(dbgs() << getUniqueScopName(S)
                   << " does not have permutable bands. Bailing out\n";);
    } else {
      const bool CreateTransferToFromDevice = !PollyManagedMemory;
      Schedule = map_to_device(PPCGGen, Schedule, CreateTransferToFromDevice);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      printf("%s\n", isl_printer_get_str(P));
      isl_printer_free(P);
    }

    if (DumpCode) {
      printf("Code\n");
      printf("====\n");
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        printf("No code generated\n");
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }
  /// Free gpu_gen structure.
  ///
  /// @param PPCGGen The ppcg_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg is not freeing these options for us. To avoid leaks we do this
  /// ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }
  /// Approximate the number of points in the set.
  ///
  /// This function returns an ast expression that overapproximates the number
  /// of points in an isl set through the rectangular hull surrounding this
  /// set.
  ///
  /// @param Set The set to count.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of points in the set.
  __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
                                             __isl_keep isl_ast_build *Build) {

    isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
    auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));

    isl_space *Space = isl_set_get_space(Set);
    Space = isl_space_params(Space);
    auto *Univ = isl_set_universe(Space);
    isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);

    for (long i = 0; i < isl_set_dim(Set, isl_dim_set); i++) {
      isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
      isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
      isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
      DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
      auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
      Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
    }

    isl_set_free(Set);
    isl_pw_aff_free(OneAff);

    return Expr;
  }
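
  // For instance (illustrative), for { S[i, j] : 0 <= i < n and 0 <= j < 8 }
  // the expression built above is (n - 1 - 0 + 1) * (7 - 0 + 1), i.e. n * 8:
  // the product over all dimensions of (max - min + 1), a rectangular
  // overapproximation of the true point count.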
  /// Approximate a number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt The statement for which to compute the number of dynamic
  ///             instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
    auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }
  /// Approximate dynamic instructions executed in scop.
  ///
  /// @param S The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }
  /// Create a check that ensures sufficient compute in scop.
  ///
  /// @param S The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE, otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }
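
  // The resulting run-time check has the shape (pseudo-C, illustrative):
  //
  //   if (approx_dynamic_instructions >= MinCompute) { /* run GPU version */ }
  //
  // so tiny scops fall back to the host code instead of paying the kernel
  // launch and data-transfer overhead.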
  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
                                            bool AllowCUDALibDevice) {
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
                                          AllowCUDALibDevice)) {
        continue;
      }

      for (Value *SrcVal : Inst.operands()) {
        PointerType *p = dyn_cast<PointerType>(SrcVal->getType());
        if (!p)
          continue;
        if (isa<FunctionType>(p->getElementType()))
          return true;
      }
    }
    return false;
  }
  /// Return whether the Scop S uses functions in a way that we do not support.
  bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
                                                 AllowCUDALibDevice))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
            return true;
      }
    }

    return false;
  }
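  // Note: both block and region statements funnel into
  // containsInvalidKernelFunctionInBlock, so a single offending basic block
  // anywhere in the scop disables GPU mapping for the whole scop.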
  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder = createPollyIRBuilder(EnteringBB, Annotator);

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important as the conditional
    // branch will guard the original scop from new induction variables that
    // the SCEVExpander may introduce while code generating the parameters and
    // which may introduce scalar dependences that prevent us from correctly
    // code generating this scop.
    BBPair StartExitBlocks;
    BranchInst *CondBr = nullptr;
    std::tie(StartExitBlocks, CondBr) =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    assert(CondBr && "CondBr not initialized by executeScopConditionally");

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx());
    isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition = isl_ast_expr_and(Condition, SufficientCompute);
    isl_ast_build_free(Build);

    // Preload invariant loads. Note: This should happen before the RTC
    // because the RTC may depend on values that are invariant load hoisted.
    if (!NodeBuilder.preloadInvariantLoads()) {
      DEBUG(dbgs() << "preloading invariant loads failed in function: " +
                          S->getFunction().getName() +
                          " | Scop Region: " + S->getNameStr());
      // Adjust the dominator tree accordingly.
      auto *ExitingBlock = StartBlock->getUniqueSuccessor();
      assert(ExitingBlock);
      auto *MergeBlock = ExitingBlock->getUniqueSuccessor();
      assert(MergeBlock);
      polly::markBlockUnreachable(*StartBlock, Builder);
      polly::markBlockUnreachable(*ExitingBlock, Builder);
      auto *ExitingBB = S->getExitingBlock();
      assert(ExitingBB);

      DT->changeImmediateDominator(MergeBlock, ExitingBB);
      DT->eraseNode(ExitingBlock);
      isl_ast_expr_free(Condition);
      isl_ast_node_free(Root);
    } else {
      NodeBuilder.addParameters(S->getContext().release());
      Value *RTC = NodeBuilder.createRTC(Condition);
      Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

      Builder.SetInsertPoint(&*StartBlock->begin());

      NodeBuilder.create(Root);
    }

    // In case a sequential kernel has more surrounding loops than any
    // parallel kernel, the SCoP is probably mostly sequential. Hence, there
    // is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      CondBr->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      CondBr->setOperand(0, Builder.getFalse());
  }
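  // Sketch of the control flow produced above (an inference from the calls
  // to executeScopConditionally and createRTC, not a literal dump):
  //
  //   SplitBlock: br i1 <RTC && sufficient-compute>, StartBlock, OrigScop
  //   StartBlock: GPU code emitted by GPUNodeBuilder
  //   OrigScop:   unmodified original scop, kept as a fallback
  //
  // The run-time check thus selects between GPU and host execution.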
  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    DEBUG(dbgs() << "PPCGCodeGen running on : " << getUniqueScopName(S)
                 << " | loop depth: " << S->getMaxLoopDepth() << "\n");

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation will need to offload function calls to the
    // kernel. This may lead to a kernel trying to call a function on the
    // host. This also allows us to prevent codegen from trying to take the
    // address of an intrinsic function to send to the kernel.
    if (containsInvalidKernelFunction(CurrentScop,
                                      Architecture == GPUArch::NVPTX64)) {
      DEBUG(
          dbgs() << getUniqueScopName(S)
                 << " contains function which cannot be materialised in a GPU "
                    "kernel. Bailing out.\n";);
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree) {
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
      CurrentScop.markAsToBeSkipped();
    } else {
      DEBUG(dbgs() << getUniqueScopName(S)
                   << " has empty PPCGGen->tree. Bailing out.\n");
    }

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }
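  // Per-scop flow implemented above: reject scops with unsupported function
  // uses, build the ppcg scop/prog wrappers, run ppcg's GPU mapping, and,
  // only if ppcg produced an AST, emit host and kernel code before freeing
  // all ppcg-owned objects.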
  void printScop(raw_ostream &, Scop &) const override {}
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<ScopDetectionWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<SCEVAAWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    //        region tree.
    AU.addPreserved<RegionInfoPass>();
    AU.addPreserved<ScopInfoRegionPass>();
  }
};
} // namespace
char PPCGCodeGeneration::ID = 1;
Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}
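// Typical construction (illustrative; GPUArch and GPURuntime are declared in
// polly/CodeGen/PPCGCodeGeneration.h, and the enum values used here are an
// assumption about that header):
//
//   legacy::PassManager PM;
//   PM.add(polly::createPPCGCodeGenerationPass(GPUArch::NVPTX64,
//                                              GPURuntime::CUDA));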
INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)