//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//
#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

#include "isl/union_map.h"

#include "ppcg/cuda.h"
#include "ppcg/gpu_print.h"
#include "ppcg/ppcg.h"
#include "ppcg/schedule.h"

#include "llvm/Support/Debug.h"
using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"
static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));

static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> ManagedMemory("polly-acc-codegen-managed-memory",
                                   cl::desc("Generate Host kernel code assuming"
                                            " that all memory has been"
                                            " declared as managed memory"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool>
    FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
                              cl::desc("Fail and generate a backtrace if"
                                       " verifyModule fails on the GPU "
                                       " kernel module."),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));

static cl::opt<std::string> CUDALibDevice(
    "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
    cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));
/// Used to store information PPCG wants for kills. This information is
/// used by live range reordering.
///
/// @see computeLiveRangeReordering
/// @see GPUNodeBuilder::createPPCGScop
/// @see GPUNodeBuilder::createPPCGProg
struct MustKillsInfo {
  /// Collection of all kill statements that will be sequenced at the end of
  /// PPCGScop->schedule.
  ///
  /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`
  /// which merges schedules in *arbitrary* order.
  /// (we don't care about the order of the kills anyway).
  isl::schedule KillsSchedule;

  /// Map from kill statement instances to scalars that need to be
  /// killed.
  ///
  /// We currently derive kill information for:
  ///  1. phi nodes. PHI nodes are not alive outside the scop and can
  ///     consequently all be killed.
  ///  2. Scalar arrays that are not used outside the Scop. This is
  ///     checked by `isScalarUsesContainedInScop`.
  /// [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
  isl::union_map TaggedMustKills;

  /// Tagged must kills stripped of the tags.
  /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] }
  isl::union_map MustKills;

  MustKillsInfo() : KillsSchedule(nullptr) {}
};
/// Check if SAI's uses are entirely contained within Scop S.
/// If a scalar is used only within a Scop, we are free to kill it, as no data
/// can flow in/out of the value any more.
/// @see computeMustKillsInfo
static bool isScalarUsesContainedInScop(const Scop &S,
                                        const ScopArrayInfo *SAI) {
  assert(SAI->isValueKind() && "this function only deals with scalars."
                               " Dealing with arrays requires alias analysis");

  const Region &R = S.getRegion();
  for (User *U : SAI->getBasePtr()->users()) {
    Instruction *I = dyn_cast<Instruction>(U);
    assert(I && "invalid user of scop array info");
    if (!R.contains(I))
      return false;
  }
  return true;
}
/// Compute must-kills needed to enable live range reordering with PPCG.
///
/// @param S The Scop to compute live range reordering information for.
/// @returns live range reordering information that can be used to setup
///          PPCG.
static MustKillsInfo computeMustKillsInfo(const Scop &S) {
  const isl::space ParamSpace(isl::manage(S.getParamSpace()));
  MustKillsInfo Info;

  // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
  //      1.1 phi nodes in scop.
  //      1.2 scalars that are only used within the scop
  SmallVector<isl::id, 4> KillMemIds;
  for (ScopArrayInfo *SAI : S.arrays()) {
    if (SAI->isPHIKind() ||
        (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
      KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
  }

  Info.TaggedMustKills = isl::union_map::empty(isl::space(ParamSpace));
  Info.MustKills = isl::union_map::empty(isl::space(ParamSpace));

  // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the
  // schedule tree:
  //     - filter: "[control] -> { }"
  // So, we choose to not create this to keep the output a little nicer,
  // at the cost of some code complexity.
  Info.KillsSchedule = nullptr;

  for (isl::id &ToKillId : KillMemIds) {
    isl::id KillStmtId = isl::id::alloc(
        S.getIslCtx(),
        std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);

    // NOTE: construction of tagged_must_kill:
    // 2. We need to construct a map:
    //     [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
    // To construct this, we use `isl_map_domain_product` on 2 maps:
    // 2a. StmtToScalar:
    //         [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
    // 2b. PhantomRefToScalar:
    //         [param] -> { ref_phantom[] -> scalar_to_kill[] }
    //
    // Combining these with `isl_map_domain_product` gives us
    // TaggedMustKill:
    //         [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }

    // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
    isl::map StmtToScalar = isl::map::universe(isl::space(ParamSpace));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));

    isl::id PhantomRefId = isl::id::alloc(
        S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
        nullptr);

    // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
    isl::map PhantomRefToScalar = isl::map::universe(isl::space(ParamSpace));
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);

    // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
    isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
    Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);

    // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
    Info.MustKills = Info.TaggedMustKills.domain_factor_domain();

    // 3. Create the kill schedule of the form:
    //     "[param] -> { Stmt_phantom[] }"
    // Then add this to Info.KillsSchedule.
    isl::space KillStmtSpace = ParamSpace;
    KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
    isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);

    isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
    if (Info.KillsSchedule)
      Info.KillsSchedule = Info.KillsSchedule.set(KillSchedule);
    else
      Info.KillsSchedule = KillSchedule;
  }

  return Info;
}
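
// Illustration (hypothetical names): for a scalar array MemRef_val, the loop
// above would build
//   TaggedMustKills: [p] -> { [SKill_phantom_MemRef_val[] ->
//                              ref_phantomMemRef_val[]] -> MemRef_val[] }
//   MustKills:       [p] -> { SKill_phantom_MemRef_val[] -> MemRef_val[] }
// and append a one-statement schedule over SKill_phantom_MemRef_val[].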
/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback used to generate the ast expressions for each
/// of the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, __isl_take isl_ast_build *Build_C,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  if (!Stmt || !Build_C)
    return NULL;

  isl::ast_build Build = isl::manage(isl_ast_build_copy(Build_C));
  isl::ctx Ctx = Build.get_ctx();
  isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);

  for (MemoryAccess *Acc : *Stmt) {
    isl::map AddrFunc = Acc->getAddressFunction();
    AddrFunc = AddrFunc.intersect_domain(isl::manage(Stmt->getDomain()));

    isl::id RefId = Acc->getId();
    isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);

    isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
    MPA = MPA.coalesce();
    MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));

    isl::ast_expr Access = Build.access_from(MPA);
    Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
    RefToExpr = RefToExpr.set(RefId, Access);
  }

  return RefToExpr.release();
}
/// Given a LLVM Type, compute its size in bytes.
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}
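
// For example, a float yields 4 and an i64 yields 8; the getScalarSizeInBits
// fallback covers types whose primitive size is reported as 0 bits.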
/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }

  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  virtual void finalize();

  /// Track if the full build process was successful.
  ///
  /// This value is set to false, if throughout the build process an error
  /// occurred which prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

  /// Return the name to set for the ptx_kernel.
  std::string getKernelFuncName(int Kernel_id);

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was
  /// created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed any
  /// more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel.
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  class IslIdDeleter {
  public:
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;
  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  virtual void createUser(__isl_take isl_ast_node *UserStmt);

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
  /// @param TransferStmt The data transfer statement.
  /// @param Direction    The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);

  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values
  ///
  /// @returns A pair, whose first element contains the set of values
  ///          referenced by the kernel, and whose second element contains the
  ///          set of functions referenced by the kernel. All functions in the
  ///          second set satisfy isValidFunctionInKernel.
  std::pair<SetVector<Value *>, SetVector<Function *>>
  getReferencesInKernel(ppcg_kernel *Kernel);

  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for X and Y dimension.
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);

  /// Creates an array that can be sent to the kernel on the device using a
  /// host pointer. This is required for managed memory, when we directly send
  /// host pointers to the device.
  ///
  /// This is to be used only with managed memory.
  Value *getOrCreateManagedDeviceArray(gpu_array_info *Array,
                                       ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);

  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);

  /// Create kernel launch parameters.
  ///
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variables.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel The kernel definition to create variables for.
  /// @param FN     The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);

  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);
  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Generate code that computes the size of an array.
  ///
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);

  /// Generate code to compute the minimal offset at which an array is
  /// accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42] += ...
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation.
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);

  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel           The kernel to generate code for.
  /// @param SubtreeValues    The set of llvm::Values referenced by this
  ///                         kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Host iterators
  ///   - Parameters
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel        The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);

  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Insert function calls to retrieve the SPIR group/local ids.
  ///
  /// @param Kernel The kernel to generate the function calls for.
  void insertKernelCallsSPIR(ppcg_kernel *Kernel);

  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  ///    SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  ///    old functions (that come from the original module) to new functions
  ///    (that are created within GPUModule). That way, we generate references
  ///    to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         this kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);

  /// Create a global-to-shared or shared-to-global copy statement.
  ///
  /// @param CopyStmt The copy statement to generate code for.
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr       The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();

  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Check if the scop requires to be linked with CUDA's libdevice.
  bool requiresCUDALibDevice();

  /// Link with the NVIDIA libdevice library (if needed and available).
  void addCUDALibDevice();

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested
  /// -- dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();

  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @param Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);

  /// Create code that allocates memory to store arrays on device.
  void allocateDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();

  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();

  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation.
  ///
  /// @returns The device parameter corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate.
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);

  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr   A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr   A host pointer specifying the location to copy to.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  ///
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object.
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);

  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param BlockDimX  The size of the first block dimension.
  /// @param BlockDimY  The size of the second block dimension.
  /// @param BlockDimZ  The size of the third block dimension.
  /// @param Parameters A pointer to an array that contains itself pointers to
  ///                   the parameter values passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};

std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
  return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
         std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
}
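
// For illustration: kernel 2 of a scop with ID 0 in a function named "gemm"
// would be called "FUNC_gemm_SCOP_0_KERNEL_2" (hypothetical names).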
void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!ManagedMemory)
    allocateDeviceArrays();
}

void GPUNodeBuilder::finalize() {
  if (!ManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}
747 assert(!ManagedMemory
&& "Managed memory will directly send host pointers "
748 "to the kernel. There is no need for device arrays");
749 isl_ast_build
*Build
= isl_ast_build_from_context(S
.getContext());
751 for (int i
= 0; i
< Prog
->n_array
; ++i
) {
752 gpu_array_info
*Array
= &Prog
->array
[i
];
753 auto *ScopArray
= (ScopArrayInfo
*)Array
->user
;
754 std::string
DevArrayName("p_dev_array_");
755 DevArrayName
.append(Array
->name
);
757 Value
*ArraySize
= getArraySize(Array
);
758 Value
*Offset
= getArrayOffset(Array
);
760 ArraySize
= Builder
.CreateSub(
762 Builder
.CreateMul(Offset
,
763 Builder
.getInt64(ScopArray
->getElemSizeInBytes())));
764 Value
*DevArray
= createCallAllocateMemoryForDevice(ArraySize
);
765 DevArray
->setName(DevArrayName
);
766 DeviceAllocations
[ScopArray
] = DevArray
;
769 isl_ast_build_free(Build
);
void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (Function &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

    Metadata *Elements[] = {
        ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };

    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}
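
// The generated metadata looks roughly as follows (illustrative values for a
// 32x16x1 thread block):
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @FUNC_..., !"maxntidx", i32 32, !"maxntidy", i32 16,
//          !"maxntidz", i32 1}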
void GPUNodeBuilder::freeDeviceArrays() {
  assert(!ManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}
Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}
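
// The declaration above corresponds to the runtime entry point
//   i8 *polly_getKernel(i8 *Buffer, i8 *Entry);
// The same declare-on-first-use pattern recurs for all polly_* runtime calls
// below.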
Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}
void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}
void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}
void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!ManagedMemory && "Managed memory does not allocate or free memory "
                           "for device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}
Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!ManagedMemory && "Managed memory does not allocate or free memory "
                           "for device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}
void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!ManagedMemory && "Managed memory does not transfer memory between "
                           "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}
void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!ManagedMemory && "Managed memory does not transfer memory between "
                           "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}
void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(ManagedMemory && "explicit synchronization is only necessary for "
                          "managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}
Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}
void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}
/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}
Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl::ast_build Build =
      isl::ast_build::from_context(isl::manage(S.getContext()));
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    isl::multi_pw_aff ArrayBound =
        isl::manage(isl_multi_pw_aff_copy(Array->bound));

    isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
    isl::ast_expr Res = Build.expr_from(OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
      isl::ast_expr Expr = Build.expr_from(Bound_I);
      Res = Res.mul(Expr);
    }

    Value *NumElements = ExprBuilder.create(Res.release());
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }

  return ArraySize;
}
Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
  if (gpu_array_is_scalar(Array))
    return nullptr;

  isl::ast_build Build =
      isl::ast_build::from_context(isl::manage(S.getContext()));

  isl::set Min = isl::manage(isl_set_copy(Array->extent)).lexmin();

  isl::set ZeroSet = isl::set::universe(Min.get_space());

  for (long i = 0; i < Min.dim(isl::dim::set); i++)
    ZeroSet = ZeroSet.fix_si(isl::dim::set, i, 0);

  if (Min.is_subset(ZeroSet)) {
    return nullptr;
  }

  isl::ast_expr Result = isl::ast_expr::from_val(isl::val(Min.get_ctx(), 0));

  for (long i = 0; i < Min.dim(isl::dim::set); i++) {
    if (i > 0) {
      isl::pw_aff Bound_I =
          isl::manage(isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1));
      isl::ast_expr BExpr = Build.expr_from(Bound_I);
      Result = Result.mul(BExpr);
    }
    isl::pw_aff DimMin = Min.dim_min(i);
    isl::ast_expr MExpr = Build.expr_from(DimMin);
    Result = Result.add(MExpr);
  }

  return ExprBuilder.create(Result.release());
}
Value *GPUNodeBuilder::getOrCreateManagedDeviceArray(gpu_array_info *Array,
                                                     ScopArrayInfo *ArrayInfo) {

  assert(ManagedMemory && "Only used when you wish to get a host "
                          "pointer for sending data to the kernel, "
                          "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  if ((it = DeviceAllocations.find(ArrayInfo)) != DeviceAllocations.end()) {
    return it->second;
  } else {
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ArrayInfo);
    else
      HostPtr = ArrayInfo->getBasePtr();
    HostPtr = getLatestValue(HostPtr);

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ArrayInfo->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ArrayInfo] = HostPtr;
    return HostPtr;
  }
}
void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!ManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();
  HostPtr = getLatestValue(HostPtr);

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset) {
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
  }

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}
void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);

  const char *Str = isl_id_get_name(Id);
  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "init_device")) {
    initializeAfterRTH();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "clear_device")) {
    finalize();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (isPrefix(Str, "to_device")) {
    if (!ManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!ManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      createCallSynchronizeDevice();
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
}
void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}
*Expr
,
1258 ppcg_kernel_stmt
*KernelStmt
) {
1259 auto Stmt
= (ScopStmt
*)KernelStmt
->u
.d
.stmt
->stmt
;
1260 isl_id_to_ast_expr
*Indexes
= KernelStmt
->u
.d
.ref2expr
;
1263 LTS
.insert(OutsideLoopIterations
.begin(), OutsideLoopIterations
.end());
1265 createSubstitutions(Expr
, Stmt
, LTS
);
1267 if (Stmt
->isBlockStmt())
1268 BlockGen
.copyStmt(*Stmt
, LTS
, Indexes
);
1270 RegionGen
.copyStmt(*Stmt
, LTS
, Indexes
);
void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  const char *SpirName = "__gen_ocl_barrier_global";

  Function *Sync;

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    Sync = M->getFunction(SpirName);

    // If Sync is not available, declare it.
    if (!Sync) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
      Sync = Function::Create(Ty, Linkage, SpirName, M);
      Sync->setCallingConv(CallingConv::SPIR_FUNC);
    }
    break;
  case GPUArch::NVPTX64:
    Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
    break;
  }

  Builder.CreateCall(Sync, {});
}
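
// On NVPTX this emits the nvvm barrier0 intrinsic, which corresponds to
// CUDA's __syncthreads(); on SPIR targets it calls __gen_ocl_barrier_global.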
/// Collect llvm::Values referenced from @p Node
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_id_free(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_free(Id);

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}
/// A list of functions that are available in NVIDIA's libdevice.
const std::set<std::string> CUDALibDeviceFunctions = {
    "exp",  "expf",  "expl",     "cos",       "cosf",
    "sqrt", "sqrtf", "copysign", "copysignf", "copysignl"};

/// Return the corresponding CUDA libdevice function name for @p F.
///
/// Return "" if we are not compiling for CUDA.
std::string getCUDALibDeviceFuntion(Function *F) {
  if (CUDALibDeviceFunctions.count(F->getName()))
    return std::string("__nv_") + std::string(F->getName());

  return "";
}
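
// For example, a call to "expf" is redirected to "__nv_expf", its libdevice
// counterpart.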
/// Check if F is a function that we can code-generate in a GPU kernel.
static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
  assert(F && "F is an invalid pointer");
  // We string compare against the name of the function to allow
  // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
  // "llvm.copysign".
  const StringRef Name = F->getName();

  if (AllowLibDevice && getCUDALibDeviceFuntion(F).length() > 0)
    return true;

  return F->isIntrinsic() &&
         (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
          Name.startswith("llvm.copysign"));
}

/// Do not take `Function` as a subtree value.
///
/// We try to take the reference of all subtree values and pass them along
/// to the kernel from the host. Taking an address of any function and
/// trying to pass along is nonsensical. Only allow `Value`s that are not
/// `Function`s.
static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }
/// Return `Function`s from `RawSubtreeValues`.
static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
                                 bool AllowCUDALibDevice) {
  SetVector<Function *> SubtreeFunctions;
  for (Value *It : RawSubtreeValues) {
    Function *F = dyn_cast<Function>(It);
    if (F) {
      assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
             "Code should have bailed out by "
             "this point if an invalid function "
             "were present in a kernel.");
      SubtreeFunctions.insert(F);
    }
  }
  return SubtreeFunctions;
}
std::pair<SetVector<Value *>, SetVector<Function *>>
GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator()};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);

  for (const SCEV *Expr : SCEVs)
    findValues(Expr, SE, SubtreeValues);

  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI->getBasePtr());

  isl_space *Space = S.getParamSpace();
  for (long i = 0; i < isl_space_dim(Space, isl_dim_param); i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }
  isl_space_free(Space);

  for (long i = 0; i < isl_space_dim(Kernel->space, isl_dim_set); i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
  // SubtreeValues. This is important, because we should not lose any
  // SubtreeValues in the process of constructing the
  // ValidSubtree{Values, Functions} sets. Nor should the sets
  // ValidSubtree{Values, Functions} have any common element.
  auto ValidSubtreeValuesIt =
      make_filter_range(SubtreeValues, isValidSubtreeValue);
  SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
                                        ValidSubtreeValuesIt.end());

  bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;

  SetVector<Function *> ValidSubtreeFunctions(
      getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));

  // @see IslNodeBuilder::getReferencesInSubtree
  SetVector<Value *> ReplacedValues;
  for (Value *V : ValidSubtreeValues) {
    auto It = ValueMap.find(V);
    if (It == ValueMap.end())
      ReplacedValues.insert(V);
    else
      ReplacedValues.insert(It->second);
  }
  return std::make_pair(ReplacedValues, ValidSubtreeFunctions);
}
void GPUNodeBuilder::clearDominators(Function *F) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock *> Nodes;
  for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (BasicBlock *BB : Nodes)
    DT.eraseNode(BB);
}

void GPUNodeBuilder::clearScalarEvolution(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
  }
}

void GPUNodeBuilder::clearLoops(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
    LI.removeBlock(&BB);
  }
}
std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;
  isl::ast_build Context =
      isl::ast_build::from_context(isl::manage(S.getContext()));

  isl::multi_pw_aff GridSizePwAffs =
      isl::manage(isl_multi_pw_aff_copy(Kernel->grid_size));
  for (long i = 0; i < Kernel->n_grid; i++) {
    isl::pw_aff Size = GridSizePwAffs.get_pw_aff(i);
    isl::ast_expr GridSize = Context.expr_from(Size);
    Value *Res = ExprBuilder.create(GridSize.release());
    Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_grid; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1]);
}
<Value
*, Value
*, Value
*>
1506 GPUNodeBuilder::getBlockSizes(ppcg_kernel
*Kernel
) {
1507 std::vector
<Value
*> Sizes
;
1509 for (long i
= 0; i
< Kernel
->n_block
; i
++) {
1510 Value
*Res
= ConstantInt::get(Builder
.getInt32Ty(), Kernel
->block_dim
[i
]);
1511 Sizes
.push_back(Res
);
1514 for (long i
= Kernel
->n_block
; i
< 3; i
++)
1515 Sizes
.push_back(ConstantInt::get(Builder
.getInt32Ty(), 1));
1517 return std::make_tuple(Sizes
[0], Sizes
[1], Sizes
[2]);
void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
                                          Instruction *Param, int Index) {
  Value *Slot = Builder.CreateGEP(
      Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
  Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
  Builder.CreateStore(ParamTyped, Slot);
}
Value *
GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                       SetVector<Value *> SubtreeValues) {
  const int NumArgs = F->arg_size();
  std::vector<int> ArgSizes(NumArgs);

  Type *ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);

  BasicBlock *EntryBlock =
      &Builder.GetInsertBlock()->getParent()->getEntryBlock();
  auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
  std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
  Instruction *Parameters = new AllocaInst(
      ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());

  int Index = 0;
  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));

    ArgSizes[Index] = SAI->getElemSizeInBytes();

    Value *DevArray = nullptr;
    if (ManagedMemory) {
      DevArray = getOrCreateManagedDeviceArray(
          &Prog->array[i], const_cast<ScopArrayInfo *>(SAI));
    } else {
      DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
      DevArray = createCallGetDevicePtr(DevArray);
    }
    assert(DevArray != nullptr && "Array to be offloaded to device not "
                                  "initialized");
    Value *Offset = getArrayOffset(&Prog->array[i]);

    if (Offset) {
      DevArray = Builder.CreatePointerCast(
          DevArray, SAI->getElementType()->getPointerTo());
      DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset));
      DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
    }
    Value *Slot = Builder.CreateGEP(
        Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Value *ValPtr = nullptr;
      if (ManagedMemory)
        ValPtr = DevArray;
      else
        ValPtr = BlockGen.getOrCreateAlloca(SAI);

      assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
                                  " to be stored into Parameters");
      Value *ValPtrCast =
          Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
      Builder.CreateStore(ValPtrCast, Slot);
    } else {
      Instruction *Param =
          new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
                         Launch + "_param_" + std::to_string(Index),
                         EntryBlock->getTerminator());
      Builder.CreateStore(DevArray, Param);
      Value *ParamTyped =
          Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
      Builder.CreateStore(ParamTyped, Slot);
    }
    Index++;
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);

    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    if (ValueMap.count(Val))
      Val = ValueMap[Val];
    isl_id_free(Id);

    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (auto Val : SubtreeValues) {
    ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (int i = 0; i < NumArgs; i++) {
    Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
    Instruction *Param =
        new AllocaInst(Builder.getInt32Ty(), AddressSpace,
                       Launch + "_param_size_" + std::to_string(i),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  auto Location = EntryBlock->getTerminator();
  return new BitCastInst(Parameters, Builder.getInt8PtrTy(),
                         Launch + "_params_i8ptr", Location);
}
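
// Layout of the launch-parameter array built above for a kernel with NumArgs
// arguments (2 * NumArgs i8* slots, as allocated for ArrayTy):
//   slots [0, NumArgs):         pointers to the argument values
//   slots [NumArgs, 2*NumArgs): pointers to i32 sizes (ArgSizes[i])
// The runtime receives the whole array through the single i8* returned here.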
void GPUNodeBuilder::setupKernelSubtreeFunctions(
    SetVector<Function *> SubtreeFunctions) {
  for (auto Fn : SubtreeFunctions) {
    const std::string ClonedFnName = Fn->getName();
    Function *Clone = GPUModule->getFunction(ClonedFnName);
    if (!Clone)
      Clone =
          Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
                           ClonedFnName, GPUModule.get());
    assert(Clone && "Expected cloned function to be initialized.");
    assert(ValueMap.find(Fn) == ValueMap.end() &&
           "Fn already present in ValueMap");
    ValueMap[Fn] = Clone;
  }
}
void GPUNodeBuilder::createKernel(__isl_take isl_ast_node *KernelStmt) {
  isl_id *Id = isl_ast_node_get_annotation(KernelStmt);
  ppcg_kernel *Kernel = (ppcg_kernel *)isl_id_get_user(Id);
  isl_id_free(Id);
  isl_ast_node_free(KernelStmt);

  if (Kernel->n_grid > 1)
    DeepestParallel =
        std::max(DeepestParallel, isl_space_dim(Kernel->space, isl_dim_set));
  else
    DeepestSequential =
        std::max(DeepestSequential, isl_space_dim(Kernel->space, isl_dim_set));

  Value *BlockDimX, *BlockDimY, *BlockDimZ;
  std::tie(BlockDimX, BlockDimY, BlockDimZ) = getBlockSizes(Kernel);

  SetVector<Value *> SubtreeValues;
  SetVector<Function *> SubtreeFunctions;
  std::tie(SubtreeValues, SubtreeFunctions) = getReferencesInKernel(Kernel);

  assert(Kernel->tree && "Device AST of kernel node is empty");

  Instruction &HostInsertPoint = *Builder.GetInsertPoint();
  IslExprBuilder::IDToValueTy HostIDs = IDToValue;
  ValueMapT HostValueMap = ValueMap;
  BlockGenerator::AllocaMapTy HostScalarMap = ScalarMap;

  SetVector<const Loop *> Loops;

  // For all loops we depend on, create values that contain the current loop
  // iteration. These values are necessary to generate code for SCEVs that
  // depend on such loops. As a result we need to pass them to the subfunction.
  for (const Loop *L : Loops) {
    const SCEV *OuterLIV = SE.getAddRecExpr(SE.getUnknown(Builder.getInt64(0)),
                                            SE.getUnknown(Builder.getInt64(1)),
                                            L, SCEV::FlagAnyWrap);
    Value *V = generateSCEV(OuterLIV);
    OutsideLoopIterations[L] = SE.getUnknown(V);
    SubtreeValues.insert(V);
  }

  createKernelFunction(Kernel, SubtreeValues, SubtreeFunctions);
  setupKernelSubtreeFunctions(SubtreeFunctions);
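
  // Generate device code by walking the kernel's AST; the builder's insert
  // point is currently inside the freshly created kernel function.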
  create(isl_ast_node_copy(Kernel->tree));

  finalizeKernelArguments(Kernel);
  Function *F = Builder.GetInsertBlock()->getParent();
  if (Arch == GPUArch::NVPTX64)
    addCUDAAnnotations(F->getParent(), BlockDimX, BlockDimY, BlockDimZ);
  clearScalarEvolution(F);

  IDToValue = HostIDs;

  ValueMap = std::move(HostValueMap);
  ScalarMap = std::move(HostScalarMap);

  Annotator.resetAlternativeAliasBases();
  for (auto &BasePtr : LocalArrays)
    S.invalidateScopArrayInfo(BasePtr, MemoryKind::Array);
  LocalArrays.clear();

  std::string ASMString = finalizeKernelFunction();
  Builder.SetInsertPoint(&HostInsertPoint);
  Value *Parameters = createLaunchParameters(Kernel, F, SubtreeValues);

  std::string Name = getKernelFuncName(Kernel->id);
  Value *KernelString = Builder.CreateGlobalStringPtr(ASMString, Name);
  Value *NameString = Builder.CreateGlobalStringPtr(Name, Name + "_name");
  Value *GPUKernel = createCallGetKernel(KernelString, NameString);

  Value *GridDimX, *GridDimY;
  std::tie(GridDimX, GridDimY) = getGridSizes(Kernel);

  createCallLaunchKernel(GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters);
  createCallFreeKernel(GPUKernel);

  for (auto Id : KernelIds)
    isl_id_free(Id);
  KernelIds.clear();
}
/// Compute the DataLayout string for the NVPTX backend.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeNVPTXDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  }

  return Ret;
}
/// Compute the DataLayout string for a SPIR kernel.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeSPIRDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  }

  return Ret;
}
Function *
GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel *Kernel,
                                         SetVector<Value *> &SubtreeValues) {
  std::vector<Type *> Args;
  std::string Identifier = getKernelFuncName(Kernel->id);

  std::vector<Metadata *> MemoryType;

  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
      const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));
      Args.push_back(SAI->getElementType());
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
    } else {
      static const int UseGlobalMemory = 1;
      Args.push_back(Builder.getInt8PtrTy(UseGlobalMemory));
      MemoryType.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 1)));
    }
  }
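
  // MemoryType records the address-space qualifier of each kernel argument:
  // 1 for pointers into global memory, 0 for values passed by value. It is
  // only attached, as "kernel_arg_addr_space" metadata, for SPIR targets
  // below.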
  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    Args.push_back(Builder.getInt64Ty());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);
    Args.push_back(Val->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  for (auto *V : SubtreeValues) {
    Args.push_back(V->getType());
    MemoryType.push_back(
        ConstantAsMetadata::get(ConstantInt::get(Builder.getInt32Ty(), 0)));
  }

  auto *FT = FunctionType::get(Builder.getVoidTy(), Args, false);
  auto *FN = Function::Create(FT, Function::ExternalLinkage, Identifier,
                              GPUModule.get());

  std::vector<Metadata *> EmptyStrings;

  for (unsigned int i = 0; i < MemoryType.size(); i++) {
    EmptyStrings.push_back(MDString::get(FN->getContext(), ""));
  }

  if (Arch == GPUArch::SPIR32 || Arch == GPUArch::SPIR64) {
    FN->setMetadata("kernel_arg_addr_space",
                    MDNode::get(FN->getContext(), MemoryType));
    FN->setMetadata("kernel_arg_name",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_access_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_type_qual",
                    MDNode::get(FN->getContext(), EmptyStrings));
    FN->setMetadata("kernel_arg_base_type",
                    MDNode::get(FN->getContext(), EmptyStrings));
  }

  switch (Arch) {
  case GPUArch::NVPTX64:
    FN->setCallingConv(CallingConv::PTX_Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    FN->setCallingConv(CallingConv::SPIR_KERNEL);
    break;
  }
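
  // Wire up the freshly created arguments: array arguments get a kernel-local
  // ScopArrayInfo describing their shape on the device, while host iterators,
  // parameters, and subtree values are redirected to the corresponding
  // function argument via IDToValue and ValueMap.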
  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    Arg->setName(Kernel->array[i].array->name);

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    Type *EleTy = SAI->getElementType();
    Value *Val = &*Arg;

    SmallVector<const SCEV *, 4> Sizes;
    isl_ast_build *Build =
        isl_ast_build_from_context(isl_set_copy(Prog->context));
    Sizes.push_back(nullptr);
    for (long j = 1; j < Kernel->array[i].array->n_index; j++) {
      isl_ast_expr *DimSize = isl_ast_build_expr_from_pw_aff(
          Build, isl_multi_pw_aff_get_pw_aff(Kernel->array[i].array->bound, j));
      auto V = ExprBuilder.create(DimSize);
      Sizes.push_back(SE.getSCEV(V));
    }
    const ScopArrayInfo *SAIRep =
        S.getOrCreateScopArrayInfo(Val, EleTy, Sizes, MemoryKind::Array);
    LocalArrays.push_back(Val);

    isl_ast_build_free(Build);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAIRep;
    Arg++;
  }

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Arg->setName(isl_id_get_name(Id));
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Arg->setName(isl_id_get_name(Id));
    Value *Val = IDToValue[Id];
    ValueMap[Val] = &*Arg;
    IDToValue[Id] = &*Arg;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
    Arg++;
  }

  for (auto *V : SubtreeValues) {
    Arg->setName(V->getName());
    ValueMap[V] = &*Arg;
    Arg++;
  }

  return FN;
}
void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
  Intrinsic::ID IntrinsicsBID[2];
  Intrinsic::ID IntrinsicsTID[3];

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
  case GPUArch::NVPTX64:
    IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
    IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;

    IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
    IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
    IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
    break;
  }
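
  // Block ids are read from the PTX ctaid special registers and thread ids
  // from the tid registers. PPCG maps at most two grid dimensions, so only
  // the x and y ctaid intrinsics are needed, while all three tid dimensions
  // may be used.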
  auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
    std::string Name = isl_id_get_name(Id);
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
    Value *Val = Builder.CreateCall(IntrinsicFn, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
    addId(Id, IntrinsicsBID[i]);
  }

  for (int i = 0; i < Kernel->n_block; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
    addId(Id, IntrinsicsTID[i]);
  }
}
void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel) {
  const char *GroupName[3] = {"__gen_ocl_get_group_id0",
                              "__gen_ocl_get_group_id1",
                              "__gen_ocl_get_group_id2"};

  const char *LocalName[3] = {"__gen_ocl_get_local_id0",
                              "__gen_ocl_get_local_id1",
                              "__gen_ocl_get_local_id2"};
  auto createFunc = [this](const char *Name, __isl_take isl_id *Id) mutable {
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *FN = M->getFunction(Name);

    // If FN is not available, declare it.
    if (!FN) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getInt32Ty(), Args, false);
      FN = Function::Create(Ty, Linkage, Name, M);
      FN->setCallingConv(CallingConv::SPIR_FUNC);
    }

    Value *Val = Builder.CreateCall(FN, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i)
    createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i));

  for (int i = 0; i < Kernel->n_block; ++i)
    createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i));
}
void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel *Kernel,
                                            Function *FN) {
  auto Arg = FN->arg_begin();
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    Value *Val = &*Arg;

    if (!gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Type *TypePtr = SAI->getElementType()->getPointerTo();
      Value *TypedArgPtr = Builder.CreatePointerCast(Val, TypePtr);
      Val = Builder.CreateLoad(TypedArgPtr);
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Builder.CreateStore(Val, Alloca);

    Arg++;
  }
}
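
// Writable scalars are passed to the kernel by pointer. On kernel exit the
// value held in the kernel-local alloca is copied back through that pointer
// so the host observes the update.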
void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel *Kernel) {
  auto *FN = Builder.GetInsertBlock()->getParent();
  auto Arg = FN->arg_begin();

  bool StoredScalar = false;
  for (long i = 0; i < Kernel->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI =
        ScopArrayInfo::getFromId(isl::manage(isl_id_copy(Id)));
    isl_id_free(Id);

    if (SAI->getNumberOfDimensions() > 0) {
      Arg++;
      continue;
    }

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Arg++;
      continue;
    }

    Value *Alloca = BlockGen.getOrCreateAlloca(SAI);
    Value *ArgPtr = &*Arg;
    Type *TypePtr = SAI->getElementType()->getPointerTo();
    Value *TypedArgPtr = Builder.CreatePointerCast(ArgPtr, TypePtr);
    Value *Val = Builder.CreateLoad(Alloca);
    Builder.CreateStore(Val, TypedArgPtr);
    StoredScalar = true;

    Arg++;
  }

  if (StoredScalar)
    /// In case more than one thread contains scalar stores, the generated
    /// code might be incorrect, if we only store at the end of the kernel.
    /// To support this case we need to store these scalars back at each
    /// memory store or at least before each kernel barrier.
    if (Kernel->n_block != 0 || Kernel->n_grid != 0)
      BuildSuccessful = 0;
}
void GPUNodeBuilder::createKernelVariables(ppcg_kernel *Kernel, Function *FN) {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  for (int i = 0; i < Kernel->n_var; ++i) {
    struct ppcg_kernel_var &Var = Kernel->var[i];
    isl_id *Id = isl_space_get_tuple_id(Var.array->space, isl_dim_set);
    Type *EleTy = ScopArrayInfo::getFromId(isl::manage(Id))->getElementType();

    Type *ArrayTy = EleTy;
    SmallVector<const SCEV *, 4> Sizes;

    Sizes.push_back(nullptr);
    for (unsigned int j = 1; j < Var.array->n_index; ++j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      Sizes.push_back(S.getSE()->getConstant(Builder.getInt64Ty(), Bound));
    }

    for (int j = Var.array->n_index - 1; j >= 0; --j) {
      isl_val *Val = isl_vec_get_element_val(Var.size, j);
      long Bound = isl_val_get_num_si(Val);
      isl_val_free(Val);
      ArrayTy = ArrayType::get(ArrayTy, Bound);
    }

    const ScopArrayInfo *SAI;
    Value *Allocation;
    if (Var.type == ppcg_access_shared) {
      auto GlobalVar = new GlobalVariable(
          *M, ArrayTy, false, GlobalValue::InternalLinkage, 0, Var.name,
          nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal, 3);
      GlobalVar->setAlignment(EleTy->getPrimitiveSizeInBits() / 8);
      GlobalVar->setInitializer(Constant::getNullValue(ArrayTy));

      Allocation = GlobalVar;
    } else if (Var.type == ppcg_access_private) {
      Allocation = Builder.CreateAlloca(ArrayTy, 0, "private_array");
    } else {
      llvm_unreachable("unknown variable type");
    }
    SAI =
        S.getOrCreateScopArrayInfo(Allocation, EleTy, Sizes, MemoryKind::Array);
    Id = isl_id_alloc(S.getIslCtx(), Var.name, nullptr);
    IDToValue[Id] = Allocation;
    LocalArrays.push_back(Allocation);
    KernelIds.push_back(Id);
    IDToSAI[Id] = SAI;
  }
}
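
// Shared-memory variables are emitted as zero-initialized globals in address
// space 3 (the NVVM shared address space); private variables become plain
// allocas inside the kernel.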
void GPUNodeBuilder::createKernelFunction(
    ppcg_kernel *Kernel, SetVector<Value *> &SubtreeValues,
    SetVector<Function *> &SubtreeFunctions) {
  std::string Identifier = getKernelFuncName(Kernel->id);
  GPUModule.reset(new Module(Identifier, Builder.getContext()));

  switch (Arch) {
  case GPUArch::NVPTX64:
    if (Runtime == GPURuntime::CUDA)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    else if (Runtime == GPURuntime::OpenCL)
      GPUModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
    GPUModule->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
    break;
  case GPUArch::SPIR32:
    GPUModule->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
    break;
  case GPUArch::SPIR64:
    GPUModule->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
    GPUModule->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
    break;
  }

  Function *FN = createKernelFunctionDecl(Kernel, SubtreeValues);

  BasicBlock *PrevBlock = Builder.GetInsertBlock();
  auto EntryBlock = BasicBlock::Create(Builder.getContext(), "entry", FN);

  DT.addNewBlock(EntryBlock, PrevBlock);

  Builder.SetInsertPoint(EntryBlock);
  Builder.CreateRetVoid();
  Builder.SetInsertPoint(EntryBlock, EntryBlock->begin());

  ScopDetection::markFunctionAsInvalid(FN);

  prepareKernelArguments(Kernel, FN);
  createKernelVariables(Kernel, FN);

  switch (Arch) {
  case GPUArch::NVPTX64:
    insertKernelIntrinsics(Kernel);
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    insertKernelCallsSPIR(Kernel);
    break;
  }
}
std::string GPUNodeBuilder::createKernelASM() {
  llvm::Triple GPUTriple;

  switch (Arch) {
  case GPUArch::NVPTX64:
    switch (Runtime) {
    case GPURuntime::CUDA:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
      break;
    case GPURuntime::OpenCL:
      GPUTriple = llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
      break;
    }
    break;
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    std::string SPIRAssembly;
    raw_string_ostream IROstream(SPIRAssembly);
    IROstream << *GPUModule;
    IROstream.flush();
    return SPIRAssembly;
  }

  std::string ErrMsg;
  auto GPUTarget = TargetRegistry::lookupTarget(GPUTriple.getTriple(), ErrMsg);

  if (!GPUTarget) {
    errs() << ErrMsg << "\n";
    return "";
  }

  TargetOptions Options;
  Options.UnsafeFPMath = FastMath;

  std::string subtarget;

  switch (Arch) {
  case GPUArch::NVPTX64:
    subtarget = CudaVersion;
    break;
  case GPUArch::SPIR32:
  case GPUArch::SPIR64:
    llvm_unreachable("No subtarget for SPIR architecture");
  }

  std::unique_ptr<TargetMachine> TargetM(GPUTarget->createTargetMachine(
      GPUTriple.getTriple(), subtarget, "", Options, Optional<Reloc::Model>()));

  SmallString<0> ASMString;
  raw_svector_ostream ASMStream(ASMString);
  llvm::legacy::PassManager PM;

  PM.add(createTargetTransformInfoWrapperPass(TargetM->getTargetIRAnalysis()));

  if (TargetM->addPassesToEmitFile(
          PM, ASMStream, TargetMachine::CGFT_AssemblyFile, true /* verify */)) {
    errs() << "The target does not support generation of this file type!\n";
    return "";
  }

  PM.run(*GPUModule);

  return ASMStream.str();
}
bool GPUNodeBuilder::requiresCUDALibDevice() {
  for (Function &F : GPUModule->functions()) {
    if (!F.isDeclaration())
      continue;

    const std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(&F);
    if (CUDALibDeviceFunc.length() != 0) {
      F.setName(CUDALibDeviceFunc);
      return true;
    }
  }

  return false;
}
void GPUNodeBuilder::addCUDALibDevice() {
  if (Arch != GPUArch::NVPTX64)
    return;

  if (requiresCUDALibDevice()) {
    SMDiagnostic Error;

    errs() << CUDALibDevice << "\n";
    auto LibDeviceModule =
        parseIRFile(CUDALibDevice, Error, GPUModule->getContext());

    if (!LibDeviceModule) {
      BuildSuccessful = false;
      report_fatal_error("Could not find or load libdevice. Skipping GPU "
                         "kernel generation. Please set -polly-acc-libdevice "
                         "accordingly.\n");
    }

    Linker L(*GPUModule);

    // Set an nvptx64 target triple to avoid linker warnings. The original
    // triple of the libdevice files is nvptx-unknown-unknown.
    LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
  }
}
std::string GPUNodeBuilder::finalizeKernelFunction() {

  if (verifyModule(*GPUModule)) {
    DEBUG(dbgs() << "verifyModule failed on module:\n";
          GPUModule->print(dbgs(), nullptr); dbgs() << "\n";);
    DEBUG(dbgs() << "verifyModule Error:\n";
          verifyModule(*GPUModule, &dbgs()););

    if (FailOnVerifyModuleFailure)
      llvm_unreachable("VerifyModule failed.");

    BuildSuccessful = false;
    return "";
  }

  addCUDALibDevice();

  if (DumpKernelIR)
    outs() << *GPUModule << "\n";

  if (Arch != GPUArch::SPIR32 && Arch != GPUArch::SPIR64) {
    // Optimize module.
    llvm::legacy::PassManager OptPasses;
    PassManagerBuilder PassBuilder;
    PassBuilder.OptLevel = 3;
    PassBuilder.SizeLevel = 0;
    PassBuilder.populateModulePassManager(OptPasses);
    OptPasses.run(*GPUModule);
  }

  std::string Assembly = createKernelASM();

  if (DumpKernelASM)
    outs() << Assembly << "\n";

  GPUModule.release();
  KernelIDs.clear();

  return Assembly;
}
namespace {
class PPCGCodeGeneration : public ScopPass {
public:
  static char ID;

  GPURuntime Runtime = GPURuntime::CUDA;

  GPUArch Architecture = GPUArch::NVPTX64;

  /// The scop that is currently processed.
  Scop *S;

  LoopInfo *LI;
  DominatorTree *DT;
  ScalarEvolution *SE;
  const DataLayout *DL;
  RegionInfo *RI;

  PPCGCodeGeneration() : ScopPass(ID) {}
  /// Construct compilation options for PPCG.
  ///
  /// @returns The compilation options.
  ppcg_options *createPPCGOptions() {
    auto DebugOptions =
        (ppcg_debug_options *)malloc(sizeof(ppcg_debug_options));
    auto Options = (ppcg_options *)malloc(sizeof(ppcg_options));

    DebugOptions->dump_schedule_constraints = false;
    DebugOptions->dump_schedule = false;
    DebugOptions->dump_final_schedule = false;
    DebugOptions->dump_sizes = false;
    DebugOptions->verbose = false;

    Options->debug = DebugOptions;

    Options->group_chains = false;
    Options->reschedule = true;
    Options->scale_tile_loops = false;
    Options->wrap = false;

    Options->non_negative_parameters = false;
    Options->ctx = nullptr;
    Options->sizes = nullptr;

    Options->tile = true;
    Options->tile_size = 32;

    Options->isolate_full_tiles = false;

    Options->use_private_memory = PrivateMemory;
    Options->use_shared_memory = SharedMemory;
    Options->max_shared_memory = 48 * 1024;
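
    // 48 KB corresponds to the per-block shared-memory limit of most CUDA
    // devices; PPCG will not promote more data to shared memory than this.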
    Options->target = PPCG_TARGET_CUDA;
    Options->openmp = false;
    Options->linearize_device_arrays = true;
    Options->allow_gnu_extensions = false;

    Options->unroll_copy_shared = false;
    Options->unroll_gpu_tile = false;
    Options->live_range_reordering = true;

    Options->hybrid = false;
    Options->opencl_compiler_options = nullptr;
    Options->opencl_use_gpu = false;
    Options->opencl_n_include_file = 0;
    Options->opencl_include_files = nullptr;
    Options->opencl_print_kernel_types = false;
    Options->opencl_embed_kernel_code = false;

    Options->save_schedule_file = nullptr;
    Options->load_schedule_file = nullptr;

    return Options;
  }
  /// Get a tagged access relation containing all accesses of type @p AccessTy.
  ///
  /// Instead of a normal access of the form:
  ///
  ///   Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// a tagged access has the form:
  ///
  ///   [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
  ///
  /// where 'id' is an additional space that references the memory access that
  /// triggered the access.
  ///
  /// @param AccessTy The type of the memory accesses to collect.
  ///
  /// @return The relation describing all tagged memory accesses.
  isl_union_map *getTaggedAccesses(enum MemoryAccess::AccessType AccessTy) {
    isl_union_map *Accesses = isl_union_map_empty(S->getParamSpace());

    for (auto &Stmt : *S)
      for (auto &Acc : Stmt)
        if (Acc->getType() == AccessTy) {
          isl_map *Relation = Acc->getAccessRelation().release();
          Relation = isl_map_intersect_domain(Relation, Stmt.getDomain());

          isl_space *Space = isl_map_get_space(Relation);
          Space = isl_space_range(Space);
          Space = isl_space_from_range(Space);
          Space =
              isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
          isl_map *Universe = isl_map_universe(Space);
          Relation = isl_map_domain_product(Relation, Universe);
          Accesses = isl_union_map_add_map(Accesses, Relation);
        }

    return Accesses;
  }
  /// Get the set of all read accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedReads() {
    return getTaggedAccesses(MemoryAccess::READ);
  }

  /// Get the set of all may (and must) write accesses, tagged with the
  /// access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMayWrites() {
    return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE),
                               getTaggedAccesses(MemoryAccess::MUST_WRITE));
  }

  /// Get the set of all must-write accesses, tagged with the access id.
  ///
  /// @see getTaggedAccesses
  isl_union_map *getTaggedMustWrites() {
    return getTaggedAccesses(MemoryAccess::MUST_WRITE);
  }
  /// Collect parameter and array names as isl_ids.
  ///
  /// To reason about the different parameters and arrays used, ppcg requires
  /// a list of all isl_ids in use. As PPCG traditionally performs
  /// source-to-source compilation, each of these isl_ids is mapped to the
  /// expression that represents it. As we do not have a corresponding
  /// expression in Polly, we just map each id to a 'zero' expression to match
  /// the data format that ppcg expects.
  ///
  /// @returns Return a map from collected ids to 'zero' ast expressions.
  __isl_give isl_id_to_ast_expr *getNames() {
    auto *Names = isl_id_to_ast_expr_alloc(
        S->getIslCtx(),
        S->getNumParams() + std::distance(S->array_begin(), S->array_end()));
    auto *Zero = isl_ast_expr_from_val(isl_val_zero(S->getIslCtx()));

    for (const SCEV *P : S->parameters()) {
      isl_id *Id = S->getIdForParam(P);
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    for (auto &Array : S->arrays()) {
      auto Id = Array->getBasePtrId().release();
      Names = isl_id_to_ast_expr_set(Names, Id, isl_ast_expr_copy(Zero));
    }

    isl_ast_expr_free(Zero);

    return Names;
  }
  /// Create a new PPCG scop from the current scop.
  ///
  /// The PPCG scop is initialized with data from the current polly::Scop. From
  /// this initial data, the data-dependences in the PPCG scop are initialized.
  /// We do not use Polly's dependence analysis for now, to ensure we match
  /// the PPCG default behaviour more closely.
  ///
  /// @returns A new ppcg scop.
  ppcg_scop *createPPCGScop() {
    MustKillsInfo KillsInfo = computeMustKillsInfo(*S);

    auto PPCGScop = (ppcg_scop *)malloc(sizeof(ppcg_scop));

    PPCGScop->options = createPPCGOptions();
    // enable live range reordering
    PPCGScop->options->live_range_reordering = 1;

    PPCGScop->start = 0;
    PPCGScop->end = 0;

    PPCGScop->context = S->getContext();
    PPCGScop->domain = S->getDomains();
    // TODO: investigate this further. PPCG calls collect_call_domains.
    PPCGScop->call = isl_union_set_from_set(S->getContext());
    PPCGScop->tagged_reads = getTaggedReads();
    PPCGScop->reads = S->getReads();
    PPCGScop->live_in = nullptr;
    PPCGScop->tagged_may_writes = getTaggedMayWrites();
    PPCGScop->may_writes = S->getWrites();
    PPCGScop->tagged_must_writes = getTaggedMustWrites();
    PPCGScop->must_writes = S->getMustWrites();
    PPCGScop->live_out = nullptr;
    PPCGScop->tagged_must_kills = KillsInfo.TaggedMustKills.take();
    PPCGScop->must_kills = KillsInfo.MustKills.take();

    PPCGScop->tagger = nullptr;
    PPCGScop->independence =
        isl_union_map_empty(isl_set_get_space(PPCGScop->context));
    PPCGScop->dep_flow = nullptr;
    PPCGScop->tagged_dep_flow = nullptr;
    PPCGScop->dep_false = nullptr;
    PPCGScop->dep_forced = nullptr;
    PPCGScop->dep_order = nullptr;
    PPCGScop->tagged_dep_order = nullptr;

    PPCGScop->schedule = S->getScheduleTree();
    // If we have something non-trivial to kill, add it to the schedule.
    if (KillsInfo.KillsSchedule.get())
      PPCGScop->schedule = isl_schedule_sequence(
          PPCGScop->schedule, KillsInfo.KillsSchedule.take());

    PPCGScop->names = getNames();
    PPCGScop->pet = nullptr;

    compute_tagger(PPCGScop);
    compute_dependences(PPCGScop);
    eliminate_dead_code(PPCGScop);

    return PPCGScop;
  }
  /// Collect the array accesses in a statement.
  ///
  /// @param Stmt The statement for which to collect the accesses.
  ///
  /// @returns A list of array accesses.
  gpu_stmt_access *getStmtAccesses(ScopStmt &Stmt) {
    gpu_stmt_access *Accesses = nullptr;

    for (MemoryAccess *Acc : Stmt) {
      auto Access = isl_alloc_type(S->getIslCtx(), struct gpu_stmt_access);
      Access->read = Acc->isRead();
      Access->write = Acc->isWrite();
      Access->access = Acc->getAccessRelation().release();
      isl_space *Space = isl_map_get_space(Access->access);
      Space = isl_space_range(Space);
      Space = isl_space_from_range(Space);
      Space = isl_space_set_tuple_id(Space, isl_dim_in, Acc->getId().release());
      isl_map *Universe = isl_map_universe(Space);
      Access->tagged_access =
          isl_map_domain_product(Acc->getAccessRelation().release(), Universe);
      Access->exact_write = !Acc->isMayWrite();
      Access->ref_id = Acc->getId().release();
      Access->next = Accesses;
      Access->n_index = Acc->getScopArrayInfo()->getNumberOfDimensions();
      Accesses = Access;
    }

    return Accesses;
  }

  /// Collect the list of GPU statements.
  ///
  /// Each statement has an id, a pointer to the underlying data structure,
  /// as well as a list with all memory accesses.
  ///
  /// TODO: Initialize the list of memory accesses.
  ///
  /// @returns A linked-list of statements.
  gpu_stmt *getStatements() {
    gpu_stmt *Stmts = isl_calloc_array(S->getIslCtx(), struct gpu_stmt,
                                       std::distance(S->begin(), S->end()));

    int i = 0;
    for (auto &Stmt : *S) {
      gpu_stmt *GPUStmt = &Stmts[i];

      GPUStmt->id = Stmt.getDomainId();

      // We use the pet stmt pointer to keep track of the Polly statements.
      GPUStmt->stmt = (pet_stmt *)&Stmt;
      GPUStmt->accesses = getStmtAccesses(Stmt);
      i++;
    }

    return Stmts;
  }
  /// Derive the extent of an array.
  ///
  /// The extent of an array is the set of elements that are within the
  /// accessed array. For the inner dimensions, the extent constraints are
  /// 0 and the size of the corresponding array dimension. For the first
  /// (outermost) dimension, the extent constraints are the minimal and maximal
  /// subscript value for the first dimension.
  ///
  /// @param Array The array to derive the extent for.
  ///
  /// @returns An isl_set describing the extent of the array.
  __isl_give isl_set *getExtent(ScopArrayInfo *Array) {
    unsigned NumDims = Array->getNumberOfDimensions();
    isl_union_map *Accesses = S->getAccesses();
    Accesses = isl_union_map_intersect_domain(Accesses, S->getDomains());
    Accesses = isl_union_map_detect_equalities(Accesses);
    isl_union_set *AccessUSet = isl_union_map_range(Accesses);
    AccessUSet = isl_union_set_coalesce(AccessUSet);
    AccessUSet = isl_union_set_detect_equalities(AccessUSet);
    AccessUSet = isl_union_set_coalesce(AccessUSet);

    if (isl_union_set_is_empty(AccessUSet)) {
      isl_union_set_free(AccessUSet);
      return isl_set_empty(Array->getSpace().release());
    }

    if (Array->getNumberOfDimensions() == 0) {
      isl_union_set_free(AccessUSet);
      return isl_set_universe(Array->getSpace().release());
    }

    isl_set *AccessSet =
        isl_union_set_extract_set(AccessUSet, Array->getSpace().release());

    isl_union_set_free(AccessUSet);
    isl_local_space *LS =
        isl_local_space_from_space(Array->getSpace().release());

    isl_pw_aff *Val =
        isl_pw_aff_from_aff(isl_aff_var_on_domain(LS, isl_dim_set, 0));

    isl_pw_aff *OuterMin = isl_set_dim_min(isl_set_copy(AccessSet), 0);
    isl_pw_aff *OuterMax = isl_set_dim_max(AccessSet, 0);
    OuterMin = isl_pw_aff_add_dims(OuterMin, isl_dim_in,
                                   isl_pw_aff_dim(Val, isl_dim_in));
    OuterMax = isl_pw_aff_add_dims(OuterMax, isl_dim_in,
                                   isl_pw_aff_dim(Val, isl_dim_in));
    OuterMin = isl_pw_aff_set_tuple_id(OuterMin, isl_dim_in,
                                       Array->getBasePtrId().release());
    OuterMax = isl_pw_aff_set_tuple_id(OuterMax, isl_dim_in,
                                       Array->getBasePtrId().release());

    isl_set *Extent = isl_set_universe(Array->getSpace().release());

    Extent = isl_set_intersect(
        Extent, isl_pw_aff_le_set(OuterMin, isl_pw_aff_copy(Val)));
    Extent = isl_set_intersect(Extent, isl_pw_aff_ge_set(OuterMax, Val));

    for (unsigned i = 1; i < NumDims; ++i)
      Extent = isl_set_lower_bound_si(Extent, isl_dim_set, i, 0);

    for (unsigned i = 0; i < NumDims; ++i) {
      isl_pw_aff *PwAff =
          const_cast<isl_pw_aff *>(Array->getDimensionSizePw(i).release());

      // isl_pw_aff can be NULL for zero dimension. Only in the case of a
      // Fortran array will we have a legitimate dimension.
      if (!PwAff) {
        assert(i == 0 && "invalid dimension isl_pw_aff for nonzero dimension");
        continue;
      }

      isl_pw_aff *Val = isl_pw_aff_from_aff(isl_aff_var_on_domain(
          isl_local_space_from_space(Array->getSpace().release()), isl_dim_set,
          i));
      PwAff = isl_pw_aff_add_dims(PwAff, isl_dim_in,
                                  isl_pw_aff_dim(Val, isl_dim_in));
      PwAff = isl_pw_aff_set_tuple_id(PwAff, isl_dim_in,
                                      isl_pw_aff_get_tuple_id(Val, isl_dim_in));
      auto *Set = isl_pw_aff_gt_set(PwAff, Val);
      Extent = isl_set_intersect(Set, Extent);
    }

    return Extent;
  }
  /// Derive the bounds of an array.
  ///
  /// For the first dimension we derive the bound of the array from the extent
  /// of this dimension. For inner dimensions we obtain their size directly
  /// from ScopArrayInfo.
  ///
  /// @param PPCGArray The array to compute bounds for.
  /// @param Array The polly array from which to take the information.
  void setArrayBounds(gpu_array_info &PPCGArray, ScopArrayInfo *Array) {
    isl_pw_aff_list *BoundsList =
        isl_pw_aff_list_alloc(S->getIslCtx(), PPCGArray.n_index);
    std::vector<isl::pw_aff> PwAffs;

    isl_space *AlignSpace = S->getParamSpace();
    AlignSpace = isl_space_add_dims(AlignSpace, isl_dim_set, 1);

    if (PPCGArray.n_index > 0) {
      if (isl_set_is_empty(PPCGArray.extent)) {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        isl_local_space *LS = isl_local_space_from_space(
            isl_space_params(isl_set_get_space(Dom)));
        isl_set_free(Dom);
        isl_pw_aff *Zero = isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS));
        Zero = isl_pw_aff_align_params(Zero, isl_space_copy(AlignSpace));
        PwAffs.push_back(isl::manage(isl_pw_aff_copy(Zero)));
        BoundsList = isl_pw_aff_list_insert(BoundsList, 0, Zero);
      } else {
        isl_set *Dom = isl_set_copy(PPCGArray.extent);
        Dom = isl_set_project_out(Dom, isl_dim_set, 1, PPCGArray.n_index - 1);
        isl_pw_aff *Bound = isl_set_dim_max(isl_set_copy(Dom), 0);
        isl_set_free(Dom);
        Dom = isl_pw_aff_domain(isl_pw_aff_copy(Bound));
        isl_local_space *LS =
            isl_local_space_from_space(isl_set_get_space(Dom));
        isl_aff *One = isl_aff_zero_on_domain(LS);
        One = isl_aff_add_constant_si(One, 1);
        Bound = isl_pw_aff_add(Bound, isl_pw_aff_alloc(Dom, One));
        Bound = isl_pw_aff_gist(Bound, S->getContext());
        Bound = isl_pw_aff_align_params(Bound, isl_space_copy(AlignSpace));
        PwAffs.push_back(isl::manage(isl_pw_aff_copy(Bound)));
        BoundsList = isl_pw_aff_list_insert(BoundsList, 0, Bound);
      }
    }

    for (unsigned i = 1; i < PPCGArray.n_index; ++i) {
      isl_pw_aff *Bound = Array->getDimensionSizePw(i).release();
      auto LS = isl_pw_aff_get_domain_space(Bound);
      auto Aff = isl_multi_aff_zero(LS);
      Bound = isl_pw_aff_pullback_multi_aff(Bound, Aff);
      Bound = isl_pw_aff_align_params(Bound, isl_space_copy(AlignSpace));
      PwAffs.push_back(isl::manage(isl_pw_aff_copy(Bound)));
      BoundsList = isl_pw_aff_list_insert(BoundsList, i, Bound);
    }

    isl_space_free(AlignSpace);
    isl_space *BoundsSpace = isl_set_get_space(PPCGArray.extent);

    assert(BoundsSpace && "Unable to access space of array.");
    assert(BoundsList && "Unable to access list of bounds.");

    PPCGArray.bound =
        isl_multi_pw_aff_from_pw_aff_list(BoundsSpace, BoundsList);
    assert(PPCGArray.bound && "PPCGArray.bound was not constructed correctly.");
  }
  /// Create the arrays for @p PPCGProg.
  ///
  /// @param PPCGProg The program to compute the arrays for.
  void createArrays(gpu_prog *PPCGProg,
                    const SmallVector<ScopArrayInfo *, 4> &ValidSAIs) {
    int i = 0;
    for (auto &Array : ValidSAIs) {
      std::string TypeName;
      raw_string_ostream OS(TypeName);

      OS << *Array->getElementType();
      TypeName = OS.str();

      gpu_array_info &PPCGArray = PPCGProg->array[i];

      PPCGArray.space = Array->getSpace().release();
      PPCGArray.type = strdup(TypeName.c_str());
      PPCGArray.size = Array->getElementType()->getPrimitiveSizeInBits() / 8;
      PPCGArray.name = strdup(Array->getName().c_str());
      PPCGArray.extent = nullptr;
      PPCGArray.n_index = Array->getNumberOfDimensions();
      PPCGArray.extent = getExtent(Array);
      PPCGArray.n_ref = 0;
      PPCGArray.refs = nullptr;
      PPCGArray.accessed = true;
      PPCGArray.read_only_scalar =
          Array->isReadOnly() && Array->getNumberOfDimensions() == 0;
      PPCGArray.has_compound_element = false;
      PPCGArray.local = false;
      PPCGArray.declare_local = false;
      PPCGArray.global = false;
      PPCGArray.linearize = false;
      PPCGArray.dep_order = nullptr;
      PPCGArray.user = Array;

      PPCGArray.bound = nullptr;
      setArrayBounds(PPCGArray, Array);
      i++;

      collect_references(PPCGProg, &PPCGArray);
    }
  }
  /// Create an identity map between the arrays in the scop.
  ///
  /// @returns An identity map between the arrays in the scop.
  isl_union_map *getArrayIdentity() {
    isl_union_map *Maps = isl_union_map_empty(S->getParamSpace());

    for (auto &Array : S->arrays()) {
      isl_space *Space = Array->getSpace().release();
      Space = isl_space_map_from_set(Space);
      isl_map *Identity = isl_map_identity(Space);
      Maps = isl_union_map_add_map(Maps, Identity);
    }

    return Maps;
  }
  /// Create a default-initialized PPCG GPU program.
  ///
  /// @returns A new gpu program description.
  gpu_prog *createPPCGProg(ppcg_scop *PPCGScop) {

    auto PPCGProg = isl_calloc_type(S->getIslCtx(), struct gpu_prog);

    PPCGProg->ctx = S->getIslCtx();
    PPCGProg->scop = PPCGScop;
    PPCGProg->context = isl_set_copy(PPCGScop->context);
    PPCGProg->read = isl_union_map_copy(PPCGScop->reads);
    PPCGProg->may_write = isl_union_map_copy(PPCGScop->may_writes);
    PPCGProg->must_write = isl_union_map_copy(PPCGScop->must_writes);
    PPCGProg->tagged_must_kill =
        isl_union_map_copy(PPCGScop->tagged_must_kills);
    PPCGProg->to_inner = getArrayIdentity();
    PPCGProg->to_outer = getArrayIdentity();
    // TODO: verify that this assignment is correct.
    PPCGProg->any_to_outer = nullptr;

    // This needs to be set when live range reordering is enabled.
    // NOTE: I believe that is conservatively correct. I'm not sure
    // what the semantics of this is.
    // Quoting PPCG/gpu.h: "Order dependences on non-scalars."
    PPCGProg->array_order =
        isl_union_map_empty(isl_set_get_space(PPCGScop->context));
    PPCGProg->n_stmts = std::distance(S->begin(), S->end());
    PPCGProg->stmts = getStatements();

    // Only consider arrays that have a non-empty extent.
    // Otherwise, this will cause us to consider the following kinds of
    // arrays:
    // 1. Invariant loads that are represented by SAI objects.
    // 2. Arrays with statically known zero size.
    auto ValidSAIsRange =
        make_filter_range(S->arrays(), [this](ScopArrayInfo *SAI) -> bool {
          return !isl::manage(getExtent(SAI)).is_empty();
        });
    SmallVector<ScopArrayInfo *, 4> ValidSAIs(ValidSAIsRange.begin(),
                                              ValidSAIsRange.end());

    PPCGProg->n_array =
        ValidSAIs.size(); // std::distance(S->array_begin(), S->array_end());
    PPCGProg->array = isl_calloc_array(S->getIslCtx(), struct gpu_array_info,
                                       PPCGProg->n_array);

    createArrays(PPCGProg, ValidSAIs);

    PPCGProg->may_persist = compute_may_persist(PPCGProg);

    return PPCGProg;
  }

  struct PrintGPUUserData {
    struct cuda_info *CudaInfo;
    struct gpu_prog *PPCGProg;
    std::vector<ppcg_kernel *> Kernels;
  };

  /// Print a user statement node in the host code.
  ///
  /// We use ppcg's printing facilities to print the actual statement and
  /// additionally build up a list of all kernels that are encountered in the
  /// host ast.
  ///
  /// @param P       The printer to print to
  /// @param Options The printing options to use
  /// @param Node    The node to print
  /// @param User    A user pointer to carry additional data. This pointer is
  ///                expected to be of type PrintGPUUserData.
  ///
  /// @returns A printer to which the output has been printed.
  static __isl_give isl_printer *
  printHostUser(__isl_take isl_printer *P,
                __isl_take isl_ast_print_options *Options,
                __isl_take isl_ast_node *Node, void *User) {
    auto Data = (struct PrintGPUUserData *)User;
    auto Id = isl_ast_node_get_annotation(Node);

    if (Id) {
      bool IsUser = !strcmp(isl_id_get_name(Id), "user");

      // If this is a user statement, format it ourselves as ppcg would
      // otherwise try to call pet functionality that is not available in
      // Polly.
      if (IsUser) {
        P = isl_printer_start_line(P);
        P = isl_printer_print_ast_node(P, Node);
        P = isl_printer_end_line(P);

        isl_id_free(Id);
        isl_ast_node_free(Node);
        isl_ast_print_options_free(Options);

        return P;
      }

      auto Kernel = (struct ppcg_kernel *)isl_id_get_user(Id);
      isl_id_free(Id);
      Data->Kernels.push_back(Kernel);
    }

    return print_host_user(P, Options, Node, User);
  }
  /// Print C code corresponding to the control flow in @p Kernel.
  ///
  /// @param Kernel The kernel to print
  void printKernel(ppcg_kernel *Kernel) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);
    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    P = isl_ast_node_print(Kernel->tree, P, Options);
    char *String = isl_printer_get_str(P);
    printf("%s\n", String);
    free(String);
    isl_printer_free(P);
  }

  /// Print C code corresponding to the GPU code described by @p Tree.
  ///
  /// @param Tree     An AST describing GPU code
  /// @param PPCGProg The PPCG program from which @p Tree has been constructed.
  void printGPUTree(isl_ast_node *Tree, gpu_prog *PPCGProg) {
    auto *P = isl_printer_to_str(S->getIslCtx());
    P = isl_printer_set_output_format(P, ISL_FORMAT_C);

    PrintGPUUserData Data;
    Data.PPCGProg = PPCGProg;

    auto *Options = isl_ast_print_options_alloc(S->getIslCtx());
    Options =
        isl_ast_print_options_set_print_user(Options, printHostUser, &Data);
    P = isl_ast_node_print(Tree, P, Options);
    char *String = isl_printer_get_str(P);

    printf("%s\n", String);

    free(String);

    isl_printer_free(P);

    for (auto Kernel : Data.Kernels) {
      printf("# kernel%d\n", Kernel->id);
      printKernel(Kernel);
    }
  }
  // Generate a GPU program using PPCG.
  //
  // GPU mapping consists of multiple steps:
  //
  //  1) Compute new schedule for the program.
  //  2) Map schedule to GPU (TODO)
  //  3) Generate code for new schedule (TODO)
  //
  // We do not use the Polly ScheduleOptimizer here, as it is mostly CPU
  // specific. Instead, we use PPCG's GPU code generation strategy directly
  // from this pass.
  gpu_gen *generateGPU(ppcg_scop *PPCGScop, gpu_prog *PPCGProg) {

    auto PPCGGen = isl_calloc_type(S->getIslCtx(), struct gpu_gen);

    PPCGGen->ctx = S->getIslCtx();
    PPCGGen->options = PPCGScop->options;
    PPCGGen->print = nullptr;
    PPCGGen->print_user = nullptr;
    PPCGGen->build_ast_expr = &pollyBuildAstExprForStmt;
    PPCGGen->prog = PPCGProg;
    PPCGGen->tree = nullptr;
    PPCGGen->types.n = 0;
    PPCGGen->types.name = nullptr;
    PPCGGen->sizes = nullptr;
    PPCGGen->used_sizes = nullptr;
    PPCGGen->kernel_id = 0;

    // Set scheduling strategy to same strategy PPCG is using.
    isl_options_set_schedule_outer_coincidence(PPCGGen->ctx, true);
    isl_options_set_schedule_maximize_band_depth(PPCGGen->ctx, true);
    isl_options_set_schedule_whole_component(PPCGGen->ctx, false);

    isl_schedule *Schedule = get_schedule(PPCGGen);

    int has_permutable = has_any_permutable_node(Schedule);

    if (!has_permutable || has_permutable < 0) {
      Schedule = isl_schedule_free(Schedule);
    } else {
      Schedule = map_to_device(PPCGGen, Schedule);
      PPCGGen->tree = generate_code(PPCGGen, isl_schedule_copy(Schedule));
    }

    if (DumpSchedule) {
      isl_printer *P = isl_printer_to_str(S->getIslCtx());
      P = isl_printer_set_yaml_style(P, ISL_YAML_STYLE_BLOCK);
      P = isl_printer_print_str(P, "Schedule\n");
      P = isl_printer_print_str(P, "========\n");
      if (Schedule)
        P = isl_printer_print_schedule(P, Schedule);
      else
        P = isl_printer_print_str(P, "No schedule found\n");

      printf("%s\n", isl_printer_get_str(P));
      isl_printer_free(P);
    }

    if (DumpCode) {
      printf("Code\n");
      printf("====\n");
      if (PPCGGen->tree)
        printGPUTree(PPCGGen->tree, PPCGProg);
      else
        printf("No code generated\n");
    }

    isl_schedule_free(Schedule);

    return PPCGGen;
  }
  /// Free gpu_gen structure.
  ///
  /// @param PPCGGen The ppcg_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg is not freeing these options for us. To avoid leaks we do this
  /// ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }
  /// Approximate the number of points in the set.
  ///
  /// This function returns an ast expression that overapproximates the number
  /// of points in an isl set through the rectangular hull surrounding this
  /// set.
  ///
  /// @param Set   The set to count.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of points in the set.
  __isl_give isl_ast_expr *approxPointsInSet(__isl_take isl_set *Set,
                                             __isl_keep isl_ast_build *Build) {

    isl_val *One = isl_val_int_from_si(isl_set_get_ctx(Set), 1);
    auto *Expr = isl_ast_expr_from_val(isl_val_copy(One));

    isl_space *Space = isl_set_get_space(Set);
    Space = isl_space_params(Space);
    auto *Univ = isl_set_universe(Space);
    isl_pw_aff *OneAff = isl_pw_aff_val_on_domain(Univ, One);

    for (long i = 0; i < isl_set_dim(Set, isl_dim_set); i++) {
      isl_pw_aff *Max = isl_set_dim_max(isl_set_copy(Set), i);
      isl_pw_aff *Min = isl_set_dim_min(isl_set_copy(Set), i);
      isl_pw_aff *DimSize = isl_pw_aff_sub(Max, Min);
      DimSize = isl_pw_aff_add(DimSize, isl_pw_aff_copy(OneAff));
      auto DimSizeExpr = isl_ast_build_expr_from_pw_aff(Build, DimSize);
      Expr = isl_ast_expr_mul(Expr, DimSizeExpr);
    }

    isl_set_free(Set);
    isl_pw_aff_free(OneAff);

    return Expr;
  }
  /// Approximate a number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt  The statement for which to compute the number of dynamic
  ///              instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of dynamic instructions executed
  ///          by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
    auto Iterations = approxPointsInSet(Stmt.getDomain(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }
  /// Approximate dynamic instructions executed in scop.
  ///
  /// @param S     The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An approximation of the number of dynamic instructions executed
  ///          in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }
  /// Create a check that ensures sufficient compute in scop.
  ///
  /// @param S     The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  ///
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE, otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }
  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
                                            bool AllowCUDALibDevice) {
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
                                          AllowCUDALibDevice)) {
        continue;
      }

      for (Value *SrcVal : Inst.operands()) {
        PointerType *p = dyn_cast<PointerType>(SrcVal->getType());
        if (!p)
          continue;
        if (isa<FunctionType>(p->getElementType()))
          return true;
      }
    }
    return false;
  }
  /// Return whether the Scop S uses functions in a way that we do not support.
  bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
                                                 AllowCUDALibDevice))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
            return true;
      }
    }
    return false;
  }
  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder = createPollyIRBuilder(EnteringBB, Annotator);

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important as the conditional
    // branch will guard the original scop from new induction variables that
    // the SCEVExpander may introduce while code generating the parameters and
    // which may introduce scalar dependences that prevent us from correctly
    // code generating this scop.
    BBPair StartExitBlocks;
    BranchInst *CondBr = nullptr;
    std::tie(StartExitBlocks, CondBr) =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    assert(CondBr && "CondBr not initialized by executeScopConditionally");

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());
    NodeBuilder.addParameters(S->getContext());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx());
    isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition = isl_ast_expr_and(Condition, SufficientCompute);
    isl_ast_build_free(Build);

    // Preload invariant loads. Note: This should happen before the RTC
    // because the RTC may depend on values that are invariant load hoisted.
    if (!NodeBuilder.preloadInvariantLoads())
      report_fatal_error("preloading invariant loads failed in function: " +
                         S->getFunction().getName() +
                         " | Scop Region: " + S->getNameStr());

    Value *RTC = NodeBuilder.createRTC(Condition);
    Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

    Builder.SetInsertPoint(&*StartBlock->begin());

    NodeBuilder.create(Root);

    /// In case a sequential kernel has more surrounding loops than any
    /// parallel kernel, the SCoP is probably mostly sequential. Hence, there
    /// is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      CondBr->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      CondBr->setOperand(0, Builder.getFalse());
  }
  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation will need to offload function calls to the
    // kernel. This may lead to a kernel trying to call a function on the host.
    // This also allows us to prevent codegen from trying to take the
    // address of an intrinsic function to send to the kernel.
    if (containsInvalidKernelFunction(CurrentScop,
                                      Architecture == GPUArch::NVPTX64)) {
      DEBUG(dbgs()
            << "Scop contains function which cannot be materialised in a GPU "
               "kernel. Bailing out.\n";);
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree) {
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
      CurrentScop.markAsToBeSkipped();
    }

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }
  void printScop(raw_ostream &, Scop &) const override {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<BasicAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<ScopDetectionWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<SCEVAAWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    // region tree.
    AU.addPreserved<RegionInfoPass>();
    AU.addPreserved<ScopInfoRegionPass>();
  }
};
} // namespace
char PPCGCodeGeneration::ID = 1;

Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}

INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)