Update PME CUDA spread/gather
[gromacs.git] / src / gromacs / ewald / pme_gpu_program_impl.h
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief
 * Declares PmeGpuProgramImpl, which stores PME GPU (compiled) kernel handles.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#ifndef GMX_EWALD_PME_PME_GPU_PROGRAM_IMPL_H
#define GMX_EWALD_PME_PME_GPU_PROGRAM_IMPL_H

#include "config.h"

#include "gromacs/gpu_utils/gputraits.h"
#include "gromacs/utility/classhelpers.h"

struct gmx_device_info_t;

/*! \internal
 * \brief
 * PME GPU persistent host program/kernel data, which should be initialized once for the whole execution.
 *
 * Its primary purpose is to avoid recompiling the GPU kernels for each OpenCL unit test,
 * for as long as the relevant GPU context (e.g. cl_context) instance persists.
 * In CUDA, this just assigns the kernel function pointers.
 * This also relies implicitly on the fact that a reasonable share of the kernels is always used.
 * If there were more template parameters, an even smaller share of all possible kernels would be used.
 *
 * \todo In future, if we need to react to either user input or
 * auto-tuning to compile different kernels, then we might wish to
 * revisit the number of kernels we pre-compile, and/or the management
 * of their lifetime.
 *
 * This also doesn't manage cuFFT/clFFT kernels, which depend on the PME grid dimensions.
 *
 * TODO: pass cl_context to the constructor and not create it inside.
 * See also Redmine #2522.
 */
struct PmeGpuProgramImpl
{
    /*! \brief
     * This is a handle to the GPU context, which is just a dummy in CUDA,
     * but is created/destroyed by this class in OpenCL.
     * TODO: Later we want to be able to own the context at a higher level and not here,
     * but this class would still need the non-owning context handle to build the kernels.
     */
    DeviceContext context;
    //! Conveniently all the PME kernels use the same single argument type
#if GMX_GPU == GMX_GPU_CUDA
    using PmeKernelHandle = void (*)(const struct PmeGpuCudaKernelParams);
#elif GMX_GPU == GMX_GPU_OPENCL
    using PmeKernelHandle = cl_kernel;
#else
    using PmeKernelHandle = void*;
#endif

    /*! \brief
     * Maximum synchronous GPU thread group execution width.
     * "Warp" is a CUDA term which we end up reusing in OpenCL kernels as well.
     * For CUDA, this is a static value that comes from gromacs/gpu_utils/cuda_arch_utils.cuh;
     * for OpenCL, we have to query it dynamically.
     */
    size_t warpSize;
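
    /* A minimal sketch (assumed, not from this header) of the dynamic OpenCL
     * query mentioned above; `kernel` and `deviceId` are hypothetical handles,
     * and error checking is omitted.
     *
     *   size_t warpSize = 0;
     *   clGetKernelWorkGroupInfo(kernel, deviceId,
     *                            CL_KERNEL_PREFERRED_WORK_GROUP_SIZE_MULTIPLE,
     *                            sizeof(warpSize), &warpSize, nullptr);
     */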
    //@{
    /** Spread/spline kernels are compiled only for an interpolation order of 4.
     * There are multiple versions of each kernel, parameterized according to:
     * - the number of threads per atom: either order (4) or order*order (16)
     *   threads per atom is supported;
     * - whether the spline data is written in the spline/spread kernel and
     *   loaded in the gather, or recalculated in the gather.
     * Spreading kernels also have hardcoded X/Y indices wrapping parameters,
     * as a placeholder for implementing 1/2D decomposition.
     * A variant-selection sketch follows this group.
     */
    size_t spreadWorkGroupSize;

    PmeKernelHandle splineKernel;
    PmeKernelHandle splineKernelThPerAtom4;
    PmeKernelHandle spreadKernel;
    PmeKernelHandle spreadKernelThPerAtom4;
    PmeKernelHandle splineAndSpreadKernel;
    PmeKernelHandle splineAndSpreadKernelThPerAtom4;
    PmeKernelHandle splineAndSpreadKernelWriteSplines;
    PmeKernelHandle splineAndSpreadKernelWriteSplinesThPerAtom4;
    //@}
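
    /* Illustrative sketch of picking a combined spline-and-spread variant;
     * `writeSplines` and `useOrderThreads` (true = order (4) threads per atom)
     * are hypothetical caller-side flags, not part of this header.
     *
     *   PmeKernelHandle kernel;
     *   if (writeSplines)
     *   {
     *       kernel = useOrderThreads ? splineAndSpreadKernelWriteSplinesThPerAtom4
     *                                : splineAndSpreadKernelWriteSplines;
     *   }
     *   else
     *   {
     *       kernel = useOrderThreads ? splineAndSpreadKernelThPerAtom4
     *                                : splineAndSpreadKernel;
     *   }
     */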

    //@{
    /** Same for gather: hardcoded X/Y unwrap parameters, order of 4, plus
     * it can either reduce with previous forces in the host buffer, or ignore them.
     * Also, similarly to the spread, we can use either order (4) or order*order (16)
     * threads per atom, and either recalculate the splines or read the ones
     * written by the spread. A selection sketch follows this group.
     */
    size_t gatherWorkGroupSize;

    PmeKernelHandle gatherReduceWithInputKernel;
    PmeKernelHandle gatherReduceWithInputKernelThPerAtom4;
    PmeKernelHandle gatherKernel;
    PmeKernelHandle gatherKernelThPerAtom4;
    PmeKernelHandle gatherReduceWithInputKernelReadSplines;
    PmeKernelHandle gatherReduceWithInputKernelReadSplinesThPerAtom4;
    PmeKernelHandle gatherKernelReadSplines;
    PmeKernelHandle gatherKernelReadSplinesThPerAtom4;
    //@}
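
    /* Illustrative sketch of gather-kernel selection along its three axes
     * (reduce with input forces, read vs. recalculate splines, threads per
     * atom); all three flags are hypothetical caller-side booleans.
     *
     *   PmeKernelHandle kernel;
     *   if (reduceWithInput)
     *   {
     *       kernel = readSplines ? (useOrderThreads ? gatherReduceWithInputKernelReadSplinesThPerAtom4
     *                                               : gatherReduceWithInputKernelReadSplines)
     *                            : (useOrderThreads ? gatherReduceWithInputKernelThPerAtom4
     *                                               : gatherReduceWithInputKernel);
     *   }
     *   else
     *   {
     *       kernel = readSplines ? (useOrderThreads ? gatherKernelReadSplinesThPerAtom4
     *                                               : gatherKernelReadSplines)
     *                            : (useOrderThreads ? gatherKernelThPerAtom4
     *                                               : gatherKernel);
     *   }
     */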

    //@{
    /** Solve kernel doesn't care about the interpolation order, but can optionally
     * compute energy and virial, and supports XYZ and YZX grid orderings;
     * a selection sketch follows this group.
     */
    size_t solveMaxWorkGroupSize;

    PmeKernelHandle solveYZXKernel;
    PmeKernelHandle solveXYZKernel;
    PmeKernelHandle solveYZXEnergyKernel;
    PmeKernelHandle solveXYZEnergyKernel;
    //@}
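
    /* Illustrative sketch of solve-kernel selection; `gridOrderingIsYZX` and
     * `computeEnergyAndVirial` are hypothetical caller-side flags.
     *
     *   PmeKernelHandle kernel;
     *   if (gridOrderingIsYZX)
     *   {
     *       kernel = computeEnergyAndVirial ? solveYZXEnergyKernel : solveYZXKernel;
     *   }
     *   else
     *   {
     *       kernel = computeEnergyAndVirial ? solveXYZEnergyKernel : solveXYZKernel;
     *   }
     */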

    PmeGpuProgramImpl() = delete;
    //! Constructor for the given device
    explicit PmeGpuProgramImpl(const gmx_device_info_t *deviceInfo);
    ~PmeGpuProgramImpl();
    GMX_DISALLOW_COPY_AND_ASSIGN(PmeGpuProgramImpl);

private:
    // Compiles kernels, if supported. Called by the constructor.
    void compileKernels(const gmx_device_info_t *deviceInfo);
};

#endif