/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief This file defines the PME CUDA-specific data structure,
 * various compile-time constants shared among the PME CUDA kernels,
 * and also names some PME CUDA memory management routines.
 * TODO: consider changing defines into variables where possible; have inline getters.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */
#ifndef GMX_EWALD_PME_CUH
#define GMX_EWALD_PME_CUH

#include <cassert>

#include "pme-gpu-constants.h"
#include "pme-gpu-internal.h"
#include "pme-gpu-types.h"
#include "pme-gpu-types-host.h"
#include "pme-gpu-types-host-impl.h"
/*! \internal \brief
 * Gets a base of the unique index to an element in a spline parameter buffer (theta/dtheta),
 * which is laid out for GPU spread/gather kernels. The base only corresponds to the atom index within the execution block.
 * Feed the result into getSplineParamIndex() to get a full index.
 * TODO: it's likely that both parameters can be just replaced with a single atom index, as they are derived from it.
 * Do that, verifying that the generated code is not bloated, and/or revise the spline indexing scheme.
 * Removing warp dependency would also be nice (and would probably coincide with removing PME_SPREADGATHER_ATOMS_PER_WARP).
 *
 * \tparam    order            PME order
 * \param[in] warpIndex        Warp index wrt the block.
 * \param[in] atomWarpIndex    Atom index wrt the warp (from 0 to PME_SPREADGATHER_ATOMS_PER_WARP - 1).
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
template <int order>
int __host__ __device__ __forceinline__ getSplineParamIndexBase(int warpIndex, int atomWarpIndex)
{
    assert((atomWarpIndex >= 0) && (atomWarpIndex < PME_SPREADGATHER_ATOMS_PER_WARP));
    const int dimIndex    = 0;
    const int splineIndex = 0;
    // The zeroes are here to preserve the full index formula for reference
    return (((splineIndex + order * warpIndex) * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex);
}
/*! \internal \brief
 * Gets a unique index to an element in a spline parameter buffer (theta/dtheta),
 * which is laid out for GPU spread/gather kernels. The index is wrt the execution block,
 * in range(0, atomsPerBlock * order * DIM).
 * This function consumes the result of getSplineParamIndexBase() and adjusts it for \p dimIndex and \p splineIndex.
 *
 * \tparam    order            PME order
 * \param[in] paramIndexBase   Must be a result of getSplineParamIndexBase().
 * \param[in] dimIndex         Dimension index (from 0 to 2)
 * \param[in] splineIndex      Spline contribution index (from 0 to \p order - 1)
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
template <int order>
int __host__ __device__ __forceinline__ getSplineParamIndex(int paramIndexBase, int dimIndex, int splineIndex)
{
    assert((dimIndex >= XX) && (dimIndex < DIM));
    assert((splineIndex >= 0) && (splineIndex < order));
    return (paramIndexBase + (splineIndex * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP);
}
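
/* A minimal usage sketch, added here for illustration only (not part of the original header).
 * It assumes the atom's block-local index can simply be split into a warp index and an
 * intra-warp atom index; the function name is hypothetical.
 */
template <int order>
int __host__ __device__ __forceinline__ getSplineParamFullIndexExample(int atomIndexLocal, int dimIndex, int splineIndex)
{
    // Derive the two warp-related indices from the block-local atom index
    const int warpIndex      = atomIndexLocal / PME_SPREADGATHER_ATOMS_PER_WARP;
    const int atomWarpIndex  = atomIndexLocal % PME_SPREADGATHER_ATOMS_PER_WARP;
    // Combine the base with the per-dimension/per-spline offset into a full theta/dtheta index
    const int paramIndexBase = getSplineParamIndexBase<order>(warpIndex, atomWarpIndex);
    return getSplineParamIndex<order>(paramIndexBase, dimIndex, splineIndex);
}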
/*! \internal \brief
 * An inline CUDA function for checking the global atom data indices against the atom data array sizes.
 *
 * \param[in] atomDataIndex    The atom data index.
 * \param[in] nAtomData        The atom data array element count.
 * \returns                    Non-0 if index is within bounds (or PME data padding is enabled), 0 otherwise.
 *
 * This is called from the spline_and_spread and gather PME kernels.
 * The goal is to isolate the global range checks, and allow avoiding them with c_usePadding enabled.
 */
int __device__ __forceinline__ pme_gpu_check_atom_data_index(const int atomDataIndex, const int nAtomData)
{
    return c_usePadding ? 1 : (atomDataIndex < nAtomData);
}
/*! \internal \brief
 * An inline CUDA function for skipping the zero-charge atoms.
 *
 * \param[in] coefficient    The atom charge.
 * \returns                  Non-0 if atom should be processed, 0 otherwise.
 *
 * This is called from the spline_and_spread and gather PME kernels.
 */
int __device__ __forceinline__ pme_gpu_check_atom_charge(const float coefficient)
{
    assert(isfinite(coefficient));
    return c_skipNeutralAtoms ? (coefficient != 0.0f) : 1;
}
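
/* An illustrative combination of the two checks above, added for exposition (not present in
 * the original header): this is roughly how a spread/gather kernel would decide whether an
 * atom contributes any work. The function name is hypothetical.
 */
int __device__ __forceinline__ pme_gpu_example_atom_needs_work(const int   atomDataIndex,
                                                               const int   nAtomData,
                                                               const float coefficient)
{
    // Skip atoms that are out of range (when padding is disabled) or have zero charge
    return pme_gpu_check_atom_data_index(atomDataIndex, nAtomData) &&
           pme_gpu_check_atom_charge(coefficient);
}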
/*! \internal \brief
 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize the number of unused blocks.
 *
 * \tparam    PmeGpu        The PME GPU structure type.
 * \param[in] pmeGpu        The PME GPU structure (used for the maximum grid width in X).
 * \param[in] blockCount    The total number of blocks to schedule.
 */
template <typename PmeGpu>
dim3 __host__ inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
    return dim3(colCount, minRowCount);
}
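
/* A worked example with assumed numbers (not taken from the original file): with
 * pmeGpu->maxGridWidthX == 65535 and blockCount == 100000, a single grid row cannot hold all
 * blocks, so minRowCount == (100000 + 65534) / 65535 == 2 and colCount == (100000 + 1) / 2 == 50000;
 * the returned dim3(50000, 2) schedules exactly 100000 blocks with none unused.
 * A hypothetical launch (kernel, block size and stream names are placeholders) could then look like:
 *
 *     dim3 blocks  = pmeGpuCreateGrid(pmeGpu, blockCount);
 *     dim3 threads(threadsPerBlock);
 *     pme_some_kernel<<< blocks, threads, 0, stream >>>(*kernelParamsPtr);
 */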
/*! \internal \brief
 * A single structure encompassing all the PME data used in CUDA kernels.
 * This inherits from PmeGpuKernelParamsBase and adds a couple of cudaTextureObject_t handles,
 * which we would like to keep out of the plain C++ code.
 */
struct PmeGpuCudaKernelParams : PmeGpuKernelParamsBase
{
    /* These are CUDA texture objects, related to the grid size. */
    /*! \brief CUDA texture object for accessing grid.d_fractShiftsTable */
    cudaTextureObject_t fractShiftsTableTexture;
    /*! \brief CUDA texture object for accessing grid.d_gridlineIndicesTable */
    cudaTextureObject_t gridlineIndicesTableTexture;
};

#endif