/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013-2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements PME GPU spline calculation and charge spreading in CUDA.
 * TODO: consider always pre-sorting particles (as in DD case).
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 */

#include "gmxpre.h"

#include <cassert>

#include "gromacs/ewald/pme.h"
#include "gromacs/gpu_utils/cuda_kernel_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"

#include "pme.cuh"
#include "pme-timings.cuh"

/*
 * This define affects the spline calculation behaviour in the kernel.
 * 0: a single GPU thread handles a single dimension of a single particle (calculating and storing (order) spline values and derivatives).
 * 1: (order) threads do redundant work on this same task, each one stores only a single theta and single dtheta into global arrays.
 * The only efficiency difference is fewer global store operations, countered by more redundant spline computation.
 *
 * TODO: estimate if this should be a boolean parameter (and add it to the unit test if so).
 */
#define PME_GPU_PARALLEL_SPLINE 0
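
/* An illustrative sketch of the two modes for a single atom and a single dimension
 * (order 4 assumed here purely for the example):
 * PME_GPU_PARALLEL_SPLINE == 0: one thread computes theta[0..3] and dtheta[0..3] and stores all of them;
 * PME_GPU_PARALLEL_SPLINE == 1: 4 threads each recompute the full spline, but thread i stores only
 * theta[i] and dtheta[i], trading redundant arithmetic for fewer global stores per thread.
 */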

//! Spreading max block width in warps picked among powers of 2 (2, 4, 8, 16) for max. occupancy and min. runtime in most cases
constexpr int c_spreadMaxWarpsPerBlock = 8;
/* TODO: it has been observed that the kernel can be faster with smaller block sizes (2 or 4 warps)
 * only on some GPUs (660Ti) with a large enough grid (>= 48^3) due to load/store units being overloaded
 * (ldst_fu_utilization metric maxed out in nvprof). Runtime block size choice might be nice to have.
 * This has been tried on architectures up to Maxwell (GTX 750) and it would be good to revisit this.
 */
//! Spreading max block size in threads
constexpr int c_spreadMaxThreadsPerBlock = c_spreadMaxWarpsPerBlock * warp_size;
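
/* A quick sanity check of the resulting sizes (assuming warp_size == 32 and fourth-order
 * interpolation, i.e. order^2 == 16 threads per atom, as used in the launch code below):
 * c_spreadMaxThreadsPerBlock = 8 * 32 = 256 threads, so each block processes 256 / 16 = 16 atoms.
 */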

/*! \brief
 * General purpose function for loading atom-related data from global to shared memory.
 *
 * \tparam[in] T                 Data type (float/int/...).
 * \tparam[in] atomsPerBlock     Number of atoms processed by a block - should be accounted for in the size of the shared memory array.
 * \tparam[in] dataCountPerAtom  Number of data elements per single atom (e.g. DIM for an rvec coordinates array).
 * \param[in]  kernelParams      Input PME CUDA data in constant memory.
 * \param[out] sm_destination    Shared memory array for output.
 * \param[in]  gm_source         Global memory array for input.
 */
template <typename T,
          const int atomsPerBlock,
          const int dataCountPerAtom>
__device__ __forceinline__
void pme_gpu_stage_atom_data(const PmeGpuCudaKernelParams kernelParams,
                             T * __restrict__             sm_destination,
                             const T * __restrict__       gm_source)
{
    static_assert(c_usePadding, "With padding disabled, index checking should be fixed to account for spline theta/dtheta per-warp alignment");
    const int blockIndex       = blockIdx.y * gridDim.x + blockIdx.x;
    const int threadLocalIndex = ((threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x) + threadIdx.x;
    const int localIndex       = threadLocalIndex;
    const int globalIndexBase  = blockIndex * atomsPerBlock * dataCountPerAtom;
    const int globalIndex      = globalIndexBase + localIndex;
    const int globalCheck      = pme_gpu_check_atom_data_index(globalIndex, kernelParams.atoms.nAtoms * dataCountPerAtom);
    if ((localIndex < atomsPerBlock * dataCountPerAtom) & globalCheck)
    {
        assert(isfinite(float(gm_source[globalIndex])));
        sm_destination[localIndex] = gm_source[globalIndex];
    }
}

/*! \brief
 * PME GPU spline parameter and gridline indices calculation.
 * This corresponds to the CPU functions calc_interpolation_idx() and make_bsplines().
 * First stage of the whole kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] atomsPerBlock        Number of atoms processed by a block - should be accounted for
 *                                  in the sizes of the shared memory arrays.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coordinates       Atom coordinates in the shared memory.
 * \param[in]  sm_coefficients      Atom charges/coefficients in the shared memory.
 * \param[out] sm_theta             Atom spline values in the shared memory.
 * \param[out] sm_gridlineIndices   Atom gridline indices in the shared memory.
 */
template <const int order,
          const int atomsPerBlock>
__device__ __forceinline__ void calculate_splines(const PmeGpuCudaKernelParams kernelParams,
                                                  const int                    atomIndexOffset,
                                                  const float3 * __restrict__  sm_coordinates,
                                                  const float * __restrict__   sm_coefficients,
                                                  float * __restrict__         sm_theta,
                                                  int * __restrict__           sm_gridlineIndices)
{
    /* Global memory pointers for output */
    float * __restrict__ gm_theta           = kernelParams.atoms.d_theta;
    float * __restrict__ gm_dtheta          = kernelParams.atoms.d_dtheta;
    int * __restrict__   gm_gridlineIndices = kernelParams.atoms.d_gridlineIndices;

    /* Fractional coordinates */
    __shared__ float sm_fractCoords[atomsPerBlock * DIM];

    /* Thread index w.r.t. block */
    const int threadLocalId = (threadIdx.z * (blockDim.x * blockDim.y))
                              + (threadIdx.y * blockDim.x) + threadIdx.x;
    /* Warp index w.r.t. block - could probably be obtained easier? */
    const int warpIndex = threadLocalId / warp_size;
    /* Thread index w.r.t. warp */
    const int threadWarpIndex = threadLocalId % warp_size;
    /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
    const int atomWarpIndex = threadWarpIndex % PME_SPREADGATHER_ATOMS_PER_WARP;
    /* Atom index w.r.t. block/shared memory */
    const int atomIndexLocal = warpIndex * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex;

    /* Atom index w.r.t. global memory */
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;
    /* Spline contribution index in one dimension */
    const int orderIndex = threadWarpIndex / (PME_SPREADGATHER_ATOMS_PER_WARP * DIM);
    /* Dimension index */
    const int dimIndex = (threadWarpIndex - orderIndex * (PME_SPREADGATHER_ATOMS_PER_WARP * DIM)) / PME_SPREADGATHER_ATOMS_PER_WARP;

    /* Multi-purpose index of rvec/ivec atom data */
    const int sharedMemoryIndex = atomIndexLocal * DIM + dimIndex;
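
    /* A worked example of the mapping above, assuming PME_SPREADGATHER_ATOMS_PER_WARP == 2 (and DIM == 3):
     * within a warp, threadWarpIndex 0..5 maps to orderIndex 0, dimIndex X,X,Y,Y,Z,Z and
     * atomWarpIndex 0,1,0,1,0,1; threadWarpIndex 6..11 repeats the same dimension/atom pattern
     * with orderIndex 1, and so on. With PME_GPU_PARALLEL_SPLINE == 0 only orderIndex == 0 passes
     * the localCheck below, so 6 threads per warp compute the splines for the warp's 2 atoms.
     */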

    /* Spline parameters need a working buffer.
     * With PME_GPU_PARALLEL_SPLINE == 0 it is just a local array of (order) values for some of the threads, which is fine;
     * With PME_GPU_PARALLEL_SPLINE == 1 (order) times more threads are involved, so the shared memory is used to avoid overhead.
     * The buffer's size, striding and indexing are adjusted accordingly.
     * The buffer is accessed with SPLINE_DATA_PTR and SPLINE_DATA macros.
     */
#if PME_GPU_PARALLEL_SPLINE
    const int         splineDataStride = atomsPerBlock * DIM;
    const int         splineDataIndex  = sharedMemoryIndex;
    __shared__ float  sm_splineData[splineDataStride * order];
    float            *splineDataPtr    = sm_splineData;
#else
    const int         splineDataStride = 1;
    const int         splineDataIndex  = 0;
    float             splineData[splineDataStride * order];
    float            *splineDataPtr    = splineData;
#endif

#define SPLINE_DATA_PTR(i) (splineDataPtr + ((i) * splineDataStride + splineDataIndex))
#define SPLINE_DATA(i)     (*SPLINE_DATA_PTR(i))
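
/* For example: with PME_GPU_PARALLEL_SPLINE == 0, SPLINE_DATA(k) expands to a per-thread local
 * array access splineData[k]; with PME_GPU_PARALLEL_SPLINE == 1 it expands to
 * sm_splineData[k * atomsPerBlock * DIM + sharedMemoryIndex], i.e. the k-th spline value of this
 * thread's (atom, dimension) slot in shared memory.
 */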

    const int localCheck  = (dimIndex < DIM) && (orderIndex < (PME_GPU_PARALLEL_SPLINE ? order : 1));
    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);

    if (localCheck && globalCheck)
    {
        /* Indices interpolation */

        if (orderIndex == 0)
        {
            int          tableIndex, tInt;
            float        n, t;
            const float3 x = sm_coordinates[atomIndexLocal];
            /* Accessing fields in fshOffset/nXYZ/recipbox/... with dimIndex offset
             * puts them into local memory(!) instead of accessing the constant memory directly.
             * That's the reason for the switch, to unroll explicitly.
             * The commented parts correspond to the 0 components of the recipbox.
             */
            switch (dimIndex)
            {
                case XX:
                    tableIndex = kernelParams.grid.tablesOffsets[XX];
                    n          = kernelParams.grid.realGridSizeFP[XX];
                    t          = x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case YY:
                    tableIndex = kernelParams.grid.tablesOffsets[YY];
                    n          = kernelParams.grid.realGridSizeFP[YY];
                    t          = /*x.x * kernelParams.current.recipBox[dimIndex][XX] + */ x.y * kernelParams.current.recipBox[dimIndex][YY] + x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;

                case ZZ:
                    tableIndex = kernelParams.grid.tablesOffsets[ZZ];
                    n          = kernelParams.grid.realGridSizeFP[ZZ];
                    t          = /*x.x * kernelParams.current.recipBox[dimIndex][XX] + x.y * kernelParams.current.recipBox[dimIndex][YY] + */ x.z * kernelParams.current.recipBox[dimIndex][ZZ];
                    break;
            }
            const float shift = c_pmeMaxUnitcellShift;
            /* Fractional coordinates along box vectors, adding a positive shift to ensure t is positive for triclinic boxes */
            t    = (t + shift) * n;
            tInt = (int)t;
            sm_fractCoords[sharedMemoryIndex] = t - tInt;
            tableIndex                       += tInt;
            assert(tInt >= 0);
            assert(tInt < c_pmeNeighborUnitcellCount * n);

            // TODO have shared table for both parameters to share the fetch, as index is always same?
            // TODO compare texture/LDG performance
            sm_fractCoords[sharedMemoryIndex] +=
                fetchFromParamLookupTable(kernelParams.grid.d_fractShiftsTable,
                                          kernelParams.fractShiftsTableTexture,
                                          tableIndex);
            sm_gridlineIndices[sharedMemoryIndex] =
                fetchFromParamLookupTable(kernelParams.grid.d_gridlineIndicesTable,
                                          kernelParams.gridlineIndicesTableTexture,
                                          tableIndex);
            gm_gridlineIndices[atomIndexOffset * DIM + sharedMemoryIndex] = sm_gridlineIndices[sharedMemoryIndex];
        }

        /* B-spline calculation */

        const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
        if (chargeCheck)
        {
            float div;
            int   o = orderIndex; // This is an index that is set once for PME_GPU_PARALLEL_SPLINE == 1

            const float dr = sm_fractCoords[sharedMemoryIndex];
            assert(isfinite(dr));

            /* dr is relative offset from lower cell limit */
            *SPLINE_DATA_PTR(order - 1) = 0.0f;
            *SPLINE_DATA_PTR(1)         = dr;
            *SPLINE_DATA_PTR(0)         = 1.0f - dr;
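
            /* The loop below implements the standard smooth-PME B-spline recurrence
             * (cf. Essmann et al., J. Chem. Phys. 103, 8577 (1995)):
             * M_k(u) = [u * M_{k-1}(u) + (k - u) * M_{k-1}(u - 1)] / (k - 1),
             * raising the spline order one step per iteration; SPLINE_DATA(j) holds the
             * j-th of the (order) spline coefficients at the current order k.
             */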
#pragma unroll
            for (int k = 3; k < order; k++)
            {
                div                     = 1.0f / (k - 1.0f);
                *SPLINE_DATA_PTR(k - 1) = div * dr * SPLINE_DATA(k - 2);
                for (int l = 1; l < (k - 1); l++)
                {
                    *SPLINE_DATA_PTR(k - l - 1) = div * ((dr + l) * SPLINE_DATA(k - l - 2) + (k - l - dr) * SPLINE_DATA(k - l - 1));
                }
                *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);
            }

            const int thetaGlobalOffsetBase = atomIndexOffset * DIM * order;
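
            /* Layout note: in both shared and global memory the spline parameters are stored
             * per warp: the per-warp atom index runs fastest, then the dimension, then the spline
             * order index, with each warp owning a contiguous chunk. This matches the thetaIndex
             * arithmetic below, and the global index only adds thetaGlobalOffsetBase on top of it.
             */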

            /* Differentiation and storing the spline derivatives (dtheta) */
#if !PME_GPU_PARALLEL_SPLINE
            // With PME_GPU_PARALLEL_SPLINE == 1, o is already set to orderIndex;
            // With PME_GPU_PARALLEL_SPLINE == 0, we loop o over range(order).
#pragma unroll
            for (o = 0; o < order; o++)
#endif
            {
                const int thetaIndex       = PME_SPLINE_THETA_STRIDE * (((o + order * warpIndex) * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                const float dtheta = ((o > 0) ? SPLINE_DATA(o - 1) : 0.0f) - SPLINE_DATA(o);
                assert(isfinite(dtheta));
                gm_dtheta[thetaGlobalIndex] = dtheta;
            }

            div                         = 1.0f / (order - 1.0f);
            *SPLINE_DATA_PTR(order - 1) = div * dr * SPLINE_DATA(order - 2);
            for (int k = 1; k < (order - 1); k++)
            {
                *SPLINE_DATA_PTR(order - k - 1) = div * ((dr + k) * SPLINE_DATA(order - k - 2) + (order - k - dr) * SPLINE_DATA(order - k - 1));
            }
            *SPLINE_DATA_PTR(0) = div * (1.0f - dr) * SPLINE_DATA(0);

            /* Storing the spline values (theta) */
#if !PME_GPU_PARALLEL_SPLINE
            // See comment for the loop above
#pragma unroll
            for (o = 0; o < order; o++)
#endif
            {
                const int thetaIndex       = PME_SPLINE_THETA_STRIDE * (((o + order * warpIndex) * DIM + dimIndex) * PME_SPREADGATHER_ATOMS_PER_WARP + atomWarpIndex);
                const int thetaGlobalIndex = thetaGlobalOffsetBase + thetaIndex;

                sm_theta[thetaIndex] = SPLINE_DATA(o);
                assert(isfinite(sm_theta[thetaIndex]));
                gm_theta[thetaGlobalIndex] = SPLINE_DATA(o);
            }
        }
    }
}

/*! \brief
 * Charge spreading onto the grid.
 * This corresponds to the CPU function spread_coefficients_bsplines_thread().
 * Second stage of the whole kernel.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 * \param[in]  atomIndexOffset      Starting atom index for the execution block w.r.t. global memory.
 * \param[in]  sm_coefficients      Atom coefficients/charges in the shared memory.
 * \param[in]  sm_gridlineIndices   Atom gridline indices in the shared memory.
 * \param[in]  sm_theta             Atom spline values in the shared memory.
 */
template <
    const int order, const bool wrapX, const bool wrapY>
__device__ __forceinline__ void spread_charges(const PmeGpuCudaKernelParams kernelParams,
                                               int                          atomIndexOffset,
                                               const float * __restrict__   sm_coefficients,
                                               const int * __restrict__     sm_gridlineIndices,
                                               const float * __restrict__   sm_theta)
{
    /* Global memory pointer to the output grid */
    float * __restrict__ gm_grid = kernelParams.grid.d_realGrid;

    const int nx  = kernelParams.grid.realGridSize[XX];
    const int ny  = kernelParams.grid.realGridSize[YY];
    const int nz  = kernelParams.grid.realGridSize[ZZ];
    const int pny = kernelParams.grid.realGridSizePadded[YY];
    const int pnz = kernelParams.grid.realGridSizePadded[ZZ];

    const int offx = 0, offy = 0, offz = 0; // unused for now
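
    /* The padded sizes are the allocation strides of the real grid, so the linear index of
     * grid cell (ix, iy, iz) is ix * pny * pnz + iy * pnz + iz - this is how constOffset and
     * gridIndexGlobal are assembled below.
     */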

    const int atomIndexLocal  = threadIdx.z;
    const int atomIndexGlobal = atomIndexOffset + atomIndexLocal;

    const int globalCheck = pme_gpu_check_atom_data_index(atomIndexGlobal, kernelParams.atoms.nAtoms);
    const int chargeCheck = pme_gpu_check_atom_charge(sm_coefficients[atomIndexLocal]);
    if (chargeCheck & globalCheck)
    {
        // Spline Y/Z coordinates
        const int ithy   = threadIdx.y;
        const int ithz   = threadIdx.x;
        const int ixBase = sm_gridlineIndices[atomIndexLocal * DIM + XX] - offx;
        int       iy     = sm_gridlineIndices[atomIndexLocal * DIM + YY] - offy + ithy;
        if (wrapY & (iy >= ny))
        {
            iy -= ny;
        }
        int       iz     = sm_gridlineIndices[atomIndexLocal * DIM + ZZ] - offz + ithz;
        if (iz >= nz)
        {
            iz -= nz;
        }

        /* Atom index w.r.t. warp - alternating 0 1 0 1 .. */
        const int    atomWarpIndex   = atomIndexLocal % PME_SPREADGATHER_ATOMS_PER_WARP;
        /* Warp index w.r.t. block - could probably be obtained easier? */
        const int    warpIndex       = atomIndexLocal / PME_SPREADGATHER_ATOMS_PER_WARP;
        const int    dimStride       = PME_SPLINE_THETA_STRIDE * PME_SPREADGATHER_ATOMS_PER_WARP;
        const int    orderStride     = dimStride * DIM;
        const int    thetaOffsetBase = orderStride * order * warpIndex + atomWarpIndex;

        const float  thetaZ          = sm_theta[thetaOffsetBase + ithz * orderStride + ZZ * dimStride];
        const float  thetaY          = sm_theta[thetaOffsetBase + ithy * orderStride + YY * dimStride];
        const float  constVal        = thetaZ * thetaY * sm_coefficients[atomIndexLocal];
        assert(isfinite(constVal));
        const int    constOffset     = iy * pnz + iz;
        const float *sm_thetaX       = sm_theta + (thetaOffsetBase + XX * dimStride);
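
        /* Each iteration of the loop below adds this atom's (x, y, z) spline-weighted charge to
         * one grid cell; atomicAdd is needed because cells receive contributions from multiple
         * atoms, processed by different warps and blocks, concurrently.
         */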
#pragma unroll
        for (int ithx = 0; (ithx < order); ithx++)
        {
            int ix = ixBase + ithx;
            if (wrapX & (ix >= nx))
            {
                ix -= nx;
            }
            const int gridIndexGlobal = ix * pny * pnz + constOffset;
            assert(isfinite(sm_thetaX[ithx * orderStride]));
            assert(isfinite(gm_grid[gridIndexGlobal]));
            atomicAdd(gm_grid + gridIndexGlobal, sm_thetaX[ithx * orderStride] * constVal);
        }
    }
}

/*! \brief
 * A spline computation and charge spreading kernel function.
 *
 * \tparam[in] order                PME interpolation order.
 * \tparam[in] computeSplines       A boolean which tells if the spline parameter and
 *                                  gridline indices' computation should be performed.
 * \tparam[in] spreadCharges        A boolean which tells if the charge spreading should be performed.
 * \tparam[in] wrapX                A boolean which tells if the grid overlap in dimension X should be wrapped.
 * \tparam[in] wrapY                A boolean which tells if the grid overlap in dimension Y should be wrapped.
 * \param[in]  kernelParams         Input PME CUDA data in constant memory.
 */
template <const int order,
          const bool computeSplines,
          const bool spreadCharges,
          const bool wrapX,
          const bool wrapY>
__launch_bounds__(c_spreadMaxThreadsPerBlock)
__global__ void pme_spline_and_spread_kernel(const PmeGpuCudaKernelParams kernelParams)
{
    const int        atomsPerBlock = c_spreadMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    // Gridline indices, ivec
    __shared__ int   sm_gridlineIndices[atomsPerBlock * DIM];
    // Charges
    __shared__ float sm_coefficients[atomsPerBlock];
    // Spline values
    __shared__ float sm_theta[atomsPerBlock * DIM * order];

    const int        blockIndex      = blockIdx.y * gridDim.x + blockIdx.x;
    const int        atomIndexOffset = blockIndex * atomsPerBlock;
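
    /* For the default configuration (warp_size == 32 and order == 4, i.e. 16 threads per atom,
     * assumed here only to illustrate the sizes): atomsPerBlock is 256 / 16 = 16, so the shared
     * arrays above hold 16 * 3 = 48 gridline indices, 16 charges and 16 * 3 * 4 = 192 theta values.
     */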

    /* Early return for fully empty blocks at the end
     * (should only happen on Fermi or billions of input atoms)
     */
    if (atomIndexOffset >= kernelParams.atoms.nAtoms)
    {
        return;
    }

    /* Staging coefficients/charges for both spline and spread */
    pme_gpu_stage_atom_data<float, atomsPerBlock, 1>(kernelParams, sm_coefficients, kernelParams.atoms.d_coefficients);

    if (computeSplines)
    {
        /* Staging coordinates */
        __shared__ float sm_coordinates[DIM * atomsPerBlock];
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM>(kernelParams, sm_coordinates, kernelParams.atoms.d_coordinates);

        __syncthreads();
        calculate_splines<order, atomsPerBlock>(kernelParams, atomIndexOffset, (const float3 *)sm_coordinates,
                                                sm_coefficients, sm_theta, sm_gridlineIndices);
    }
    else
    {
        /* Staging the data for spread
         * (the data is assumed to be in GPU global memory with proper layout already,
         * as in after running the spline kernel)
         */
        /* Spline data - only thetas (dthetas will only be needed in gather) */
        pme_gpu_stage_atom_data<float, atomsPerBlock, DIM * order>(kernelParams, sm_theta, kernelParams.atoms.d_theta);
        /* Gridline indices */
        pme_gpu_stage_atom_data<int, atomsPerBlock, DIM>(kernelParams, sm_gridlineIndices, kernelParams.atoms.d_gridlineIndices);
    }
    __syncthreads();

    /* Spreading */
    if (spreadCharges)
    {
        spread_charges<order, wrapX, wrapY>(kernelParams, atomIndexOffset, sm_coefficients, sm_gridlineIndices, sm_theta);
    }
}

void pme_gpu_spread(const PmeGpu  *pmeGpu,
                    int gmx_unused gridIndex,
                    real          *h_grid,
                    bool           computeSplines,
                    bool           spreadCharges)
{
    GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
    cudaStream_t stream          = pmeGpu->archSpecific->pmeStream;
    const auto  *kernelParamsPtr = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

    const int order         = pmeGpu->common->pme_order;
    const int atomsPerBlock = c_spreadMaxThreadsPerBlock / PME_SPREADGATHER_THREADS_PER_ATOM;
    // TODO: pick a smaller block size at runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern arch-s as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
    GMX_ASSERT(!c_usePadding || !(PME_ATOM_DATA_ALIGNMENT % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);
    dim3      dimBlock(order, order, atomsPerBlock);
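
    // The block is (order, order, atomsPerBlock): threadIdx.x and threadIdx.y walk the z and y
    // spline contributions of an atom in spread_charges(), while threadIdx.z selects the atom.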

    // These should later check for PME decomposition
    const bool wrapX = true;
    const bool wrapY = true;
    GMX_UNUSED_VALUE(wrapX);
    GMX_UNUSED_VALUE(wrapY);

    // TODO: cleaner unroll with some template trick?
    if (order == 4)
    {
        if (computeSplines && spreadCharges)
        {
            pme_gpu_start_timing(pmeGpu, gtPME_SPLINEANDSPREAD);
            pme_spline_and_spread_kernel<4, true, true, wrapX, wrapY> <<< dimGrid, dimBlock, 0, stream>>> (*kernelParamsPtr);
            CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
            pme_gpu_stop_timing(pmeGpu, gtPME_SPLINEANDSPREAD);
        }
        else if (computeSplines)
        {
            pme_gpu_start_timing(pmeGpu, gtPME_SPLINE);
            pme_spline_and_spread_kernel<4, true, false, wrapX, wrapY> <<< dimGrid, dimBlock, 0, stream>>> (*kernelParamsPtr);
            CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
            pme_gpu_stop_timing(pmeGpu, gtPME_SPLINE);
        }
        else if (spreadCharges)
        {
            pme_gpu_start_timing(pmeGpu, gtPME_SPREAD);
            pme_spline_and_spread_kernel<4, false, true, wrapX, wrapY> <<< dimGrid, dimBlock, 0, stream>>> (*kernelParamsPtr);
            CU_LAUNCH_ERR("pme_spline_and_spread_kernel");
            pme_gpu_stop_timing(pmeGpu, gtPME_SPREAD);
        }
    }
    else
    {
        GMX_THROW(gmx::NotImplementedError("The code for pme_order != 4 was not tested!"));
    }

    const bool copyBackGrid     = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
    if (copyBackGrid)
    {
        pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
    }
    const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}