/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains internal function implementations
 * for performing the PME calculations on GPU.
 *
 * Note that this file is compiled as regular C++ source in OpenCL builds, but
 * it is treated as CUDA source in CUDA-enabled GPU builds.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
50 #include "pme_gpu_internal.h"
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
69 #if GMX_GPU == GMX_GPU_CUDA
70 # include "gromacs/gpu_utils/pmalloc_cuda.h"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 # include "gromacs/gpu_utils/gmxopencl.h"
77 #include "gromacs/ewald/pme.h"
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_calculate_splines.h"
81 #include "pme_gpu_constants.h"
82 #include "pme_gpu_program_impl.h"
83 #include "pme_gpu_timings.h"
84 #include "pme_gpu_types.h"
85 #include "pme_gpu_types_host.h"
86 #include "pme_gpu_types_host_impl.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
/*! \brief
 * Atom limit above which it is advantageous to turn on the
 * recalculation of the splines in the gather and to use fewer threads per atom in the spline and spread kernels.
 */
constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
/*! \internal \brief
 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 * \returns The pointer to the kernel parameters.
 */
static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
{
    // reinterpret_cast is needed because the derived CUDA structure is not known in this file
    auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
    return kernelParamsPtr;
}
/*! \brief
 * Atom data block size (in terms of number of atoms).
 * This is the least common multiple of the number of atoms processed by
 * a single block/workgroup of the spread and gather kernels.
 * The GPU atom data buffers must be padded, which means that
 * the numbers of atoms used for determining the size of the memory
 * allocation must be divisible by this.
 */
constexpr int c_pmeAtomDataBlockSize = 64;

int pme_gpu_get_atom_data_block_size()
{
    return c_pmeAtomDataBlockSize;
}
void pme_gpu_synchronize(const PmeGpu* pmeGpu)
{
    pmeGpu->archSpecific->pmeStream_.synchronize();
}
void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
{
    const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
    allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount,
                         pmeGpu->archSpecific->deviceContext_);
    pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
}
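/* Note on the buffers allocated above: both hold c_virialAndEnergyCount floats, i.e. the six
 * independent virial components followed by the energy, in the packing order consumed by
 * pme_gpu_getEnergyAndVirial() below. The device buffer is written by the solve kernels and the
 * pinned host buffer receives the device-to-host copy. */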
void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
    pfree(pmeGpu->staging.h_virialAndEnergy);
    pmeGpu->staging.h_virialAndEnergy = nullptr;
}
void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
                           c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
}
void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu)
{
    const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
                                          pmeGpu->kernelParams->grid.realGridSize[XX]
                                                  + pmeGpu->kernelParams->grid.realGridSize[YY] };
    memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));

    const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
                                    + pmeGpu->kernelParams->grid.realGridSize[YY]
                                    + pmeGpu->kernelParams->grid.realGridSize[ZZ];
    const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
                           &pmeGpu->archSpecific->splineValuesSize,
                           &pmeGpu->archSpecific->splineValuesSizeAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    if (shouldRealloc)
    {
        /* Reallocate the host buffer */
        pfree(pmeGpu->staging.h_splineModuli);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli),
                newSplineValuesSize * sizeof(float));
    }
    for (int i = 0; i < DIM; i++)
    {
        memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i],
               pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
    }
    /* TODO: pin original buffer instead! */
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
                       0, newSplineValuesSize, pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind, nullptr);
}
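/* Layout note for the copy above: d_splineModuli stores the B-spline moduli of the three grid
 * dimensions back to back, with grid.splineValuesOffset[] giving the start of each dimension.
 * For illustration, a 64x64x64 grid yields offsets { 0, 64, 128 } and 192 floats in total. */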
void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
{
    pfree(pmeGpu->staging.h_splineModuli);
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
}
void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
{
    const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
                           &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
    pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
}
void pme_gpu_free_forces(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
}
void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
                       DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind, nullptr);
}
void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
    copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
                         DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
                         pmeGpu->settings.transferKind, nullptr);
}
void pme_gpu_realloc_and_copy_input_coefficients(PmeGpu* pmeGpu, const float* h_coefficients)
{
    GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
    const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
                           &pmeGpu->archSpecific->coefficientsSize,
                           &pmeGpu->archSpecific->coefficientsSizeAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients,
                       const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);

    const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
    const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
    if (paddingCount > 0)
    {
        clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
                               paddingCount, pmeGpu->archSpecific->pmeStream_);
    }
}
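/* The tail of d_coefficients between nAtoms and nAtomsAlloc is cleared above so that the padding
 * atoms carry zero charge; they then contribute nothing when the spread and gather kernels process
 * whole atom blocks without per-atom bounds checks. */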
void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
}
void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
{
    const int order             = pmeGpu->common->pme_order;
    const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
    /* Two arrays of the same size */
    const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
    int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
    int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
                           &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
                           &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    // the host side reallocation
    if (shouldRealloc)
    {
        pfree(pmeGpu->staging.h_theta);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
        pfree(pmeGpu->staging.h_dtheta);
        pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
    }
}
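/* The temporary copies of splineDataSize/splineDataSizeAlloc above are passed to the first
 * reallocateDeviceBuffer() call so that d_theta and d_dtheta are both resized against the same
 * previous size; only the second call updates the sizes tracked in archSpecific. */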
void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
{
    /* Two arrays of the same size */
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
    pfree(pmeGpu->staging.h_theta);
    pfree(pmeGpu->staging.h_dtheta);
}
void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
{
    const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
                           pmeGpu->archSpecific->deviceContext_);
    pfree(pmeGpu->staging.h_gridlineIndices);
    pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
}
void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
    pfree(pmeGpu->staging.h_gridlineIndices);
}
void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
{
    auto*     kernelParamsPtr = pmeGpu->kernelParams.get();
    const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
                                * kernelParamsPtr->grid.realGridSizePadded[YY]
                                * kernelParamsPtr->grid.realGridSizePadded[ZZ];
    const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
                                   * kernelParamsPtr->grid.complexGridSizePadded[YY]
                                   * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
    // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        /* 2 separate grids */
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
                               &pmeGpu->archSpecific->complexGridSize,
                               &pmeGpu->archSpecific->complexGridSizeAlloc,
                               pmeGpu->archSpecific->deviceContext_);
        reallocateDeviceBuffer(
                &kernelParamsPtr->grid.d_realGrid, newRealGridSize, &pmeGpu->archSpecific->realGridSize,
                &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
    }
    else
    {
        /* A single buffer so that any grid will fit */
        const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
        reallocateDeviceBuffer(
                &kernelParamsPtr->grid.d_realGrid, newGridsSize, &pmeGpu->archSpecific->realGridSize,
                &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->deviceContext_);
        kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
        pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
        // the size might get used later for copying the grid
    }
}
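/* Sizing illustration for the two branches above, assuming a 64x64x64 grid with GPU FFT:
 * the padded real grid holds 64*64*64 = 262144 floats, while the complex grid holds 64*64*33
 * complex values, i.e. 64*64*33*2 = 270336 floats. With out-of-place FFT these live in two
 * separate buffers; with in-place FFT a single buffer of max(262144, 270336) floats is shared. */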
void pme_gpu_free_grids(const PmeGpu* pmeGpu)
{
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
    }
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
}
void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
                           pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_);
}
void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
{
    pme_gpu_free_fract_shifts(pmeGpu);

    auto* kernelParamsPtr = pmeGpu->kernelParams.get();

    const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
    const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
    const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
    const int cellCount           = c_pmeNeighborUnitcellCount;
    const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };

    memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));

    const int newFractShiftsSize = cellCount * (nx + ny + nz);

#if GMX_GPU == GMX_GPU_CUDA
    initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable, kernelParamsPtr->fractShiftsTableTexture,
                         pmeGpu->common->fsh.data(), newFractShiftsSize);

    initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                         kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
                         newFractShiftsSize);
#elif GMX_GPU == GMX_GPU_OPENCL
    // No dedicated texture routines....
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize,
                         pmeGpu->archSpecific->deviceContext_);
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize,
                         pmeGpu->archSpecific->deviceContext_);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(), 0,
                       newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
                       GpuApiCallBehavior::Async, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(), 0,
                       newFractShiftsSize, pmeGpu->archSpecific->pmeStream_,
                       GpuApiCallBehavior::Async, nullptr);
#endif
}
void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
{
    auto* kernelParamsPtr = pmeGpu->kernelParams.get();
#if GMX_GPU == GMX_GPU_CUDA
    destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
                            kernelParamsPtr->fractShiftsTableTexture);
    destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                            kernelParamsPtr->gridlineIndicesTableTexture);
#elif GMX_GPU == GMX_GPU_OPENCL
    freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
    freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
#endif
}
bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
{
    return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
}
void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, float* h_grid)
{
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid, 0, pmeGpu->archSpecific->realGridSize,
                       pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
}
void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid)
{
    copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid, 0,
                         pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream_,
                         pmeGpu->settings.transferKind, nullptr);
    pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
}
void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
{
    const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
    auto*        kernelParamsPtr = pmeGpu->kernelParams.get();
    copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
                         pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
                         pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
                         0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
                         pmeGpu->settings.transferKind, nullptr);
}
void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
{
    const size_t splinesCount    = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
    auto*        kernelParamsPtr = pmeGpu->kernelParams.get();

    // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
                           pmeGpu->archSpecific->pmeStream_);
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
                           pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
                           pmeGpu->archSpecific->pmeStream_);
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
                           pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
                           pmeGpu->archSpecific->pmeStream_);

    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
                       pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
                       pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
                       0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
                       pmeGpu->settings.transferKind, nullptr);
}
void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
{
    pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
}
void pme_gpu_init_internal(PmeGpu* pmeGpu)
{
#if GMX_GPU == GMX_GPU_CUDA
    // Prepare to use the device that this PME task was assigned earlier.
    // Other entities, such as CUDA timing events, are known to implicitly use the device context.
    CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
#endif

    /* Allocate the target-specific structures */
    pmeGpu->archSpecific.reset(new PmeGpuSpecific(pmeGpu->programHandle_->impl_->deviceContext_));
    pmeGpu->kernelParams.reset(new PmeGpuKernelParams());

    pmeGpu->archSpecific->performOutOfPlaceFFT = true;
    /* This should give better performance, according to the cuFFT documentation.
     * The performance seems to be the same though.
     * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
     */

    // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        /* WARNING: CUDA timings are incorrect with multiple streams.
         * This is the main reason why they are disabled by default.
         */
        // TODO: Consider turning on by default when we can detect nr of streams.
        pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
    }
    else if (GMX_GPU == GMX_GPU_OPENCL)
    {
        pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
    }

#if GMX_GPU == GMX_GPU_CUDA
    pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
#elif GMX_GPU == GMX_GPU_OPENCL
    pmeGpu->maxGridWidthX = INT32_MAX / 2;
    // TODO: is there no really global work size limit in OpenCL?
#endif

    /* Creating a PME GPU stream:
     * - default high priority with CUDA
     * - no priorities implemented yet with OpenCL; see #2532
     */
    pmeGpu->archSpecific->pmeStream_.init(*pmeGpu->deviceInfo, pmeGpu->archSpecific->deviceContext_,
                                          DeviceStreamPriority::High, pmeGpu->archSpecific->useTiming);
}
void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
{
    if (pme_gpu_settings(pmeGpu).performGPUFFT)
    {
        pmeGpu->archSpecific->fftSetup.resize(0);
        for (int i = 0; i < pmeGpu->common->ngrids; i++)
        {
            pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
        }
    }
}
void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
{
    pmeGpu->archSpecific->fftSetup.resize(0);
}
void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, PmeOutput* output)
{
    const PmeGpu* pmeGpu = pme.gpu;
    for (int j = 0; j < c_virialAndEnergyCount; j++)
    {
        GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]),
                   "PME GPU produces incorrect energy/virial.");
    }

    unsigned int j                 = 0;
    output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] =
            0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] =
            0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] =
            0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
}
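/* Packing note for the reads above: h_virialAndEnergy is consumed in the fixed order produced by
 * the solve kernels - the three diagonal virial components, then the XY, XZ and YZ off-diagonal
 * pairs (each mirrored into both symmetric entries), and finally the energy - which is why the
 * index j simply advances monotonically through the staging buffer. */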
/*! \brief Sets the force-related members in \p output
 *
 * \param[in]   pmeGpu      PME GPU data structure
 * \param[out]  output      Pointer to PME output data structure
 */
static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
{
    output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
    if (output->haveForceOutput_)
    {
        output->forces_ = pmeGpu->staging.h_forces;
    }
}
PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial)
{
    PmeGpu* pmeGpu = pme.gpu;

    PmeOutput output;

    pme_gpu_getForceOutput(pmeGpu, &output);

    if (computeEnergyAndVirial)
    {
        if (pme_gpu_settings(pmeGpu).performGPUSolve)
        {
            pme_gpu_getEnergyAndVirial(pme, &output);
        }
        else
        {
            get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
        }
    }
    return output;
}
void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
{
#if GMX_DOUBLE
    GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
#else
    matrix scaledBox;
    pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
    auto* kernelParamsPtr              = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
    GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
    matrix recipBox;
    gmx::invertBoxMatrix(scaledBox, recipBox);

    /* The GPU recipBox is transposed as compared to the CPU recipBox.
     * Spread uses matrix columns (while solve and gather use rows).
     * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
     */
    const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
                                         { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
                                         { 0.0, 0.0, recipBox[ZZ][ZZ] } };
    memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
#endif
}
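/* Illustration of the transposition above: for a rectangular box diag(Lx, Ly, Lz) the inverted box
 * is diag(1/Lx, 1/Ly, 1/Lz), so the transpose is identical; only for triclinic boxes do the
 * off-diagonal recipBox elements move from the lower triangle into the upper triangle of newRecipBox. */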
/*! \brief \libinternal
 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
 *
 * \param[in] pmeGpu            The PME GPU structure.
 */
static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
{
    auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->grid.ewaldFactor =
            (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
    /* The grid size variants */
    for (int i = 0; i < DIM; i++)
    {
        kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
        kernelParamsPtr->grid.realGridSizeFP[i] =
                static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
        kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];

        // The complex grid currently uses no padding;
        // if it starts to do so, then another test should be added for that
        kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
    }
    /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    if (!pme_gpu_settings(pmeGpu).performGPUFFT)
    {
        // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
        kernelParamsPtr->grid.realGridSizePadded[ZZ] =
                (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
    }
    /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
    kernelParamsPtr->grid.complexGridSize[ZZ]++;
    kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];

    pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
    pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
    pme_gpu_realloc_grids(pmeGpu);
    pme_gpu_reinit_3dfft(pmeGpu);
}
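/* Size example for the real-to-complex dimensions set above, assuming nk = {64, 64, 64}:
 * complexGridSize becomes {64, 64, 33} since 64 / 2 + 1 = 33, and when the FFT runs on the CPU the
 * real grid's minor dimension is padded to (64 / 2 + 1) * 2 = 66 so that both layouts match and the
 * grid can be copied directly. */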
/* Several GPU functions that refer to the CPU PME data live here.
 * We would like to keep these away from the GPU-framework specific code for clarity,
 * as well as compilation issues with MPI.
 */

/*! \brief \libinternal
 * Copies everything useful from the PME CPU to the PME GPU structure.
 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
 *
 * \param[in] pme         The PME structure.
 */
static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
{
    /* TODO: Consider refactoring the CPU PME code to use the same structure,
     * so that this function becomes 2 lines */
    PmeGpu* pmeGpu               = pme->gpu;
    pmeGpu->common->ngrids       = pme->ngrids;
    pmeGpu->common->epsilon_r    = pme->epsilon_r;
    pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
    pmeGpu->common->nk[XX]       = pme->nkx;
    pmeGpu->common->nk[YY]       = pme->nky;
    pmeGpu->common->nk[ZZ]       = pme->nkz;
    pmeGpu->common->pme_order    = pme->pme_order;
    if (pmeGpu->common->pme_order != c_pmeGpuOrder)
    {
        GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
    }
    for (int i = 0; i < DIM; i++)
    {
        pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
    }
    const int cellCount = c_pmeNeighborUnitcellCount;
    pmeGpu->common->fsh.resize(0);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
    pmeGpu->common->nn.resize(0);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
    pmeGpu->common->runMode       = pme->runMode;
    pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
    pmeGpu->common->boxScaler     = pme->boxScaler;
}
/*! \libinternal \brief
 * Uses heuristics to select the best performing PME gather and scatter kernels
 *
 * \param[in,out] pmeGpu         The PME GPU structure.
 */
static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
{
    if (pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit && (GMX_GPU == GMX_GPU_CUDA))
    {
        pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::Order;
        pmeGpu->settings.recalculateSplines = true;
    }
    else
    {
        pmeGpu->settings.threadsPerAtom     = ThreadsPerAtom::OrderSquared;
        pmeGpu->settings.recalculateSplines = false;
    }
}
/*! \libinternal \brief
 * Initializes the PME GPU data at the beginning of the run.
 * TODO: this should become PmeGpu::PmeGpu()
 *
 * \param[in,out] pme            The PME structure.
 * \param[in,out] deviceInfo     The GPU device information structure.
 * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
 */
static void pme_gpu_init(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
{
    GMX_ASSERT(deviceInfo != nullptr,
               "Device information can not be nullptr when GPU is used for PME.");
    pme->gpu       = new PmeGpu();
    PmeGpu* pmeGpu = pme->gpu;
    changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
    pmeGpu->common = std::make_shared<PmeShared>();

    /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
    /* A convenience variable. */
    pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
    /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
    pmeGpu->settings.performGPUGather = true;
    // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
    pmeGpu->settings.useGpuForceReduction = false;

    pme_gpu_set_testing(pmeGpu, false);

    pmeGpu->deviceInfo = deviceInfo;
    GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
    pmeGpu->programHandle_ = pmeGpuProgram;

    pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();

    pme_gpu_init_internal(pmeGpu);
    pme_gpu_alloc_energy_virial(pmeGpu);

    pme_gpu_copy_common_data_from(pme);

    GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");

    auto* kernelParamsPtr               = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
}
void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
{
    GMX_ASSERT(gridSize != nullptr, "");
    GMX_ASSERT(paddedGridSize != nullptr, "");
    GMX_ASSERT(pmeGpu != nullptr, "");
    auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    for (int i = 0; i < DIM; i++)
    {
        (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
        (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
    }
}
void pme_gpu_reinit(gmx_pme_t* pme, const DeviceInformation* deviceInfo, const PmeGpuProgram* pmeGpuProgram)
{
    GMX_ASSERT(pme != nullptr, "Need valid PME object");
    if (pme->runMode == PmeRunMode::CPU)
    {
        GMX_ASSERT(pme->gpu == nullptr, "Should not have PME GPU object");
        return;
    }

    if (!pme->gpu)
    {
        /* First-time initialization */
        pme_gpu_init(pme, deviceInfo, pmeGpuProgram);
    }
    else
    {
        /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
        pme_gpu_copy_common_data_from(pme);
    }
    /* GPU FFT will only get used for a single rank. */
    pme->gpu->settings.performGPUFFT =
            (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
    pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);

    /* Reinit active timers */
    pme_gpu_reinit_timings(pme->gpu);

    pme_gpu_reinit_grids(pme->gpu);
    // Note: if timing the reinit launch overhead becomes more relevant
    // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
    pme_gpu_reinit_computation(pme, nullptr);
    /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
     * update for mixed mode on grid switch. TODO: use shared recipbox field.
     */
    std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
}
void pme_gpu_destroy(PmeGpu* pmeGpu)
{
    /* Free lots of data */
    pme_gpu_free_energy_virial(pmeGpu);
    pme_gpu_free_bspline_values(pmeGpu);
    pme_gpu_free_forces(pmeGpu);
    pme_gpu_free_coefficients(pmeGpu);
    pme_gpu_free_spline_data(pmeGpu);
    pme_gpu_free_grid_indices(pmeGpu);
    pme_gpu_free_fract_shifts(pmeGpu);
    pme_gpu_free_grids(pmeGpu);

    pme_gpu_destroy_3dfft(pmeGpu);

    delete pmeGpu;
}
void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* charges)
{
    auto* kernelParamsPtr         = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->atoms.nAtoms = nAtoms;
    const int  block_size         = pme_gpu_get_atom_data_block_size();
    const int  nAtomsNewPadded    = ((nAtoms + block_size - 1) / block_size) * block_size;
    const bool haveToRealloc      = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
    pmeGpu->nAtomsAlloc           = nAtomsNewPadded;

#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision supported");
    GMX_UNUSED_VALUE(charges);
#else
    pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(charges));
    /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
#endif

    if (haveToRealloc)
    {
        pme_gpu_realloc_forces(pmeGpu);
        pme_gpu_realloc_spline_data(pmeGpu);
        pme_gpu_realloc_grid_indices(pmeGpu);
    }
    pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
}
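/* Padding example for the block rounding above: with c_pmeAtomDataBlockSize = 64, nAtoms = 1000
 * gives nAtomsNewPadded = ((1000 + 63) / 64) * 64 = 1024; the per-atom buffers are only reallocated
 * (haveToRealloc) when this padded count grows beyond the previous allocation. */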
/*! \brief
 * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
 * In CUDA result can be nullptr stub, per GpuRegionTimer implementation.
 *
 * \param[in] pmeGpu         The PME GPU data structure.
 * \param[in] PMEStageId     The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
 */
static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
{
    CommandEvent* timingEvent = nullptr;
    if (pme_gpu_timings_enabled(pmeGpu))
    {
        GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
                   "Wrong PME GPU timing event index");
        timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
    }
    return timingEvent;
}
void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, int grid_index)
{
    int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;

    pme_gpu_start_timing(pmeGpu, timerId);
    pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
            dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
    pme_gpu_stop_timing(pmeGpu, timerId);
}
/*! \brief
 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize number of unused blocks.
 */
std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
               "pmeGpuCreateGrid: excessive blocks");
    return std::pair<int, int>(colCount, minRowCount);
}
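/* Worked example of the rounding above: assuming, purely for illustration, maxGridWidthX = 65535
 * and blockCount = 100000, then minRowCount = 2 and colCount = 50000, i.e. a 50000 x 2 launch grid
 * with no unused blocks. Whenever blockCount <= maxGridWidthX the result is simply { blockCount, 1 }. */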
/*! \brief
 * Returns a pointer to appropriate spline and spread kernel based on the input bool values
 *
 * \param[in]  pmeGpu                   The PME GPU structure.
 * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (writeSplinesToGlobal)
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplines;
        }
    }
    else
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
        }
    }

    return kernelPtr;
}
/*! \brief
 * Returns a pointer to appropriate spline kernel based on the input bool values
 *
 * \param[in]  pmeGpu                   The PME GPU structure.
 * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSplineKernelPtr(const PmeGpu*   pmeGpu,
                                  ThreadsPerAtom  threadsPerAtom,
                                  bool gmx_unused writeSplinesToGlobal)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    GMX_ASSERT(
            writeSplinesToGlobal,
            "Spline data should always be written to global memory when just calculating splines");

    if (threadsPerAtom == ThreadsPerAtom::Order)
    {
        kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4;
    }
    else
    {
        kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
    }

    return kernelPtr;
}
/*! \brief
 * Returns a pointer to appropriate spread kernel based on the input bool values
 *
 * \param[in]  pmeGpu                   The PME GPU structure.
 * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
 * \param[in]  writeSplinesToGlobal     bool controlling if we should write spline data to global memory
 *
 * \return Pointer to CUDA kernel
 */
static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool writeSplinesToGlobal)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (writeSplinesToGlobal)
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
        }
    }
    else
    {
        /* if we are not saving the spline data we need to recalculate it
           using the spline and spread Kernel */
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
        }
    }
    return kernelPtr;
}
void pme_gpu_spread(const PmeGpu*         pmeGpu,
                    GpuEventSynchronizer* xReadyOnDevice,
                    int gmx_unused        gridIndex,
                    real*                 h_grid,
                    bool                  computeSplines,
                    bool                  spreadCharges)
{
    GMX_ASSERT(computeSplines || spreadCharges,
               "PME spline/spread kernel has invalid input (nothing to do)");
    const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

    const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;

    const int order = pmeGpu->common->pme_order;
    GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
    const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
    const int  threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
#if GMX_GPU == GMX_GPU_OPENCL
    GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
               "Only 16 threads per atom supported in OpenCL");
    GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
#endif
    const int atomsPerBlock = blockSize / threadsPerAtom;

    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern arch-s as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly)
    GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
               "inconsistent atom data padding vs. spreading block size");

    // Ensure that coordinates are ready on the device before launching spread;
    // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
    // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
    GMX_ASSERT(xReadyOnDevice != nullptr || (GMX_GPU != GMX_GPU_CUDA)
                       || pmeGpu->common->isRankPmeOnly || pme_gpu_settings(pmeGpu).copyAllOutputs,
               "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
    if (xReadyOnDevice)
    {
        xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
    }

    const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = order;
    config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;

    int                                timingId;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (computeSplines)
    {
        if (spreadCharges)
        {
            timingId  = gtPME_SPLINEANDSPREAD;
            kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
                                                       writeGlobal || (!recalculateSplines));
        }
        else
        {
            timingId  = gtPME_SPLINE;
            kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
                                              writeGlobal || (!recalculateSplines));
        }
    }
    else
    {
        timingId  = gtPME_SPREAD;
        kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
                                          writeGlobal || (!recalculateSplines));
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(
            kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
            &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
            &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->grid.d_fractShiftsTable,
            &kernelParamsPtr->grid.d_gridlineIndicesTable, &kernelParamsPtr->atoms.d_coefficients,
            &kernelParamsPtr->atoms.d_coordinates);
#endif

    launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
                    "PME spline/spread", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    const auto& settings     = pmeGpu->settings;
    const bool  copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
    if (copyBackGrid)
    {
        pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
    }
    const bool copyBackAtomData =
            computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}
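/* Launch-geometry example for the spread path above: assuming, for illustration, a spread
 * work-group size of 256 and ThreadsPerAtom::OrderSquared (order * order = 16 threads per atom),
 * atomsPerBlock = 256 / 16 = 16 and the block dimensions become (4, 4, 16); the padded atom count,
 * a multiple of c_pmeAtomDataBlockSize = 64, then divides evenly into such blocks. */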
void pme_gpu_solve(const PmeGpu* pmeGpu, t_complex* h_grid, GridOrdering gridOrdering, bool computeEnergyAndVirial)
{
    const auto& settings               = pmeGpu->settings;
    const bool  copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;

    auto* kernelParamsPtr = pmeGpu->kernelParams.get();

    float* h_gridFloat = reinterpret_cast<float*>(h_grid);
    if (copyInputAndOutputGrid)
    {
        copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat, 0,
                           pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
                           pmeGpu->settings.transferKind, nullptr);
    }

    int majorDim = -1, middleDim = -1, minorDim = -1;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            majorDim  = YY;
            middleDim = ZZ;
            minorDim  = XX;
            break;

        case GridOrdering::XYZ:
            majorDim  = XX;
            middleDim = YY;
            minorDim  = ZZ;
            break;

        default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
    }

    const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;

    const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
    const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
    const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
    int       cellsPerBlock;
    if (blocksPerGridLine == 1)
    {
        cellsPerBlock = gridLineSize * gridLinesPerBlock;
    }
    else
    {
        cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
    }
    const int warpSize  = pmeGpu->programHandle_->warpSize();
    const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;

    static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
                  "The CUDA solve energy kernels needs at least 4 warps. "
                  "Here we launch at least half of the max warps.");

    KernelLaunchConfig config;
    config.blockSize[0] = blockSize;
    config.gridSize[0]  = blocksPerGridLine;
    // rounding up to full warps so that shuffle operations produce defined results
    config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
                         / gridLinesPerBlock;
    config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];

    int                                timingId  = gtPME_SOLVE;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (gridOrdering == GridOrdering::YZX)
    {
        kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernel
                                           : pmeGpu->programHandle_->impl_->solveYZXKernel;
    }
    else if (gridOrdering == GridOrdering::XYZ)
    {
        kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernel
                                           : pmeGpu->programHandle_->impl_->solveXYZKernel;
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(
            kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli,
            &kernelParamsPtr->constants.d_virialAndEnergy, &kernelParamsPtr->grid.d_fourierGrid);
#endif
    launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
                    kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (computeEnergyAndVirial)
    {
        copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy,
                             &kernelParamsPtr->constants.d_virialAndEnergy, 0, c_virialAndEnergyCount,
                             pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
    }

    if (copyInputAndOutputGrid)
    {
        copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid, 0,
                             pmeGpu->archSpecific->complexGridSize, pmeGpu->archSpecific->pmeStream_,
                             pmeGpu->settings.transferKind, nullptr);
    }
}
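/* Decomposition example for the solve launch above: assuming, for illustration, a complex grid
 * line of 33 cells in the minor dimension (a 64-point real dimension), maxBlockSize = 256 and a
 * warp size of 32: gridLinesPerBlock = 7, blocksPerGridLine = 1, cellsPerBlock = 231, and the
 * block size is rounded up to 256 threads so that warp shuffles operate on full warps. */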
/*! \brief
 * Returns a pointer to appropriate gather kernel based on the input values
 *
 * \param[in]  pmeGpu                   The PME GPU structure.
 * \param[in]  threadsPerAtom           Controls whether we should use order or order*order threads per atom
 * \param[in]  readSplinesFromGlobal    bool controlling if we should read spline data from global memory
 *
 * \return Pointer to CUDA kernel
 */
inline auto selectGatherKernelPtr(const PmeGpu* pmeGpu, ThreadsPerAtom threadsPerAtom, bool readSplinesFromGlobal)
{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;

    if (readSplinesFromGlobal)
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplines;
        }
    }
    else
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4;
        }
        else
        {
            kernelPtr = pmeGpu->programHandle_->impl_->gatherKernel;
        }
    }
    return kernelPtr;
}
void pme_gpu_gather(PmeGpu* pmeGpu, const float* h_grid)
{
    const auto& settings = pmeGpu->settings;
    if (!settings.performGPUFFT || settings.copyAllOutputs)
    {
        pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float*>(h_grid));
    }

    if (settings.copyAllOutputs)
    {
        pme_gpu_copy_input_gather_atom_data(pmeGpu);
    }

    /* Set if we have unit tests */
    const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
    const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
    const int    order      = pmeGpu->common->pme_order;
    GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
    const int threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
#if GMX_GPU == GMX_GPU_OPENCL
    GMX_ASSERT(pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
               "Only 16 threads per atom supported in OpenCL");
    GMX_ASSERT(!recalculateSplines, "Recalculating splines not supported in OpenCL");
#endif
    const int atomsPerBlock = blockSize / threadsPerAtom;

    GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
               "inconsistent atom data padding vs. gathering block size");

    const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = order;
    config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;

    // TODO test different cache configs

    int                                timingId  = gtPME_GATHER;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = selectGatherKernelPtr(
            pmeGpu, pmeGpu->settings.threadsPerAtom, readGlobal || (!recalculateSplines));
    // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl

    pme_gpu_start_timing(pmeGpu, timingId);
    auto*       timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto* kernelParamsPtr = pmeGpu->kernelParams.get();
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(
            kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients,
            &kernelParamsPtr->grid.d_realGrid, &kernelParamsPtr->atoms.d_theta,
            &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
            &kernelParamsPtr->atoms.d_forces);
#endif
    launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
                    kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (pmeGpu->settings.useGpuForceReduction)
    {
        pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
    }
    else
    {
        pme_gpu_copy_output_forces(pmeGpu);
    }
}
void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return pmeGpu->kernelParams->atoms.d_forces;
    }
    else
    {
        return nullptr;
    }
}
void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
{
    GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
               "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
               "initialized.");

    GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
               "The device-side buffer can not be set.");

    pmeGpu->kernelParams->atoms.d_coordinates = d_x;
}
const DeviceStream* pme_gpu_get_stream(const PmeGpu* pmeGpu)
{
    if (pmeGpu)
    {
        return &pmeGpu->archSpecific->pmeStream_;
    }
    else
    {
        return nullptr;
    }
}
GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return &pmeGpu->archSpecific->pmeForcesReady;
    }
    else
    {
        return nullptr;
    }
}