/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief This file contains internal function implementations
 * for performing the PME calculations on GPU.
 *
 * Note that this file is compiled as regular C++ source in OpenCL builds, but
 * it is treated as CUDA source in CUDA-enabled GPU builds.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
50 #include "pme_gpu_internal.h"
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
69 #if GMX_GPU == GMX_GPU_CUDA
70 #include "gromacs/gpu_utils/pmalloc_cuda.h"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #include "gromacs/gpu_utils/gmxopencl.h"
77 #include "gromacs/ewald/pme.h"
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_constants.h"
81 #include "pme_gpu_program_impl.h"
82 #include "pme_gpu_timings.h"
83 #include "pme_gpu_types.h"
84 #include "pme_gpu_types_host.h"
85 #include "pme_gpu_types_host_impl.h"
86 #include "pme_gpu_utils.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
/*! \internal \brief
 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
 *
 * \param[in] pmeGpu  The PME GPU structure.
 * \returns The pointer to the kernel parameters.
 */
static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
{
    // reinterpret_cast is needed because the derived CUDA structure is not known in this file
    auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
    return kernelParamsPtr;
}
int pme_gpu_get_atom_data_alignment(const PmeGpu * /*unused*/)
{
    // TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
    return c_pmeAtomDataAlignment;
}

int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
{
    return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
}
void pme_gpu_synchronize(const PmeGpu *pmeGpu)
{
    gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_alloc_energy_virial(PmeGpu *pmeGpu)
{
    const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
    allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
    pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
}

void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
    pfree(pmeGpu->staging.h_virialAndEnergy);
    pmeGpu->staging.h_virialAndEnergy = nullptr;
}

void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
                           c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
}
void pme_gpu_realloc_and_copy_bspline_values(PmeGpu *pmeGpu)
{
    const int splineValuesOffset[DIM] = {
        0,
        pmeGpu->kernelParams->grid.realGridSize[XX],
        pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
    };
    memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));

    const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
                                    pmeGpu->kernelParams->grid.realGridSize[YY] +
                                    pmeGpu->kernelParams->grid.realGridSize[ZZ];
    const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
                           &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
    if (shouldRealloc)
    {
        /* Reallocate the host buffer */
        pfree(pmeGpu->staging.h_splineModuli);
        pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_splineModuli), newSplineValuesSize * sizeof(float));
    }
    for (int i = 0; i < DIM; i++)
    {
        memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
    }
    /* TODO: pin original buffer instead! */
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
                       0, newSplineValuesSize,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}
void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
{
    pfree(pmeGpu->staging.h_splineModuli);
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
}

void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
{
    const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
                           &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
    pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
    pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
}

void pme_gpu_free_forces(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
}

void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
                       0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
{
    GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
    float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
    copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
                         0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
{
    const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
    GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
                           &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
    const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
    const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
    if (paddingCount > 0)
    {
        clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
                               paddingCount, pmeGpu->archSpecific->pmeStream);
    }
}
void pme_gpu_copy_input_coordinates(const PmeGpu *pmeGpu, const rvec *h_coordinates)
{
    GMX_ASSERT(h_coordinates, "Bad host-side coordinate buffer in PME GPU");
#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision is supported");
    GMX_UNUSED_VALUE(h_coordinates);
#else
    const float *h_coordinatesFloat = reinterpret_cast<const float *>(h_coordinates);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, h_coordinatesFloat,
                       0, pmeGpu->kernelParams->atoms.nAtoms * DIM,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    // FIXME: sync required since the copied data will be used by PP stream when using single GPU for both
    // Remove after adding the required event-based sync between the above H2D and the transform kernel
    pme_gpu_synchronize(pmeGpu);
#endif
}
void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
}

void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
{
    GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
    const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
                           &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
    copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
                       0, pmeGpu->kernelParams->atoms.nAtoms,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
    const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
    if (paddingCount > 0)
    {
        clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
                               paddingCount, pmeGpu->archSpecific->pmeStream);
    }
}

void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
}
void pme_gpu_realloc_spline_data(PmeGpu *pmeGpu)
{
    const int    order        = pmeGpu->common->pme_order;
    const int    alignment    = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const int    newSplineDataSize = DIM * order * nAtomsPadded;
    GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
    /* Two arrays of the same size */
    const bool shouldRealloc        = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
    int        currentSizeTemp      = pmeGpu->archSpecific->splineDataSize;
    int        currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
                           &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
                           &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
    // the host side reallocation
    if (shouldRealloc)
    {
        pfree(pmeGpu->staging.h_theta);
        pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
        pfree(pmeGpu->staging.h_dtheta);
        pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
    }
}
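// Sizing example for pme_gpu_realloc_spline_data() above (illustrative values): with PME order 4
// and 1000 atoms padded to the atoms-per-warp alignment, each of the theta and dtheta arrays
// holds DIM * order * nAtomsPadded = 3 * 4 * 1000 = 12000 floats.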
void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
{
    /* Two arrays of the same size */
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
    pfree(pmeGpu->staging.h_theta);
    pfree(pmeGpu->staging.h_dtheta);
}

void pme_gpu_realloc_grid_indices(PmeGpu *pmeGpu)
{
    const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
    GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
    reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
                           &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
    pfree(pmeGpu->staging.h_gridlineIndices);
    pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
}

void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
{
    freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
    pfree(pmeGpu->staging.h_gridlineIndices);
}
void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
{
    auto     *kernelParamsPtr = pmeGpu->kernelParams.get();
    const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
                                kernelParamsPtr->grid.realGridSizePadded[YY] *
                                kernelParamsPtr->grid.realGridSizePadded[ZZ];
    const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
                                   kernelParamsPtr->grid.complexGridSizePadded[YY] *
                                   kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
    // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        /* 2 separate grids */
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
                               &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
                               &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
    }
    else
    {
        /* A single buffer so that any grid will fit */
        const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
        reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
                               &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
        kernelParamsPtr->grid.d_fourierGrid   = kernelParamsPtr->grid.d_realGrid;
        pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
        // the size might get used later for copying the grid
    }
}
void pme_gpu_free_grids(const PmeGpu *pmeGpu)
{
    if (pmeGpu->archSpecific->performOutOfPlaceFFT)
    {
        freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
    }
    freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
}

void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
{
    clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
                           pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
}
void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
{
    pme_gpu_free_fract_shifts(pmeGpu);

    auto     *kernelParamsPtr = pmeGpu->kernelParams.get();

    const int nx                  = kernelParamsPtr->grid.realGridSize[XX];
    const int ny                  = kernelParamsPtr->grid.realGridSize[YY];
    const int nz                  = kernelParamsPtr->grid.realGridSize[ZZ];
    const int cellCount           = c_pmeNeighborUnitcellCount;
    const int gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};

    memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));

    const int newFractShiftsSize = cellCount * (nx + ny + nz);

#if GMX_GPU == GMX_GPU_CUDA
    initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
                         kernelParamsPtr->fractShiftsTableTexture,
                         pmeGpu->common->fsh.data(),
                         newFractShiftsSize);

    initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                         kernelParamsPtr->gridlineIndicesTableTexture,
                         pmeGpu->common->nn.data(),
                         newFractShiftsSize);
#elif GMX_GPU == GMX_GPU_OPENCL
    // No dedicated texture routines....
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
    allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
                       0, newFractShiftsSize,
                       pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
                       0, newFractShiftsSize,
                       pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
#endif
}
void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pmeGpu->kernelParams.get();
#if GMX_GPU == GMX_GPU_CUDA
    destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
                            kernelParamsPtr->fractShiftsTableTexture);
    destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
                            kernelParamsPtr->gridlineIndicesTableTexture);
#elif GMX_GPU == GMX_GPU_OPENCL
    freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
    freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
#endif
}

bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
{
    return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
}

void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
{
    copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
                       0, pmeGpu->archSpecific->realGridSize,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
{
    copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
                         0, pmeGpu->archSpecific->realGridSize,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
}
void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
{
    const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
    auto        *kernelParamsPtr = pmeGpu->kernelParams.get();
    copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
                         0, splinesCount,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
                         0, splinesCount,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
                         0, kernelParamsPtr->atoms.nAtoms * DIM,
                         pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
{
    const int    alignment       = pme_gpu_get_atoms_per_warp(pmeGpu);
    const size_t nAtomsPadded    = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
    const size_t splinesCount    = DIM * nAtomsPadded * pmeGpu->common->pme_order;
    auto        *kernelParamsPtr = pmeGpu->kernelParams.get();

    // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
                           pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
                           pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
    clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
                           pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);

    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
                       0, splinesCount,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
                       0, splinesCount,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
                       0, kernelParamsPtr->atoms.nAtoms * DIM,
                       pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
}

void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
{
    pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
}
void pme_gpu_init_internal(PmeGpu *pmeGpu)
{
#if GMX_GPU == GMX_GPU_CUDA
    // Prepare to use the device that this PME task was assigned earlier.
    // Other entities, such as CUDA timing events, are known to implicitly use the device context.
    CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
#endif

    /* Allocate the target-specific structures */
    pmeGpu->archSpecific.reset(new PmeGpuSpecific());
    pmeGpu->kernelParams.reset(new PmeGpuKernelParams());

    pmeGpu->archSpecific->performOutOfPlaceFFT = true;
    /* This should give better performance, according to the cuFFT documentation.
     * The performance seems to be the same though.
     * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
     */

    // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
    pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;

    // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
    if (GMX_GPU == GMX_GPU_CUDA)
    {
        /* WARNING: CUDA timings are incorrect with multiple streams.
         * This is the main reason why they are disabled by default.
         */
        // TODO: Consider turning on by default when we can detect nr of streams.
        pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
    }
    else if (GMX_GPU == GMX_GPU_OPENCL)
    {
        pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
    }

#if GMX_GPU == GMX_GPU_CUDA
    pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
#elif GMX_GPU == GMX_GPU_OPENCL
    pmeGpu->maxGridWidthX = INT32_MAX / 2;
    // TODO: is there no really global work size limit in OpenCL?
#endif

    /* Creating a PME GPU stream:
     * - default high priority with CUDA
     * - no priorities implemented yet with OpenCL; see #2532
     */
#if GMX_GPU == GMX_GPU_CUDA
    cudaError_t stat;
    int         highest_priority, lowest_priority;
    stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
    CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
    stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
                                        cudaStreamDefault, //cudaStreamNonBlocking,
                                        highest_priority);
    CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
#elif GMX_GPU == GMX_GPU_OPENCL
    cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
    cl_device_id                device_id       = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
    cl_int                      clError;
    pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
                                                           device_id, queueProperties, &clError);
    if (clError != CL_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
    }
#endif
}
void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
{
#if GMX_GPU == GMX_GPU_CUDA
    /* Destroy the CUDA stream */
    cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
    CU_RET_ERR(stat, "PME cudaStreamDestroy error");
#elif GMX_GPU == GMX_GPU_OPENCL
    cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
    if (clError != CL_SUCCESS)
    {
        gmx_warning("Failed to destroy PME command queue");
    }
#endif
}

void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
{
    if (pme_gpu_performs_FFT(pmeGpu))
    {
        pmeGpu->archSpecific->fftSetup.resize(0);
        for (int i = 0; i < pmeGpu->common->ngrids; i++)
        {
            pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
        }
    }
}

void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
{
    pmeGpu->archSpecific->fftSetup.resize(0);
}
int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
{
    if (order != c_pmeGpuOrder)
    {
        throw order;
    }
    constexpr int fixedOrder = c_pmeGpuOrder;
    GMX_UNUSED_VALUE(fixedOrder);

    const int atomWarpIndex = atomIndex % atomsPerWarp;
    const int warpIndex     = atomIndex / atomsPerWarp;
    int       indexBase, result;
    switch (atomsPerWarp)
    {
        case 1:
            indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
            break;

        case 2:
            indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
            break;

        case 4:
            indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
            break;

        case 8:
            indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
            break;

        default:
            GMX_THROW(gmx::NotImplementedError(gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in getSplineParamFullIndex", atomsPerWarp)));
    }
    return result;
}
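// Usage note for getSplineParamFullIndex() above: it maps a (dimension, spline index, atom) triple
// from the contiguous CPU layout to the warp-interleaved GPU layout. For example (illustrative
// values), with atomsPerWarp = 2 an atomIndex of 5 gives warpIndex = 5 / 2 = 2 and
// atomWarpIndex = 5 % 2 = 1 before the per-warp index is computed by getSplineParamIndex().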
PmeOutput pme_gpu_getEnergyAndVirial(const gmx_pme_t &pme)
{
    PmeOutput     output;

    const PmeGpu *pmeGpu = pme.gpu;
    for (int j = 0; j < c_virialAndEnergyCount; j++)
    {
        GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
    }

    unsigned int j = 0;
    output.coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output.coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output.coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output.coulombVirial_[XX][YY] = output.coulombVirial_[YY][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output.coulombVirial_[XX][ZZ] = output.coulombVirial_[ZZ][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output.coulombVirial_[YY][ZZ] = output.coulombVirial_[ZZ][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
    output.coulombEnergy_         = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];

    return output;
}
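// Layout note for pme_gpu_getEnergyAndVirial() above: the staging buffer is consumed as the
// XX, YY, ZZ, XY, XZ, YZ virial components followed by the energy, i.e. c_virialAndEnergyCount
// entries in total, which must match the order in which the solve kernel writes them.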
PmeOutput pme_gpu_getOutput(const gmx_pme_t &pme,
                            const int        flags)
{
    PmeGpu    *pmeGpu = pme.gpu;
    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (!haveComputedEnergyAndVirial)
    {
        // The caller knows from the flags that the energy and the virial are not usable
        PmeOutput output;
        output.forces_ = pmeGpu->staging.h_forces;
        return output;
    }

    if (pme_gpu_performs_solve(pmeGpu))
    {
        PmeOutput output = pme_gpu_getEnergyAndVirial(pme);
        output.forces_   = pmeGpu->staging.h_forces;
        return output;
    }
    else
    {
        PmeOutput output;
        get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
        output.forces_ = pmeGpu->staging.h_forces;
        return output;
    }
}

void pme_gpu_update_input_box(PmeGpu gmx_unused       *pmeGpu,
                              const matrix gmx_unused  box)
{
#if GMX_DOUBLE
    GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
#else
    matrix scaledBox;
    pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
    auto  *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
    GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
    matrix recipBox;
    gmx::invertBoxMatrix(scaledBox, recipBox);

    /* The GPU recipBox is transposed as compared to the CPU recipBox.
     * Spread uses matrix columns (while solve and gather use rows).
     * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
     */
    const real newRecipBox[DIM][DIM] =
    {
        {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
        {             0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
        {             0.0,              0.0, recipBox[ZZ][ZZ]}
    };
    memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
#endif
}
/*! \brief \libinternal
 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
 *
 * \param[in] pmeGpu            The PME GPU structure.
 */
static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
{
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);

    /* The grid size variants */
    for (int i = 0; i < DIM; i++)
    {
        kernelParamsPtr->grid.realGridSize[i]       = pmeGpu->common->nk[i];
        kernelParamsPtr->grid.realGridSizeFP[i]     = static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
        kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];

        // The complex grid currently uses no padding;
        // if it starts to do so, then another test should be added for that
        kernelParamsPtr->grid.complexGridSize[i]       = kernelParamsPtr->grid.realGridSize[i];
        kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
    }
    /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    if (!pme_gpu_performs_FFT(pmeGpu))
    {
        // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
        kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
    }

    /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
    kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
    kernelParamsPtr->grid.complexGridSize[ZZ]++;
    kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];

    pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
    pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
    pme_gpu_realloc_grids(pmeGpu);
    pme_gpu_reinit_3dfft(pmeGpu);
}
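// Example for the minor-dimension handling in pme_gpu_reinit_grids() above (illustrative values):
// a real grid of 64 points in Z yields 64 / 2 + 1 = 33 complex points after the real-to-complex
// FFT, and when the FFT runs on the CPU the padded real Z size becomes (64 / 2 + 1) * 2 = 66.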
/* Several GPU functions that refer to the CPU PME data live here.
 * We would like to keep these away from the GPU-framework specific code for clarity,
 * as well as compilation issues with MPI.
 */

/*! \brief \libinternal
 * Copies everything useful from the PME CPU to the PME GPU structure.
 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
 *
 * \param[in] pme         The PME structure.
 */
static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
{
    /* TODO: Consider refactoring the CPU PME code to use the same structure,
     * so that this function becomes 2 lines */
    PmeGpu *pmeGpu               = pme->gpu;
    pmeGpu->common->ngrids       = pme->ngrids;
    pmeGpu->common->epsilon_r    = pme->epsilon_r;
    pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
    pmeGpu->common->nk[XX]       = pme->nkx;
    pmeGpu->common->nk[YY]       = pme->nky;
    pmeGpu->common->nk[ZZ]       = pme->nkz;
    pmeGpu->common->pme_order    = pme->pme_order;
    if (pmeGpu->common->pme_order != c_pmeGpuOrder)
    {
        GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
    }
    for (int i = 0; i < DIM; i++)
    {
        pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
    }
    const int cellCount = c_pmeNeighborUnitcellCount;
    pmeGpu->common->fsh.resize(0);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
    pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
    pmeGpu->common->nn.resize(0);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
    pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
    pmeGpu->common->runMode   = pme->runMode;
    pmeGpu->common->boxScaler = pme->boxScaler;
}
/*! \libinternal \brief
 * Initializes the PME GPU data at the beginning of the run.
 * TODO: this should become PmeGpu::PmeGpu()
 *
 * \param[in,out] pme            The PME structure.
 * \param[in,out] gpuInfo        The GPU information structure.
 * \param[in]     pmeGpuProgram  The handle to the program/kernel data created outside (e.g. in unit tests/runner)
 */
static void pme_gpu_init(gmx_pme_t               *pme,
                         const gmx_device_info_t *gpuInfo,
                         PmeGpuProgramHandle      pmeGpuProgram)
{
    pme->gpu       = new PmeGpu();
    PmeGpu *pmeGpu = pme->gpu;
    changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
    pmeGpu->common = std::make_shared<PmeShared>();

    /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
    /* A convenience variable. */
    pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
    /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
    pmeGpu->settings.performGPUGather = true;

    pme_gpu_set_testing(pmeGpu, false);

    pmeGpu->deviceInfo = gpuInfo;
    GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
    pmeGpu->programHandle_ = pmeGpuProgram;

    pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();

    pme_gpu_init_internal(pmeGpu);
    pme_gpu_alloc_energy_virial(pmeGpu);

    pme_gpu_copy_common_data_from(pme);

    GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");

    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
}
void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const PmeAtomComm *atc,
                                        PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
    // (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;
    GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");

    real  *cpuSplineBuffer;
    float *h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default:
            GMX_THROW(gmx::InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default:
                    GMX_THROW(gmx::InternalError("Unknown layout transform"));
            }
        }
    }
}
void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
{
    GMX_ASSERT(gridSize != nullptr, "");
    GMX_ASSERT(paddedGridSize != nullptr, "");
    GMX_ASSERT(pmeGpu != nullptr, "");
    auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    for (int i = 0; i < DIM; i++)
    {
        (*gridSize)[i]       = kernelParamsPtr->grid.realGridSize[i];
        (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
    }
}
void pme_gpu_reinit(gmx_pme_t               *pme,
                    const gmx_device_info_t *gpuInfo,
                    PmeGpuProgramHandle      pmeGpuProgram)
{
    if (!pme_gpu_active(pme))
    {
        return;
    }

    if (!pme->gpu)
    {
        /* First-time initialization */
        pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
    }
    else
    {
        /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
        pme_gpu_copy_common_data_from(pme);
    }
    /* GPU FFT will only get used for a single rank.*/
    pme->gpu->settings.performGPUFFT   = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
    pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);

    /* Reinit active timers */
    pme_gpu_reinit_timings(pme->gpu);

    pme_gpu_reinit_grids(pme->gpu);
    // Note: if timing the reinit launch overhead becomes more relevant
    // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
    pme_gpu_reinit_computation(pme, nullptr);
    /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
     * update for mixed mode on grid switch. TODO: use shared recipbox field.
     */
    std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
}
void pme_gpu_destroy(PmeGpu *pmeGpu)
{
    /* Free lots of data */
    pme_gpu_free_energy_virial(pmeGpu);
    pme_gpu_free_bspline_values(pmeGpu);
    pme_gpu_free_forces(pmeGpu);
    pme_gpu_free_coordinates(pmeGpu);
    pme_gpu_free_coefficients(pmeGpu);
    pme_gpu_free_spline_data(pmeGpu);
    pme_gpu_free_grid_indices(pmeGpu);
    pme_gpu_free_fract_shifts(pmeGpu);
    pme_gpu_free_grids(pmeGpu);

    pme_gpu_destroy_3dfft(pmeGpu);

    /* Free the GPU-framework specific data last */
    pme_gpu_destroy_specific(pmeGpu);

    delete pmeGpu;
}

void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
{
    auto      *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
    kernelParamsPtr->atoms.nAtoms = nAtoms;
    const int  alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
    pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
    const int  nAtomsAlloc   = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
    const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
    pmeGpu->nAtomsAlloc = nAtomsAlloc;

#if GMX_DOUBLE
    GMX_RELEASE_ASSERT(false, "Only single precision supported");
    GMX_UNUSED_VALUE(charges);
#else
    pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
    /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
#endif

    if (haveToRealloc)
    {
        pme_gpu_realloc_coordinates(pmeGpu);
        pme_gpu_realloc_forces(pmeGpu);
        pme_gpu_realloc_spline_data(pmeGpu);
        pme_gpu_realloc_grid_indices(pmeGpu);
    }
}

void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
{
    int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;

    pme_gpu_start_timing(pmeGpu, timerId);
    pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
    pme_gpu_stop_timing(pmeGpu, timerId);
}
/*! \brief
 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
 * to minimize number of unused blocks.
 */
std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
{
    // How many maximum widths in X do we need (hopefully just one)
    const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
    // Trying to make things even
    const int colCount = (blockCount + minRowCount - 1) / minRowCount;
    GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
    GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
    return std::pair<int, int>(colCount, minRowCount);
}
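// Worked example for pmeGpuCreateGrid() above (illustrative values): blockCount = 100000 with
// maxGridWidthX = 65535 gives minRowCount = 2 and colCount = 50000, i.e. a 50000 x 2 launch grid
// that covers exactly 100000 blocks; in general at most minRowCount - 1 blocks go unused.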
void pme_gpu_spread(const PmeGpu    *pmeGpu,
                    int gmx_unused   gridIndex,
                    real            *h_grid,
                    bool             computeSplines,
                    bool             spreadCharges)
{
    GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
    const auto *kernelParamsPtr = pmeGpu->kernelParams.get();
    GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");

    const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;

    const int order = pmeGpu->common->pme_order;
    GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
    const int atomsPerBlock = blockSize / c_pmeSpreadGatherThreadsPerAtom;
    // TODO: pick smaller block size in runtime if needed
    // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
    // If doing so, change atomsPerBlock in the kernels as well.
    // TODO: test varying block sizes on modern arch-s as well
    // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
    // (for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
    GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    KernelLaunchConfig config;
    config.blockSize[0] = config.blockSize[1] = order;
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;
    config.stream       = pmeGpu->archSpecific->pmeStream;

    int timingId;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (computeSplines)
    {
        if (spreadCharges)
        {
            timingId  = gtPME_SPLINEANDSPREAD;
            kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
        }
        else
        {
            timingId  = gtPME_SPLINE;
            kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
        }
    }
    else
    {
        timingId  = gtPME_SPREAD;
        kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
                                                      &kernelParamsPtr->atoms.d_theta,
                                                      &kernelParamsPtr->atoms.d_dtheta,
                                                      &kernelParamsPtr->atoms.d_gridlineIndices,
                                                      &kernelParamsPtr->grid.d_realGrid,
                                                      &kernelParamsPtr->grid.d_fractShiftsTable,
                                                      &kernelParamsPtr->grid.d_gridlineIndicesTable,
                                                      &kernelParamsPtr->atoms.d_coefficients,
                                                      &kernelParamsPtr->atoms.d_coordinates);
#endif

    launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
    if (copyBackGrid)
    {
        pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
    }
    const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
    if (copyBackAtomData)
    {
        pme_gpu_copy_output_spread_atom_data(pmeGpu);
    }
}
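// Launch-geometry example for pme_gpu_spread() above (illustrative values): with a spread
// work-group size of 256 and 16 threads per atom, atomsPerBlock = 256 / 16 = 16 and the block
// dimensions are order x order x atomsPerBlock = 4 x 4 x 16 = 256 threads per block.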
void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
                   GridOrdering gridOrdering, bool computeEnergyAndVirial)
{
    const bool copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);

    auto  *kernelParamsPtr = pmeGpu->kernelParams.get();

    float *h_gridFloat = reinterpret_cast<float *>(h_grid);
    if (copyInputAndOutputGrid)
    {
        copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
                           0, pmeGpu->archSpecific->complexGridSize,
                           pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }

    int majorDim = -1, middleDim = -1, minorDim = -1;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            majorDim  = YY;
            middleDim = ZZ;
            minorDim  = XX;
            break;

        case GridOrdering::XYZ:
            majorDim  = XX;
            middleDim = YY;
            minorDim  = ZZ;
            break;

        default:
            GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
    }

    const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;

    const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
    const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
    const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
    int       cellsPerBlock;
    if (blocksPerGridLine == 1)
    {
        cellsPerBlock = gridLineSize * gridLinesPerBlock;
    }
    else
    {
        cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
    }
    const int warpSize  = pmeGpu->programHandle_->impl_->warpSize;
    const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;

    static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
                  "The CUDA solve energy kernels need at least 4 warps. "
                  "Here we launch at least half of the max warps.");

    KernelLaunchConfig config;
    config.blockSize[0] = blockSize;
    config.gridSize[0]  = blocksPerGridLine;
    // rounding up to full warps so that shuffle operations produce defined results
    config.gridSize[1]  = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
    config.gridSize[2]  = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
    config.stream       = pmeGpu->archSpecific->pmeStream;

    int timingId = gtPME_SOLVE;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (gridOrdering == GridOrdering::YZX)
    {
        kernelPtr = computeEnergyAndVirial ?
            pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
            pmeGpu->programHandle_->impl_->solveYZXKernel;
    }
    else if (gridOrdering == GridOrdering::XYZ)
    {
        kernelPtr = computeEnergyAndVirial ?
            pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
            pmeGpu->programHandle_->impl_->solveXYZKernel;
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto      *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
                                                      &kernelParamsPtr->grid.d_splineModuli,
                                                      &kernelParamsPtr->constants.d_virialAndEnergy,
                                                      &kernelParamsPtr->grid.d_fourierGrid);
#endif
    launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (computeEnergyAndVirial)
    {
        copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
                             0, c_virialAndEnergyCount,
                             pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }

    if (copyInputAndOutputGrid)
    {
        copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
                             0, pmeGpu->archSpecific->complexGridSize,
                             pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
    }
}
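// Block-sizing example for pme_gpu_solve() above (illustrative values): for a complex grid line of
// 33 cells in the minor dimension and a maximum work-group size of 256, gridLinesPerBlock = 7 and
// blocksPerGridLine = 1, so cellsPerBlock = 33 * 7 = 231, rounded up to a blockSize of 256 with a
// 32-wide warp.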
void pme_gpu_gather(PmeGpu                *pmeGpu,
                    PmeForceOutputHandling forceTreatment,
                    const float           *h_grid,
                    bool                   useGpuFPmeReduction)
{
    /* Copying the input CPU forces for reduction */
    if (forceTreatment != PmeForceOutputHandling::Set)
    {
        pme_gpu_copy_input_forces(pmeGpu);
    }

    if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
    {
        pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
    }

    if (pme_gpu_is_testing(pmeGpu))
    {
        pme_gpu_copy_input_gather_atom_data(pmeGpu);
    }

    const size_t blockSize     = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
    const int    atomsPerBlock = blockSize / c_pmeSpreadGatherThreadsPerAtom;
    GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");

    const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

    const int order = pmeGpu->common->pme_order;
    GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");

    KernelLaunchConfig config;
    config.blockSize[0] = config.blockSize[1] = order;
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;
    config.stream       = pmeGpu->archSpecific->pmeStream;

    // TODO test different cache configs

    int timingId = gtPME_GATHER;
    // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
        pmeGpu->programHandle_->impl_->gatherKernel :
        pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;

    pme_gpu_start_timing(pmeGpu, timingId);
    auto       *timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    const auto *kernelParamsPtr = pmeGpu->kernelParams.get();
#if c_canEmbedBuffers
    const auto  kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto  kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
                                                       &kernelParamsPtr->atoms.d_coefficients,
                                                       &kernelParamsPtr->grid.d_realGrid,
                                                       &kernelParamsPtr->atoms.d_theta,
                                                       &kernelParamsPtr->atoms.d_dtheta,
                                                       &kernelParamsPtr->atoms.d_gridlineIndices,
                                                       &kernelParamsPtr->atoms.d_forces);
#endif
    launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

    if (useGpuFPmeReduction)
    {
        pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream);
    }
    else
    {
        pme_gpu_copy_output_forces(pmeGpu);
    }
}
void *pme_gpu_get_kernelparam_coordinates(const PmeGpu *pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return pmeGpu->kernelParams->atoms.d_coordinates;
    }
    else
    {
        return nullptr;
    }
}

void *pme_gpu_get_kernelparam_forces(const PmeGpu *pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return pmeGpu->kernelParams->atoms.d_forces;
    }
    else
    {
        return nullptr;
    }
}

GpuEventSynchronizer *pme_gpu_get_forces_ready_synchronizer(const PmeGpu *pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return &pmeGpu->archSpecific->pmeForcesReady;
    }
    else
    {
        return nullptr;
    }
}