Link GPU coordinate producer and consumer tasks
[gromacs.git] / src / gromacs / ewald / pme_gpu_internal.cpp
blob e98ae3a223c8be7ee88cd7aad5682b32866f2442
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 /*! \internal \file
38 * \brief This file contains internal function implementations
39 * for performing the PME calculations on GPU.
41 * Note that this file is compiled as regular C++ source in OpenCL builds, but
42 * it is treated as CUDA source in CUDA-enabled GPU builds.
44 * \author Aleksei Iupinov <a.yupinov@gmail.com>
45 * \ingroup module_ewald
48 #include "gmxpre.h"
50 #include "pme_gpu_internal.h"
52 #include "config.h"
54 #include <list>
55 #include <memory>
56 #include <string>
58 #include "gromacs/ewald/ewald_utils.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/math/invertmatrix.h"
61 #include "gromacs/math/units.h"
62 #include "gromacs/timing/gpu_timing.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/logger.h"
67 #include "gromacs/utility/stringutil.h"
69 #if GMX_GPU == GMX_GPU_CUDA
70 #include "gromacs/gpu_utils/pmalloc_cuda.h"
72 #include "pme.cuh"
73 #elif GMX_GPU == GMX_GPU_OPENCL
74 #include "gromacs/gpu_utils/gmxopencl.h"
75 #endif
77 #include "gromacs/ewald/pme.h"
79 #include "pme_gpu_3dfft.h"
80 #include "pme_gpu_constants.h"
81 #include "pme_gpu_program_impl.h"
82 #include "pme_gpu_timings.h"
83 #include "pme_gpu_types.h"
84 #include "pme_gpu_types_host.h"
85 #include "pme_gpu_types_host_impl.h"
86 #include "pme_gpu_utils.h"
87 #include "pme_grid.h"
88 #include "pme_internal.h"
89 #include "pme_solve.h"
91 /*! \internal \brief
92 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
94 * \param[in] pmeGpu The PME GPU structure.
95 * \returns The pointer to the kernel parameters.
97 static PmeGpuKernelParamsBase *pme_gpu_get_kernel_params_base_ptr(const PmeGpu *pmeGpu)
99 // reinterpret_cast is needed because the derived CUDA structure is not known in this file
100 auto *kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase *>(pmeGpu->kernelParams.get());
101 return kernelParamsPtr;
104 int pme_gpu_get_atom_data_alignment(const PmeGpu * /*unused*/)
106 //TODO: this can be simplified, as c_pmeAtomDataAlignment is now constant
107 if (c_usePadding)
109 return c_pmeAtomDataAlignment;
111 else
113 return 0;
118 int pme_gpu_get_atoms_per_warp(const PmeGpu *pmeGpu)
120 return pmeGpu->programHandle_->impl_->warpSize / c_pmeSpreadGatherThreadsPerAtom;
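/* Illustrative note, not part of the upstream file: the atoms-per-warp count is simply
 * warpSize / c_pmeSpreadGatherThreadsPerAtom. Assuming a 32-wide warp and 16 threads per
 * atom (order squared for the order-4 kernels, which is an assumption here), this gives
 * 32 / 16 = 2 atoms per warp; a 64-wide warp would give 4. */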
123 void pme_gpu_synchronize(const PmeGpu *pmeGpu)
125 gpuStreamSynchronize(pmeGpu->archSpecific->pmeStream);
128 void pme_gpu_alloc_energy_virial(PmeGpu *pmeGpu)
130 const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
131 allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy, c_virialAndEnergyCount, pmeGpu->archSpecific->context);
132 pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_virialAndEnergy), energyAndVirialSize);
135 void pme_gpu_free_energy_virial(PmeGpu *pmeGpu)
137 freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy);
138 pfree(pmeGpu->staging.h_virialAndEnergy);
139 pmeGpu->staging.h_virialAndEnergy = nullptr;
142 void pme_gpu_clear_energy_virial(const PmeGpu *pmeGpu)
144 clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy, 0,
145 c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream);
148 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu *pmeGpu)
 150     const int       splineValuesOffset[DIM] = {
 151         0,
 152         pmeGpu->kernelParams->grid.realGridSize[XX],
 153         pmeGpu->kernelParams->grid.realGridSize[XX] + pmeGpu->kernelParams->grid.realGridSize[YY]
155 memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
157 const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX] +
158 pmeGpu->kernelParams->grid.realGridSize[YY] +
159 pmeGpu->kernelParams->grid.realGridSize[ZZ];
160 const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize);
161 reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, newSplineValuesSize,
162 &pmeGpu->archSpecific->splineValuesSize, &pmeGpu->archSpecific->splineValuesSizeAlloc, pmeGpu->archSpecific->context);
163 if (shouldRealloc)
165 /* Reallocate the host buffer */
166 pfree(pmeGpu->staging.h_splineModuli);
167 pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_splineModuli), newSplineValuesSize * sizeof(float));
169 for (int i = 0; i < DIM; i++)
171 memcpy(pmeGpu->staging.h_splineModuli + splineValuesOffset[i], pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
173 /* TODO: pin original buffer instead! */
174 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli, pmeGpu->staging.h_splineModuli,
175 0, newSplineValuesSize,
176 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
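/* Illustrative example, not from the upstream file: the three B-spline moduli arrays are
 * concatenated into one buffer, X first, then Y, then Z. For a hypothetical 64x64x60 real
 * grid the buffer holds 64 + 64 + 60 = 188 floats, with the Y moduli starting at offset 64
 * and the Z moduli at offset 128, matching splineValuesOffset above. */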
179 void pme_gpu_free_bspline_values(const PmeGpu *pmeGpu)
181 pfree(pmeGpu->staging.h_splineModuli);
182 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli);
185 void pme_gpu_realloc_forces(PmeGpu *pmeGpu)
187 const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
188 GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
189 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
190 &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc, pmeGpu->archSpecific->context);
191 pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
192 pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
195 void pme_gpu_free_forces(const PmeGpu *pmeGpu)
197 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
200 void pme_gpu_copy_input_forces(PmeGpu *pmeGpu)
202 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
203 float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
204 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat,
205 0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
206 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
209 void pme_gpu_copy_output_forces(PmeGpu *pmeGpu)
211 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
212 float *h_forcesFloat = reinterpret_cast<float *>(pmeGpu->staging.h_forces.data());
213 copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces,
214 0, DIM * pmeGpu->kernelParams->atoms.nAtoms,
215 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
218 void pme_gpu_realloc_coordinates(const PmeGpu *pmeGpu)
220 const size_t newCoordinatesSize = pmeGpu->nAtomsAlloc * DIM;
221 GMX_ASSERT(newCoordinatesSize > 0, "Bad number of atoms in PME GPU");
222 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates, newCoordinatesSize,
223 &pmeGpu->archSpecific->coordinatesSize, &pmeGpu->archSpecific->coordinatesSizeAlloc, pmeGpu->archSpecific->context);
224 if (c_usePadding)
226 const size_t paddingIndex = DIM * pmeGpu->kernelParams->atoms.nAtoms;
227 const size_t paddingCount = DIM * pmeGpu->nAtomsAlloc - paddingIndex;
228 if (paddingCount > 0)
230 clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coordinates, paddingIndex,
231 paddingCount, pmeGpu->archSpecific->pmeStream);
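/* Illustrative example, not from the upstream file, with hypothetical sizes: for
 * nAtoms = 1000 padded to nAtomsAlloc = 1024, paddingIndex = 3 * 1000 = 3000 and
 * paddingCount = 3 * 1024 - 3000 = 72, so the 72 trailing floats of d_coordinates are
 * zeroed and the padding atoms cannot contribute garbage coordinates. */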
236 void pme_gpu_free_coordinates(const PmeGpu *pmeGpu)
238 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coordinates);
241 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu *pmeGpu, const float *h_coefficients)
243 GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
244 const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
245 GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
246 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, newCoefficientsSize,
247 &pmeGpu->archSpecific->coefficientsSize, &pmeGpu->archSpecific->coefficientsSizeAlloc, pmeGpu->archSpecific->context);
248 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients, const_cast<float *>(h_coefficients),
249 0, pmeGpu->kernelParams->atoms.nAtoms,
250 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
251 if (c_usePadding)
253 const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
254 const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
255 if (paddingCount > 0)
257 clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients, paddingIndex,
258 paddingCount, pmeGpu->archSpecific->pmeStream);
263 void pme_gpu_free_coefficients(const PmeGpu *pmeGpu)
265 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients);
268 void pme_gpu_realloc_spline_data(PmeGpu *pmeGpu)
270 const int order = pmeGpu->common->pme_order;
271 const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
272 const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
273 const int newSplineDataSize = DIM * order * nAtomsPadded;
274 GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
275 /* Two arrays of the same size */
276 const bool shouldRealloc = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
277 int currentSizeTemp = pmeGpu->archSpecific->splineDataSize;
278 int currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
279 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize,
280 &currentSizeTemp, &currentSizeTempAlloc, pmeGpu->archSpecific->context);
281 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
282 &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc, pmeGpu->archSpecific->context);
283 // the host side reallocation
284 if (shouldRealloc)
286 pfree(pmeGpu->staging.h_theta);
287 pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
288 pfree(pmeGpu->staging.h_dtheta);
289 pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
293 void pme_gpu_free_spline_data(const PmeGpu *pmeGpu)
295 /* Two arrays of the same size */
296 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
297 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
298 pfree(pmeGpu->staging.h_theta);
299 pfree(pmeGpu->staging.h_dtheta);
302 void pme_gpu_realloc_grid_indices(PmeGpu *pmeGpu)
304 const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
305 GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
306 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
307 &pmeGpu->archSpecific->gridlineIndicesSize, &pmeGpu->archSpecific->gridlineIndicesSizeAlloc, pmeGpu->archSpecific->context);
308 pfree(pmeGpu->staging.h_gridlineIndices);
309 pmalloc(reinterpret_cast<void **>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
312 void pme_gpu_free_grid_indices(const PmeGpu *pmeGpu)
314 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
315 pfree(pmeGpu->staging.h_gridlineIndices);
318 void pme_gpu_realloc_grids(PmeGpu *pmeGpu)
320 auto *kernelParamsPtr = pmeGpu->kernelParams.get();
321 const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX] *
322 kernelParamsPtr->grid.realGridSizePadded[YY] *
323 kernelParamsPtr->grid.realGridSizePadded[ZZ];
324 const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX] *
325 kernelParamsPtr->grid.complexGridSizePadded[YY] *
326 kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
 327     // Multiplied by 2 because the complex grid size is counted in complex numbers, while all allocations/pointers are float
328 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
330 /* 2 separate grids */
331 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, newComplexGridSize,
332 &pmeGpu->archSpecific->complexGridSize, &pmeGpu->archSpecific->complexGridSizeAlloc, pmeGpu->archSpecific->context);
333 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newRealGridSize,
334 &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
336 else
338 /* A single buffer so that any grid will fit */
339 const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
340 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid, newGridsSize,
341 &pmeGpu->archSpecific->realGridSize, &pmeGpu->archSpecific->realGridSizeAlloc, pmeGpu->archSpecific->context);
342 kernelParamsPtr->grid.d_fourierGrid = kernelParamsPtr->grid.d_realGrid;
343 pmeGpu->archSpecific->complexGridSize = pmeGpu->archSpecific->realGridSize;
344 // the size might get used later for copying the grid
348 void pme_gpu_free_grids(const PmeGpu *pmeGpu)
350 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
352 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid);
354 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid);
357 void pme_gpu_clear_grids(const PmeGpu *pmeGpu)
359 clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid, 0,
360 pmeGpu->archSpecific->realGridSize, pmeGpu->archSpecific->pmeStream);
363 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu *pmeGpu)
365 pme_gpu_free_fract_shifts(pmeGpu);
367 auto *kernelParamsPtr = pmeGpu->kernelParams.get();
369 const int nx = kernelParamsPtr->grid.realGridSize[XX];
370 const int ny = kernelParamsPtr->grid.realGridSize[YY];
371 const int nz = kernelParamsPtr->grid.realGridSize[ZZ];
372 const int cellCount = c_pmeNeighborUnitcellCount;
373 const int gridDataOffset[DIM] = {0, cellCount * nx, cellCount * (nx + ny)};
375 memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
377 const int newFractShiftsSize = cellCount * (nx + ny + nz);
379 #if GMX_GPU == GMX_GPU_CUDA
380 initParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
381 kernelParamsPtr->fractShiftsTableTexture,
382 pmeGpu->common->fsh.data(),
383 newFractShiftsSize);
385 initParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
386 kernelParamsPtr->gridlineIndicesTableTexture,
387 pmeGpu->common->nn.data(),
388 newFractShiftsSize);
389 #elif GMX_GPU == GMX_GPU_OPENCL
390 // No dedicated texture routines....
391 allocateDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, newFractShiftsSize, pmeGpu->archSpecific->context);
392 allocateDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, newFractShiftsSize, pmeGpu->archSpecific->context);
393 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable, pmeGpu->common->fsh.data(),
394 0, newFractShiftsSize,
395 pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
396 copyToDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable, pmeGpu->common->nn.data(),
397 0, newFractShiftsSize,
398 pmeGpu->archSpecific->pmeStream, GpuApiCallBehavior::Async, nullptr);
399 #endif
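/* Illustrative example, not from the upstream file: both lookup tables are the three
 * per-dimension arrays concatenated, each dimension occupying cellCount * n entries.
 * Assuming cellCount = 5 and a 64x64x64 grid, each table holds 5 * (64 + 64 + 64) = 960
 * entries, with the Y block starting at offset 320 and the Z block at 640, as recorded in
 * grid.tablesOffsets above. */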
402 void pme_gpu_free_fract_shifts(const PmeGpu *pmeGpu)
404 auto *kernelParamsPtr = pmeGpu->kernelParams.get();
405 #if GMX_GPU == GMX_GPU_CUDA
406 destroyParamLookupTable(kernelParamsPtr->grid.d_fractShiftsTable,
407 kernelParamsPtr->fractShiftsTableTexture);
408 destroyParamLookupTable(kernelParamsPtr->grid.d_gridlineIndicesTable,
409 kernelParamsPtr->gridlineIndicesTableTexture);
410 #elif GMX_GPU == GMX_GPU_OPENCL
411 freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
412 freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
413 #endif
416 bool pme_gpu_stream_query(const PmeGpu *pmeGpu)
418 return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream);
421 void pme_gpu_copy_input_gather_grid(const PmeGpu *pmeGpu, float *h_grid)
423 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid, h_grid,
424 0, pmeGpu->archSpecific->realGridSize,
425 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
428 void pme_gpu_copy_output_spread_grid(const PmeGpu *pmeGpu, float *h_grid)
430 copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid,
431 0, pmeGpu->archSpecific->realGridSize,
432 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
433 pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream);
436 void pme_gpu_copy_output_spread_atom_data(const PmeGpu *pmeGpu)
438 const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
439 const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
440 const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
441 auto *kernelParamsPtr = pmeGpu->kernelParams.get();
442 copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta,
443 0, splinesCount,
444 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
445 copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta,
446 0, splinesCount,
447 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
448 copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
449 0, kernelParamsPtr->atoms.nAtoms * DIM,
450 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
453 void pme_gpu_copy_input_gather_atom_data(const PmeGpu *pmeGpu)
455 const int alignment = pme_gpu_get_atoms_per_warp(pmeGpu);
456 const size_t nAtomsPadded = ((pmeGpu->nAtomsAlloc + alignment - 1) / alignment) * alignment;
457 const size_t splinesCount = DIM * nAtomsPadded * pmeGpu->common->pme_order;
458 auto *kernelParamsPtr = pmeGpu->kernelParams.get();
459 if (c_usePadding)
461 // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
462 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0,
463 pmeGpu->nAtomsAlloc * DIM, pmeGpu->archSpecific->pmeStream);
464 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
465 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
466 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
467 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM, pmeGpu->archSpecific->pmeStream);
469 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta,
470 0, splinesCount,
471 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
472 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta,
473 0, splinesCount,
474 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
475 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
476 0, kernelParamsPtr->atoms.nAtoms * DIM,
477 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
480 void pme_gpu_sync_spread_grid(const PmeGpu *pmeGpu)
482 pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
485 void pme_gpu_init_internal(PmeGpu *pmeGpu)
487 #if GMX_GPU == GMX_GPU_CUDA
488 // Prepare to use the device that this PME task was assigned earlier.
489 // Other entities, such as CUDA timing events, are known to implicitly use the device context.
490 CU_RET_ERR(cudaSetDevice(pmeGpu->deviceInfo->id), "Switching to PME CUDA device");
491 #endif
493 /* Allocate the target-specific structures */
494 pmeGpu->archSpecific.reset(new PmeGpuSpecific());
495 pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
497 pmeGpu->archSpecific->performOutOfPlaceFFT = true;
498 /* This should give better performance, according to the cuFFT documentation.
499 * The performance seems to be the same though.
500 * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
503 // TODO: this is just a convenient reuse because programHandle_ currently is in charge of creating context
504 pmeGpu->archSpecific->context = pmeGpu->programHandle_->impl_->context;
506 // timing enabling - TODO put this in gpu_utils (even though generally this is just option handling?) and reuse in NB
507 if (GMX_GPU == GMX_GPU_CUDA)
509 /* WARNING: CUDA timings are incorrect with multiple streams.
510 * This is the main reason why they are disabled by default.
512 // TODO: Consider turning on by default when we can detect nr of streams.
513 pmeGpu->archSpecific->useTiming = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
515 else if (GMX_GPU == GMX_GPU_OPENCL)
517 pmeGpu->archSpecific->useTiming = (getenv("GMX_DISABLE_GPU_TIMING") == nullptr);
520 #if GMX_GPU == GMX_GPU_CUDA
521 pmeGpu->maxGridWidthX = pmeGpu->deviceInfo->prop.maxGridSize[0];
522 #elif GMX_GPU == GMX_GPU_OPENCL
523 pmeGpu->maxGridWidthX = INT32_MAX / 2;
 524     // TODO: is there really no global work size limit in OpenCL?
525 #endif
527 /* Creating a PME GPU stream:
528 * - default high priority with CUDA
529 * - no priorities implemented yet with OpenCL; see #2532
531 #if GMX_GPU == GMX_GPU_CUDA
532 cudaError_t stat;
533 int highest_priority, lowest_priority;
534 stat = cudaDeviceGetStreamPriorityRange(&lowest_priority, &highest_priority);
535 CU_RET_ERR(stat, "PME cudaDeviceGetStreamPriorityRange failed");
536 stat = cudaStreamCreateWithPriority(&pmeGpu->archSpecific->pmeStream,
537 cudaStreamDefault, //cudaStreamNonBlocking,
538 highest_priority);
539 CU_RET_ERR(stat, "cudaStreamCreateWithPriority on the PME stream failed");
540 #elif GMX_GPU == GMX_GPU_OPENCL
541 cl_command_queue_properties queueProperties = pmeGpu->archSpecific->useTiming ? CL_QUEUE_PROFILING_ENABLE : 0;
542 cl_device_id device_id = pmeGpu->deviceInfo->ocl_gpu_id.ocl_device_id;
543 cl_int clError;
544 pmeGpu->archSpecific->pmeStream = clCreateCommandQueue(pmeGpu->archSpecific->context,
545 device_id, queueProperties, &clError);
546 if (clError != CL_SUCCESS)
548 GMX_THROW(gmx::InternalError("Failed to create PME command queue"));
550 #endif
553 void pme_gpu_destroy_specific(const PmeGpu *pmeGpu)
555 #if GMX_GPU == GMX_GPU_CUDA
556 /* Destroy the CUDA stream */
557 cudaError_t stat = cudaStreamDestroy(pmeGpu->archSpecific->pmeStream);
558 CU_RET_ERR(stat, "PME cudaStreamDestroy error");
559 #elif GMX_GPU == GMX_GPU_OPENCL
560 cl_int clError = clReleaseCommandQueue(pmeGpu->archSpecific->pmeStream);
561 if (clError != CL_SUCCESS)
563 gmx_warning("Failed to destroy PME command queue");
565 #endif
568 void pme_gpu_reinit_3dfft(const PmeGpu *pmeGpu)
570 if (pme_gpu_performs_FFT(pmeGpu))
572 pmeGpu->archSpecific->fftSetup.resize(0);
573 for (int i = 0; i < pmeGpu->common->ngrids; i++)
575 pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu));
580 void pme_gpu_destroy_3dfft(const PmeGpu *pmeGpu)
582 pmeGpu->archSpecific->fftSetup.resize(0);
585 int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
587 if (order != c_pmeGpuOrder)
589 throw order;
591 constexpr int fixedOrder = c_pmeGpuOrder;
592 GMX_UNUSED_VALUE(fixedOrder);
594 const int atomWarpIndex = atomIndex % atomsPerWarp;
595 const int warpIndex = atomIndex / atomsPerWarp;
596 int indexBase, result;
597 switch (atomsPerWarp)
599 case 1:
600 indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
601 result = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
602 break;
604 case 2:
605 indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
606 result = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
607 break;
609 case 4:
610 indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
611 result = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
612 break;
614 case 8:
615 indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
616 result = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
617 break;
619 default:
620 GMX_THROW(gmx::NotImplementedError(gmx::formatString("Test function call not unrolled for atomsPerWarp = %d in getSplineParamFullIndex", atomsPerWarp)));
622 return result;
625 void pme_gpu_getEnergyAndVirial(const gmx_pme_t &pme,
626 PmeOutput *output)
628 const PmeGpu *pmeGpu = pme.gpu;
629 for (int j = 0; j < c_virialAndEnergyCount; j++)
631 GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[j]), "PME GPU produces incorrect energy/virial.");
634 unsigned int j = 0;
635 output->coulombVirial_[XX][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
636 output->coulombVirial_[YY][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
637 output->coulombVirial_[ZZ][ZZ] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
638 output->coulombVirial_[XX][YY] = output->coulombVirial_[YY][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
639 output->coulombVirial_[XX][ZZ] = output->coulombVirial_[ZZ][XX] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
640 output->coulombVirial_[YY][ZZ] = output->coulombVirial_[ZZ][YY] = 0.25F * pmeGpu->staging.h_virialAndEnergy[j++];
641 output->coulombEnergy_ = 0.5F * pmeGpu->staging.h_virialAndEnergy[j++];
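/* Illustrative note, not part of the upstream file: the unpacking above reads seven staged
 * floats per reduction - six distinct virial components (XX, YY, ZZ, XY, XZ, YZ, scaled by
 * 0.25F here, with the off-diagonal values mirrored) followed by the energy (scaled by
 * 0.5F) - so c_virialAndEnergyCount must be at least 7 for this code to be valid. */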
644 /*! \brief Sets the force-related members in \p output
646 * \param[in] pmeGpu PME GPU data structure
647 * \param[out] output Pointer to PME output data structure
649 static void pme_gpu_getForceOutput(PmeGpu *pmeGpu,
650 PmeOutput *output)
652 output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
653 if (output->haveForceOutput_)
655 output->forces_ = pmeGpu->staging.h_forces;
659 PmeOutput pme_gpu_getOutput(const gmx_pme_t &pme,
660 const int flags)
662 PmeGpu *pmeGpu = pme.gpu;
663 const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
665 PmeOutput output;
667 pme_gpu_getForceOutput(pmeGpu, &output);
669 // The caller knows from the flags that the energy and the virial are not usable
670 // on the else branch
671 if (haveComputedEnergyAndVirial)
673 if (pme_gpu_performs_solve(pmeGpu))
675 pme_gpu_getEnergyAndVirial(pme, &output);
677 else
679 get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
682 return output;
685 void pme_gpu_update_input_box(PmeGpu gmx_unused *pmeGpu,
686 const matrix gmx_unused box)
688 #if GMX_DOUBLE
689 GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
690 #else
691 matrix scaledBox;
692 pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
693 auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
694 kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
695 GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
696 matrix recipBox;
697 gmx::invertBoxMatrix(scaledBox, recipBox);
699 /* The GPU recipBox is transposed as compared to the CPU recipBox.
700 * Spread uses matrix columns (while solve and gather use rows).
701 * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
703 const real newRecipBox[DIM][DIM] =
705 {recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX]},
706 { 0.0, recipBox[YY][YY], recipBox[ZZ][YY]},
707 { 0.0, 0.0, recipBox[ZZ][ZZ]}
709 memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
710 #endif
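/* Illustrative example, not from the upstream file: for a cubic scaled box with edge
 * L = 3.0 nm, boxVolume = 27.0 nm^3 and recipBox is diag(1/3, 1/3, 1/3) nm^-1, so the
 * transposition used for the GPU layout is a no-op; only for triclinic boxes does the
 * transposed storage above differ from the CPU recipBox. */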
713 /*! \brief \libinternal
714 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
716 * \param[in] pmeGpu The PME GPU structure.
718 static void pme_gpu_reinit_grids(PmeGpu *pmeGpu)
720 auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
721 kernelParamsPtr->grid.ewaldFactor = (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
723 /* The grid size variants */
724 for (int i = 0; i < DIM; i++)
726 kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
727 kernelParamsPtr->grid.realGridSizeFP[i] = static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
728 kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
730 // The complex grid currently uses no padding;
731 // if it starts to do so, then another test should be added for that
732 kernelParamsPtr->grid.complexGridSize[i] = kernelParamsPtr->grid.realGridSize[i];
733 kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
735 /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
736 if (!pme_gpu_performs_FFT(pmeGpu))
 738         // This allows the GPU spreading grid and the CPU fftgrid to have the same layout, so that the data can be copied directly
739 kernelParamsPtr->grid.realGridSizePadded[ZZ] = (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
742 /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
743 kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
744 kernelParamsPtr->grid.complexGridSize[ZZ]++;
745 kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
747 pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
748 pme_gpu_realloc_and_copy_bspline_values(pmeGpu);
749 pme_gpu_realloc_grids(pmeGpu);
750 pme_gpu_reinit_3dfft(pmeGpu);
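/* Illustrative example, not from the upstream file, of the R2C sizing above: for a real
 * grid with 64 points along Z, the complex grid has 64 / 2 + 1 = 33 elements in that
 * dimension. When the FFT runs on the CPU instead, the padded real Z size becomes
 * (64 / 2 + 1) * 2 = 66 floats, so the GPU spreading grid and the CPU fftgrid share a
 * layout and can be copied directly. */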
753 /* Several GPU functions that refer to the CPU PME data live here.
754 * We would like to keep these away from the GPU-framework specific code for clarity,
755 * as well as compilation issues with MPI.
758 /*! \brief \libinternal
759 * Copies everything useful from the PME CPU to the PME GPU structure.
760 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
762 * \param[in] pme The PME structure.
764 static void pme_gpu_copy_common_data_from(const gmx_pme_t *pme)
766 /* TODO: Consider refactoring the CPU PME code to use the same structure,
767 * so that this function becomes 2 lines */
768 PmeGpu *pmeGpu = pme->gpu;
769 pmeGpu->common->ngrids = pme->ngrids;
770 pmeGpu->common->epsilon_r = pme->epsilon_r;
771 pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
772 pmeGpu->common->nk[XX] = pme->nkx;
773 pmeGpu->common->nk[YY] = pme->nky;
774 pmeGpu->common->nk[ZZ] = pme->nkz;
775 pmeGpu->common->pme_order = pme->pme_order;
776 if (pmeGpu->common->pme_order != c_pmeGpuOrder)
778 GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
780 for (int i = 0; i < DIM; i++)
782 pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
784 const int cellCount = c_pmeNeighborUnitcellCount;
785 pmeGpu->common->fsh.resize(0);
786 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
787 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
788 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
789 pmeGpu->common->nn.resize(0);
790 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
791 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
792 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
793 pmeGpu->common->runMode = pme->runMode;
794 pmeGpu->common->boxScaler = pme->boxScaler;
797 /*! \libinternal \brief
798 * Initializes the PME GPU data at the beginning of the run.
799 * TODO: this should become PmeGpu::PmeGpu()
801 * \param[in,out] pme The PME structure.
802 * \param[in,out] gpuInfo The GPU information structure.
803 * \param[in] pmeGpuProgram The handle to the program/kernel data created outside (e.g. in unit tests/runner)
805 static void pme_gpu_init(gmx_pme_t *pme,
806 const gmx_device_info_t *gpuInfo,
807 PmeGpuProgramHandle pmeGpuProgram)
809 pme->gpu = new PmeGpu();
810 PmeGpu *pmeGpu = pme->gpu;
811 changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
812 pmeGpu->common = std::make_shared<PmeShared>();
814 /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
815 /* A convenience variable. */
816 pmeGpu->settings.useDecomposition = (pme->nnodes == 1);
817 /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
818 pmeGpu->settings.performGPUGather = true;
819 // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
820 pmeGpu->settings.useGpuForceReduction = false;
822 pme_gpu_set_testing(pmeGpu, false);
824 pmeGpu->deviceInfo = gpuInfo;
825 GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
826 pmeGpu->programHandle_ = pmeGpuProgram;
828 pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
830 pme_gpu_init_internal(pmeGpu);
831 pme_gpu_alloc_energy_virial(pmeGpu);
833 pme_gpu_copy_common_data_from(pme);
835 GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
837 auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
838 kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
841 void pme_gpu_transform_spline_atom_data(const PmeGpu *pmeGpu, const PmeAtomComm *atc,
842 PmeSplineDataType type, int dimIndex, PmeLayoutTransform transform)
844 // The GPU atom spline data is laid out in a different way currently than the CPU one.
845 // This function converts the data from GPU to CPU layout (in the host memory).
846 // It is only intended for testing purposes so far.
847 // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their performance
848 // (e.g. spreading on GPU, gathering on CPU).
849 GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
850 const uintmax_t threadIndex = 0;
851 const auto atomCount = pme_gpu_get_kernel_params_base_ptr(pmeGpu)->atoms.nAtoms;
852 const auto atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
853 const auto pmeOrder = pmeGpu->common->pme_order;
854 GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");
856 real *cpuSplineBuffer;
857 float *h_splineBuffer;
858 switch (type)
860 case PmeSplineDataType::Values:
861 cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
862 h_splineBuffer = pmeGpu->staging.h_theta;
863 break;
865 case PmeSplineDataType::Derivatives:
866 cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
867 h_splineBuffer = pmeGpu->staging.h_dtheta;
868 break;
870 default:
871 GMX_THROW(gmx::InternalError("Unknown spline data type"));
874 for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
876 for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
878 const auto gpuValueIndex = getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
879 const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
880 GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder, "Atom spline data index out of bounds (while transforming GPU data layout for host)");
881 switch (transform)
883 case PmeLayoutTransform::GpuToHost:
884 cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
885 break;
887 case PmeLayoutTransform::HostToGpu:
888 h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
889 break;
891 default:
892 GMX_THROW(gmx::InternalError("Unknown layout transform"));
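/* Illustrative usage sketch, not part of the upstream file: a test that spreads on the GPU
 * and gathers on the CPU could, after pme_gpu_copy_output_spread_atom_data(), call for each
 * dimension d
 *   pme_gpu_transform_spline_atom_data(pmeGpu, atc, PmeSplineDataType::Values,      d, PmeLayoutTransform::GpuToHost);
 *   pme_gpu_transform_spline_atom_data(pmeGpu, atc, PmeSplineDataType::Derivatives, d, PmeLayoutTransform::GpuToHost);
 * so that the CPU gather sees theta/dtheta in its own layout. */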
898 void pme_gpu_get_real_grid_sizes(const PmeGpu *pmeGpu, gmx::IVec *gridSize, gmx::IVec *paddedGridSize)
900 GMX_ASSERT(gridSize != nullptr, "");
901 GMX_ASSERT(paddedGridSize != nullptr, "");
902 GMX_ASSERT(pmeGpu != nullptr, "");
903 auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
904 for (int i = 0; i < DIM; i++)
906 (*gridSize)[i] = kernelParamsPtr->grid.realGridSize[i];
907 (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
911 void pme_gpu_reinit(gmx_pme_t *pme,
912 const gmx_device_info_t *gpuInfo,
913 PmeGpuProgramHandle pmeGpuProgram)
915 if (!pme_gpu_active(pme))
917 return;
920 if (!pme->gpu)
922 /* First-time initialization */
923 pme_gpu_init(pme, gpuInfo, pmeGpuProgram);
925 else
927 /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
928 pme_gpu_copy_common_data_from(pme);
930 /* GPU FFT will only get used for a single rank.*/
931 pme->gpu->settings.performGPUFFT = (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme_gpu_uses_dd(pme->gpu);
932 pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
934 /* Reinit active timers */
935 pme_gpu_reinit_timings(pme->gpu);
937 pme_gpu_reinit_grids(pme->gpu);
938 // Note: if timing the reinit launch overhead becomes more relevant
 939     // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
940 pme_gpu_reinit_computation(pme, nullptr);
941 /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
942 * update for mixed mode on grid switch. TODO: use shared recipbox field.
944 std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
947 void pme_gpu_destroy(PmeGpu *pmeGpu)
949 /* Free lots of data */
950 pme_gpu_free_energy_virial(pmeGpu);
951 pme_gpu_free_bspline_values(pmeGpu);
952 pme_gpu_free_forces(pmeGpu);
953 pme_gpu_free_coefficients(pmeGpu);
954 pme_gpu_free_spline_data(pmeGpu);
955 pme_gpu_free_grid_indices(pmeGpu);
956 pme_gpu_free_fract_shifts(pmeGpu);
957 pme_gpu_free_grids(pmeGpu);
959 pme_gpu_destroy_3dfft(pmeGpu);
961 /* Free the GPU-framework specific data last */
962 pme_gpu_destroy_specific(pmeGpu);
964 delete pmeGpu;
967 void pme_gpu_reinit_atoms(PmeGpu *pmeGpu, const int nAtoms, const real *charges)
969 auto *kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
970 kernelParamsPtr->atoms.nAtoms = nAtoms;
971 const int alignment = pme_gpu_get_atom_data_alignment(pmeGpu);
972 pmeGpu->nAtomsPadded = ((nAtoms + alignment - 1) / alignment) * alignment;
973 const int nAtomsAlloc = c_usePadding ? pmeGpu->nAtomsPadded : nAtoms;
974 const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsAlloc); /* This check might be redundant, but is logical */
975 pmeGpu->nAtomsAlloc = nAtomsAlloc;
977 #if GMX_DOUBLE
978 GMX_RELEASE_ASSERT(false, "Only single precision supported");
979 GMX_UNUSED_VALUE(charges);
980 #else
981 pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float *>(charges));
982 /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
983 #endif
985 if (haveToRealloc)
987 pme_gpu_realloc_forces(pmeGpu);
988 pme_gpu_realloc_spline_data(pmeGpu);
989 pme_gpu_realloc_grid_indices(pmeGpu);
993 void pme_gpu_3dfft(const PmeGpu *pmeGpu, gmx_fft_direction dir, int grid_index)
995 int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
997 pme_gpu_start_timing(pmeGpu, timerId);
998 pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
999 pme_gpu_stop_timing(pmeGpu, timerId);
1002 /*! \brief
1003 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
1004 * to minimize number of unused blocks.
1006 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu *pmeGpu, int blockCount)
1008 // How many maximum widths in X do we need (hopefully just one)
1009 const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
1010 // Trying to make things even
1011 const int colCount = (blockCount + minRowCount - 1) / minRowCount;
1012 GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
1013 GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount, "pmeGpuCreateGrid: excessive blocks");
1014 return std::pair<int, int>(colCount, minRowCount);
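/* Illustrative example, not from the upstream file: with a hypothetical
 * maxGridWidthX = 65535 and blockCount = 100000, minRowCount = 2 and colCount = 50000,
 * i.e. a 50000 x 2 launch grid with no unused blocks; for blockCount = 5000 the result is
 * simply a 5000 x 1 grid. */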
1017 void pme_gpu_spread(const PmeGpu *pmeGpu,
1018 GpuEventSynchronizer *xReadyOnDevice,
1019 int gmx_unused gridIndex,
1020 real *h_grid,
1021 bool computeSplines,
1022 bool spreadCharges)
1024 GMX_ASSERT(computeSplines || spreadCharges, "PME spline/spread kernel has invalid input (nothing to do)");
1025 const auto *kernelParamsPtr = pmeGpu->kernelParams.get();
1026 GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1028 const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1030 const int order = pmeGpu->common->pme_order;
1031 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1032 const int atomsPerBlock = blockSize / c_pmeSpreadGatherThreadsPerAtom;
1033 // TODO: pick smaller block size in runtime if needed
1034 // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1035 // If doing so, change atomsPerBlock in the kernels as well.
1036 // TODO: test varying block sizes on modern arch-s as well
1037 // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1038 //(for spline data mostly, together with varying PME_GPU_PARALLEL_SPLINE define)
1039 GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock), "inconsistent atom data padding vs. spreading block size");
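/* Illustrative example, not from the upstream file, with hypothetical sizes: a spread
 * work-group size of 256 and 16 threads per atom give atomsPerBlock = 16, so the launch
 * below uses 4 x 4 x 16 = 256 threads per block and blockCount = nAtomsPadded / 16 blocks,
 * distributed by pmeGpuCreateGrid(). */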
1041 // Ensure that coordinates are ready on the device before launching spread;
1042 // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
1043 // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
1044 // Note: Consider adding an assertion on xReadyOnDevice when we can detect
1045 // here separate PME ranks.
1046 if (xReadyOnDevice)
1048 xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream);
1051 const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1052 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1054 KernelLaunchConfig config;
1055 config.blockSize[0] = config.blockSize[1] = order;
1056 config.blockSize[2] = atomsPerBlock;
1057 config.gridSize[0] = dimGrid.first;
1058 config.gridSize[1] = dimGrid.second;
1059 config.stream = pmeGpu->archSpecific->pmeStream;
1061 int timingId;
1062 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1063 if (computeSplines)
1065 if (spreadCharges)
1067 timingId = gtPME_SPLINEANDSPREAD;
1068 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernel;
1070 else
1072 timingId = gtPME_SPLINE;
1073 kernelPtr = pmeGpu->programHandle_->impl_->splineKernel;
1076 else
1078 timingId = gtPME_SPREAD;
1079 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernel;
1082 pme_gpu_start_timing(pmeGpu, timingId);
1083 auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1084 #if c_canEmbedBuffers
1085 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1086 #else
1087 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1088 &kernelParamsPtr->atoms.d_theta,
1089 &kernelParamsPtr->atoms.d_dtheta,
1090 &kernelParamsPtr->atoms.d_gridlineIndices,
1091 &kernelParamsPtr->grid.d_realGrid,
1092 &kernelParamsPtr->grid.d_fractShiftsTable,
1093 &kernelParamsPtr->grid.d_gridlineIndicesTable,
1094 &kernelParamsPtr->atoms.d_coefficients,
1095 &kernelParamsPtr->atoms.d_coordinates
1097 #endif
1099 launchGpuKernel(kernelPtr, config, timingEvent, "PME spline/spread", kernelArgs);
1100 pme_gpu_stop_timing(pmeGpu, timingId);
1102 const bool copyBackGrid = spreadCharges && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu));
1103 if (copyBackGrid)
1105 pme_gpu_copy_output_spread_grid(pmeGpu, h_grid);
1107 const bool copyBackAtomData = computeSplines && (pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_gather(pmeGpu));
1108 if (copyBackAtomData)
1110 pme_gpu_copy_output_spread_atom_data(pmeGpu);
1114 void pme_gpu_solve(const PmeGpu *pmeGpu, t_complex *h_grid,
1115 GridOrdering gridOrdering, bool computeEnergyAndVirial)
1117 const bool copyInputAndOutputGrid = pme_gpu_is_testing(pmeGpu) || !pme_gpu_performs_FFT(pmeGpu);
1119 auto *kernelParamsPtr = pmeGpu->kernelParams.get();
1121 float *h_gridFloat = reinterpret_cast<float *>(h_grid);
1122 if (copyInputAndOutputGrid)
1124 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid, h_gridFloat,
1125 0, pmeGpu->archSpecific->complexGridSize,
1126 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1129 int majorDim = -1, middleDim = -1, minorDim = -1;
1130 switch (gridOrdering)
1132 case GridOrdering::YZX:
1133 majorDim = YY;
1134 middleDim = ZZ;
1135 minorDim = XX;
1136 break;
1138 case GridOrdering::XYZ:
1139 majorDim = XX;
1140 middleDim = YY;
1141 minorDim = ZZ;
1142 break;
1144 default:
1145 GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1148 const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1150 const int gridLineSize = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
1151 const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
1152 const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
1153 int cellsPerBlock;
1154 if (blocksPerGridLine == 1)
1156 cellsPerBlock = gridLineSize * gridLinesPerBlock;
1158 else
1160 cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
1162 const int warpSize = pmeGpu->programHandle_->impl_->warpSize;
1163 const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;
1165 static_assert(GMX_GPU != GMX_GPU_CUDA || c_solveMaxWarpsPerBlock/2 >= 4,
1166                   "The CUDA solve energy kernels need at least 4 warps. "
1167 "Here we launch at least half of the max warps.");
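/* Illustrative example, not from the upstream file, with hypothetical sizes: for a minor
 * complex-grid dimension of 33 cells and maxBlockSize = 256, gridLinesPerBlock = 7,
 * blocksPerGridLine = 1 and cellsPerBlock = 33 * 7 = 231; rounding up to a 32-wide warp
 * gives blockSize = 256 threads per block. */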
1169 KernelLaunchConfig config;
1170 config.blockSize[0] = blockSize;
1171 config.gridSize[0] = blocksPerGridLine;
1172 // rounding up to full warps so that shuffle operations produce defined results
1173 config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1) / gridLinesPerBlock;
1174 config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];
1175 config.stream = pmeGpu->archSpecific->pmeStream;
1177 int timingId = gtPME_SOLVE;
1178 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1179 if (gridOrdering == GridOrdering::YZX)
1181 kernelPtr = computeEnergyAndVirial ?
1182 pmeGpu->programHandle_->impl_->solveYZXEnergyKernel :
1183 pmeGpu->programHandle_->impl_->solveYZXKernel;
1185 else if (gridOrdering == GridOrdering::XYZ)
1187 kernelPtr = computeEnergyAndVirial ?
1188 pmeGpu->programHandle_->impl_->solveXYZEnergyKernel :
1189 pmeGpu->programHandle_->impl_->solveXYZKernel;
1192 pme_gpu_start_timing(pmeGpu, timingId);
1193 auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1194 #if c_canEmbedBuffers
1195 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1196 #else
1197 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1198 &kernelParamsPtr->grid.d_splineModuli,
1199 &kernelParamsPtr->constants.d_virialAndEnergy,
1200 &kernelParamsPtr->grid.d_fourierGrid);
1201 #endif
1202 launchGpuKernel(kernelPtr, config, timingEvent, "PME solve", kernelArgs);
1203 pme_gpu_stop_timing(pmeGpu, timingId);
1205 if (computeEnergyAndVirial)
1207 copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy, &kernelParamsPtr->constants.d_virialAndEnergy,
1208 0, c_virialAndEnergyCount,
1209 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1212 if (copyInputAndOutputGrid)
1214 copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid,
1215 0, pmeGpu->archSpecific->complexGridSize,
1216 pmeGpu->archSpecific->pmeStream, pmeGpu->settings.transferKind, nullptr);
1220 void pme_gpu_gather(PmeGpu *pmeGpu,
1221 PmeForceOutputHandling forceTreatment,
1222 const float *h_grid)
1224 /* Copying the input CPU forces for reduction */
1225 if (forceTreatment != PmeForceOutputHandling::Set)
1227 pme_gpu_copy_input_forces(pmeGpu);
1230 if (!pme_gpu_performs_FFT(pmeGpu) || pme_gpu_is_testing(pmeGpu))
1232 pme_gpu_copy_input_gather_grid(pmeGpu, const_cast<float *>(h_grid));
1235 if (pme_gpu_is_testing(pmeGpu))
1237 pme_gpu_copy_input_gather_atom_data(pmeGpu);
1240 const size_t blockSize = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
1241 const int atomsPerBlock = blockSize / c_pmeSpreadGatherThreadsPerAtom;
1242 GMX_ASSERT(!c_usePadding || !(c_pmeAtomDataAlignment % atomsPerBlock), "inconsistent atom data padding vs. gathering block size");
1244 const int blockCount = pmeGpu->nAtomsPadded / atomsPerBlock;
1245 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1248 const int order = pmeGpu->common->pme_order;
1249 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1251 KernelLaunchConfig config;
1252 config.blockSize[0] = config.blockSize[1] = order;
1253 config.blockSize[2] = atomsPerBlock;
1254 config.gridSize[0] = dimGrid.first;
1255 config.gridSize[1] = dimGrid.second;
1256 config.stream = pmeGpu->archSpecific->pmeStream;
1258 // TODO test different cache configs
1260 int timingId = gtPME_GATHER;
1261 // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl
1262 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = (forceTreatment == PmeForceOutputHandling::Set) ?
1263 pmeGpu->programHandle_->impl_->gatherKernel :
1264 pmeGpu->programHandle_->impl_->gatherReduceWithInputKernel;
1266 pme_gpu_start_timing(pmeGpu, timingId);
1267 auto *timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1268 const auto *kernelParamsPtr = pmeGpu->kernelParams.get();
1269 #if c_canEmbedBuffers
1270 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1271 #else
1272 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr,
1273 &kernelParamsPtr->atoms.d_coefficients,
1274 &kernelParamsPtr->grid.d_realGrid,
1275 &kernelParamsPtr->atoms.d_theta,
1276 &kernelParamsPtr->atoms.d_dtheta,
1277 &kernelParamsPtr->atoms.d_gridlineIndices,
1278 &kernelParamsPtr->atoms.d_forces);
1279 #endif
1280 launchGpuKernel(kernelPtr, config, timingEvent, "PME gather", kernelArgs);
1281 pme_gpu_stop_timing(pmeGpu, timingId);
1283 if (pmeGpu->settings.useGpuForceReduction)
1285 pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream);
1287 else
1289 pme_gpu_copy_output_forces(pmeGpu);
1293 DeviceBuffer<float> pme_gpu_get_kernelparam_coordinates(const PmeGpu *pmeGpu)
1295 GMX_ASSERT(pmeGpu && pmeGpu->kernelParams, "PME GPU device buffer was requested in non-GPU build or before the GPU PME was initialized.");
1297 return pmeGpu->kernelParams->atoms.d_coordinates;
1300 void * pme_gpu_get_kernelparam_forces(const PmeGpu *pmeGpu)
1302 if (pmeGpu && pmeGpu->kernelParams)
1304 return pmeGpu->kernelParams->atoms.d_forces;
1306 else
1308 return nullptr;
1312 /*! \brief Check the validity of the device buffer.
1314 * Checks if the buffer is not nullptr and, when possible, if it is big enough.
1316 * \todo Split and move this function to gpu_utils.
1318 * \param[in] buffer Device buffer to be checked.
1319 * \param[in] requiredSize Number of elements that the buffer will have to accommodate.
1321 * \returns If the device buffer can be set.
1323 template<typename T>
1324 static bool checkDeviceBuffer(gmx_unused DeviceBuffer<T> buffer, gmx_unused int requiredSize)
1326 #if GMX_GPU == GMX_GPU_CUDA
1327 GMX_ASSERT(buffer != nullptr, "The device pointer is nullptr");
1328 return buffer != nullptr;
1329 #elif GMX_GPU == GMX_GPU_OPENCL
1330 size_t size;
1331 int retval = clGetMemObjectInfo(buffer, CL_MEM_SIZE, sizeof(size), &size, nullptr);
1332 GMX_ASSERT(retval == CL_SUCCESS, gmx::formatString("clGetMemObjectInfo failed with error code #%d", retval).c_str());
1333     GMX_ASSERT(static_cast<int>(size) >= requiredSize, "Number of atoms in device buffer is smaller than the required size.");
1334 return retval == CL_SUCCESS && static_cast<int>(size) >= requiredSize;
1335 #elif GMX_GPU == GMX_GPU_NONE
1336 GMX_ASSERT(false, "Setter for device-side coordinates was called in non-GPU build.");
1337 return false;
1338 #endif
1341 void pme_gpu_set_kernelparam_coordinates(const PmeGpu *pmeGpu, DeviceBuffer<float> d_x)
1343 GMX_ASSERT(pmeGpu && pmeGpu->kernelParams, "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was initialized.");
1345 GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms), "The device-side buffer can not be set.");
1347 pmeGpu->kernelParams->atoms.d_coordinates = d_x;
1350 void * pme_gpu_get_stream(const PmeGpu *pmeGpu)
1352 if (pmeGpu)
1354 return static_cast<void *>(&pmeGpu->archSpecific->pmeStream);
1356 else
1358 return nullptr;
1362 void * pme_gpu_get_context(const PmeGpu *pmeGpu)
1364 if (pmeGpu)
1366 return static_cast<void *>(&pmeGpu->archSpecific->context);
1368 else
1370 return nullptr;
1374 GpuEventSynchronizer *pme_gpu_get_forces_ready_synchronizer(const PmeGpu *pmeGpu)
1376 if (pmeGpu && pmeGpu->kernelParams)
1378 return &pmeGpu->archSpecific->pmeForcesReady;
1380 else
1382 return nullptr;