/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Implements high-level PME GPU functions which do not require GPU framework-specific code.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "config.h"

#include <list>

#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"

#include "pme_gpu_internal.h"
#include "pme_gpu_settings.h"
#include "pme_gpu_timings.h"
#include "pme_gpu_types_host.h"
#include "pme_internal.h"
#include "pme_solve.h"

/*! \brief
 * Finds out if PME is currently running on GPU.
 *
 * \todo The GPU module should not be constructed (or at least called)
 * when it is not active, so there should be no need to check whether
 * it is active. An assertion that this is true makes sense.
 *
 * \param[in] pme  The PME structure.
 * \returns        True if PME runs on GPU currently, false otherwise.
 */
static inline bool pme_gpu_active(const gmx_pme_t* pme)
{
    return (pme != nullptr) && (pme->runMode != PmeRunMode::CPU);
}

void pme_gpu_reset_timings(const gmx_pme_t* pme)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_reset_timings(pme->gpu);
    }
}

void pme_gpu_get_timings(const gmx_pme_t* pme, gmx_wallclock_gpu_pme_t* timings)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_get_timings(pme->gpu, timings);
    }
}

int pme_gpu_get_padding_size(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        // No GPU in use, so no padding is needed
        return 0;
    }
    else
    {
        return pme_gpu_get_atom_data_alignment(pme->gpu);
    }
}
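
/* Worked example (illustrative only; the actual alignment value is
 * implementation-defined and must be queried via pme_gpu_get_atom_data_alignment()):
 * if the alignment were 32 atoms, a 1000-atom system would have its per-atom
 * GPU arrays padded to 1024 entries, so kernels can process whole blocks of
 * atoms without bounds checks on the last block. */
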
/*! \brief
 * A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme            The PME structure.
 * \param[in] gridIndex      The grid index - should currently always be 0.
 * \param[in] dir            The FFT direction enum.
 * \param[in] wcycle         The wallclock counter.
 */
void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t*             pme,
                                               const int              gridIndex,
                                               enum gmx_fft_direction dir,
                                               gmx_wallcycle_t        wcycle)
{
    GMX_ASSERT(gridIndex == 0, "Only single grid supported");
    if (pme_gpu_settings(pme->gpu).performGPUFFT)
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    else
    {
        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
    }
}

/* The PME computation code is split into a few separate functions. */
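
/* Illustrative per-step call sequence (a sketch based on the functions in this
 * file, not a verbatim copy of the mdrun control flow; box, stepWork, wcycle
 * and xReadyOnDevice are assumed to be provided by the caller):
 *
 *   pme_gpu_prepare_computation(pme, box, wcycle, stepWork);
 *   pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle);
 *   pme_gpu_launch_complex_transforms(pme, wcycle, stepWork);
 *   pme_gpu_launch_gather(pme, wcycle);
 *   // ... outputs are then collected with pme_gpu_try_finish_task() or
 *   // pme_gpu_wait_and_reduce(), and the step ends with:
 *   pme_gpu_reinit_computation(pme, wcycle);
 */
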
void pme_gpu_prepare_computation(gmx_pme_t*               pme,
                                 const matrix             box,
                                 gmx_wallcycle*           wcycle,
                                 const gmx::StepWorkload& stepWork)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(pme->nnodes > 0, "");
    GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "");

    PmeGpu* pmeGpu = pme->gpu;
    // TODO these flags are only here to honor the CPU PME code, and probably should be removed
    pmeGpu->settings.useGpuForceReduction = stepWork.useGpuPmeFReduction;

    bool shouldUpdateBox = false;
    for (int i = 0; i < DIM; ++i)
    {
        for (int j = 0; j <= i; ++j)
        {
            shouldUpdateBox |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
            pmeGpu->common->previousBox[i][j] = box[i][j];
        }
    }

    if (stepWork.haveDynamicBox || shouldUpdateBox) // || is to make the first computation always update
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_update_input_box(pmeGpu, box);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

        if (!pme_gpu_settings(pmeGpu).performGPUSolve)
        {
            // TODO remove code duplication and add test coverage
            matrix scaledBox;
            pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
            gmx::invertBoxMatrix(scaledBox, pme->recipbox);
            // GROMACS boxes are lower-triangular, so the volume (determinant)
            // is the product of the diagonal elements.
            pme->boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
        }
    }
}

void pme_gpu_launch_spread(gmx_pme_t* pme, GpuEventSynchronizer* xReadyOnDevice, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(xReadyOnDevice || !pme->bPPnode || (GMX_GPU != GMX_GPU_CUDA),
               "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");

    PmeGpu* pmeGpu = pme->gpu;

    const unsigned int gridIndex = 0;
    real*              fftgrid   = pme->fftgrid[gridIndex];

    /* Spread the coefficients on a grid */
    const bool computeSplines = true;
    const bool spreadCharges  = true;
    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
    pme_gpu_spread(pmeGpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

void pme_gpu_launch_complex_transforms(gmx_pme_t* pme, gmx_wallcycle* wcycle, const gmx::StepWorkload& stepWork)
{
    PmeGpu*            pmeGpu    = pme->gpu;
    const auto&        settings  = pmeGpu->settings;
    const unsigned int gridIndex = 0;
    t_complex*         cfftgrid  = pme->cfftgrid[gridIndex];
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
    if (!settings.performGPUFFT)
    {
        wallcycle_start(wcycle, ewcWAIT_GPU_PME_SPREAD);
        pme_gpu_sync_spread_grid(pme->gpu);
        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_SPREAD);
    }

    try
    {
        /* The real-to-complex 3D-FFT */
        parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);

        /* solve in k-space for our local cells */
        if (settings.performGPUSolve)
        {
            // TODO grid ordering should be set up at pme init time.
            const auto gridOrdering = settings.useDecomposition ? GridOrdering::YZX : GridOrdering::XYZ;
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
            pme_gpu_solve(pmeGpu, cfftgrid, gridOrdering, computeEnergyAndVirial);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
        else
        {
            wallcycle_start(wcycle, ewcPME_SOLVE_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
            for (int thread = 0; thread < pme->nthread; thread++)
            {
                solve_pme_yzx(pme, cfftgrid, pme->boxVolume, computeEnergyAndVirial, pme->nthread, thread);
            }
            wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
        }

        /* The complex-to-real 3D-FFT */
        parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    if (!pme_gpu_settings(pme->gpu).performGPUGather)
    {
        return;
    }

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
    const unsigned int gridIndex = 0;
    real*              fftgrid   = pme->fftgrid[gridIndex];
    pme_gpu_gather(pme->gpu, reinterpret_cast<float*>(fftgrid));
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

//! Accumulate the \c forceToAdd to \c f, using the available threads.
static void sum_forces(gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        f[i] += forceToAdd[i];
    }
}
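
/* Note on the loop above: a plain parallel for is sufficient (no OpenMP
 * reduction clause) because each element f[i] is updated by exactly one
 * thread; the accumulation happens across arrays, not into shared scalars. */
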
//! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
static void pme_gpu_reduce_outputs(const bool            computeEnergyAndVirial,
                                   const PmeOutput&      output,
                                   gmx_wallcycle*        wcycle,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t*       enerd)
{
    wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
    GMX_ASSERT(forceWithVirial, "Invalid force pointer");

    if (computeEnergyAndVirial)
    {
        GMX_ASSERT(enerd, "Invalid energy output manager");
        forceWithVirial->addVirialContribution(output.coulombVirial_);
        enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
    }
    if (output.haveForceOutput_)
    {
        sum_forces(forceWithVirial->force_, output.forces_);
    }
    wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
}

bool pme_gpu_try_finish_task(gmx_pme_t*               pme,
                             const gmx::StepWorkload& stepWork,
                             gmx_wallcycle*           wcycle,
                             gmx::ForceWithVirial*    forceWithVirial,
                             gmx_enerdata_t*          enerd,
                             GpuTaskCompletion        completionKind)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction,
               "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");

    // First, if possible, check whether all tasks on the stream have
    // completed, and return fast if not. Accumulate to wcycle the
    // time needed for that checking, but do not yet record that the
    // gather has occurred.
    bool           needToSynchronize      = true;
    constexpr bool c_streamQuerySupported = (GMX_GPU == GMX_GPU_CUDA);
    // TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
    if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
    {
        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_PME_GATHER);
        // Query the PME stream for completion of all tasks enqueued and
        // if we're not done, stop the timer before early return.
        const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

        if (!pmeGpuDone)
        {
            return false;
        }
        needToSynchronize = false;
    }

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // If the above check passed, then there is no need to make an
    // explicit synchronization call.
    if (needToSynchronize)
    {
        // Synchronize the whole PME stream at once, including D2H result transfers.
        pme_gpu_synchronize(pme->gpu);
    }
    pme_gpu_update_timings(pme->gpu);
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
    PmeOutput  output = pme_gpu_getOutput(*pme, computeEnergyAndVirial);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
               "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(computeEnergyAndVirial, output, wcycle, forceWithVirial, enerd);

    return true;
}
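
/* Illustrative polling pattern for the non-blocking path (a sketch assuming the
 * caller has independent CPU work to overlap; doOtherCpuWork() is a hypothetical
 * placeholder, not a GROMACS function):
 *
 *   while (!pme_gpu_try_finish_task(pme, stepWork, wcycle, forceWithVirial,
 *                                   enerd, GpuTaskCompletion::Check))
 *   {
 *       doOtherCpuWork();
 *   }
 *
 * With GpuTaskCompletion::Wait, a single call blocks until the results are
 * reduced; where stream querying is unsupported, a Check call also falls back
 * to this blocking behavior. */
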
// This is used by PME-only ranks
PmeOutput pme_gpu_wait_finish_task(gmx_pme_t* pme, const bool computeEnergyAndVirial, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);

    // Synchronize the whole PME stream at once, including D2H result transfers
    // if there are outputs we need to wait for at this step; we still call getOutputs
    // for uniformity and because it sets the PmeOutput.haveForceOutput_.
    if (!pme->gpu->settings.useGpuForceReduction || computeEnergyAndVirial)
    {
        pme_gpu_synchronize(pme->gpu);
    }

    PmeOutput output = pme_gpu_getOutput(*pme, computeEnergyAndVirial);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
    return output;
}

// This is used when not using the alternate-waiting reduction
void pme_gpu_wait_and_reduce(gmx_pme_t*               pme,
                             const gmx::StepWorkload& stepWork,
                             gmx_wallcycle*           wcycle,
                             gmx::ForceWithVirial*    forceWithVirial,
                             gmx_enerdata_t*          enerd)
{
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
    PmeOutput  output = pme_gpu_wait_finish_task(pme, computeEnergyAndVirial, wcycle);
    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
               "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(computeEnergyAndVirial, output, wcycle, forceWithVirial, enerd);
}

void pme_gpu_reinit_computation(const gmx_pme_t* pme, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);

    pme_gpu_update_timings(pme->gpu);

    pme_gpu_clear_grids(pme->gpu);
    pme_gpu_clear_energy_virial(pme->gpu);

    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

void* pme_gpu_get_device_f(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_kernelparam_forces(pme->gpu);
}

void pme_gpu_set_device_x(const gmx_pme_t* pme, DeviceBuffer<gmx::RVec> d_x)
{
    GMX_ASSERT(pme != nullptr, "Null pointer is passed as a PME to the set coordinates function.");
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    pme_gpu_set_kernelparam_coordinates(pme->gpu, d_x);
}

void* pme_gpu_get_device_stream(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_stream(pme->gpu);
}

void* pme_gpu_get_device_context(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_context(pme->gpu);
}

GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_forces_ready_synchronizer(pme->gpu);
}
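
/* Illustrative consumer-side use of the getters above (a sketch; the wait on
 * the synchronizer would be enqueued in the consumer's own GPU stream):
 *
 *   if (void* d_f = pme_gpu_get_device_f(pme))
 *   {
 *       GpuEventSynchronizer* fReady = pme_gpu_get_f_ready_synchronizer(pme);
 *       // Enqueue a wait on fReady before reading d_f on the device, so the
 *       // PME forces can be consumed without a CPU-side synchronization.
 *   }
 */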