Two sets of coefficients for Coulomb FEP PME on GPU
[gromacs.git] / src/gromacs/ewald/pme.h
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \inlibraryapi
 * \ingroup module_ewald
 */
#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include <string>

#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
struct gmx_hw_info_t;
struct t_commrec;
struct t_inputrec;
struct t_nrnb;
struct PmeGpu;
struct gmx_wallclock_gpu_pme_t;
struct gmx_enerdata_t;
struct gmx_mtop_t;
struct gmx_pme_t;
struct gmx_wallcycle;
struct NumPmeDomains;

class DeviceContext;
class DeviceStream;
enum class GpuTaskCompletion;
class PmeGpuProgram;
class GpuEventSynchronizer;
namespace gmx
{
template<typename>
class ArrayRef;
class ForceWithVirial;
class MDLogger;
enum class PinningPolicy : int;
class StepWorkload;
} // namespace gmx
enum
{
    GMX_SUM_GRID_FORWARD,
    GMX_SUM_GRID_BACKWARD
};
/*! \brief Possible PME codepaths on a rank.
 * \todo: make this enum class with gmx_pme_t C++ refactoring
 */
enum class PmeRunMode
{
    None,  //!< No PME task is done
    CPU,   //!< Whole PME computation is done on CPU
    GPU,   //!< Whole PME computation is done on GPU
    Mixed, //!< Mixed mode: only spread and gather run on GPU; FFT and solving are done on CPU.
};
/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);

//! Return whether the grid of \c pme is identical to \c grid_size.
bool gmx_pme_grid_matches(const gmx_pme_t& pme, const ivec grid_size);
/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells whether any MPI rank doing PME uses more than 1 thread.
 * If useThreads is unknown at the time of the call, pass true for conservative checking.
 *
 * The PME GPU restrictions are checked separately during pme_gpu_init().
 */
bool gmx_pme_check_restrictions(int  pme_order,
                                int  nkx,
                                int  nky,
                                int  nkz,
                                int  numPmeDomainsAlongX,
                                bool useThreads,
                                bool errorsAreFatal);
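/* Illustrative usage sketch (editorial addition, not part of the GROMACS API): validating a
 * candidate grid without triggering a fatal error. The grid dimensions and PME order below are
 * placeholder local values, not settings read from an input record.
 *
 *   const int  pmeOrder = 4;
 *   const int  nkx = 64, nky = 64, nkz = 64;
 *   const bool gridIsValid = gmx_pme_check_restrictions(pmeOrder, nkx, nky, nkz,
 *                                                       1,      // numPmeDomainsAlongX
 *                                                       true,   // useThreads unknown: be conservative
 *                                                       false); // report via the return value
 *   if (!gridIsValid)
 *   {
 *       // choose a different grid or abort the setup
 *   }
 */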
/*! \brief Construct PME data
 *
 * \throws  gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 * \returns Pointer to newly allocated and initialized PME data.
 *
 * \todo We should evolve something like a \c GpuManager that holds \c
 * DeviceInformation* and \c PmeGpuProgram* and perhaps other
 * related things whose lifetime can/should exceed that of a task (or
 * perhaps task manager). See Issue #2522.
 */
gmx_pme_t* gmx_pme_init(const t_commrec*     cr,
                        const NumPmeDomains& numPmeDomains,
                        const t_inputrec*    ir,
                        gmx_bool             bFreeEnergy_q,
                        gmx_bool             bFreeEnergy_lj,
                        gmx_bool             bReproducible,
                        real                 ewaldcoeff_q,
                        real                 ewaldcoeff_lj,
                        int                  nthread,
                        PmeRunMode           runMode,
                        PmeGpu*              pmeGpu,
                        const DeviceContext* deviceContext,
                        const DeviceStream*  deviceStream,
                        const PmeGpuProgram* pmeGpuProgram,
                        const gmx::MDLogger& mdlog);
/*! \brief As gmx_pme_init, but takes most settings, except the grid/Ewald coefficients, from
 * pme_src. This is only called when the PME cut-off/grid size changes.
 */
void gmx_pme_reinit(gmx_pme_t**       pmedata,
                    const t_commrec*  cr,
                    gmx_pme_t*        pme_src,
                    const t_inputrec* ir,
                    const ivec        grid_size,
                    real              ewaldcoeff_q,
                    real              ewaldcoeff_lj);

/*! \brief Destroys the PME data structure.*/
void gmx_pme_destroy(gmx_pme_t* pme);
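/* Illustrative usage sketch (editorial addition): a minimal CPU-only construction/destruction
 * pair. The variables cr, numPmeDomains, ir, ewaldcoeff_q, ewaldcoeff_lj and mdlog are assumed
 * to be set up by the caller; the GPU-related arguments may be nullptr for PmeRunMode::CPU.
 *
 *   gmx_pme_t* pme = gmx_pme_init(cr, numPmeDomains, ir,
 *                                 FALSE, FALSE, FALSE, // no Coulomb/LJ FEP, not reproducible
 *                                 ewaldcoeff_q, ewaldcoeff_lj,
 *                                 1,                   // nthread
 *                                 PmeRunMode::CPU,
 *                                 nullptr, nullptr, nullptr, nullptr, // no GPU objects
 *                                 mdlog);
 *   // ... compute with gmx_pme_do() ...
 *   gmx_pme_destroy(pme);
 */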
/*! \brief Do a PME calculation on a CPU for the long range electrostatics and/or LJ.
 *
 * Computes the PME forces and the energy and virial, when requested,
 * for all atoms in \p coordinates. Forces, when requested, are added
 * to the buffer \p forces, which is allowed to contain more elements
 * than the number of elements in \p coordinates.
 * The settings in \p stepWork determine which parts of the calculation
 * are performed.
 *
 * \return 0 indicates all well, non-zero is an error code.
 */
int gmx_pme_do(struct gmx_pme_t*              pme,
               gmx::ArrayRef<const gmx::RVec> coordinates,
               gmx::ArrayRef<gmx::RVec>       forces,
               real                           chargeA[],
               real                           chargeB[],
               real                           c6A[],
               real                           c6B[],
               real                           sigmaA[],
               real                           sigmaB[],
               const matrix                   box,
               const t_commrec*               cr,
               int                            maxshift_x,
               int                            maxshift_y,
               t_nrnb*                        nrnb,
               gmx_wallcycle*                 wcycle,
               matrix                         vir_q,
               matrix                         vir_lj,
               real*                          energy_q,
               real*                          energy_lj,
               real                           lambda_q,
               real                           lambda_lj,
               real*                          dvdlambda_q,
               real*                          dvdlambda_lj,
               const gmx::StepWorkload&       stepWork);
/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been found already with a
 * call to gmx_pme_do(). Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(gmx_pme_t* pme, gmx::ArrayRef<const gmx::RVec> x, gmx::ArrayRef<const real> q, real* V);
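/* Illustrative usage sketch (editorial addition): evaluating the grid potential energy for a
 * small set of probe charges after a preceding gmx_pme_do() call. The ArrayRefs
 * probePositions and probeCharges are assumed to be prepared by the caller.
 *
 *   real V = 0;
 *   gmx_pme_calc_energy(pme, probePositions, probeCharges, &V);
 */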
/*! \brief
 * This function updates the local atom data on GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (Currently the PME CPU path gets the input pointers passed to each gmx_pme_do() call.)
 *
 * \param[in,out] pme       The PME structure.
 * \param[in]     numAtoms  The number of particles.
 * \param[in]     chargesA  The pointer to the array of particle charges in the normal state or FEP
 *                          state A. Can be nullptr if PME is not performed on the GPU.
 * \param[in]     chargesB  The pointer to the array of particle charges in state B. Only used if
 *                          charges are perturbed and can otherwise be nullptr.
 */
void gmx_pme_reinit_atoms(gmx_pme_t* pme, int numAtoms, const real* chargesA, const real* chargesB);
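/* Illustrative usage sketch (editorial addition): refreshing the GPU atom data after domain
 * decomposition. homeAtomCount, chargesA, chargesB and perturbedCharges are assumed to come
 * from the local atom data; pass nullptr for chargesB when the charges are not perturbed.
 *
 *   gmx_pme_reinit_atoms(pme, homeAtomCount, chargesA, perturbedCharges ? chargesB : nullptr);
 */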
/* A block of PME GPU functions */

/*! \brief Checks whether the GROMACS build allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that it works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[out] error  If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on GPU on this build, false otherwise.
 */
bool pme_gpu_supports_build(std::string* error);

/*! \brief Checks whether the detected (GPU) hardware allows running PME on a GPU.
 *
 * \param[in]  hwinfo  Information about the detected hardware
 * \param[out] error   If non-null, the error message when PME is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this hardware, false otherwise.
 */
bool pme_gpu_supports_hardware(const gmx_hw_info_t& hwinfo, std::string* error);

/*! \brief Checks whether the input system allows running PME on a GPU.
 * TODO: this partly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that it works with a
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[in]  ir     Input system.
 * \param[out] error  If non-null, the error message if the input is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this input, false otherwise.
 */
bool pme_gpu_supports_input(const t_inputrec& ir, std::string* error);
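/* Illustrative usage sketch (editorial addition): the three capability checks above can be
 * chained when deciding whether a PME GPU task may be assigned. hwinfo and ir are assumed to
 * be available from the runner.
 *
 *   std::string errorReason;
 *   const bool  pmeGpuUsable = pme_gpu_supports_build(&errorReason)
 *                              && pme_gpu_supports_hardware(hwinfo, &errorReason)
 *                              && pme_gpu_supports_input(ir, &errorReason);
 *   if (!pmeGpuUsable)
 *   {
 *       fprintf(stderr, "PME will run on the CPU: %s\n", errorReason.c_str());
 *   }
 */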
/*! \brief
 * Returns the active PME codepath (CPU, GPU, mixed).
 * \todo This is rather static data that should be managed by the higher-level task scheduler.
 *
 * \param[in] pme  The PME data structure.
 * \returns active PME codepath.
 */
PmeRunMode pme_run_mode(const gmx_pme_t* pme);
/*! \libinternal \brief
 * Return the pinning policy appropriate for this build configuration
 * for relevant buffers used for PME task on this rank (e.g. running
 * on a GPU). */
gmx::PinningPolicy pme_get_pinning_policy();
/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * \todo This is rather static data that should be managed by the hardware assignment manager.
 * For now, it is synonymous with the active PME codepath (in the absence of dynamic switching).
 *
 * \param[in] pme  The PME data structure.
 * \returns true if PME can run on GPU, false otherwise.
 */
inline bool pme_gpu_task_enabled(const gmx_pme_t* pme)
{
    return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
}
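/* Illustrative usage sketch (editorial addition): querying the codepath before deciding how a
 * step should be scheduled.
 *
 *   if (pme_gpu_task_enabled(pme))
 *   {
 *       const bool fullGpuPme = (pme_run_mode(pme) == PmeRunMode::GPU);
 *       // In PmeRunMode::Mixed only spread/gather run on the GPU; FFT and solve stay on the CPU.
 *   }
 */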
/*! \brief Returns the block size requirement
 *
 * The GPU version of PME requires that the coordinates array have a
 * size divisible by the returned number.
 *
 * \param[in] pme  The PME data structure.
 */
GPU_FUNC_QUALIFIER int pme_gpu_get_block_size(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(0);

// The following functions are all the PME GPU entry points,
// currently inlining to nothing on non-CUDA builds.

/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme  The PME structure.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reset_timings(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme)) GPU_FUNC_TERM;
/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
 *
 * \param[in]  pme      The PME structure.
 * \param[out] timings  The gmx_wallclock_gpu_pme_t structure.
 */
GPU_FUNC_QUALIFIER void pme_gpu_get_timings(const gmx_pme_t*         GPU_FUNC_ARGUMENT(pme),
                                            gmx_wallclock_gpu_pme_t* GPU_FUNC_ARGUMENT(timings)) GPU_FUNC_TERM;
/* The main PME GPU functions */

/*! \brief
 * Prepares the PME GPU computation (updating the box if needed).
 *
 * \param[in] pme       The PME data structure.
 * \param[in] box       The unit cell box.
 * \param[in] wcycle    The wallclock counter.
 * \param[in] stepWork  The required work for this simulation step.
 */
GPU_FUNC_QUALIFIER void pme_gpu_prepare_computation(gmx_pme_t*     GPU_FUNC_ARGUMENT(pme),
                                                    const matrix   GPU_FUNC_ARGUMENT(box),
                                                    gmx_wallcycle* GPU_FUNC_ARGUMENT(wcycle),
                                                    const gmx::StepWorkload& GPU_FUNC_ARGUMENT(stepWork)) GPU_FUNC_TERM;
/*! \brief
 * Launches first stage of PME on GPU - spreading kernel.
 *
 * \param[in] pme             The PME data structure.
 * \param[in] xReadyOnDevice  Event synchronizer indicating that the coordinates
 *                            are ready in the device memory; nullptr allowed only on separate PME ranks.
 * \param[in] wcycle          The wallclock counter.
 * \param[in] lambdaQ         The Coulomb lambda of the current state of the
 *                            system. Only used if FEP of Coulomb is active.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_spread(gmx_pme_t*            GPU_FUNC_ARGUMENT(pme),
                                              GpuEventSynchronizer* GPU_FUNC_ARGUMENT(xReadyOnDevice),
                                              gmx_wallcycle*        GPU_FUNC_ARGUMENT(wcycle),
                                              const real            GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;
/*! \brief
 * Launches middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
 *
 * \param[in] pme       The PME data structure.
 * \param[in] wcycle    The wallclock counter.
 * \param[in] stepWork  The required work for this simulation step.
 */
GPU_FUNC_QUALIFIER void
pme_gpu_launch_complex_transforms(gmx_pme_t*               GPU_FUNC_ARGUMENT(pme),
                                  gmx_wallcycle*           GPU_FUNC_ARGUMENT(wcycle),
                                  const gmx::StepWorkload& GPU_FUNC_ARGUMENT(stepWork)) GPU_FUNC_TERM;

/*! \brief
 * Launches last stage of PME on GPU - force gathering and D2H force transfer.
 *
 * \param[in] pme      The PME data structure.
 * \param[in] wcycle   The wallclock counter.
 * \param[in] lambdaQ  The Coulomb lambda to use when calculating the results.
 */
GPU_FUNC_QUALIFIER void pme_gpu_launch_gather(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                              gmx_wallcycle*   GPU_FUNC_ARGUMENT(wcycle),
                                              const real       GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;
/*! \brief
 * Attempts to complete PME GPU tasks.
 *
 * The \p completionKind argument controls whether the function blocks until all
 * enqueued PME GPU tasks have completed (as pme_gpu_wait_finish_task() does) or only
 * checks for completion and returns immediately if they have not.
 * When blocking, or when the tasks have completed, it also gets the output forces
 * by assigning them to \p forceWithVirial.
 * Virial/energy are also outputs if they were to be computed.
 *
 * \param[in]  pme              The PME data structure.
 * \param[in]  stepWork         The required work for this simulation step.
 * \param[in]  wcycle           The wallclock counter.
 * \param[out] forceWithVirial  The output force and virial.
 * \param[out] enerd            The output energies.
 * \param[in]  lambdaQ          The Coulomb lambda to use when calculating the results.
 * \param[in]  completionKind   Indicates whether PME task completion should only be checked rather
 *                              than waited for.
 * \returns                     True if the PME GPU tasks have completed.
 */
GPU_FUNC_QUALIFIER bool pme_gpu_try_finish_task(gmx_pme_t*               GPU_FUNC_ARGUMENT(pme),
                                                const gmx::StepWorkload& GPU_FUNC_ARGUMENT(stepWork),
                                                gmx_wallcycle*           GPU_FUNC_ARGUMENT(wcycle),
                                                gmx::ForceWithVirial*    GPU_FUNC_ARGUMENT(forceWithVirial),
                                                gmx_enerdata_t*          GPU_FUNC_ARGUMENT(enerd),
                                                const real               GPU_FUNC_ARGUMENT(lambdaQ),
                                                GpuTaskCompletion        GPU_FUNC_ARGUMENT(completionKind))
        GPU_FUNC_TERM_WITH_RETURN(false);
/*! \brief
 * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
 * (if they were to be computed).
 *
 * \param[in]  pme              The PME data structure.
 * \param[in]  stepWork         The required work for this simulation step.
 * \param[in]  wcycle           The wallclock counter.
 * \param[out] forceWithVirial  The output force and virial.
 * \param[out] enerd            The output energies.
 * \param[in]  lambdaQ          The Coulomb lambda to use when calculating the results.
 */
GPU_FUNC_QUALIFIER void pme_gpu_wait_and_reduce(gmx_pme_t*               GPU_FUNC_ARGUMENT(pme),
                                                const gmx::StepWorkload& GPU_FUNC_ARGUMENT(stepWork),
                                                gmx_wallcycle*           GPU_FUNC_ARGUMENT(wcycle),
                                                gmx::ForceWithVirial*    GPU_FUNC_ARGUMENT(forceWithVirial),
                                                gmx_enerdata_t*          GPU_FUNC_ARGUMENT(enerd),
                                                const real               GPU_FUNC_ARGUMENT(lambdaQ)) GPU_FUNC_TERM;

/*! \brief
 * The PME GPU reinitialization function that is called both at the end of any PME computation and on any load balancing.
 *
 * Clears the internal grid and energy/virial buffers; it is not safe to start
 * the PME computation without calling this.
 * Note that unlike in the nbnxn module, the force buffer does not need clearing.
 *
 * \todo Rename this function to *clear* -- it clearly only does output resetting
 * and we should be clear about what the function does.
 *
 * \param[in] pme     The PME data structure.
 * \param[in] wcycle  The wallclock counter.
 */
GPU_FUNC_QUALIFIER void pme_gpu_reinit_computation(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme),
                                                   gmx_wallcycle*   GPU_FUNC_ARGUMENT(wcycle)) GPU_FUNC_TERM;
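/* Illustrative usage sketch (editorial addition): the per-step call sequence for the PME GPU
 * path, assuming pme, box, wcycle, stepWork, xReadyOnDevice, forceWithVirial, enerd and lambdaQ
 * are provided by the scheduler. This only strings together the entry points declared above;
 * real code interleaves other force tasks between the launches and the wait.
 *
 *   pme_gpu_prepare_computation(pme, box, wcycle, stepWork);
 *   pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle, lambdaQ);
 *   pme_gpu_launch_complex_transforms(pme, wcycle, stepWork);
 *   pme_gpu_launch_gather(pme, wcycle, lambdaQ);
 *   // ... launch and run other (short-ranged) force work here ...
 *   pme_gpu_wait_and_reduce(pme, stepWork, wcycle, forceWithVirial, enerd, lambdaQ);
 *   pme_gpu_reinit_computation(pme, wcycle);
 */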
/*! \brief Set pointer to device copy of coordinate data.
 * \param[in] pme  The PME data structure.
 * \param[in] d_x  The pointer to the positions buffer to be set.
 */
GPU_FUNC_QUALIFIER void pme_gpu_set_device_x(const gmx_pme_t*        GPU_FUNC_ARGUMENT(pme),
                                             DeviceBuffer<gmx::RVec> GPU_FUNC_ARGUMENT(d_x)) GPU_FUNC_TERM;

/*! \brief Get pointer to device copy of force data.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to force data.
 */
GPU_FUNC_QUALIFIER void* pme_gpu_get_device_f(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);
/*! \brief Get pointer to the device synchronizer object that allows syncing on PME force calculation completion.
 * \param[in] pme  The PME data structure.
 * \returns        Pointer to synchronizer.
 */
GPU_FUNC_QUALIFIER GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* GPU_FUNC_ARGUMENT(pme))
        GPU_FUNC_TERM_WITH_RETURN(nullptr);
#endif