/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \inlibraryapi
 * \ingroup module_ewald
 */
#ifndef GMX_EWALD_PME_H
#define GMX_EWALD_PME_H

#include <string>

#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/real.h"

#include "pme-gpu-types.h"
struct interaction_const_t;
struct t_commrec;
struct t_inputrec;
struct t_nrnb;
struct pme_gpu_t;
struct gmx_wallclock_gpu_pme_t;
struct gmx_device_info_t;

namespace gmx
{
class ForceWithVirial;
class MDLogger;
} // namespace gmx
enum {
    GMX_SUM_GRID_FORWARD, GMX_SUM_GRID_BACKWARD
};

/*! \brief Return the smallest allowed PME grid size for \p pmeOrder */
int minimalPmeGridSize(int pmeOrder);
/*! \brief Check restrictions on pme_order and the PME grid nkx,nky,nkz.
 *
 * With errorsAreFatal=true, an exception or fatal error is generated
 * on violation of restrictions.
 * With errorsAreFatal=false, false is returned on violation of restrictions.
 * When all restrictions are obeyed, true is returned.
 * Argument useThreads tells if any MPI rank doing PME uses more than 1 thread.
 * If useThreads is unknown at the time of calling, pass true for conservative checking.
 *
 * The PME GPU restrictions are checked separately during pme_gpu_init().
 */
bool gmx_pme_check_restrictions(int pme_order,
                                int nkx, int nky, int nkz,
                                int nnodes_major,
                                bool useThreads,
                                bool errorsAreFatal);
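
/*
 * Illustrative caller-side sketch (hypothetical values, not taken from this
 * header): when the thread count is not yet known, pass true for useThreads
 * to get the conservative check, and false for errorsAreFatal to get a bool
 * back instead of a fatal error.
 *
 *   bool gridOk = gmx_pme_check_restrictions(4,          // pme_order
 *                                            64, 64, 64, // nkx, nky, nkz
 *                                            1,          // nnodes_major
 *                                            true,       // threads unknown -> conservative
 *                                            false);     // report violations via return value
 */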
/*! \brief Initialize \p pmedata
 *
 * \returns 0 indicates all well, non-zero is an error code.
 * \throws gmx::InconsistentInputError if input grid sizes/PME order are inconsistent.
 */
int gmx_pme_init(struct gmx_pme_t **pmedata, struct t_commrec *cr,
                 int nnodes_major, int nnodes_minor,
                 const t_inputrec *ir, int homenr,
                 gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                 gmx_bool bReproducible,
                 real ewaldcoeff_q, real ewaldcoeff_lj,
                 int nthread,
                 PmeRunMode runMode,
                 pme_gpu_t *pmeGPU,
                 gmx_device_info_t *gpuInfo,
                 const gmx::MDLogger &mdlog);

/*! \brief Destroys the PME data structure.*/
void gmx_pme_destroy(gmx_pme_t *pme);
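
/*
 * Minimal lifecycle sketch (hypothetical caller code; cr, ir, homenr,
 * ewaldcoeff_q, ewaldcoeff_lj and mdlog are assumed to exist in the caller).
 * A CPU-only setup passes PmeRunMode::CPU with null GPU pointers, and every
 * successful gmx_pme_init() is paired with gmx_pme_destroy().
 *
 *   gmx_pme_t *pmedata = nullptr;
 *   int status = gmx_pme_init(&pmedata, cr, 1, 1, ir, homenr,
 *                             FALSE, FALSE, TRUE,
 *                             ewaldcoeff_q, ewaldcoeff_lj, 1,
 *                             PmeRunMode::CPU, nullptr, nullptr, mdlog);
 *   if (status == 0)
 *   {
 *       // ... use pmedata ...
 *       gmx_pme_destroy(pmedata);
 *   }
 */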
//@{
/*! \brief Flag values that control what gmx_pme_do() will calculate
 *
 * These can be combined with bitwise-OR if more than one thing is required.
 */
#define GMX_PME_SPREAD        (1<<0)
#define GMX_PME_SOLVE         (1<<1)
#define GMX_PME_CALC_F        (1<<2)
#define GMX_PME_CALC_ENER_VIR (1<<3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT      (1<<4)

#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F)
//@}
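
/*
 * The flags above combine with bitwise-OR. A hypothetical selection for a
 * full force computation that also returns energy and virial:
 *
 *   int pmeFlags = GMX_PME_DO_ALL_F | GMX_PME_CALC_ENER_VIR;
 *   // equivalent to GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F | GMX_PME_CALC_ENER_VIR
 */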
/*! \brief Do a PME calculation on a CPU for the long-range electrostatics and/or LJ.
 *
 * The meaning of \p flags is defined above, and determines which
 * parts of the calculation are performed.
 *
 * \return 0 indicates all well, non-zero is an error code.
 */
int gmx_pme_do(struct gmx_pme_t *pme,
               int start, int homenr,
               rvec x[], rvec f[],
               real chargeA[], real chargeB[],
               real c6A[], real c6B[],
               real sigmaA[], real sigmaB[],
               matrix box, t_commrec *cr,
               int maxshift_x, int maxshift_y,
               t_nrnb *nrnb, gmx_wallcycle_t wcycle,
               matrix vir_q, matrix vir_lj,
               real *energy_q, real *energy_lj,
               real lambda_q, real lambda_lj,
               real *dvdlambda_q, real *dvdlambda_lj,
               int flags);
/*! \brief Called on the nodes that do PME exclusively (as slaves) */
int gmx_pmeonly(struct gmx_pme_t *pme,
                struct t_commrec *cr, t_nrnb *mynrnb,
                gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                t_inputrec *ir, PmeRunMode runMode);
/*! \brief Calculate the PME grid energy V for n charges.
 *
 * The potential (found in \p pme) must have been found already with a
 * call to gmx_pme_do() with at least GMX_PME_SPREAD and GMX_PME_SOLVE
 * specified. Note that the charges are not spread on the grid in the
 * pme struct. Currently does not work in parallel or with free
 * energy.
 */
void gmx_pme_calc_energy(struct gmx_pme_t *pme, int n, rvec *x, real *q, real *V);
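
/*
 * Usage sketch (hypothetical caller code and variable names): the grid
 * potential must already be available from an earlier gmx_pme_do() call whose
 * flags included at least GMX_PME_SPREAD and GMX_PME_SOLVE.
 *
 *   real V = 0;
 *   gmx_pme_calc_energy(pme, nTestCharges, testPositions, testCharges, &V);
 */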
/*! \brief Send the charges and maxshift to our PME-only node. */
void gmx_pme_send_parameters(struct t_commrec *cr,
                             const interaction_const_t *ic,
                             gmx_bool bFreeEnergy_q, gmx_bool bFreeEnergy_lj,
                             real *chargeA, real *chargeB,
                             real *sqrt_c6A, real *sqrt_c6B,
                             real *sigmaA, real *sigmaB,
                             int maxshift_x, int maxshift_y);
/*! \brief Send the coordinates to our PME-only node and request a PME calculation */
void gmx_pme_send_coordinates(struct t_commrec *cr, matrix box, rvec *x,
                              real lambda_q, real lambda_lj,
                              gmx_bool bEnerVir,
                              gmx_int64_t step);

/*! \brief Tell our PME-only node to finish */
void gmx_pme_send_finish(struct t_commrec *cr);

/*! \brief Tell our PME-only node to reset all cycle and flop counters */
void gmx_pme_send_resetcounters(struct t_commrec *cr, gmx_int64_t step);

/*! \brief PP nodes receive the long range forces from the PME nodes */
void gmx_pme_receive_f(struct t_commrec *cr,
                       gmx::ForceWithVirial *forceWithVirial,
                       real *energy_q, real *energy_lj,
                       real *dvdlambda_q, real *dvdlambda_lj,
                       float *pme_cycles);
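
/*
 * Sketch of the PP-side sequence implied by the declarations above
 * (hypothetical caller code; cr, ic, box, x, step and the parameter arrays
 * are assumed; a run without free energy passes FALSE and null B-state arrays):
 *
 *   gmx_pme_send_parameters(cr, ic, FALSE, FALSE, chargeA, nullptr,
 *                           sqrt_c6A, nullptr, sigmaA, nullptr,
 *                           maxshift_x, maxshift_y);         // after (re)partitioning
 *   gmx_pme_send_coordinates(cr, box, x, lambda_q, lambda_lj,
 *                            TRUE, step);                    // every step
 *   gmx_pme_receive_f(cr, &forceWithVirial, &energy_q, &energy_lj,
 *                     &dvdl_q, &dvdl_lj, &pmeCycles);        // after local force work
 *   gmx_pme_send_finish(cr);                                 // at the end of the run
 */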
/*! \brief
 * This function updates the local atom data on GPU after DD (charges, coordinates, etc.).
 * TODO: it should update the PME CPU atom data as well.
 * (currently the CPU PME call gmx_pme_do() is passed the input pointers on each step).
 *
 * \param[in] pme            The PME structure.
 * \param[in] nAtoms         The number of particles.
 * \param[in] charges        The pointer to the array of particle charges.
 */
void gmx_pme_reinit_atoms(const gmx_pme_t *pme, const int nAtoms, const real *charges);
/* A block of PME GPU functions */

/*! \brief Checks whether the input system allows running PME on a GPU.
 * TODO: this mostly duplicates an internal PME assert function
 * pme_gpu_check_restrictions(), except that it works with an already
 * formed gmx_pme_t structure. Should that one go away/work with inputrec?
 *
 * \param[in]  ir     Input system.
 * \param[out] error  The error message if the input is not supported on GPU.
 *
 * \returns true if PME can run on GPU with this input, false otherwise.
 */
bool pme_gpu_supports_input(const t_inputrec *ir, std::string *error);
/*! \brief
 * Returns the active PME codepath (CPU, GPU, mixed).
 * \todo This is rather static data that should be managed by the higher level task scheduler.
 *
 * \param[in] pme            The PME data structure.
 * \returns the active PME codepath.
 */
PmeRunMode pme_run_mode(const gmx_pme_t *pme);
/*! \brief
 * Tells if PME is enabled to run on GPU (not necessarily active at the moment).
 * \todo This is rather static data that should be managed by the hardware assignment manager.
 * For now, it is synonymous with the active PME codepath (in the absence of dynamic switching).
 *
 * \param[in] pme            The PME data structure.
 * \returns true if PME can run on GPU, false otherwise.
 */
inline bool pme_gpu_task_enabled(const gmx_pme_t *pme)
{
    return (pme != nullptr) && (pme_run_mode(pme) != PmeRunMode::CPU);
}
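
/*
 * Hypothetical caller-side use, e.g. to decide whether the GPU launch/wait
 * functions below apply to this rank:
 *
 *   if (pme_gpu_task_enabled(pme))
 *   {
 *       // use the pme_gpu_* launch and wait functions declared below
 *   }
 *   else
 *   {
 *       // fall back to gmx_pme_do() on the CPU
 *   }
 */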
/*! \brief
 * Resets the PME GPU timings. To be called at the reset step.
 *
 * \param[in] pme            The PME structure.
 */
void pme_gpu_reset_timings(const gmx_pme_t *pme);

/*! \brief
 * Copies the PME GPU timings to the gmx_wallclock_gpu_pme_t structure (for log output). To be called at the run end.
 *
 * \param[in]  pme      The PME structure.
 * \param[out] timings  The gmx_wallclock_gpu_pme_t structure the timings are copied to.
 */
void pme_gpu_get_timings(const gmx_pme_t *pme,
                         gmx_wallclock_gpu_pme_t *timings);
/* The main PME GPU functions */

/*! \brief
 * Prepares the PME GPU step (updating the box if needed).
 *
 * \param[in] pme             The PME data structure.
 * \param[in] needToUpdateBox Tells if the stored unit cell parameters should be updated from \p box.
 * \param[in] box             The unit cell box.
 * \param[in] wcycle          The wallclock counter.
 * \param[in] flags           The combination of flags to affect the PME computation on this step.
 *                            The flags are the GMX_PME_ flags from pme.h.
 */
void pme_gpu_prepare_step(gmx_pme_t *pme,
                          bool needToUpdateBox,
                          const matrix box,
                          gmx_wallcycle_t wcycle,
                          int flags);
/*! \brief
 * Launches first stage of PME on GPU - H2D input transfers, spreading kernel, and D2H grid transfer if needed.
 *
 * \param[in] pme            The PME data structure.
 * \param[in] x              The array of local atoms' coordinates.
 * \param[in] wcycle         The wallclock counter.
 */
void pme_gpu_launch_spread(gmx_pme_t *pme,
                           const rvec *x,
                           gmx_wallcycle_t wcycle);
/*! \brief
 * Launches middle stages of PME (FFT R2C, solving, FFT C2R) either on GPU or on CPU, depending on the run mode.
 *
 * \param[in] pme            The PME data structure.
 * \param[in] wcycle         The wallclock counter.
 */
void pme_gpu_launch_complex_transforms(gmx_pme_t *pme,
                                       gmx_wallcycle_t wcycle);
/*! \brief
 * Launches last stage of PME on GPU - force gathering and D2H force transfer.
 *
 * \param[in]     pme            The PME data structure.
 * \param[in]     wcycle         The wallclock counter.
 * \param[in,out] forces         The array of local atoms' resulting forces.
 * \param[in]     forceTreatment Tells how data in h_forces should be treated. The gathering kernel either stores
 *                               the output reciprocal forces into the host array, or copies its contents to the GPU first
 *                               and accumulates. The reduction is non-atomic.
 */
void pme_gpu_launch_gather(const gmx_pme_t *pme,
                           gmx_wallcycle_t wcycle,
                           rvec *forces,
                           PmeForceOutputHandling forceTreatment);
/*! \brief
 * Blocks until PME GPU tasks are completed, and gets the output forces and virial/energy
 * (if they were to be computed).
 *
 * \param[in]  pme            The PME data structure.
 * \param[in]  wcycle         The wallclock counter.
 * \param[out] vir_q          The output virial matrix.
 * \param[out] energy_q       The output energy.
 */
void pme_gpu_wait_for_gpu(const gmx_pme_t *pme,
                          gmx_wallcycle_t wcycle,
                          matrix vir_q,
                          real *energy_q);
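
/*
 * Hypothetical per-step sketch of the GPU call sequence declared above
 * (caller-owned box, x, forces, vir_q, energy_q and pmeFlags are assumed;
 * the PmeForceOutputHandling value is illustrative):
 *
 *   pme_gpu_prepare_step(pme, haveDynamicBox, box, wcycle, pmeFlags);
 *   pme_gpu_launch_spread(pme, x, wcycle);
 *   pme_gpu_launch_complex_transforms(pme, wcycle);
 *   pme_gpu_launch_gather(pme, wcycle, forces, PmeForceOutputHandling::Set);
 *   // ... overlap other work on the CPU here ...
 *   pme_gpu_wait_for_gpu(pme, wcycle, vir_q, &energy_q);
 */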

#endif