/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2005,2006,2007,2008,2009,2010,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \defgroup module_domdec Spatial domain decomposition (for parallelization over MPI)
 * \ingroup group_mdrun
 *
 * \brief Manages the decomposition of the simulation volume over MPI
 * ranks to try to distribute work evenly with minimal communication
 * overheads.
 *
 * \todo Get domdec stuff out of mdtypes/commrec.h
 *
 * \author Berk Hess <hess@kth.se>
 */
/*! \libinternal \file
 *
 * \brief This file declares functions for mdrun to call to manage the
 * details of its domain decomposition.
 *
 * \author Berk Hess <hess@kth.se>
 * \inlibraryapi
 * \ingroup module_domdec
 */
#ifndef GMX_DOMDEC_DOMDEC_H
#define GMX_DOMDEC_DOMDEC_H

#include <stdio.h>

#include <vector>

#include "gromacs/math/paddedvector.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
struct cginfo_mb_t;
struct gmx_ddbox_t;
struct gmx_domdec_t;
struct gmx_domdec_zones_t;
struct gmx_localtop_t;
struct gmx_mtop_t;
struct gmx_vsite_t;
struct gmx_wallcycle;
struct MdrunOptions;
struct t_block;
struct t_blocka;
struct t_commrec;
struct t_forcerec;
struct t_inputrec;
struct t_mdatoms;
struct t_nrnb;
class t_state;

namespace gmx
{
class Constraints;
class LocalAtomSetManager;
class MDAtoms;
} // namespace gmx
/*! \brief Returns the global topology atom number belonging to local atom index i.
 *
 * This function is intended for writing ASCII output
 * and returns atom numbers starting at 1.
 * When dd=NULL returns i+1.
 */
int ddglatnr(const gmx_domdec_t *dd, int i);
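
/* Illustrative usage sketch (not part of the API declared here): printing
 * 1-based global atom numbers for the home atoms when writing ASCII output.
 * The output file `out` is a hypothetical name.
 *
 *     for (int i = 0; i < dd_numHomeAtoms(*dd); i++)
 *     {
 *         fprintf(out, "local atom %d has global number %d\n", i, ddglatnr(dd, i));
 *     }
 */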
/*! \brief Return a block struct for the charge groups of the whole system */
t_block *dd_charge_groups_global(struct gmx_domdec_t *dd);
/*! \brief Store the global cg indices of the home cgs in \p state.
 *
 * This means it can be reset, even after a new DD partitioning.
 */
void dd_store_state(struct gmx_domdec_t *dd, t_state *state);
/*! \brief Returns a pointer to the gmx_domdec_zones_t struct */
struct gmx_domdec_zones_t *domdec_zones(struct gmx_domdec_t *dd);
/*! \brief Sets the j-charge-group range for i-charge-group \p icg */
void dd_get_ns_ranges(const gmx_domdec_t *dd, int icg,
                      int *jcg0, int *jcg1, ivec shift0, ivec shift1);
/*! \brief Returns the number of home atoms */
int dd_numHomeAtoms(const gmx_domdec_t &dd);
/*! \brief Returns the atom range in the local state for atoms that need to be present in mdatoms */
int dd_natoms_mdatoms(const gmx_domdec_t *dd);

/*! \brief Returns the atom range in the local state for atoms involved in virtual sites */
int dd_natoms_vsite(const gmx_domdec_t *dd);
/*! \brief Sets the atom range in the local state for atoms received in constraints communication */
void dd_get_constraint_range(const gmx_domdec_t *dd,
                             int *at_start, int *at_end);
/*! \libinternal \brief Struct for passing around the number of PME domains */
struct NumPmeDomains
{
    int x; //!< The number of PME domains along dimension x
    int y; //!< The number of PME domains along dimension y
};
/*! \brief Returns the number of PME domains, can be called with dd=NULL */
NumPmeDomains getNumPmeDomains(const gmx_domdec_t *dd);
/*! \brief Returns the set of DD ranks that communicate with PME node cr->nodeid */
std::vector<int> get_pme_ddranks(const t_commrec *cr, int pmenodeid);
/*! \brief Returns the maximum shift for coordinate communication in PME, dim x */
int dd_pme_maxshift_x(const gmx_domdec_t *dd);

/*! \brief Returns the maximum shift for coordinate communication in PME, dim y */
int dd_pme_maxshift_y(const gmx_domdec_t *dd);
/*! \brief The options for the domain decomposition MPI task ordering. */
enum class DdRankOrder
{
    select,     //!< First value (needed to cope with command-line parsing)
    interleave, //!< Interleave the PP and PME ranks
    pp_pme,     //!< First all PP ranks, then all PME ranks at the end
    cartesian,  //!< Use Cartesian communicators for PP, PME and PP-PME
    nr          //!< The number of options
};
/*! \brief The options for the dynamic load balancing. */
enum class DlbOption
{
    select,           //!< First value (needed to cope with command-line parsing)
    turnOnWhenUseful, //!< Turn on DLB when we think it would improve performance
    no,               //!< Never turn on DLB
    yes,              //!< Turn on DLB from the start and keep it on
    nr                //!< The number of options
};
/*! \libinternal \brief Structure containing all (command line) options for the domain decomposition */
struct DomdecOptions
{
    /*! \brief Constructor */
    DomdecOptions();

    //! If true, check that all bonded interactions have been assigned to exactly one domain/rank.
    gmx_bool checkBondedInteractions;
    //! If true, don't communicate all atoms between the non-bonded cut-off and the larger bonded cut-off, but only those that have non-local bonded interactions. This significantly reduces the communication volume.
    gmx_bool useBondedCommunication;
    //! The domain decomposition grid cell count, 0 means let domdec choose based on the number of ranks.
    ivec numCells;
    //! The number of separate PME ranks requested, -1 = auto.
    int numPmeRanks;
    //! Ordering of the PP and PME ranks, values from enum above.
    DdRankOrder rankOrder;
    //! The minimum communication range, used to extend the communication range for bonded interactions (nm).
    real minimumCommunicationRange;
    //! Communication range for atoms involved in constraints (P-LINCS) (nm).
    real constraintCommunicationRange;
    //! Dynamic load balancing option, values from enum above.
    DlbOption dlbOption;
    /*! \brief Fraction in (0,1) by whose reciprocal the initial
     * DD cell size will be increased in order to provide a margin
     * in which dynamic load balancing can act, while preserving
     * the minimum cell size. */
    real dlbScaling;
    //! String containing a vector of the relative sizes in the x direction of the corresponding DD cells.
    const char *cellSizeX;
    //! String containing a vector of the relative sizes in the y direction of the corresponding DD cells.
    const char *cellSizeY;
    //! String containing a vector of the relative sizes in the z direction of the corresponding DD cells.
    const char *cellSizeZ;
};
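
/* Illustrative sketch: how a caller might fill in DomdecOptions before
 * initializing the domain decomposition. The values are arbitrary examples,
 * not recommended settings; the constructor provides the defaults.
 *
 *     DomdecOptions ddOptions;
 *     ddOptions.rankOrder                 = DdRankOrder::interleave;
 *     ddOptions.numPmeRanks               = -1;  // -1 = choose automatically
 *     ddOptions.dlbOption                 = DlbOption::turnOnWhenUseful;
 *     ddOptions.minimumCommunicationRange = 1.0; // nm
 */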
/*! \brief Initializes the domain decomposition, chooses the DD grid and PME ranks, returns the DD struct */
gmx_domdec_t *
init_domain_decomposition(FILE *fplog,
                          t_commrec *cr,
                          const DomdecOptions &options,
                          const MdrunOptions &mdrunOptions,
                          const gmx_mtop_t *mtop,
                          const t_inputrec *ir,
                          const matrix box,
                          gmx::ArrayRef<const gmx::RVec> xGlobal,
                          gmx::LocalAtomSetManager *atomSets);
/*! \brief Initialize data structures for bonded interactions */
void dd_init_bondeds(FILE *fplog,
                     gmx_domdec_t *dd,
                     const gmx_mtop_t *mtop,
                     const gmx_vsite_t *vsite,
                     const t_inputrec *ir,
                     gmx_bool bBCheck,
                     cginfo_mb_t *cginfo_mb);
/*! \brief Returns whether we need to do pbc for calculating bonded interactions */
gmx_bool dd_bonded_molpbc(const gmx_domdec_t *dd, int ePBC);
/*! \brief Change the DD non-bonded communication cut-off.
 *
 * This could fail when trying to increase the cut-off,
 * in which case FALSE is returned and the cut-off is not modified.
 */
gmx_bool change_dd_cutoff(struct t_commrec *cr,
                          t_state *state, const t_inputrec *ir,
                          real cutoff_req);
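
/* Illustrative sketch: since increasing the cut-off can fail, the return
 * value must be checked before acting on the new value. `requestedCutoff`
 * is a hypothetical variable.
 *
 *     if (!change_dd_cutoff(cr, state, ir, requestedCutoff))
 *     {
 *         // The DD grid cannot support requestedCutoff;
 *         // the old cut-off is still in effect.
 *     }
 */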
/*! \brief Limit DLB to preserve the option of returning to the current cut-off.
 *
 * Domain boundary changes due to the DD dynamic load balancing can limit
 * the cut-off distance that can be set in change_dd_cutoff. This function
 * sets/changes the DLB limit such that using the passed (pair-list) cut-off
 * should still be possible after subsequently setting a shorter cut-off
 * with change_dd_cutoff.
 */
void set_dd_dlb_max_cutoff(struct t_commrec *cr, real cutoff);
/*! \brief Return whether we are currently using dynamic load balancing */
gmx_bool dd_dlb_is_on(const struct gmx_domdec_t *dd);

/*! \brief Return whether the DLB lock is set */
gmx_bool dd_dlb_is_locked(const struct gmx_domdec_t *dd);

/*! \brief Set a lock such that with DLB=auto DLB cannot get turned on */
void dd_dlb_lock(struct gmx_domdec_t *dd);

/*! \brief Clear a lock such that with DLB=auto DLB may get turned on later */
void dd_dlb_unlock(struct gmx_domdec_t *dd);
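
/* Illustrative sketch: with DLB=auto, the lock can be used to keep DLB off
 * during a phase where the domain sizes must not change, e.g. while the
 * cut-off is still being tuned:
 *
 *     dd_dlb_lock(cr->dd);       // DLB can no longer get turned on
 *     // ... phase that requires fixed domain boundaries ...
 *     if (dd_dlb_is_locked(cr->dd))
 *     {
 *         dd_dlb_unlock(cr->dd); // DLB may get turned on again later
 *     }
 */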
/*! \brief Set up communication for averaging GPU wait times over domains
 *
 * When domains (PP MPI ranks) share a GPU, the individual GPU wait times
 * are meaningless, as they depend on the order in which tasks on the same
 * GPU finish. Therefore the wait times need to be averaged over the ranks
 * sharing the same GPU. This function sets up the communication for that.
 */
void dd_setup_dlb_resource_sharing(t_commrec *cr,
                                   int gpu_id);
/*! \brief Cycle counter indices used internally in the domain decomposition */
enum
{
    ddCyclStep, ddCyclPPduringPME, ddCyclF, ddCyclWaitGPU, ddCyclPME, ddCyclNr
};
/*! \brief Add the wallcycle count to the DD counter */
void dd_cycles_add(const gmx_domdec_t *dd, float cycles, int ddCycl);
/*! \brief Start the force flop count */
void dd_force_flop_start(struct gmx_domdec_t *dd, t_nrnb *nrnb);

/*! \brief Stop the force flop count */
void dd_force_flop_stop(struct gmx_domdec_t *dd, t_nrnb *nrnb);
/*! \brief Return the PME/PP force load ratio, or -1 if nothing was measured.
 *
 * Should only be called on the DD master node.
 */
float dd_pme_f_ratio(struct gmx_domdec_t *dd);
/*! \brief Communicate the coordinates to the neighboring cells and do pbc. */
void dd_move_x(struct gmx_domdec_t *dd,
               matrix box,
               gmx::ArrayRef<gmx::RVec> x,
               gmx_wallcycle *wcycle);
/*! \brief Sum the forces over the neighboring cells.
 *
 * When fshift!=NULL the shift forces are updated to obtain
 * the correct virial from the single sum including f.
 */
void dd_move_f(struct gmx_domdec_t *dd,
               gmx::ArrayRef<gmx::RVec> f,
               rvec *fshift,
               gmx_wallcycle *wcycle);
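
/* Illustrative sketch of the usual halo-exchange pattern in an MD step:
 * coordinates are communicated to the neighboring cells before the force
 * calculation and forces are summed back afterwards. `fshift` may be NULL
 * when no virial is needed.
 *
 *     dd_move_x(cr->dd, box, x, wcycle);    // send coordinates, apply pbc
 *     // ... evaluate forces on home + communicated atoms ...
 *     dd_move_f(cr->dd, f, fshift, wcycle); // sum remote force contributions
 */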
/*! \brief Communicate a real for each atom to the neighboring cells. */
void dd_atom_spread_real(struct gmx_domdec_t *dd, real v[]);

/*! \brief Sum the contributions to a real for each atom over the neighboring cells. */
void dd_atom_sum_real(struct gmx_domdec_t *dd, real v[]);
/*! \brief Partition the system over the nodes.
 *
 * step is only used for printing error messages.
 * If bMasterState==TRUE then state_global from the master node is used,
 * else state_local is redistributed between the nodes.
 * When f!=NULL, *f will be reallocated to the size of state_local.
 */
void dd_partition_system(FILE *fplog,
                         int64_t step,
                         const t_commrec *cr,
                         gmx_bool bMasterState,
                         int nstglobalcomm,
                         t_state *state_global,
                         const gmx_mtop_t *top_global,
                         const t_inputrec *ir,
                         t_state *state_local,
                         PaddedRVecVector *f,
                         gmx::MDAtoms *mdatoms,
                         gmx_localtop_t *top_local,
                         t_forcerec *fr,
                         gmx_vsite_t *vsite,
                         gmx::Constraints *constr,
                         t_nrnb *nrnb,
                         gmx_wallcycle *wcycle,
                         gmx_bool bVerbose);
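
/* Illustrative sketch, assuming the parameter list above: the first
 * partitioning of a run distributes the master-rank state, later calls
 * redistribute the local states.
 *
 *     // at the start of the run: partition from the global state
 *     dd_partition_system(fplog, step, cr, TRUE, nstglobalcomm,
 *                         state_global, top_global, ir, state_local, &f,
 *                         mdatoms, top_local, fr, vsite, constr,
 *                         nrnb, wcycle, bVerbose);
 *
 *     // during the run: repartition from the distributed local state
 *     dd_partition_system(fplog, step, cr, FALSE, nstglobalcomm,
 *                         state_global, top_global, ir, state_local, &f,
 *                         mdatoms, top_local, fr, vsite, constr,
 *                         nrnb, wcycle, bVerbose);
 */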
/*! \brief Reset all the statistics and counters for total run counting */
void reset_dd_statistics_counters(struct gmx_domdec_t *dd);
/*! \brief Print statistics for domain decomposition communication */
void print_dd_statistics(const t_commrec *cr, const t_inputrec *ir, FILE *fplog);
/*! \brief Check whether bonded interactions are missing, if appropriate
 *
 * \param[in]     fplog                                  Log file pointer
 * \param[in]     cr                                     Communication object
 * \param[in]     totalNumberOfBondedInteractions        Result of the global reduction over the number of bonds treated in each domain
 * \param[in]     top_global                             Global topology for the error message
 * \param[in]     top_local                              Local topology for the error message
 * \param[in]     state                                  Global state for the error message
 * \param[in,out] shouldCheckNumberOfBondedInteractions  Whether we should do the check. Always set to false.
 */
void checkNumberOfBondedInteractions(FILE *fplog,
                                     t_commrec *cr,
                                     int totalNumberOfBondedInteractions,
                                     const gmx_mtop_t *top_global,
                                     const gmx_localtop_t *top_local,
                                     const t_state *state,
                                     bool *shouldCheckNumberOfBondedInteractions);
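
/* Illustrative sketch: a repartitioning sets the flag, this check then
 * consumes it (the function always resets the flag to false):
 *
 *     bool shouldCheckNumberOfBondedInteractions = true; // set by repartitioning
 *     // ... global reduction fills totalNumberOfBondedInteractions ...
 *     checkNumberOfBondedInteractions(fplog, cr, totalNumberOfBondedInteractions,
 *                                     top_global, top_local, state,
 *                                     &shouldCheckNumberOfBondedInteractions);
 */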
/* In domdec_con.c */

/*! \brief Communicates the virtual site forces, reduces the shift forces when \p fshift != NULL */
void dd_move_f_vsites(struct gmx_domdec_t *dd, rvec *f, rvec *fshift);
/*! \brief Clears the forces for virtual sites */
void dd_clear_f_vsites(struct gmx_domdec_t *dd, rvec *f);
/*! \brief Move x0 and also x1 if x1!=NULL. bX1IsCoord tells whether to do PBC on x1. */
void dd_move_x_constraints(struct gmx_domdec_t *dd, matrix box,
                           rvec *x0, rvec *x1, gmx_bool bX1IsCoord);
/*! \brief Communicates the coordinates involved in virtual sites */
void dd_move_x_vsites(struct gmx_domdec_t *dd, const matrix box, rvec *x);
/*! \brief Returns the local atom count array for all constraints
 *
 * The local atom count for a constraint, possible values 2/1/0, is needed
 * to avoid not/double-counting contributions linked to the Lagrange
 * multiplier, such as the virial and free-energy derivatives.
 *
 * \note When \p dd = nullptr, an empty reference is returned.
 */
gmx::ArrayRef<const int> dd_constraints_nlocalatoms(const gmx_domdec_t *dd);
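
/* Illustrative sketch: weighting per-constraint contributions by the local
 * atom count so that sums over all ranks count each constraint exactly once.
 * `constraintVirial` and `virialSum` are hypothetical names.
 *
 *     gmx::ArrayRef<const int> nlocal = dd_constraints_nlocalatoms(dd);
 *     for (size_t c = 0; c < nlocal.size(); c++)
 *     {
 *         // weight 1 when both atoms are local, 1/2 when one atom is local
 *         virialSum += 0.5*nlocal[c]*constraintVirial[c];
 *     }
 */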
/* In domdec_top.c */

/*! \brief Print error output when interactions are missing */
[[noreturn]]
void dd_print_missing_interactions(FILE *fplog, struct t_commrec *cr,
                                   int local_count,
                                   const gmx_mtop_t *top_global,
                                   const gmx_localtop_t *top_local,
                                   const t_state *state_local);
/*! \brief Generate and store the reverse topology */
void dd_make_reverse_top(FILE *fplog,
                         gmx_domdec_t *dd, const gmx_mtop_t *mtop,
                         const gmx_vsite_t *vsite,
                         const t_inputrec *ir, gmx_bool bBCheck);
/*! \brief Store the local charge group index in \p lcgs */
void dd_make_local_cgs(struct gmx_domdec_t *dd, t_block *lcgs);
/*! \brief Generate the local topology and virtual site data */
void dd_make_local_top(struct gmx_domdec_t *dd, struct gmx_domdec_zones_t *zones,
                       int npbcdim, matrix box,
                       rvec cellsize_min, const ivec npulse,
                       t_forcerec *fr,
                       rvec *cgcm_or_x,
                       gmx_vsite_t *vsite,
                       const gmx_mtop_t *top, gmx_localtop_t *ltop);
/*! \brief Sort ltop->ilist when we are doing free energy. */
void dd_sort_local_top(gmx_domdec_t *dd, const t_mdatoms *mdatoms,
                       gmx_localtop_t *ltop);
/*! \brief Construct local topology */
gmx_localtop_t *dd_init_local_top(const gmx_mtop_t *top_global);
/*! \brief Construct local state */
void dd_init_local_state(struct gmx_domdec_t *dd,
                         const t_state *state_global, t_state *local_state);
/*! \brief Generate a list of links between charge groups that are linked by bonded interactions */
t_blocka *make_charge_group_links(const gmx_mtop_t *mtop, gmx_domdec_t *dd,
                                  cginfo_mb_t *cginfo_mb);
/*! \brief Calculate the maximum distance involved in 2-body and multi-body bonded interactions */
void dd_bonded_cg_distance(FILE *fplog, const gmx_mtop_t *mtop,
                           const t_inputrec *ir,
                           const rvec *x, const matrix box,
                           gmx_bool bBCheck,
                           real *r_2b, real *r_mb);
/*! \brief Dump a pdb file with the current DD home + communicated atoms.
 *
 * When natoms=-1, dump all known atoms.
 */
void write_dd_pdb(const char *fn, int64_t step, const char *title,
                  const gmx_mtop_t *mtop,
                  const t_commrec *cr,
                  int natoms, const rvec x[], const matrix box);
/* In domdec_setup.c */

/*! \brief Returns the volume fraction of the system that is communicated */
real comm_box_frac(const ivec dd_nc, real cutoff, const gmx_ddbox_t *ddbox);
/*! \brief Determines the optimal DD cell setup dd->nc and possibly npmenodes
 * for the system.
 *
 * On the master node returns the actual cellsize limit used.
 */
real dd_choose_grid(FILE *fplog,
                    t_commrec *cr, gmx_domdec_t *dd,
                    const t_inputrec *ir,
                    const gmx_mtop_t *mtop,
                    const matrix box, const gmx_ddbox_t *ddbox,
                    int nPmeRanks,
                    gmx_bool bDynLoadBal, real dlb_scale,
                    real cellsize_limit, real cutoff_dd,
                    gmx_bool bInterCGBondeds);
/* In domdec_box.c */

/*! \brief Set the box and PBC data in \p ddbox */
void set_ddbox(gmx_domdec_t *dd, bool masterRankHasTheSystemState,
               const t_inputrec *ir, const matrix box,
               bool calculateUnboundedSize,
               gmx::ArrayRef<const gmx::RVec> x,
               gmx_ddbox_t *ddbox);
/*! \brief Set the box and PBC data in \p ddbox */
void set_ddbox_cr(const t_commrec *cr, const ivec *dd_nc,
                  const t_inputrec *ir, const matrix box,
                  gmx::ArrayRef<const gmx::RVec> x,
                  gmx_ddbox_t *ddbox);

#endif