Fix DD DLB state issue
[gromacs.git] / src / gromacs / legacyheaders / types / commrec.h
blob 64521b85aeee77be7712adc7ada3a7d6dc756142
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _commrec_h
#define _commrec_h

#include <stddef.h>

#include "gromacs/legacyheaders/types/commrec_fwd.h" // IWYU pragma: export
#include "gromacs/math/vectypes.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/real.h"

#ifdef __cplusplus
extern "C" {
#endif

#define DD_MAXZONE  8
#define DD_MAXIZONE 4
typedef struct gmx_domdec_master *gmx_domdec_master_p_t;

typedef struct {
    int  j0;     /* j-zone start               */
    int  j1;     /* j-zone end                 */
    int  cg1;    /* i-charge-group end         */
    int  jcg0;   /* j-charge-group start       */
    int  jcg1;   /* j-charge-group end         */
    ivec shift0; /* Minimum shifts to consider */
    ivec shift1; /* Maximum shifts to consider */
} gmx_domdec_ns_ranges_t;

typedef struct {
    rvec x0;    /* Zone lower corner in triclinic coordinates         */
    rvec x1;    /* Zone upper corner in triclinic coordinates         */
    rvec bb_x0; /* Zone bounding box lower corner in Cartesian coords */
    rvec bb_x1; /* Zone bounding box upper corner in Cartesian coords */
} gmx_domdec_zone_size_t;
struct gmx_domdec_zones_t {
    /* The number of zones including the home zone */
    int                    n;
    /* The shift of the zones with respect to the home zone */
    ivec                   shift[DD_MAXZONE];
    /* The charge group boundaries for the zones */
    int                    cg_range[DD_MAXZONE+1];
    /* The number of neighbor search zones with i-particles */
    int                    nizone;
    /* The neighbor search charge group ranges for each i-zone */
    gmx_domdec_ns_ranges_t izone[DD_MAXIZONE];
    /* Boundaries of the zones */
    gmx_domdec_zone_size_t size[DD_MAXZONE];
    /* The cg density of the home zone */
    real                   dens_zone0;
};
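
/* Illustrative sketch, not part of this header: pair searching typically
 * loops over the i-zones and, for each, over the j-zone range stored in
 * izone, along the lines of
 *
 *     for (iz = 0; iz < zones->nizone; iz++)
 *     {
 *         for (jz = zones->izone[iz].j0; jz < zones->izone[iz].j1; jz++)
 *         {
 *             // pair charge groups of i-zone iz with those of j-zone jz
 *         }
 *     }
 *
 * where zones points to the gmx_domdec_zones_t maintained by the DD code.
 */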
typedef struct gmx_ga2la *gmx_ga2la_t;

typedef struct gmx_hash *gmx_hash_t;

typedef struct gmx_reverse_top *gmx_reverse_top_p_t;

typedef struct gmx_domdec_constraints *gmx_domdec_constraints_p_t;

typedef struct gmx_domdec_specat_comm *gmx_domdec_specat_comm_p_t;

typedef struct gmx_domdec_comm *gmx_domdec_comm_p_t;

typedef struct gmx_pme_comm_n_box *gmx_pme_comm_n_box_p_t;
struct gmx_ddbox_t {
    int  npbcdim;
    int  nboundeddim;
    rvec box0;
    rvec box_size;
    /* Tells if the box is skewed for each of the three Cartesian directions */
    ivec tric_dir;
    rvec skew_fac;
    /* Orthogonal vectors for triclinic cells, Cartesian index */
    rvec v[DIM][DIM];
    /* Normal vectors for the cell walls */
    rvec normal[DIM];
};
typedef struct {
    /* These buffers are used as destination buffers if MPI_IN_PLACE isn't
       supported. */
    int         *ibuf; /* for ints */
    int          ibuf_alloc;

    gmx_int64_t *libuf; /* for 64-bit ints */
    int          libuf_alloc;

    float       *fbuf; /* for floats */
    int          fbuf_alloc;

    double      *dbuf; /* for doubles */
    int          dbuf_alloc;
} mpi_in_place_buf_t;
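
/* Illustrative sketch, not part of this header: when MPI_IN_PLACE is not
 * supported, a reduction can use ibuf as a separate receive buffer and
 * copy the result back, along the lines of
 *
 *     MPI_Allreduce(data, mpb->ibuf, n, MPI_INT, MPI_SUM, comm);
 *     memcpy(data, mpb->ibuf, n*sizeof(int));
 *
 * assuming mpb->ibuf_alloc >= n; the actual reduction helpers live in the
 * GROMACS networking code, not in this header.
 */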
struct gmx_domdec_t {
    /* The DD particle-particle nodes only */
    /* The communication setup within the communicator is all
     * defined in dd->comm in domdec.c
     */
    int                    nnodes;
    MPI_Comm               mpi_comm_all;
    /* Use MPI_Sendrecv communication instead of non-blocking calls */
    gmx_bool               bSendRecv2;
    /* The local DD cell index and rank */
    ivec                   ci;
    int                    rank;
    ivec                   master_ci;
    int                    masterrank;
    /* Communication with the PME only nodes */
    int                    pme_nodeid;
    gmx_bool               pme_receive_vir_ener;
    gmx_pme_comm_n_box_p_t cnb;
    int                    nreq_pme;
    MPI_Request            req_pme[8];

    /* The communication setup, identical for each cell, Cartesian index */
    ivec                   nc;
    int                    ndim;
    ivec                   dim; /* indexed by 0 to ndim */

    /* PBC from dim 0 to npbcdim */
    int                    npbcdim;

    /* Screw PBC? */
    gmx_bool               bScrewPBC;

    /* Forward and backward neighboring cells, indexed by 0 to ndim */
    int                    neighbor[DIM][2];

    /* Only available on the master node */
    gmx_domdec_master_p_t  ma;

    /* Are there inter charge group constraints */
    gmx_bool               bInterCGcons;
    gmx_bool               bInterCGsettles;

    /* Global atom number to interaction list */
    gmx_reverse_top_p_t    reverse_top;
    int                    nbonded_global;
    int                    nbonded_local;

    /* The number of inter charge-group exclusions */
    int                    n_intercg_excl;

    /* Vsite stuff */
    gmx_hash_t                 ga2la_vsite;
    gmx_domdec_specat_comm_p_t vsite_comm;

    /* Constraint stuff */
    gmx_domdec_constraints_p_t constraints;
    gmx_domdec_specat_comm_p_t constraint_comm;

    /* The local to global charge group index and local cg to local atom index */
    int  ncg_home;
    int  ncg_tot;
    int *index_gl;
    int *cgindex;
    int  cg_nalloc;
    /* Local atom to local cg index, only for special cases */
    int *la2lc;
    int  la2lc_nalloc;

    /* The number of home atoms */
    int  nat_home;
    /* The total number of atoms: home and received zones */
    int  nat_tot;
    /* Index from the local atoms to the global atoms */
    int *gatindex;
    int  gatindex_nalloc;

    /* Global atom number to local atom number list */
    gmx_ga2la_t ga2la;

    /* Communication stuff */
    gmx_domdec_comm_p_t comm;

    /* The partitioning count, to keep track of the state */
    gmx_int64_t ddp_count;

    /* gmx_pme_recv_f buffer */
    int   pme_recv_f_alloc;
    rvec *pme_recv_f_buf;
};
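
/* Illustrative sketch, not part of this header: the home atoms of this rank
 * are the first nat_home local atoms, and gatindex maps them to global atom
 * numbers, along the lines of
 *
 *     for (i = 0; i < dd->nat_home; i++)
 *     {
 *         int a_gl = dd->gatindex[i];  // global index of local atom i
 *     }
 *
 * the reverse lookup (global to local) goes through dd->ga2la.
 */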
struct gmx_multisim_t {
    int       nsim;
    int       sim;
    MPI_Group mpi_group_masters;
    MPI_Comm  mpi_comm_masters;
    /* These buffers are used as destination buffers if MPI_IN_PLACE isn't
       supported. */
    mpi_in_place_buf_t *mpb;
};
#define DUTY_PP  (1<<0)
#define DUTY_PME (1<<1)
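
/* Illustrative sketch, not part of this header: a rank's role is stored as a
 * bit mask in cr->duty, so PME-specific work can be guarded as
 *
 *     if (cr->duty & DUTY_PME)
 *     {
 *         // this rank takes part in the long-range (PME) work
 *     }
 *
 * a rank doing both short- and long-range work has both bits set.
 */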
typedef struct {
    int      bUse;
    MPI_Comm comm_intra;
    int      rank_intra;
    MPI_Comm comm_inter;
} gmx_nodecomm_t;
struct t_commrec {
    /* The nodeids in one sim are numbered sequentially from 0.
     * All communication within a simulation should happen
     * in mpi_comm_mysim, or its subset mpi_comm_mygroup.
     */
    int sim_nodeid, nnodes, npmenodes;

    /* thread numbers: */
    /* Not used yet: int threadid, nthreads; */
    /* The nodeid in the PP/PME, PP or PME group */
    int      nodeid;
    MPI_Comm mpi_comm_mysim;
    MPI_Comm mpi_comm_mygroup;

    /* MPI ranks within a physical node for hardware access */
    int nrank_intranode;    /* nr of ranks on this physical node */
    int rank_intranode;     /* our rank on this physical node */
    int nrank_pp_intranode; /* as nrank_intranode, for particle-particle only */
    int rank_pp_intranode;  /* as rank_intranode, for particle-particle only */

    gmx_nodecomm_t nc;

    /* For domain decomposition */
    struct gmx_domdec_t *dd;

    /* The duties of this node, see the defines above */
    int duty;

    struct gmx_multisim_t *ms;

    /* These buffers are used as destination buffers if MPI_IN_PLACE isn't
       supported. */
    mpi_in_place_buf_t *mpb;
};
#define MASTERNODE(cr)     (((cr)->nodeid == 0) || !PAR(cr))
/* #define MASTERTHREAD(cr) ((cr)->threadid == 0) */
/* #define MASTER(cr) (MASTERNODE(cr) && MASTERTHREAD(cr)) */
#define MASTER(cr)         MASTERNODE(cr)
#define SIMMASTER(cr)      ((MASTER(cr) && ((cr)->duty & DUTY_PP)) || !PAR(cr))
#define NODEPAR(cr)        ((cr)->nnodes > 1)
/* #define THREADPAR(cr) ((cr)->nthreads > 1) */
/* #define PAR(cr) (NODEPAR(cr) || THREADPAR(cr)) */
#define PAR(cr)            NODEPAR(cr)
#define RANK(cr, nodeid)   (nodeid)
#define MASTERRANK(cr)     (0)
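
/* Illustrative sketch, not part of this header: output that should appear
 * once per simulation is typically guarded with these macros, e.g.
 *
 *     if (SIMMASTER(cr))
 *     {
 *         fprintf(fplog, "Using %d MPI ranks\n", cr->nnodes);
 *     }
 *
 * assuming fplog is the run's open log file; in a serial run PAR(cr) is
 * false and the guard is always true.
 */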
/* Note that even with particle decomposition removed, the use of
 * non-DD parallelization in TPI, NM and multi-simulations means that
 * PAR(cr) and DOMAINDECOMP(cr) are not universally synonymous. In
 * particular, DOMAINDECOMP(cr) == true indicates that there is more
 * than one domain, not just that the dd algorithm is active. */
#define DOMAINDECOMP(cr)   (((cr)->dd != NULL) && PAR(cr))
#define DDMASTER(dd)       ((dd)->rank == (dd)->masterrank)
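
/* Illustrative sketch, not part of this header: code specific to the master
 * domain of a DD run combines the two macros, along the lines of
 *
 *     if (DOMAINDECOMP(cr) && DDMASTER(cr->dd))
 *     {
 *         // gather per-domain data on the DD master rank
 *     }
 */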
#define MULTISIM(cr)       ((cr)->ms)
#define MSRANK(ms, nodeid) (nodeid)
#define MASTERSIM(ms)      ((ms)->sim == 0)

/* The master of all (the node that prints the remaining run time etc.) */
#define MULTIMASTER(cr)    (SIMMASTER(cr) && (!MULTISIM(cr) || MASTERSIM((cr)->ms)))
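
/* Illustrative sketch, not part of this header: in a multi-simulation each
 * member simulation has its own SIMMASTER, but only one rank overall is the
 * MULTIMASTER, e.g.
 *
 *     if (MULTIMASTER(cr))
 *     {
 *         // print console output exactly once across all simulations
 *     }
 */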
#ifdef __cplusplus
}
#endif

#endif