/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "md_logging.h"
#include "md_support.h"
#include "mpelogging.h"
#include "checkpoint.h"
#include "mtop_util.h"
#include "sighandler.h"
#include "gmx_detect_hardware.h"
#include "gmx_omp_nthreads.h"
#include "pull_rotation.h"
#include "calc_verletbuf.h"
#include "../mdlib/nbnxn_search.h"
#include "../mdlib/nbnxn_consts.h"
#include "gmx_fatal_collective.h"
#include "gmx_thread_affinity.h"
#include "gpu_utils.h"
#include "nbnxn_cuda_data_mgmt.h"
typedef struct {
    gmx_integrator_t *func;
} gmx_intp_t;
/* The array should match the eI array in include/types/enums.h */
const gmx_intp_t    integrator[eiNR] = { {do_md}, {do_steep}, {do_cg}, {do_md}, {do_md}, {do_nm}, {do_lbfgs}, {do_tpi}, {do_tpi}, {do_md}, {do_md}, {do_md}};
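/* Editor's note (illustrative, not part of the original source): dispatch
 * through this table is a single indexed call, exactly as mdrunner() does
 * further below:
 *
 *     integrator[inputrec->eI].func(fplog, cr, nfile, fnm, ...);
 *
 * e.g. eiSteep selects do_steep, while the MD-family integrators (MD, BD,
 * SD, VV) all map to do_md.
 */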
gmx_large_int_t     deform_init_init_step_tpx;
matrix              deform_init_box_tpx;
#ifdef GMX_THREAD_MPI
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
#endif
#ifdef GMX_THREAD_MPI
/* Bundles all the arguments of mdrunner() so they can be passed through
 * the single void* argument of the tMPI thread-start function; the fields
 * mirror mdrunner()'s parameter list below. */
struct mdrunner_arglist
{
    gmx_hw_opt_t    hw_opt;
    FILE           *fplog;
    t_commrec      *cr;
    int             nfile;
    const t_filenm *fnm;
    output_env_t    oenv;
    gmx_bool        bVerbose, bCompact;
    int             nstglobalcomm;
    ivec            ddxyz;
    int             dd_node_order;
    real            rdd, rconstr;
    const char     *dddlb_opt;
    real            dlb_scale;
    const char     *ddcsx, *ddcsy, *ddcsz, *nbpu_opt;
    gmx_large_int_t nsteps_cmdline;
    int             nstepout, resetstep;
    int             nmultisim, repl_ex_nst, repl_ex_nex, repl_ex_seed;
    real            pforce, cpt_period, max_hours;
    const char     *deviceOptions;
    unsigned long   Flags;
    int             ret; /* return value */
};
/* The function used for spawning threads. Extracts the mdrunner()
   arguments from its one argument and calls mdrunner(), after making
   a commrec. */
static void mdrunner_start_fn(void *arg)
{
    struct mdrunner_arglist *mda = (struct mdrunner_arglist*)arg;
    struct mdrunner_arglist  mc  = *mda; /* copy the arg list to make sure
                                            that it's thread-local. This doesn't
                                            copy pointed-to items, of course,
                                            but those are all const. */
    t_commrec *cr;                       /* we need a local version of this */
    FILE      *fplog = NULL;
    t_filenm  *fnm;

    fnm = dup_tfn(mc.nfile, mc.fnm);

    cr = init_par_threads(mc.cr);

    if (MASTER(cr))
    {
        fplog = mc.fplog;
    }

    mda->ret = mdrunner(&mc.hw_opt, fplog, cr, mc.nfile, fnm, mc.oenv,
                        mc.bVerbose, mc.bCompact, mc.nstglobalcomm,
                        mc.ddxyz, mc.dd_node_order, mc.rdd,
                        mc.rconstr, mc.dddlb_opt, mc.dlb_scale,
                        mc.ddcsx, mc.ddcsy, mc.ddcsz,
                        mc.nbpu_opt,
                        mc.nsteps_cmdline, mc.nstepout, mc.resetstep,
                        mc.nmultisim, mc.repl_ex_nst, mc.repl_ex_nex, mc.repl_ex_seed, mc.pforce,
                        mc.cpt_period, mc.max_hours, mc.deviceOptions, mc.Flags);
}
/* called by mdrunner() to start a specific number of threads (including
   the main thread) for thread-parallel runs. This in turn calls mdrunner()
   for each thread.
   All options besides nthreads are the same as for mdrunner(). */
static t_commrec *mdrunner_start_threads(gmx_hw_opt_t *hw_opt,
                                         FILE *fplog, t_commrec *cr, int nfile,
                                         const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
                                         gmx_bool bCompact, int nstglobalcomm,
                                         ivec ddxyz, int dd_node_order, real rdd, real rconstr,
                                         const char *dddlb_opt, real dlb_scale,
                                         const char *ddcsx, const char *ddcsy, const char *ddcsz,
                                         const char *nbpu_opt,
                                         gmx_large_int_t nsteps_cmdline,
                                         int nstepout, int resetstep,
                                         int nmultisim, int repl_ex_nst, int repl_ex_nex, int repl_ex_seed,
                                         real pforce, real cpt_period, real max_hours,
                                         const char *deviceOptions, unsigned long Flags)
{
    int                      ret;
    struct mdrunner_arglist *mda;
    t_commrec               *crn; /* the new commrec */
    t_filenm                *fnmn;

    /* first check whether we even need to start tMPI */
    if (hw_opt->nthreads_tmpi < 2)
    {
        return cr;
    }

    /* a few small, one-time, almost unavoidable memory leaks: */
    snew(mda, 1);
    fnmn = dup_tfn(nfile, fnm);

    /* fill the data structure to pass as void pointer to thread start fn */
    /* hw_opt contains pointers, which should all be NULL at this stage */
    mda->hw_opt         = *hw_opt;
    mda->fplog          = fplog;
    mda->cr             = cr;
    mda->nfile          = nfile;
    mda->fnm            = fnmn;
    mda->oenv           = oenv;
    mda->bVerbose       = bVerbose;
    mda->bCompact       = bCompact;
    mda->nstglobalcomm  = nstglobalcomm;
    mda->ddxyz[XX]      = ddxyz[XX];
    mda->ddxyz[YY]      = ddxyz[YY];
    mda->ddxyz[ZZ]      = ddxyz[ZZ];
    mda->dd_node_order  = dd_node_order;
    mda->rdd            = rdd;
    mda->rconstr        = rconstr;
    mda->dddlb_opt      = dddlb_opt;
    mda->dlb_scale      = dlb_scale;
    mda->ddcsx          = ddcsx;
    mda->ddcsy          = ddcsy;
    mda->ddcsz          = ddcsz;
    mda->nbpu_opt       = nbpu_opt;
    mda->nsteps_cmdline = nsteps_cmdline;
    mda->nstepout       = nstepout;
    mda->resetstep      = resetstep;
    mda->nmultisim      = nmultisim;
    mda->repl_ex_nst    = repl_ex_nst;
    mda->repl_ex_nex    = repl_ex_nex;
    mda->repl_ex_seed   = repl_ex_seed;
    mda->pforce         = pforce;
    mda->cpt_period     = cpt_period;
    mda->max_hours      = max_hours;
    mda->deviceOptions  = deviceOptions;
    mda->Flags          = Flags;

    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns, we set thread affinity later */
    ret = tMPI_Init_fn(TRUE, hw_opt->nthreads_tmpi, TMPI_AFFINITY_NONE,
                       mdrunner_start_fn, (void*)(mda));
    if (ret != TMPI_SUCCESS)
    {
        return NULL;
    }

    /* make a new comm_rec to reflect the new situation */
    crn = init_par_threads(cr);

    return crn;
}
static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        int                  nthreads_tot,
                                        int                  ngpu)
{
    int nthreads_tmpi;

    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
    if (ngpu > 0)
    {
        nthreads_tmpi = ngpu;
        if (nthreads_tot > 0 && nthreads_tot < nthreads_tmpi)
        {
            nthreads_tmpi = nthreads_tot;
        }
    }
    else if (hw_opt->nthreads_omp > 0)
    {
        /* Here we could oversubscribe, when we do, we issue a warning later */
        nthreads_tmpi = max(1, nthreads_tot/hw_opt->nthreads_omp);
    }
    else
    {
        /* TODO choose nthreads_omp based on hardware topology
           when we have a hardware topology detection library */
        /* In general, when running up to 4 threads, OpenMP should be faster.
         * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
         * On Intel>=Nehalem running OpenMP on a single CPU is always faster,
         * even on two CPUs it's usually faster (but with many OpenMP threads
         * it could be faster not to use HT, currently we always use HT).
         * On Nehalem/Westmere we want to avoid running 16 threads over
         * two CPUs with HT, so we need a limit<16; thus we use 12.
         * A reasonable limit for Intel Sandy and Ivy bridge,
         * not knowing the topology, is 16 threads.
         */
        const int nthreads_omp_always_faster             =  4;
        const int nthreads_omp_always_faster_Nehalem     = 12;
        const int nthreads_omp_always_faster_SandyBridge = 16;
        const int first_model_Nehalem                    = 0x1A;
        const int first_model_SandyBridge                = 0x2A;
        gmx_bool  bIntel_Family6;

        bIntel_Family6 =
            (gmx_cpuid_vendor(hwinfo->cpuid_info) == GMX_CPUID_VENDOR_INTEL &&
             gmx_cpuid_family(hwinfo->cpuid_info) == 6);

        if (nthreads_tot <= nthreads_omp_always_faster ||
            (bIntel_Family6 &&
             ((gmx_cpuid_model(hwinfo->cpuid_info) >= first_model_Nehalem && nthreads_tot <= nthreads_omp_always_faster_Nehalem) ||
              (gmx_cpuid_model(hwinfo->cpuid_info) >= first_model_SandyBridge && nthreads_tot <= nthreads_omp_always_faster_SandyBridge))))
        {
            /* Use pure OpenMP parallelization */
            nthreads_tmpi = 1;
        }
        else
        {
            /* Don't use OpenMP parallelization */
            nthreads_tmpi = nthreads_tot;
        }
    }

    return nthreads_tmpi;
}
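/* Editor's note (illustrative, not part of the original source): worked
 * examples of the division above:
 * - 8 hardware threads on Intel family 6, model 0x2A (Sandy Bridge), no GPU,
 *   no -ntomp: nthreads_tot = 8 <= 16, so nthreads_tmpi = 1 and all
 *   parallelism is OpenMP.
 * - The same machine with 2 compatible GPUs: ngpu = 2, so nthreads_tmpi = 2,
 *   i.e. one thread-MPI rank per GPU.
 * - 32 hardware threads with -ntomp 4: nthreads_tmpi = max(1, 32/4) = 8.
 */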
/* Get the number of threads to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At the point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than #GPU.
 */
static int get_nthreads_mpi(const gmx_hw_info_t *hwinfo,
                            gmx_hw_opt_t        *hw_opt,
                            t_inputrec          *inputrec, gmx_mtop_t *mtop,
                            const t_commrec     *cr,
                            FILE                *fplog)
{
    int      nthreads_hw, nthreads_tot_max, nthreads_tmpi, nthreads_new, ngpu;
    int      min_atoms_per_mpi_thread;
    gmx_bool bCanUseGPU;

    if (hw_opt->nthreads_tmpi > 0)
    {
        /* Trivial, return right away */
        return hw_opt->nthreads_tmpi;
    }

    nthreads_hw = hwinfo->nthreads_hw_avail;

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
    {
        nthreads_tot_max = hw_opt->nthreads_tot;
    }
    else
    {
        nthreads_tot_max = nthreads_hw;
    }

    bCanUseGPU = (inputrec->cutoff_scheme == ecutsVERLET &&
                  hwinfo->gpu_info.ncuda_dev_compatible > 0);
    if (bCanUseGPU)
    {
        ngpu = hwinfo->gpu_info.ncuda_dev_compatible;
    }
    else
    {
        ngpu = 0;
    }

    if (inputrec->cutoff_scheme == ecutsGROUP)
    {
        /* We checked this before, but it doesn't hurt to do it once more */
        assert(hw_opt->nthreads_omp == 1);
    }

    nthreads_tmpi =
        get_tmpi_omp_thread_division(hwinfo, hw_opt, nthreads_tot_max, ngpu);

    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
    {
        /* Dims/steps are divided over the nodes iso splitting the atoms */
        min_atoms_per_mpi_thread = 0;
    }
    else
    {
        if (bCanUseGPU)
        {
            min_atoms_per_mpi_thread = MIN_ATOMS_PER_GPU;
        }
        else
        {
            min_atoms_per_mpi_thread = MIN_ATOMS_PER_MPI_THREAD;
        }
    }

    /* Check if an algorithm does not support parallel simulation. */
    if (nthreads_tmpi != 1 &&
        ( inputrec->eI == eiLBFGS ||
          inputrec->coulombtype == eelEWALD ) )
    {
        nthreads_tmpi = 1;

        md_print_warn(cr, fplog, "The integration or electrostatics algorithm doesn't support parallel runs. Using a single thread-MPI thread.\n");
        if (hw_opt->nthreads_tmpi > nthreads_tmpi)
        {
            gmx_fatal(FARGS, "You asked for more than 1 thread-MPI thread, but an algorithm doesn't support that");
        }
    }
    else if (mtop->natoms/nthreads_tmpi < min_atoms_per_mpi_thread)
    {
        /* the thread number was chosen automatically, but there are too many
           threads (too few atoms per thread) */
        nthreads_new = max(1, mtop->natoms/min_atoms_per_mpi_thread);

        /* Avoid partial use of Hyper-Threading */
        if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
            nthreads_new > nthreads_hw/2 && nthreads_new < nthreads_hw)
        {
            nthreads_new = nthreads_hw/2;
        }

        /* Avoid large prime numbers in the thread count */
        if (nthreads_new >= 6)
        {
            /* Use only 6,8,10 with additional factors of 2 */
            int fac;

            fac = 2;
            while (3*fac*2 <= nthreads_new)
            {
                fac *= 2;
            }

            nthreads_new = (nthreads_new/fac)*fac;
        }
        else
        {
            /* Avoid 5 */
            if (nthreads_new == 5)
            {
                nthreads_new = 4;
            }
        }

        nthreads_tmpi = nthreads_new;

        fprintf(stderr, "\n");
        fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr, "      only starting %d thread-MPI threads.\n", nthreads_tmpi);
        fprintf(stderr, "      You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");
    }

    return nthreads_tmpi;
}
#endif /* GMX_THREAD_MPI */
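/* Editor's note (illustrative, not part of the original source): the
 * thread-count rounding in get_nthreads_mpi() above. If too few atoms force
 * nthreads_new = 11, then fac starts at 2 and doubles while 3*fac*2 <= 11;
 * 3*2*2 = 12 > 11, so fac stays 2 and nthreads_new = (11/2)*2 = 10.
 * For nthreads_new = 23: 3*2*2 = 12 <= 23 doubles fac to 4, 3*4*2 = 24 > 23
 * stops, and (23/4)*4 = 20. The reachable counts are thus
 * 6, 8, 10, 12, 16, 20, 24, ..., avoiding large prime factors that would
 * frustrate domain decomposition.
 */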
/* Environment variable for setting nstlist */
static const char*  NSTLIST_ENVVAR         = "GMX_NSTLIST";
/* Try to increase nstlist when using a GPU with nstlist less than this */
static const int    NSTLIST_GPU_ENOUGH     = 20;
/* Increase nstlist until the non-bonded cost increases more than this factor */
static const float  NBNXN_GPU_LIST_OK_FAC  = 1.20;
/* Don't increase nstlist beyond a non-bonded cost increase of this factor.
 * A standard (protein+)water system at 300K with PME ewald_rtol=1e-5
 * needs 1.28 at rcoulomb=0.9 and 1.24 at rcoulomb=1.0 to get to nstlist=40.
 */
static const float  NBNXN_GPU_LIST_MAX_FAC = 1.30;
/* Try to increase nstlist when running on a GPU */
static void increase_nstlist(FILE *fp, t_commrec *cr,
                             t_inputrec *ir, const gmx_mtop_t *mtop, matrix box)
{
    char                  *env;
    int                    nstlist_orig, nstlist_prev;
    verletbuf_list_setup_t ls;
    real                   rlist_nstlist10, rlist_inc, rlist_ok, rlist_max;
    real                   rlist_new, rlist_prev;
    int                    i;
    t_state                state_tmp;
    gmx_bool               bBox, bDD, bCont;
    const char            *nstl_fmt = "\nFor optimal performance with a GPU nstlist (now %d) should be larger.\nThe optimum depends on your CPU and GPU resources.\nYou might want to try several nstlist values.\n";
    const char            *vbd_err  = "Can not increase nstlist for GPU run because verlet-buffer-drift is not set or used";
    const char            *box_err  = "Can not increase nstlist for GPU run because the box is too small";
    const char            *dd_err   = "Can not increase nstlist for GPU run because of domain decomposition limitations";
    char                   buf[STRLEN];

    /* Number of nstlist alternative values to try when switching */
    const int nstl[] = { 20, 25, 40 };
#define NNSTL  sizeof(nstl)/sizeof(nstl[0])

    env = getenv(NSTLIST_ENVVAR);
    if (env == NULL)
    {
        if (fp != NULL)
        {
            fprintf(fp, nstl_fmt, ir->nstlist);
        }
    }

    if (ir->verletbuf_drift == 0)
    {
        gmx_fatal(FARGS, "You are using an old tpr file with a GPU, please generate a new tpr file with an up to date version of grompp");
    }

    if (ir->verletbuf_drift < 0)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n", vbd_err);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n", vbd_err);
        }

        return;
    }

    nstlist_orig = ir->nstlist;
    if (env != NULL)
    {
        sprintf(buf, "Getting nstlist from environment variable GMX_NSTLIST=%s", env);
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n", buf);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n", buf);
        }
        sscanf(env, "%d", &ir->nstlist);
    }

    verletbuf_get_list_setup(TRUE, &ls);

    /* Allow rlist to make the list a given factor larger than the list
     * would be with nstlist=10.
     */
    nstlist_prev = ir->nstlist;
    ir->nstlist  = 10;
    calc_verlet_buffer_size(mtop, det(box), ir, ir->verletbuf_drift, &ls,
                            NULL, &rlist_nstlist10);
    ir->nstlist  = nstlist_prev;

    /* Determine the pair list size increase due to zero interactions */
    rlist_inc = nbnxn_get_rlist_effective_inc(NBNXN_GPU_CLUSTER_SIZE, mtop->natoms/det(box));
    rlist_ok  = (rlist_nstlist10 + rlist_inc)*pow(NBNXN_GPU_LIST_OK_FAC, 1.0/3.0) - rlist_inc;
    rlist_max = (rlist_nstlist10 + rlist_inc)*pow(NBNXN_GPU_LIST_MAX_FAC, 1.0/3.0) - rlist_inc;
    if (debug)
    {
        fprintf(debug, "GPU nstlist tuning: rlist_inc %.3f rlist_max %.3f\n",
                rlist_inc, rlist_max);
    }

    i            = 0;
    nstlist_prev = nstlist_orig;
    rlist_prev   = ir->rlist;
    do
    {
        if (env == NULL)
        {
            ir->nstlist = nstl[i];
        }

        /* Set the pair-list buffer size in ir */
        calc_verlet_buffer_size(mtop, det(box), ir, ir->verletbuf_drift, &ls,
                                NULL, &rlist_new);

        /* Does rlist fit in the box? */
        bBox = (sqr(rlist_new) < max_cutoff2(ir->ePBC, box));
        bDD  = TRUE;
        if (bBox && DOMAINDECOMP(cr))
        {
            /* Check if rlist fits in the domain decomposition */
            if (inputrec2nboundeddim(ir) < DIM)
            {
                gmx_incons("Changing nstlist with domain decomposition and unbounded dimensions is not implemented yet");
            }
            copy_mat(box, state_tmp.box);
            bDD = change_dd_cutoff(cr, &state_tmp, ir, rlist_new);
        }

        bCont = FALSE;
        if (env == NULL)
        {
            if (bBox && bDD && rlist_new <= rlist_max)
            {
                /* Increase nstlist */
                nstlist_prev = ir->nstlist;
                rlist_prev   = rlist_new;
                bCont        = (i+1 < NNSTL && rlist_new < rlist_ok);
            }
            else
            {
                /* Stick with the previous nstlist */
                ir->nstlist = nstlist_prev;
                rlist_new   = rlist_prev;
                bBox        = TRUE;
                bDD         = TRUE;
            }
        }

        i++;
    }
    while (bCont);

    if (!bBox || !bDD)
    {
        gmx_warning(!bBox ? box_err : dd_err);
        if (fp != NULL)
        {
            fprintf(fp, "\n%s\n", bBox ? box_err : dd_err);
        }
        ir->nstlist = nstlist_orig;
    }
    else if (ir->nstlist != nstlist_orig || rlist_new != ir->rlist)
    {
        sprintf(buf, "Changing nstlist from %d to %d, rlist from %g to %g",
                nstlist_orig, ir->nstlist,
                ir->rlist, rlist_new);
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n\n", buf);
        }
        if (fp != NULL)
        {
            fprintf(fp, "%s\n\n", buf);
        }
        ir->rlist     = rlist_new;
        ir->rlistlong = rlist_new;
    }
}
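/* Editor's note (illustrative, not part of the original source): instead of
 * the built-in 20/25/40 scan above, a single nstlist value can be forced
 * from the shell, e.g.
 *
 *     GMX_NSTLIST=40 mdrun -deffnm prod
 *
 * in which case increase_nstlist() reads the value with sscanf and only
 * recomputes and validates rlist for that one value.
 */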
static void prepare_verlet_scheme(FILE             *fplog,
                                  t_commrec        *cr,
                                  const char       *nbpu_opt,
                                  t_inputrec       *ir,
                                  const gmx_mtop_t *mtop,
                                  matrix            box,
                                  gmx_bool          bUseGPU)
{
    if (ir->verletbuf_drift > 0)
    {
        /* Update the Verlet buffer size for the current run setup */
        verletbuf_list_setup_t ls;
        real                   rlist_new;

        /* Here we assume CPU acceleration is on. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        verletbuf_get_list_setup(bUseGPU, &ls);

        calc_verlet_buffer_size(mtop, det(box), ir,
                                ir->verletbuf_drift, &ls,
                                NULL, &rlist_new);
        if (rlist_new != ir->rlist)
        {
            if (fplog != NULL)
            {
                fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_new,
                        ls.cluster_size_i, ls.cluster_size_j);
            }
            ir->rlist     = rlist_new;
            ir->rlistlong = rlist_new;
        }
    }

    /* With GPU or emulation we should check nstlist for performance */
    if ((EI_DYNAMICS(ir->eI) &&
         bUseGPU &&
         ir->nstlist < NSTLIST_GPU_ENOUGH) ||
        getenv(NSTLIST_ENVVAR) != NULL)
    {
        /* Choose a better nstlist */
        increase_nstlist(fplog, cr, ir, mtop, box);
    }
}
static void convert_to_verlet_scheme(FILE *fplog,
                                     t_inputrec *ir,
                                     gmx_mtop_t *mtop, real box_vol)
{
    char *conv_mesg = "Converting input file with group cut-off scheme to the Verlet cut-off scheme";

    md_print_warn(NULL, fplog, "%s\n", conv_mesg);

    ir->cutoff_scheme   = ecutsVERLET;
    ir->verletbuf_drift = 0.005;

    if (ir->rcoulomb != ir->rvdw)
    {
        gmx_fatal(FARGS, "The VdW and Coulomb cut-offs are different, whereas the Verlet scheme only supports equal cut-offs");
    }

    if (ir->vdwtype == evdwUSER || EEL_USER(ir->coulombtype))
    {
        gmx_fatal(FARGS, "User non-bonded potentials are not (yet) supported with the Verlet scheme");
    }
    else if (EVDW_SWITCHED(ir->vdwtype) || EEL_SWITCHED(ir->coulombtype))
    {
        md_print_warn(NULL, fplog, "Converting switched or shifted interactions to a shifted potential (without force shift), this will lead to slightly different interaction potentials");

        if (EVDW_SWITCHED(ir->vdwtype))
        {
            ir->vdwtype = evdwCUT;
        }
        if (EEL_SWITCHED(ir->coulombtype))
        {
            if (EEL_FULL(ir->coulombtype))
            {
                /* With full electrostatic only PME can be switched */
                ir->coulombtype = eelPME;
            }
            else
            {
                md_print_warn(NULL, fplog, "NOTE: Replacing %s electrostatics with reaction-field with epsilon-rf=inf\n", eel_names[ir->coulombtype]);
                ir->coulombtype = eelRF;
                ir->epsilon_rf  = 0.0;
            }
        }
    }

    /* We set the target energy drift to a small number.
     * Note that this is only for testing. For production the user
     * should think about this and set the mdp options.
     */
    ir->verletbuf_drift = 1e-4;

    if (inputrec2nboundeddim(ir) != 3)
    {
        gmx_fatal(FARGS, "Can only convert old tpr files to the Verlet cut-off scheme with 3D pbc");
    }

    if (ir->efep != efepNO || ir->implicit_solvent != eisNO)
    {
        gmx_fatal(FARGS, "Will not convert old tpr files to the Verlet cut-off scheme with free-energy calculations or implicit solvent");
    }

    if (EI_DYNAMICS(ir->eI) && !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        verletbuf_list_setup_t ls;

        verletbuf_get_list_setup(FALSE, &ls);
        calc_verlet_buffer_size(mtop, box_vol, ir, ir->verletbuf_drift, &ls,
                                NULL, &ir->rlist);
    }
    else
    {
        ir->verletbuf_drift = -1;
        ir->rlist           = 1.05*max(ir->rvdw, ir->rcoulomb);
    }

    gmx_mtop_remove_chargegroups(mtop);
}
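/* Editor's note (illustrative, not part of the original source): in the
 * fallback branch above (no dynamics with temperature coupling), the buffer
 * is a flat 5%: rvdw = rcoulomb = 1.0 nm gives rlist = 1.05 nm, i.e. a
 * 0.05 nm Verlet buffer, and verletbuf_drift = -1 marks the buffer as not
 * drift-derived.
 */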
static void print_hw_opt(FILE *fp, const gmx_hw_opt_t *hw_opt)
{
    fprintf(fp, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s'\n",
            hw_opt->nthreads_tot,
            hw_opt->nthreads_tmpi,
            hw_opt->nthreads_omp,
            hw_opt->nthreads_omp_pme,
            hw_opt->gpu_opt.gpu_id != NULL ? hw_opt->gpu_opt.gpu_id : "");
}
/* Checks we can do when we don't (yet) know the cut-off scheme */
static void check_and_update_hw_opt_1(gmx_hw_opt_t *hw_opt,
                                      gmx_bool      bIsSimMaster)
{
    gmx_omp_nthreads_read_env(&hw_opt->nthreads_omp, bIsSimMaster);

#ifndef GMX_THREAD_MPI
    if (hw_opt->nthreads_tot > 0)
    {
        gmx_fatal(FARGS, "Setting the total number of threads is only supported with thread-MPI and Gromacs was compiled without thread-MPI");
    }
    if (hw_opt->nthreads_tmpi > 0)
    {
        gmx_fatal(FARGS, "Setting the number of thread-MPI threads is only supported with thread-MPI and Gromacs was compiled without thread-MPI");
    }
#endif

#ifndef GMX_OPENMP
    if (hw_opt->nthreads_omp > 1)
    {
        gmx_fatal(FARGS, "More than 1 OpenMP thread requested, but Gromacs was compiled without OpenMP support");
    }
    hw_opt->nthreads_omp = 1;
#endif

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
    {
        /* We have the same number of OpenMP threads for PP and PME processes,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) does not match the thread-MPI threads (%d) times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of thread-MPI threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);
        }

        if (hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of OpenMP threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp <= 0)
        {
            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
        }
    }

#ifndef GMX_OPENMP
    if (hw_opt->nthreads_omp > 1)
    {
        gmx_fatal(FARGS, "OpenMP threads are requested, but Gromacs was compiled without OpenMP support");
    }
#endif

    if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
    {
        gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");
    }

    if (hw_opt->nthreads_tot == 1)
    {
        hw_opt->nthreads_tmpi = 1;

        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "You requested %d OpenMP threads with %d total threads",
                      hw_opt->nthreads_omp, hw_opt->nthreads_tot);
        }
        hw_opt->nthreads_omp = 1;
    }

    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    /* Parse GPU IDs, if provided.
     * We check consistency with the tMPI thread count later.
     */
    gmx_parse_gpu_ids(&hw_opt->gpu_opt);

#ifdef GMX_THREAD_MPI
    if (hw_opt->gpu_opt.ncuda_dev_use > 0 && hw_opt->nthreads_tmpi == 0)
    {
        /* Set the number of MPI threads equal to the number of GPUs */
        hw_opt->nthreads_tmpi = hw_opt->gpu_opt.ncuda_dev_use;

        if (hw_opt->nthreads_tot > 0 &&
            hw_opt->nthreads_tmpi > hw_opt->nthreads_tot)
        {
            /* We have more GPUs than total threads requested.
             * We choose to (later) generate a mismatch error,
             * instead of launching more threads than requested.
             */
            hw_opt->nthreads_tmpi = hw_opt->nthreads_tot;
        }
    }
#endif

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }
}
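/* Editor's note (illustrative, not part of the original source): worked
 * examples of the consistency rules above. "mdrun -nt 12 -ntmpi 4" passes
 * and derives nthreads_omp = 12/4 = 3; "mdrun -nt 12 -ntmpi 4 -ntomp 4" is
 * fatal because 4*4 != 12; "mdrun -nt 13 -ntmpi 4" is fatal because 13 is
 * not divisible by 4; and "mdrun -nt 1" forces both the thread-MPI and
 * OpenMP counts to 1.
 */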
/* Checks we can do when we know the cut-off scheme */
static void check_and_update_hw_opt_2(gmx_hw_opt_t *hw_opt,
                                      int           cutoff_scheme)
{
    if (cutoff_scheme == ecutsGROUP)
    {
        /* We only have OpenMP support for PME only nodes */
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "OpenMP threads have been requested with cut-off scheme %s, but these are only supported with cut-off scheme %s",
                      ecutscheme_names[cutoff_scheme],
                      ecutscheme_names[ecutsVERLET]);
        }
        hw_opt->nthreads_omp = 1;
    }

    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }
}
/* Override the value in inputrec with value passed on the command line (if any) */
static void override_nsteps_cmdline(FILE            *fplog,
                                    gmx_large_int_t  nsteps_cmdline,
                                    t_inputrec      *ir,
                                    const t_commrec *cr)
{
    char sbuf[STEPSTRSIZE];

    /* override with anything else than the default -2 */
    if (nsteps_cmdline > -2)
    {
        char stmp[STRLEN];

        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI))
        {
            sprintf(stmp, "Overriding nsteps with value passed on the command line: %s steps, %.3f ps",
                    gmx_step_str(nsteps_cmdline, sbuf),
                    nsteps_cmdline*ir->delta_t);
        }
        else
        {
            sprintf(stmp, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf));
        }

        md_print_warn(cr, fplog, "%s\n", stmp);
    }
}
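/* Editor's note (illustrative, not part of the original source): e.g.
 * "mdrun -nsteps 500000" with delta_t = 0.002 ps reports
 * "Overriding nsteps with value passed on the command line: 500000 steps,
 * 1000.000 ps". A value of -1 also passes the > -2 test and is the
 * conventional way to request a run without a step limit.
 */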
/* Frees GPU memory and destroys the CUDA context.
 *
 * Note that this function needs to be called even if GPUs are not used
 * in this run because the PME ranks have no knowledge of whether GPUs
 * are used or not, but all ranks need to enter the barrier below.
 */
static void free_gpu_resources(FILE             *fplog,
                               const t_forcerec *fr,
                               const t_commrec  *cr)
{
    gmx_bool bIsPPrankUsingGPU;
    char     gpu_err_str[STRLEN];

    bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr->nbv != NULL && fr->nbv->bUseGPU;

    if (bIsPPrankUsingGPU)
    {
        /* free nbnxn data in GPU memory */
        nbnxn_cuda_free(fplog, fr->nbv->cu_nbv);

        /* With tMPI we need to wait for all ranks to finish deallocation before
         * destroying the context in free_gpu() as some ranks may be sharing
         * GPU and context.
         * Note: as only PP ranks need to free GPU resources, so it is safe to
         * not call the barrier on PME ranks.
         */
#ifdef GMX_THREAD_MPI
        if (PAR(cr))
        {
            gmx_barrier(cr);
        }
#endif  /* GMX_THREAD_MPI */

        /* uninitialize GPU (by destroying the context) */
        if (!free_gpu(gpu_err_str))
        {
            gmx_warning("On node %d failed to free GPU #%d: %s",
                        cr->nodeid, get_current_gpu_device_id(), gpu_err_str);
        }
    }
}
int mdrunner(gmx_hw_opt_t *hw_opt,
             FILE *fplog, t_commrec *cr, int nfile,
             const t_filenm fnm[], const output_env_t oenv, gmx_bool bVerbose,
             gmx_bool bCompact, int nstglobalcomm,
             ivec ddxyz, int dd_node_order, real rdd, real rconstr,
             const char *dddlb_opt, real dlb_scale,
             const char *ddcsx, const char *ddcsy, const char *ddcsz,
             const char *nbpu_opt,
             gmx_large_int_t nsteps_cmdline, int nstepout, int resetstep,
             int nmultisim, int repl_ex_nst, int repl_ex_nex,
             int repl_ex_seed, real pforce, real cpt_period, real max_hours,
             const char *deviceOptions, unsigned long Flags)
{
    gmx_bool        bForceUseGPU, bTryUseGPU;
    double          nodetime = 0, realtime;
    t_inputrec     *inputrec;
    t_state        *state = NULL;
    matrix          box;
    gmx_ddbox_t     ddbox = {0};
    int             npme_major, npme_minor;
    t_nrnb         *nrnb;
    gmx_mtop_t     *mtop    = NULL;
    t_mdatoms      *mdatoms = NULL;
    t_forcerec     *fr      = NULL;
    t_fcdata       *fcd     = NULL;
    real            ewaldcoeff = 0;
    gmx_pme_t      *pmedata    = NULL;
    gmx_vsite_t    *vsite      = NULL;
    gmx_constr_t    constr;
    int             i, m, nChargePerturbed = -1, status, nalloc;
    gmx_wallcycle_t wcycle;
    gmx_bool        bReadRNG, bReadEkin;
    gmx_runtime_t   runtime;
    int             rc;
    gmx_large_int_t reset_counters;
    gmx_edsam_t     ed           = NULL;
    t_commrec      *cr_old       = cr;
    int             nthreads_pme = 1;
    int             nthreads_pp  = 1;
    gmx_membed_t    membed       = NULL;
    gmx_hw_info_t  *hwinfo       = NULL;
    /* The master rank decides early on bUseGPU and broadcasts this later */
    gmx_bool        bUseGPU      = FALSE;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    snew(inputrec, 1);
    snew(mtop, 1);
    snew(state, 1);

    if (Flags & MD_APPENDFILES)
    {
        fplog = NULL;
    }

    bForceUseGPU = (strncmp(nbpu_opt, "gpu", 3) == 0);
    bTryUseGPU   = (strncmp(nbpu_opt, "auto", 4) == 0) || bForceUseGPU;

    /* Detect hardware, gather information. This is an operation that is
     * global for this process (MPI rank). */
    hwinfo = gmx_detect_hardware(fplog, cr, bTryUseGPU);
    if (SIMMASTER(cr))
    {
        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPX, nfile, fnm), inputrec, state, NULL, mtop);

        if (inputrec->cutoff_scheme != ecutsVERLET &&
            ((Flags & MD_TESTVERLET) || getenv("GMX_VERLET_SCHEME") != NULL))
        {
            convert_to_verlet_scheme(fplog, inputrec, mtop, det(state->box));
        }

        if (inputrec->cutoff_scheme == ecutsVERLET)
        {
            /* Here the master rank decides if all ranks will use GPUs */
            bUseGPU = (hwinfo->gpu_info.ncuda_dev_compatible > 0 ||
                       getenv("GMX_EMULATE_GPU") != NULL);

            prepare_verlet_scheme(fplog, cr,
                                  nbpu_opt, inputrec, mtop, state->box,
                                  bUseGPU);
        }
        else if (hwinfo->gpu_info.ncuda_dev_compatible > 0)
        {
            md_print_warn(cr, fplog,
                          "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                          "      To use a GPU, set the mdp option: cutoff-scheme = Verlet\n"
                          "      (for quick performance testing you can use the -testverlet option)\n");

            if (bForceUseGPU)
            {
                gmx_fatal(FARGS, "GPU requested, but can't be used without cutoff-scheme=Verlet");
            }
        }
#ifdef GMX_TARGET_BGQ
        else
        {
            md_print_warn(cr, fplog,
                          "NOTE: There is no SIMD implementation of the group scheme kernels on\n"
                          "      BlueGene/Q. You will observe better performance from using the\n"
                          "      Verlet cut-off scheme.\n");
        }
#endif
    }
    /* Check for externally set OpenMP affinity and turn off internal
     * pinning if any is found. We need to do this check early to tell
     * thread-MPI whether it should do pinning when spawning threads.
     * TODO: the above no longer holds, we should move these checks down
     */
    gmx_omp_check_thread_affinity(fplog, cr, hw_opt);

    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(hw_opt, SIMMASTER(cr));

    if (SIMMASTER(cr))
    {
#ifdef GMX_THREAD_MPI
        /* Early check for externally set process affinity.
         * With thread-MPI this is needed as pinning might get turned off,
         * which needs to be known before starting thread-MPI.
         * With thread-MPI hw_opt is processed here on the master rank
         * and passed to the other ranks later, so we only do this on master.
         */
        gmx_check_thread_affinity_set(fplog,
                                      cr,
                                      hw_opt, hwinfo->nthreads_hw_avail, FALSE);
#endif

#ifdef GMX_THREAD_MPI
        if (cr->npmenodes > 0 && hw_opt->nthreads_tmpi <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME nodes");
        }
#endif

        if (hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
            cr->npmenodes <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of PME nodes (-npme) when using different number of OpenMP threads for PP and PME nodes");
        }
    }
#ifdef GMX_THREAD_MPI
    if (SIMMASTER(cr))
    {
        /* Since the master knows the cut-off scheme, update hw_opt for this.
         * This is done later for normal MPI and also once more with tMPI
         * for all tMPI ranks.
         */
        check_and_update_hw_opt_2(hw_opt, inputrec->cutoff_scheme);

        /* NOW the threads will be started: */
        hw_opt->nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                                 hw_opt,
                                                 inputrec, mtop,
                                                 cr, fplog);
        if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
        {
            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
        }

        if (hw_opt->nthreads_tmpi > 1)
        {
            /* now start the threads. */
            cr = mdrunner_start_threads(hw_opt, fplog, cr_old, nfile, fnm,
                                        oenv, bVerbose, bCompact, nstglobalcomm,
                                        ddxyz, dd_node_order, rdd, rconstr,
                                        dddlb_opt, dlb_scale, ddcsx, ddcsy, ddcsz,
                                        nbpu_opt,
                                        nsteps_cmdline, nstepout, resetstep, nmultisim,
                                        repl_ex_nst, repl_ex_nex, repl_ex_seed, pforce,
                                        cpt_period, max_hours, deviceOptions,
                                        Flags);
            /* the main thread continues here with a new cr. We don't deallocate
               the old cr because other threads may still be reading it. */
            if (cr == NULL)
            {
                gmx_comm("Failed to spawn threads");
            }
        }
    }
#endif
    /* END OF CAUTION: cr is now reliable */
    /* g_membed initialisation *
     * Because we change the mtop, init_membed is called before the init_parallel *
     * (in case we ever want to make it run in parallel) */
    if (opt2bSet("-membed", nfile, fnm))
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Initializing membed");
        }
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &cpt_period);
    }

    if (PAR(cr))
    {
        /* now broadcast everything to the non-master nodes/threads: */
        init_parallel(fplog, cr, inputrec, mtop);

        /* This check needs to happen after get_nthreads_mpi() */
        if (inputrec->cutoff_scheme == ecutsVERLET && (Flags & MD_PARTDEC))
        {
            gmx_fatal_collective(FARGS, cr, NULL,
                                 "The Verlet cut-off scheme is not supported with particle decomposition.\n"
                                 "You can achieve the same effect as particle decomposition by running in parallel using only OpenMP threads.");
        }
    }

    if (fplog != NULL)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
    }

    /* now make sure the state is initialized and propagated */
    set_state_entries(state, inputrec, cr->nnodes);

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) &&
        (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || cr->npmenodes > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme option request a parallel simulation, "
#ifndef GMX_MPI
                  "but %s was compiled without threads or MPI enabled"
#else
#ifdef GMX_THREAD_MPI
                  "but the number of threads (option -nt) is 1"
#else
                  "but %s was not started through mpirun/mpiexec or only one process was requested through mpirun/mpiexec"
#endif
#endif
                  , ShortProgram()
                  );
    }

    if ((Flags & MD_RERUN) &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }

    if (can_use_allvsall(inputrec, mtop, TRUE, cr, fplog) && PAR(cr))
    {
        /* Simple neighbour searching and (also?) all-vs-all loops
         * do not work with domain decomposition. */
        Flags |= MD_PARTDEC;
    }

    if (!EEL_PME(inputrec->coulombtype) || (Flags & MD_PARTDEC))
    {
        if (cr->npmenodes > 0)
        {
            if (!EEL_PME(inputrec->coulombtype))
            {
                gmx_fatal_collective(FARGS, cr, NULL,
                                     "PME nodes are requested, but the system does not use PME electrostatics");
            }
            if (Flags & MD_PARTDEC)
            {
                gmx_fatal_collective(FARGS, cr, NULL,
                                     "PME nodes are requested, but particle decomposition does not support separate PME nodes");
            }
        }

        cr->npmenodes = 0;
    }

#ifdef GMX_FAHCORE
    fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
#endif
    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, Flags & MD_PARTDEC, fcd, state, repl_ex_nst > 0);

    if (gmx_mtop_ftype_count(mtop, F_ORIRES) > 0)
    {
        if (PAR(cr) && !(Flags & MD_PARTDEC))
        {
            gmx_fatal(FARGS, "Orientation restraints do not work (yet) with domain decomposition, use particle decomposition (mdrun option -pd)");
        }
        /* Orientation restraints */
        if (MASTER(cr))
        {
            init_orires(fplog, mtop, state->x, inputrec, cr->ms, &(fcd->orires),
                        state);
        }
    }

    if (DEFORM(*inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        if (SIMMASTER(cr))
        {
            copy_mat(state->box, box);
        }
        if (PAR(cr))
        {
            gmx_bcast(sizeof(box), box, cr);
        }
        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
#ifdef GMX_THREAD_MPI
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
#endif
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
#ifdef GMX_THREAD_MPI
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
#endif
    }
    if (opt2bSet("-cpi", nfile, fnm))
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        if (gmx_fexist_master(opt2fn_master("-cpi", nfile, fnm, cr), cr) )
        {
            load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                            cr, Flags & MD_PARTDEC, ddxyz,
                            inputrec, state, &bReadRNG, &bReadEkin,
                            (Flags & MD_APPENDFILES),
                            (Flags & MD_APPENDFILESSET));

            if (bReadRNG)
            {
                Flags |= MD_READ_RNG;
            }
            if (bReadEkin)
            {
                Flags |= MD_READ_EKIN;
            }
        }
    }
    if (((MASTER(cr) || (Flags & MD_SEPPOT)) && (Flags & MD_APPENDFILES))
#ifdef GMX_THREAD_MPI
        /* With thread MPI only the master node/thread exists in mdrun.c,
         * therefore non-master nodes need to open the "seppot" log file here.
         */
        || (!MASTER(cr) && (Flags & MD_SEPPOT))
#endif
        )
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr, !(Flags & MD_SEPPOT),
                     Flags, &fplog);
    }
    /* override nsteps with value from cmdline */
    override_nsteps_cmdline(fplog, nsteps_cmdline, inputrec, cr);

    if (SIMMASTER(cr))
    {
        copy_mat(state->box, box);
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(box), box, cr);
    }
    /* Essential dynamics */
    if (opt2bSet("-ei", nfile, fnm))
    {
        /* Open input and output files, allocate space for ED data structure */
        ed = ed_open(mtop->natoms, &state->edsamstate, nfile, fnm, Flags, oenv, cr);
    }
    if (PAR(cr) && !((Flags & MD_PARTDEC) ||
                     EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        cr->dd = init_domain_decomposition(fplog, cr, Flags, ddxyz, rdd, rconstr,
                                           dddlb_opt, dlb_scale,
                                           ddcsx, ddcsy, ddcsz,
                                           mtop, inputrec,
                                           box, state->x,
                                           &ddbox, &npme_major, &npme_minor);

        make_dd_communicators(fplog, cr, dd_node_order);

        /* Set overallocation to avoid frequent reallocation of arrays */
        set_over_alloc_dd(TRUE);
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->npmenodes = 0;
        cr->duty      = (DUTY_PP | DUTY_PME);
        npme_major    = 1;
        npme_minor    = 1;

        /* NM and TPI perform single node energy calculations in parallel */
        if (!(inputrec->eI == eiNM || EI_TPI(inputrec->eI)))
        {
            npme_major = cr->nnodes;
        }

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }
    if (PAR(cr))
    {
        /* After possible communicator splitting in make_dd_communicators,
         * we can set up the intra/inter node communication.
         */
        gmx_setup_nodecomm(fplog, cr);
    }

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);

#ifdef GMX_MPI
    md_print_info(cr, fplog, "Using %d MPI %s\n",
                  cr->nnodes,
#ifdef GMX_THREAD_MPI
                  cr->nnodes == 1 ? "thread" : "threads"
#else
                  cr->nnodes == 1 ? "process" : "processes"
#endif
                  );
#endif
1458 check_and_update_hw_opt_2(hw_opt
, inputrec
->cutoff_scheme
);
1460 gmx_omp_nthreads_init(fplog
, cr
,
1461 hwinfo
->nthreads_hw_avail
,
1462 hw_opt
->nthreads_omp
,
1463 hw_opt
->nthreads_omp_pme
,
1464 (cr
->duty
& DUTY_PP
) == 0,
1465 inputrec
->cutoff_scheme
== ecutsVERLET
);
    if (PAR(cr))
    {
        /* The master rank decided on the use of GPUs,
         * broadcast this information to all ranks.
         */
        gmx_bcast_sim(sizeof(bUseGPU), &bUseGPU, cr);
    }

    if (bUseGPU)
    {
        if (cr->npmenodes == -1)
        {
            /* Don't automatically use PME-only nodes with GPUs */
            cr->npmenodes = 0;
        }

        /* Select GPU id's to use */
        gmx_select_gpu_ids(fplog, cr, &hwinfo->gpu_info, bForceUseGPU,
                           &hw_opt->gpu_opt);
    }
    else
    {
        /* Ignore (potentially) manually selected GPUs */
        hw_opt->gpu_opt.ncuda_dev_use = 0;
    }

    /* check consistency of CPU acceleration and number of GPUs selected */
    gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt, bUseGPU);

    if (DOMAINDECOMP(cr))
    {
        /* When we share GPUs over ranks, we need to know this for the DLB */
        dd_setup_dlb_resource_sharing(cr, hwinfo, hw_opt);
    }
    /* getting number of PP/PME threads
       PME: env variable should be read only on one node to make sure it is
       identical everywhere;
     */
    /* TODO nthreads_pp is only used for pinning threads.
     * This is a temporary solution until we have a hw topology library.
     */
    nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    nthreads_pme = gmx_omp_nthreads_get(emntPME);

    wcycle = wallcycle_init(fplog, resetstep, cr, nthreads_pp, nthreads_pme);

    if (PAR(cr))
    {
        /* Master synchronizes its value of reset_counters with all nodes
         * including PME only nodes */
        reset_counters = wcycle_get_reset_counters(wcycle);
        gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
        wcycle_set_reset_counters(wcycle, reset_counters);
    }
    snew(nrnb, 1);
    if (cr->duty & DUTY_PP)
    {
        /* For domain decomposition we allocate dynamically
         * in dd_partition_system.
         */
        if (DOMAINDECOMP(cr))
        {
            bcast_state_setup(cr, state);
        }
        else
        {
            if (PAR(cr))
            {
                bcast_state(cr, state, TRUE);
            }
        }
        /* Initiate forcerecord */
        fr          = mk_forcerec();
        fr->hwinfo  = hwinfo;
        fr->gpu_opt = &hw_opt->gpu_opt;
        init_forcerec(fplog, oenv, fr, fcd, inputrec, mtop, cr, box, FALSE,
                      opt2fn("-table", nfile, fnm),
                      opt2fn("-tabletf", nfile, fnm),
                      opt2fn("-tablep", nfile, fnm),
                      opt2fn("-tableb", nfile, fnm),
                      nbpu_opt,
                      FALSE, pforce);

        /* version for PCA_NOT_READ_NODE (see md.c) */
        /*init_forcerec(fplog,fr,fcd,inputrec,mtop,cr,box,FALSE,
           "nofile","nofile","nofile","nofile",FALSE,pforce);
         */

        fr->bSepDVDL = ((Flags & MD_SEPPOT) == MD_SEPPOT);
        /* Initialize QM-MM */
        if (fr->bQMMM)
        {
            init_QMMMrec(cr, box, mtop, inputrec, fr);
        }
        /* Initialize the mdatoms structure.
         * mdatoms is not filled with atom data,
         * as this can not be done now with domain decomposition.
         */
        mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO);

        if (mdatoms->nPerturbed > 0 && inputrec->cutoff_scheme == ecutsVERLET)
        {
            gmx_fatal(FARGS, "The Verlet cut-off scheme does not (yet) support free-energy calculations with perturbed atoms, only perturbed interactions. This will be implemented soon. Use the group scheme for now.");
        }
        /* Initialize the virtual site communication */
        vsite = init_vsite(mtop, cr, FALSE);

        calc_shifts(box, fr->shift_vec);
        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
        {
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
            {
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, state->x);
            }
            if (vsite)
            {
                /* Correct initial vsite positions are required
                 * for the initial distribution in the domain decomposition
                 * and for the initial shell prediction.
                 */
                construct_vsites_mtop(fplog, vsite, mtop, state->x);
            }
        }
        if (EEL_PME(fr->eeltype))
        {
            ewaldcoeff = fr->ewaldcoeff;
            pmedata    = &fr->pmedata;
        }
    }
    else
    {
        /* This is a PME only node */

        /* We don't need the state */

        ewaldcoeff = calc_ewaldcoeff(inputrec->rcoulomb, inputrec->ewald_rtol);
        snew(pmedata, 1);
    }
    if (hw_opt->thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed
         * - which indicates that probably the OpenMP library has changed it
         * since we first checked).
         */
        gmx_check_thread_affinity_set(fplog, cr,
                                      hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(fplog, cr, hw_opt, nthreads_pme, hwinfo,
                                inputrec);
    }
    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype))
    {
        if (mdatoms)
        {
            nChargePerturbed = mdatoms->nChargePerturbed;
        }
        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed */
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
        }

        if (cr->duty & DUTY_PME)
        {
            status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
                                  mtop ? mtop->natoms : 0, nChargePerturbed,
                                  (Flags & MD_REPRODUCIBLE), nthreads_pme);
            if (status != 0)
            {
                gmx_fatal(FARGS, "Error %d initializing PME", status);
            }
        }
    }
    if (integrator[inputrec->eI].func == do_md)
    {
        /* Turn on signal handling on all nodes */
        /*
         * (A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.
         */
        signal_handler_install();
    }
    if (cr->duty & DUTY_PP)
    {
        if (inputrec->ePull != epullNO)
        {
            /* Initialize pull code */
            init_pull(fplog, inputrec, nfile, fnm, mtop, cr, oenv, inputrec->fepvals->init_lambda,
                      EI_DYNAMICS(inputrec->eI) && MASTER(cr), Flags);
        }

        if (inputrec->bRot)
        {
            /* Initialize enforced rotation code */
            init_rot(fplog, inputrec, nfile, fnm, cr, state->x, box, mtop, oenv,
                     bVerbose, Flags);
        }

        constr = init_constraints(fplog, mtop, inputrec, ed, state, cr);

        if (DOMAINDECOMP(cr))
        {
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, constr, inputrec,
                            Flags & MD_DDBONDCHECK, fr->cginfo_mb);

            set_dd_parameters(fplog, cr->dd, dlb_scale, inputrec, fr, &ddbox);

            setup_dd_grid(fplog, cr->dd);
        }
        /* Now do whatever the user wants us to do (how flexible...) */
        integrator[inputrec->eI].func(fplog, cr, nfile, fnm,
                                      oenv, bVerbose, bCompact,
                                      nstglobalcomm,
                                      vsite, constr,
                                      nstepout, inputrec, mtop,
                                      fcd, state,
                                      mdatoms, nrnb, wcycle, ed, fr,
                                      repl_ex_nst, repl_ex_nex, repl_ex_seed,
                                      membed,
                                      cpt_period, max_hours,
                                      deviceOptions,
                                      Flags,
                                      &runtime);
        if (inputrec->ePull != epullNO)
        {
            finish_pull(fplog, inputrec->pull);
        }

        if (inputrec->bRot)
        {
            finish_rot(inputrec->rot);
        }
    }
    else
    {
        /* do PME only */
        gmx_pmeonly(*pmedata, cr, nrnb, wcycle, &runtime, ewaldcoeff, FALSE, inputrec);
    }

    if (EI_DYNAMICS(inputrec->eI) || EI_TPI(inputrec->eI))
    {
        /* Some timing stats */
        if (SIMMASTER(cr))
        {
            if (runtime.proc == 0)
            {
                runtime.proc = runtime.real;
            }
        }
        else
        {
            runtime.real = 0;
        }
    }

    wallcycle_stop(wcycle, ewcRUN);
    /* Finish up, write some stuff
     * if rerunMD, don't write last frame again
     */
    finish_run(fplog, cr, ftp2fn(efSTO, nfile, fnm),
               inputrec, nrnb, wcycle, &runtime,
               fr != NULL && fr->nbv != NULL && fr->nbv->bUseGPU ?
               nbnxn_cuda_get_timings(fr->nbv->cu_nbv) : NULL,
               nthreads_pp,
               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));
    /* Free GPU memory and context */
    free_gpu_resources(fplog, fr, cr);

    if (opt2bSet("-membed", nfile, fnm))
    {
        sfree(membed);
    }

    gmx_hardware_info_free(hwinfo);
    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", &runtime);

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_close(fplog);
    }

    rc = (int)gmx_get_stop_condition();

#ifdef GMX_THREAD_MPI
    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that.
     */
    if (PAR(cr) && MASTER(cr))
    {
        tMPI_Finalize();
    }
#endif

    return rc;
}