Clarify data flow in GPU task assignment
src/programs/mdrun/runner.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2011,2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements the MD runner routine calling all integrators.
 *
 * \author David van der Spoel <david.vanderspoel@icm.uu.se>
 * \ingroup module_mdlib
 */
#include "gmxpre.h"

#include "runner.h"

#include "config.h"

#include <assert.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fileio/checkpoint.h"
#include "gromacs/fileio/oenv.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/hardwareassign.h"
#include "gromacs/hardware/printhardware.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/calculate-ewald-splitting-coefficient.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/calc_verletbuf.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/integrator.h"
#include "gromacs/mdlib/main.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/minimize.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/sighandler.h"
#include "gromacs/mdlib/sim_util.h"
#include "gromacs/mdlib/tpi.h"
#include "gromacs/mdrunutility/mdmodules.h"
#include "gromacs/mdrunutility/threadaffinity.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/edsamhistory.h"
#include "gromacs/mdtypes/energyhistory.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/observableshistory.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/mdtypes/swaphistory.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/trajectory/trajectoryframe.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/filestream.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/loggerbuilder.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"

#include "deform.h"
#include "md.h"
#include "membed.h"
#include "repl_ex.h"
#include "resource-division.h"

#ifdef GMX_FAHCORE
#include "corewrap.h"
#endif

//! First step used in pressure scaling
gmx_int64_t         deform_init_init_step_tpx;
//! Initial box for pressure scaling
matrix              deform_init_box_tpx;
//! MPI variable for use in pressure scaling
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;

#if GMX_THREAD_MPI
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
 * the number of threads will get lowered.
 */
#define MIN_ATOMS_PER_MPI_THREAD    90
#define MIN_ATOMS_PER_GPU           900
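/* Editor's illustration of the thresholds above (assumed arithmetic, not
 * from the original file): for a 27000-atom system, automatic thread
 * counting would start at most 27000/MIN_ATOMS_PER_MPI_THREAD = 300 tMPI
 * ranks on a CPU-only run, and at most 27000/MIN_ATOMS_PER_GPU = 30
 * GPU-driving ranks. */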

namespace gmx
{

void Mdrunner::reinitializeOnSpawnedThread()
{
    // TODO This duplication is formally necessary if any thread might
    // modify any memory in fnm or the pointers it contains. If the
    // contents are ever provably const, then we can remove this
    // allocation (and memory leak).
    // TODO This should probably become part of a copy constructor for
    // Mdrunner.
    fnm = dup_tfn(nfile, fnm);

    cr  = reinitialize_commrec_for_this_thread(cr);

    if (!MASTER(cr))
    {
        // Only the master rank writes to the log files
        fplog = nullptr;
    }
}

/*! \brief The callback used for running on spawned threads.
 *
 * Obtains the pointer to the master mdrunner object from the one
 * argument permitted to the thread-launch API call, copies it to make
 * a new runner for this thread, reinitializes necessary data, and
 * proceeds to the simulation. */
static void mdrunner_start_fn(void *arg)
{
    try
    {
        auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner *>(arg);
        /* copy the arg list to make sure that it's thread-local. This
           doesn't copy pointed-to items, of course, but those are all
           const. */
        gmx::Mdrunner mdrunner = *masterMdrunner;
        mdrunner.reinitializeOnSpawnedThread();
        mdrunner.mdrunner();
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

/*! \brief Start thread-MPI threads.
 *
 * Called by mdrunner() to start a specific number of threads
 * (including the main thread) for thread-parallel runs. This in turn
 * calls mdrunner() for each thread. All options are the same as for
 * mdrunner(). */
t_commrec *Mdrunner::spawnThreads(int numThreadsToLaunch)
{
    /* first check whether we even need to start tMPI */
    if (numThreadsToLaunch < 2)
    {
        return cr;
    }

    gmx::Mdrunner spawnedMdrunner = *this;
    // TODO This duplication is formally necessary if any thread might
    // modify any memory in fnm or the pointers it contains. If the
    // contents are ever provably const, then we can remove this
    // allocation (and memory leak).
    // TODO This should probably become part of a copy constructor for
    // Mdrunner.
    spawnedMdrunner.fnm = dup_tfn(this->nfile, fnm);

    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns, we set thread affinity later */
    if (tMPI_Init_fn(TRUE, numThreadsToLaunch, TMPI_AFFINITY_NONE,
                     mdrunner_start_fn, static_cast<void*>(&spawnedMdrunner)) != TMPI_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to spawn thread-MPI threads"));
    }

    return reinitialize_commrec_for_this_thread(cr);
}

}      // namespace

#endif /* GMX_THREAD_MPI */

/*! \brief Cost of non-bonded kernels
 *
 * We determine the extra cost of the non-bonded kernels compared to
 * a reference nstlist value of 10 (which is the default in grompp).
 */
static const int    nbnxnReferenceNstlist = 10;
//! The values to try when switching
const int           nstlist_try[] = { 20, 25, 40 };
//! Number of elements in the neighborsearch list trials.
#define NNSTL  sizeof(nstlist_try)/sizeof(nstlist_try[0])
/* Increase nstlist until the non-bonded cost increases more than listfac_ok,
 * but never more than listfac_max.
 * A standard (protein+)water system at 300K with PME ewald_rtol=1e-5
 * needs 1.28 at rcoulomb=0.9 and 1.24 at rcoulomb=1.0 to get to nstlist=40.
 * Note that both CPU and GPU factors are conservative. Performance should
 * not go down due to this tuning, except with a relatively slow GPU.
 * On the other hand, at medium/high parallelization or with fast GPUs
 * nstlist will not be increased enough to reach optimal performance.
 */
/* CPU: pair-search is about a factor 1.5 slower than the non-bonded kernel */
//! Max OK performance ratio between force calc and neighbor searching
static const float  nbnxn_cpu_listfac_ok    = 1.05;
//! Too high performance ratio between force calc and neighbor searching
static const float  nbnxn_cpu_listfac_max   = 1.09;
/* CPU: pair-search is about a factor 2-3 slower than the non-bonded kernel */
//! Max OK performance ratio between force calc and neighbor searching
static const float  nbnxn_knl_listfac_ok    = 1.22;
//! Too high performance ratio between force calc and neighbor searching
static const float  nbnxn_knl_listfac_max   = 1.3;
/* GPU: pair-search is a factor 1.5-3 slower than the non-bonded kernel */
//! Max OK performance ratio between force calc and neighbor searching
static const float  nbnxn_gpu_listfac_ok    = 1.20;
//! Too high performance ratio between force calc and neighbor searching
static const float  nbnxn_gpu_listfac_max   = 1.30;
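
/* Editor's note on the cube roots used with these factors: the
 * non-bonded cost is roughly proportional to the number of pairs within
 * the list radius, which grows with the cube of the effective radius.
 * Allowing the cost to grow by a factor listfac therefore allows the
 * effective radius to grow by cbrt(listfac), which is how listfac_ok and
 * listfac_max enter the rlist_ok/rlist_max formulas in
 * increase_nstlist() below. */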

/*! \brief Try to increase nstlist when using the Verlet cut-off scheme */
static void increase_nstlist(FILE *fp, t_commrec *cr,
                             t_inputrec *ir, int nstlist_cmdline,
                             const gmx_mtop_t *mtop, matrix box,
                             bool makeGpuPairList, const gmx::CpuInfo &cpuinfo)
{
    float                  listfac_ok, listfac_max;
    int                    nstlist_orig, nstlist_prev;
    verletbuf_list_setup_t ls;
    real                   rlistWithReferenceNstlist, rlist_inc, rlist_ok, rlist_max;
    real                   rlist_new, rlist_prev;
    size_t                 nstlist_ind = 0;
    gmx_bool               bBox, bDD, bCont;
    const char            *nstl_gpu = "\nFor optimal performance with a GPU nstlist (now %d) should be larger.\nThe optimum depends on your CPU and GPU resources.\nYou might want to try several nstlist values.\n";
    const char            *nve_err  = "Cannot increase nstlist because an NVE ensemble is used";
    const char            *vbd_err  = "Cannot increase nstlist because verlet-buffer-tolerance is not set or used";
    const char            *box_err  = "Cannot increase nstlist because the box is too small";
    const char            *dd_err   = "Cannot increase nstlist because of domain decomposition limitations";
    char                   buf[STRLEN];

    if (nstlist_cmdline <= 0)
    {
        if (ir->nstlist == 1)
        {
            /* The user probably set nstlist=1 for a reason,
             * don't mess with the settings.
             */
            return;
        }

        if (fp != nullptr && makeGpuPairList && ir->nstlist < nstlist_try[0])
        {
            fprintf(fp, nstl_gpu, ir->nstlist);
        }
        nstlist_ind = 0;
        while (nstlist_ind < NNSTL && ir->nstlist >= nstlist_try[nstlist_ind])
        {
            nstlist_ind++;
        }
        if (nstlist_ind == NNSTL)
        {
            /* There is no larger nstlist value to try */
            return;
        }
    }

    if (EI_MD(ir->eI) && ir->etc == etcNO)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n", nve_err);
        }
        if (fp != nullptr)
        {
            fprintf(fp, "%s\n", nve_err);
        }

        return;
    }

    if (ir->verletbuf_tol == 0 && makeGpuPairList)
    {
        gmx_fatal(FARGS, "You are using an old tpr file with a GPU, please generate a new tpr file with an up to date version of grompp");
    }

    if (ir->verletbuf_tol < 0)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n", vbd_err);
        }
        if (fp != nullptr)
        {
            fprintf(fp, "%s\n", vbd_err);
        }

        return;
    }

    if (makeGpuPairList)
    {
        listfac_ok  = nbnxn_gpu_listfac_ok;
        listfac_max = nbnxn_gpu_listfac_max;
    }
    else if (cpuinfo.feature(gmx::CpuInfo::Feature::X86_Avx512ER))
    {
        listfac_ok  = nbnxn_knl_listfac_ok;
        listfac_max = nbnxn_knl_listfac_max;
    }
    else
    {
        listfac_ok  = nbnxn_cpu_listfac_ok;
        listfac_max = nbnxn_cpu_listfac_max;
    }

    nstlist_orig = ir->nstlist;
    if (nstlist_cmdline > 0)
    {
        if (fp)
        {
            sprintf(buf, "Getting nstlist=%d from command line option",
                    nstlist_cmdline);
        }
        ir->nstlist = nstlist_cmdline;
    }

    verletbuf_get_list_setup(true, makeGpuPairList, &ls);

    /* Allow rlist to make the list a given factor larger than the list
     * would be with the reference value for nstlist (10).
     */
    nstlist_prev = ir->nstlist;
    ir->nstlist  = nbnxnReferenceNstlist;
    calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, nullptr,
                            &rlistWithReferenceNstlist);
    ir->nstlist  = nstlist_prev;

    /* Determine the pair list size increase due to zero interactions */
    rlist_inc = nbnxn_get_rlist_effective_inc(ls.cluster_size_j,
                                              mtop->natoms/det(box));
    rlist_ok  = (rlistWithReferenceNstlist + rlist_inc)*std::cbrt(listfac_ok) - rlist_inc;
    rlist_max = (rlistWithReferenceNstlist + rlist_inc)*std::cbrt(listfac_max) - rlist_inc;
    if (debug)
    {
        fprintf(debug, "nstlist tuning: rlist_inc %.3f rlist_ok %.3f rlist_max %.3f\n",
                rlist_inc, rlist_ok, rlist_max);
    }
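
    /* Worked example (editor's illustration with assumed numbers): with
     * rlistWithReferenceNstlist = 1.0 nm, rlist_inc = 0.1 nm and the GPU
     * factors, rlist_ok  = 1.1*cbrt(1.20) - 0.1 ~= 1.07 nm and
     * rlist_max = 1.1*cbrt(1.30) - 0.1 ~= 1.10 nm. The loop below keeps
     * increasing nstlist while the required rlist stays below rlist_ok,
     * and accepts a final value only up to rlist_max. */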

    nstlist_prev = nstlist_orig;
    rlist_prev   = ir->rlist;
    do
    {
        if (nstlist_cmdline <= 0)
        {
            ir->nstlist = nstlist_try[nstlist_ind];
        }

        /* Set the pair-list buffer size in ir */
        calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, nullptr, &rlist_new);

        /* Does rlist fit in the box? */
        bBox = (gmx::square(rlist_new) < max_cutoff2(ir->ePBC, box));
        bDD  = TRUE;
        if (bBox && DOMAINDECOMP(cr))
        {
            /* Check if rlist fits in the domain decomposition */
            if (inputrec2nboundeddim(ir) < DIM)
            {
                gmx_incons("Changing nstlist with domain decomposition and unbounded dimensions is not implemented yet");
            }
            t_state state_tmp;
            copy_mat(box, state_tmp.box);
            bDD = change_dd_cutoff(cr, &state_tmp, ir, rlist_new);
        }

        if (debug)
        {
            fprintf(debug, "nstlist %d rlist %.3f bBox %d bDD %d\n",
                    ir->nstlist, rlist_new, bBox, bDD);
        }

        bCont = FALSE;

        if (nstlist_cmdline <= 0)
        {
            if (bBox && bDD && rlist_new <= rlist_max)
            {
                /* Increase nstlist */
                nstlist_prev = ir->nstlist;
                rlist_prev   = rlist_new;
                bCont        = (nstlist_ind+1 < NNSTL && rlist_new < rlist_ok);
            }
            else
            {
                /* Stick with the previous nstlist */
                ir->nstlist = nstlist_prev;
                rlist_new   = rlist_prev;
                bBox        = TRUE;
                bDD         = TRUE;
            }
        }

        nstlist_ind++;
    }
    while (bCont);

    if (!bBox || !bDD)
    {
        gmx_warning(!bBox ? box_err : dd_err);
        if (fp != nullptr)
        {
            fprintf(fp, "\n%s\n", bBox ? box_err : dd_err);
        }
        ir->nstlist = nstlist_orig;
    }
    else if (ir->nstlist != nstlist_orig || rlist_new != ir->rlist)
    {
        sprintf(buf, "Changing nstlist from %d to %d, rlist from %g to %g",
                nstlist_orig, ir->nstlist,
                ir->rlist, rlist_new);
        if (MASTER(cr))
        {
            fprintf(stderr, "%s\n\n", buf);
        }
        if (fp != nullptr)
        {
            fprintf(fp, "%s\n\n", buf);
        }
        ir->rlist = rlist_new;
    }
}

/*! \brief Initialize variables for Verlet scheme simulation */
static void prepare_verlet_scheme(FILE               *fplog,
                                  t_commrec          *cr,
                                  t_inputrec         *ir,
                                  int                 nstlist_cmdline,
                                  const gmx_mtop_t   *mtop,
                                  matrix              box,
                                  bool                makeGpuPairList,
                                  const gmx::CpuInfo &cpuinfo)
{
    /* For NVE simulations, we will retain the initial list buffer */
    if (EI_DYNAMICS(ir->eI) &&
        ir->verletbuf_tol > 0 &&
        !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        /* Update the Verlet buffer size for the current run setup */
        verletbuf_list_setup_t ls;
        real                   rlist_new;

        /* Here we assume SIMD-enabled kernels are being used. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        verletbuf_get_list_setup(true, makeGpuPairList, &ls);

        calc_verlet_buffer_size(mtop, det(box), ir, -1, &ls, nullptr, &rlist_new);

        if (rlist_new != ir->rlist)
        {
            if (fplog != nullptr)
            {
                fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_new,
                        ls.cluster_size_i, ls.cluster_size_j);
            }
            ir->rlist = rlist_new;
        }
    }

    if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
    {
        gmx_fatal(FARGS, "Cannot set nstlist without %s",
                  !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");
    }

    if (EI_DYNAMICS(ir->eI))
    {
        /* Set or try nstlist values */
        increase_nstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, makeGpuPairList, cpuinfo);
    }
}

/*! \brief Override the nsteps value in inputrec
 *
 * with the value passed on the command line (if any)
 */
static void override_nsteps_cmdline(const gmx::MDLogger &mdlog,
                                    gmx_int64_t          nsteps_cmdline,
                                    t_inputrec          *ir)
{
    assert(ir);

    /* override with anything else than the default -2 */
    if (nsteps_cmdline > -2)
    {
        char sbuf_steps[STEPSTRSIZE];
        char sbuf_msg[STRLEN];

        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps),
                    fabs(nsteps_cmdline*ir->delta_t));
        }
        else
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps));
        }

        GMX_LOG(mdlog.warning).asParagraph().appendText(sbuf_msg);
    }
    else if (nsteps_cmdline < -2)
    {
        /* Use gmx_step_str rather than a plain %d, since nsteps_cmdline
         * is a 64-bit integer. */
        char sbuf_steps[STEPSTRSIZE];
        gmx_fatal(FARGS, "Invalid nsteps value passed on the command line: %s",
                  gmx_step_str(nsteps_cmdline, sbuf_steps));
    }
    /* Do nothing if nsteps_cmdline == -2 */
}
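
/* Editor's note on the sentinel values handled above: mdrun's default for
 * -nsteps is -2, meaning "do not override the .tpr value"; -1 is a valid
 * override requesting an effectively unlimited dynamics run, which is why
 * the message for -1 omits the time estimate; anything below -2 is
 * rejected as invalid. */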

namespace gmx
{

//! Halt the run if there are inconsistencies between user choices to run with GPUs and/or hardware detection.
static void exitIfCannotForceGpuRun(bool requirePhysicalGpu,
                                    bool emulateGpu,
                                    bool useVerletScheme,
                                    bool compatibleGpusFound)
{
    /* Was GPU acceleration either explicitly (-nb gpu) or implicitly
     * (gpu ID passed) requested? */
    if (!requirePhysicalGpu)
    {
        return;
    }

    if (GMX_GPU == GMX_GPU_NONE)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but %s was compiled without GPU support!",
                  gmx::getProgramContext().displayName());
    }

    if (emulateGpu)
    {
        gmx_fatal(FARGS, "GPU emulation cannot be requested together with GPU acceleration!");
    }

    if (!useVerletScheme)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but can't be used without cutoff-scheme=Verlet");
    }

    if (!compatibleGpusFound)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but no compatible GPUs were detected.");
    }
}

/*! \brief Return whether GPU acceleration is useful with the given settings.
 *
 * If not, logs a message about falling back to CPU code. */
static bool gpuAccelerationIsUseful(const MDLogger   &mdlog,
                                    const t_inputrec *ir,
                                    bool              doRerun)
{
    if (doRerun && ir->opts.ngener > 1)
    {
        /* Rerun execution time is dominated by I/O and pair search,
         * so GPUs are not very useful, plus they do not support more
         * than one energy group. If the user requested GPUs
         * explicitly, a fatal error is given later. With non-reruns,
         * we fall back to a single whole-of-system energy group
         * (which runs much faster than a multiple-energy-groups
         * implementation would), and issue a note in the .log
         * file. Users can re-run if they want the information. */
        GMX_LOG(mdlog.warning).asParagraph().appendText("Multiple energy groups are not implemented for GPUs, so they are not useful for this rerun; falling back to the CPU");
        return false;
    }

    return true;
}

//! \brief Return the correct integrator function.
static integrator_t *my_integrator(unsigned int ei)
{
    switch (ei)
    {
        case eiMD:
        case eiBD:
        case eiSD1:
        case eiVV:
        case eiVVAK:
            if (!EI_DYNAMICS(ei))
            {
                GMX_THROW(APIError("do_md integrator would be called for a non-dynamical integrator"));
            }
            return do_md;
        case eiSteep:
            return do_steep;
        case eiCG:
            return do_cg;
        case eiNM:
            return do_nm;
        case eiLBFGS:
            return do_lbfgs;
        case eiTPI:
        case eiTPIC:
            if (!EI_TPI(ei))
            {
                GMX_THROW(APIError("do_tpi integrator would be called for a non-TPI integrator"));
            }
            return do_tpi;
        case eiSD2_REMOVED:
            GMX_THROW(NotImplementedError("SD2 integrator has been removed"));
        default:
            GMX_THROW(APIError("Non-existing integrator selected"));
    }
}

//! Initializes the logger for mdrun.
static gmx::LoggerOwner buildLogger(FILE *fplog, const t_commrec *cr)
{
    gmx::LoggerBuilder builder;
    if (fplog != nullptr)
    {
        builder.addTargetFile(gmx::MDLogger::LogLevel::Info, fplog);
    }
    if (cr == nullptr || SIMMASTER(cr))
    {
        builder.addTargetStream(gmx::MDLogger::LogLevel::Warning,
                                &gmx::TextOutputFile::standardError());
    }
    return builder.build();
}

int Mdrunner::mdrunner()
{
    matrix                    box;
    gmx_ddbox_t               ddbox = {0};
    int                       npme_major, npme_minor;
    t_nrnb                   *nrnb;
    gmx_mtop_t               *mtop          = nullptr;
    t_mdatoms                *mdatoms       = nullptr;
    t_forcerec               *fr            = nullptr;
    t_fcdata                 *fcd           = nullptr;
    real                      ewaldcoeff_q  = 0;
    real                      ewaldcoeff_lj = 0;
    struct gmx_pme_t        **pmedata       = nullptr;
    gmx_vsite_t              *vsite         = nullptr;
    gmx_constr_t              constr;
    int                       nChargePerturbed = -1, nTypePerturbed = 0, status;
    gmx_wallcycle_t           wcycle;
    gmx_walltime_accounting_t walltime_accounting = nullptr;
    int                       rc;
    gmx_int64_t               reset_counters;
    int                       nthreads_pme = 1;
    gmx_membed_t             *membed       = nullptr;
    gmx_hw_info_t            *hwinfo       = nullptr;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    gmx::MDModules mdModules;
    t_inputrec     inputrecInstance;
    t_inputrec    *inputrec = &inputrecInstance;
    snew(mtop, 1);

    if (Flags & MD_APPENDFILES)
    {
        fplog = nullptr;
    }

    bool doMembed = opt2bSet("-membed", nfile, fnm);
    bool doRerun  = (Flags & MD_RERUN);

    /* Handle GPU-related user options. Later, we check consistency
     * with things like whether support is compiled, or tMPI thread
     * count. */
    bool emulateGpu            = getenv("GMX_EMULATE_GPU") != nullptr;
    auto userGpuTaskAssignment = parseGpuTaskAssignment(hw_opt.gpuIdTaskAssignment);
    bool forceUseCpu           = (strncmp(nbpu_opt, "cpu", 3) == 0);
    if (!userGpuTaskAssignment.empty() && forceUseCpu)
    {
        gmx_fatal(FARGS, "GPU IDs were specified, and short-ranged interactions were assigned to the CPU. Make no more than one of these choices.");
    }
    bool forceUsePhysicalGpu = (strncmp(nbpu_opt, "gpu", 3) == 0) || !userGpuTaskAssignment.empty();
    bool tryUsePhysicalGpu   = (strncmp(nbpu_opt, "auto", 4) == 0) && !emulateGpu && (GMX_GPU != GMX_GPU_NONE);
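
    /* Summary of how the -nb option maps onto the flags above (editor's
     * orientation note; the assignments above are authoritative):
     *   -nb cpu  -> forceUseCpu; combining it with -gpu_id is fatal
     *   -nb gpu  -> forceUsePhysicalGpu (also implied by any -gpu_id string)
     *   -nb auto -> tryUsePhysicalGpu, unless GPU emulation is requested
     *               or this build has no GPU support
     * GMX_EMULATE_GPU only sets emulateGpu; it never selects a device. */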

    // Here we assume that SIMMASTER(cr) does not change even after the
    // threads are started.
    gmx::LoggerOwner logOwner(buildLogger(fplog, cr));
    gmx::MDLogger    mdlog(logOwner.logger());

    /* Detect hardware, gather information. This is an operation that is
     * global for this process (MPI rank). */
    bool detectGpus = forceUsePhysicalGpu || tryUsePhysicalGpu;
    hwinfo = gmx_detect_hardware(mdlog, cr, detectGpus);

    gmx_print_detected_hardware(fplog, cr, mdlog, hwinfo);

    if (fplog != nullptr)
    {
        /* Print references after all software/hardware printing */
        please_cite(fplog, "Abraham2015");
        please_cite(fplog, "Pall2015");
        please_cite(fplog, "Pronk2013");
        please_cite(fplog, "Hess2008b");
        please_cite(fplog, "Spoel2005a");
        please_cite(fplog, "Lindahl2001a");
        please_cite(fplog, "Berendsen95a");
    }

    std::unique_ptr<t_state> stateInstance = std::unique_ptr<t_state>(new t_state);
    t_state                 *state         = stateInstance.get();

    if (SIMMASTER(cr))
    {
        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, state, mtop);

        exitIfCannotForceGpuRun(forceUsePhysicalGpu,
                                emulateGpu,
                                inputrec->cutoff_scheme == ecutsVERLET,
                                compatibleGpusFound(hwinfo->gpu_info));

        if (inputrec->cutoff_scheme == ecutsVERLET)
        {
            /* TODO This logic could run later, e.g. before -npme -1
               is handled. If inputrec has already been communicated,
               then the resulting tryUsePhysicalGpu does not need to
               be communicated. */
            if ((tryUsePhysicalGpu || forceUsePhysicalGpu) &&
                !gpuAccelerationIsUseful(mdlog, inputrec, doRerun))
            {
                /* Fallback message printed by nbnxn_acceleration_supported */
                if (forceUsePhysicalGpu)
                {
                    gmx_fatal(FARGS, "GPU acceleration requested, but not supported with the given input settings");
                }
                tryUsePhysicalGpu = false;
            }

            /* TODO This logic could run later, e.g. after inputrec,
               mtop, and state have been communicated, but before DD
               is initialized. In particular, -ntmpi 0 only needs to
               know whether the Verlet scheme is active in order to
               choose the number of ranks (because GPUs might be
               usable).*/
            bool makeGpuPairList = (forceUsePhysicalGpu ||
                                    tryUsePhysicalGpu ||
                                    emulateGpu);
            prepare_verlet_scheme(fplog, cr,
                                  inputrec, nstlist_cmdline, mtop, state->box,
                                  makeGpuPairList, *hwinfo->cpuInfo);
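            /* Editor's note (assumption, not from the original source):
             * emulateGpu is included in makeGpuPairList above because the
             * GMX_EMULATE_GPU path runs the GPU flavor of the non-bonded
             * kernels on the CPU, so it needs the GPU-style pair-list
             * setup even though no device is used. */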
        }
        else
        {
            if (nstlist_cmdline > 0)
            {
                gmx_fatal(FARGS, "Cannot set nstlist with the group cut-off scheme");
            }

            if (compatibleGpusFound(hwinfo->gpu_info))
            {
                GMX_LOG(mdlog.warning).asParagraph().appendText(
                        "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                        "      To use a GPU, set the mdp option: cutoff-scheme = Verlet");
                tryUsePhysicalGpu = false;
            }

#if GMX_TARGET_BGQ
            md_print_warn(cr, fplog,
                          "NOTE: There is no SIMD implementation of the group scheme kernels on\n"
                          "      BlueGene/Q. You will observe better performance from using the\n"
                          "      Verlet cut-off scheme.\n");
#endif
        }
    }

    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(&hw_opt, cr, npme);

    /* Early check for externally set process affinity. */
    gmx_check_thread_affinity_set(mdlog, cr,
                                  &hw_opt, hwinfo->nthreads_hw_avail, FALSE);

#if GMX_THREAD_MPI
    if (SIMMASTER(cr))
    {
        if (npme > 0 && hw_opt.nthreads_tmpi <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
        }

        /* Since the master knows the cut-off scheme, update hw_opt for this.
         * This is done later for normal MPI and also once more with tMPI
         * for all tMPI ranks.
         */
        check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);

        // Determine how many thread-MPI ranks to start.
        hw_opt.nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                                &hw_opt,
                                                userGpuTaskAssignment,
                                                inputrec, mtop,
                                                mdlog,
                                                doMembed);

        // Now start the threads for thread MPI.
        cr = spawnThreads(hw_opt.nthreads_tmpi);
        /* The main thread continues here with a new cr. We don't deallocate
           the old cr because other threads may still be reading it. */
        // TODO Both master and spawned threads call dup_tfn and
        // reinitialize_commrec_for_this_thread. Find a way to express
        // this better.
    }
#endif
    /* END OF CAUTION: cr is now reliable */

    if (PAR(cr))
    {
        /* now broadcast everything to the non-master nodes/threads: */
        init_parallel(cr, inputrec, mtop);

        gmx_bcast_sim(sizeof(tryUsePhysicalGpu), &tryUsePhysicalGpu, cr);
    }
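
    /* Data-flow note: the GPU fallback decision above was taken only on
     * SIMMASTER, because only that rank had read inputrec from the tpr
     * file at that point. The broadcast just above propagates the final
     * tryUsePhysicalGpu value to all ranks before it is used below to
     * build the GPU task assignment. */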

    // TODO: Error handling
    mdModules.assignOptionsToModules(*inputrec->params, nullptr);

    if (fplog != nullptr)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
        fprintf(fplog, "\n");
    }

    /* now make sure the state is initialized and propagated */
    set_state_entries(state, inputrec);

    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) &&
        (ddxyz[XX] > 1 || ddxyz[YY] > 1 || ddxyz[ZZ] > 1 || npme > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme option requests a parallel simulation, "
#if !GMX_MPI
                  "but %s was compiled without threads or MPI enabled"
#else
#if GMX_THREAD_MPI
                  "but the number of MPI-threads (option -ntmpi) is not set or is 1"
#else
                  "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
#endif
#endif
                  , output_env_get_program_display_name(oenv)
                  );
    }

    if (doRerun &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }

    if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
    {
        gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
    }

    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
    {
        if (npme > 0)
        {
            gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
        }

        npme = 0;
    }

    if ((tryUsePhysicalGpu || forceUsePhysicalGpu) && npme < 0)
    {
        /* With GPUs we don't automatically use PME-only ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */
        npme = 0;
    }

#ifdef GMX_FAHCORE
    if (MASTER(cr))
    {
        fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
    }
#endif

    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, fcd, state, replExParams.exchangeInterval > 0);

    init_orires(fplog, mtop, as_rvec_array(state->x.data()), inputrec, cr, &(fcd->orires),
                state);

    if (inputrecDeform(inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        if (SIMMASTER(cr))
        {
            copy_mat(state->box, box);
        }
        if (PAR(cr))
        {
            gmx_bcast(sizeof(box), box, cr);
        }
        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
    }
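
    /* Reader-side sketch (editor's illustration; the actual consumer
     * lives in the update code, not in this file): a thread needing the
     * deform reference values is expected to take the same mutex, e.g.
     *
     *   tMPI_Thread_mutex_lock(&deform_init_box_mutex);
     *   copy_mat(deform_init_box_tpx, localBox);   // localBox is hypothetical
     *   tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
     */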

    ObservablesHistory observablesHistory = {};

    if (Flags & MD_STARTFROMCPT)
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        gmx_bool bReadEkin;

        load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                        cr, ddxyz, &npme,
                        inputrec, state, &bReadEkin, &observablesHistory,
                        (Flags & MD_APPENDFILES),
                        (Flags & MD_APPENDFILESSET),
                        (Flags & MD_REPRODUCIBLE));

        if (bReadEkin)
        {
            Flags |= MD_READ_EKIN;
        }
    }

    if (SIMMASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
                     Flags, &fplog);
        logOwner = buildLogger(fplog, nullptr);
        mdlog    = logOwner.logger();
    }

    /* override nsteps with value from cmdline */
    override_nsteps_cmdline(mdlog, nsteps_cmdline, inputrec);

    if (SIMMASTER(cr))
    {
        copy_mat(state->box, box);
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(box), box, cr);
    }

    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        cr->dd = init_domain_decomposition(fplog, cr, Flags, ddxyz, npme,
                                           dd_rank_order,
                                           rdd, rconstr,
                                           dddlb_opt, dlb_scale,
                                           ddcsx, ddcsy, ddcsz,
                                           mtop, inputrec,
                                           box, as_rvec_array(state->x.data()),
                                           &ddbox, &npme_major, &npme_minor);
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->npmenodes = 0;
        cr->duty      = (DUTY_PP | DUTY_PME);
        npme_major    = 1;
        npme_minor    = 1;

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }

    if (PAR(cr))
    {
        /* After possible communicator splitting in make_dd_communicators.
         * we can set up the intra/inter node communication.
         */
        gmx_setup_nodecomm(fplog, cr);
    }

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);
#if GMX_MPI
    if (MULTISIM(cr))
    {
        GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
                "This is simulation %d out of %d running as a composite GROMACS\n"
                "multi-simulation job. Setup for this simulation:\n",
                cr->ms->sim, cr->ms->nsim);
    }
    GMX_LOG(mdlog.warning).appendTextFormatted(
            "Using %d MPI %s\n",
            cr->nnodes,
#if GMX_THREAD_MPI
            cr->nnodes == 1 ? "thread" : "threads"
#else
            cr->nnodes == 1 ? "process" : "processes"
#endif
            );
    fflush(stderr);
#endif

    /* Check and update hw_opt for the cut-off scheme */
    check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);

    /* Check and update hw_opt for the number of MPI ranks */
    check_and_update_hw_opt_3(&hw_opt);

    gmx_omp_nthreads_init(mdlog, cr,
                          hwinfo->nthreads_hw_avail,
                          hw_opt.nthreads_omp,
                          hw_opt.nthreads_omp_pme,
                          (cr->duty & DUTY_PP) == 0,
                          inputrec->cutoff_scheme == ecutsVERLET);

#ifndef NDEBUG
    if (EI_TPI(inputrec->eI) &&
        inputrec->cutoff_scheme == ecutsVERLET)
    {
        gmx_feenableexcept();
    }
#endif

    // Contains the ID of the GPU used by each PP rank on this node,
    // indexed by that rank. Empty if no GPUs are selected for use on
    // this node.
    std::vector<int> gpuTaskAssignment;
    if (tryUsePhysicalGpu || forceUsePhysicalGpu)
    {
        /* Currently the DD code assigns duty to ranks that can
         * include PP work that currently can be executed on a single
         * GPU, if present and compatible. This has to be coordinated
         * across PP ranks on a node, with possible multiple devices
         * or sharing devices on a node, either from the user
         * selection, or automatically. */
        bool rankCanUseGpu = cr->duty & DUTY_PP;
        gpuTaskAssignment = mapPpRanksToGpus(rankCanUseGpu, cr, hwinfo->gpu_info,
                                             userGpuTaskAssignment);
    }
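
    /* Editor's sketch of the assignment data flow (the -gpu_id string is
     * illustrative, not from the original file):
     *
     *   -gpu_id "0011"  -> parseGpuTaskAssignment() -> userGpuTaskAssignment = {0,0,1,1}
     *                   -> mapPpRanksToGpus()       -> gpuTaskAssignment     = {0,0,1,1}
     *                      (indexed by intra-node PP rank)
     *
     * A rank with cr->rank_pp_intranode == 2 would then pick device 1 as
     * its short-ranged device below. With no -gpu_id, mapPpRanksToGpus()
     * selects among the detected compatible devices automatically. */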

    /* If we are using GPUs, report on this rank how they are being
     * used on this node. */
    if (!gpuTaskAssignment.empty())
    {
        auto gpuUsageReport =
            makeGpuUsageReport(hwinfo->gpu_info, !userGpuTaskAssignment.empty(),
                               gpuTaskAssignment, cr->nrank_pp_intranode,
                               cr->nnodes > 1);

        /* NOTE: this print is only for and on one physical node */
        GMX_LOG(mdlog.warning).appendText(gpuUsageReport);
    }

    /* check consistency across ranks of things like SIMD
     * support and number of GPUs selected */
    gmx_check_hw_runconf_consistency(mdlog, hwinfo, cr, hw_opt, !userGpuTaskAssignment.empty(), gpuTaskAssignment);
    /* From now on, the userGpuTaskAssignment should never be used */

    /* Prevent other ranks from continuing after an inconsistency was found.
     *
     * TODO This function implements a barrier so that MPI runtimes
     * can organize an orderly shutdown if one of the ranks has had to
     * issue a fatal error in various code already run. When we have
     * MPI-aware error handling and reporting, this should be
     * improved. */
#if GMX_MPI
    if (PAR(cr))
    {
        MPI_Barrier(cr->mpi_comm_mysim);
    }
#endif

    /* Now that we know the setup is consistent, check for efficiency */
    check_resource_division_efficiency(hwinfo, hw_opt.nthreads_tot, !gpuTaskAssignment.empty(), Flags & MD_NTOMPSET,
                                       cr, mdlog);

    gmx_device_info_t *shortRangedDeviceInfo = nullptr;
    int                shortRangedDeviceId   = -1;
    if (cr->duty & DUTY_PP)
    {
        if (!gpuTaskAssignment.empty())
        {
            shortRangedDeviceId   = gpuTaskAssignment[cr->rank_pp_intranode];
            shortRangedDeviceInfo = getDeviceInfo(hwinfo->gpu_info, shortRangedDeviceId);
        }
    }

    if (DOMAINDECOMP(cr))
    {
        /* When we share GPUs over ranks, we need to know this for the DLB */
        dd_setup_dlb_resource_sharing(cr, shortRangedDeviceId);
    }

    /* getting number of PP/PME threads
       PME: env variable should be read only on one node to make sure it is
       identical everywhere;
     */
    nthreads_pme = gmx_omp_nthreads_get(emntPME);

    wcycle = wallcycle_init(fplog, resetstep, cr);

    if (PAR(cr))
    {
        /* Master synchronizes its value of reset_counters with all nodes
         * including PME only nodes */
        reset_counters = wcycle_get_reset_counters(wcycle);
        gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
        wcycle_set_reset_counters(wcycle, reset_counters);
    }

    // Membrane embedding must be initialized before we call init_forcerec()
    if (doMembed)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Initializing membed");
        }
        /* Note that membed cannot work in parallel because mtop is
         * changed here. Fix this if we ever want to make it run with
         * multiple ranks. */
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, state, cr, &cpt_period);
    }

    snew(nrnb, 1);
    if (cr->duty & DUTY_PP)
    {
        bcast_state(cr, state);

        /* Initiate forcerecord */
        fr                 = mk_forcerec();
        fr->forceProviders = mdModules.initForceProviders();
        init_forcerec(fplog, mdlog, fr, fcd,
                      inputrec, mtop, cr, box,
                      opt2fn("-table", nfile, fnm),
                      opt2fn("-tablep", nfile, fnm),
                      getFilenm("-tableb", nfile, fnm),
                      nbpu_opt,
                      shortRangedDeviceInfo,
                      FALSE,
                      pforce);

        /* Initialize QM-MM */
        if (fr->bQMMM)
        {
            init_QMMMrec(cr, mtop, inputrec, fr);
        }

        /* Initialize the mdatoms structure.
         * mdatoms is not filled with atom data,
         * as this can not be done now with domain decomposition.
         */
        mdatoms = init_mdatoms(fplog, mtop, inputrec->efep != efepNO);

        /* Initialize the virtual site communication */
        vsite = init_vsite(mtop, cr, FALSE);

        calc_shifts(box, fr->shift_vec);

        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
        {
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
            {
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, as_rvec_array(state->x.data()));
            }
            if (vsite)
            {
                /* Correct initial vsite positions are required
                 * for the initial distribution in the domain decomposition
                 * and for the initial shell prediction.
                 */
                construct_vsites_mtop(vsite, mtop, as_rvec_array(state->x.data()));
            }
        }

        if (EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype))
        {
            ewaldcoeff_q  = fr->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ewaldcoeff_lj;
            pmedata       = &fr->pmedata;
        }
        else
        {
            pmedata = nullptr;
        }
    }
    else
    {
        /* This is a PME only node */

        /* We don't need the state */
        stateInstance.reset();
        state = nullptr;

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
        snew(pmedata, 1);
    }
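
    /* Editor's note: the PME-only branch above recomputes the Ewald
     * splitting coefficients locally instead of copying them from a
     * forcerec, which PME-only ranks never build. The values come out
     * identical on every rank because they depend only on inputrec
     * parameters (rcoulomb/rvdw and the ewald_rtol tolerances). */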

    if (hw_opt.thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed,
         * which indicates that probably the OpenMP library has changed it
         * since we first checked.
         */
        gmx_check_thread_affinity_set(mdlog, cr,
                                      &hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        int nthread_local;
        /* threads on this MPI process or TMPI thread */
        if (cr->duty & DUTY_PP)
        {
            nthread_local = gmx_omp_nthreads_get(emntNonbonded);
        }
        else
        {
            nthread_local = gmx_omp_nthreads_get(emntPME);
        }

        /* Set the CPU affinity */
        gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
                                nthread_local, nullptr);
    }

    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
    {
        if (mdatoms)
        {
            nChargePerturbed = mdatoms->nChargePerturbed;
            if (EVDW_PME(inputrec->vdwtype))
            {
                nTypePerturbed = mdatoms->nTypePerturbed;
            }
        }
        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
        }

        if (cr->duty & DUTY_PME)
        {
            try
            {
                status = gmx_pme_init(pmedata, cr, npme_major, npme_minor, inputrec,
                                      mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
                                      (Flags & MD_REPRODUCIBLE),
                                      ewaldcoeff_q, ewaldcoeff_lj,
                                      nthreads_pme);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            if (status != 0)
            {
                gmx_fatal(FARGS, "Error %d initializing PME", status);
            }
        }
    }

    if (EI_DYNAMICS(inputrec->eI))
    {
        /* Turn on signal handling on all nodes.
         * (A user signal from the PME nodes, if any,
         * is communicated to the PP nodes.)
         */
        signal_handler_install();
    }

    if (cr->duty & DUTY_PP)
    {
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));

        if (inputrec->bPull)
        {
            /* Initialize pull code */
            inputrec->pull_work =
                init_pull(fplog, inputrec->pull, inputrec, nfile, fnm,
                          mtop, cr, oenv, inputrec->fepvals->init_lambda,
                          EI_DYNAMICS(inputrec->eI) && MASTER(cr), Flags);
        }

        if (inputrec->bRot)
        {
            /* Initialize enforced rotation code */
            init_rot(fplog, inputrec, nfile, fnm, cr, as_rvec_array(state->x.data()), state->box, mtop, oenv,
                     bVerbose, Flags);
        }

        /* Let init_constraints know whether we have essential dynamics constraints.
         * TODO: inputrec should tell us whether we use an algorithm, not a file option or the checkpoint
         */
        bool doEdsam = (opt2fn_null("-ei", nfile, fnm) != nullptr || observablesHistory.edsamHistory);

        constr = init_constraints(fplog, mtop, inputrec, doEdsam, cr);

        if (DOMAINDECOMP(cr))
        {
            GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
            /* This call is not included in init_domain_decomposition mainly
             * because fr->cginfo_mb is set later.
             */
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
                            Flags & MD_DDBONDCHECK, fr->cginfo_mb);
        }

        /* Now do whatever the user wants us to do (how flexible...) */
        my_integrator(inputrec->eI) (fplog, cr, mdlog, nfile, fnm,
                                     oenv, bVerbose,
                                     nstglobalcomm,
                                     vsite, constr,
                                     nstepout, mdModules.outputProvider(),
                                     inputrec, mtop,
                                     fcd, state, &observablesHistory,
                                     mdatoms, nrnb, wcycle, fr,
                                     replExParams,
                                     membed,
                                     cpt_period, max_hours,
                                     imdport,
                                     Flags,
                                     walltime_accounting);

        if (inputrec->bRot)
        {
            finish_rot(inputrec->rot);
        }

        if (inputrec->bPull)
        {
            finish_pull(inputrec->pull_work);
        }
    }
    else
    {
        GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
        /* do PME only */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(*pmedata, cr, nrnb, wcycle, walltime_accounting, ewaldcoeff_q, ewaldcoeff_lj, inputrec);
    }

    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up, write some stuff
     * if rerunMD, don't write last frame again
     */
    finish_run(fplog, mdlog, cr,
               inputrec, nrnb, wcycle, walltime_accounting,
               fr ? fr->nbv : nullptr,
               EI_DYNAMICS(inputrec->eI) && !MULTISIM(cr));

    // Free PME data
    if (pmedata)
    {
        gmx_pme_destroy(*pmedata); // TODO: pmedata is always a single element list, refactor
        pmedata = nullptr;
    }

    /* Free GPU memory and context */
    free_gpu_resources(fr, cr, shortRangedDeviceInfo);

    if (doMembed)
    {
        free_membed(membed);
    }

    gmx_hardware_info_free(hwinfo);

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
    walltime_accounting_destroy(walltime_accounting);

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && (Flags & MD_APPENDFILES))
    {
        gmx_log_close(fplog);
    }

    rc = (int)gmx_get_stop_condition();

#if GMX_THREAD_MPI
    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))
    {
        tMPI_Finalize();
    }
#endif

    return rc;
}

} // namespace gmx