/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements the MD runner routine calling all integrators.
 *
 * \author David van der Spoel <david.vanderspoel@icm.uu.se>
 * \ingroup module_mdlib
 */
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald-utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fileio/checkpoint.h"
#include "gromacs/fileio/oenv.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/printhardware.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/calc_verletbuf.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/integrator.h"
#include "gromacs/mdlib/main.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/minimize.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_tuning.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/sighandler.h"
#include "gromacs/mdlib/sim_util.h"
#include "gromacs/mdlib/tpi.h"
#include "gromacs/mdrunutility/mdmodules.h"
#include "gromacs/mdrunutility/threadaffinity.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/observableshistory.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/taskassignment/decidegpuusage.h"
#include "gromacs/taskassignment/resourcedivision.h"
#include "gromacs/taskassignment/taskassignment.h"
#include "gromacs/taskassignment/usergpuids.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/trajectory/trajectoryframe.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/filestream.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/loggerbuilder.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"

#if GMX_FAHCORE
#include "corewrap.h"
#endif
//! First step used in pressure scaling
gmx_int64_t         deform_init_init_step_tpx;
//! Initial box for pressure scaling
matrix              deform_init_box_tpx;
//! MPI variable for use in pressure scaling
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
/*! \brief Barrier for safe simultaneous thread access to mdrunner data
 *
 * Used to ensure that the master thread does not modify mdrunner during copy
 * on the spawned threads. */
static void threadMpiMdrunnerAccessBarrier()
{
#if GMX_THREAD_MPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
}
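
/* The barrier is used in matched pairs; schematically (both call sites are
 * in this file):
 * \code
 *   // master thread, in spawnThreads(), after tMPI_Init_fn():
 *   threadMpiMdrunnerAccessBarrier(); // wait for spawned threads to copy the runner
 *   // spawned threads, in reinitializeOnSpawnedThread():
 *   threadMpiMdrunnerAccessBarrier(); // release the master once the copy is done
 * \endcode */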
void Mdrunner::reinitializeOnSpawnedThread()
{
    // TODO This duplication is formally necessary if any thread might
    // modify any memory in fnm or the pointers it contains. If the
    // contents are ever provably const, then we can remove this
    // allocation (and memory leak).
    // TODO This should probably become part of a copy constructor for
    // Mdrunner.
    fnm = dup_tfn(nfile, fnm);

    threadMpiMdrunnerAccessBarrier();

    cr  = reinitialize_commrec_for_this_thread(cr, ms);

    GMX_RELEASE_ASSERT(!MASTER(cr), "reinitializeOnSpawnedThread should only be called on spawned threads");

    // Only the master rank writes to the log file
    fplog = nullptr;
}
/*! \brief The callback used for running on spawned threads.
 *
 * Obtains the pointer to the master mdrunner object from the one
 * argument permitted to the thread-launch API call, copies it to make
 * a new runner for this thread, reinitializes necessary data, and
 * proceeds to the simulation. */
static void mdrunner_start_fn(const void *arg)
{
    try
    {
        auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner *>(arg);
        /* copy the arg list to make sure that it's thread-local. This
           doesn't copy pointed-to items, of course; fnm, cr and fplog
           are reset in the call below, all others should be const. */
        gmx::Mdrunner mdrunner = *masterMdrunner;
        mdrunner.reinitializeOnSpawnedThread();
        mdrunner.mdrunner();
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}
/*! \brief Start thread-MPI threads.
 *
 * Called by mdrunner() to start a specific number of threads
 * (including the main thread) for thread-parallel runs. This in turn
 * calls mdrunner() for each thread. All options are the same as for
 * mdrunner(). */
t_commrec *Mdrunner::spawnThreads(int numThreadsToLaunch) const
{
    /* first check whether we even need to start tMPI */
    if (numThreadsToLaunch < 2)
    {
        return cr;
    }

#if GMX_THREAD_MPI
    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns, we set thread affinity later */
    if (tMPI_Init_fn(TRUE, numThreadsToLaunch, TMPI_AFFINITY_NONE,
                     mdrunner_start_fn, static_cast<const void*>(this)) != TMPI_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to spawn thread-MPI threads"));
    }

    threadMpiMdrunnerAccessBarrier();
#else
    GMX_UNUSED_VALUE(mdrunner_start_fn);
#endif

    return reinitialize_commrec_for_this_thread(cr, ms);
}
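
/* Usage sketch (wired up below in mdrunner(); the thread count is
 * illustrative):
 * \code
 *   hw_opt.nthreads_tmpi = get_nthreads_mpi(...); // e.g. 4 ranks chosen
 *   cr = spawnThreads(hw_opt.nthreads_tmpi);      // main thread gets a fresh commrec
 * \endcode
 * Each spawned thread enters mdrunner_start_fn() and runs all of
 * Mdrunner::mdrunner() on its own copy of this runner object. */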
/*! \brief Initialize variables for Verlet scheme simulation */
static void prepare_verlet_scheme(FILE               *fplog,
                                  t_commrec          *cr,
                                  t_inputrec         *ir,
                                  int                 nstlist_cmdline,
                                  const gmx_mtop_t   *mtop,
                                  const matrix        box,
                                  bool                makeGpuPairList,
                                  const gmx::CpuInfo &cpuinfo)
{
    /* For NVE simulations, we will retain the initial list buffer */
    if (EI_DYNAMICS(ir->eI) &&
        ir->verletbuf_tol > 0 &&
        !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        /* Update the Verlet buffer size for the current run setup */

        /* Here we assume SIMD-enabled kernels are being used. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        ListSetupType      listType  = (makeGpuPairList ? ListSetupType::Gpu : ListSetupType::CpuSimdWhenSupported);
        VerletbufListSetup listSetup = verletbufGetSafeListSetup(listType);

        real               rlist_new;
        calc_verlet_buffer_size(mtop, det(box), ir, ir->nstlist, ir->nstlist - 1, -1, &listSetup, nullptr, &rlist_new);

        if (rlist_new != ir->rlist)
        {
            if (fplog != nullptr)
            {
                fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_new,
                        listSetup.cluster_size_i, listSetup.cluster_size_j);
            }
            ir->rlist = rlist_new;
        }
    }

    if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
    {
        gmx_fatal(FARGS, "Can not set nstlist without %s",
                  !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");
    }

    if (EI_DYNAMICS(ir->eI))
    {
        /* Set or try nstlist values */
        increaseNstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, makeGpuPairList, cpuinfo);
    }
}
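
/* Illustrative effect (hypothetical numbers): with a GPU pair list and a
 * typical verlet-buffer-tolerance, calc_verlet_buffer_size() might enlarge
 * rlist from 1.0 nm to ~1.1 nm, and the fprintf above records that change
 * in the log file. */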
/*! \brief Override the nsteps value in inputrec
 *
 * with value passed on the command line (if any) */
static void override_nsteps_cmdline(const gmx::MDLogger &mdlog,
                                    gmx_int64_t          nsteps_cmdline,
                                    t_inputrec          *ir)
{
    /* override with anything else than the default -2 */
    if (nsteps_cmdline > -2)
    {
        char sbuf_steps[STEPSTRSIZE];
        char sbuf_msg[STRLEN];

        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps),
                    fabs(nsteps_cmdline*ir->delta_t));
        }
        else
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps));
        }

        GMX_LOG(mdlog.warning).asParagraph().appendText(sbuf_msg);
    }
    else if (nsteps_cmdline < -2)
    {
        gmx_fatal(FARGS, "Invalid nsteps value passed on the command line: %" GMX_PRId64,
                  nsteps_cmdline);
    }
    /* Do nothing if nsteps_cmdline == -2 */
}
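
/* Semantics of nsteps_cmdline (the value of mdrun -nsteps): -2 is the
 * default and means "keep ir->nsteps from the .tpr", -1 requests an
 * effectively unlimited run (hence no time estimate above), and values
 * >= 0 override ir->nsteps directly, e.g.
 * \code
 *   override_nsteps_cmdline(mdlog, 5000, inputrec); // run exactly 5000 steps
 * \endcode */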
/*! \brief Return whether GPU acceleration of nonbondeds is supported with the given settings.
 *
 * If not, and if a warning may be issued, logs a warning about
 * falling back to CPU code. With thread-MPI, only the first
 * call to this function should have \c issueWarning true. */
static bool gpuAccelerationOfNonbondedIsUseful(const MDLogger   &mdlog,
                                               const t_inputrec *ir,
                                               bool              issueWarning)
{
    if (ir->opts.ngener - ir->nwall > 1)
    {
        /* The GPU code does not support more than one energy group.
         * If the user requested GPUs explicitly, a fatal error is given later.
         */
        if (issueWarning)
        {
            GMX_LOG(mdlog.warning).asParagraph()
                .appendText("Multiple energy groups is not implemented for GPUs, falling back to the CPU. "
                            "For better performance, run on the GPU without energy groups and then do "
                            "gmx mdrun -rerun option on the trajectory with an energy group .tpr file.");
        }
        return false;
    }

    return true;
}
//! \brief Return the correct integrator function.
static integrator_t *my_integrator(unsigned int ei)
{
    switch (ei)
    {
        case eiMD:
        case eiBD:
        case eiSD1:
        case eiVV:
        case eiVVAK:
            if (!EI_DYNAMICS(ei))
            {
                GMX_THROW(APIError("do_md integrator would be called for a non-dynamical integrator"));
            }
            return do_md;
        case eiSteep:
            return do_steep;
        case eiCG:
            return do_cg;
        case eiNM:
            return do_nm;
        case eiLBFGS:
            return do_lbfgs;
        case eiTPI:
        case eiTPIC:
            if (!EI_TPI(ei))
            {
                GMX_THROW(APIError("do_tpi integrator would be called for a non-TPI integrator"));
            }
            return do_tpi;
        case eiSD2_REMOVED:
            GMX_THROW(NotImplementedError("SD2 integrator has been removed"));
        default:
            GMX_THROW(APIError("Non existing integrator selected"));
    }
}
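
/* The returned pointer is invoked near the end of mdrunner(), in the style of
 * \code
 *   my_integrator(inputrec->eI) (fplog, cr, ms, mdlog, nfile, fnm, ...);
 * \endcode
 * so adding a new integrator means extending both this switch and the
 * integrator_t signature (see gromacs/mdlib/integrator.h, included above). */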
//! Initializes the logger for mdrun.
static gmx::LoggerOwner buildLogger(FILE *fplog, const t_commrec *cr)
{
    gmx::LoggerBuilder builder;
    if (fplog != nullptr)
    {
        builder.addTargetFile(gmx::MDLogger::LogLevel::Info, fplog);
    }
    if (cr == nullptr || SIMMASTER(cr))
    {
        builder.addTargetStream(gmx::MDLogger::LogLevel::Warning,
                                &gmx::TextOutputFile::standardError());
    }
    return builder.build();
}
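
/* Usage pattern in this file: the logger is built once before hardware
 * detection and rebuilt if log-file appending reopens fplog, e.g.
 * \code
 *   gmx::LoggerOwner logOwner(buildLogger(fplog, cr));
 *   gmx::MDLogger    mdlog(logOwner.logger());
 * \endcode */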
//! Make a TaskTarget from an mdrun argument string.
static TaskTarget findTaskTarget(const char *optionString)
{
    TaskTarget returnValue = TaskTarget::Auto;

    if (strncmp(optionString, "auto", 3) == 0)
    {
        returnValue = TaskTarget::Auto;
    }
    else if (strncmp(optionString, "cpu", 3) == 0)
    {
        returnValue = TaskTarget::Cpu;
    }
    else if (strncmp(optionString, "gpu", 3) == 0)
    {
        returnValue = TaskTarget::Gpu;
    }
    else
    {
        GMX_ASSERT(false, "Option string should have been checked for sanity already");
    }

    return returnValue;
}
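
/* Example mapping, as used below for the -nb/-pme/-pmefft option strings:
 * \code
 *   auto nonbondedTarget = findTaskTarget("gpu");  // TaskTarget::Gpu
 *   auto pmeTarget       = findTaskTarget("auto"); // TaskTarget::Auto
 * \endcode
 * The strings are validated during command-line parsing, which is why the
 * default branch only asserts. */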
int Mdrunner::mdrunner()
{
    matrix                    box;
    gmx_ddbox_t               ddbox = {0};
    int                       npme_major, npme_minor;
    t_nrnb                   *nrnb;
    gmx_mtop_t               *mtop          = nullptr;
    t_forcerec               *fr            = nullptr;
    t_fcdata                 *fcd           = nullptr;
    real                      ewaldcoeff_q  = 0;
    real                      ewaldcoeff_lj = 0;
    gmx_vsite_t              *vsite         = nullptr;
    gmx_constr_t              constr;
    int                       nChargePerturbed = -1, nTypePerturbed = 0;
    gmx_wallcycle_t           wcycle;
    gmx_walltime_accounting_t walltime_accounting = nullptr;
    int                       rc;
    gmx_int64_t               reset_counters;
    int                       nthreads_pme = 1;
    gmx_membed_t *            membed       = nullptr;
    gmx_hw_info_t            *hwinfo       = nullptr;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    std::unique_ptr<gmx::MDModules> mdModules(new gmx::MDModules);
    t_inputrec                      inputrecInstance;
    t_inputrec                     *inputrec = &inputrecInstance;
    snew(mtop, 1);
    if (mdrunOptions.continuationOptions.appendFiles)
    {
        fplog = nullptr;
    }

    bool doMembed = opt2bSet("-membed", nfile, fnm);
    bool doRerun  = mdrunOptions.rerun;
    // Handle task-assignment related user options.
    EmulateGpuNonbonded emulateGpuNonbonded = (getenv("GMX_EMULATE_GPU") != nullptr ?
                                               EmulateGpuNonbonded::Yes : EmulateGpuNonbonded::No);
    std::vector<int>    gpuIdsAvailable;
    try
    {
        gpuIdsAvailable = parseUserGpuIds(hw_opt.gpuIdsAvailable);
        // TODO We could put the GPU IDs into a std::map to find
        // duplicates, but for the small numbers of IDs involved, this
        // code is simple and fast.
        for (size_t i = 0; i != gpuIdsAvailable.size(); ++i)
        {
            for (size_t j = i+1; j != gpuIdsAvailable.size(); ++j)
            {
                if (gpuIdsAvailable[i] == gpuIdsAvailable[j])
                {
                    GMX_THROW(InvalidInputError(formatString("The string of available GPU device IDs '%s' may not contain duplicate device IDs", hw_opt.gpuIdsAvailable.c_str())));
                }
            }
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
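
    /* The TODO above could instead use a standard container; a sketch
     * (illustrative only, not the code used here):
     * \code
     *   std::set<int> uniqueIds(gpuIdsAvailable.begin(), gpuIdsAvailable.end());
     *   bool          hasDuplicates = (uniqueIds.size() != gpuIdsAvailable.size());
     * \endcode
     * The quadratic loop above is kept because the ID lists are tiny. */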
    std::vector<int> userGpuTaskAssignment;
    try
    {
        userGpuTaskAssignment = parseUserGpuIds(hw_opt.userGpuTaskAssignment);
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    auto       nonbondedTarget = findTaskTarget(nbpu_opt);
    auto       pmeTarget       = findTaskTarget(pme_opt);
    auto       pmeFftTarget    = findTaskTarget(pme_fft_opt);
    PmeRunMode pmeRunMode      = PmeRunMode::None;
    // Here we assume that SIMMASTER(cr) does not change even after the
    // threads are started.
    gmx::LoggerOwner logOwner(buildLogger(fplog, cr));
    gmx::MDLogger    mdlog(logOwner.logger());

    hwinfo = gmx_detect_hardware(mdlog);

    gmx_print_detected_hardware(fplog, cr, ms, mdlog, hwinfo);
    std::vector<int> gpuIdsToUse;
    auto             compatibleGpus = getCompatibleGpus(hwinfo->gpu_info);
    if (gpuIdsAvailable.empty())
    {
        gpuIdsToUse = compatibleGpus;
    }
    else
    {
        for (const auto &availableGpuId : gpuIdsAvailable)
        {
            bool availableGpuIsCompatible = false;
            for (const auto &compatibleGpuId : compatibleGpus)
            {
                if (availableGpuId == compatibleGpuId)
                {
                    availableGpuIsCompatible = true;
                    break;
                }
            }
            if (!availableGpuIsCompatible)
            {
                gmx_fatal(FARGS, "You limited the set of compatible GPUs to a set that included ID #%d, but that ID is not for a compatible GPU. List only compatible GPUs.", availableGpuId);
            }
            gpuIdsToUse.push_back(availableGpuId);
        }
    }
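
    // For example (hypothetical device IDs): with compatible GPUs {0,1,2} and
    // mdrun -gpu_id 02, gpuIdsToUse becomes {0,2}; requesting -gpu_id 03 is a
    // fatal error because device 3 is not in the compatible set.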
    if (fplog != nullptr)
    {
        /* Print references after all software/hardware printing */
        please_cite(fplog, "Abraham2015");
        please_cite(fplog, "Pall2015");
        please_cite(fplog, "Pronk2013");
        please_cite(fplog, "Hess2008b");
        please_cite(fplog, "Spoel2005a");
        please_cite(fplog, "Lindahl2001a");
        please_cite(fplog, "Berendsen95a");
    }
    std::unique_ptr<t_state> globalState;

    if (SIMMASTER(cr))
    {
        /* Only the master rank has the global state */
        globalState = std::unique_ptr<t_state>(new t_state);

        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, globalState.get(), mtop);

        if (inputrec->cutoff_scheme != ecutsVERLET)
        {
            if (nstlist_cmdline > 0)
            {
                gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
            }

            if (!compatibleGpus.empty())
            {
                GMX_LOG(mdlog.warning).asParagraph().appendText(
                        "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                        "      To use a GPU, set the mdp option: cutoff-scheme = Verlet");
            }
        }
    }

    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(&hw_opt, cr, domdecOptions.numPmeRanks);
    /* Early check for externally set process affinity. */
    gmx_check_thread_affinity_set(mdlog, cr,
                                  &hw_opt, hwinfo->nthreads_hw_avail, FALSE);
    if (GMX_THREAD_MPI && SIMMASTER(cr))
    {
        if (domdecOptions.numPmeRanks > 0 && hw_opt.nthreads_tmpi <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
        }

        /* Since the master knows the cut-off scheme, update hw_opt for this.
         * This is done later for normal MPI and also once more with tMPI
         * for all tMPI ranks.
         */
        check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);

        bool useGpuForNonbonded = false;
        bool useGpuForPme       = false;
        try
        {
            // If the user specified the number of ranks, then we must
            // respect that, but in default mode, we need to allow for
            // the number of GPUs to choose the number of ranks.
            useGpuForNonbonded = decideWhetherToUseGpusForNonbondedWithThreadMpi
                    (nonbondedTarget, gpuIdsToUse, userGpuTaskAssignment, emulateGpuNonbonded,
                    inputrec->cutoff_scheme == ecutsVERLET,
                    gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, GMX_THREAD_MPI),
                    hw_opt.nthreads_tmpi);
            auto inputSystemHasPme = EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype);
            auto canUseGpuForPme   = inputSystemHasPme && pme_gpu_supports_input(inputrec, nullptr);
            useGpuForPme = decideWhetherToUseGpusForPmeWithThreadMpi
                    (useGpuForNonbonded, pmeTarget, gpuIdsToUse, userGpuTaskAssignment,
                    canUseGpuForPme, hw_opt.nthreads_tmpi, domdecOptions.numPmeRanks);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

        /* Determine how many thread-MPI ranks to start.
         *
         * TODO Over-writing the user-supplied value here does
         * prevent any possible subsequent checks from working
         * correctly. */
        hw_opt.nthreads_tmpi = get_nthreads_mpi(hwinfo,
                                                &hw_opt,
                                                gpuIdsToUse,
                                                useGpuForNonbonded,
                                                useGpuForPme,
                                                inputrec, mtop,
                                                mdlog,
                                                doMembed);

        // Now start the threads for thread MPI.
        cr = spawnThreads(hw_opt.nthreads_tmpi);
        /* The main thread continues here with a new cr. We don't deallocate
           the old cr because other threads may still be reading it. */
        // TODO Both master and spawned threads call dup_tfn and
        // reinitialize_commrec_for_this_thread. Find a way to express
        // this better.
    }
    /* END OF CAUTION: cr is now reliable */
    if (PAR(cr))
    {
        /* now broadcast everything to the non-master nodes/threads: */
        init_parallel(cr, inputrec, mtop);
    }

    // Now each rank knows the inputrec that SIMMASTER read and used,
    // and (if applicable) cr->nnodes has been assigned the number of
    // thread-MPI ranks that have been chosen. The ranks can now all
    // run the task-deciding functions and will agree on the result
    // without needing to communicate.
    //
    // TODO Should we do the communication in debug mode to support
    // having an assertion?
    //
    // Note that these variables describe only their own node.
    bool useGpuForNonbonded = false;
    bool useGpuForPme       = false;
    try
    {
        // It's possible that there are different numbers of GPUs on
        // different nodes, which is the user's responsibility to
        // handle. If unsuitable, we will notice that during task
        // assignment.
        bool gpusWereDetected = hwinfo->ngpu_compatible_tot > 0;
        useGpuForNonbonded = decideWhetherToUseGpusForNonbonded(nonbondedTarget, userGpuTaskAssignment,
                                                                emulateGpuNonbonded, inputrec->cutoff_scheme == ecutsVERLET,
                                                                gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, !GMX_THREAD_MPI),
                                                                gpusWereDetected);
        auto inputSystemHasPme = EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype);
        auto canUseGpuForPme   = inputSystemHasPme && pme_gpu_supports_input(inputrec, nullptr);
        useGpuForPme = decideWhetherToUseGpusForPme(useGpuForNonbonded, pmeTarget, userGpuTaskAssignment,
                                                    canUseGpuForPme, cr->nnodes, domdecOptions.numPmeRanks,
                                                    gpusWereDetected);

        pmeRunMode = (useGpuForPme ? PmeRunMode::GPU : PmeRunMode::CPU);
        if (pmeRunMode == PmeRunMode::GPU)
        {
            if (pmeFftTarget == TaskTarget::Cpu)
            {
                pmeRunMode = PmeRunMode::Mixed;
            }
        }
        else if (pmeFftTarget == TaskTarget::Gpu)
        {
            gmx_fatal(FARGS, "Assigning FFTs to GPU requires PME to be assigned to GPU as well. With PME on CPU you should not be using -pmefft.");
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
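
    // In summary, the decisions above map the user options as follows
    // (illustrative): -pme gpu with -pmefft gpu (or unset) gives
    // PmeRunMode::GPU; -pme gpu with -pmefft cpu gives PmeRunMode::Mixed
    // (FFTs on the CPU); -pme cpu gives PmeRunMode::CPU.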
    // TODO: Error handling
    mdModules->assignOptionsToModules(*inputrec->params, nullptr);

    if (fplog != nullptr)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
        fprintf(fplog, "\n");
    }
    if (SIMMASTER(cr))
    {
        /* now make sure the state is initialized and propagated */
        set_state_entries(globalState.get(), inputrec);
    }

    /* NM and TPI parallelize over force/energy calculations, not atoms,
     * so we need to initialize and broadcast the global state.
     */
    if (inputrec->eI == eiNM || inputrec->eI == eiTPI)
    {
        if (!MASTER(cr))
        {
            globalState = std::unique_ptr<t_state>(new t_state);
        }
        broadcastStateWithoutDynamics(cr, globalState.get());
    }
    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) && (domdecOptions.numCells[XX] > 1 ||
                     domdecOptions.numCells[YY] > 1 ||
                     domdecOptions.numCells[ZZ] > 1 ||
                     domdecOptions.numPmeRanks > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme option request a parallel simulation, "
#if !GMX_MPI
                  "but %s was compiled without threads or MPI enabled"
#else
#if GMX_THREAD_MPI
                  "but the number of MPI-threads (option -ntmpi) is not set or is 1"
#else
                  "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
#endif
#endif
                  , output_env_get_program_display_name(oenv)
                  );
    }

    if (doRerun &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }
    if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
    {
        gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
    }

    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
    {
        if (domdecOptions.numPmeRanks > 0)
        {
            gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
        }

        domdecOptions.numPmeRanks = 0;
    }
    if (useGpuForNonbonded && domdecOptions.numPmeRanks < 0)
    {
        /* With NB GPUs we don't automatically use PME-only CPU ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */
        domdecOptions.numPmeRanks = 0;
    }
    if (useGpuForPme)
    {
        if (domdecOptions.numPmeRanks < 0)
        {
            domdecOptions.numPmeRanks = 0;
            // TODO possibly print a note that one can opt-in for a separate PME GPU rank?
        }
        else
        {
            GMX_RELEASE_ASSERT(domdecOptions.numPmeRanks <= 1, "PME GPU decomposition is not supported");
        }
    }
#if GMX_FAHCORE
    if (MASTER(cr))
    {
        fcRegisterSteps(inputrec->nsteps, inputrec->init_step);
    }
#endif
    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, mtop, inputrec, cr, ms, fcd, globalState.get(), replExParams.exchangeInterval > 0);

    init_orires(fplog, mtop, inputrec, cr, ms, globalState.get(), &(fcd->orires));
    if (inputrecDeform(inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        if (SIMMASTER(cr))
        {
            copy_mat(globalState->box, box);
        }
        if (PAR(cr))
        {
            gmx_bcast(sizeof(box), box, cr);
        }
        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
    }
    ObservablesHistory observablesHistory = {};

    ContinuationOptions &continuationOptions = mdrunOptions.continuationOptions;

    if (continuationOptions.startedFromCheckpoint)
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        gmx_bool bReadEkin;

        load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                        cr, domdecOptions.numCells,
                        inputrec, globalState.get(),
                        &bReadEkin, &observablesHistory,
                        continuationOptions.appendFiles,
                        continuationOptions.appendFilesOptionSet,
                        mdrunOptions.reproducible);

        if (bReadEkin)
        {
            continuationOptions.haveReadEkin = true;
        }
    }
    if (SIMMASTER(cr) && continuationOptions.appendFiles)
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
                     continuationOptions.appendFiles, &fplog);
        logOwner = buildLogger(fplog, nullptr);
        mdlog    = logOwner.logger();
    }
    if (mdrunOptions.numStepsCommandline > -2)
    {
        GMX_LOG(mdlog.info).asParagraph().
            appendText("The -nsteps functionality is deprecated, and may be removed in a future version. "
                       "Consider using gmx convert-tpr -nsteps or changing the appropriate .mdp file field.");
    }
    /* override nsteps with value set on the command line */
    override_nsteps_cmdline(mdlog, mdrunOptions.numStepsCommandline, inputrec);
    if (SIMMASTER(cr))
    {
        copy_mat(globalState->box, box);
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(box), box, cr);
    }
    /* Update rlist and nstlist. */
    if (inputrec->cutoff_scheme == ecutsVERLET)
    {
        prepare_verlet_scheme(fplog, cr, inputrec, nstlist_cmdline, mtop, box,
                              useGpuForNonbonded || (emulateGpuNonbonded == EmulateGpuNonbonded::Yes), *hwinfo->cpuInfo);
    }
    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        const rvec *xOnMaster = (SIMMASTER(cr) ? as_rvec_array(globalState->x.data()) : nullptr);

        cr->dd = init_domain_decomposition(fplog, cr, domdecOptions, mdrunOptions,
                                           mtop, inputrec,
                                           box, xOnMaster,
                                           &ddbox, &npme_major, &npme_minor);
        // Note that local state still does not exist yet.
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->npmenodes = 0;
        cr->duty      = (DUTY_PP | DUTY_PME);
        npme_major    = 1;
        npme_minor    = 1;

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }
    if (PAR(cr))
    {
        /* After possible communicator splitting in make_dd_communicators.
         * we can set up the intra/inter node communication.
         */
        gmx_setup_nodecomm(fplog, cr);
    }

    /* Initialize per-physical-node MPI process/thread ID and counters. */
    gmx_init_intranode_counters(cr);
    if (isMultiSim(ms))
    {
        GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
                "This is simulation %d out of %d running as a composite GROMACS\n"
                "multi-simulation job. Setup for this simulation:\n",
                ms->sim, ms->nsim);
    }
    GMX_LOG(mdlog.warning).appendTextFormatted(
            "Using %d MPI %s\n",
            cr->nnodes,
#if GMX_THREAD_MPI
            cr->nnodes == 1 ? "thread" : "threads"
#else
            cr->nnodes == 1 ? "process" : "processes"
#endif
            );
    fflush(stderr);
    /* Check and update hw_opt for the cut-off scheme */
    check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);

    /* Check and update the number of OpenMP threads requested */
    checkAndUpdateRequestedNumOpenmpThreads(&hw_opt, *hwinfo, cr, ms, pmeRunMode, *mtop);
    gmx_omp_nthreads_init(mdlog, cr,
                          hwinfo->nthreads_hw_avail,
                          hw_opt.nthreads_omp,
                          hw_opt.nthreads_omp_pme,
                          !thisRankHasDuty(cr, DUTY_PP),
                          inputrec->cutoff_scheme == ecutsVERLET);
#ifndef NDEBUG
    if (EI_TPI(inputrec->eI) &&
        inputrec->cutoff_scheme == ecutsVERLET)
    {
        gmx_feenableexcept();
    }
#endif
    // Build a data structure that expresses which kinds of non-bonded
    // task are handled by this rank.
    //
    // TODO Later, this might become a loop over all registered modules
    // relevant to the mdp inputs, to find those that have such tasks.
    //
    // TODO This could move before init_domain_decomposition() as part
    // of refactoring that separates the responsibility for duty
    // assignment from setup for communication between tasks, and
    // setup for tasks handled with a domain (ie including short-ranged
    // tasks, bonded tasks, etc.).
    //
    // Note that in general useGpuForNonbonded, etc. can have a value
    // that is inconsistent with the presence of actual GPUs on any
    // rank, and that is not known to be a problem until the
    // duty of the ranks on a node becomes known.
    //
    // TODO Later we might need the concept of computeTasksOnThisRank,
    // from which we construct gpuTasksOnThisRank.
    //
    // Currently the DD code assigns duty to ranks that can
    // include PP work that currently can be executed on a single
    // GPU, if present and compatible. This has to be coordinated
    // across PP ranks on a node, with possible multiple devices
    // or sharing devices on a node, either from the user
    // selection, or automatically.
    auto                 haveGpus = !gpuIdsToUse.empty();
    std::vector<GpuTask> gpuTasksOnThisRank;
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        if (useGpuForNonbonded)
        {
            if (haveGpus)
            {
                gpuTasksOnThisRank.push_back(GpuTask::Nonbonded);
            }
            else if (nonbondedTarget == TaskTarget::Gpu)
            {
                gmx_fatal(FARGS, "Cannot run short-ranged nonbonded interactions on a GPU because there is none detected.");
            }
        }
    }
    // TODO cr->duty & DUTY_PME should imply that a PME algorithm is active, but currently does not.
    if (EEL_PME(inputrec->coulombtype) && (thisRankHasDuty(cr, DUTY_PME)))
    {
        if (useGpuForPme)
        {
            if (haveGpus)
            {
                gpuTasksOnThisRank.push_back(GpuTask::Pme);
            }
            else if (pmeTarget == TaskTarget::Gpu)
            {
                gmx_fatal(FARGS, "Cannot run PME on a GPU because there is none detected.");
            }
        }
    }
    GpuTaskAssignment gpuTaskAssignment;
    try
    {
        // Produce the task assignment for this rank.
        gpuTaskAssignment = runTaskAssignment(gpuIdsToUse, userGpuTaskAssignment, *hwinfo,
                                              mdlog, cr, ms, gpuTasksOnThisRank);
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    /* Prevent other ranks from continuing after an issue was found
     * and reported as a fatal error.
     *
     * TODO This function implements a barrier so that MPI runtimes
     * can organize an orderly shutdown if one of the ranks has had to
     * issue a fatal error in various code already run. When we have
     * MPI-aware error handling and reporting, this should be
     * improved. */
#if GMX_MPI
    if (PAR(cr))
    {
        MPI_Barrier(cr->mpi_comm_mysim);
    }
    if (isMultiSim(ms))
    {
        MPI_Barrier(ms->mpi_comm_masters);
    }
#endif
    /* Now that we know the setup is consistent, check for efficiency */
    check_resource_division_efficiency(hwinfo, !gpuTaskAssignment.empty(), mdrunOptions.ntompOptionIsSet,
                                       cr, mdlog);
    gmx_device_info_t *nonbondedDeviceInfo = nullptr;

    if (thisRankHasDuty(cr, DUTY_PP))
    {
        // This works because only one task of each type is currently permitted.
        auto nbGpuTaskMapping = std::find_if(gpuTaskAssignment.begin(), gpuTaskAssignment.end(),
                                             hasTaskType<GpuTask::Nonbonded>);
        if (nbGpuTaskMapping != gpuTaskAssignment.end())
        {
            int nonbondedDeviceId = nbGpuTaskMapping->deviceId_;
            nonbondedDeviceInfo = getDeviceInfo(hwinfo->gpu_info, nonbondedDeviceId);
            init_gpu(mdlog, nonbondedDeviceInfo);

            if (DOMAINDECOMP(cr))
            {
                /* When we share GPUs over ranks, we need to know this for the DLB */
                dd_setup_dlb_resource_sharing(cr, nonbondedDeviceId);
            }
        }
    }
    gmx_device_info_t *pmeDeviceInfo = nullptr;
    // This works because only one task of each type is currently permitted.
    auto               pmeGpuTaskMapping = std::find_if(gpuTaskAssignment.begin(), gpuTaskAssignment.end(), hasTaskType<GpuTask::Pme>);
    if (pmeGpuTaskMapping != gpuTaskAssignment.end())
    {
        pmeDeviceInfo = getDeviceInfo(hwinfo->gpu_info, pmeGpuTaskMapping->deviceId_);
        init_gpu(mdlog, pmeDeviceInfo);
    }
    /* getting number of PP/PME threads
       PME: env variable should be read only on one node to make sure it is
       identical everywhere;
     */
    nthreads_pme = gmx_omp_nthreads_get(emntPME);

    int numThreadsOnThisRank;
    /* threads on this MPI process or TMPI thread */
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        numThreadsOnThisRank = gmx_omp_nthreads_get(emntNonbonded);
    }
    else
    {
        numThreadsOnThisRank = nthreads_pme;
    }

    checkHardwareOversubscription(numThreadsOnThisRank,
                                  *hwinfo->hardwareTopology,
                                  cr, mdlog);
    if (hw_opt.thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed
         * - which indicates that probably the OpenMP library has changed it
         * since we first checked.
         */
        gmx_check_thread_affinity_set(mdlog, cr,
                                      &hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        int numThreadsOnThisNode, intraNodeThreadOffset;
        analyzeThreadsOnThisNode(cr, ms, nullptr, numThreadsOnThisRank, &numThreadsOnThisNode,
                                 &intraNodeThreadOffset);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
                                numThreadsOnThisRank, numThreadsOnThisNode,
                                intraNodeThreadOffset, nullptr);
    }
    if (mdrunOptions.timingOptions.resetStep > -1)
    {
        GMX_LOG(mdlog.info).asParagraph().
            appendText("The -resetstep functionality is deprecated, and may be removed in a future version.");
    }
    wcycle = wallcycle_init(fplog, mdrunOptions.timingOptions.resetStep, cr);
    if (PAR(cr))
    {
        /* Master synchronizes its value of reset_counters with all nodes
         * including PME only nodes */
        reset_counters = wcycle_get_reset_counters(wcycle);
        gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
        wcycle_set_reset_counters(wcycle, reset_counters);
    }
    // Membrane embedding must be initialized before we call init_forcerec()
    if (doMembed)
    {
        if (MASTER(cr))
        {
            fprintf(stderr, "Initializing membed");
        }
        /* Note that membed cannot work in parallel because mtop is
         * changed here. Fix this if we ever want to make it run with
         * multiple ranks. */
        membed = init_membed(fplog, nfile, fnm, mtop, inputrec, globalState.get(), cr, &mdrunOptions.checkpointOptions.period);
    }
    std::unique_ptr<MDAtoms> mdAtoms;

    snew(nrnb, 1);
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        /* Initiate forcerecord */
        fr                 = mk_forcerec();
        fr->forceProviders = mdModules->initForceProviders();
        init_forcerec(fplog, mdlog, fr, fcd,
                      inputrec, mtop, cr, box,
                      opt2fn("-table", nfile, fnm),
                      opt2fn("-tablep", nfile, fnm),
                      getFilenm("-tableb", nfile, fnm),
                      *hwinfo, nonbondedDeviceInfo,
                      FALSE,
                      pforce);
        /* Initialize QM-MM */
        if (fr->bQMMM)
        {
            GMX_LOG(mdlog.info).asParagraph().
                appendText("Large parts of the QM/MM support is deprecated, and may be removed in a future "
                           "version. Please get in touch with the developers if you find the support useful, "
                           "as help is needed if the functionality is to continue to be available.");
            init_QMMMrec(cr, mtop, inputrec, fr);
        }
        /* Initialize the mdAtoms structure.
         * mdAtoms is not filled with atom data,
         * as this can not be done now with domain decomposition.
         */
        const bool useGpuForPme = (pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed);
        mdAtoms = makeMDAtoms(fplog, *mtop, *inputrec, useGpuForPme && thisRankHasDuty(cr, DUTY_PME));
        if (globalState)
        {
            // The pinning of coordinates in the global state object works, because we only use
            // PME on GPU without DD or on a separate PME rank, and because the local state pointer
            // points to the global state object without DD.
            // FIXME: MD and EM separately set up the local state - this should happen in the same function,
            // which should also perform the pinning.
            changePinningPolicy(&globalState->x, useGpuForPme ? PinningPolicy::CanBePinned : PinningPolicy::CannotBePinned);
        }
        /* Initialize the virtual site communication */
        vsite = initVsite(*mtop, cr);

        calc_shifts(box, fr->shift_vec);

        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
        {
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
            {
                rvec *xGlobal = as_rvec_array(globalState->x.data());
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, mtop, xGlobal);
            }
            if (vsite)
            {
                /* Correct initial vsite positions are required
                 * for the initial distribution in the domain decomposition
                 * and for the initial shell prediction.
                 */
                constructVsitesGlobal(*mtop, globalState->x);
            }
        }
        if (EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            ewaldcoeff_q  = fr->ic->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ic->ewaldcoeff_lj;
        }
    }
    else
    {
        /* This is a PME only node */
        GMX_ASSERT(globalState == nullptr, "We don't need the state on a PME only rank and expect it to be uninitialized");

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
    }
    gmx_pme_t *sepPmeData = nullptr;
    // This reference hides the fact that PME data is owned by runner on PME-only ranks and by forcerec on other ranks
    GMX_ASSERT(thisRankHasDuty(cr, DUTY_PP) == (fr != nullptr), "Double-checking that only PME-only ranks have no forcerec");
    gmx_pme_t * &pmedata = fr ? fr->pmedata : sepPmeData;
    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
    {
        if (mdAtoms && mdAtoms->mdatoms())
        {
            nChargePerturbed = mdAtoms->mdatoms()->nChargePerturbed;
            if (EVDW_PME(inputrec->vdwtype))
            {
                nTypePerturbed = mdAtoms->mdatoms()->nTypePerturbed;
            }
        }
        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
        }

        if (thisRankHasDuty(cr, DUTY_PME))
        {
            try
            {
                pmedata = gmx_pme_init(cr, npme_major, npme_minor, inputrec,
                                       mtop ? mtop->natoms : 0, nChargePerturbed, nTypePerturbed,
                                       mdrunOptions.reproducible,
                                       ewaldcoeff_q, ewaldcoeff_lj,
                                       nthreads_pme,
                                       pmeRunMode, nullptr, pmeDeviceInfo, mdlog);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
    }
    if (EI_DYNAMICS(inputrec->eI))
    {
        /* Turn on signal handling on all nodes */
        /*
         * (A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.
         */
        signal_handler_install();
    }
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));

        if (inputrec->bPull)
        {
            /* Initialize pull code */
            inputrec->pull_work =
                init_pull(fplog, inputrec->pull, inputrec, nfile, fnm,
                          mtop, cr, oenv, inputrec->fepvals->init_lambda,
                          EI_DYNAMICS(inputrec->eI) && MASTER(cr),
                          continuationOptions);
        }

        if (inputrec->bRot)
        {
            /* Initialize enforced rotation code */
            init_rot(fplog, inputrec, nfile, fnm, cr, globalState.get(), mtop, oenv, mdrunOptions);
        }
        /* Let init_constraints know whether we have essential dynamics constraints.
         * TODO: inputrec should tell us whether we use an algorithm, not a file option or the checkpoint
         */
        bool doEdsam = (opt2fn_null("-ei", nfile, fnm) != nullptr || observablesHistory.edsamHistory);

        constr = init_constraints(fplog, mtop, inputrec, doEdsam, cr);
        if (DOMAINDECOMP(cr))
        {
            GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
            /* This call is not included in init_domain_decomposition mainly
             * because fr->cginfo_mb is set later.
             */
            dd_init_bondeds(fplog, cr->dd, mtop, vsite, inputrec,
                            domdecOptions.checkBondedInteractions,
                            fr->cginfo_mb);
        }
        /* Now do whatever the user wants us to do (how flexible...) */
        my_integrator(inputrec->eI) (fplog, cr, ms, mdlog, nfile, fnm,
                                     oenv,
                                     mdrunOptions,
                                     vsite, constr,
                                     mdModules->outputProvider(),
                                     inputrec, mtop,
                                     fcd,
                                     globalState.get(),
                                     &observablesHistory,
                                     mdAtoms.get(), nrnb, wcycle, fr,
                                     replExParams,
                                     membed,
                                     walltime_accounting);
        if (inputrec->bRot)
        {
            finish_rot(inputrec->rot);
        }

        if (inputrec->bPull)
        {
            finish_pull(inputrec->pull_work);
        }
    }
    else
    {
        GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");
        /* do PME only */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(pmedata, cr, nrnb, wcycle, walltime_accounting, inputrec, pmeRunMode);
    }
    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up, write some stuff
     * if rerunMD, don't write last frame again
     */
    finish_run(fplog, mdlog, cr,
               inputrec, nrnb, wcycle, walltime_accounting,
               fr ? fr->nbv : nullptr,
               pmedata,
               EI_DYNAMICS(inputrec->eI) && !isMultiSim(ms));

    // Free PME data
    if (pmedata)
    {
        gmx_pme_destroy(pmedata);
        pmedata = nullptr;
    }
    // FIXME: this is only here to manually unpin mdAtoms->chargeA_ and state->x,
    // before we destroy the GPU context(s) in free_gpu_resources().
    // Pinned buffers are associated with contexts in CUDA.
    // As soon as we destroy GPU contexts after mdrunner() exits, these lines should go.
    mdAtoms.reset(nullptr);
    globalState.reset(nullptr);
    mdModules.reset(nullptr);   // destruct force providers here as they might also use the GPU

    /* Free GPU memory and set a physical node tMPI barrier (which should eventually go away) */
    free_gpu_resources(fr, cr, ms);
    free_gpu(nonbondedDeviceInfo);
    free_gpu(pmeDeviceInfo);
    if (doMembed)
    {
        free_membed(membed);
    }

    gmx_hardware_info_free();
    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
    walltime_accounting_destroy(walltime_accounting);
    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && continuationOptions.appendFiles)
    {
        gmx_log_close(fplog);
    }
= (int)gmx_get_stop_condition();
1413 /* we need to join all threads. The sub-threads join when they
1414 exit this function, but the master thread needs to be told to
1416 if (PAR(cr
) && MASTER(cr
))