 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2011,2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements the MD runner routine calling all integrators.
 *
 * \author David van der Spoel <david.vanderspoel@icm.uu.se>
 * \ingroup module_mdrun
 */
#include "gromacs/commandline/filenm.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald-utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/ewald/pme-gpu-program.h"
#include "gromacs/fileio/checkpoint.h"
#include "gromacs/fileio/oenv.h"
#include "gromacs/fileio/tpxio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gpu_utils/clfftinitializer.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/printhardware.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/calc_verletbuf.h"
#include "gromacs/mdlib/deform.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/main.h"
#include "gromacs/mdlib/makeconstraints.h"
#include "gromacs/mdlib/md_support.h"
#include "gromacs/mdlib/mdatoms.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/membed.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_tuning.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/repl_ex.h"
#include "gromacs/mdlib/sighandler.h"
#include "gromacs/mdlib/sim_util.h"
#include "gromacs/mdrunutility/mdmodules.h"
#include "gromacs/mdrunutility/threadaffinity.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/fcdata.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/observableshistory.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/taskassignment/decidegpuusage.h"
#include "gromacs/taskassignment/resourcedivision.h"
#include "gromacs/taskassignment/taskassignment.h"
#include "gromacs/taskassignment/usergpuids.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/trajectory/trajectoryframe.h"
#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/filestream.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/loggerbuilder.h"
#include "gromacs/utility/physicalnodecommunicator.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"

#include "integrator.h"

#include "corewrap.h"
//! First step used in pressure scaling
gmx_int64_t         deform_init_init_step_tpx;
//! Initial box for pressure scaling
matrix              deform_init_box_tpx;
//! MPI variable for use in pressure scaling
tMPI_Thread_mutex_t deform_init_box_mutex = TMPI_THREAD_MUTEX_INITIALIZER;
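/* These file-scope variables are assigned exactly once, under
 * deform_init_box_mutex, further down in mdrunner(); see the comment at that
 * assignment for why they are stored here temporarily. */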
/*! \brief Barrier for safe simultaneous thread access to mdrunner data
 *
 * Used to ensure that the master thread does not modify mdrunner during copy
 * on the spawned threads. */
static void threadMpiMdrunnerAccessBarrier()
{
    MPI_Barrier(MPI_COMM_WORLD);
}
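// Note: with thread-MPI, the barrier above pairs the call made in
// spawnThreads() on the master thread with the call made in
// reinitializeOnSpawnedThread() on each spawned thread, so the master cannot
// modify the Mdrunner object while it is being copied.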
void Mdrunner::reinitializeOnSpawnedThread()
{
    threadMpiMdrunnerAccessBarrier();

    cr = reinitialize_commrec_for_this_thread(cr);

    GMX_RELEASE_ASSERT(!MASTER(cr), "reinitializeOnSpawnedThread should only be called on spawned threads");

    // Only the master rank writes to the log file
    fplog = nullptr;
}
/*! \brief The callback used for running on spawned threads.
 *
 * Obtains the pointer to the master mdrunner object from the one
 * argument permitted to the thread-launch API call, copies it to make
 * a new runner for this thread, reinitializes necessary data, and
 * proceeds to the simulation. */
static void mdrunner_start_fn(const void *arg)
{
    try
    {
        auto masterMdrunner = reinterpret_cast<const gmx::Mdrunner *>(arg);
        /* copy the arg list to make sure that it's thread-local. This
           doesn't copy pointed-to items, of course; fnm, cr and fplog
           are reset in the call below, all others should be const. */
        gmx::Mdrunner mdrunner = *masterMdrunner;
        mdrunner.reinitializeOnSpawnedThread();
        mdrunner.mdrunner();
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}
/*! \brief Start thread-MPI threads.
 *
 * Called by mdrunner() to start a specific number of threads
 * (including the main thread) for thread-parallel runs. This in turn
 * calls mdrunner() for each thread. All options are the same as for
 * mdrunner(). */
t_commrec *Mdrunner::spawnThreads(int numThreadsToLaunch) const
{
    /* first check whether we even need to start tMPI */
    if (numThreadsToLaunch < 2)
    {
        return cr;
    }

#if GMX_THREAD_MPI
    /* now spawn new threads that start mdrunner_start_fn(), while
       the main thread returns, we set thread affinity later */
    if (tMPI_Init_fn(TRUE, numThreadsToLaunch, TMPI_AFFINITY_NONE,
                     mdrunner_start_fn, static_cast<const void*>(this)) != TMPI_SUCCESS)
    {
        GMX_THROW(gmx::InternalError("Failed to spawn thread-MPI threads"));
    }

    threadMpiMdrunnerAccessBarrier();
#else
    GMX_UNUSED_VALUE(mdrunner_start_fn);
#endif

    return reinitialize_commrec_for_this_thread(cr);
}
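// Note: only the master thread returns from spawnThreads() above, with a
// freshly initialized communication record; the spawned threads enter the
// simulation through mdrunner_start_fn() and finish when it completes.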
/*! \brief Initialize variables for Verlet scheme simulation */
static void prepare_verlet_scheme(FILE               *fplog,
                                  t_commrec          *cr,
                                  t_inputrec         *ir,
                                  int                 nstlist_cmdline,
                                  const gmx_mtop_t   *mtop,
                                  const matrix        box,
                                  bool                makeGpuPairList,
                                  const gmx::CpuInfo &cpuinfo)
{
    /* For NVE simulations, we will retain the initial list buffer */
    if (EI_DYNAMICS(ir->eI) &&
        ir->verletbuf_tol > 0 &&
        !(EI_MD(ir->eI) && ir->etc == etcNO))
    {
        /* Update the Verlet buffer size for the current run setup */

        /* Here we assume SIMD-enabled kernels are being used. But as currently
         * calc_verlet_buffer_size gives the same results for 4x8 and 4x4
         * and 4x2 gives a larger buffer than 4x4, this is ok.
         */
        ListSetupType      listType  = (makeGpuPairList ? ListSetupType::Gpu : ListSetupType::CpuSimdWhenSupported);
        VerletbufListSetup listSetup = verletbufGetSafeListSetup(listType);

        real               rlist_new;
        calc_verlet_buffer_size(mtop, det(box), ir, ir->nstlist, ir->nstlist - 1, -1, &listSetup, nullptr, &rlist_new);

        if (rlist_new != ir->rlist)
        {
            if (fplog != nullptr)
            {
                fprintf(fplog, "\nChanging rlist from %g to %g for non-bonded %dx%d atom kernels\n\n",
                        ir->rlist, rlist_new,
                        listSetup.cluster_size_i, listSetup.cluster_size_j);
            }
            ir->rlist = rlist_new;
        }
    }

    if (nstlist_cmdline > 0 && (!EI_DYNAMICS(ir->eI) || ir->verletbuf_tol <= 0))
    {
        gmx_fatal(FARGS, "Can not set nstlist without %s",
                  !EI_DYNAMICS(ir->eI) ? "dynamics" : "verlet-buffer-tolerance");
    }

    if (EI_DYNAMICS(ir->eI))
    {
        /* Set or try nstlist values */
        increaseNstlist(fplog, cr, ir, nstlist_cmdline, mtop, box, makeGpuPairList, cpuinfo);
    }
}
/*! \brief Override the nsteps value in inputrec
 *
 * with value passed on the command line (if any)
 */
static void override_nsteps_cmdline(const gmx::MDLogger &mdlog,
                                    gmx_int64_t          nsteps_cmdline,
                                    t_inputrec          *ir)
{
    /* override with anything else than the default -2 */
    if (nsteps_cmdline > -2)
    {
        char sbuf_steps[STEPSTRSIZE];
        char sbuf_msg[STRLEN];

        ir->nsteps = nsteps_cmdline;
        if (EI_DYNAMICS(ir->eI) && nsteps_cmdline != -1)
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps, %.3g ps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps),
                    fabs(nsteps_cmdline*ir->delta_t));
        }
        else
        {
            sprintf(sbuf_msg, "Overriding nsteps with value passed on the command line: %s steps",
                    gmx_step_str(nsteps_cmdline, sbuf_steps));
        }

        GMX_LOG(mdlog.warning).asParagraph().appendText(sbuf_msg);
    }
    else if (nsteps_cmdline < -2)
    {
        gmx_fatal(FARGS, "Invalid nsteps value passed on the command line: %d",
                  nsteps_cmdline);
    }
    /* Do nothing if nsteps_cmdline == -2 */
}
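/* Note that -2 is mdrun's "not set" sentinel for -nsteps, so only values
 * greater than -2 override ir->nsteps; -1 requests a run without a step
 * limit, which is why no time estimate is printed for it above. */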
/*! \brief Return whether GPU acceleration of nonbondeds is supported with the given settings.
 *
 * If not, and if a warning may be issued, logs a warning about
 * falling back to CPU code. With thread-MPI, only the first
 * call to this function should have \c issueWarning true. */
static bool gpuAccelerationOfNonbondedIsUseful(const MDLogger   &mdlog,
                                               const t_inputrec *ir,
                                               bool              issueWarning)
{
    if (ir->opts.ngener - ir->nwall > 1)
    {
        /* The GPU code does not support more than one energy group.
         * If the user requested GPUs explicitly, a fatal error is given later.
         */
        if (issueWarning)
        {
            GMX_LOG(mdlog.warning).asParagraph()
                .appendText("Multiple energy groups are not implemented for GPUs, falling back to the CPU. "
                            "For better performance, run on the GPU without energy groups and then use "
                            "gmx mdrun -rerun on the trajectory with an energy group .tpr file.");
        }
        return false;
    }
    return true;
}
//! Initializes the logger for mdrun.
static gmx::LoggerOwner buildLogger(FILE *fplog, const t_commrec *cr)
{
    gmx::LoggerBuilder builder;
    if (fplog != nullptr)
    {
        builder.addTargetFile(gmx::MDLogger::LogLevel::Info, fplog);
    }
    if (cr == nullptr || SIMMASTER(cr))
    {
        builder.addTargetStream(gmx::MDLogger::LogLevel::Warning,
                                &gmx::TextOutputFile::standardError());
    }
    return builder.build();
}
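// Note on buildLogger() above: Info-level output reaches only the log file
// (when one is open), while Warning-level output additionally reaches stderr
// on the simulation master rank, or when no communication record exists yet.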
//! Make a TaskTarget from an mdrun argument string.
static TaskTarget findTaskTarget(const char *optionString)
{
    TaskTarget returnValue = TaskTarget::Auto;

    if (strncmp(optionString, "auto", 3) == 0)
    {
        returnValue = TaskTarget::Auto;
    }
    else if (strncmp(optionString, "cpu", 3) == 0)
    {
        returnValue = TaskTarget::Cpu;
    }
    else if (strncmp(optionString, "gpu", 3) == 0)
    {
        returnValue = TaskTarget::Gpu;
    }
    else
    {
        GMX_ASSERT(false, "Option string should have been checked for sanity already");
    }

    return returnValue;
}
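// Note that each strncmp() above compares only the first three characters, so
// findTaskTarget() relies on the command-line machinery having already
// restricted the option to one of the valid strings, as the assertion says.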
int Mdrunner::mdrunner()
{
    matrix                    box;
    t_nrnb                   *nrnb;
    gmx_mtop_t                mtop;
    t_forcerec               *fr               = nullptr;
    t_fcdata                 *fcd              = nullptr;
    real                      ewaldcoeff_q     = 0;
    real                      ewaldcoeff_lj    = 0;
    gmx_vsite_t              *vsite            = nullptr;
    int                       nChargePerturbed = -1, nTypePerturbed = 0;
    gmx_wallcycle_t           wcycle;
    gmx_walltime_accounting_t walltime_accounting = nullptr;
    gmx_int64_t               reset_counters;
    int                       nthreads_pme = 1;
    gmx_membed_t             *membed       = nullptr;
    gmx_hw_info_t            *hwinfo       = nullptr;

    /* CAUTION: threads may be started later on in this function, so
       cr doesn't reflect the final parallel state right now */
    std::unique_ptr<gmx::MDModules> mdModules(new gmx::MDModules);
    t_inputrec                      inputrecInstance;
    t_inputrec                     *inputrec = &inputrecInstance;

    if (mdrunOptions.continuationOptions.appendFiles)
    {
        fplog = nullptr;
    }

    bool doMembed = opt2bSet("-membed", nfile, fnm);
    bool doRerun  = mdrunOptions.rerun;

    // Handle task-assignment related user options.
    EmulateGpuNonbonded emulateGpuNonbonded = (getenv("GMX_EMULATE_GPU") != nullptr ?
                                               EmulateGpuNonbonded::Yes : EmulateGpuNonbonded::No);
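    // Setting the GMX_EMULATE_GPU environment variable requests that the
    // GPU-style non-bonded kernels be emulated on the CPU; the task-decision
    // functions below take this flag into account.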
    std::vector<int> gpuIdsAvailable;
    try
    {
        gpuIdsAvailable = parseUserGpuIds(hw_opt.gpuIdsAvailable);
        // TODO We could put the GPU IDs into a std::map to find
        // duplicates, but for the small numbers of IDs involved, this
        // code is simple and fast.
        for (size_t i = 0; i != gpuIdsAvailable.size(); ++i)
        {
            for (size_t j = i+1; j != gpuIdsAvailable.size(); ++j)
            {
                if (gpuIdsAvailable[i] == gpuIdsAvailable[j])
                {
                    GMX_THROW(InvalidInputError(formatString("The string of available GPU device IDs '%s' may not contain duplicate device IDs", hw_opt.gpuIdsAvailable.c_str())));
                }
            }
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    std::vector<int> userGpuTaskAssignment;
    try
    {
        userGpuTaskAssignment = parseUserGpuIds(hw_opt.userGpuTaskAssignment);
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    auto       nonbondedTarget = findTaskTarget(nbpu_opt);
    auto       pmeTarget       = findTaskTarget(pme_opt);
    auto       pmeFftTarget    = findTaskTarget(pme_fft_opt);
    PmeRunMode pmeRunMode      = PmeRunMode::None;
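    // pmeRunMode is given its real value below, once the GPU task decisions
    // for this rank are known; None here just marks it as not yet decided.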
    // Here we assume that SIMMASTER(cr) does not change even after the
    // threads are started.
    gmx::LoggerOwner logOwner(buildLogger(fplog, cr));
    gmx::MDLogger    mdlog(logOwner.logger());

    // TODO The thread-MPI master rank makes a working
    // PhysicalNodeCommunicator here, but it gets rebuilt by all ranks
    // after the threads have been launched. This works because no use
    // is made of that communicator until after the execution paths
    // have rejoined. But it is likely that we can improve the way
    // this is expressed, e.g. by expressly running detection only on the
    // master rank for thread-MPI, rather than relying on the mutex
    // and reference count.
    PhysicalNodeCommunicator physicalNodeComm(MPI_COMM_WORLD, gmx_physicalnode_id_hash());
    hwinfo = gmx_detect_hardware(mdlog, physicalNodeComm);

    gmx_print_detected_hardware(fplog, cr, ms, mdlog, hwinfo);

    std::vector<int> gpuIdsToUse;
    auto             compatibleGpus = getCompatibleGpus(hwinfo->gpu_info);
    if (gpuIdsAvailable.empty())
    {
        gpuIdsToUse = compatibleGpus;
    }
    else
    {
        for (const auto &availableGpuId : gpuIdsAvailable)
        {
            bool availableGpuIsCompatible = false;
            for (const auto &compatibleGpuId : compatibleGpus)
            {
                if (availableGpuId == compatibleGpuId)
                {
                    availableGpuIsCompatible = true;
                }
            }
            if (!availableGpuIsCompatible)
            {
                gmx_fatal(FARGS, "You limited the set of compatible GPUs to a set that included ID #%d, but that ID is not for a compatible GPU. List only compatible GPUs.", availableGpuId);
            }
            gpuIdsToUse.push_back(availableGpuId);
        }
    }
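    // At this point, gpuIdsToUse holds either the full set of detected
    // compatible GPUs, or the user-supplied subset with every entry verified
    // above to refer to a compatible device.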
    if (fplog != nullptr)
    {
        /* Print references after all software/hardware printing */
        please_cite(fplog, "Abraham2015");
        please_cite(fplog, "Pall2015");
        please_cite(fplog, "Pronk2013");
        please_cite(fplog, "Hess2008b");
        please_cite(fplog, "Spoel2005a");
        please_cite(fplog, "Lindahl2001a");
        please_cite(fplog, "Berendsen95a");
    }

    std::unique_ptr<t_state> globalState;

    if (SIMMASTER(cr))
    {
        /* Only the master rank has the global state */
        globalState = std::unique_ptr<t_state>(new t_state);

        /* Read (nearly) all data required for the simulation */
        read_tpx_state(ftp2fn(efTPR, nfile, fnm), inputrec, globalState.get(), &mtop);
    }
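    // Only the master rank has read the tpr file at this point; the contents
    // of inputrec and mtop reach the other ranks via init_parallel() further
    // below, after any thread-MPI ranks have been spawned.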
    if (inputrec->cutoff_scheme != ecutsVERLET)
    {
        if (nstlist_cmdline > 0)
        {
            gmx_fatal(FARGS, "Can not set nstlist with the group cut-off scheme");
        }

        if (!compatibleGpus.empty())
        {
            GMX_LOG(mdlog.warning).asParagraph().appendText(
                    "NOTE: GPU(s) found, but the current simulation can not use GPUs\n"
                    "      To use a GPU, set the mdp option: cutoff-scheme = Verlet");
        }
    }

    /* Check and update the hardware options for internal consistency */
    check_and_update_hw_opt_1(&hw_opt, cr, domdecOptions.numPmeRanks);

    /* Early check for externally set process affinity. */
    gmx_check_thread_affinity_set(mdlog, cr,
                                  &hw_opt, hwinfo->nthreads_hw_avail, FALSE);
    if (GMX_THREAD_MPI && SIMMASTER(cr))
    {
        if (domdecOptions.numPmeRanks > 0 && hw_opt.nthreads_tmpi <= 0)
        {
            gmx_fatal(FARGS, "You need to explicitly specify the number of MPI threads (-ntmpi) when using separate PME ranks");
        }

        /* Since the master knows the cut-off scheme, update hw_opt for this.
         * This is done later for normal MPI and also once more with tMPI
         * for all tMPI ranks.
         */
        check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);

        bool useGpuForNonbonded = false;
        bool useGpuForPme       = false;
        try
        {
            // If the user specified the number of ranks, then we must
            // respect that, but in default mode, we need to allow for
            // the number of GPUs to choose the number of ranks.
            useGpuForNonbonded = decideWhetherToUseGpusForNonbondedWithThreadMpi
                    (nonbondedTarget, gpuIdsToUse, userGpuTaskAssignment, emulateGpuNonbonded,
                    inputrec->cutoff_scheme == ecutsVERLET,
                    gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, GMX_THREAD_MPI),
                    hw_opt.nthreads_tmpi);
            auto canUseGpuForPme = pme_gpu_supports_build(nullptr) && pme_gpu_supports_input(inputrec, nullptr);
            useGpuForPme = decideWhetherToUseGpusForPmeWithThreadMpi
                    (useGpuForNonbonded, pmeTarget, gpuIdsToUse, userGpuTaskAssignment,
                    canUseGpuForPme, hw_opt.nthreads_tmpi, domdecOptions.numPmeRanks);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
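        // These early decisions are made only on the thread-MPI master, and
        // only so that get_nthreads_mpi() below can choose a suitable number
        // of ranks; every rank repeats the full decision logic after the
        // threads have been spawned and init_parallel() has run.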
        /* Determine how many thread-MPI ranks to start.
         *
         * TODO Over-writing the user-supplied value here does
         * prevent any possible subsequent checks from working
         * correctly. */
        hw_opt.nthreads_tmpi = get_nthreads_mpi(hwinfo,

        // Now start the threads for thread MPI.
        cr = spawnThreads(hw_opt.nthreads_tmpi);
        /* The main thread continues here with a new cr. We don't deallocate
           the old cr because other threads may still be reading it. */
        // TODO Both master and spawned threads call dup_tfn and
        // reinitialize_commrec_for_this_thread. Find a way to express
        // this better.
        physicalNodeComm = PhysicalNodeCommunicator(MPI_COMM_WORLD, gmx_physicalnode_id_hash());
    }
    // END OF CAUTION: cr and physicalNodeComm are now reliable
    /* now broadcast everything to the non-master nodes/threads: */
    init_parallel(cr, inputrec, &mtop);

    // Now each rank knows the inputrec that SIMMASTER read and used,
    // and (if applicable) cr->nnodes has been assigned the number of
    // thread-MPI ranks that have been chosen. The ranks can now all
    // run the task-deciding functions and will agree on the result
    // without needing to communicate.
    //
    // TODO Should we do the communication in debug mode to support
    // having an assertion?
    //
    // Note that these variables describe only their own node.
    bool useGpuForNonbonded = false;
    bool useGpuForPme       = false;
    try
    {
        // It's possible that there are different numbers of GPUs on
        // different nodes, which is the user's responsibility to
        // handle. If unsuitable, we will notice that during task
        // assignment.
        bool gpusWereDetected = hwinfo->ngpu_compatible_tot > 0;
        useGpuForNonbonded = decideWhetherToUseGpusForNonbonded(nonbondedTarget, userGpuTaskAssignment,
                                                                emulateGpuNonbonded, inputrec->cutoff_scheme == ecutsVERLET,
                                                                gpuAccelerationOfNonbondedIsUseful(mdlog, inputrec, !GMX_THREAD_MPI),
                                                                gpusWereDetected);
        auto canUseGpuForPme = pme_gpu_supports_build(nullptr) && pme_gpu_supports_input(inputrec, nullptr);
        useGpuForPme = decideWhetherToUseGpusForPme(useGpuForNonbonded, pmeTarget, userGpuTaskAssignment,
                                                    canUseGpuForPme, cr->nnodes, domdecOptions.numPmeRanks,
                                                    gpusWereDetected);

        pmeRunMode = (useGpuForPme ? PmeRunMode::GPU : PmeRunMode::CPU);
        if (pmeRunMode == PmeRunMode::GPU)
        {
            if (pmeFftTarget == TaskTarget::Cpu)
            {
                pmeRunMode = PmeRunMode::Mixed;
            }
        }
        else if (pmeFftTarget == TaskTarget::Gpu)
        {
            gmx_fatal(FARGS, "Assigning FFTs to GPU requires PME to be assigned to GPU as well. With PME on CPU you should not be using -pmefft.");
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
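    // pmeRunMode now holds the final choice: CPU for a conventional PME run,
    // GPU when the whole PME task is offloaded, and Mixed when PME runs on the
    // GPU but the FFT grid work stays on the CPU (-pmefft cpu).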
    // TODO: Error handling
    mdModules->assignOptionsToModules(*inputrec->params, nullptr);

    if (fplog != nullptr)
    {
        pr_inputrec(fplog, 0, "Input Parameters", inputrec, FALSE);
        fprintf(fplog, "\n");
    }
    if (SIMMASTER(cr))
    {
        /* now make sure the state is initialized and propagated */
        set_state_entries(globalState.get(), inputrec);
    }

    /* NM and TPI parallelize over force/energy calculations, not atoms,
     * so we need to initialize and broadcast the global state.
     */
    if (inputrec->eI == eiNM || inputrec->eI == eiTPI)
    {
        if (!MASTER(cr))
        {
            globalState = std::unique_ptr<t_state>(new t_state);
        }
        broadcastStateWithoutDynamics(cr, globalState.get());
    }
    /* A parallel command line option consistency check that we can
       only do after any threads have started. */
    if (!PAR(cr) && (domdecOptions.numCells[XX] > 1 ||
                     domdecOptions.numCells[YY] > 1 ||
                     domdecOptions.numCells[ZZ] > 1 ||
                     domdecOptions.numPmeRanks > 0))
    {
        gmx_fatal(FARGS,
                  "The -dd or -npme option request a parallel simulation, "
#if !GMX_MPI
                  "but %s was compiled without threads or MPI enabled"
#else
#if GMX_THREAD_MPI
                  "but the number of MPI-threads (option -ntmpi) is not set or is 1"
#else
                  "but %s was not started through mpirun/mpiexec or only one rank was requested through mpirun/mpiexec"
#endif
#endif
                  , output_env_get_program_display_name(oenv)
                  );
    }
    if (doRerun &&
        (EI_ENERGY_MINIMIZATION(inputrec->eI) || eiNM == inputrec->eI))
    {
        gmx_fatal(FARGS, "The .mdp file specified an energy minimization or normal mode algorithm, and these are not compatible with mdrun -rerun");
    }

    if (can_use_allvsall(inputrec, TRUE, cr, fplog) && DOMAINDECOMP(cr))
    {
        gmx_fatal(FARGS, "All-vs-all loops do not work with domain decomposition, use a single MPI rank");
    }
    if (!(EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype)))
    {
        if (domdecOptions.numPmeRanks > 0)
        {
            gmx_fatal_collective(FARGS, cr->mpi_comm_mysim, MASTER(cr),
                                 "PME-only ranks are requested, but the system does not use PME for electrostatics or LJ");
        }

        domdecOptions.numPmeRanks = 0;
    }

    if (useGpuForNonbonded && domdecOptions.numPmeRanks < 0)
    {
        /* With NB GPUs we don't automatically use PME-only CPU ranks. PME ranks can
         * improve performance with many threads per GPU, since our OpenMP
         * scaling is bad, but it's difficult to automate the setup.
         */
        domdecOptions.numPmeRanks = 0;
    }
    if (useGpuForPme)
    {
        if (domdecOptions.numPmeRanks < 0)
        {
            domdecOptions.numPmeRanks = 0;
            // TODO possibly print a note that one can opt-in for a separate PME GPU rank?
        }
        else
        {
            GMX_RELEASE_ASSERT(domdecOptions.numPmeRanks <= 1, "PME GPU decomposition is not supported");
        }
    }
    fcRegisterSteps(inputrec->nsteps, inputrec->init_step);

    /* NMR restraints must be initialized before load_checkpoint,
     * since with time averaging the history is added to t_state.
     * For proper consistency check we therefore need to extend
     * t_state here.
     * So the PME-only nodes (if present) will also initialize
     * the distance restraints.
     */
    snew(fcd, 1);

    /* This needs to be called before read_checkpoint to extend the state */
    init_disres(fplog, &mtop, inputrec, cr, ms, fcd, globalState.get(), replExParams.exchangeInterval > 0);

    init_orires(fplog, &mtop, inputrec, cr, ms, globalState.get(), &(fcd->orires));

    if (inputrecDeform(inputrec))
    {
        /* Store the deform reference box before reading the checkpoint */
        copy_mat(globalState->box, box);
        gmx_bcast(sizeof(box), box, cr);

        /* Because we do not have the update struct available yet
         * in which the reference values should be stored,
         * we store them temporarily in static variables.
         * This should be thread safe, since they are only written once
         * and with identical values.
         */
        tMPI_Thread_mutex_lock(&deform_init_box_mutex);
        deform_init_init_step_tpx = inputrec->init_step;
        copy_mat(box, deform_init_box_tpx);
        tMPI_Thread_mutex_unlock(&deform_init_box_mutex);
    }
    ObservablesHistory   observablesHistory = {};

    ContinuationOptions &continuationOptions = mdrunOptions.continuationOptions;

    if (continuationOptions.startedFromCheckpoint)
    {
        /* Check if checkpoint file exists before doing continuation.
         * This way we can use identical input options for the first and subsequent runs...
         */
        gmx_bool bReadEkin;

        load_checkpoint(opt2fn_master("-cpi", nfile, fnm, cr), &fplog,
                        cr, domdecOptions.numCells,
                        inputrec, globalState.get(),
                        &bReadEkin, &observablesHistory,
                        continuationOptions.appendFiles,
                        continuationOptions.appendFilesOptionSet,
                        mdrunOptions.reproducible);

        if (bReadEkin)
        {
            continuationOptions.haveReadEkin = true;
        }
    }

    if (SIMMASTER(cr) && continuationOptions.appendFiles)
    {
        gmx_log_open(ftp2fn(efLOG, nfile, fnm), cr,
                     continuationOptions.appendFiles, &fplog);
        logOwner = buildLogger(fplog, nullptr);
        mdlog    = logOwner.logger();
    }

    if (mdrunOptions.numStepsCommandline > -2)
    {
        GMX_LOG(mdlog.info).asParagraph().
            appendText("The -nsteps functionality is deprecated, and may be removed in a future version. "
                       "Consider using gmx convert-tpr -nsteps or changing the appropriate .mdp file field.");
    }
    /* override nsteps with value set on the command line */
    override_nsteps_cmdline(mdlog, mdrunOptions.numStepsCommandline, inputrec);
    copy_mat(globalState->box, box);
    gmx_bcast(sizeof(box), box, cr);

    /* Update rlist and nstlist. */
    if (inputrec->cutoff_scheme == ecutsVERLET)
    {
        prepare_verlet_scheme(fplog, cr, inputrec, nstlist_cmdline, &mtop, box,
                              useGpuForNonbonded || (emulateGpuNonbonded == EmulateGpuNonbonded::Yes), *hwinfo->cpuInfo);
    }
    /* Initialize the domain decomposition */
    if (PAR(cr) && !(EI_TPI(inputrec->eI) ||
                     inputrec->eI == eiNM))
    {
        const rvec *xOnMaster = (SIMMASTER(cr) ? as_rvec_array(globalState->x.data()) : nullptr);

        cr->dd = init_domain_decomposition(fplog, cr, domdecOptions, mdrunOptions,
        // Note that local state still does not exist yet.
    }
    else
    {
        /* PME, if used, is done on all nodes with 1D decomposition */
        cr->duty = (DUTY_PP | DUTY_PME);

        if (inputrec->ePBC == epbcSCREW)
        {
            gmx_fatal(FARGS,
                      "pbc=%s is only implemented with domain decomposition",
                      epbc_names[inputrec->ePBC]);
        }
    }

    /* After possible communicator splitting in make_dd_communicators.
     * we can set up the intra/inter node communication.
     */
    gmx_setup_nodecomm(fplog, cr);
    if (isMultiSim(ms))
    {
        GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
                "This is simulation %d out of %d running as a composite GROMACS\n"
                "multi-simulation job. Setup for this simulation:\n",
                ms->sim, ms->nsim);
    }
    GMX_LOG(mdlog.warning).appendTextFormatted(
            "Using %d MPI %s\n",
            cr->nnodes,
#if GMX_THREAD_MPI
            cr->nnodes == 1 ? "thread" : "threads"
#else
            cr->nnodes == 1 ? "process" : "processes"
#endif
            );
    /* Check and update hw_opt for the cut-off scheme */
    check_and_update_hw_opt_2(&hw_opt, inputrec->cutoff_scheme);

    /* Check and update the number of OpenMP threads requested */
    checkAndUpdateRequestedNumOpenmpThreads(&hw_opt, *hwinfo, cr, ms, physicalNodeComm.size_,

    gmx_omp_nthreads_init(mdlog, cr,
                          hwinfo->nthreads_hw_avail,
                          physicalNodeComm.size_,
                          hw_opt.nthreads_omp_pme,
                          !thisRankHasDuty(cr, DUTY_PP),
                          inputrec->cutoff_scheme == ecutsVERLET);

    if (EI_TPI(inputrec->eI) &&
        inputrec->cutoff_scheme == ecutsVERLET)
    {
        gmx_feenableexcept();
    }
    // Build a data structure that expresses which kinds of non-bonded
    // task are handled by this rank.
    //
    // TODO Later, this might become a loop over all registered modules
    // relevant to the mdp inputs, to find those that have such tasks.
    //
    // TODO This could move before init_domain_decomposition() as part
    // of refactoring that separates the responsibility for duty
    // assignment from setup for communication between tasks, and
    // setup for tasks handled with a domain (ie including short-ranged
    // tasks, bonded tasks, etc.).
    //
    // Note that in general useGpuForNonbonded, etc. can have a value
    // that is inconsistent with the presence of actual GPUs on any
    // rank, and that is not known to be a problem until the
    // duties of the ranks on a node become known.
    //
    // TODO Later we might need the concept of computeTasksOnThisRank,
    // from which we construct gpuTasksOnThisRank.
    //
    // Currently the DD code assigns duty to ranks that can
    // include PP work that currently can be executed on a single
    // GPU, if present and compatible. This has to be coordinated
    // across PP ranks on a node, with possible multiple devices
    // or sharing devices on a node, either from the user
    // selection, or automatically.
    auto                 haveGpus = !gpuIdsToUse.empty();
    std::vector<GpuTask> gpuTasksOnThisRank;
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        if (useGpuForNonbonded)
        {
            if (haveGpus)
            {
                gpuTasksOnThisRank.push_back(GpuTask::Nonbonded);
            }
            else if (nonbondedTarget == TaskTarget::Gpu)
            {
                gmx_fatal(FARGS, "Cannot run short-ranged nonbonded interactions on a GPU because there is none detected.");
            }
        }
    }
    // TODO cr->duty & DUTY_PME should imply that a PME algorithm is active, but currently does not.
    if (EEL_PME(inputrec->coulombtype) && (thisRankHasDuty(cr, DUTY_PME)))
    {
        if (useGpuForPme)
        {
            if (haveGpus)
            {
                gpuTasksOnThisRank.push_back(GpuTask::Pme);
            }
            else if (pmeTarget == TaskTarget::Gpu)
            {
                gmx_fatal(FARGS, "Cannot run PME on a GPU because there is none detected.");
            }
        }
    }

    GpuTaskAssignment gpuTaskAssignment;
    try
    {
        // Produce the task assignment for this rank.
        gpuTaskAssignment = runTaskAssignment(gpuIdsToUse, userGpuTaskAssignment, *hwinfo,
                                              mdlog, cr, ms, physicalNodeComm, gpuTasksOnThisRank);
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
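    // Each entry in gpuTaskAssignment now maps one of this rank's GPU tasks to
    // the device ID it should use; those mappings are looked up below to
    // initialize the non-bonded and PME devices.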
    /* Prevent other ranks from continuing after an issue was found
     * and reported as a fatal error.
     *
     * TODO This function implements a barrier so that MPI runtimes
     * can organize an orderly shutdown if one of the ranks has had to
     * issue a fatal error in various code already run. When we have
     * MPI-aware error handling and reporting, this should be
     * improved. */
    MPI_Barrier(cr->mpi_comm_mysim);

    MPI_Barrier(ms->mpi_comm_masters);
    /* We need another barrier to prevent non-master ranks from continuing
     * when an error occurred in a different simulation.
     */
    MPI_Barrier(cr->mpi_comm_mysim);

    /* Now that we know the setup is consistent, check for efficiency */
    check_resource_division_efficiency(hwinfo, !gpuTaskAssignment.empty(), mdrunOptions.ntompOptionIsSet,

    gmx_device_info_t *nonbondedDeviceInfo = nullptr;

    if (thisRankHasDuty(cr, DUTY_PP))
    {
        // This works because only one task of each type is currently permitted.
        auto nbGpuTaskMapping = std::find_if(gpuTaskAssignment.begin(), gpuTaskAssignment.end(),
                                             hasTaskType<GpuTask::Nonbonded>);
        if (nbGpuTaskMapping != gpuTaskAssignment.end())
        {
            int nonbondedDeviceId = nbGpuTaskMapping->deviceId_;
            nonbondedDeviceInfo = getDeviceInfo(hwinfo->gpu_info, nonbondedDeviceId);
            init_gpu(mdlog, nonbondedDeviceInfo);

            if (DOMAINDECOMP(cr))
            {
                /* When we share GPUs over ranks, we need to know this for the DLB */
                dd_setup_dlb_resource_sharing(cr, nonbondedDeviceId);
            }
        }
    }

    std::unique_ptr<ClfftInitializer> initializedClfftLibrary;

    gmx_device_info_t                *pmeDeviceInfo = nullptr;
    // Later, this program could contain kernels that might be later
    // re-used as auto-tuning progresses, or by subsequent simulations.
    PmeGpuProgramStorage pmeGpuProgram;
    // This works because only one task of each type is currently permitted.
    auto                 pmeGpuTaskMapping = std::find_if(gpuTaskAssignment.begin(), gpuTaskAssignment.end(), hasTaskType<GpuTask::Pme>);
    if (pmeGpuTaskMapping != gpuTaskAssignment.end())
    {
        pmeDeviceInfo = getDeviceInfo(hwinfo->gpu_info, pmeGpuTaskMapping->deviceId_);
        init_gpu(mdlog, pmeDeviceInfo);
        pmeGpuProgram = buildPmeGpuProgram(pmeDeviceInfo);
    }

    // TODO It would be nice to move this logic into the factory
    // function. See Redmine #2535.
    bool isMasterThread = !GMX_THREAD_MPI || MASTER(cr);
    if (pmeRunMode == PmeRunMode::GPU && !initializedClfftLibrary && isMasterThread)
    {
        initializedClfftLibrary = initializeClfftLibrary();
    }
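    // clFFT (the FFT library used for PME on OpenCL builds) keeps global
    // state, which is why the condition above lets only the master thread of
    // a thread-MPI run perform the one-time initialization.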
    /* getting number of PP/PME threads
       PME: env variable should be read only on one node to make sure it is
       identical everywhere;
     */
    nthreads_pme = gmx_omp_nthreads_get(emntPME);

    int numThreadsOnThisRank;
    /* threads on this MPI process or TMPI thread */
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        numThreadsOnThisRank = gmx_omp_nthreads_get(emntNonbonded);
    }
    else
    {
        numThreadsOnThisRank = nthreads_pme;
    }

    checkHardwareOversubscription(numThreadsOnThisRank, cr->nodeid,
                                  *hwinfo->hardwareTopology,
                                  physicalNodeComm, mdlog);
    if (hw_opt.thread_affinity != threadaffOFF)
    {
        /* Before setting affinity, check whether the affinity has changed
         * - which indicates that probably the OpenMP library has changed it
         * since we first checked.
         */
        gmx_check_thread_affinity_set(mdlog, cr,
                                      &hw_opt, hwinfo->nthreads_hw_avail, TRUE);

        int numThreadsOnThisNode, intraNodeThreadOffset;
        analyzeThreadsOnThisNode(physicalNodeComm, numThreadsOnThisRank, &numThreadsOnThisNode,
                                 &intraNodeThreadOffset);

        /* Set the CPU affinity */
        gmx_set_thread_affinity(mdlog, cr, &hw_opt, *hwinfo->hardwareTopology,
                                numThreadsOnThisRank, numThreadsOnThisNode,
                                intraNodeThreadOffset, nullptr);
    }
    if (mdrunOptions.timingOptions.resetStep > -1)
    {
        GMX_LOG(mdlog.info).asParagraph().
            appendText("The -resetstep functionality is deprecated, and may be removed in a future version.");
    }
    wcycle = wallcycle_init(fplog, mdrunOptions.timingOptions.resetStep, cr);

    /* Master synchronizes its value of reset_counters with all nodes
     * including PME only nodes */
    reset_counters = wcycle_get_reset_counters(wcycle);
    gmx_bcast_sim(sizeof(reset_counters), &reset_counters, cr);
    wcycle_set_reset_counters(wcycle, reset_counters);
    // Membrane embedding must be initialized before we call init_forcerec()
    if (doMembed)
    {
        fprintf(stderr, "Initializing membed");
        /* Note that membed cannot work in parallel because mtop is
         * changed here. Fix this if we ever want to make it run with
         * multiple ranks. */
        membed = init_membed(fplog, nfile, fnm, &mtop, inputrec, globalState.get(), cr, &mdrunOptions.checkpointOptions.period);
    }

    std::unique_ptr<MDAtoms> mdAtoms;
    if (thisRankHasDuty(cr, DUTY_PP))
    {
        /* Initiate forcerecord */
        fr                 = mk_forcerec();
        fr->forceProviders = mdModules->initForceProviders();
        init_forcerec(fplog, mdlog, fr, fcd,
                      inputrec, &mtop, cr, box,
                      opt2fn("-table", nfile, fnm),
                      opt2fn("-tablep", nfile, fnm),
                      opt2fns("-tableb", nfile, fnm),
                      *hwinfo, nonbondedDeviceInfo,

        /* Initialize QM-MM */
        GMX_LOG(mdlog.info).asParagraph().
            appendText("Large parts of the QM/MM support are deprecated, and may be removed in a future "
                       "version. Please get in touch with the developers if you find the support useful, "
                       "as help is needed if the functionality is to continue to be available.");
        init_QMMMrec(cr, &mtop, inputrec, fr);

        /* Initialize the mdAtoms structure.
         * mdAtoms is not filled with atom data,
         * as this can not be done now with domain decomposition.
         */
        const bool useGpuForPme = (pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed);
        mdAtoms = makeMDAtoms(fplog, mtop, *inputrec, useGpuForPme && thisRankHasDuty(cr, DUTY_PME));

        // The pinning of coordinates in the global state object works, because we only use
        // PME on GPU without DD or on a separate PME rank, and because the local state pointer
        // points to the global state object without DD.
        // FIXME: MD and EM separately set up the local state - this should happen in the same function,
        // which should also perform the pinning.
        changePinningPolicy(&globalState->x, useGpuForPme ? PinningPolicy::CanBePinned : PinningPolicy::CannotBePinned);
        /* Initialize the virtual site communication */
        vsite = initVsite(mtop, cr);

        calc_shifts(box, fr->shift_vec);

        /* With periodic molecules the charge groups should be whole at start up
         * and the virtual sites should not be far from their proper positions.
         */
        if (!inputrec->bContinuation && MASTER(cr) &&
            !(inputrec->ePBC != epbcNONE && inputrec->bPeriodicMols))
        {
            /* Make molecules whole at start of run */
            if (fr->ePBC != epbcNONE)
            {
                rvec *xGlobal = as_rvec_array(globalState->x.data());
                do_pbc_first_mtop(fplog, inputrec->ePBC, box, &mtop, xGlobal);
            }
            if (vsite)
            {
                /* Correct initial vsite positions are required
                 * for the initial distribution in the domain decomposition
                 * and for the initial shell prediction.
                 */
                constructVsitesGlobal(mtop, globalState->x);
            }
        }

        if (EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            ewaldcoeff_q  = fr->ic->ewaldcoeff_q;
            ewaldcoeff_lj = fr->ic->ewaldcoeff_lj;
        }
    }
    else
    {
        /* This is a PME only node */
        GMX_ASSERT(globalState == nullptr, "We don't need the state on a PME only rank and expect it to be uninitialized");

        ewaldcoeff_q  = calc_ewaldcoeff_q(inputrec->rcoulomb, inputrec->ewald_rtol);
        ewaldcoeff_lj = calc_ewaldcoeff_lj(inputrec->rvdw, inputrec->ewald_rtol_lj);
    }

    gmx_pme_t *sepPmeData = nullptr;
    // This reference hides the fact that PME data is owned by runner on PME-only ranks and by forcerec on other ranks
    GMX_ASSERT(thisRankHasDuty(cr, DUTY_PP) == (fr != nullptr), "Double-checking that only PME-only ranks have no forcerec");
    gmx_pme_t * &pmedata = fr ? fr->pmedata : sepPmeData;
    /* Initiate PME if necessary,
     * either on all nodes or on dedicated PME nodes only. */
    if (EEL_PME(inputrec->coulombtype) || EVDW_PME(inputrec->vdwtype))
    {
        if (mdAtoms && mdAtoms->mdatoms())
        {
            nChargePerturbed = mdAtoms->mdatoms()->nChargePerturbed;
            if (EVDW_PME(inputrec->vdwtype))
            {
                nTypePerturbed = mdAtoms->mdatoms()->nTypePerturbed;
            }
        }
        if (cr->npmenodes > 0)
        {
            /* The PME only nodes need to know nChargePerturbed(FEP on Q) and nTypePerturbed(FEP on LJ)*/
            gmx_bcast_sim(sizeof(nChargePerturbed), &nChargePerturbed, cr);
            gmx_bcast_sim(sizeof(nTypePerturbed), &nTypePerturbed, cr);
        }

        if (thisRankHasDuty(cr, DUTY_PME))
        {
            try
            {
                pmedata = gmx_pme_init(cr,
                                       getNumPmeDomains(cr->dd),
                                       mtop.natoms, nChargePerturbed, nTypePerturbed,
                                       mdrunOptions.reproducible,
                                       ewaldcoeff_q, ewaldcoeff_lj,
                                       pmeRunMode, nullptr,
                                       pmeDeviceInfo, pmeGpuProgram.get(), mdlog);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
    }
    if (EI_DYNAMICS(inputrec->eI))
    {
        /* Turn on signal handling on all nodes.
         * (A user signal from the PME nodes (if any)
         * is communicated to the PP nodes.)
         */
        signal_handler_install();
    }

    if (thisRankHasDuty(cr, DUTY_PP))
    {
        /* Assumes uniform use of the number of OpenMP threads */
        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntDefault));
        if (inputrec->bPull)
        {
            /* Initialize pull code */
            inputrec->pull_work =
                init_pull(fplog, inputrec->pull, inputrec,
                          &mtop, cr, inputrec->fepvals->init_lambda);
            if (EI_DYNAMICS(inputrec->eI) && MASTER(cr))
            {
                init_pull_output_files(inputrec->pull_work,
                                       continuationOptions);
            }
        }

        /* Initialize enforced rotation code */
        init_rot(fplog, inputrec, nfile, fnm, cr, globalState.get(), &mtop, oenv, mdrunOptions);

        /* Let makeConstraints know whether we have essential dynamics constraints.
         * TODO: inputrec should tell us whether we use an algorithm, not a file option or the checkpoint
         */
        bool doEssentialDynamics = (opt2fn_null("-ei", nfile, fnm) != nullptr || observablesHistory.edsamHistory);
        auto constr              = makeConstraints(mtop, *inputrec, doEssentialDynamics,
                                                   fplog, *mdAtoms->mdatoms(),
                                                   cr, *ms, nrnb, wcycle, fr->bMolPBC);

        if (DOMAINDECOMP(cr))
        {
            GMX_RELEASE_ASSERT(fr, "fr was NULL while cr->duty was DUTY_PP");
            /* This call is not included in init_domain_decomposition mainly
             * because fr->cginfo_mb is set later.
             */
            dd_init_bondeds(fplog, cr->dd, &mtop, vsite, inputrec,
                            domdecOptions.checkBondedInteractions,
        }
        /* Now do whatever the user wants us to do (how flexible...) */
        Integrator integrator {
            fplog, cr, ms, mdlog, nfile, fnm,
            vsite, constr.get(),
            mdModules->outputProvider(),
            &observablesHistory,
            mdAtoms.get(), nrnb, wcycle, fr,
        };
        integrator.run(inputrec->eI);

        finish_rot(inputrec->rot);

        if (inputrec->bPull)
        {
            finish_pull(inputrec->pull_work);
        }
    }
    else
    {
        GMX_RELEASE_ASSERT(pmedata, "pmedata was NULL while cr->duty was not DUTY_PP");

        walltime_accounting = walltime_accounting_init(gmx_omp_nthreads_get(emntPME));
        gmx_pmeonly(pmedata, cr, nrnb, wcycle, walltime_accounting, inputrec, pmeRunMode);
    }
    wallcycle_stop(wcycle, ewcRUN);

    /* Finish up, write some stuff
     * if rerunMD, don't write last frame again
     */
    finish_run(fplog, mdlog, cr,
               inputrec, nrnb, wcycle, walltime_accounting,
               fr ? fr->nbv : nullptr,
               EI_DYNAMICS(inputrec->eI) && !isMultiSim(ms));

    gmx_pme_destroy(pmedata);

    // FIXME: this is only here to manually unpin mdAtoms->chargeA_ and state->x,
    // before we destroy the GPU context(s) in free_gpu_resources().
    // Pinned buffers are associated with contexts in CUDA.
    // As soon as we destroy GPU contexts after mdrunner() exits, these lines should go.
    mdAtoms.reset(nullptr);
    globalState.reset(nullptr);
    mdModules.reset(nullptr);   // destruct force providers here as they might also use the GPU

    /* Free GPU memory and set a physical node tMPI barrier (which should eventually go away) */
    free_gpu_resources(fr, physicalNodeComm);
    free_gpu(nonbondedDeviceInfo);
    free_gpu(pmeDeviceInfo);
    done_forcerec(fr, mtop.molblock.size(), mtop.groups.grps[egcENER].nr);

    if (doMembed)
    {
        free_membed(membed);
    }

    gmx_hardware_info_free();

    /* Does what it says */
    print_date_and_time(fplog, cr->nodeid, "Finished mdrun", gmx_gettime());
    walltime_accounting_destroy(walltime_accounting);

    /* Close logfile already here if we were appending to it */
    if (MASTER(cr) && continuationOptions.appendFiles)
    {
        gmx_log_close(fplog);
    }

    int rc = (int)gmx_get_stop_condition();

    /* we need to join all threads. The sub-threads join when they
       exit this function, but the master thread needs to be told to
       wait for that. */
    if (PAR(cr) && MASTER(cr))