/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/bonded.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/ppforceworkload.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"
using gmx::ForceOutputs;
// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);
// Environment variable to enable GPU buffer ops, to allow incremental and optional
// introduction of this functionality.
// TODO: eventually tie this in with other existing GPU flags.
static const bool c_enableGpuBufOps = (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr);
static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int      end = forceToAdd.size();

    int gmx_unused nt  = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}
static void calc_virial(int start, int homenr, const rvec x[],
                        const gmx::ForceWithShiftForces &forceWithShiftForces,
                        tensor vir_part, const t_graph *graph, const matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    /* The short-range virial from surrounding boxes */
    const rvec *fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
    calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    const rvec *f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
static void pull_potential_wrapper(const t_commrec *cr,
                                   const t_inputrec *ir,
                                   const matrix box, gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial *force,
                                   const t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   pull_t *pull_work,
                                   const real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
static void pme_receive_force_ener(const t_commrec      *cr,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t       *enerd,
                                   gmx_wallcycle_t       wcycle)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE            *fp,
                               const t_mdatoms *md,
                               const t_commrec *cr,
                               int64_t          step,
                               real             forceTolerance,
                               const rvec      *x,
                               const rvec      *f)
{
    real       force2Tolerance = gmx::square(forceTolerance);
    gmx::index numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}
static void post_process_forces(const t_commrec      *cr,
                                int64_t               step,
                                t_nrnb               *nrnb,
                                gmx_wallcycle_t       wcycle,
                                const gmx_localtop_t *top,
                                const matrix          box,
                                const rvec            x[],
                                ForceOutputs         *forceOutputs,
                                tensor                vir_force,
                                const t_mdatoms      *mdatoms,
                                const t_graph        *graph,
                                const t_forcerec     *fr,
                                const gmx_vsite_t    *vsite,
                                int                   flags)
{
    rvec *f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());

    if (fr->haveDirectVirialContributions)
    {
        auto &forceWithVirial = forceOutputs->forceWithVirial();
        rvec *fDirectVir      = as_rvec_array(forceWithVirial.force_.data());

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            matrix virial = { { 0 } };
            spread_vsite_f(vsite, x, fDirectVir, nullptr,
                           (flags & GMX_FORCE_VIRIAL) != 0, virial,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(forceWithVirial.computeVirial_, "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec                       *fr,
                         const interaction_const_t        *ic,
                         gmx_enerdata_t                   *enerd,
                         const int                         flags,
                         const Nbnxm::InteractionLocality  ilocality,
                         const int                         clearF,
                         const int64_t                     step,
                         t_nrnb                           *nrnb,
                         gmx_wallcycle_t                   wcycle)
{
    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t *nbv = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
        }
    }

    nbv->dispatchNonbondedKernel(ilocality, *ic, flags, clearF, *fr, enerd, nrnb);
}
static inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled   += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
    }
    else
    {
        return 0;
    }
}
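/* Worked illustration of the estimate above (a restatement, not extra logic):
 * for a single temperature-coupled group (nrdfUncoupled = 0) the scaling
 * factor is 1 and the return value is the plain equipartition sum
 * 0.5*nrdf_g*kB*T_ref,g over coupled groups. When some degrees of freedom are
 * uncoupled, the coupled-group estimate is scaled up by
 * (nrdfCoupled + nrdfUncoupled)/nrdfCoupled to cover the whole system.
 */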
/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetics. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t               step,
                                         const gmx_enerdata_t &enerd,
                                         const t_inputrec     &inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool           energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real           averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow to catch large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite || (averageKineticEnergy > 0 &&
                              enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
    {
        gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping interactions in bonded interactions or very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, incorrect interactions or parameters in the topology.",
                  step,
                  enerd.term[F_EPOT],
                  energyIsNotFinite ? "not finite" : "extremely high",
                  enerd.term[F_LJ],
                  enerd.term[F_COUL_SR],
                  energyIsNotFinite ? "non-finite" : "very high",
                  energyIsNotFinite ? " or NaN" : "");
    }
}
/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool
haveSpecialForces(const t_inputrec *inputrec,
                  ForceProviders   *forceProviders,
                  const pull_t     *pull_work,
                  int               forceFlags,
                  const gmx_edsam  *ed)
{
    const bool computeForces = (forceFlags & GMX_FORCE_FORCES) != 0;

    return
        ((computeForces && forceProviders->hasForceProvider()) || // forceProviders
         (inputrec->bPull && pull_have_potential(pull_work)) ||   // pull
         inputrec->bRot ||                                        // enforced rotation
         (ed != nullptr) ||                                       // flooding
         (inputrec->bIMD && computeForces));                      // IMD
}
/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     imdSession       The IMD session
 * \param[in]     pull_work        The pull work structure.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     forceFlags       Flags that tell whether we should compute forces/energies/virial
 * \param[in,out] forceWithVirial  Force and virial buffers
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     bNS              Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove bNS, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void
computeSpecialForces(FILE                          *fplog,
                     const t_commrec               *cr,
                     const t_inputrec              *inputrec,
                     gmx::Awh                      *awh,
                     gmx_enfrot                    *enforcedRotation,
                     gmx::ImdSession               *imdSession,
                     pull_t                        *pull_work,
                     int64_t                        step,
                     double                         t,
                     gmx_wallcycle_t                wcycle,
                     ForceProviders                *forceProviders,
                     matrix                         box,
                     gmx::ArrayRef<const gmx::RVec> x,
                     const t_mdatoms               *mdatoms,
                     real                          *lambda,
                     int                            forceFlags,
                     gmx::ForceWithVirial          *forceWithVirial,
                     gmx_enerdata_t                *enerd,
                     gmx_edsam                     *ed,
                     gmx_bool                       bNS)
{
    const bool computeForces = (forceFlags & GMX_FORCE_FORCES) != 0;

    /* NOTE: Currently all ForceProviders only provide forces.
     *       When they also provide energies, remove this conditional.
     */
    if (computeForces)
    {
        gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec->bPull && pull_have_potential(pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               forceWithVirial,
                               mdatoms, enerd, pull_work, lambda, t,
                               wcycle);

        if (awh)
        {
            enerd->term[F_COM_PULL] +=
                awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
                                                  forceWithVirial,
                                                  t, step, wcycle, fplog);
        }
    }

    rvec *f = as_rvec_array(forceWithVirial->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, bNS);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec->bIMD && computeForces)
    {
        imdSession->applyForces(f);
    }
}
/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata       The PME structure
 * \param[in]  box           The box matrix
 * \param[in]  x             Coordinate array
 * \param[in]  flags         Force flags
 * \param[in]  pmeFlags      PME flags
 * \param[in]  wcycle        The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t      *pmedata,
                                      const matrix    box,
                                      const rvec      x[],
                                      int             flags,
                                      int             pmeFlags,
                                      gmx_wallcycle_t wcycle)
{
    pme_gpu_prepare_computation(pmedata, (flags & GMX_FORCE_DYNAMICBOX) != 0, box, wcycle, pmeFlags);
    pme_gpu_launch_spread(pmedata, x, wcycle);
}
/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata              The PME structure
 * \param[in]  wcycle               The wallcycle structure
 * \param[in]  useGpuFPmeReduction  Whether forces will be reduced on GPU
 */
static void launchPmeGpuFftAndGather(gmx_pme_t       *pmedata,
                                     gmx_wallcycle_t  wcycle,
                                     bool             useGpuFPmeReduction)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle);
    pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set, useGpuFPmeReduction);
}
/*! \brief
 *  Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls checking which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv              Nonbonded verlet structure
 * \param[in,out] pmedata          PME module data
 * \param[in,out] forceOutputs     Output buffer for the forces and virial
 * \param[in,out] enerd            Energy data structure results are reduced into
 * \param[in]     flags            Force flags
 * \param[in]     pmeFlags         PME flags
 * \param[in]     wcycle           The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t *nbv,
                                        gmx_pme_t          *pmedata,
                                        gmx::ForceOutputs  *forceOutputs,
                                        gmx_enerdata_t     *enerd,
                                        int                 flags,
                                        int                 pmeFlags,
                                        gmx_wallcycle_t     wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ForceWithShiftForces      &forceWithShiftForces = forceOutputs->forceWithShiftForces();
    gmx::ForceWithVirial           &forceWithVirial      = forceOutputs->forceWithVirial();

    gmx::ArrayRef<const gmx::RVec>  pmeGpuForces;

    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial, enerd, completionType);
        }

        if (!isNbGpuDone)
        {
            GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            wallcycle_start_nocount(wcycle, ewcWAIT_GPU_NB_L);
            isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
                                                     flags,
                                                     Nbnxm::AtomLocality::Local,
                                                     enerd->grpp.ener[egLJSR].data(),
                                                     enerd->grpp.ener[egCOULSR].data(),
                                                     forceWithShiftForces.shiftForces(), completionType);
            wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
            // To get the call count right, when the task finished we
            // issue a start/stop.
            // TODO: move the ewcWAIT_GPU_NB_L cycle counting into nbnxn_gpu_try_finish_task()
            // and ewcNB_XF_BUF_OPS counting into nbnxn_atomdata_add_nbat_f_to_f().
            if (isNbGpuDone)
            {
                wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
                wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

                nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
                                              forceWithShiftForces.force());
            }
        }
    }
}
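/* Note on the pattern above (a restatement of the code, not new behavior):
 * GpuTaskCompletion::Check polls and returns immediately with the completion
 * status, while GpuTaskCompletion::Wait blocks until the task is done. As soon
 * as one task has completed, the other task is queried in blocking Wait mode,
 * since there is no remaining reduction work left to overlap with.
 */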
/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in]  fr        force record pointer
 * \param[in]  pull_work The pull work object.
 * \param[in]  inputrec  input record
 * \param[in]  force     force array
 * \param[in]  bDoForces True if forces are computed this step
 * \param[in]  doVirial  True if virial is computed this step
 * \param[out] wcycle    wallcycle recording structure
 *
 * \returns              Cleared force output structure
 */
static ForceOutputs
setupForceOutputs(t_forcerec                          *fr,
                  pull_t                              *pull_work,
                  const t_inputrec                    &inputrec,
                  gmx::ArrayRefWithPadding<gmx::RVec>  force,
                  const bool                           bDoForces,
                  const bool                           doVirial,
                  gmx_wallcycle_t                      wcycle)
{
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(force, doVirial, fr->shiftForces);

    if (bDoForces)
    {
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr,
                        as_rvec_array(forceWithShiftForces.force().data()));
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer = (bDoForces && (doVirial && fr->haveDirectVirialContributions));

    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ?
                                         fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
                                         doVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
    }

    if (inputrec.bPull && pull_have_constraint(pull_work))
    {
        clear_pull_forces(pull_work);
    }

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(forceWithShiftForces, forceWithVirial);
}
/*! \brief Set up flags that indicate what type of work is there to compute.
 *
 * Currently we only update it at search steps,
 * but some properties may change more frequently (e.g. virial/non-virial step),
 * so when including those either the frequency of update (per-step) or the scope
 * of a flag will change (i.e. a set of flags for nstlist steps).
 */
static void
setupForceWorkload(gmx::PpForceWorkload *forceWork,
                   const t_inputrec     *inputrec,
                   const t_forcerec     *fr,
                   const pull_t         *pull_work,
                   const gmx_edsam      *ed,
                   const t_idef         &idef,
                   const t_fcdata       *fcd,
                   const int             forceFlags)
{
    forceWork->haveSpecialForces      = haveSpecialForces(inputrec, fr->forceProviders, pull_work, forceFlags, ed);
    forceWork->haveCpuBondedWork      = haveCpuBondeds(*fr);
    forceWork->haveGpuBondedWork      = ((fr->gpuBonded != nullptr) && fr->gpuBonded->haveInteractions());
    forceWork->haveRestraintsWork     = havePositionRestraints(idef, *fcd);
    forceWork->haveCpuListedForceWork = haveCpuListedForces(*fr, idef, *fcd);
}
/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate the \p useGpuNonbonded and \p useGpuPme when these are
 * incorporated in PpForceWorkload.
 */
static void
launchGpuEndOfStepTasks(nonbonded_verlet_t         *nbv,
                        gmx::GpuBonded             *gpuBonded,
                        gmx_pme_t                  *pmedata,
                        gmx_enerdata_t             *enerd,
                        const gmx::PpForceWorkload &forceWorkload,
                        bool                        useGpuNonbonded,
                        bool                        useGpuPme,
                        int64_t                     step,
                        int                         flags,
                        gmx_wallcycle_t             wcycle)
{
    if (useGpuNonbonded)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * pruning.
         */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, flags);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPme)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (forceWorkload.haveGpuBondedWork && (flags & GMX_FORCE_ENERGY))
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // simpler code
        gpuBonded->waitAccumulateEnergyTerms(enerd);

        gpuBonded->clearEnergies();
    }
}
void do_force(FILE                                *fplog,
              const t_commrec                     *cr,
              const gmx_multisim_t                *ms,
              const t_inputrec                    *inputrec,
              gmx::Awh                            *awh,
              gmx_enfrot                          *enforcedRotation,
              gmx::ImdSession                     *imdSession,
              pull_t                              *pull_work,
              int64_t                              step,
              t_nrnb                              *nrnb,
              gmx_wallcycle_t                      wcycle,
              const gmx_localtop_t                *top,
              const matrix                         box,
              gmx::ArrayRefWithPadding<gmx::RVec>  x,     //NOLINT(performance-unnecessary-value-param)
              history_t                           *hist,
              gmx::ArrayRefWithPadding<gmx::RVec>  force, //NOLINT(performance-unnecessary-value-param)
              tensor                               vir_force,
              const t_mdatoms                     *mdatoms,
              gmx_enerdata_t                      *enerd,
              t_fcdata                            *fcd,
              gmx::ArrayRef<real>                  lambda,
              t_graph                             *graph,
              t_forcerec                          *fr,
              gmx::PpForceWorkload                *ppForceWorkload,
              const gmx_vsite_t                   *vsite,
              rvec                                 mu_tot,
              double                               t,
              gmx_edsam                           *ed,
              int                                  flags,
              const DDBalanceRegionHandler        &ddBalanceRegionHandler)
{
    int                  i, j;
    double               mu[2*DIM];
    gmx_bool             bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool             bDoForces, bUseGPU, bUseOrEmulGPU;
    nonbonded_verlet_t  *nbv = fr->nbv.get();
    interaction_const_t *ic  = fr->ic;

    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }

    bStateChanged = ((flags & GMX_FORCE_STATECHANGED) != 0);
    bNS           = ((flags & GMX_FORCE_NS) != 0);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = ((flags & GMX_FORCE_FORCES) != 0);
    bUseGPU       = fr->nbv->useGpu();
    bUseOrEmulGPU = bUseGPU || fr->nbv->emulateGpu();

    const auto pmeRunMode = fr->pmedata ? pme_run_mode(fr->pmedata) : PmeRunMode::CPU;
    // TODO slim this conditional down - inputrec and duty checks should mean the same in proper code!
    const bool useGpuPme  = EEL_PME(fr->ic->eeltype) && thisRankHasDuty(cr, DUTY_PME) &&
        ((pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed));
    const int  pmeFlags   = GMX_PME_SPREAD | GMX_PME_SOLVE |
        ((flags & GMX_FORCE_VIRIAL) ? GMX_PME_CALC_ENER_VIR : 0) |
        ((flags & GMX_FORCE_ENERGY) ? GMX_PME_CALC_ENER_VIR : 0) |
        ((flags & GMX_FORCE_FORCES) ? GMX_PME_CALC_F : 0);
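    // For example (a restatement of the expression above): on a force-only
    // step (GMX_FORCE_FORCES set, no energy or virial requested), pmeFlags
    // evaluates to GMX_PME_SPREAD | GMX_PME_SOLVE | GMX_PME_CALC_F.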
    // Switches on whether to use GPU for position and force buffer operations
    // TODO consider all possible combinations of triggers, and how to combine optimally in each case.
    const BufferOpsUseGpu useGpuXBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA)) ?
        BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // GPU Force buffer ops are disabled on virial steps, because the virial calc is not yet ported to GPU
    const BufferOpsUseGpu useGpuFBufOps = ((c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
                                           && !(flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY))) ?
        BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // TODO: move / add this flag to the internal PME GPU data structures
    const bool useGpuFPmeReduction = (useGpuFBufOps == BufferOpsUseGpu::True) &&
        thisRankHasDuty(cr, DUTY_PME) && useGpuPme; // only supported if this rank is performing PME on the GPU
    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
    if (bNS)
    {
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    }

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (bStateChanged)
    {
        if (inputrecNeedMutot(inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr), gmx_omp_nthreads_get(emntDefault));
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }
    }

    nbnxn_atomdata_copy_shiftvec((flags & GMX_FORCE_DYNAMICBOX) != 0,
                                 fr->shift_vec, nbv->nbat.get());

    if (!thisRankHasDuty(cr, DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        gmx_pme_send_coordinates(cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)) != 0,
                                 step, wcycle);
    }

    if (useGpuPme)
    {
        launchPmeGpuSpread(fr->pmedata, box, as_rvec_array(x.unpaddedArrayRef().data()), flags, pmeFlags, wcycle);
    }
    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }

        // TODO
        // - vzero is constant, do we need to pass it?
        // - box_diag should be passed directly to nbnxn_put_on_grid
        //
        rvec vzero;
        clear_rvec(vzero);

        rvec box_diag;
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv, box,
                              0, vzero, box_diag,
                              nullptr, 0, mdatoms->homenr, -1,
                              fr->cginfo, x.unpaddedArrayRef(),
                              0, nullptr);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
                                       fr->cginfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        nbv->setAtomProperties(*mdatoms, fr->cginfo);

        wallcycle_stop(wcycle, ewcNS);

        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (bUseGPU)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);

            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            if (fr->gpuBonded)
            {
                /* Now we put all atoms on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
                                                                      top->idef,
                                                                      Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_f(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            }

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }

        // Need to run after the GPU-offload bonded interaction lists
        // are set up to be able to determine whether there is bonded work.
        setupForceWorkload(ppForceWorkload,
                           inputrec,
                           fr,
                           pull_work,
                           ed,
                           top->idef,
                           fcd,
                           flags);
    }
    /* do local pair search */
    if (bNS)
    {
        // TODO: fuse this branch with the above bNS block
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(Nbnxm::InteractionLocality::Local,
                               &top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::Local);

        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
        wallcycle_stop(wcycle, ewcNS);

        if (useGpuXBufOps == BufferOpsUseGpu::True)
        {
            nbv->atomdata_init_copy_x_to_nbat_x_gpu();
        }
        // For force buffer ops, we use the below condition rather than
        // useGpuFBufOps to ensure that init is performed even if this
        // NS step is also a virial step (on which f buf ops are deactivated).
        if (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
        {
            nbv->atomdata_init_add_nbat_f_to_f_gpu();
        }
    }
    else if (!EI_TPI(inputrec->eI))
    {
        nbv->setCoordinates(Nbnxm::AtomLocality::Local, false,
                            x.unpaddedArrayRef(), useGpuXBufOps, pme_gpu_get_device_x(fr->pmedata));
    }
    if (bUseGPU)
    {
        ddBalanceRegionHandler.openBeforeForceComputationGpu();

        wallcycle_start(wcycle, ewcLAUNCH_GPU);

        wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
        if (bNS || (useGpuXBufOps == BufferOpsUseGpu::False))
        {
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                      Nbnxm::AtomLocality::Local);
        }
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        // with X buffer ops offloaded to the GPU on all but the search steps

        // bonded work not split into separate local and non-local, so with DD
        // we can only launch the kernel after non-local coordinates have been received.
        if (ppForceWorkload->haveGpuBondedWork && !havePPDomainDecomposition(cr))
        {
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
            fr->gpuBonded->launchKernel(fr, flags, box);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
        }

        /* launch local nonbonded work on GPU */
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local, enbvClearFNo,
                     step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    if (useGpuPme)
    {
        // In PME GPU and mixed mode we launch FFT / gather after the
        // X copy/transform to allow overlap as well as after the GPU NB
        // launch to avoid FFT launch overhead hijacking the CPU and delaying
        // the nonbonded kernel.
        launchPmeGpuFftAndGather(fr->pmedata, wcycle, useGpuFPmeReduction);
    }
    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (havePPDomainDecomposition(cr))
    {
        if (bNS)
        {
            // TODO: fuse this branch with the above large bNS block
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
            nbv->constructPairlist(Nbnxm::InteractionLocality::NonLocal,
                                   &top->excls, step, nrnb);

            nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::NonLocal);
            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);

            nbv->setCoordinates(Nbnxm::AtomLocality::NonLocal, false,
                                x.unpaddedArrayRef(), useGpuXBufOps, pme_gpu_get_device_x(fr->pmedata));
        }

        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU);

            if (bNS || (useGpuXBufOps == BufferOpsUseGpu::False))
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
                Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                          Nbnxm::AtomLocality::NonLocal);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            }

            if (ppForceWorkload->haveGpuBondedWork)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
                fr->gpuBonded->launchKernel(fr, flags, box);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
            }

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }
    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        bool copyBackNbForce = (useGpuFBufOps == BufferOpsUseGpu::False);

        if (havePPDomainDecomposition(cr))
        {
            Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                      flags, Nbnxm::AtomLocality::NonLocal, copyBackNbForce);
        }
        Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
                                  flags, Nbnxm::AtomLocality::Local, copyBackNbForce);

        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        if (ppForceWorkload->haveGpuBondedWork && (flags & GMX_FORCE_ENERGY))
        {
            fr->gpuBonded->launchEnergyTransfer();
        }
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    if (bStateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);

            ddBalanceRegionHandler.reopenRegionCpu();
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }

        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                    lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }
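    /* The interpolation above implements, per component,
     *   mu_tot = (1 - lambda_coul)*mu_A + lambda_coul*mu_B,
     * i.e. with free energy in use the total dipole is interpolated linearly
     * in the coulomb lambda between the A and B topology states.
     */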
    /* Reset energies */
    reset_enerdata(enerd);
    /* Clear the shift forces */
    // TODO: This should be linked to the shift force buffer in use, or cleared before use instead
    for (gmx::RVec &elem : fr->shiftForces)
    {
        elem = { 0.0_real, 0.0_real, 0.0_real };
    }

    if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * Note that a different counter is used for dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    // Set up and clear force outputs.
    // We use std::move to keep the compiler happy, it has no effect.
    ForceOutputs forceOut = setupForceOutputs(fr, pull_work, *inputrec, std::move(force), bDoForces,
                                              ((flags & GMX_FORCE_VIRIAL) != 0), wcycle);
    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local, enbvClearFYes,
                     step, nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::Local,
                                      fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
                                      inputrec->fepvals, lambda.data(),
                                      enerd, flags, nrnb);

        if (havePPDomainDecomposition(cr))
        {
            nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::NonLocal,
                                          fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
                                          inputrec->fepvals, lambda.data(),
                                          enerd, flags, nrnb);
        }
    }

    if (!bUseOrEmulGPU)
    {
        if (havePPDomainDecomposition(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
        }

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        wallcycle_stop(wcycle, ewcFORCE);
        nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::All, forceOut.forceWithShiftForces().force());
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* If there are multiple fshift output buffers we need to reduce them */
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                     forceOut.forceWithShiftForces().shiftForces());
        }
    }
    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, ms, nrnb, wcycle, mdatoms,
                      x, hist, &forceOut, enerd, fcd,
                      box, lambda.data(), graph, fr->mu_tot,
                      flags,
                      ddBalanceRegionHandler);

    wallcycle_stop(wcycle, ewcFORCE);

    computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
                         imdSession, pull_work, step, t, wcycle,
                         fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
                         flags, &forceOut.forceWithVirial(), enerd,
                         ed, bNS);

    bool useCpuFPmeReduction = thisRankHasDuty(cr, DUTY_PME) && !useGpuFPmeReduction;
    bool haveCpuForces       = (ppForceWorkload->haveSpecialForces || ppForceWorkload->haveCpuListedForceWork || useCpuFPmeReduction);
    // Will store the amount of cycles spent waiting for the GPU that
    // will be later used in the DLB accounting.
    float cycles_wait_gpu = 0;
    if (bUseOrEmulGPU)
    {
        auto &forceWithShiftForces = forceOut.forceWithShiftForces();
        rvec *f                    = as_rvec_array(forceWithShiftForces.force().data());

        /* wait for non-local forces (or calculate in emulation mode) */
        if (havePPDomainDecomposition(cr))
        {
            if (bUseGPU)
            {
                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                            flags, Nbnxm::AtomLocality::NonLocal,
                                            enerd->grpp.ener[egLJSR].data(),
                                            enerd->grpp.ener[egCOULSR].data(),
                                            forceWithShiftForces.shiftForces());
                cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFYes,
                             step, nrnb, wcycle);
                wallcycle_stop(wcycle, ewcFORCE);
            }

            if (useGpuFBufOps == BufferOpsUseGpu::True && haveCpuForces)
            {
                nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::NonLocal);
            }

            // flag to specify if forces should be accumulated in force buffer
            // ops. For non-local part, this just depends on whether CPU forces are present.
            bool accumulateForce = (useGpuFBufOps == BufferOpsUseGpu::True) && haveCpuForces;
            nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::NonLocal,
                                          forceWithShiftForces.force(), pme_gpu_get_device_f(fr->pmedata),
                                          pme_gpu_get_f_ready_synchronizer(fr->pmedata),
                                          useGpuFBufOps, useGpuFPmeReduction, accumulateForce);
            if (useGpuFBufOps == BufferOpsUseGpu::True)
            {
                nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::NonLocal);
            }

            if (fr->nbv->emulateGpu() && (flags & GMX_FORCE_VIRIAL))
            {
                nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
                                                         forceWithShiftForces.shiftForces());
            }
        }
    }

    if (havePPDomainDecomposition(cr))
    {
        /* We are done with the CPU compute.
         * We will now communicate the non-local forces.
         * If we use a GPU this will overlap with GPU work, so in that case
         * we do not close the DD force balancing region here.
         */
        ddBalanceRegionHandler.closeAfterForceComputationCpu();

        if (bDoForces)
        {
            if (useGpuFBufOps == BufferOpsUseGpu::True)
            {
                nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::NonLocal);
            }
            dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
        }
    }
    // With both nonbonded and PME offloaded to a GPU on the same rank, we use
    // an alternating wait/reduction scheme.
    bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPme && bUseGPU && !DOMAINDECOMP(cr) &&
                             (useGpuFBufOps == BufferOpsUseGpu::False));
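    // For instance (a restatement of the condition above): a single-rank run
    // with both the nonbonded and PME tasks offloaded to the same GPU, with
    // GPU force buffer ops disabled, takes the alternating path; any
    // domain-decomposition run does not.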
    if (alternateGpuWait)
    {
        alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd,
                                    flags, pmeFlags, wcycle);
    }

    if (!alternateGpuWait && useGpuPme)
    {
        pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd, useGpuFPmeReduction);
    }

    /* Wait for local GPU NB outputs on the non-alternating wait path */
    if (!alternateGpuWait && bUseGPU)
    {
        /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
         * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
         * but even with a step of 0.1 ms the difference is less than 1%
         * of the step time.
         */
        const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */

        wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
        Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
                                    flags, Nbnxm::AtomLocality::Local,
                                    enerd->grpp.ener[egLJSR].data(),
                                    enerd->grpp.ener[egCOULSR].data(),
                                    forceOut.forceWithShiftForces().shiftForces());
        float cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

        if (ddBalanceRegionHandler.useBalancingRegion())
        {
            DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
            if (bDoForces && cycles_tmp <= gpuWaitApiOverheadMargin)
            {
                /* We measured few cycles, it could be that the kernel
                 * and transfer finished earlier and there was no actual
                 * wait time, only API call overhead.
                 * Then the actual time could be anywhere between 0 and
                 * cycles_wait_est. We will use half of cycles_wait_est.
                 */
                waitedForGpu = DdBalanceRegionWaitedForGpu::no;
            }
            ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
        }
    }
    if (fr->nbv->emulateGpu())
    {
        // NOTE: emulation kernel is not included in the balancing region,
        // but emulation mode does not target performance anyway
        wallcycle_start_nocount(wcycle, ewcFORCE);
        do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local,
                     DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                     step, nrnb, wcycle);
        wallcycle_stop(wcycle, ewcFORCE);
    }

    /* Do the nonbonded GPU (or emulation) force buffer reduction
     * on the non-alternating path. */
    if (bUseOrEmulGPU && !alternateGpuWait)
    {
        gmx::ArrayRef<gmx::RVec> force = forceOut.forceWithShiftForces().force();
        rvec                    *f     = as_rvec_array(force.data());

        // TODO: move these steps as early as possible:
        // - CPU f H2D should be as soon as all CPU-side forces are done
        // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
        //   before the next CPU task that consumes the forces: vsite spread or update)
        if (useGpuFBufOps == BufferOpsUseGpu::True && (haveCpuForces || DOMAINDECOMP(cr)))
        {
            nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::Local);
        }
        // flag to specify if forces should be accumulated in force
        // buffer ops. For local part, this depends on whether CPU
        // forces are present, or if DD is active (in which case the
        // halo exchange has resulted in contributions from the
        // non-local part).
        bool accumulateForce = (useGpuFBufOps == BufferOpsUseGpu::True) &&
            (haveCpuForces || DOMAINDECOMP(cr));
        nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
                                      force, pme_gpu_get_device_f(fr->pmedata),
                                      pme_gpu_get_f_ready_synchronizer(fr->pmedata),
                                      useGpuFBufOps, useGpuFPmeReduction, accumulateForce);
        if (useGpuFBufOps == BufferOpsUseGpu::True)
        {
            nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::Local);
            nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::Local);
        }
    }
    launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd,
                            *ppForceWorkload,
                            bUseGPU, useGpuPme,
                            step,
                            flags,
                            wcycle);

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
    }
    if (bDoForces)
    {
        rvec *f = as_rvec_array(forceOut.forceWithShiftForces().force().data());

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=forceOut.f later.
         */
        if (vsite && !(fr->haveDirectVirialContributions && !(flags & GMX_FORCE_VIRIAL)))
        {
            rvec *fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
            spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
                        forceOut.forceWithShiftForces(),
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }
    if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, &forceOut.forceWithVirial(), enerd, wcycle);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut,
                            vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(step, *enerd, *inputrec);
        }
    }

    /* In case we don't have constraints and are using GPUs, the next balancing
     * region starts here.
     * Some "special" work at the end of do_force_cuts?, such as vsite spread,
     * virial calculation and COM pulling, is thus not included in
     * the balance timing, which is ok as most tasks do communication.
     */
    ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
}