Remove gmx custom fixed int (e.g. gmx_int64_t) types
[gromacs.git] / src/gromacs/mdlib/update.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "update.h"

#include <stdio.h>

#include <cmath>

#include <algorithm>

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/paddedvector.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/boxdeformation.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/sim_util.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/boxutilities.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/random/tabulatednormaldistribution.h"
#include "gromacs/random/threefry.h"
#include "gromacs/simd/simd.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/atoms.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace

typedef struct {
    double em;
} gmx_sd_const_t;

typedef struct {
    real V;
} gmx_sd_sigma_t;

typedef struct {
    /* BD stuff */
    real           *bd_rf;
    /* SD stuff */
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sdsig;
    /* andersen temperature control stuff */
    gmx_bool       *randomize_group;
    real           *boltzfac;
} gmx_stochd_t;
struct gmx_update_t
{
    gmx_stochd_t *sd;
    /* xprime for constraint algorithms */
    PaddedRVecVector xp;

    /* Variables for the deform algorithm */
    int64_t deformref_step;
    matrix  deformref_box;

    //! Box deformation handler (or nullptr if inactive).
    gmx::BoxDeformation *deform;
};
static bool isTemperatureCouplingStep(int64_t step, const t_inputrec *ir)
{
    /* We should only couple after a step where energies were determined (for leapfrog versions)
       or where the step energies are determined, for velocity verlet versions */
    int offset;
    if (EI_VV(ir->eI))
    {
        offset = 0;
    }
    else
    {
        offset = 1;
    }
    return ir->etc != etcNO &&
           (ir->nsttcouple == 1 ||
            do_per_step(step + ir->nsttcouple - offset, ir->nsttcouple));
}
static bool isPressureCouplingStep(int64_t step, const t_inputrec *ir)
{
    GMX_ASSERT(ir->epc != epcMTTK, "MTTK pressure coupling is not handled here");

    int offset;
    if (ir->epc == epcBERENDSEN)
    {
        offset = 0;
    }
    else
    {
        offset = 1;
    }
    /* We should only couple after a step where pressures were determined */
    return ir->epc != epcNO &&
           (ir->nstpcouple == 1 ||
            do_per_step(step + ir->nstpcouple - offset, ir->nstpcouple));
}
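
/* Worked example of the offset logic above, assuming do_per_step(step, n)
 * tests step % n == 0: with leap-frog (offset = 1) and nstpcouple = 10,
 * coupling is done on steps 1, 11, 21, ..., i.e. one step after the
 * pressures were determined, while with offset = 0 it is done on
 * steps 0, 10, 20, ...
 */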
/*! \brief Sets the velocities of virtual sites to zero */
static void clearVsiteVelocities(int                   start,
                                 int                   nrend,
                                 const unsigned short *particleType,
                                 rvec * gmx_restrict   v)
{
    for (int a = start; a < nrend; a++)
    {
        if (particleType[a] == eptVSite)
        {
            clear_rvec(v[a]);
        }
    }
}
/*! \brief Sets the number of different temperature coupling values */
enum class NumTempScaleValues
{
    single,   //!< Single T-scaling value (either one group or all values =1)
    multiple  //!< Multiple T-scaling values, need to use T-group indices
};

/*! \brief Sets whether to apply no or diagonal Parrinello-Rahman pressure scaling
 *
 * Note that this enum is only used in updateMDLeapfrogSimple(), which does
 * not handle fully anisotropic Parrinello-Rahman scaling, so we only have
 * options \p no and \p diagonal here and no anisotropic option.
 */
enum class ApplyParrinelloRahmanVScaling
{
    no,       //!< Do not apply velocity scaling (not a PR-coupling run or step)
    diagonal  //!< Apply velocity scaling using a diagonal matrix
};
/*! \brief Integrate using leap-frog with T-scaling and optionally diagonal Parrinello-Rahman p-coupling
 *
 * \tparam       numTempScaleValues     The number of different T-couple values
 * \tparam       applyPRVScaling        Apply Parrinello-Rahman velocity scaling
 * \param[in]    start                  Index of first atom to update
 * \param[in]    nrend                  Last atom to update: \p nrend - 1
 * \param[in]    dt                     The time step
 * \param[in]    dtPressureCouple       Time step for pressure coupling
 * \param[in]    invMassPerDim          1/mass per atom and dimension
 * \param[in]    tcstat                 Temperature coupling information
 * \param[in]    cTC                    T-coupling group index per atom
 * \param[in]    pRVScaleMatrixDiagonal Parrinello-Rahman v-scale matrix diagonal
 * \param[in]    x                      Input coordinates
 * \param[out]   xprime                 Updated coordinates
 * \param[inout] v                      Velocities
 * \param[in]    f                      Forces
 *
 * We expect this template to get good SIMD acceleration by most compilers,
 * unlike the more complex general template.
 * Note that we might get even better SIMD acceleration when we introduce
 * aligned (and padded) memory, possibly with some hints for the compilers.
 */
template<NumTempScaleValues            numTempScaleValues,
         ApplyParrinelloRahmanVScaling applyPRVScaling>
static void
updateMDLeapfrogSimple(int                       start,
                       int                       nrend,
                       real                      dt,
                       real                      dtPressureCouple,
                       const rvec * gmx_restrict invMassPerDim,
                       const t_grp_tcstat      * tcstat,
                       const unsigned short    * cTC,
                       const rvec                pRVScaleMatrixDiagonal,
                       const rvec * gmx_restrict x,
                       rvec       * gmx_restrict xprime,
                       rvec       * gmx_restrict v,
                       const rvec * gmx_restrict f)
{
    real lambdaGroup;

    if (numTempScaleValues == NumTempScaleValues::single)
    {
        lambdaGroup = tcstat[0].lambda;
    }

    for (int a = start; a < nrend; a++)
    {
        if (numTempScaleValues == NumTempScaleValues::multiple)
        {
            lambdaGroup = tcstat[cTC[a]].lambda;
        }

        for (int d = 0; d < DIM; d++)
        {
            /* Note that using rvec invMassPerDim results in more efficient
             * SIMD code, but this increases the cache pressure.
             * For large systems with PME on the CPU this slows down the
             * (then already slow) update by 20%. If all data remains in cache,
             * using rvec is much faster.
             */
            real vNew = lambdaGroup*v[a][d] + f[a][d]*invMassPerDim[a][d]*dt;

            if (applyPRVScaling == ApplyParrinelloRahmanVScaling::diagonal)
            {
                vNew -= dtPressureCouple*pRVScaleMatrixDiagonal[d]*v[a][d];
            }
            v[a][d]      = vNew;
            xprime[a][d] = x[a][d] + vNew*dt;
        }
    }
}
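
/* Illustration of the update above: per degree of freedom the simple
 * leap-frog scheme with temperature scaling factor lambda reads
 *     v(t+dt/2) = lambda*v(t-dt/2) + f(t)/m * dt
 *     x(t+dt)   = x(t) + v(t+dt/2)*dt
 * and the optional diagonal Parrinello-Rahman term additionally
 * subtracts dtPressureCouple*M[d][d]*v from the new velocity.
 */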
#if GMX_SIMD && GMX_SIMD_HAVE_REAL
#define GMX_HAVE_SIMD_UPDATE 1
#else
#define GMX_HAVE_SIMD_UPDATE 0
#endif
#if GMX_HAVE_SIMD_UPDATE

/*! \brief Load (aligned) the contents of GMX_SIMD_REAL_WIDTH rvec elements sequentially into 3 SIMD registers
 *
 * The loaded output is:
 * \p r0: { r[index][XX], r[index][YY], ... }
 * \p r1: { ... }
 * \p r2: { ..., r[index+GMX_SIMD_REAL_WIDTH-1][YY], r[index+GMX_SIMD_REAL_WIDTH-1][ZZ] }
 *
 * \param[in]  r      Pointer to an rvec array, has to be aligned to SIMD register width
 * \param[in]  index  Index of the first rvec triplet of reals to load
 * \param[out] r0     Pointer to first SIMD register
 * \param[out] r1     Pointer to second SIMD register
 * \param[out] r2     Pointer to third SIMD register
 */
static inline void simdLoadRvecs(const rvec *r,
                                 int         index,
                                 SimdReal   *r0,
                                 SimdReal   *r1,
                                 SimdReal   *r2)
{
    const real *realPtr = r[index];

    GMX_ASSERT(isSimdAligned(realPtr), "Pointer should be SIMD aligned");

    *r0 = simdLoad(realPtr + 0*GMX_SIMD_REAL_WIDTH);
    *r1 = simdLoad(realPtr + 1*GMX_SIMD_REAL_WIDTH);
    *r2 = simdLoad(realPtr + 2*GMX_SIMD_REAL_WIDTH);
}
/*! \brief Store (aligned) 3 SIMD registers sequentially to GMX_SIMD_REAL_WIDTH rvec elements
 *
 * The stored output is:
 * \p r[index] = { { r0[0], r0[1], ... }
 * ...
 * \p r[index+GMX_SIMD_REAL_WIDTH-1] = { ..., r2[GMX_SIMD_REAL_WIDTH-2], r2[GMX_SIMD_REAL_WIDTH-1] }
 *
 * \param[out] r      Pointer to an rvec array, has to be aligned to SIMD register width
 * \param[in]  index  Index of the first rvec triplet of reals to store to
 * \param[in]  r0     First SIMD register
 * \param[in]  r1     Second SIMD register
 * \param[in]  r2     Third SIMD register
 */
static inline void simdStoreRvecs(rvec    *r,
                                  int      index,
                                  SimdReal r0,
                                  SimdReal r1,
                                  SimdReal r2)
{
    real *realPtr = r[index];

    GMX_ASSERT(isSimdAligned(realPtr), "Pointer should be SIMD aligned");

    store(realPtr + 0*GMX_SIMD_REAL_WIDTH, r0);
    store(realPtr + 1*GMX_SIMD_REAL_WIDTH, r1);
    store(realPtr + 2*GMX_SIMD_REAL_WIDTH, r2);
}
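
/* Layout illustration for GMX_SIMD_REAL_WIDTH = 4: the 4 rvecs starting
 * at r[index] are 12 consecutive reals, so the three registers hold
 *     r0 = { x0, y0, z0, x1 }
 *     r1 = { y1, z1, x2, y2 }
 *     r2 = { z2, x3, y3, z3 }
 * i.e. plain sequential loads/stores with no shuffling needed.
 */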
/*! \brief Integrate using leap-frog with single group T-scaling and SIMD
 *
 * \param[in]    start   Index of first atom to update
 * \param[in]    nrend   Last atom to update: \p nrend - 1
 * \param[in]    dt      The time step
 * \param[in]    invMass 1/mass per atom
 * \param[in]    tcstat  Temperature coupling information
 * \param[in]    x       Input coordinates
 * \param[out]   xprime  Updated coordinates
 * \param[inout] v       Velocities
 * \param[in]    f       Forces
 */
static void
updateMDLeapfrogSimpleSimd(int                       start,
                           int                       nrend,
                           real                      dt,
                           const real * gmx_restrict invMass,
                           const t_grp_tcstat      * tcstat,
                           const rvec * gmx_restrict x,
                           rvec       * gmx_restrict xprime,
                           rvec       * gmx_restrict v,
                           const rvec * gmx_restrict f)
{
    SimdReal timestep(dt);
    SimdReal lambdaSystem(tcstat[0].lambda);

    /* We declare variables here, since code is often slower when declaring them inside the loop */

    /* Note: We should implement a proper PaddedVector, so we don't need this check */
    GMX_ASSERT(isSimdAligned(invMass), "invMass should be aligned");

    for (int a = start; a < nrend; a += GMX_SIMD_REAL_WIDTH)
    {
        SimdReal invMass0, invMass1, invMass2;
        expandScalarsToTriplets(simdLoad(invMass + a),
                                &invMass0, &invMass1, &invMass2);

        SimdReal v0, v1, v2;
        SimdReal f0, f1, f2;
        simdLoadRvecs(v, a, &v0, &v1, &v2);
        simdLoadRvecs(f, a, &f0, &f1, &f2);

        v0 = fma(f0*invMass0, timestep, lambdaSystem*v0);
        v1 = fma(f1*invMass1, timestep, lambdaSystem*v1);
        v2 = fma(f2*invMass2, timestep, lambdaSystem*v2);

        simdStoreRvecs(v, a, v0, v1, v2);

        SimdReal x0, x1, x2;
        simdLoadRvecs(x, a, &x0, &x1, &x2);

        SimdReal xprime0 = fma(v0, timestep, x0);
        SimdReal xprime1 = fma(v1, timestep, x1);
        SimdReal xprime2 = fma(v2, timestep, x2);

        simdStoreRvecs(xprime, a, xprime0, xprime1, xprime2);
    }
}
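
/* Schematic illustration of expandScalarsToTriplets for
 * GMX_SIMD_REAL_WIDTH = 4: the per-atom scalars { m0, m1, m2, m3 } are
 * expanded to
 *     { m0, m0, m0, m1 }, { m1, m1, m2, m2 }, { m2, m3, m3, m3 }
 * which matches the x/y/z interleaving produced by simdLoadRvecs above,
 * so each force component is multiplied by the inverse mass of its atom.
 */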
#endif // GMX_HAVE_SIMD_UPDATE

/*! \brief Sets the NEMD acceleration type */
enum class AccelerationType
{
    none, group, cosine
};
/*! \brief Integrate using leap-frog with support for everything
 *
 * \tparam       accelerationType Type of NEMD acceleration
 * \param[in]    start            Index of first atom to update
 * \param[in]    nrend            Last atom to update: \p nrend - 1
 * \param[in]    doNoseHoover     If to apply Nose-Hoover T-coupling
 * \param[in]    dt               The time step
 * \param[in]    dtPressureCouple Time step for pressure coupling, is 0 when no pressure coupling should be applied at this step
 * \param[in]    ir               The input parameter record
 * \param[in]    md               Atom properties
 * \param[in]    ekind            Kinetic energy data
 * \param[in]    box              The box dimensions
 * \param[in]    x                Input coordinates
 * \param[out]   xprime           Updated coordinates
 * \param[inout] v                Velocities
 * \param[in]    f                Forces
 * \param[in]    nh_vxi           Nose-Hoover velocity scaling factors
 * \param[in]    M                Parrinello-Rahman scaling matrix
 */
template<AccelerationType accelerationType>
static void
updateMDLeapfrogGeneral(int                         start,
                        int                         nrend,
                        bool                        doNoseHoover,
                        real                        dt,
                        real                        dtPressureCouple,
                        const t_inputrec          * ir,
                        const t_mdatoms           * md,
                        const gmx_ekindata_t      * ekind,
                        const matrix                box,
                        const rvec   * gmx_restrict x,
                        rvec         * gmx_restrict xprime,
                        rvec         * gmx_restrict v,
                        const rvec   * gmx_restrict f,
                        const double * gmx_restrict nh_vxi,
                        const matrix                M)
{
    /* This is a version of the leap-frog integrator that supports
     * all combinations of T-coupling, P-coupling and NEMD.
     * Nose-Hoover uses the reversible leap-frog integrator from
     * Holian et al. Phys Rev E 52(3) : 2338, 1995
     */

    const unsigned short * cTC    = md->cTC;
    const t_grp_tcstat   * tcstat = ekind->tcstat;

    const unsigned short * cACC    = md->cACC;
    const rvec           * accel   = ir->opts.acc;
    const t_grp_acc      * grpstat = ekind->grpstat;

    const rvec * gmx_restrict invMassPerDim = md->invMassPerDim;

    /* Initialize group values, changed later when multiple groups are used */
    int  ga       = 0;
    int  gt       = 0;
    real factorNH = 0;

    for (int n = start; n < nrend; n++)
    {
        if (cTC)
        {
            gt = cTC[n];
        }
        real lg = tcstat[gt].lambda;

        rvec vRel;
        real cosineZ, vCosine;
#ifdef __INTEL_COMPILER
#pragma warning( disable : 280 )
#endif
        switch (accelerationType)
        {
            case AccelerationType::none:
                copy_rvec(v[n], vRel);
                break;
            case AccelerationType::group:
                if (cACC)
                {
                    ga = cACC[n];
                }
                /* Avoid scaling the group velocity */
                rvec_sub(v[n], grpstat[ga].u, vRel);
                break;
            case AccelerationType::cosine:
                cosineZ = std::cos(x[n][ZZ]*static_cast<real>(M_PI)/box[ZZ][ZZ]);
                vCosine = cosineZ*ekind->cosacc.vcos;
                /* Avoid scaling the cosine profile velocity */
                copy_rvec(v[n], vRel);
                vRel[XX] -= vCosine;
                break;
        }

        if (doNoseHoover)
        {
            /* Here we account for multiple time stepping, by increasing
             * the Nose-Hoover correction by nsttcouple
             */
            factorNH = 0.5*ir->nsttcouple*dt*nh_vxi[gt];
        }

        for (int d = 0; d < DIM; d++)
        {
            real vNew =
                (lg*vRel[d] + (f[n][d]*invMassPerDim[n][d]*dt - factorNH*vRel[d]
                               - dtPressureCouple*iprod(M[d], vRel)))/(1 + factorNH);
            switch (accelerationType)
            {
                case AccelerationType::none:
                    break;
                case AccelerationType::group:
                    /* Add back the mean velocity and apply acceleration */
                    vNew += grpstat[ga].u[d] + accel[ga][d]*dt;
                    break;
                case AccelerationType::cosine:
                    if (d == XX)
                    {
                        /* Add back the mean velocity and apply acceleration */
                        vNew += vCosine + cosineZ*ekind->cosacc.cos_accel*dt;
                    }
                    break;
            }
            v[n][d]      = vNew;
            xprime[n][d] = x[n][d] + vNew*dt;
        }
    }
}
/*! \brief Handles the Leap-frog MD x and v integration */
static void do_update_md(int                         start,
                         int                         nrend,
                         int64_t                     step,
                         real                        dt,
                         const t_inputrec          * ir,
                         const t_mdatoms           * md,
                         const gmx_ekindata_t      * ekind,
                         const matrix                box,
                         const rvec   * gmx_restrict x,
                         rvec         * gmx_restrict xprime,
                         rvec         * gmx_restrict v,
                         const rvec   * gmx_restrict f,
                         const double * gmx_restrict nh_vxi,
                         const matrix                M)
{
    GMX_ASSERT(nrend == start || xprime != x, "For SIMD optimization certain compilers need to have xprime != x");

    /* Note: Berendsen pressure scaling is handled after do_update_md() */
    bool doTempCouple       = isTemperatureCouplingStep(step, ir);
    bool doNoseHoover       = (ir->etc == etcNOSEHOOVER && doTempCouple);
    bool doParrinelloRahman = (ir->epc == epcPARRINELLORAHMAN && isPressureCouplingStep(step, ir));
    bool doPROffDiagonal    = (doParrinelloRahman && (M[YY][XX] != 0 || M[ZZ][XX] != 0 || M[ZZ][YY] != 0));

    real dtPressureCouple   = (doParrinelloRahman ? ir->nstpcouple*dt : 0);

    /* NEMD (also cosine) acceleration is applied in updateMDLeapfrogGeneral */
    bool doAcceleration     = (ekind->bNEMD || ekind->cosacc.cos_accel != 0);

    if (doNoseHoover || doPROffDiagonal || doAcceleration)
    {
        matrix stepM;
        if (!doParrinelloRahman)
        {
            /* We should not apply PR scaling at this step */
            clear_mat(stepM);
        }
        else
        {
            copy_mat(M, stepM);
        }

        if (!doAcceleration)
        {
            updateMDLeapfrogGeneral<AccelerationType::none>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, stepM);
        }
        else if (ekind->bNEMD)
        {
            updateMDLeapfrogGeneral<AccelerationType::group>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, stepM);
        }
        else
        {
            updateMDLeapfrogGeneral<AccelerationType::cosine>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, stepM);
        }
    }
    else
    {
        /* Use a simple and thus more efficient integration loop. */
        /* The simple loop does not check for particle type (so it can
         * be vectorized), which means we need to clear the velocities
         * of virtual sites in advance, when present. Note that vsite
         * velocities are computed after update and constraints from
         * their displacement.
         */
        if (md->haveVsites)
        {
            /* Note: The overhead of this loop is completely negligible */
            clearVsiteVelocities(start, nrend, md->ptype, v);
        }

        /* We determine if we have a single T-coupling lambda value for all
         * atoms. That allows for better SIMD acceleration in the template.
         * If we do not do temperature coupling (in the run or this step),
         * all scaling values are 1, so we effectively have a single value.
         */
        bool haveSingleTempScaleValue = (!doTempCouple || ekind->ngtc == 1);

        /* Extract some pointers needed by all cases */
        const unsigned short *cTC           = md->cTC;
        const t_grp_tcstat   *tcstat        = ekind->tcstat;
        const rvec           *invMassPerDim = md->invMassPerDim;

        if (doParrinelloRahman)
        {
            GMX_ASSERT(!doPROffDiagonal, "updateMDLeapfrogSimple only supports diagonal Parrinello-Rahman scaling matrices");

            rvec diagM;
            for (int d = 0; d < DIM; d++)
            {
                diagM[d] = M[d][d];
            }

            if (haveSingleTempScaleValue)
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::single,
                 ApplyParrinelloRahmanVScaling::diagonal>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, diagM, x, xprime, v, f);
            }
            else
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::multiple,
                 ApplyParrinelloRahmanVScaling::diagonal>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, diagM, x, xprime, v, f);
            }
        }
        else
        {
            if (haveSingleTempScaleValue)
            {
                /* Note that modern compilers are pretty good at vectorizing
                 * updateMDLeapfrogSimple(). But the SIMD version will still
                 * be faster because invMass lowers the cache pressure
                 * compared to invMassPerDim.
                 */
#if GMX_HAVE_SIMD_UPDATE
                /* Check if we can use invmass instead of invMassPerDim */
                if (!md->havePartiallyFrozenAtoms)
                {
                    updateMDLeapfrogSimpleSimd
                        (start, nrend, dt, md->invmass, tcstat, x, xprime, v, f);
                }
                else
#endif
                {
                    updateMDLeapfrogSimple
                    <NumTempScaleValues::single,
                     ApplyParrinelloRahmanVScaling::no>
                        (start, nrend, dt, dtPressureCouple,
                        invMassPerDim, tcstat, cTC, nullptr, x, xprime, v, f);
                }
            }
            else
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::multiple,
                 ApplyParrinelloRahmanVScaling::no>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, nullptr, x, xprime, v, f);
            }
        }
    }
}
static void do_update_vv_vel(int start, int nrend, real dt,
                             rvec accel[], ivec nFreeze[], const real invmass[],
                             const unsigned short ptype[], const unsigned short cFREEZE[],
                             const unsigned short cACC[], rvec v[], const rvec f[],
                             gmx_bool bExtended, real veta, real alpha)
{
    int  gf = 0, ga = 0;
    int  n, d;
    real g, mv1, mv2;

    if (bExtended)
    {
        g   = 0.25*dt*veta*alpha;
        mv1 = std::exp(-g);
        mv2 = gmx::series_sinhx(g);
    }
    else
    {
        mv1 = 1.0;
        mv2 = 1.0;
    }
    for (n = start; n < nrend; n++)
    {
        real w_dt = invmass[n]*dt;
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cACC)
        {
            ga = cACC[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                v[n][d] = mv1*(mv1*v[n][d] + 0.5*(w_dt*mv2*f[n][d])) + 0.5*accel[ga][d]*dt;
            }
            else
            {
                v[n][d] = 0.0;
            }
        }
    }
} /* do_update_vv_vel */
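
/* Sanity check on the update above: without extended-system coupling
 * (bExtended == FALSE) we have mv1 = mv2 = 1, so it reduces to the
 * plain velocity-verlet half-step
 *     v' = v + 0.5*(f/m + a)*dt
 * which is applied twice per MD step (etrtVELOCITY1 and etrtVELOCITY2
 * in update_coords() below).
 */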
static void do_update_vv_pos(int start, int nrend, real dt,
                             ivec nFreeze[],
                             const unsigned short ptype[], const unsigned short cFREEZE[],
                             const rvec x[], rvec xprime[], rvec v[],
                             gmx_bool bExtended, real veta)
{
    int  gf = 0;
    int  n, d;
    real g, mr1, mr2;

    /* Would it make more sense if Parrinello-Rahman was put here? */
    if (bExtended)
    {
        g   = 0.5*dt*veta;
        mr1 = std::exp(g);
        mr2 = gmx::series_sinhx(g);
    }
    else
    {
        mr1 = 1.0;
        mr2 = 1.0;
    }

    for (n = start; n < nrend; n++)
    {
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                xprime[n][d] = mr1*(mr1*x[n][d] + mr2*dt*v[n][d]);
            }
            else
            {
                xprime[n][d] = x[n][d];
            }
        }
    }
} /* do_update_vv_pos */
static gmx_stochd_t *init_stochd(const t_inputrec *ir)
{
    gmx_stochd_t *sd;

    snew(sd, 1);

    const t_grpopts *opts = &ir->opts;
    int              ngtc = opts->ngtc;

    if (ir->eI == eiBD)
    {
        snew(sd->bd_rf, ngtc);
    }
    else if (EI_SD(ir->eI))
    {
        snew(sd->sdc, ngtc);
        snew(sd->sdsig, ngtc);

        gmx_sd_const_t *sdc = sd->sdc;

        for (int gt = 0; gt < ngtc; gt++)
        {
            if (opts->tau_t[gt] > 0)
            {
                sdc[gt].em = std::exp(-ir->delta_t/opts->tau_t[gt]);
            }
            else
            {
                /* No friction and noise on this group */
                sdc[gt].em = 1;
            }
        }
    }
    else if (ETC_ANDERSEN(ir->etc))
    {
        snew(sd->randomize_group, ngtc);
        snew(sd->boltzfac, ngtc);

        /* for now, assume that all groups, if randomized, are randomized at the same rate, i.e. tau_t is the same. */
        /* since constraint groups don't necessarily match up with temperature groups! This is checked in readir.c */

        for (int gt = 0; gt < ngtc; gt++)
        {
            real reft = std::max<real>(0, opts->ref_t[gt]);
            if ((opts->tau_t[gt] > 0) && (reft > 0)) /* tau_t or ref_t = 0 means that no randomization is done */
            {
                sd->randomize_group[gt] = TRUE;
                sd->boltzfac[gt]        = BOLTZ*opts->ref_t[gt];
            }
            else
            {
                sd->randomize_group[gt] = FALSE;
            }
        }
    }

    return sd;
}
void update_temperature_constants(gmx_update_t *upd, const t_inputrec *ir)
{
    if (ir->eI == eiBD)
    {
        if (ir->bd_fric != 0)
        {
            for (int gt = 0; gt < ir->opts.ngtc; gt++)
            {
                upd->sd->bd_rf[gt] = std::sqrt(2.0*BOLTZ*ir->opts.ref_t[gt]/(ir->bd_fric*ir->delta_t));
            }
        }
        else
        {
            for (int gt = 0; gt < ir->opts.ngtc; gt++)
            {
                upd->sd->bd_rf[gt] = std::sqrt(2.0*BOLTZ*ir->opts.ref_t[gt]);
            }
        }
    }
    if (ir->eI == eiSD1)
    {
        for (int gt = 0; gt < ir->opts.ngtc; gt++)
        {
            real kT = BOLTZ*ir->opts.ref_t[gt];
            /* The mass is accounted for later, since this differs per atom */
            upd->sd->sdsig[gt].V = std::sqrt(kT*(1 - upd->sd->sdc[gt].em*upd->sd->sdc[gt].em));
        }
    }
}
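
/* The stochastic constants set above, per temperature group:
 *     SD: em      = exp(-dt/tau_t)         (friction decay per step)
 *         sigma.V = sqrt(kT*(1 - em^2))    (noise amplitude, mass applied per atom)
 *     BD: bd_rf   = sqrt(2*kT/(gamma*dt))  (with friction coefficient gamma)
 * In the limit tau_t -> infinity, em -> 1 and sigma.V -> 0, so the SD
 * update smoothly turns into plain MD for uncoupled groups.
 */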
gmx_update_t *init_update(const t_inputrec    *ir,
                          gmx::BoxDeformation *deform)
{
    gmx_update_t *upd = new(gmx_update_t);

    if (ir->eI == eiBD || EI_SD(ir->eI) || ir->etc == etcVRESCALE || ETC_ANDERSEN(ir->etc))
    {
        upd->sd = init_stochd(ir);
    }

    update_temperature_constants(upd, ir);

    upd->xp.resize(0);

    upd->deform = deform;

    return upd;
}
void update_realloc(gmx_update_t *upd, int natoms)
{
    GMX_ASSERT(upd, "upd must be allocated before its fields can be reallocated");

    upd->xp.resize(gmx::paddedRVecVectorSize(natoms));
}
/*! \brief Sets the SD update type */
enum class SDUpdate : int
{
    ForcesOnly, FrictionAndNoiseOnly, Combined
};

/*! \brief SD integrator update
 *
 * Two phases are required in the general case of a constrained
 * update, the first phase from the contribution of forces, before
 * applying constraints, and then a second phase applying the friction
 * and noise, and then further constraining. For details, see
 * Goga2012.
 *
 * Without constraints, the two phases can be combined, for
 * efficiency.
 *
 * Thus three instantiations of this templated function will be made,
 * two with only one contribution, and one with both contributions. */
template <SDUpdate updateType>
static void
doSDUpdateGeneral(gmx_stochd_t *sd,
                  int start, int nrend, real dt,
                  rvec accel[], ivec nFreeze[],
                  const real invmass[], const unsigned short ptype[],
                  const unsigned short cFREEZE[], const unsigned short cACC[],
                  const unsigned short cTC[],
                  const rvec x[], rvec xprime[], rvec v[], const rvec f[],
                  int64_t step, int seed, const int *gatindex)
{
    if (updateType != SDUpdate::FrictionAndNoiseOnly)
    {
        GMX_ASSERT(f != nullptr, "SD update with forces requires forces");
        GMX_ASSERT(cACC != nullptr, "SD update with forces requires acceleration groups");
    }
    if (updateType != SDUpdate::ForcesOnly)
    {
        GMX_ASSERT(cTC != nullptr, "SD update with noise requires temperature groups");
    }

    // Even 0 bits internal counter gives 2x64 ints (more than enough for three table lookups)
    gmx::ThreeFry2x64<0> rng(seed, gmx::RandomDomain::UpdateCoordinates);
    gmx::TabulatedNormalDistribution<real, 14> dist;

    gmx_sd_const_t *sdc = sd->sdc;
    gmx_sd_sigma_t *sig = sd->sdsig;

    for (int n = start; n < nrend; n++)
    {
        int globalAtomIndex = gatindex ? gatindex[n] : n;
        rng.restart(step, globalAtomIndex);
        dist.reset();

        real inverseMass = invmass[n];
        real invsqrtMass = std::sqrt(inverseMass);

        int  freezeGroup       = cFREEZE ? cFREEZE[n] : 0;
        int  accelerationGroup = cACC ? cACC[n] : 0;
        int  temperatureGroup  = cTC ? cTC[n] : 0;

        for (int d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[freezeGroup][d])
            {
                if (updateType == SDUpdate::ForcesOnly)
                {
                    real vn = v[n][d] + (inverseMass*f[n][d] + accel[accelerationGroup][d])*dt;
                    v[n][d] = vn;
                    // Simple position update.
                    xprime[n][d] = x[n][d] + v[n][d]*dt;
                }
                else if (updateType == SDUpdate::FrictionAndNoiseOnly)
                {
                    real vn = v[n][d];
                    v[n][d] = (vn*sdc[temperatureGroup].em +
                               invsqrtMass*sig[temperatureGroup].V*dist(rng));
                    // The previous phase already updated the
                    // positions with a full v*dt term that must
                    // now be half removed.
                    xprime[n][d] = xprime[n][d] + 0.5*(v[n][d] - vn)*dt;
                }
                else
                {
                    real vn = v[n][d] + (inverseMass*f[n][d] + accel[accelerationGroup][d])*dt;
                    v[n][d] = (vn*sdc[temperatureGroup].em +
                               invsqrtMass*sig[temperatureGroup].V*dist(rng));
                    // Here we include half of the friction+noise
                    // update of v into the position update.
                    xprime[n][d] = x[n][d] + 0.5*(vn + v[n][d])*dt;
                }
            }
            else
            {
                // When using constraints, the update is split into
                // two phases, but we only need to zero the update of
                // virtual, shell or frozen particles in at most one
                // of the phases.
                if (updateType != SDUpdate::FrictionAndNoiseOnly)
                {
                    v[n][d]      = 0.0;
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
}
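
/* The combined-phase SD update in equations, per degree of freedom,
 * with em and sigma.V as set in init_stochd()/update_temperature_constants()
 * and xi a unit normal random number:
 *     vn = v + (f/m + a)*dt              (deterministic kick)
 *     v' = em*vn + sqrt(1/m)*sigma.V*xi  (friction + noise)
 *     x' = x + 0.5*(vn + v')*dt          (trapezoidal position update)
 * The two-phase variants split exactly this around the constraint
 * calls, as described in the Goga2012 reference above.
 */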
static void do_update_bd(int start, int nrend, real dt,
                         ivec nFreeze[],
                         const real invmass[], const unsigned short ptype[],
                         const unsigned short cFREEZE[], const unsigned short cTC[],
                         const rvec x[], rvec xprime[], rvec v[],
                         const rvec f[], real friction_coefficient,
                         const real *rf, int64_t step, int seed,
                         const int* gatindex)
{
    /* note -- these appear to be full step velocities . . .  */
    int  gf = 0, gt = 0;
    real vn;
    real invfr = 0;
    int  n, d;
    // Use 1 bit of internal counters to give us 2*2 64-bits values per stream
    // Each 64-bit value is enough for 4 normal distribution table numbers.
    gmx::ThreeFry2x64<0> rng(seed, gmx::RandomDomain::UpdateCoordinates);
    gmx::TabulatedNormalDistribution<real, 14> dist;

    if (friction_coefficient != 0)
    {
        invfr = 1.0/friction_coefficient;
    }

    for (n = start; (n < nrend); n++)
    {
        int ng = gatindex ? gatindex[n] : n;

        rng.restart(step, ng);
        dist.reset();

        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cTC)
        {
            gt = cTC[n];
        }
        for (d = 0; (d < DIM); d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                if (friction_coefficient != 0)
                {
                    vn = invfr*f[n][d] + rf[gt]*dist(rng);
                }
                else
                {
                    /* NOTE: invmass = 2/(mass*friction_constant*dt) */
                    vn = 0.5*invmass[n]*f[n][d]*dt
                        + std::sqrt(0.5*invmass[n])*rf[gt]*dist(rng);
                }

                v[n][d]      = vn;
                xprime[n][d] = x[n][d] + vn*dt;
            }
            else
            {
                v[n][d]      = 0.0;
                xprime[n][d] = x[n][d];
            }
        }
    }
}
static void calc_ke_part_normal(rvec v[], t_grpopts *opts, t_mdatoms *md,
                                gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    int           g;
    t_grp_tcstat *tcstat  = ekind->tcstat;
    t_grp_acc    *grpstat = ekind->grpstat;
    int           nthread, thread;

    /* three main: VV with AveVel, vv with AveEkin, leap with AveEkin. Leap with AveVel is also
       an option, but not supported now.
       bEkinAveVel: If TRUE, we sum into ekin, if FALSE, into ekinh.
     */

    /* group velocities are calculated in update_ekindata and
     * accumulated in acumulate_groups.
     * Now the partial global and groups ekin.
     */
    for (g = 0; (g < opts->ngtc); g++)
    {
        copy_mat(tcstat[g].ekinh, tcstat[g].ekinh_old);
        if (bEkinAveVel)
        {
            clear_mat(tcstat[g].ekinf);
            tcstat[g].ekinscalef_nhc = 1.0; /* need to clear this -- logic is complicated! */
        }
        else
        {
            clear_mat(tcstat[g].ekinh);
        }
    }
    ekind->dekindl_old = ekind->dekindl;
    nthread            = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
    {
        // This OpenMP only loops over arrays and does not call any functions
        // or memory allocation. It should not be able to throw, so for now
        // we do not need a try/catch wrapper.
        int     start_t, end_t, n;
        int     ga, gt;
        rvec    v_corrt;
        real    hm;
        int     d, m;
        matrix *ekin_sum;
        real   *dekindl_sum;

        start_t = ((thread+0)*md->homenr)/nthread;
        end_t   = ((thread+1)*md->homenr)/nthread;

        ekin_sum    = ekind->ekin_work[thread];
        dekindl_sum = ekind->dekindl_work[thread];

        for (gt = 0; gt < opts->ngtc; gt++)
        {
            clear_mat(ekin_sum[gt]);
        }
        *dekindl_sum = 0.0;

        ga = 0;
        gt = 0;
        for (n = start_t; n < end_t; n++)
        {
            if (md->cACC)
            {
                ga = md->cACC[n];
            }
            if (md->cTC)
            {
                gt = md->cTC[n];
            }
            hm = 0.5*md->massT[n];

            for (d = 0; (d < DIM); d++)
            {
                v_corrt[d] = v[n][d] - grpstat[ga].u[d];
            }
            for (d = 0; (d < DIM); d++)
            {
                for (m = 0; (m < DIM); m++)
                {
                    /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
                    ekin_sum[gt][m][d] += hm*v_corrt[m]*v_corrt[d];
                }
            }
            if (md->nMassPerturbed && md->bPerturbed[n])
            {
                *dekindl_sum +=
                    0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
            }
        }
    }

    ekind->dekindl = 0;
    for (thread = 0; thread < nthread; thread++)
    {
        for (g = 0; g < opts->ngtc; g++)
        {
            if (bEkinAveVel)
            {
                m_add(tcstat[g].ekinf, ekind->ekin_work[thread][g],
                      tcstat[g].ekinf);
            }
            else
            {
                m_add(tcstat[g].ekinh, ekind->ekin_work[thread][g],
                      tcstat[g].ekinh);
            }
        }

        ekind->dekindl += *ekind->dekindl_work[thread];
    }

    inc_nrnb(nrnb, eNR_EKIN, md->homenr);
}
static void calc_ke_part_visc(matrix box, rvec x[], rvec v[],
                              t_grpopts *opts, t_mdatoms *md,
                              gmx_ekindata_t *ekind,
                              t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    int           start = 0, homenr = md->homenr;
    int           g, d, n, m, gt = 0;
    rvec          v_corrt;
    real          hm;
    t_grp_tcstat *tcstat = ekind->tcstat;
    t_cos_acc    *cosacc = &(ekind->cosacc);
    real          dekindl;
    real          fac, cosz;
    double        mvcos;

    for (g = 0; g < opts->ngtc; g++)
    {
        copy_mat(ekind->tcstat[g].ekinh, ekind->tcstat[g].ekinh_old);
        clear_mat(ekind->tcstat[g].ekinh);
    }
    ekind->dekindl_old = ekind->dekindl;

    fac     = 2*M_PI/box[ZZ][ZZ];
    mvcos   = 0;
    dekindl = 0;
    for (n = start; n < start+homenr; n++)
    {
        if (md->cTC)
        {
            gt = md->cTC[n];
        }
        hm = 0.5*md->massT[n];

        /* Note that the times of x and v differ by half a step */
        /* MRS -- would have to be changed for VV */
        cosz = std::cos(fac*x[n][ZZ]);
        /* Calculate the amplitude of the new velocity profile */
        mvcos += 2*cosz*md->massT[n]*v[n][XX];

        copy_rvec(v[n], v_corrt);
        /* Subtract the profile for the kinetic energy */
        v_corrt[XX] -= cosz*cosacc->vcos;
        for (d = 0; (d < DIM); d++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
                if (bEkinAveVel)
                {
                    tcstat[gt].ekinf[m][d] += hm*v_corrt[m]*v_corrt[d];
                }
                else
                {
                    tcstat[gt].ekinh[m][d] += hm*v_corrt[m]*v_corrt[d];
                }
            }
        }
        if (md->nPerturbed && md->bPerturbed[n])
        {
            /* The minus sign here might be confusing.
             * The kinetic contribution from dH/dl doesn't come from
             * d m(l)/2 v^2 / dl, but rather from d p^2/2m(l) / dl,
             * where p are the momenta. The difference is only a minus sign.
             */
            dekindl -= 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
        }
    }
    ekind->dekindl = dekindl;
    cosacc->mvcos  = mvcos;

    inc_nrnb(nrnb, eNR_EKIN, homenr);
}
void calc_ke_part(t_state *state, t_grpopts *opts, t_mdatoms *md,
                  gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    if (ekind->cosacc.cos_accel == 0)
    {
        calc_ke_part_normal(as_rvec_array(state->v.data()), opts, md, ekind, nrnb, bEkinAveVel);
    }
    else
    {
        calc_ke_part_visc(state->box, as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), opts, md, ekind, nrnb, bEkinAveVel);
    }
}
extern void init_ekinstate(ekinstate_t *ekinstate, const t_inputrec *ir)
{
    ekinstate->ekin_n = ir->opts.ngtc;
    snew(ekinstate->ekinh, ekinstate->ekin_n);
    snew(ekinstate->ekinf, ekinstate->ekin_n);
    snew(ekinstate->ekinh_old, ekinstate->ekin_n);
    ekinstate->ekinscalef_nhc.resize(ekinstate->ekin_n);
    ekinstate->ekinscaleh_nhc.resize(ekinstate->ekin_n);
    ekinstate->vscale_nhc.resize(ekinstate->ekin_n);
    ekinstate->dekindl = 0;
    ekinstate->mvcos   = 0;
}
void update_ekinstate(ekinstate_t *ekinstate, gmx_ekindata_t *ekind)
{
    int i;

    for (i = 0; i < ekinstate->ekin_n; i++)
    {
        copy_mat(ekind->tcstat[i].ekinh, ekinstate->ekinh[i]);
        copy_mat(ekind->tcstat[i].ekinf, ekinstate->ekinf[i]);
        copy_mat(ekind->tcstat[i].ekinh_old, ekinstate->ekinh_old[i]);
        ekinstate->ekinscalef_nhc[i] = ekind->tcstat[i].ekinscalef_nhc;
        ekinstate->ekinscaleh_nhc[i] = ekind->tcstat[i].ekinscaleh_nhc;
        ekinstate->vscale_nhc[i]     = ekind->tcstat[i].vscale_nhc;
    }

    copy_mat(ekind->ekin, ekinstate->ekin_total);
    ekinstate->dekindl = ekind->dekindl;
    ekinstate->mvcos   = ekind->cosacc.mvcos;
}
void restore_ekinstate_from_state(const t_commrec *cr,
                                  gmx_ekindata_t *ekind, const ekinstate_t *ekinstate)
{
    int i, n;

    if (MASTER(cr))
    {
        for (i = 0; i < ekinstate->ekin_n; i++)
        {
            copy_mat(ekinstate->ekinh[i], ekind->tcstat[i].ekinh);
            copy_mat(ekinstate->ekinf[i], ekind->tcstat[i].ekinf);
            copy_mat(ekinstate->ekinh_old[i], ekind->tcstat[i].ekinh_old);
            ekind->tcstat[i].ekinscalef_nhc = ekinstate->ekinscalef_nhc[i];
            ekind->tcstat[i].ekinscaleh_nhc = ekinstate->ekinscaleh_nhc[i];
            ekind->tcstat[i].vscale_nhc     = ekinstate->vscale_nhc[i];
        }

        copy_mat(ekinstate->ekin_total, ekind->ekin);

        ekind->dekindl      = ekinstate->dekindl;
        ekind->cosacc.mvcos = ekinstate->mvcos;
        n                   = ekinstate->ekin_n;
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(n), &n, cr);
        for (i = 0; i < n; i++)
        {
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh[0][0]),
                      ekind->tcstat[i].ekinh[0], cr);
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinf[0][0]),
                      ekind->tcstat[i].ekinf[0], cr);
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh_old[0][0]),
                      ekind->tcstat[i].ekinh_old[0], cr);

            gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc),
                      &(ekind->tcstat[i].ekinscalef_nhc), cr);
            gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc),
                      &(ekind->tcstat[i].ekinscaleh_nhc), cr);
            gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc),
                      &(ekind->tcstat[i].vscale_nhc), cr);
        }
        gmx_bcast(DIM*DIM*sizeof(ekind->ekin[0][0]),
                  ekind->ekin[0], cr);

        gmx_bcast(sizeof(ekind->dekindl), &ekind->dekindl, cr);
        gmx_bcast(sizeof(ekind->cosacc.mvcos), &ekind->cosacc.mvcos, cr);
    }
}
void update_tcouple(int64_t         step,
                    t_inputrec     *inputrec,
                    t_state        *state,
                    gmx_ekindata_t *ekind,
                    t_extmass      *MassQ,
                    t_mdatoms      *md)
{
    bool doTemperatureCoupling = false;

    /* if using vv with trotter decomposition methods, we do this elsewhere in the code */
    if (!(EI_VV(inputrec->eI) &&
          (inputrecNvtTrotter(inputrec) || inputrecNptTrotter(inputrec) || inputrecNphTrotter(inputrec))))
    {
        doTemperatureCoupling = isTemperatureCouplingStep(step, inputrec);
    }

    if (doTemperatureCoupling)
    {
        real dttc = inputrec->nsttcouple*inputrec->delta_t;

        switch (inputrec->etc)
        {
            case etcNO:
                break;
            case etcBERENDSEN:
                berendsen_tcoupl(inputrec, ekind, dttc, state->therm_integral);
                break;
            case etcNOSEHOOVER:
                nosehoover_tcoupl(&(inputrec->opts), ekind, dttc,
                                  state->nosehoover_xi.data(), state->nosehoover_vxi.data(), MassQ);
                break;
            case etcVRESCALE:
                vrescale_tcoupl(inputrec, step, ekind, dttc,
                                state->therm_integral.data());
                break;
        }
        /* rescale in place here */
        if (EI_VV(inputrec->eI))
        {
            rescale_velocities(ekind, md, 0, md->homenr, as_rvec_array(state->v.data()));
        }
    }
    else
    {
        /* Set the T scaling lambda to 1 to have no scaling */
        for (int i = 0; (i < inputrec->opts.ngtc); i++)
        {
            ekind->tcstat[i].lambda = 1.0;
        }
    }
}
/*! \brief Computes the atom range for a thread to operate on, ensuring SIMD aligned ranges
 *
 * \param[in]  numThreads   The number of threads to divide atoms over
 * \param[in]  threadIndex  The thread to get the range for
 * \param[in]  numAtoms     The total number of atoms (on this rank)
 * \param[out] startAtom    The start of the atom range
 * \param[out] endAtom      The end of the atom range, note that this is in general not a multiple of the SIMD width
 */
static void getThreadAtomRange(int numThreads, int threadIndex, int numAtoms,
                               int *startAtom, int *endAtom)
{
#if GMX_HAVE_SIMD_UPDATE
    constexpr int blockSize = GMX_SIMD_REAL_WIDTH;
#else
    constexpr int blockSize = 1;
#endif
    int           numBlocks = (numAtoms + blockSize - 1)/blockSize;

    *startAtom = ((numBlocks* threadIndex     )/numThreads)*blockSize;
    *endAtom   = ((numBlocks*(threadIndex + 1))/numThreads)*blockSize;
    if (threadIndex == numThreads - 1)
    {
        *endAtom = numAtoms;
    }
}
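
/* Worked example for getThreadAtomRange(): with numAtoms = 10,
 * blockSize = 4 (the SIMD width) and 2 threads, numBlocks = 3, so
 * thread 0 gets atoms [0, 4) and thread 1 gets [4, 12), clamped to
 * [4, 10) by the last-thread correction. All interior range boundaries
 * stay multiples of the SIMD width; only the final range end can be
 * unaligned.
 */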
void update_pcouple_before_coordinates(FILE             *fplog,
                                       int64_t           step,
                                       const t_inputrec *inputrec,
                                       t_state          *state,
                                       matrix            parrinellorahmanMu,
                                       matrix            M,
                                       gmx_bool          bInitStep)
{
    /* Berendsen P-coupling is completely handled after the coordinate update.
     * Trotter P-coupling is handled by separate calls to trotter_update().
     */
    if (inputrec->epc == epcPARRINELLORAHMAN &&
        isPressureCouplingStep(step, inputrec))
    {
        real dtpc = inputrec->nstpcouple*inputrec->delta_t;

        parrinellorahman_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev,
                                state->box, state->box_rel, state->boxv,
                                M, parrinellorahmanMu, bInitStep);
    }
}
void constrain_velocities(int64_t           step,
                          real             *dvdlambda, /* the contribution to be added to the bonded interactions */
                          t_state          *state,
                          tensor            vir_part,
                          gmx_wallcycle_t   wcycle,
                          gmx::Constraints *constr,
                          gmx_bool          bCalcVir,
                          bool              do_log,
                          bool              do_ene)
{
    if (!constr)
    {
        return;
    }

    /*
     *  Steps (7C, 8C)
     *  APPLY CONSTRAINTS:
     *  BLOCK SHAKE
     */

    {
        tensor vir_con;

        /* clear out constraints before applying */
        clear_mat(vir_part);

        /* Constrain the velocities state->v */
        wallcycle_start(wcycle, ewcCONSTR);

        constr->apply(do_log, do_ene,
                      step, 1, 1.0,
                      as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
                      state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      nullptr, bCalcVir ? &vir_con : nullptr, ConstraintVariable::Velocities);

        wallcycle_stop(wcycle, ewcCONSTR);

        if (bCalcVir)
        {
            m_add(vir_part, vir_con, vir_part);
        }
    }
}
void constrain_coordinates(int64_t           step,
                           real             *dvdlambda, /* the contribution to be added to the bonded interactions */
                           t_state          *state,
                           tensor            vir_part,
                           gmx_wallcycle_t   wcycle,
                           gmx_update_t     *upd,
                           gmx::Constraints *constr,
                           gmx_bool          bCalcVir,
                           bool              do_log,
                           bool              do_ene)
{
    if (!constr)
    {
        return;
    }

    {
        tensor vir_con;

        /* clear out constraints before applying */
        clear_mat(vir_part);

        /* Constrain the coordinates upd->xp */
        wallcycle_start(wcycle, ewcCONSTR);

        constr->apply(do_log, do_ene,
                      step, 1, 1.0,
                      as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), nullptr,
                      state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      as_rvec_array(state->v.data()), bCalcVir ? &vir_con : nullptr, ConstraintVariable::Positions);

        wallcycle_stop(wcycle, ewcCONSTR);

        if (bCalcVir)
        {
            m_add(vir_part, vir_con, vir_part);
        }
    }
}
void
update_sd_second_half(int64_t           step,
                      real             *dvdlambda, /* the contribution to be added to the bonded interactions */
                      const t_inputrec *inputrec,  /* input record and box stuff */
                      t_mdatoms        *md,
                      t_state          *state,
                      const t_commrec  *cr,
                      t_nrnb           *nrnb,
                      gmx_wallcycle_t   wcycle,
                      gmx_update_t     *upd,
                      gmx::Constraints *constr,
                      bool              do_log,
                      bool              do_ene)
{
    if (!constr)
    {
        return;
    }
    if (inputrec->eI == eiSD1)
    {
        int nth, th;
        int homenr = md->homenr;

        /* Cast delta_t from double to real to make the integrators faster.
         * The only reason for having delta_t double is to get accurate values
         * for t=delta_t*step when step is larger than float precision.
         * For integration dt the accuracy of real suffices, since with
         * integral += dt*integrand the increment is nearly always (much) smaller
         * than the integral (and the integrand has real precision).
         */
        real dt = inputrec->delta_t;

        wallcycle_start(wcycle, ewcUPDATE);

        nth = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nth) schedule(static)
        for (th = 0; th < nth; th++)
        {
            try
            {
                int start_th, end_th;
                getThreadAtomRange(nth, th, homenr, &start_th, &end_th);

                doSDUpdateGeneral<SDUpdate::FrictionAndNoiseOnly>
                    (upd->sd,
                    start_th, end_th, dt,
                    inputrec->opts.acc, inputrec->opts.nFreeze,
                    md->invmass, md->ptype,
                    md->cFREEZE, nullptr, md->cTC,
                    as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()),
                    as_rvec_array(state->v.data()), nullptr,
                    step, inputrec->ld_seed,
                    DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
        inc_nrnb(nrnb, eNR_UPDATE, homenr);
        wallcycle_stop(wcycle, ewcUPDATE);

        /* Constrain the coordinates upd->xp for half a time step */
        wallcycle_start(wcycle, ewcCONSTR);
        constr->apply(do_log, do_ene,
                      step, 1, 0.5,
                      as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), nullptr,
                      state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      as_rvec_array(state->v.data()), nullptr, ConstraintVariable::Positions);
        wallcycle_stop(wcycle, ewcCONSTR);
    }
}
void finish_update(const t_inputrec *inputrec, /* input record and box stuff */
                   t_mdatoms        *md,
                   t_state          *state,
                   t_graph          *graph,
                   t_nrnb           *nrnb,
                   gmx_wallcycle_t   wcycle,
                   gmx_update_t     *upd,
                   gmx::Constraints *constr)
{
    int homenr = md->homenr;

    /* We must always unshift after updating coordinates; if we did not shake
       x was shifted in do_force */

    /* NOTE Currently we always integrate to a temporary buffer and
     * then copy the results back. */

    wallcycle_start_nocount(wcycle, ewcUPDATE);

    if (md->cFREEZE != nullptr && constr != nullptr)
    {
        /* If we have atoms that are frozen along some, but not all
         * dimensions, then any constraints will have moved them also along
         * the frozen dimensions. To freeze such degrees of freedom
         * we copy them back here to later copy them forward. It would
         * be more elegant and slightly more efficient to copy zero
         * times instead of twice, but the graph case below prevents this.
         */
        const ivec *nFreeze                     = inputrec->opts.nFreeze;
        bool        partialFreezeAndConstraints = false;
        for (int g = 0; g < inputrec->opts.ngfrz; g++)
        {
            int numFreezeDim = nFreeze[g][XX] + nFreeze[g][YY] + nFreeze[g][ZZ];
            if (numFreezeDim > 0 && numFreezeDim < 3)
            {
                partialFreezeAndConstraints = true;
            }
        }
        if (partialFreezeAndConstraints)
        {
            for (int i = 0; i < homenr; i++)
            {
                int g = md->cFREEZE[i];

                for (int d = 0; d < DIM; d++)
                {
                    if (nFreeze[g][d])
                    {
                        upd->xp[i][d] = state->x[i][d];
                    }
                }
            }
        }
    }

    if (graph && (graph->nnodes > 0))
    {
        unshift_x(graph, state->box, as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()));
        if (TRICLINIC(state->box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }
    else
    {
        /* The copy is performance sensitive, so use a bare pointer */
        rvec *xp = as_rvec_array(upd->xp.data());
#ifndef __clang_analyzer__
        // cppcheck-suppress unreadVariable
        int gmx_unused nth = gmx_omp_nthreads_get(emntUpdate);
#endif
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < homenr; i++)
        {
            // Trivial statement, does not throw
            copy_rvec(xp[i], state->x[i]);
        }
    }

    wallcycle_stop(wcycle, ewcUPDATE);

    /* ############# END the update of velocities and positions ######### */
}
void update_pcouple_after_coordinates(FILE             *fplog,
                                      int64_t           step,
                                      const t_inputrec *inputrec,
                                      const t_mdatoms  *md,
                                      const matrix      pressure,
                                      const matrix      forceVirial,
                                      const matrix      constraintVirial,
                                      const matrix      parrinellorahmanMu,
                                      t_state          *state,
                                      t_nrnb           *nrnb,
                                      gmx_update_t     *upd)
{
    int  start  = 0;
    int  homenr = md->homenr;

    /* Cast to real for faster code, no loss in precision (see comment above) */
    real dt     = inputrec->delta_t;

    /* now update boxes */
    switch (inputrec->epc)
    {
        case (epcNO):
            break;
        case (epcBERENDSEN):
            if (isPressureCouplingStep(step, inputrec))
            {
                real   dtpc = inputrec->nstpcouple*dt;
                matrix mu;
                berendsen_pcoupl(fplog, step, inputrec, dtpc,
                                 pressure, state->box,
                                 forceVirial, constraintVirial,
                                 mu, &state->baros_integral);
                berendsen_pscale(inputrec, mu, state->box, state->box_rel,
                                 start, homenr, as_rvec_array(state->x.data()),
                                 md->cFREEZE, nrnb);
            }
            break;
        case (epcPARRINELLORAHMAN):
            if (isPressureCouplingStep(step, inputrec))
            {
                /* The box velocities were updated in do_pr_pcoupl,
                 * but we don't change the box vectors until we get here
                 * since we need to be able to shift/unshift above.
                 */
                real dtpc = inputrec->nstpcouple*dt;
                for (int i = 0; i < DIM; i++)
                {
                    for (int m = 0; m <= i; m++)
                    {
                        state->box[i][m] += dtpc*state->boxv[i][m];
                    }
                }
                preserve_box_shape(inputrec, state->box_rel, state->box);

                /* Scale the coordinates */
                for (int n = start; n < start + homenr; n++)
                {
                    tmvmul_ur0(parrinellorahmanMu, state->x[n], state->x[n]);
                }
            }
            break;
        case (epcMTTK):
            switch (inputrec->epct)
            {
                case (epctISOTROPIC):
                    /* DIM * eta = ln V. so DIM*eta_new = DIM*eta_old + DIM*dt*veta =>
                       ln V_new = ln V_old + 3*dt*veta => V_new = V_old*exp(3*dt*veta) =>
                       Side length scales as exp(veta*dt) */

                    msmul(state->box, std::exp(state->veta*dt), state->box);

                    /* Relate veta to boxv. veta = d(eta)/dT = (1/DIM)*1/V dV/dT.
                       If we assume isotropic scaling, and box length scaling
                       factor L, then V = L^DIM (det(M)). So dV/dt = DIM
                       L^(DIM-1) dL/dt det(M), and veta = (1/L) dL/dt. The
                       determinant of B is L^DIM det(M), and the determinant
                       of dB/dt is (dL/dT)^DIM det(M). veta will be
                       (det(dB/dT)/det(B))^(1/3). Then since M =
                       B_new*(vol_new)^(1/3), dB/dT_new = (veta_new)*B(new). */

                    msmul(state->box, state->veta, state->boxv);
                    break;
                default:
                    break;
            }
            break;
        default:
            break;
    }

    if (upd->deform)
    {
        auto localX = gmx::ArrayRef<gmx::RVec>(state->x).subArray(start, homenr);
        upd->deform->apply(localX, state->box, step);
    }
}
void update_coords(int64_t                        step,
                   t_inputrec                    *inputrec, /* input record and box stuff */
                   t_mdatoms                     *md,
                   t_state                       *state,
                   gmx::PaddedArrayRef<gmx::RVec> f,        /* forces on home particles */
                   t_fcdata                      *fcd,
                   gmx_ekindata_t                *ekind,
                   matrix                         M,
                   gmx_update_t                  *upd,
                   int                            UpdatePart,
                   const t_commrec               *cr, /* these shouldn't be here -- need to think about it */
                   gmx::Constraints              *constr)
{
    gmx_bool bDoConstr = (nullptr != constr);

    /* Running the velocity half does nothing except for velocity verlet */
    if ((UpdatePart == etrtVELOCITY1 || UpdatePart == etrtVELOCITY2) &&
        !EI_VV(inputrec->eI))
    {
        gmx_incons("update_coords called for velocity without VV integrator");
    }

    int  homenr = md->homenr;

    /* Cast to real for faster code, no loss in precision (see comment above) */
    real dt     = inputrec->delta_t;

    /* We need to update the NMR restraint history when time averaging is used */
    if (state->flags & (1<<estDISRE_RM3TAV))
    {
        update_disres_history(fcd, &state->hist);
    }
    if (state->flags & (1<<estORIRE_DTAV))
    {
        update_orires_history(fcd, &state->hist);
    }

    /* ############# START The update of velocities and positions ######### */
    int nth = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
    {
        try
        {
            int start_th, end_th;
            getThreadAtomRange(nth, th, homenr, &start_th, &end_th);

            /* Strictly speaking, we would only need this check with SIMD
             * and for the actual SIMD width. But since the code currently
             * always adds padding for GMX_REAL_MAX_SIMD_WIDTH, we check that.
             */
            gmx::index gmx_used_in_debug homenrSimdPadded;
            homenrSimdPadded = ((homenr + GMX_REAL_MAX_SIMD_WIDTH - 1)/GMX_REAL_MAX_SIMD_WIDTH)*GMX_REAL_MAX_SIMD_WIDTH;
            GMX_ASSERT(gmx::index(state->x.size()) >= homenrSimdPadded, "state->x needs to be padded for SIMD access");
            GMX_ASSERT(gmx::index(upd->xp.size()) >= homenrSimdPadded, "upd->xp needs to be padded for SIMD access");
            GMX_ASSERT(gmx::index(state->v.size()) >= homenrSimdPadded, "state->v needs to be padded for SIMD access");
            GMX_ASSERT(f.size() >= homenrSimdPadded, "f needs to be padded for SIMD access");

            const rvec *x_rvec  = as_rvec_array(state->x.data());
            rvec       *xp_rvec = as_rvec_array(upd->xp.data());
            rvec       *v_rvec  = as_rvec_array(state->v.data());
            const rvec *f_rvec  = as_rvec_array(f.data());

            switch (inputrec->eI)
            {
                case (eiMD):
                    do_update_md(start_th, end_th, step, dt,
                                 inputrec, md, ekind, state->box,
                                 x_rvec, xp_rvec, v_rvec, f_rvec,
                                 state->nosehoover_vxi.data(), M);
                    break;
                case (eiSD1):
                    if (bDoConstr)
                    {
                        // With constraints, the SD update is done in 2 parts
                        doSDUpdateGeneral<SDUpdate::ForcesOnly>
                            (upd->sd,
                            start_th, end_th, dt,
                            inputrec->opts.acc, inputrec->opts.nFreeze,
                            md->invmass, md->ptype,
                            md->cFREEZE, md->cACC, nullptr,
                            x_rvec, xp_rvec, v_rvec, f_rvec,
                            step, inputrec->ld_seed, nullptr);
                    }
                    else
                    {
                        doSDUpdateGeneral<SDUpdate::Combined>
                            (upd->sd,
                            start_th, end_th, dt,
                            inputrec->opts.acc, inputrec->opts.nFreeze,
                            md->invmass, md->ptype,
                            md->cFREEZE, md->cACC, md->cTC,
                            x_rvec, xp_rvec, v_rvec, f_rvec,
                            step, inputrec->ld_seed,
                            DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
                    }
                    break;
                case (eiBD):
                    do_update_bd(start_th, end_th, dt,
                                 inputrec->opts.nFreeze, md->invmass, md->ptype,
                                 md->cFREEZE, md->cTC,
                                 x_rvec, xp_rvec, v_rvec, f_rvec,
                                 inputrec->bd_fric,
                                 upd->sd->bd_rf,
                                 step, inputrec->ld_seed, DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr);
                    break;
                case (eiVV):
                case (eiVVAK):
                {
                    gmx_bool bExtended = (inputrec->etc == etcNOSEHOOVER ||
                                          inputrec->epc == epcPARRINELLORAHMAN ||
                                          inputrec->epc == epcMTTK);

                    /* assuming barostat coupled to group 0 */
                    real alpha = 1.0 + DIM/static_cast<real>(inputrec->opts.nrdf[0]);
                    switch (UpdatePart)
                    {
                        case etrtVELOCITY1:
                        case etrtVELOCITY2:
                            do_update_vv_vel(start_th, end_th, dt,
                                             inputrec->opts.acc, inputrec->opts.nFreeze,
                                             md->invmass, md->ptype,
                                             md->cFREEZE, md->cACC,
                                             v_rvec, f_rvec,
                                             bExtended, state->veta, alpha);
                            break;
                        case etrtPOSITION:
                            do_update_vv_pos(start_th, end_th, dt,
                                             inputrec->opts.nFreeze,
                                             md->ptype, md->cFREEZE,
                                             x_rvec, xp_rvec, v_rvec,
                                             bExtended, state->veta);
                            break;
                    }
                    break;
                }
                default:
                    gmx_fatal(FARGS, "Don't know how to update coordinates");
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
extern gmx_bool update_randomize_velocities(t_inputrec *ir, int64_t step, const t_commrec *cr,
                                            t_mdatoms *md, t_state *state, gmx_update_t *upd, gmx::Constraints *constr)
{
    real rate = (ir->delta_t)/ir->opts.tau_t[0];

    if (ir->etc == etcANDERSEN && constr != nullptr)
    {
        /* Currently, Andersen thermostat does not support constrained
           systems. Functionality exists in the andersen_tcoupl
           function in GROMACS 4.5.7 to allow this combination. That
           code could be ported to the current random-number
           generation approach, but has not yet been done because of
           lack of time and resources. */
        gmx_fatal(FARGS, "Normal Andersen is currently not supported with constraints, use massive Andersen instead");
    }

    /* proceed with andersen if 1) it's fixed probability per
       particle andersen or 2) it's massive andersen and it's tau_t/dt */
    if ((ir->etc == etcANDERSEN) || do_per_step(step, static_cast<int>(1.0/rate + 0.5)))
    {
        andersen_tcoupl(ir, step, cr, md, state, rate,
                        upd->sd->randomize_group, upd->sd->boltzfac);
        return TRUE;
    }
    return FALSE;
}