Fix icc warning in post-submit
[gromacs.git] / src / gromacs / mdlib / update.cpp
blob ea90b9c9110d4093d76f906780a50e8dc085c8be
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "update.h"

#include <stdio.h>

#include <cmath>

#include <algorithm>

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/paddedvector.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/sim_util.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/boxutilities.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/random/tabulatednormaldistribution.h"
#include "gromacs/random/threefry.h"
#include "gromacs/simd/simd.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/atoms.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
typedef struct {
    double em;
} gmx_sd_const_t;

typedef struct {
    real V;
} gmx_sd_sigma_t;

typedef struct {
    /* BD stuff */
    real           *bd_rf;
    /* SD stuff */
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sdsig;
    /* andersen temperature control stuff */
    gmx_bool       *randomize_group;
    real           *boltzfac;
} gmx_stochd_t;
struct gmx_update_t
{
    gmx_stochd_t     *sd;
    /* xprime for constraint algorithms */
    PaddedRVecVector  xp;

    /* Variables for the deform algorithm */
    gmx_int64_t       deformref_step;
    matrix            deformref_box;
};
static bool isTemperatureCouplingStep(gmx_int64_t step, const t_inputrec *ir)
{
    /* We should only couple after a step where energies were determined (for leapfrog versions)
       or the step energies are determined, for velocity verlet versions */
    int offset;
    if (EI_VV(ir->eI))
    {
        offset = 0;
    }
    else
    {
        offset = 1;
    }
    return ir->etc != etcNO &&
           (ir->nsttcouple == 1 ||
            do_per_step(step + ir->nsttcouple - offset, ir->nsttcouple));
}
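/* Worked example of the offset logic above: with leap-frog (offset = 1)
 * and nsttcouple = 10, do_per_step(step + 9, 10) holds at steps
 * 1, 11, 21, ..., i.e. one step after each multiple-of-10 step at which
 * the energies were determined. */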
static bool isPressureCouplingStep(gmx_int64_t step, const t_inputrec *ir)
{
    GMX_ASSERT(ir->epc != epcMTTK, "MTTK pressure coupling is not handled here");

    int offset;
    if (ir->epc == epcBERENDSEN)
    {
        offset = 0;
    }
    else
    {
        offset = 1;
    }
    /* We should only couple after a step where pressures were determined */
    return ir->epc != epcNO &&
           (ir->nstpcouple == 1 ||
            do_per_step(step + ir->nstpcouple - offset, ir->nstpcouple));
}
/*! \brief Sets the velocities of virtual sites to zero */
static void clearVsiteVelocities(int                   start,
                                 int                   nrend,
                                 const unsigned short *particleType,
                                 rvec * gmx_restrict   v)
{
    for (int a = start; a < nrend; a++)
    {
        if (particleType[a] == eptVSite)
        {
            clear_rvec(v[a]);
        }
    }
}
/*! \brief Sets the number of different temperature coupling values */
enum class NumTempScaleValues
{
    single,   //!< Single T-scaling value (either one group or all values =1)
    multiple  //!< Multiple T-scaling values, need to use T-group indices
};

/*! \brief Sets whether to apply no or diagonal Parrinello-Rahman pressure scaling
 *
 * Note that this enum is only used in updateMDLeapfrogSimple(), which does
 * not handle fully anisotropic Parrinello-Rahman scaling, so we only have
 * options \p no and \p diagonal here and no anisotropic option.
 */
enum class ApplyParrinelloRahmanVScaling
{
    no,       //!< Do not apply velocity scaling (not a PR-coupling run or step)
    diagonal  //!< Apply velocity scaling using a diagonal matrix
};
/*! \brief Integrate using leap-frog with T-scaling and optionally diagonal Parrinello-Rahman p-coupling
 *
 * \tparam       numTempScaleValues     The number of different T-couple values
 * \tparam       applyPRVScaling        Apply Parrinello-Rahman velocity scaling
 * \param[in]    start                  Index of first atom to update
 * \param[in]    nrend                  Last atom to update: \p nrend - 1
 * \param[in]    dt                     The time step
 * \param[in]    dtPressureCouple       Time step for pressure coupling
 * \param[in]    invMassPerDim          1/mass per atom and dimension
 * \param[in]    tcstat                 Temperature coupling information
 * \param[in]    cTC                    T-coupling group index per atom
 * \param[in]    pRVScaleMatrixDiagonal Parrinello-Rahman v-scale matrix diagonal
 * \param[in]    x                      Input coordinates
 * \param[out]   xprime                 Updated coordinates
 * \param[inout] v                      Velocities
 * \param[in]    f                      Forces
 *
 * We expect this template to get good SIMD acceleration by most compilers,
 * unlike the more complex general template.
 * Note that we might get even better SIMD acceleration when we introduce
 * aligned (and padded) memory, possibly with some hints for the compilers.
 */
template<NumTempScaleValues            numTempScaleValues,
         ApplyParrinelloRahmanVScaling applyPRVScaling>
static void
updateMDLeapfrogSimple(int                       start,
                       int                       nrend,
                       real                      dt,
                       real                      dtPressureCouple,
                       const rvec * gmx_restrict invMassPerDim,
                       const t_grp_tcstat      * tcstat,
                       const unsigned short    * cTC,
                       const rvec                pRVScaleMatrixDiagonal,
                       const rvec * gmx_restrict x,
                       rvec       * gmx_restrict xprime,
                       rvec       * gmx_restrict v,
                       const rvec * gmx_restrict f)
{
    real lambdaGroup;

    if (numTempScaleValues == NumTempScaleValues::single)
    {
        lambdaGroup = tcstat[0].lambda;
    }

    for (int a = start; a < nrend; a++)
    {
        if (numTempScaleValues == NumTempScaleValues::multiple)
        {
            lambdaGroup = tcstat[cTC[a]].lambda;
        }

        for (int d = 0; d < DIM; d++)
        {
            /* Note that using rvec invMassPerDim results in more efficient
             * SIMD code, but this increases the cache pressure.
             * For large systems with PME on the CPU this slows down the
             * (then already slow) update by 20%. If all data remains in cache,
             * using rvec is much faster.
             */
            real vNew = lambdaGroup*v[a][d] + f[a][d]*invMassPerDim[a][d]*dt;

            if (applyPRVScaling == ApplyParrinelloRahmanVScaling::diagonal)
            {
                vNew -= dtPressureCouple*pRVScaleMatrixDiagonal[d]*v[a][d];
            }
            v[a][d]      = vNew;
            xprime[a][d] = x[a][d] + vNew*dt;
        }
    }
}
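/* For reference, updateMDLeapfrogSimple() above is the plain leap-frog
 * update per atom a and dimension d:
 *     v(t+dt/2) = lambda * v(t-dt/2) + f(t)/m * dt    (T-scaled kick)
 *     x(t+dt)   = x(t) + v(t+dt/2) * dt               (drift)
 * with the optional extra velocity correction
 *     -dtPressureCouple * diag(M)[d] * v[a][d]
 * applied on diagonal Parrinello-Rahman coupling steps. */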
#if GMX_SIMD && GMX_SIMD_HAVE_REAL
#define GMX_HAVE_SIMD_UPDATE 1
#else
#define GMX_HAVE_SIMD_UPDATE 0
#endif
#if GMX_HAVE_SIMD_UPDATE

/*! \brief Load (aligned) the contents of GMX_SIMD_REAL_WIDTH rvec elements sequentially into 3 SIMD registers
 *
 * The loaded output is:
 * \p r0: { r[index][XX], r[index][YY], ... }
 * \p r1: { ... }
 * \p r2: { ..., r[index+GMX_SIMD_REAL_WIDTH-1][YY], r[index+GMX_SIMD_REAL_WIDTH-1][ZZ] }
 *
 * \param[in]  r      Pointer to an rvec array, has to be aligned to SIMD register width
 * \param[in]  index  Index of the first rvec triplet of reals to load
 * \param[out] r0     Pointer to first SIMD register
 * \param[out] r1     Pointer to second SIMD register
 * \param[out] r2     Pointer to third SIMD register
 */
static inline void simdLoadRvecs(const rvec *r,
                                 int         index,
                                 SimdReal   *r0,
                                 SimdReal   *r1,
                                 SimdReal   *r2)
{
    const real *realPtr = r[index];

    GMX_ASSERT(isSimdAligned(realPtr), "Pointer should be SIMD aligned");

    *r0 = simdLoad(realPtr + 0*GMX_SIMD_REAL_WIDTH);
    *r1 = simdLoad(realPtr + 1*GMX_SIMD_REAL_WIDTH);
    *r2 = simdLoad(realPtr + 2*GMX_SIMD_REAL_WIDTH);
}
/*! \brief Store (aligned) 3 SIMD registers sequentially to GMX_SIMD_REAL_WIDTH rvec elements
 *
 * The stored output is:
 * \p r[index] = { { r0[0], r0[1], ... }
 * ...
 * \p r[index+GMX_SIMD_REAL_WIDTH-1] = { ..., r2[GMX_SIMD_REAL_WIDTH-2], r2[GMX_SIMD_REAL_WIDTH-1] }
 *
 * \param[out] r      Pointer to an rvec array, has to be aligned to SIMD register width
 * \param[in]  index  Index of the first rvec triplet of reals to store to
 * \param[in]  r0     First SIMD register
 * \param[in]  r1     Second SIMD register
 * \param[in]  r2     Third SIMD register
 */
static inline void simdStoreRvecs(rvec     *r,
                                  int       index,
                                  SimdReal  r0,
                                  SimdReal  r1,
                                  SimdReal  r2)
{
    real *realPtr = r[index];

    GMX_ASSERT(isSimdAligned(realPtr), "Pointer should be SIMD aligned");

    store(realPtr + 0*GMX_SIMD_REAL_WIDTH, r0);
    store(realPtr + 1*GMX_SIMD_REAL_WIDTH, r1);
    store(realPtr + 2*GMX_SIMD_REAL_WIDTH, r2);
}
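/* Illustration of the layout the two helpers above rely on: an rvec array
 * is contiguous memory of 3*N reals, so for e.g. GMX_SIMD_REAL_WIDTH == 4
 * the 4 rvecs starting at r[index] occupy exactly three SIMD registers:
 *     r0 = { x0, y0, z0, x1 }
 *     r1 = { y1, z1, x2, y2 }
 *     r2 = { z2, x3, y3, z3 }
 * No shuffling is needed, because the update treats all components alike. */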
/*! \brief Integrate using leap-frog with single group T-scaling and SIMD
 *
 * \param[in]    start    Index of first atom to update
 * \param[in]    nrend    Last atom to update: \p nrend - 1
 * \param[in]    dt       The time step
 * \param[in]    invMass  1/mass per atom
 * \param[in]    tcstat   Temperature coupling information
 * \param[in]    x        Input coordinates
 * \param[out]   xprime   Updated coordinates
 * \param[inout] v        Velocities
 * \param[in]    f        Forces
 */
static void
updateMDLeapfrogSimpleSimd(int                       start,
                           int                       nrend,
                           real                      dt,
                           const real * gmx_restrict invMass,
                           const t_grp_tcstat      * tcstat,
                           const rvec * gmx_restrict x,
                           rvec       * gmx_restrict xprime,
                           rvec       * gmx_restrict v,
                           const rvec * gmx_restrict f)
{
    SimdReal timestep(dt);
    SimdReal lambdaSystem(tcstat[0].lambda);

    /* We declare variables here, since code is often slower when declaring them inside the loop */

    /* Note: We should implement a proper PaddedVector, so we don't need this check */
    GMX_ASSERT(isSimdAligned(invMass), "invMass should be aligned");

    for (int a = start; a < nrend; a += GMX_SIMD_REAL_WIDTH)
    {
        SimdReal invMass0, invMass1, invMass2;
        expandScalarsToTriplets(simdLoad(invMass + a),
                                &invMass0, &invMass1, &invMass2);

        SimdReal v0, v1, v2;
        SimdReal f0, f1, f2;
        simdLoadRvecs(v, a, &v0, &v1, &v2);
        simdLoadRvecs(f, a, &f0, &f1, &f2);

        v0 = fma(f0*invMass0, timestep, lambdaSystem*v0);
        v1 = fma(f1*invMass1, timestep, lambdaSystem*v1);
        v2 = fma(f2*invMass2, timestep, lambdaSystem*v2);

        simdStoreRvecs(v, a, v0, v1, v2);

        SimdReal x0, x1, x2;
        simdLoadRvecs(x, a, &x0, &x1, &x2);

        SimdReal xprime0 = fma(v0, timestep, x0);
        SimdReal xprime1 = fma(v1, timestep, x1);
        SimdReal xprime2 = fma(v2, timestep, x2);

        simdStoreRvecs(xprime, a, xprime0, xprime1, xprime2);
    }
}

#endif // GMX_HAVE_SIMD_UPDATE
/*! \brief Sets the NEMD acceleration type */
enum class AccelerationType
{
    none, group, cosine
};
/*! \brief Integrate using leap-frog with support for everything
 *
 * \tparam       accelerationType  Type of NEMD acceleration
 * \param[in]    start             Index of first atom to update
 * \param[in]    nrend             Last atom to update: \p nrend - 1
 * \param[in]    doNoseHoover      If to apply Nose-Hoover T-coupling
 * \param[in]    dt                The time step
 * \param[in]    dtPressureCouple  Time step for pressure coupling, is 0 when no pressure coupling should be applied at this step
 * \param[in]    ir                The input parameter record
 * \param[in]    md                Atom properties
 * \param[in]    ekind             Kinetic energy data
 * \param[in]    box               The box dimensions
 * \param[in]    x                 Input coordinates
 * \param[out]   xprime            Updated coordinates
 * \param[inout] v                 Velocities
 * \param[in]    f                 Forces
 * \param[in]    nh_vxi            Nose-Hoover velocity scaling factors
 * \param[in]    M                 Parrinello-Rahman scaling matrix
 */
template<AccelerationType accelerationType>
static void
updateMDLeapfrogGeneral(int                         start,
                        int                         nrend,
                        bool                        doNoseHoover,
                        real                        dt,
                        real                        dtPressureCouple,
                        const t_inputrec          * ir,
                        const t_mdatoms           * md,
                        const gmx_ekindata_t      * ekind,
                        const matrix                box,
                        const rvec   * gmx_restrict x,
                        rvec         * gmx_restrict xprime,
                        rvec         * gmx_restrict v,
                        const rvec   * gmx_restrict f,
                        const double * gmx_restrict nh_vxi,
                        const matrix                M)
{
    /* This is a version of the leap-frog integrator that supports
     * all combinations of T-coupling, P-coupling and NEMD.
     * Nose-Hoover uses the reversible leap-frog integrator from
     * Holian et al. Phys Rev E 52(3) : 2338, 1995
     */

    const unsigned short    * cTC     = md->cTC;
    const t_grp_tcstat      * tcstat  = ekind->tcstat;

    const unsigned short    * cACC    = md->cACC;
    const rvec              * accel   = ir->opts.acc;
    const t_grp_acc         * grpstat = ekind->grpstat;

    const rvec * gmx_restrict invMassPerDim = md->invMassPerDim;

    /* Initialize group values, changed later when multiple groups are used */
    int  ga       = 0;
    int  gt       = 0;
    real factorNH = 0;

    for (int n = start; n < nrend; n++)
    {
        if (cTC)
        {
            gt = cTC[n];
        }
        real lg = tcstat[gt].lambda;

        rvec vRel;
        real cosineZ, vCosine;
#ifdef __INTEL_COMPILER
/* accelerationType is a template parameter, so icc warns that the switch
 * selector is constant (warning #280); that is intentional here. */
#pragma warning( disable : 280 )
#endif
        switch (accelerationType)
        {
            case AccelerationType::none:
                copy_rvec(v[n], vRel);
                break;
            case AccelerationType::group:
                if (cACC)
                {
                    ga = cACC[n];
                }
                /* Avoid scaling the group velocity */
                rvec_sub(v[n], grpstat[ga].u, vRel);
                break;
            case AccelerationType::cosine:
                cosineZ = std::cos(x[n][ZZ]*static_cast<real>(M_PI)/box[ZZ][ZZ]);
                vCosine = cosineZ*ekind->cosacc.vcos;
                /* Avoid scaling the cosine profile velocity */
                copy_rvec(v[n], vRel);
                vRel[XX] -= vCosine;
                break;
        }

        if (doNoseHoover)
        {
            /* Here we account for multiple time stepping, by increasing
             * the Nose-Hoover correction by nsttcouple
             */
            factorNH = 0.5*ir->nsttcouple*dt*nh_vxi[gt];
        }

        for (int d = 0; d < DIM; d++)
        {
            real vNew =
                (lg*vRel[d] + (f[n][d]*invMassPerDim[n][d]*dt - factorNH*vRel[d]
                               - dtPressureCouple*iprod(M[d], vRel)))/(1 + factorNH);
            switch (accelerationType)
            {
                case AccelerationType::none:
                    break;
                case AccelerationType::group:
                    /* Add back the mean velocity and apply acceleration */
                    vNew += grpstat[ga].u[d] + accel[ga][d]*dt;
                    break;
                case AccelerationType::cosine:
                    if (d == XX)
                    {
                        /* Add back the mean velocity and apply acceleration */
                        vNew += vCosine + cosineZ*ekind->cosacc.cos_accel*dt;
                    }
                    break;
            }
            v[n][d]      = vNew;
            xprime[n][d] = x[n][d] + vNew*dt;
        }
    }
}
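/* For reference, the velocity update in the inner loop above is the
 * semi-implicit form of the Holian et al. leap-frog discretization cited
 * at the top of updateMDLeapfrogGeneral():
 *     v' = (lg*v + f/m*dt - factorNH*v - dtPressureCouple*M.v) / (1 + factorNH)
 * Treating the Nose-Hoover friction term both explicitly (numerator) and
 * implicitly (denominator) is what keeps the integrator reversible. */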
/*! \brief Handles the Leap-frog MD x and v integration */
static void do_update_md(int                         start,
                         int                         nrend,
                         gmx_int64_t                 step,
                         real                        dt,
                         const t_inputrec          * ir,
                         const t_mdatoms           * md,
                         const gmx_ekindata_t      * ekind,
                         const matrix                box,
                         const rvec   * gmx_restrict x,
                         rvec         * gmx_restrict xprime,
                         rvec         * gmx_restrict v,
                         const rvec   * gmx_restrict f,
                         const double * gmx_restrict nh_vxi,
                         const matrix                M)
{
    GMX_ASSERT(nrend == start || xprime != x, "For SIMD optimization certain compilers need to have xprime != x");

    /* Note: Berendsen pressure scaling is handled after do_update_md() */
    bool doTempCouple       = isTemperatureCouplingStep(step, ir);
    bool doNoseHoover       = (ir->etc == etcNOSEHOOVER && doTempCouple);
    bool doParrinelloRahman = (ir->epc == epcPARRINELLORAHMAN && isPressureCouplingStep(step, ir));
    bool doPROffDiagonal    = (doParrinelloRahman && (M[YY][XX] != 0 || M[ZZ][XX] != 0 || M[ZZ][YY] != 0));

    real dtPressureCouple   = (doParrinelloRahman ? ir->nstpcouple*dt : 0);

    /* NEMD (also cosine) acceleration is applied in updateMDLeapfrogGeneral() */
    bool doAcceleration     = (ekind->bNEMD || ekind->cosacc.cos_accel != 0);

    if (doNoseHoover || doPROffDiagonal || doAcceleration)
    {
        matrix stepM;
        if (!doParrinelloRahman)
        {
            /* We should not apply PR scaling at this step */
            clear_mat(stepM);
        }
        else
        {
            copy_mat(M, stepM);
        }

        if (!doAcceleration)
        {
            updateMDLeapfrogGeneral<AccelerationType::none>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, stepM);
        }
        else if (ekind->bNEMD)
        {
            updateMDLeapfrogGeneral<AccelerationType::group>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, stepM);
        }
        else
        {
            updateMDLeapfrogGeneral<AccelerationType::cosine>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, stepM);
        }
    }
    else
    {
        /* Use a simple and thus more efficient integration loop. */
        /* The simple loop does not check for particle type (so it can
         * be vectorized), which means we need to clear the velocities
         * of virtual sites in advance, when present. Note that vsite
         * velocities are computed after update and constraints from
         * their displacement.
         */
        if (md->haveVsites)
        {
            /* Note: The overhead of this loop is completely negligible */
            clearVsiteVelocities(start, nrend, md->ptype, v);
        }

        /* We determine if we have a single T-coupling lambda value for all
         * atoms. That allows for better SIMD acceleration in the template.
         * If we do not do temperature coupling (in the run or this step),
         * all scaling values are 1, so we effectively have a single value.
         */
        bool haveSingleTempScaleValue = (!doTempCouple || ekind->ngtc == 1);

        /* Extract some pointers needed by all cases */
        const unsigned short *cTC           = md->cTC;
        const t_grp_tcstat   *tcstat        = ekind->tcstat;
        const rvec           *invMassPerDim = md->invMassPerDim;

        if (doParrinelloRahman)
        {
            GMX_ASSERT(!doPROffDiagonal, "updateMDLeapfrogSimple only supports diagonal Parrinello-Rahman scaling matrices");

            rvec diagM;
            for (int d = 0; d < DIM; d++)
            {
                diagM[d] = M[d][d];
            }

            if (haveSingleTempScaleValue)
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::single,
                 ApplyParrinelloRahmanVScaling::diagonal>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, diagM, x, xprime, v, f);
            }
            else
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::multiple,
                 ApplyParrinelloRahmanVScaling::diagonal>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, diagM, x, xprime, v, f);
            }
        }
        else
        {
            if (haveSingleTempScaleValue)
            {
                /* Note that modern compilers are pretty good at vectorizing
                 * updateMDLeapfrogSimple(). But the SIMD version will still
                 * be faster because invMass lowers the cache pressure
                 * compared to invMassPerDim.
                 */
#if GMX_HAVE_SIMD_UPDATE
                /* Check if we can use invmass instead of invMassPerDim */
                if (!md->havePartiallyFrozenAtoms)
                {
                    updateMDLeapfrogSimpleSimd
                        (start, nrend, dt, md->invmass, tcstat, x, xprime, v, f);
                }
                else
#endif
                {
                    updateMDLeapfrogSimple
                    <NumTempScaleValues::single,
                     ApplyParrinelloRahmanVScaling::no>
                        (start, nrend, dt, dtPressureCouple,
                        invMassPerDim, tcstat, cTC, nullptr, x, xprime, v, f);
                }
            }
            else
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::multiple,
                 ApplyParrinelloRahmanVScaling::no>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, nullptr, x, xprime, v, f);
            }
        }
    }
}
static void do_update_vv_vel(int start, int nrend, real dt,
                             rvec accel[], ivec nFreeze[], real invmass[],
                             unsigned short ptype[], unsigned short cFREEZE[],
                             unsigned short cACC[], rvec v[], const rvec f[],
                             gmx_bool bExtended, real veta, real alpha)
{
    int  gf = 0, ga = 0;
    int  n, d;
    real g, mv1, mv2;

    if (bExtended)
    {
        g   = 0.25*dt*veta*alpha;
        mv1 = std::exp(-g);
        mv2 = gmx::series_sinhx(g);
    }
    else
    {
        mv1 = 1.0;
        mv2 = 1.0;
    }
    for (n = start; n < nrend; n++)
    {
        real w_dt = invmass[n]*dt;
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cACC)
        {
            ga = cACC[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                v[n][d] = mv1*(mv1*v[n][d] + 0.5*(w_dt*mv2*f[n][d])) + 0.5*accel[ga][d]*dt;
            }
            else
            {
                v[n][d] = 0.0;
            }
        }
    }
} /* do_update_vv_vel */
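/* For reference, without the extended ensemble (bExtended == FALSE,
 * mv1 = mv2 = 1) the update above reduces to the plain velocity-Verlet
 * half-kick v' = v + 0.5*dt*(f/m + a). With a barostat coupled,
 * g = 0.25*dt*veta*alpha, and the exp(-g) and sinh(g)/g factors
 * (gmx::series_sinhx) damp the velocities by the box velocity veta,
 * following the MTTK equations of motion. */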
static void do_update_vv_pos(int start, int nrend, real dt,
                             ivec nFreeze[],
                             unsigned short ptype[], unsigned short cFREEZE[],
                             const rvec x[], rvec xprime[], rvec v[],
                             gmx_bool bExtended, real veta)
{
    int  gf = 0;
    int  n, d;
    real g, mr1, mr2;

    /* Would it make more sense if Parrinello-Rahman was put here? */
    if (bExtended)
    {
        g   = 0.5*dt*veta;
        mr1 = std::exp(g);
        mr2 = gmx::series_sinhx(g);
    }
    else
    {
        mr1 = 1.0;
        mr2 = 1.0;
    }

    for (n = start; n < nrend; n++)
    {
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                xprime[n][d] = mr1*(mr1*x[n][d]+mr2*dt*v[n][d]);
            }
            else
            {
                xprime[n][d] = x[n][d];
            }
        }
    }
} /* do_update_vv_pos */
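/* For reference, without the barostat (bExtended == FALSE) the update
 * above is the plain velocity-Verlet drift x' = x + dt*v. With veta != 0
 * the box is breathing, and x' = mr1*(mr1*x + mr2*dt*v) with
 * mr1 = exp(0.5*dt*veta) and mr2 = sinh(g)/g propagates the positions
 * together with the isotropic box scaling. */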
static gmx_stochd_t *init_stochd(const t_inputrec *ir)
{
    gmx_stochd_t *sd;

    snew(sd, 1);

    const t_grpopts *opts = &ir->opts;
    int              ngtc = opts->ngtc;

    if (ir->eI == eiBD)
    {
        snew(sd->bd_rf, ngtc);
    }
    else if (EI_SD(ir->eI))
    {
        snew(sd->sdc, ngtc);
        snew(sd->sdsig, ngtc);

        gmx_sd_const_t *sdc = sd->sdc;

        for (int gt = 0; gt < ngtc; gt++)
        {
            if (opts->tau_t[gt] > 0)
            {
                sdc[gt].em = std::exp(-ir->delta_t/opts->tau_t[gt]);
            }
            else
            {
                /* No friction and noise on this group */
                sdc[gt].em = 1;
            }
        }
    }
    else if (ETC_ANDERSEN(ir->etc))
    {
        snew(sd->randomize_group, ngtc);
        snew(sd->boltzfac, ngtc);

        /* for now, assume that all groups, if randomized, are randomized at the same rate, i.e. tau_t is the same. */
        /* since constraint groups don't necessarily match up with temperature groups! This is checked in readir.c */

        for (int gt = 0; gt < ngtc; gt++)
        {
            real reft = std::max<real>(0, opts->ref_t[gt]);
            if ((opts->tau_t[gt] > 0) && (reft > 0))  /* tau_t or ref_t = 0 means that no randomization is done */
            {
                sd->randomize_group[gt] = TRUE;
                sd->boltzfac[gt]        = BOLTZ*opts->ref_t[gt];
            }
            else
            {
                sd->randomize_group[gt] = FALSE;
            }
        }
    }

    return sd;
}
void update_temperature_constants(gmx_update_t *upd, const t_inputrec *ir)
{
    if (ir->eI == eiBD)
    {
        if (ir->bd_fric != 0)
        {
            for (int gt = 0; gt < ir->opts.ngtc; gt++)
            {
                upd->sd->bd_rf[gt] = std::sqrt(2.0*BOLTZ*ir->opts.ref_t[gt]/(ir->bd_fric*ir->delta_t));
            }
        }
        else
        {
            for (int gt = 0; gt < ir->opts.ngtc; gt++)
            {
                upd->sd->bd_rf[gt] = std::sqrt(2.0*BOLTZ*ir->opts.ref_t[gt]);
            }
        }
    }
    if (ir->eI == eiSD1)
    {
        for (int gt = 0; gt < ir->opts.ngtc; gt++)
        {
            real kT = BOLTZ*ir->opts.ref_t[gt];
            /* The mass is accounted for later, since this differs per atom */
            upd->sd->sdsig[gt].V = std::sqrt(kT*(1 - upd->sd->sdc[gt].em*upd->sd->sdc[gt].em));
        }
    }
}
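/* For reference, em and sdsig together discretize an Ornstein-Uhlenbeck
 * process exactly over one step: the velocity memory decays as
 * em = exp(-dt/tau_t), and the injected noise then needs variance
 * kT/m * (1 - em^2) to preserve the Maxwell-Boltzmann distribution, hence
 * sdsig[gt].V = sqrt(kT*(1 - em^2)) with the per-atom 1/sqrt(m) factor
 * applied in do_update_sd1(). */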
gmx_update_t *init_update(const t_inputrec *ir)
{
    gmx_update_t *upd = new(gmx_update_t);

    if (ir->eI == eiBD || EI_SD(ir->eI) || ir->etc == etcVRESCALE || ETC_ANDERSEN(ir->etc))
    {
        upd->sd = init_stochd(ir);
    }

    update_temperature_constants(upd, ir);

    upd->xp.resize(0);

    return upd;
}
void update_realloc(gmx_update_t *upd, int natoms)
{
    GMX_ASSERT(upd, "upd must be allocated before its fields can be reallocated");

    upd->xp.resize(gmx::paddedRVecVectorSize(natoms));
}
static void do_update_sd1(gmx_stochd_t *sd,
                          int start, int nrend, real dt,
                          rvec accel[], ivec nFreeze[],
                          real invmass[], unsigned short ptype[],
                          unsigned short cFREEZE[], unsigned short cACC[],
                          unsigned short cTC[],
                          const rvec x[], rvec xprime[], rvec v[], const rvec f[],
                          gmx_bool bDoConstr,
                          gmx_bool bFirstHalfConstr,
                          gmx_int64_t step, int seed, int* gatindex)
{
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sig;
    int             gf = 0, ga = 0, gt = 0;
    real            ism;
    int             n, d;

    // Even 0 bits internal counter gives 2x64 ints (more than enough for three table lookups)
    gmx::ThreeFry2x64<0> rng(seed, gmx::RandomDomain::UpdateCoordinates);
    gmx::TabulatedNormalDistribution<real, 14> dist;

    sdc = sd->sdc;
    sig = sd->sdsig;

    if (!bDoConstr)
    {
        for (n = start; n < nrend; n++)
        {
            int ng = gatindex ? gatindex[n] : n;

            rng.restart(step, ng);
            dist.reset();

            ism = std::sqrt(invmass[n]);

            if (cFREEZE)
            {
                gf = cFREEZE[n];
            }
            if (cACC)
            {
                ga = cACC[n];
            }
            if (cTC)
            {
                gt = cTC[n];
            }

            for (d = 0; d < DIM; d++)
            {
                if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                {
                    real sd_V, vn;

                    sd_V         = ism*sig[gt].V*dist(rng);
                    vn           = v[n][d] + (invmass[n]*f[n][d] + accel[ga][d])*dt;
                    v[n][d]      = vn*sdc[gt].em + sd_V;
                    /* Here we include half of the friction+noise
                     * update of v into the integration of x.
                     */
                    xprime[n][d] = x[n][d] + 0.5*(vn + v[n][d])*dt;
                }
                else
                {
                    v[n][d]      = 0.0;
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
    else
    {
        /* We do have constraints */
        if (bFirstHalfConstr)
        {
            /* First update without friction and noise */
            real im;

            for (n = start; n < nrend; n++)
            {
                im = invmass[n];

                if (cFREEZE)
                {
                    gf = cFREEZE[n];
                }
                if (cACC)
                {
                    ga = cACC[n];
                }

                for (d = 0; d < DIM; d++)
                {
                    if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                    {
                        v[n][d]      = v[n][d] + (im*f[n][d] + accel[ga][d])*dt;
                        xprime[n][d] = x[n][d] + v[n][d]*dt;
                    }
                    else
                    {
                        v[n][d]      = 0.0;
                        xprime[n][d] = x[n][d];
                    }
                }
            }
        }
        else
        {
            /* Update friction and noise only */
            for (n = start; n < nrend; n++)
            {
                int ng = gatindex ? gatindex[n] : n;

                rng.restart(step, ng);
                dist.reset();

                ism = std::sqrt(invmass[n]);

                if (cFREEZE)
                {
                    gf = cFREEZE[n];
                }
                if (cTC)
                {
                    gt = cTC[n];
                }

                for (d = 0; d < DIM; d++)
                {
                    if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                    {
                        real sd_V, vn;

                        sd_V         = ism*sig[gt].V*dist(rng);
                        vn           = v[n][d];
                        v[n][d]      = vn*sdc[gt].em + sd_V;
                        /* Add the friction and noise contribution only */
                        xprime[n][d] = xprime[n][d] + 0.5*(v[n][d] - vn)*dt;
                    }
                }
            }
        }
    }
}
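/* Note on the structure of do_update_sd1() above: without constraints the
 * inertial update and the friction+noise update are fused in a single
 * pass. With constraints the same total update is split in two halves
 * around the constraint projection: first a plain leap-frog step
 * (bFirstHalfConstr), then, after constraining, the friction and noise
 * are applied and half of their velocity change is folded back into
 * xprime, matching the fused path. */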
static void do_update_bd(int start, int nrend, real dt,
                         ivec nFreeze[],
                         real invmass[], unsigned short ptype[],
                         unsigned short cFREEZE[], unsigned short cTC[],
                         const rvec x[], rvec xprime[], rvec v[],
                         const rvec f[], real friction_coefficient,
                         real *rf, gmx_int64_t step, int seed,
                         int* gatindex)
{
    /* note -- these appear to be full step velocities . . .  */
    int    gf = 0, gt = 0;
    real   vn;
    real   invfr = 0;
    int    n, d;
    // Even 0 bits internal counter gives 2x64 ints (more than enough for three table lookups)
    gmx::ThreeFry2x64<0> rng(seed, gmx::RandomDomain::UpdateCoordinates);
    gmx::TabulatedNormalDistribution<real, 14> dist;

    if (friction_coefficient != 0)
    {
        invfr = 1.0/friction_coefficient;
    }

    for (n = start; (n < nrend); n++)
    {
        int ng = gatindex ? gatindex[n] : n;

        rng.restart(step, ng);
        dist.reset();

        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cTC)
        {
            gt = cTC[n];
        }
        for (d = 0; (d < DIM); d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                if (friction_coefficient != 0)
                {
                    vn = invfr*f[n][d] + rf[gt]*dist(rng);
                }
                else
                {
                    /* NOTE: invmass = 2/(mass*friction_constant*dt) */
                    vn = 0.5*invmass[n]*f[n][d]*dt
                        + std::sqrt(0.5*invmass[n])*rf[gt]*dist(rng);
                }

                v[n][d]      = vn;
                xprime[n][d] = x[n][d]+vn*dt;
            }
            else
            {
                v[n][d]      = 0.0;
                xprime[n][d] = x[n][d];
            }
        }
    }
}
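/* For reference, the Brownian dynamics update above is the overdamped
 * limit of the Langevin equation: with an explicit friction coefficient
 * gamma (bd_fric), the displacement is
 *     x' = x + dt*f/gamma + dt*r,  with r drawn from N(0, 2*kT/(gamma*dt)),
 * so the mean square displacement from the noise alone is 2*(kT/gamma)*dt,
 * the Einstein diffusion result. bd_rf[gt] holds the standard deviation
 * of r and was set in update_temperature_constants(). */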
static void calc_ke_part_normal(rvec v[], t_grpopts *opts, t_mdatoms *md,
                                gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    int           g;
    t_grp_tcstat *tcstat  = ekind->tcstat;
    t_grp_acc    *grpstat = ekind->grpstat;
    int           nthread, thread;

    /* three main: VV with AveVel, vv with AveEkin, leap with AveEkin. Leap with AveVel is also
       an option, but not supported now.
       bEkinAveVel: If TRUE, we sum into ekin, if FALSE, into ekinh.
     */

    /* group velocities are calculated in update_ekindata and
     * accumulated in accumulate_groups.
     * Now the partial global and groups ekin.
     */
    for (g = 0; (g < opts->ngtc); g++)
    {
        copy_mat(tcstat[g].ekinh, tcstat[g].ekinh_old);
        if (bEkinAveVel)
        {
            clear_mat(tcstat[g].ekinf);
            tcstat[g].ekinscalef_nhc = 1.0; /* need to clear this -- logic is complicated! */
        }
        else
        {
            clear_mat(tcstat[g].ekinh);
        }
    }
    ekind->dekindl_old = ekind->dekindl;
    nthread            = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
    {
        // This OpenMP only loops over arrays and does not call any functions
        // or memory allocation. It should not be able to throw, so for now
        // we do not need a try/catch wrapper.
        int     start_t, end_t, n;
        int     ga, gt;
        rvec    v_corrt;
        real    hm;
        int     d, m;
        matrix *ekin_sum;
        real   *dekindl_sum;

        start_t = ((thread+0)*md->homenr)/nthread;
        end_t   = ((thread+1)*md->homenr)/nthread;

        ekin_sum    = ekind->ekin_work[thread];
        dekindl_sum = ekind->dekindl_work[thread];

        for (gt = 0; gt < opts->ngtc; gt++)
        {
            clear_mat(ekin_sum[gt]);
        }
        *dekindl_sum = 0.0;

        ga = 0;
        gt = 0;
        for (n = start_t; n < end_t; n++)
        {
            if (md->cACC)
            {
                ga = md->cACC[n];
            }
            if (md->cTC)
            {
                gt = md->cTC[n];
            }
            hm = 0.5*md->massT[n];

            for (d = 0; (d < DIM); d++)
            {
                v_corrt[d] = v[n][d] - grpstat[ga].u[d];
            }
            for (d = 0; (d < DIM); d++)
            {
                for (m = 0; (m < DIM); m++)
                {
                    /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
                    ekin_sum[gt][m][d] += hm*v_corrt[m]*v_corrt[d];
                }
            }
            if (md->nMassPerturbed && md->bPerturbed[n])
            {
                *dekindl_sum +=
                    0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
            }
        }
    }

    ekind->dekindl = 0;
    for (thread = 0; thread < nthread; thread++)
    {
        for (g = 0; g < opts->ngtc; g++)
        {
            if (bEkinAveVel)
            {
                m_add(tcstat[g].ekinf, ekind->ekin_work[thread][g],
                      tcstat[g].ekinf);
            }
            else
            {
                m_add(tcstat[g].ekinh, ekind->ekin_work[thread][g],
                      tcstat[g].ekinh);
            }
        }

        ekind->dekindl += *ekind->dekindl_work[thread];
    }

    inc_nrnb(nrnb, eNR_EKIN, md->homenr);
}
static void calc_ke_part_visc(matrix box, rvec x[], rvec v[],
                              t_grpopts *opts, t_mdatoms *md,
                              gmx_ekindata_t *ekind,
                              t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    int           start = 0, homenr = md->homenr;
    int           g, d, n, m, gt = 0;
    rvec          v_corrt;
    real          hm;
    t_grp_tcstat *tcstat = ekind->tcstat;
    t_cos_acc    *cosacc = &(ekind->cosacc);
    real          dekindl;
    real          fac, cosz;
    double        mvcos;

    for (g = 0; g < opts->ngtc; g++)
    {
        copy_mat(ekind->tcstat[g].ekinh, ekind->tcstat[g].ekinh_old);
        clear_mat(ekind->tcstat[g].ekinh);
    }
    ekind->dekindl_old = ekind->dekindl;

    fac     = 2*M_PI/box[ZZ][ZZ];
    mvcos   = 0;
    dekindl = 0;
    for (n = start; n < start+homenr; n++)
    {
        if (md->cTC)
        {
            gt = md->cTC[n];
        }
        hm = 0.5*md->massT[n];

        /* Note that the times of x and v differ by half a step */
        /* MRS -- would have to be changed for VV */
        cosz = std::cos(fac*x[n][ZZ]);
        /* Calculate the amplitude of the new velocity profile */
        mvcos += 2*cosz*md->massT[n]*v[n][XX];

        copy_rvec(v[n], v_corrt);
        /* Subtract the profile for the kinetic energy */
        v_corrt[XX] -= cosz*cosacc->vcos;
        for (d = 0; (d < DIM); d++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
                if (bEkinAveVel)
                {
                    tcstat[gt].ekinf[m][d] += hm*v_corrt[m]*v_corrt[d];
                }
                else
                {
                    tcstat[gt].ekinh[m][d] += hm*v_corrt[m]*v_corrt[d];
                }
            }
        }
        if (md->nPerturbed && md->bPerturbed[n])
        {
            /* The minus sign here might be confusing.
             * The kinetic contribution from dH/dl doesn't come from
             * d m(l)/2 v^2 / dl, but rather from d p^2/2m(l) / dl,
             * where p are the momenta. The difference is only a minus sign.
             */
            dekindl -= 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
        }
    }
    ekind->dekindl = dekindl;
    cosacc->mvcos  = mvcos;

    inc_nrnb(nrnb, eNR_EKIN, homenr);
}
void calc_ke_part(t_state *state, t_grpopts *opts, t_mdatoms *md,
                  gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    if (ekind->cosacc.cos_accel == 0)
    {
        calc_ke_part_normal(as_rvec_array(state->v.data()), opts, md, ekind, nrnb, bEkinAveVel);
    }
    else
    {
        calc_ke_part_visc(state->box, as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), opts, md, ekind, nrnb, bEkinAveVel);
    }
}
extern void init_ekinstate(ekinstate_t *ekinstate, const t_inputrec *ir)
{
    ekinstate->ekin_n = ir->opts.ngtc;
    snew(ekinstate->ekinh, ekinstate->ekin_n);
    snew(ekinstate->ekinf, ekinstate->ekin_n);
    snew(ekinstate->ekinh_old, ekinstate->ekin_n);
    ekinstate->ekinscalef_nhc.resize(ekinstate->ekin_n);
    ekinstate->ekinscaleh_nhc.resize(ekinstate->ekin_n);
    ekinstate->vscale_nhc.resize(ekinstate->ekin_n);
    ekinstate->dekindl = 0;
    ekinstate->mvcos   = 0;
}
void update_ekinstate(ekinstate_t *ekinstate, gmx_ekindata_t *ekind)
{
    int i;

    for (i = 0; i < ekinstate->ekin_n; i++)
    {
        copy_mat(ekind->tcstat[i].ekinh, ekinstate->ekinh[i]);
        copy_mat(ekind->tcstat[i].ekinf, ekinstate->ekinf[i]);
        copy_mat(ekind->tcstat[i].ekinh_old, ekinstate->ekinh_old[i]);
        ekinstate->ekinscalef_nhc[i] = ekind->tcstat[i].ekinscalef_nhc;
        ekinstate->ekinscaleh_nhc[i] = ekind->tcstat[i].ekinscaleh_nhc;
        ekinstate->vscale_nhc[i]     = ekind->tcstat[i].vscale_nhc;
    }

    copy_mat(ekind->ekin, ekinstate->ekin_total);
    ekinstate->dekindl = ekind->dekindl;
    ekinstate->mvcos   = ekind->cosacc.mvcos;
}
void restore_ekinstate_from_state(const t_commrec *cr,
                                  gmx_ekindata_t *ekind, const ekinstate_t *ekinstate)
{
    int i, n;

    if (MASTER(cr))
    {
        for (i = 0; i < ekinstate->ekin_n; i++)
        {
            copy_mat(ekinstate->ekinh[i], ekind->tcstat[i].ekinh);
            copy_mat(ekinstate->ekinf[i], ekind->tcstat[i].ekinf);
            copy_mat(ekinstate->ekinh_old[i], ekind->tcstat[i].ekinh_old);
            ekind->tcstat[i].ekinscalef_nhc = ekinstate->ekinscalef_nhc[i];
            ekind->tcstat[i].ekinscaleh_nhc = ekinstate->ekinscaleh_nhc[i];
            ekind->tcstat[i].vscale_nhc     = ekinstate->vscale_nhc[i];
        }

        copy_mat(ekinstate->ekin_total, ekind->ekin);

        ekind->dekindl      = ekinstate->dekindl;
        ekind->cosacc.mvcos = ekinstate->mvcos;
        n                   = ekinstate->ekin_n;
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(n), &n, cr);
        for (i = 0; i < n; i++)
        {
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh[0][0]),
                      ekind->tcstat[i].ekinh[0], cr);
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinf[0][0]),
                      ekind->tcstat[i].ekinf[0], cr);
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh_old[0][0]),
                      ekind->tcstat[i].ekinh_old[0], cr);

            gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc),
                      &(ekind->tcstat[i].ekinscalef_nhc), cr);
            gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc),
                      &(ekind->tcstat[i].ekinscaleh_nhc), cr);
            gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc),
                      &(ekind->tcstat[i].vscale_nhc), cr);
        }
        gmx_bcast(DIM*DIM*sizeof(ekind->ekin[0][0]),
                  ekind->ekin[0], cr);

        gmx_bcast(sizeof(ekind->dekindl), &ekind->dekindl, cr);
        gmx_bcast(sizeof(ekind->cosacc.mvcos), &ekind->cosacc.mvcos, cr);
    }
}
void set_deform_reference_box(gmx_update_t *upd, gmx_int64_t step, matrix box)
{
    upd->deformref_step = step;
    copy_mat(box, upd->deformref_box);
}
static void deform(gmx_update_t *upd,
                   int start, int homenr, rvec x[], matrix box,
                   const t_inputrec *ir, gmx_int64_t step)
{
    matrix bnew, invbox, mu;
    real   elapsed_time;
    int    i, j;

    elapsed_time = (step + 1 - upd->deformref_step)*ir->delta_t;
    copy_mat(box, bnew);
    for (i = 0; i < DIM; i++)
    {
        for (j = 0; j < DIM; j++)
        {
            if (ir->deform[i][j] != 0)
            {
                bnew[i][j] =
                    upd->deformref_box[i][j] + elapsed_time*ir->deform[i][j];
            }
        }
    }
    /* We correct the off-diagonal elements,
     * which can grow indefinitely during shearing,
     * so the shifts do not get messed up.
     */
    for (i = 1; i < DIM; i++)
    {
        for (j = i-1; j >= 0; j--)
        {
            while (bnew[i][j] - box[i][j] > 0.5*bnew[j][j])
            {
                rvec_dec(bnew[i], bnew[j]);
            }
            while (bnew[i][j] - box[i][j] < -0.5*bnew[j][j])
            {
                rvec_inc(bnew[i], bnew[j]);
            }
        }
    }
    gmx::invertBoxMatrix(box, invbox);
    copy_mat(bnew, box);
    mmul_ur0(box, invbox, mu);

    for (i = start; i < start+homenr; i++)
    {
        x[i][XX] = mu[XX][XX]*x[i][XX]+mu[YY][XX]*x[i][YY]+mu[ZZ][XX]*x[i][ZZ];
        x[i][YY] = mu[YY][YY]*x[i][YY]+mu[ZZ][YY]*x[i][ZZ];
        x[i][ZZ] = mu[ZZ][ZZ]*x[i][ZZ];
    }
}
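/* For reference, the coordinate scaling at the end of deform() applies
 * mu = bnew * box^{-1} (computed by mmul_ur0), i.e. exactly the linear map
 * that takes the old box vectors to the new ones; applying it to every
 * position deforms the box contents homogeneously with the box. Because
 * GROMACS box matrices are triangular, only the lower-triangular elements
 * of mu enter the x update. */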
void update_tcouple(gmx_int64_t       step,
                    t_inputrec       *inputrec,
                    t_state          *state,
                    gmx_ekindata_t   *ekind,
                    t_extmass        *MassQ,
                    t_mdatoms        *md)
{
    bool doTemperatureCoupling = false;

    /* if using vv with trotter decomposition methods, we do this elsewhere in the code */
    if (!(EI_VV(inputrec->eI) &&
          (inputrecNvtTrotter(inputrec) || inputrecNptTrotter(inputrec) || inputrecNphTrotter(inputrec))))
    {
        doTemperatureCoupling = isTemperatureCouplingStep(step, inputrec);
    }

    if (doTemperatureCoupling)
    {
        real dttc = inputrec->nsttcouple*inputrec->delta_t;

        switch (inputrec->etc)
        {
            case etcNO:
                break;
            case etcBERENDSEN:
                berendsen_tcoupl(inputrec, ekind, dttc, state->therm_integral);
                break;
            case etcNOSEHOOVER:
                nosehoover_tcoupl(&(inputrec->opts), ekind, dttc,
                                  state->nosehoover_xi.data(), state->nosehoover_vxi.data(), MassQ);
                break;
            case etcVRESCALE:
                vrescale_tcoupl(inputrec, step, ekind, dttc,
                                state->therm_integral.data());
                break;
        }
        /* rescale in place here */
        if (EI_VV(inputrec->eI))
        {
            rescale_velocities(ekind, md, 0, md->homenr, as_rvec_array(state->v.data()));
        }
    }
    else
    {
        /* Set the T scaling lambda to 1 to have no scaling */
        for (int i = 0; (i < inputrec->opts.ngtc); i++)
        {
            ekind->tcstat[i].lambda = 1.0;
        }
    }
}
/*! \brief Computes the atom range for a thread to operate on, ensuring SIMD aligned ranges
 *
 * \param[in]  numThreads   The number of threads to divide atoms over
 * \param[in]  threadIndex  The thread to get the range for
 * \param[in]  numAtoms     The total number of atoms (on this rank)
 * \param[out] startAtom    The start of the atom range
 * \param[out] endAtom      The end of the atom range, note that this is in general not a multiple of the SIMD width
 */
static void getThreadAtomRange(int numThreads, int threadIndex, int numAtoms,
                               int *startAtom, int *endAtom)
{
#if GMX_HAVE_SIMD_UPDATE
    constexpr int blockSize = GMX_SIMD_REAL_WIDTH;
#else
    constexpr int blockSize = 1;
#endif
    int           numBlocks = (numAtoms + blockSize - 1)/blockSize;

    *startAtom = ((numBlocks* threadIndex     )/numThreads)*blockSize;
    *endAtom   = ((numBlocks*(threadIndex + 1))/numThreads)*blockSize;
    if (threadIndex == numThreads - 1)
    {
        *endAtom = numAtoms;
    }
}
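/* Worked example for getThreadAtomRange(): with numAtoms = 10, a SIMD
 * block size of 4 and numThreads = 3, there are 3 blocks and the threads
 * get the ranges [0,4), [4,8) and [8,10); all ranges except possibly the
 * last start and end on a block boundary, so the SIMD update kernels never
 * have two threads writing to the same rvec. */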
void update_pcouple_before_coordinates(FILE             *fplog,
                                       gmx_int64_t       step,
                                       const t_inputrec *inputrec,
                                       t_state          *state,
                                       matrix            parrinellorahmanMu,
                                       matrix            M,
                                       gmx_bool          bInitStep)
{
    /* Berendsen P-coupling is completely handled after the coordinate update.
     * Trotter P-coupling is handled by separate calls to trotter_update().
     */
    if (inputrec->epc == epcPARRINELLORAHMAN &&
        isPressureCouplingStep(step, inputrec))
    {
        real dtpc = inputrec->nstpcouple*inputrec->delta_t;

        parrinellorahman_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev,
                                state->box, state->box_rel, state->boxv,
                                M, parrinellorahmanMu, bInitStep);
    }
}
void constrain_velocities(gmx_int64_t           step,
                          real                 *dvdlambda, /* the contribution to be added to the bonded interactions */
                          const t_inputrec     *inputrec,  /* input record and box stuff */
                          t_mdatoms            *md,
                          t_state              *state,
                          gmx_bool              bMolPBC,
                          t_idef               *idef,
                          tensor                vir_part,
                          const t_commrec      *cr,
                          const gmx_multisim_t *ms,
                          t_nrnb               *nrnb,
                          gmx_wallcycle_t       wcycle,
                          gmx::Constraints     *constr,
                          gmx_bool              bCalcVir,
                          bool                  do_log,
                          bool                  do_ene)
{
    if (!constr)
    {
        return;
    }

    /*
     *  Steps (7C, 8C)
     *  APPLY CONSTRAINTS:
     *  BLOCK SHAKE
     */

    {
        tensor vir_con;

        /* clear out constraints before applying */
        clear_mat(vir_part);

        /* Constrain the velocities state->v */
        wallcycle_start(wcycle, ewcCONSTR);

        constrain(nullptr, do_log, do_ene, constr, idef,
                  inputrec, cr, ms, step, 1, 1.0, md,
                  as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
                  bMolPBC, state->box,
                  state->lambda[efptBONDED], dvdlambda,
                  nullptr, bCalcVir ? &vir_con : nullptr, nrnb, econqVeloc);

        wallcycle_stop(wcycle, ewcCONSTR);

        if (bCalcVir)
        {
            m_add(vir_part, vir_con, vir_part);
        }
    }
}
void constrain_coordinates(gmx_int64_t           step,
                           real                 *dvdlambda, /* the contribution to be added to the bonded interactions */
                           const t_inputrec     *inputrec,  /* input record and box stuff */
                           t_mdatoms            *md,
                           t_state              *state,
                           gmx_bool              bMolPBC,
                           t_idef               *idef,
                           tensor                vir_part,
                           const t_commrec      *cr,
                           const gmx_multisim_t *ms,
                           t_nrnb               *nrnb,
                           gmx_wallcycle_t       wcycle,
                           gmx_update_t         *upd,
                           gmx::Constraints     *constr,
                           gmx_bool              bCalcVir,
                           bool                  do_log,
                           bool                  do_ene)
{
    if (!constr)
    {
        return;
    }

    {
        tensor vir_con;

        /* clear out constraints before applying */
        clear_mat(vir_part);

        /* Constrain the coordinates upd->xp */
        wallcycle_start(wcycle, ewcCONSTR);

        constrain(nullptr, do_log, do_ene, constr, idef,
                  inputrec, cr, ms, step, 1, 1.0, md,
                  as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), nullptr,
                  bMolPBC, state->box,
                  state->lambda[efptBONDED], dvdlambda,
                  as_rvec_array(state->v.data()), bCalcVir ? &vir_con : nullptr, nrnb, econqCoord);

        wallcycle_stop(wcycle, ewcCONSTR);

        if (bCalcVir)
        {
            m_add(vir_part, vir_con, vir_part);
        }
    }
}
void
update_sd_second_half(gmx_int64_t           step,
                      real                 *dvdlambda, /* the contribution to be added to the bonded interactions */
                      const t_inputrec     *inputrec,  /* input record and box stuff */
                      t_mdatoms            *md,
                      t_state              *state,
                      gmx_bool              bMolPBC,
                      gmx::PaddedArrayRef<gmx::RVec> force, /* forces on home particles */
                      t_idef               *idef,
                      const t_commrec      *cr,
                      const gmx_multisim_t *ms,
                      t_nrnb               *nrnb,
                      gmx_wallcycle_t       wcycle,
                      gmx_update_t         *upd,
                      gmx::Constraints     *constr,
                      bool                  do_log,
                      bool                  do_ene)
{
    if (!constr)
    {
        return;
    }
    if (inputrec->eI == eiSD1)
    {
        int nth, th;
        int homenr = md->homenr;

        /* Cast delta_t from double to real to make the integrators faster.
         * The only reason for having delta_t double is to get accurate values
         * for t=delta_t*step when step is larger than float precision.
         * For integration dt the accuracy of real suffices, since with
         * integral += dt*integrand the increment is nearly always (much) smaller
         * than the integral (and the integrand has real precision).
         */
        real dt = inputrec->delta_t;

        wallcycle_start(wcycle, ewcUPDATE);

        nth = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nth) schedule(static)
        for (th = 0; th < nth; th++)
        {
            try
            {
                int start_th, end_th;
                getThreadAtomRange(nth, th, homenr, &start_th, &end_th);

                /* The second part of the SD integration */
                // TODO this just does an update of friction and
                // noise, so extract that and make it obvious.
                do_update_sd1(upd->sd,
                              start_th, end_th, dt,
                              inputrec->opts.acc, inputrec->opts.nFreeze,
                              md->invmass, md->ptype,
                              md->cFREEZE, md->cACC, md->cTC,
                              as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), as_rvec_array(state->v.data()), as_rvec_array(force.data()),
                              TRUE, FALSE,
                              step, inputrec->ld_seed,
                              DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
        inc_nrnb(nrnb, eNR_UPDATE, homenr);
        wallcycle_stop(wcycle, ewcUPDATE);

        /* Constrain the coordinates upd->xp for half a time step */
        wallcycle_start(wcycle, ewcCONSTR);
        constrain(nullptr, do_log, do_ene, constr, idef,
                  inputrec, cr, ms, step, 1, 0.5, md,
                  as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), nullptr,
                  bMolPBC, state->box,
                  state->lambda[efptBONDED], dvdlambda,
                  as_rvec_array(state->v.data()), nullptr, nrnb, econqCoord);

        wallcycle_stop(wcycle, ewcCONSTR);
    }
}
void finish_update(const t_inputrec *inputrec, /* input record and box stuff */
                   t_mdatoms        *md,
                   t_state          *state,
                   t_graph          *graph,
                   t_nrnb           *nrnb,
                   gmx_wallcycle_t   wcycle,
                   gmx_update_t     *upd,
                   gmx::Constraints *constr)
{
    int homenr = md->homenr;

    /* We must always unshift after updating coordinates; if we did not shake
       x was shifted in do_force */

    /* NOTE Currently we always integrate to a temporary buffer and
     * then copy the results back. */
    {
        wallcycle_start_nocount(wcycle, ewcUPDATE);

        if (md->cFREEZE != nullptr && constr != nullptr)
        {
            /* If we have atoms that are frozen along some, but not all
             * dimensions, then any constraints will have moved them also along
             * the frozen dimensions. To freeze such degrees of freedom
             * we copy them back here to later copy them forward. It would
             * be more elegant and slightly more efficient to copy zero
             * times instead of twice, but the graph case below prevents this.
             */
            const ivec *nFreeze                     = inputrec->opts.nFreeze;
            bool        partialFreezeAndConstraints = false;
            for (int g = 0; g < inputrec->opts.ngfrz; g++)
            {
                int numFreezeDim = nFreeze[g][XX] + nFreeze[g][YY] + nFreeze[g][ZZ];
                if (numFreezeDim > 0 && numFreezeDim < 3)
                {
                    partialFreezeAndConstraints = true;
                }
            }
            if (partialFreezeAndConstraints)
            {
                for (int i = 0; i < homenr; i++)
                {
                    int g = md->cFREEZE[i];

                    for (int d = 0; d < DIM; d++)
                    {
                        if (nFreeze[g][d])
                        {
                            upd->xp[i][d] = state->x[i][d];
                        }
                    }
                }
            }
        }

        if (graph && (graph->nnodes > 0))
        {
            unshift_x(graph, state->box, as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()));
            if (TRICLINIC(state->box))
            {
                inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
            }
            else
            {
                inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
            }
        }
        else
        {
            /* The copy is performance sensitive, so use a bare pointer */
            rvec *xp = as_rvec_array(upd->xp.data());
#ifndef __clang_analyzer__
            // cppcheck-suppress unreadVariable
            int gmx_unused nth = gmx_omp_nthreads_get(emntUpdate);
#endif
#pragma omp parallel for num_threads(nth) schedule(static)
            for (int i = 0; i < homenr; i++)
            {
                // Trivial statement, does not throw
                copy_rvec(xp[i], state->x[i]);
            }
        }
        wallcycle_stop(wcycle, ewcUPDATE);
    }
    /* ############# END the update of velocities and positions ######### */
}
void update_pcouple_after_coordinates(FILE             *fplog,
                                      gmx_int64_t       step,
                                      const t_inputrec *inputrec,
                                      const t_mdatoms  *md,
                                      const matrix      pressure,
                                      const matrix      forceVirial,
                                      const matrix      constraintVirial,
                                      const matrix      parrinellorahmanMu,
                                      t_state          *state,
                                      t_nrnb           *nrnb,
                                      gmx_update_t     *upd)
{
    int  start  = 0;
    int  homenr = md->homenr;

    /* Cast to real for faster code, no loss in precision (see comment above) */
    real dt     = inputrec->delta_t;

    /* now update boxes */
    switch (inputrec->epc)
    {
        case (epcNO):
            break;
        case (epcBERENDSEN):
            if (isPressureCouplingStep(step, inputrec))
            {
                real   dtpc = inputrec->nstpcouple*dt;
                matrix mu;
                berendsen_pcoupl(fplog, step, inputrec, dtpc,
                                 pressure, state->box,
                                 forceVirial, constraintVirial,
                                 mu, &state->baros_integral);
                berendsen_pscale(inputrec, mu, state->box, state->box_rel,
                                 start, homenr, as_rvec_array(state->x.data()),
                                 md->cFREEZE, nrnb);
            }
            break;
        case (epcPARRINELLORAHMAN):
            if (isPressureCouplingStep(step, inputrec))
            {
                /* The box velocities were updated in do_pr_pcoupl,
                 * but we don't change the box vectors until we get here
                 * since we need to be able to shift/unshift above.
                 */
                real dtpc = inputrec->nstpcouple*dt;
                for (int i = 0; i < DIM; i++)
                {
                    for (int m = 0; m <= i; m++)
                    {
                        state->box[i][m] += dtpc*state->boxv[i][m];
                    }
                }
                preserve_box_shape(inputrec, state->box_rel, state->box);

                /* Scale the coordinates */
                for (int n = start; n < start + homenr; n++)
                {
                    tmvmul_ur0(parrinellorahmanMu, state->x[n], state->x[n]);
                }
            }
            break;
        case (epcMTTK):
            switch (inputrec->epct)
            {
                case (epctISOTROPIC):
                    /* DIM * eta = ln V. so DIM*eta_new = DIM*eta_old + DIM*dt*veta =>
                       ln V_new = ln V_old + 3*dt*veta => V_new = V_old*exp(3*dt*veta) =>
                       Side length scales as exp(veta*dt) */

                    msmul(state->box, std::exp(state->veta*dt), state->box);

                    /* Relate veta to boxv. veta = d(eta)/dT = (1/DIM)*1/V dV/dT.
                       If we assume isotropic scaling, and box length scaling
                       factor L, then V = L^DIM (det(M)). So dV/dt = DIM
                       L^(DIM-1) dL/dt det(M), and veta = (1/L) dL/dt. The
                       determinant of B is L^DIM det(M), and the determinant
                       of dB/dt is (dL/dT)^DIM det (M). veta will be
                       (det(dB/dT)/det(B))^(1/3). Then since M =
                       B_new*(vol_new)^(1/3), dB/dT_new = (veta_new)*B(new). */

                    msmul(state->box, state->veta, state->boxv);
                    break;
                default:
                    break;
            }
            break;
        default:
            break;
    }

    if (inputrecDeform(inputrec))
    {
        deform(upd, start, homenr, as_rvec_array(state->x.data()), state->box, inputrec, step);
    }
}
void update_coords(gmx_int64_t       step,
                   t_inputrec       *inputrec, /* input record and box stuff */
                   t_mdatoms        *md,
                   t_state          *state,
                   gmx::PaddedArrayRef<gmx::RVec> f, /* forces on home particles */
                   t_fcdata         *fcd,
                   gmx_ekindata_t   *ekind,
                   matrix            M,
                   gmx_update_t     *upd,
                   int               UpdatePart,
                   const t_commrec  *cr, /* these shouldn't be here -- need to think about it */
                   gmx::Constraints *constr)
{
    gmx_bool bDoConstr = (nullptr != constr);

    /* Running the velocity half does nothing except for velocity verlet */
    if ((UpdatePart == etrtVELOCITY1 || UpdatePart == etrtVELOCITY2) &&
        !EI_VV(inputrec->eI))
    {
        gmx_incons("update_coords called for velocity without VV integrator");
    }

    int  homenr = md->homenr;

    /* Cast to real for faster code, no loss in precision (see comment above) */
    real dt     = inputrec->delta_t;

    /* We need to update the NMR restraint history when time averaging is used */
    if (state->flags & (1<<estDISRE_RM3TAV))
    {
        update_disres_history(fcd, &state->hist);
    }
    if (state->flags & (1<<estORIRE_DTAV))
    {
        update_orires_history(fcd, &state->hist);
    }

    /* ############# START The update of velocities and positions ######### */
    int nth = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
    {
        try
        {
            int start_th, end_th;
            getThreadAtomRange(nth, th, homenr, &start_th, &end_th);

            /* Strictly speaking, we would only need this check with SIMD
             * and for the actual SIMD width. But since the code currently
             * always adds padding for GMX_REAL_MAX_SIMD_WIDTH, we check that.
             */
            size_t gmx_used_in_debug homenrSimdPadded;
            homenrSimdPadded = ((homenr + GMX_REAL_MAX_SIMD_WIDTH - 1)/GMX_REAL_MAX_SIMD_WIDTH)*GMX_REAL_MAX_SIMD_WIDTH;
            GMX_ASSERT(state->x.size() >= homenrSimdPadded, "state->x needs to be padded for SIMD access");
            GMX_ASSERT(upd->xp.size() >= homenrSimdPadded, "upd->xp needs to be padded for SIMD access");
            GMX_ASSERT(state->v.size() >= homenrSimdPadded, "state->v needs to be padded for SIMD access");
            GMX_ASSERT(f.size() >= homenrSimdPadded, "f needs to be padded for SIMD access");

            const rvec *x_rvec  = as_rvec_array(state->x.data());
            rvec       *xp_rvec = as_rvec_array(upd->xp.data());
            rvec       *v_rvec  = as_rvec_array(state->v.data());
            const rvec *f_rvec  = as_rvec_array(f.data());

            switch (inputrec->eI)
            {
                case (eiMD):
                    do_update_md(start_th, end_th, step, dt,
                                 inputrec, md, ekind, state->box,
                                 x_rvec, xp_rvec, v_rvec, f_rvec,
                                 state->nosehoover_vxi.data(), M);
                    break;
                case (eiSD1):
                    /* With constraints, the SD1 update is done in 2 parts */
                    do_update_sd1(upd->sd,
                                  start_th, end_th, dt,
                                  inputrec->opts.acc, inputrec->opts.nFreeze,
                                  md->invmass, md->ptype,
                                  md->cFREEZE, md->cACC, md->cTC,
                                  x_rvec, xp_rvec, v_rvec, f_rvec,
                                  bDoConstr, TRUE,
                                  step, inputrec->ld_seed, DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr);
                    break;
                case (eiBD):
                    do_update_bd(start_th, end_th, dt,
                                 inputrec->opts.nFreeze, md->invmass, md->ptype,
                                 md->cFREEZE, md->cTC,
                                 x_rvec, xp_rvec, v_rvec, f_rvec,
                                 inputrec->bd_fric,
                                 upd->sd->bd_rf,
                                 step, inputrec->ld_seed, DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr);
                    break;
                case (eiVV):
                case (eiVVAK):
                {
                    gmx_bool bExtended = (inputrec->etc == etcNOSEHOOVER ||
                                          inputrec->epc == epcPARRINELLORAHMAN ||
                                          inputrec->epc == epcMTTK);

                    /* assuming barostat coupled to group 0 */
                    real alpha = 1.0 + DIM/static_cast<real>(inputrec->opts.nrdf[0]);
                    switch (UpdatePart)
                    {
                        case etrtVELOCITY1:
                        case etrtVELOCITY2:
                            do_update_vv_vel(start_th, end_th, dt,
                                             inputrec->opts.acc, inputrec->opts.nFreeze,
                                             md->invmass, md->ptype,
                                             md->cFREEZE, md->cACC,
                                             v_rvec, f_rvec,
                                             bExtended, state->veta, alpha);
                            break;
                        case etrtPOSITION:
                            do_update_vv_pos(start_th, end_th, dt,
                                             inputrec->opts.nFreeze,
                                             md->ptype, md->cFREEZE,
                                             x_rvec, xp_rvec, v_rvec,
                                             bExtended, state->veta);
                            break;
                    }
                    break;
                }
                default:
                    gmx_fatal(FARGS, "Don't know how to update coordinates");
                    break;
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
extern gmx_bool update_randomize_velocities(t_inputrec *ir, gmx_int64_t step, const t_commrec *cr,
                                            t_mdatoms *md, t_state *state, gmx_update_t *upd, gmx::Constraints *constr)
{

    real rate = (ir->delta_t)/ir->opts.tau_t[0];

    if (ir->etc == etcANDERSEN && constr != nullptr)
    {
        /* Currently, Andersen thermostat does not support constrained
           systems. Functionality exists in the andersen_tcoupl
           function in GROMACS 4.5.7 to allow this combination. That
           code could be ported to the current random-number
           generation approach, but has not yet been done because of
           lack of time and resources. */
        gmx_fatal(FARGS, "Normal Andersen is currently not supported with constraints, use massive Andersen instead");
    }

    /* proceed with andersen if 1) it's fixed probability per
       particle andersen or 2) it's massive andersen and it's tau_t/dt */
    if ((ir->etc == etcANDERSEN) || do_per_step(step, static_cast<int>(1.0/rate + 0.5)))
    {
        andersen_tcoupl(ir, step, cr, md, state, rate,
                        upd->sd->randomize_group, upd->sd->boltzfac);
        return TRUE;
    }
    return FALSE;
}