/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "update.h"

#include <math.h>
#include <stdio.h>

#include <algorithm>

#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/sim_util.h"
#include "gromacs/mdlib/tgroup.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/boxutilities.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/random/tabulatednormaldistribution.h"
#include "gromacs/random/threefry.h"
#include "gromacs/simd/simd.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
using namespace gmx; // TODO: Remove when this file is moved into gmx namespace

/* For debugging, start at v(-dt/2) for velocity verlet -- uncomment next line */
/*#define STARTFROMDT2*/

typedef struct {
    double em;
} gmx_sd_const_t;

typedef struct {
    real V;
} gmx_sd_sigma_t;

typedef struct {
    /* BD stuff */
    real           *bd_rf;
    /* SD stuff */
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sdsig;
    /* andersen temperature control stuff */
    gmx_bool       *randomize_group;
    real           *boltzfac;
} gmx_stochd_t;

struct gmx_update_t
{
    gmx_stochd_t     *sd;
    /* xprime for constraint algorithms */
    PaddedRVecVector  xp;

    /* Variables for the deform algorithm */
    gmx_int64_t       deformref_step;
    matrix            deformref_box;
};
static bool isTemperatureCouplingStep(gmx_int64_t step, const t_inputrec *ir)
{
    /* We should only couple after a step where energies were determined (for leapfrog versions)
       or the step energies are determined, for velocity verlet versions */
    int offset;
    if (EI_VV(ir->eI))
    {
        offset = 0;
    }
    else
    {
        offset = 1;
    }
    return ir->etc != etcNO &&
           (ir->nsttcouple == 1 ||
            do_per_step(step + ir->nsttcouple - offset, ir->nsttcouple));
}
static bool isPressureCouplingStep(gmx_int64_t step, const t_inputrec *ir)
{
    GMX_ASSERT(ir->epc != epcMTTK, "MTTK pressure coupling is not handled here");

    int offset;
    if (ir->epc == epcBERENDSEN)
    {
        offset = 0;
    }
    else
    {
        offset = 1;
    }
    /* We should only couple after a step where pressures were determined */
    return ir->epc != epcNO &&
           (ir->nstpcouple == 1 ||
            do_per_step(step + ir->nstpcouple - offset, ir->nstpcouple));
}
/*! \brief Sets the number of different temperature coupling values */
enum class NumTempScaleValues
{
    single,   //!< Single T-scaling value (either one group or all values =1)
    multiple  //!< Multiple T-scaling values, need to use T-group indices
};

/*! \brief Sets if to apply no or diagonal Parrinello-Rahman pressure scaling
 *
 * Note that this enum is only used in updateMDLeapfrogSimple(), which does
 * not handle fully anisotropic Parrinello-Rahman scaling, so we only have
 * options \p no and \p diagonal here and no anisotropic option.
 */
enum class ApplyParrinelloRahmanVScaling
{
    no,       //!< Do not apply velocity scaling (not a PR-coupling run or step)
    diagonal  //!< Apply velocity scaling using a diagonal matrix
};
/*! \brief Integrate using leap-frog with T-scaling and optionally diagonal Parrinello-Rahman p-coupling
 *
 * \tparam       numTempScaleValues     The number of different T-couple values
 * \tparam       applyPRVScaling        Apply Parrinello-Rahman velocity scaling
 * \param[in]    start                  Index of first atom to update
 * \param[in]    nrend                  Last atom to update: \p nrend - 1
 * \param[in]    dt                     The time step
 * \param[in]    dtPressureCouple       Time step for pressure coupling
 * \param[in]    invMassPerDim          1/mass per atom and dimension
 * \param[in]    tcstat                 Temperature coupling information
 * \param[in]    cTC                    T-coupling group index per atom
 * \param[in]    pRVScaleMatrixDiagonal Parrinello-Rahman v-scale matrix diagonal
 * \param[in]    x                      Input coordinates
 * \param[out]   xprime                 Updated coordinates
 * \param[inout] v                      Velocities
 * \param[in]    f                      Forces
 *
 * We expect this template to get good SIMD acceleration by most compilers,
 * unlike the more complex general template.
 * Note that we might get even better SIMD acceleration when we introduce
 * aligned (and padded) memory, possibly with some hints for the compilers.
 */
template<NumTempScaleValues            numTempScaleValues,
         ApplyParrinelloRahmanVScaling applyPRVScaling>
static void
updateMDLeapfrogSimple(int                       start,
                       int                       nrend,
                       real                      dt,
                       real                      dtPressureCouple,
                       const rvec * gmx_restrict invMassPerDim,
                       const t_grp_tcstat      * tcstat,
                       const unsigned short    * cTC,
                       const rvec                pRVScaleMatrixDiagonal,
                       const rvec * gmx_restrict x,
                       rvec       * gmx_restrict xprime,
                       rvec       * gmx_restrict v,
                       const rvec * gmx_restrict f)
{
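    /* Per dimension this implements the standard leap-frog update with the
     * T-coupling folded into one velocity-scaling factor lambda:
     *   v'(t+dt/2) = lambda*v(t-dt/2) + (f(t)/m)*dt  [minus the diagonal
     *   Parrinello-Rahman velocity-scaling term when enabled]
     *   x(t+dt)    = x(t) + v'(t+dt/2)*dt
     */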
    real lambdaGroup;

    if (numTempScaleValues == NumTempScaleValues::single)
    {
        lambdaGroup = tcstat[0].lambda;
    }

    for (int a = start; a < nrend; a++)
    {
        if (numTempScaleValues == NumTempScaleValues::multiple)
        {
            lambdaGroup = tcstat[cTC[a]].lambda;
        }

        for (int d = 0; d < DIM; d++)
        {
            /* Note that using rvec invMassPerDim results in more efficient
             * SIMD code, but this increases the cache pressure.
             * For large systems with PME on the CPU this slows down the
             * (then already slow) update by 20%. If all data remains in cache,
             * using rvec is much faster.
             */
            real vNew = lambdaGroup*v[a][d] + f[a][d]*invMassPerDim[a][d]*dt;

            if (applyPRVScaling == ApplyParrinelloRahmanVScaling::diagonal)
            {
                vNew -= dtPressureCouple*pRVScaleMatrixDiagonal[d]*v[a][d];
            }
            v[a][d]      = vNew;
            xprime[a][d] = x[a][d] + vNew*dt;
        }
    }
}
#if GMX_SIMD && GMX_SIMD_HAVE_REAL
#define GMX_HAVE_SIMD_UPDATE 1
#else
#define GMX_HAVE_SIMD_UPDATE 0
#endif

#if GMX_HAVE_SIMD_UPDATE
/*! \brief Load (aligned) the contents of GMX_SIMD_REAL_WIDTH rvec elements sequentially into 3 SIMD registers
 *
 * The loaded output is:
 * \p r0: { r[index][XX], r[index][YY], ... }
 * \p r1: { ... }
 * \p r2: { ..., r[index+GMX_SIMD_REAL_WIDTH-1][YY], r[index+GMX_SIMD_REAL_WIDTH-1][ZZ] }
 *
 * \param[in]  r      Pointer to an rvec array, has to be aligned to SIMD register width
 * \param[in]  index  Index of the first rvec triplet of reals to load
 * \param[out] r0     Pointer to first SIMD register
 * \param[out] r1     Pointer to second SIMD register
 * \param[out] r2     Pointer to third SIMD register
 */
static inline void simdLoadRvecs(const rvec *r,
                                 int         index,
                                 SimdReal   *r0,
                                 SimdReal   *r1,
                                 SimdReal   *r2)
{
    const real *realPtr = r[index];

    GMX_ASSERT(isSimdAligned(realPtr), "Pointer should be SIMD aligned");

    *r0 = simdLoad(realPtr + 0*GMX_SIMD_REAL_WIDTH);
    *r1 = simdLoad(realPtr + 1*GMX_SIMD_REAL_WIDTH);
    *r2 = simdLoad(realPtr + 2*GMX_SIMD_REAL_WIDTH);
}
/*! \brief Store (aligned) 3 SIMD registers sequentially to GMX_SIMD_REAL_WIDTH rvec elements
 *
 * The stored output is:
 * \p r[index] = { { r0[0], r0[1], ... }
 * ...
 * \p r[index+GMX_SIMD_REAL_WIDTH-1] = { ..., r2[GMX_SIMD_REAL_WIDTH-2], r2[GMX_SIMD_REAL_WIDTH-1] }
 *
 * \param[out] r      Pointer to an rvec array, has to be aligned to SIMD register width
 * \param[in]  index  Index of the first rvec triplet of reals to store to
 * \param[in]  r0     First SIMD register
 * \param[in]  r1     Second SIMD register
 * \param[in]  r2     Third SIMD register
 */
static inline void simdStoreRvecs(rvec    *r,
                                  int      index,
                                  SimdReal r0,
                                  SimdReal r1,
                                  SimdReal r2)
{
    real *realPtr = r[index];

    GMX_ASSERT(isSimdAligned(realPtr), "Pointer should be SIMD aligned");

    store(realPtr + 0*GMX_SIMD_REAL_WIDTH, r0);
    store(realPtr + 1*GMX_SIMD_REAL_WIDTH, r1);
    store(realPtr + 2*GMX_SIMD_REAL_WIDTH, r2);
}
/*! \brief Integrate using leap-frog with single group T-scaling and SIMD
 *
 * \param[in]    start   Index of first atom to update
 * \param[in]    nrend   Last atom to update: \p nrend - 1
 * \param[in]    dt      The time step
 * \param[in]    invMass 1/mass per atom
 * \param[in]    tcstat  Temperature coupling information
 * \param[in]    x       Input coordinates
 * \param[out]   xprime  Updated coordinates
 * \param[inout] v       Velocities
 * \param[in]    f       Forces
 */
static void
updateMDLeapfrogSimpleSimd(int                       start,
                           int                       nrend,
                           real                      dt,
                           const real * gmx_restrict invMass,
                           const t_grp_tcstat      * tcstat,
                           const rvec * gmx_restrict x,
                           rvec       * gmx_restrict xprime,
                           rvec       * gmx_restrict v,
                           const rvec * gmx_restrict f)
{
    SimdReal timestep(dt);
    SimdReal lambdaSystem(tcstat[0].lambda);

    /* We declare variables here, since code is often slower when declaring them inside the loop */

    /* Note: We should implement a proper PaddedVector, so we don't need this check */
    GMX_ASSERT(isSimdAligned(invMass), "invMass should be aligned");
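    /* Each iteration updates GMX_SIMD_REAL_WIDTH whole rvecs using 3 SIMD
     * registers; the scalar 1/mass values are expanded to x/y/z lanes with
     * expandScalarsToTriplets(). The loop may run past nrend into the
     * padding that the callers guarantee (see the asserts in update_coords). */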
    for (int a = start; a < nrend; a += GMX_SIMD_REAL_WIDTH)
    {
        SimdReal invMass0, invMass1, invMass2;
        expandScalarsToTriplets(simdLoad(invMass + a),
                                &invMass0, &invMass1, &invMass2);

        SimdReal v0, v1, v2;
        SimdReal f0, f1, f2;
        simdLoadRvecs(v, a, &v0, &v1, &v2);
        simdLoadRvecs(f, a, &f0, &f1, &f2);

        v0 = fma(f0*invMass0, timestep, lambdaSystem*v0);
        v1 = fma(f1*invMass1, timestep, lambdaSystem*v1);
        v2 = fma(f2*invMass2, timestep, lambdaSystem*v2);

        simdStoreRvecs(v, a, v0, v1, v2);

        SimdReal x0, x1, x2;
        simdLoadRvecs(x, a, &x0, &x1, &x2);

        SimdReal xprime0 = fma(v0, timestep, x0);
        SimdReal xprime1 = fma(v1, timestep, x1);
        SimdReal xprime2 = fma(v2, timestep, x2);

        simdStoreRvecs(xprime, a, xprime0, xprime1, xprime2);
    }
}

#endif // GMX_HAVE_SIMD_UPDATE
/*! \brief Sets the NEMD acceleration type */
enum class AccelerationType
{
    none, group, cosine
};
/*! \brief Integrate using leap-frog with support for everything
 *
 * \tparam       accelerationType  Type of NEMD acceleration
 * \param[in]    start             Index of first atom to update
 * \param[in]    nrend             Last atom to update: \p nrend - 1
 * \param[in]    doNoseHoover      If to apply Nose-Hoover T-coupling
 * \param[in]    dt                The time step
 * \param[in]    dtPressureCouple  Time step for pressure coupling, is 0 when no pressure coupling should be applied at this step
 * \param[in]    ir                The input parameter record
 * \param[in]    md                Atom properties
 * \param[in]    ekind             Kinetic energy data
 * \param[in]    box               The box dimensions
 * \param[in]    x                 Input coordinates
 * \param[out]   xprime            Updated coordinates
 * \param[inout] v                 Velocities
 * \param[in]    f                 Forces
 * \param[in]    nh_vxi            Nose-Hoover velocity scaling factors
 * \param[in]    M                 Parrinello-Rahman scaling matrix
 */
template<AccelerationType accelerationType>
static void
updateMDLeapfrogGeneral(int                         start,
                        int                         nrend,
                        bool                        doNoseHoover,
                        real                        dt,
                        real                        dtPressureCouple,
                        const t_inputrec          * ir,
                        const t_mdatoms           * md,
                        const gmx_ekindata_t      * ekind,
                        const matrix                box,
                        const rvec   * gmx_restrict x,
                        rvec         * gmx_restrict xprime,
                        rvec         * gmx_restrict v,
                        const rvec   * gmx_restrict f,
                        const double * gmx_restrict nh_vxi,
                        const matrix                M)
{
    /* This is a version of the leap-frog integrator that supports
     * all combinations of T-coupling, P-coupling and NEMD.
     * Nose-Hoover uses the reversible leap-frog integrator from
     * Holian et al. Phys Rev E 52(3) : 2338, 1995
     */

    const unsigned short    * cTC           = md->cTC;
    const t_grp_tcstat      * tcstat        = ekind->tcstat;

    const unsigned short    * cACC          = md->cACC;
    const rvec              * accel         = ir->opts.acc;
    const t_grp_acc         * grpstat       = ekind->grpstat;

    const rvec * gmx_restrict invMassPerDim = md->invMassPerDim;

    /* Initialize group values, changed later when multiple groups are used */
    int  ga       = 0;
    int  gt       = 0;
    real factorNH = 0;

    for (int n = start; n < nrend; n++)
    {
        if (cTC)
        {
            gt = cTC[n];
        }
        real lg = tcstat[gt].lambda;

        rvec vRel;
        real cosineZ, vCosine;
#ifdef __INTEL_COMPILER
#pragma warning( disable : 280 )
#endif
        switch (accelerationType)
        {
            case AccelerationType::none:
                copy_rvec(v[n], vRel);
                break;
            case AccelerationType::group:
                if (cACC)
                {
                    ga = cACC[n];
                }
                /* Avoid scaling the group velocity */
                rvec_sub(v[n], grpstat[ga].u, vRel);
                break;
            case AccelerationType::cosine:
                cosineZ = std::cos(x[n][ZZ]*static_cast<real>(M_PI)/box[ZZ][ZZ]);
                vCosine = cosineZ*ekind->cosacc.vcos;
                /* Avoid scaling the cosine profile velocity */
                copy_rvec(v[n], vRel);
                vRel[XX] -= vCosine;
                break;
        }

        if (doNoseHoover)
        {
            /* Here we account for multiple time stepping, by increasing
             * the Nose-Hoover correction by nsttcouple
             */
            factorNH = 0.5*ir->nsttcouple*dt*nh_vxi[gt];
        }
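        /* Semi-implicit coupled update: the Nose-Hoover friction and the
         * (possibly off-diagonal) Parrinello-Rahman term M*vRel act on the
         * relative velocity, and the division by (1 + factorNH) keeps the
         * Nose-Hoover part of the leap-frog step reversible. */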
        for (int d = 0; d < DIM; d++)
        {
            real vNew =
                (lg*vRel[d] + (f[n][d]*invMassPerDim[n][d]*dt - factorNH*vRel[d]
                               - dtPressureCouple*iprod(M[d], vRel)))/(1 + factorNH);
            switch (accelerationType)
            {
                case AccelerationType::none:
                    break;
                case AccelerationType::group:
                    /* Add back the mean velocity and apply acceleration */
                    vNew += grpstat[ga].u[d] + accel[ga][d]*dt;
                    break;
                case AccelerationType::cosine:
                    if (d == XX)
                    {
                        /* Add back the mean velocity and apply acceleration */
                        vNew += vCosine + cosineZ*ekind->cosacc.cos_accel*dt;
                    }
                    break;
            }
            v[n][d]      = vNew;
            xprime[n][d] = x[n][d] + vNew*dt;
        }
    }
}
/*! \brief Handles the Leap-frog MD x and v integration */
static void do_update_md(int                         start,
                         int                         nrend,
                         gmx_int64_t                 step,
                         real                        dt,
                         const t_inputrec          * ir,
                         const t_mdatoms           * md,
                         const gmx_ekindata_t      * ekind,
                         const matrix                box,
                         const rvec   * gmx_restrict x,
                         rvec         * gmx_restrict xprime,
                         rvec         * gmx_restrict v,
                         const rvec   * gmx_restrict f,
                         const double * gmx_restrict nh_vxi,
                         const matrix                M)
{
    GMX_ASSERT(nrend == start || xprime != x, "For SIMD optimization certain compilers need to have xprime != x");

    /* Note: Berendsen pressure scaling is handled after do_update_md() */
    bool doTempCouple       = isTemperatureCouplingStep(step, ir);
    bool doNoseHoover       = (ir->etc == etcNOSEHOOVER && doTempCouple);
    bool doParrinelloRahman = (ir->epc == epcPARRINELLORAHMAN && isPressureCouplingStep(step, ir));
    bool doPROffDiagonal    = (doParrinelloRahman && (M[YY][XX] != 0 || M[ZZ][XX] != 0 || M[ZZ][YY] != 0));

    real dtPressureCouple   = (doParrinelloRahman ? ir->nstpcouple*dt : 0);

    /* NEMD (also cosine) acceleration is applied in updateMDLeapFrogGeneral */
    bool doAcceleration     = (ekind->bNEMD || ekind->cosacc.cos_accel != 0);
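    /* Dispatch: anything that needs per-atom group velocities or
     * off-diagonal scaling (Nose-Hoover, full PR matrices, NEMD/cosine
     * acceleration) goes through the general kernel; all other cases use
     * the simple templated kernels, with a SIMD path when a single
     * T-scaling value applies to all atoms. */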
    if (doNoseHoover || doPROffDiagonal || doAcceleration)
    {
        if (!doAcceleration)
        {
            updateMDLeapfrogGeneral<AccelerationType::none>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, M);
        }
        else if (ekind->bNEMD)
        {
            updateMDLeapfrogGeneral<AccelerationType::group>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, M);
        }
        else
        {
            updateMDLeapfrogGeneral<AccelerationType::cosine>
                (start, nrend, doNoseHoover, dt, dtPressureCouple,
                ir, md, ekind, box, x, xprime, v, f, nh_vxi, M);
        }
    }
    else
    {
        /* We determine if we have a single T-coupling lambda value for all
         * atoms. That allows for better SIMD acceleration in the template.
         * If we do not do temperature coupling (in the run or this step),
         * all scaling values are 1, so we effectively have a single value.
         */
        bool haveSingleTempScaleValue = (!doTempCouple || ekind->ngtc == 1);

        /* Extract some pointers needed by all cases */
        const unsigned short *cTC           = md->cTC;
        const t_grp_tcstat   *tcstat        = ekind->tcstat;
        const rvec           *invMassPerDim = md->invMassPerDim;

        if (doParrinelloRahman)
        {
            GMX_ASSERT(!doPROffDiagonal, "updateMDLeapfrogSimple only supports diagonal Parrinello-Rahman scaling matrices");

            rvec diagM;
            for (int d = 0; d < DIM; d++)
            {
                diagM[d] = M[d][d];
            }

            if (haveSingleTempScaleValue)
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::single,
                 ApplyParrinelloRahmanVScaling::diagonal>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, diagM, x, xprime, v, f);
            }
            else
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::multiple,
                 ApplyParrinelloRahmanVScaling::diagonal>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, diagM, x, xprime, v, f);
            }
        }
        else
        {
            if (haveSingleTempScaleValue)
            {
                /* Note that modern compilers are pretty good at vectorizing
                 * updateMDLeapfrogSimple(). But the SIMD version will still
                 * be faster because invMass lowers the cache pressure
                 * compared to invMassPerDim.
                 */
#if GMX_HAVE_SIMD_UPDATE
                /* Check if we can use invmass instead of invMassPerDim */
                if (!md->havePartiallyFrozenAtoms)
                {
                    updateMDLeapfrogSimpleSimd
                        (start, nrend, dt, md->invmass, tcstat, x, xprime, v, f);
                }
                else
#endif
                {
                    updateMDLeapfrogSimple
                    <NumTempScaleValues::single,
                     ApplyParrinelloRahmanVScaling::no>
                        (start, nrend, dt, dtPressureCouple,
                        invMassPerDim, tcstat, cTC, nullptr, x, xprime, v, f);
                }
            }
            else
            {
                updateMDLeapfrogSimple
                <NumTempScaleValues::multiple,
                 ApplyParrinelloRahmanVScaling::no>
                    (start, nrend, dt, dtPressureCouple,
                    invMassPerDim, tcstat, cTC, nullptr, x, xprime, v, f);
            }
        }
    }
}
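/* Velocity half step for the velocity-verlet integrators. With an extended
 * ensemble (bExtended is set by the caller for Nose-Hoover, Parrinello-Rahman
 * or MTTK), the velocities are additionally coupled to the barostat velocity
 * veta via mv1 = exp(-g) and mv2 = sinh(g)/g (series expansion), with
 * g = dt*veta*alpha/4. */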
static void do_update_vv_vel(int start, int nrend, real dt,
                             rvec accel[], ivec nFreeze[], real invmass[],
                             unsigned short ptype[], unsigned short cFREEZE[],
                             unsigned short cACC[], rvec v[], const rvec f[],
                             gmx_bool bExtended, real veta, real alpha)
{
    int  gf = 0, ga = 0;
    int  n, d;
    real g, mv1, mv2;

    if (bExtended)
    {
        g   = 0.25*dt*veta*alpha;
        mv1 = std::exp(-g);
        mv2 = gmx::series_sinhx(g);
    }
    else
    {
        mv1 = 1.0;
        mv2 = 1.0;
    }
    for (n = start; n < nrend; n++)
    {
        real w_dt = invmass[n]*dt;
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cACC)
        {
            ga = cACC[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                v[n][d] = mv1*(mv1*v[n][d] + 0.5*(w_dt*mv2*f[n][d])) + 0.5*accel[ga][d]*dt;
            }
            else
            {
                v[n][d] = 0.0;
            }
        }
    }
} /* do_update_vv_vel */
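/* Position step for the velocity-verlet integrators; with an extended
 * ensemble the box scaling enters through mr1 = exp(g) and mr2 = sinh(g)/g,
 * with g = dt*veta/2. */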
static void do_update_vv_pos(int start, int nrend, real dt,
                             ivec nFreeze[],
                             unsigned short ptype[], unsigned short cFREEZE[],
                             const rvec x[], rvec xprime[], rvec v[],
                             gmx_bool bExtended, real veta)
{
    int  gf = 0;
    int  n, d;
    real g, mr1, mr2;

    /* Would it make more sense if Parrinello-Rahman was put here? */
    if (bExtended)
    {
        g   = 0.5*dt*veta;
        mr1 = std::exp(g);
        mr2 = gmx::series_sinhx(g);
    }
    else
    {
        mr1 = 1.0;
        mr2 = 1.0;
    }

    for (n = start; n < nrend; n++)
    {
        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }

        for (d = 0; d < DIM; d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                xprime[n][d] = mr1*(mr1*x[n][d] + mr2*dt*v[n][d]);
            }
            else
            {
                xprime[n][d] = x[n][d];
            }
        }
    }
} /* do_update_vv_pos */
static gmx_stochd_t *init_stochd(const t_inputrec *ir)
{
    gmx_stochd_t *sd;

    snew(sd, 1);

    const t_grpopts *opts = &ir->opts;
    int              ngtc = opts->ngtc;

    if (ir->eI == eiBD)
    {
        snew(sd->bd_rf, ngtc);
    }
    else if (EI_SD(ir->eI))
    {
        snew(sd->sdc, ngtc);
        snew(sd->sdsig, ngtc);

        gmx_sd_const_t *sdc = sd->sdc;

        for (int gt = 0; gt < ngtc; gt++)
        {
            if (opts->tau_t[gt] > 0)
            {
                sdc[gt].em = std::exp(-ir->delta_t/opts->tau_t[gt]);
            }
            else
            {
                /* No friction and noise on this group */
                sdc[gt].em = 1;
            }
        }
    }
    else if (ETC_ANDERSEN(ir->etc))
    {
        snew(sd->randomize_group, ngtc);
        snew(sd->boltzfac, ngtc);

        /* for now, assume that all groups, if randomized, are randomized at the same rate, i.e. tau_t is the same. */
        /* since constraint groups don't necessarily match up with temperature groups! This is checked in readir.c */

        for (int gt = 0; gt < ngtc; gt++)
        {
            real reft = std::max<real>(0, opts->ref_t[gt]);
            if ((opts->tau_t[gt] > 0) && (reft > 0)) /* tau_t or ref_t = 0 means that no randomization is done */
            {
                sd->randomize_group[gt] = TRUE;
                sd->boltzfac[gt]        = BOLTZ*opts->ref_t[gt];
            }
            else
            {
                sd->randomize_group[gt] = FALSE;
            }
        }
    }

    return sd;
}
void update_temperature_constants(gmx_update_t *upd, const t_inputrec *ir)
{
    if (ir->eI == eiBD)
    {
        if (ir->bd_fric != 0)
        {
            for (int gt = 0; gt < ir->opts.ngtc; gt++)
            {
                upd->sd->bd_rf[gt] = std::sqrt(2.0*BOLTZ*ir->opts.ref_t[gt]/(ir->bd_fric*ir->delta_t));
            }
        }
        else
        {
            for (int gt = 0; gt < ir->opts.ngtc; gt++)
            {
                upd->sd->bd_rf[gt] = std::sqrt(2.0*BOLTZ*ir->opts.ref_t[gt]);
            }
        }
    }
    if (ir->eI == eiSD1)
    {
        for (int gt = 0; gt < ir->opts.ngtc; gt++)
        {
            real kT = BOLTZ*ir->opts.ref_t[gt];
            /* The mass is accounted for later, since this differs per atom */
            upd->sd->sdsig[gt].V = std::sqrt(kT*(1 - upd->sd->sdc[gt].em*upd->sd->sdc[gt].em));
        }
    }
}
gmx_update_t *init_update(const t_inputrec *ir)
{
    gmx_update_t *upd = new(gmx_update_t);

    if (ir->eI == eiBD || EI_SD(ir->eI) || ir->etc == etcVRESCALE || ETC_ANDERSEN(ir->etc))
    {
        upd->sd = init_stochd(ir);
    }

    update_temperature_constants(upd, ir);

    upd->xp.resize(0);

    return upd;
}
void update_realloc(gmx_update_t *upd, int natoms)
{
    GMX_ASSERT(upd, "upd must be allocated before its fields can be reallocated");

    upd->xp.resize(gmx::paddedRVecVectorSize(natoms));
}
static void do_update_sd1(gmx_stochd_t *sd,
                          int start, int nrend, real dt,
                          rvec accel[], ivec nFreeze[],
                          real invmass[], unsigned short ptype[],
                          unsigned short cFREEZE[], unsigned short cACC[],
                          unsigned short cTC[],
                          const rvec x[], rvec xprime[], rvec v[], const rvec f[],
                          gmx_bool bDoConstr,
                          gmx_bool bFirstHalfConstr,
                          gmx_int64_t step, int seed, int* gatindex)
{
    gmx_sd_const_t *sdc;
    gmx_sd_sigma_t *sig;
    int             gf = 0, ga = 0, gt = 0;
    real            ism;
    int             n, d;

    // Even 0 bits internal counter gives 2x64 ints (more than enough for three table lookups)
    gmx::ThreeFry2x64<0> rng(seed, gmx::RandomDomain::UpdateCoordinates);
    gmx::TabulatedNormalDistribution<real, 14> dist;
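    /* The random stream is restarted per atom from (step, global atom index),
     * which makes the noise reproducible and independent of the domain
     * decomposition and the number of threads. */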
    sdc = sd->sdc;
    sig = sd->sdsig;

    if (!bDoConstr)
    {
        for (n = start; n < nrend; n++)
        {
            int ng = gatindex ? gatindex[n] : n;

            rng.restart(step, ng);
            dist.reset();

            ism = std::sqrt(invmass[n]);

            if (cFREEZE)
            {
                gf = cFREEZE[n];
            }
            if (cACC)
            {
                ga = cACC[n];
            }
            if (cTC)
            {
                gt = cTC[n];
            }

            for (d = 0; d < DIM; d++)
            {
                if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                {
                    real sd_V, vn;

                    sd_V         = ism*sig[gt].V*dist(rng);
                    vn           = v[n][d] + (invmass[n]*f[n][d] + accel[ga][d])*dt;
                    v[n][d]      = vn*sdc[gt].em + sd_V;
                    /* Here we include half of the friction+noise
                     * update of v into the integration of x.
                     */
                    xprime[n][d] = x[n][d] + 0.5*(vn + v[n][d])*dt;
                }
                else
                {
                    v[n][d]      = 0.0;
                    xprime[n][d] = x[n][d];
                }
            }
        }
    }
    else
    {
        /* We do have constraints */
        if (bFirstHalfConstr)
        {
            /* First update without friction and noise */
            real im;

            for (n = start; n < nrend; n++)
            {
                im = invmass[n];

                if (cFREEZE)
                {
                    gf = cFREEZE[n];
                }
                if (cACC)
                {
                    ga = cACC[n];
                }

                for (d = 0; d < DIM; d++)
                {
                    if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                    {
                        v[n][d]      = v[n][d] + (im*f[n][d] + accel[ga][d])*dt;
                        xprime[n][d] = x[n][d] + v[n][d]*dt;
                    }
                    else
                    {
                        v[n][d]      = 0.0;
                        xprime[n][d] = x[n][d];
                    }
                }
            }
        }
        else
        {
            /* Update friction and noise only */
            for (n = start; n < nrend; n++)
            {
                int ng = gatindex ? gatindex[n] : n;

                rng.restart(step, ng);
                dist.reset();

                ism = std::sqrt(invmass[n]);

                if (cFREEZE)
                {
                    gf = cFREEZE[n];
                }
                if (cTC)
                {
                    gt = cTC[n];
                }

                for (d = 0; d < DIM; d++)
                {
                    if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
                    {
                        real sd_V, vn;

                        sd_V         = ism*sig[gt].V*dist(rng);
                        vn           = v[n][d];
                        v[n][d]      = vn*sdc[gt].em + sd_V;
                        /* Add the friction and noise contribution only */
                        xprime[n][d] = xprime[n][d] + 0.5*(v[n][d] - vn)*dt;
                    }
                }
            }
        }
    }
}
static void do_update_bd(int start, int nrend, real dt,
                         ivec nFreeze[],
                         real invmass[], unsigned short ptype[],
                         unsigned short cFREEZE[], unsigned short cTC[],
                         const rvec x[], rvec xprime[], rvec v[],
                         const rvec f[], real friction_coefficient,
                         real *rf, gmx_int64_t step, int seed,
                         int* gatindex)
{
    /* note -- these appear to be full step velocities . . .  */
    int  gf = 0, gt = 0;
    real vn;
    real invfr = 0;
    int  n, d;
    // Even 0 bits internal counter gives 2x64 ints (more than enough for three table lookups)
    gmx::ThreeFry2x64<0> rng(seed, gmx::RandomDomain::UpdateCoordinates);
    gmx::TabulatedNormalDistribution<real, 14> dist;
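    /* Overdamped (Brownian) update: below, vn is an effective full-step
     * velocity vn = f/gamma + rf*N(0,1), where for a nonzero friction
     * coefficient rf = sqrt(2 kT/(gamma*dt)) was precomputed in
     * update_temperature_constants(), so x += dt*f/gamma + sqrt(2 kT dt/gamma)*N(0,1).
     * With bd_fric == 0 the friction is derived from the atomic masses
     * instead (see the NOTE below). */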
    if (friction_coefficient != 0)
    {
        invfr = 1.0/friction_coefficient;
    }

    for (n = start; (n < nrend); n++)
    {
        int ng = gatindex ? gatindex[n] : n;

        rng.restart(step, ng);
        dist.reset();

        if (cFREEZE)
        {
            gf = cFREEZE[n];
        }
        if (cTC)
        {
            gt = cTC[n];
        }
        for (d = 0; (d < DIM); d++)
        {
            if ((ptype[n] != eptVSite) && (ptype[n] != eptShell) && !nFreeze[gf][d])
            {
                if (friction_coefficient != 0)
                {
                    vn = invfr*f[n][d] + rf[gt]*dist(rng);
                }
                else
                {
                    /* NOTE: invmass = 2/(mass*friction_constant*dt) */
                    vn = 0.5*invmass[n]*f[n][d]*dt
                        + std::sqrt(0.5*invmass[n])*rf[gt]*dist(rng);
                }

                v[n][d]      = vn;
                xprime[n][d] = x[n][d] + vn*dt;
            }
            else
            {
                v[n][d]      = 0.0;
                xprime[n][d] = x[n][d];
            }
        }
    }
}
static void dump_it_all(FILE gmx_unused *fp, const char gmx_unused *title,
                        int gmx_unused natoms,
                        const gmx_unused PaddedRVecVector *x,
                        const gmx_unused PaddedRVecVector *xp,
                        const gmx_unused PaddedRVecVector *v,
                        const gmx_unused PaddedRVecVector *f)
{
#ifdef DEBUG
    if (fp)
    {
        fprintf(fp, "%s\n", title);
        pr_rvecs(fp, 0, "x", as_rvec_array(x->data()), natoms);
        pr_rvecs(fp, 0, "xp", as_rvec_array(xp->data()), natoms);
        pr_rvecs(fp, 0, "v", as_rvec_array(v->data()), natoms);
        if (f != NULL)
        {
            pr_rvecs(fp, 0, "f", as_rvec_array(f->data()), natoms);
        }
    }
#endif
}
static void calc_ke_part_normal(rvec v[], t_grpopts *opts, t_mdatoms *md,
                                gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    int           g;
    t_grp_tcstat *tcstat  = ekind->tcstat;
    t_grp_acc    *grpstat = ekind->grpstat;
    int           nthread, thread;

    /* three main: VV with AveVel, vv with AveEkin, leap with AveEkin. Leap with AveVel is also
       an option, but not supported now.
       bEkinAveVel: If TRUE, we sum into ekin, if FALSE, into ekinh.
     */

    /* group velocities are calculated in update_ekindata and
     * accumulated in accumulate_groups.
     * Now the partial global and groups ekin.
     */
    for (g = 0; (g < opts->ngtc); g++)
    {
        copy_mat(tcstat[g].ekinh, tcstat[g].ekinh_old);
        if (bEkinAveVel)
        {
            clear_mat(tcstat[g].ekinf);
            tcstat[g].ekinscalef_nhc = 1.0; /* need to clear this -- logic is complicated! */
        }
        else
        {
            clear_mat(tcstat[g].ekinh);
        }
    }
    ekind->dekindl_old = ekind->dekindl;
    nthread            = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
    {
        // This OpenMP only loops over arrays and does not call any functions
        // or memory allocation. It should not be able to throw, so for now
        // we do not need a try/catch wrapper.
        int     start_t, end_t, n;
        int     ga, gt;
        rvec    v_corrt;
        real    hm;
        int     d, m;
        matrix *ekin_sum;
        real   *dekindl_sum;

        start_t = ((thread+0)*md->homenr)/nthread;
        end_t   = ((thread+1)*md->homenr)/nthread;

        ekin_sum    = ekind->ekin_work[thread];
        dekindl_sum = ekind->dekindl_work[thread];

        for (gt = 0; gt < opts->ngtc; gt++)
        {
            clear_mat(ekin_sum[gt]);
        }
        *dekindl_sum = 0.0;

        ga = 0;
        gt = 0;
        for (n = start_t; n < end_t; n++)
        {
            if (md->cACC)
            {
                ga = md->cACC[n];
            }
            if (md->cTC)
            {
                gt = md->cTC[n];
            }
            hm = 0.5*md->massT[n];

            for (d = 0; (d < DIM); d++)
            {
                v_corrt[d] = v[n][d] - grpstat[ga].u[d];
            }
            for (d = 0; (d < DIM); d++)
            {
                for (m = 0; (m < DIM); m++)
                {
                    /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
                    ekin_sum[gt][m][d] += hm*v_corrt[m]*v_corrt[d];
                }
            }
            if (md->nMassPerturbed && md->bPerturbed[n])
            {
                *dekindl_sum +=
                    0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
            }
        }
    }

    ekind->dekindl = 0;
    for (thread = 0; thread < nthread; thread++)
    {
        for (g = 0; g < opts->ngtc; g++)
        {
            if (bEkinAveVel)
            {
                m_add(tcstat[g].ekinf, ekind->ekin_work[thread][g],
                      tcstat[g].ekinf);
            }
            else
            {
                m_add(tcstat[g].ekinh, ekind->ekin_work[thread][g],
                      tcstat[g].ekinh);
            }
        }

        ekind->dekindl += *ekind->dekindl_work[thread];
    }

    inc_nrnb(nrnb, eNR_EKIN, md->homenr);
}
static void calc_ke_part_visc(matrix box, rvec x[], rvec v[],
                              t_grpopts *opts, t_mdatoms *md,
                              gmx_ekindata_t *ekind,
                              t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    int           start = 0, homenr = md->homenr;
    int           g, d, n, m, gt = 0;
    rvec          v_corrt;
    real          hm;
    t_grp_tcstat *tcstat = ekind->tcstat;
    t_cos_acc    *cosacc = &(ekind->cosacc);
    real          dekindl;
    real          fac, cosz;
    double        mvcos;

    for (g = 0; g < opts->ngtc; g++)
    {
        copy_mat(ekind->tcstat[g].ekinh, ekind->tcstat[g].ekinh_old);
        clear_mat(ekind->tcstat[g].ekinh);
    }
    ekind->dekindl_old = ekind->dekindl;

    fac     = 2*M_PI/box[ZZ][ZZ];
    mvcos   = 0;
    dekindl = 0;
    for (n = start; n < start+homenr; n++)
    {
        if (md->cTC)
        {
            gt = md->cTC[n];
        }
        hm = 0.5*md->massT[n];

        /* Note that the times of x and v differ by half a step */
        /* MRS -- would have to be changed for VV */
        cosz         = std::cos(fac*x[n][ZZ]);
        /* Calculate the amplitude of the new velocity profile */
        mvcos       += 2*cosz*md->massT[n]*v[n][XX];

        copy_rvec(v[n], v_corrt);
        /* Subtract the profile for the kinetic energy */
        v_corrt[XX] -= cosz*cosacc->vcos;
        for (d = 0; (d < DIM); d++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* if we're computing a full step velocity, v_corrt[d] has v(t). Otherwise, v(t+dt/2) */
                if (bEkinAveVel)
                {
                    tcstat[gt].ekinf[m][d] += hm*v_corrt[m]*v_corrt[d];
                }
                else
                {
                    tcstat[gt].ekinh[m][d] += hm*v_corrt[m]*v_corrt[d];
                }
            }
        }
        if (md->nPerturbed && md->bPerturbed[n])
        {
            /* The minus sign here might be confusing.
             * The kinetic contribution from dH/dl doesn't come from
             * d m(l)/2 v^2 / dl, but rather from d p^2/2m(l) / dl,
             * where p are the momenta. The difference is only a minus sign.
             */
            dekindl -= 0.5*(md->massB[n] - md->massA[n])*iprod(v_corrt, v_corrt);
        }
    }
    ekind->dekindl = dekindl;
    cosacc->mvcos  = mvcos;

    inc_nrnb(nrnb, eNR_EKIN, homenr);
}
void calc_ke_part(t_state *state, t_grpopts *opts, t_mdatoms *md,
                  gmx_ekindata_t *ekind, t_nrnb *nrnb, gmx_bool bEkinAveVel)
{
    if (ekind->cosacc.cos_accel == 0)
    {
        calc_ke_part_normal(as_rvec_array(state->v.data()), opts, md, ekind, nrnb, bEkinAveVel);
    }
    else
    {
        calc_ke_part_visc(state->box, as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), opts, md, ekind, nrnb, bEkinAveVel);
    }
}
extern void init_ekinstate(ekinstate_t *ekinstate, const t_inputrec *ir)
{
    ekinstate->ekin_n = ir->opts.ngtc;
    snew(ekinstate->ekinh, ekinstate->ekin_n);
    snew(ekinstate->ekinf, ekinstate->ekin_n);
    snew(ekinstate->ekinh_old, ekinstate->ekin_n);
    ekinstate->ekinscalef_nhc.resize(ekinstate->ekin_n);
    ekinstate->ekinscaleh_nhc.resize(ekinstate->ekin_n);
    ekinstate->vscale_nhc.resize(ekinstate->ekin_n);
    ekinstate->dekindl = 0;
    ekinstate->mvcos   = 0;
}
void update_ekinstate(ekinstate_t *ekinstate, gmx_ekindata_t *ekind)
{
    int i;

    for (i = 0; i < ekinstate->ekin_n; i++)
    {
        copy_mat(ekind->tcstat[i].ekinh, ekinstate->ekinh[i]);
        copy_mat(ekind->tcstat[i].ekinf, ekinstate->ekinf[i]);
        copy_mat(ekind->tcstat[i].ekinh_old, ekinstate->ekinh_old[i]);
        ekinstate->ekinscalef_nhc[i] = ekind->tcstat[i].ekinscalef_nhc;
        ekinstate->ekinscaleh_nhc[i] = ekind->tcstat[i].ekinscaleh_nhc;
        ekinstate->vscale_nhc[i]     = ekind->tcstat[i].vscale_nhc;
    }

    copy_mat(ekind->ekin, ekinstate->ekin_total);
    ekinstate->dekindl = ekind->dekindl;
    ekinstate->mvcos   = ekind->cosacc.mvcos;
}
void restore_ekinstate_from_state(t_commrec *cr,
                                  gmx_ekindata_t *ekind, const ekinstate_t *ekinstate)
{
    int i, n;

    if (MASTER(cr))
    {
        for (i = 0; i < ekinstate->ekin_n; i++)
        {
            copy_mat(ekinstate->ekinh[i], ekind->tcstat[i].ekinh);
            copy_mat(ekinstate->ekinf[i], ekind->tcstat[i].ekinf);
            copy_mat(ekinstate->ekinh_old[i], ekind->tcstat[i].ekinh_old);
            ekind->tcstat[i].ekinscalef_nhc = ekinstate->ekinscalef_nhc[i];
            ekind->tcstat[i].ekinscaleh_nhc = ekinstate->ekinscaleh_nhc[i];
            ekind->tcstat[i].vscale_nhc     = ekinstate->vscale_nhc[i];
        }
        copy_mat(ekinstate->ekin_total, ekind->ekin);

        ekind->dekindl      = ekinstate->dekindl;
        ekind->cosacc.mvcos = ekinstate->mvcos;
        n                   = ekinstate->ekin_n;
    }

    if (PAR(cr))
    {
        gmx_bcast(sizeof(n), &n, cr);
        for (i = 0; i < n; i++)
        {
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh[0][0]),
                      ekind->tcstat[i].ekinh[0], cr);
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinf[0][0]),
                      ekind->tcstat[i].ekinf[0], cr);
            gmx_bcast(DIM*DIM*sizeof(ekind->tcstat[i].ekinh_old[0][0]),
                      ekind->tcstat[i].ekinh_old[0], cr);

            gmx_bcast(sizeof(ekind->tcstat[i].ekinscalef_nhc),
                      &(ekind->tcstat[i].ekinscalef_nhc), cr);
            gmx_bcast(sizeof(ekind->tcstat[i].ekinscaleh_nhc),
                      &(ekind->tcstat[i].ekinscaleh_nhc), cr);
            gmx_bcast(sizeof(ekind->tcstat[i].vscale_nhc),
                      &(ekind->tcstat[i].vscale_nhc), cr);
        }
        gmx_bcast(DIM*DIM*sizeof(ekind->ekin[0][0]),
                  ekind->ekin[0], cr);

        gmx_bcast(sizeof(ekind->dekindl), &ekind->dekindl, cr);
        gmx_bcast(sizeof(ekind->cosacc.mvcos), &ekind->cosacc.mvcos, cr);
    }
}
void set_deform_reference_box(gmx_update_t *upd, gmx_int64_t step, matrix box)
{
    upd->deformref_step = step;
    copy_mat(box, upd->deformref_box);
}
static void deform(gmx_update_t *upd,
                   int start, int homenr, rvec x[], matrix box,
                   const t_inputrec *ir, gmx_int64_t step)
{
    matrix bnew, invbox, mu;
    real   elapsed_time;
    int    i, j;

    elapsed_time = (step + 1 - upd->deformref_step)*ir->delta_t;
    copy_mat(box, bnew);
    for (i = 0; i < DIM; i++)
    {
        for (j = 0; j < DIM; j++)
        {
            if (ir->deform[i][j] != 0)
            {
                bnew[i][j] =
                    upd->deformref_box[i][j] + elapsed_time*ir->deform[i][j];
            }
        }
    }
    /* We correct the off-diagonal elements,
     * which can grow indefinitely during shearing,
     * so the shifts do not get messed up.
     */
    for (i = 1; i < DIM; i++)
    {
        for (j = i-1; j >= 0; j--)
        {
            while (bnew[i][j] - box[i][j] > 0.5*bnew[j][j])
            {
                rvec_dec(bnew[i], bnew[j]);
            }
            while (bnew[i][j] - box[i][j] < -0.5*bnew[j][j])
            {
                rvec_inc(bnew[i], bnew[j]);
            }
        }
    }
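    /* Scale the coordinates with mu = bnew*box^-1; mmul_ur0 and the
     * coordinate loop below exploit that GROMACS boxes have zero
     * upper-right off-diagonal elements. */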
    gmx::invertBoxMatrix(box, invbox);
    copy_mat(bnew, box);
    mmul_ur0(box, invbox, mu);

    for (i = start; i < start+homenr; i++)
    {
        x[i][XX] = mu[XX][XX]*x[i][XX] + mu[YY][XX]*x[i][YY] + mu[ZZ][XX]*x[i][ZZ];
        x[i][YY] = mu[YY][YY]*x[i][YY] + mu[ZZ][YY]*x[i][ZZ];
        x[i][ZZ] = mu[ZZ][ZZ]*x[i][ZZ];
    }
}
void update_tcouple(gmx_int64_t     step,
                    t_inputrec     *inputrec,
                    t_state        *state,
                    gmx_ekindata_t *ekind,
                    t_extmass      *MassQ,
                    t_mdatoms      *md)
{
    bool doTemperatureCoupling = false;

    /* if using vv with trotter decomposition methods, we do this elsewhere in the code */
    if (!(EI_VV(inputrec->eI) &&
          (inputrecNvtTrotter(inputrec) || inputrecNptTrotter(inputrec) || inputrecNphTrotter(inputrec))))
    {
        doTemperatureCoupling = isTemperatureCouplingStep(step, inputrec);
    }

    if (doTemperatureCoupling)
    {
        real dttc = inputrec->nsttcouple*inputrec->delta_t;

        switch (inputrec->etc)
        {
            case etcNO:
                break;
            case etcBERENDSEN:
                berendsen_tcoupl(inputrec, ekind, dttc, state->therm_integral);
                break;
            case etcNOSEHOOVER:
                nosehoover_tcoupl(&(inputrec->opts), ekind, dttc,
                                  state->nosehoover_xi.data(), state->nosehoover_vxi.data(), MassQ);
                break;
            case etcVRESCALE:
                vrescale_tcoupl(inputrec, step, ekind, dttc,
                                state->therm_integral.data());
                break;
        }
        /* rescale in place here */
        if (EI_VV(inputrec->eI))
        {
            rescale_velocities(ekind, md, 0, md->homenr, as_rvec_array(state->v.data()));
        }
    }
    else
    {
        /* Set the T scaling lambda to 1 to have no scaling */
        for (int i = 0; (i < inputrec->opts.ngtc); i++)
        {
            ekind->tcstat[i].lambda = 1.0;
        }
    }
}
/*! \brief Computes the atom range for a thread to operate on, ensuring SIMD aligned ranges
 *
 * \param[in]  numThreads   The number of threads to divide atoms over
 * \param[in]  threadIndex  The thread to get the range for
 * \param[in]  numAtoms     The total number of atoms (on this rank)
 * \param[out] startAtom    The start of the atom range
 * \param[out] endAtom      The end of the atom range, note that this is in general not a multiple of the SIMD width
 */
static void getThreadAtomRange(int numThreads, int threadIndex, int numAtoms,
                               int *startAtom, int *endAtom)
{
#if GMX_HAVE_SIMD_UPDATE
    constexpr int blockSize = GMX_SIMD_REAL_WIDTH;
#else
    constexpr int blockSize = 1;
#endif
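    /* Divide the atoms into blocks of the SIMD width, so that every thread
     * starts on a SIMD-aligned atom index; only the last thread gets a
     * range that is not a multiple of the block size. */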
    int numBlocks = (numAtoms + blockSize - 1)/blockSize;

    *startAtom = ((numBlocks* threadIndex     )/numThreads)*blockSize;
    *endAtom   = ((numBlocks*(threadIndex + 1))/numThreads)*blockSize;
    if (threadIndex == numThreads - 1)
    {
        *endAtom = numAtoms;
    }
}
void update_pcouple_before_coordinates(FILE             *fplog,
                                       gmx_int64_t       step,
                                       const t_inputrec *inputrec,
                                       t_state          *state,
                                       matrix            parrinellorahmanMu,
                                       matrix            M,
                                       gmx_bool          bInitStep)
{
    /* Berendsen P-coupling is completely handled after the coordinate update.
     * Trotter P-coupling is handled by separate calls to trotter_update().
     */
    if (inputrec->epc == epcPARRINELLORAHMAN &&
        isPressureCouplingStep(step, inputrec))
    {
        real dtpc = inputrec->nstpcouple*inputrec->delta_t;

        parrinellorahman_pcoupl(fplog, step, inputrec, dtpc, state->pres_prev,
                                state->box, state->box_rel, state->boxv,
                                M, parrinellorahmanMu, bInitStep);
    }
}
void update_constraints(FILE             *fplog,
                        gmx_int64_t       step,
                        real             *dvdlambda, /* the contribution to be added to the bonded interactions */
                        t_inputrec       *inputrec,  /* input record and box stuff */
                        t_mdatoms        *md,
                        t_state          *state,
                        gmx_bool          bMolPBC,
                        t_graph          *graph,
                        PaddedRVecVector *force,     /* forces on home particles */
                        t_idef           *idef,
                        tensor            vir_part,
                        t_commrec        *cr,
                        t_nrnb           *nrnb,
                        gmx_wallcycle_t   wcycle,
                        gmx_update_t     *upd,
                        gmx_constr_t      constr,
                        gmx_bool          bFirstHalf,
                        gmx_bool          bCalcVir)
{
    gmx_bool bLastStep, bLog = FALSE, bEner = FALSE, bDoConstr = FALSE;
    tensor   vir_con;
    int      nth, th;

    if (constr)
    {
        bDoConstr = TRUE;
    }
    if (bFirstHalf && !EI_VV(inputrec->eI))
    {
        bDoConstr = FALSE;
    }

    /* for now, SD update is here -- though it really seems like it
       should be reformulated as a velocity verlet method, since it has two parts */

    int homenr = md->homenr;

    /* Cast delta_t from double to real to make the integrators faster.
     * The only reason for having delta_t double is to get accurate values
     * for t=delta_t*step when step is larger than float precision.
     * For integration dt the accuracy of real suffices, since with
     * integral += dt*integrand the increment is nearly always (much) smaller
     * than the integral (and the integrand has real precision).
     */
    real dt = inputrec->delta_t;

    /*
     *  Steps (7C, 8C)
     *  APPLY CONSTRAINTS:
     *  BLOCK SHAKE
     *
     *  When doing PR pressure coupling we have to constrain the
     *  bonds in each iteration. If we are only using Nose-Hoover tcoupling
     *  it is enough to do this once though, since the relative velocities
     *  after this will be normal to the bond vector
     */

    if (bDoConstr)
    {
        /* clear out constraints before applying */
        clear_mat(vir_part);

        bLastStep = (step == inputrec->init_step+inputrec->nsteps);
        bLog      = (do_per_step(step, inputrec->nstlog) || bLastStep || (step < 0));
        bEner     = (do_per_step(step, inputrec->nstenergy) || bLastStep);
        /* Constrain the coordinates upd->xp */
        wallcycle_start(wcycle, ewcCONSTR);
        if (EI_VV(inputrec->eI) && bFirstHalf)
        {
            constrain(nullptr, bLog, bEner, constr, idef,
                      inputrec, cr, step, 1, 1.0, md,
                      as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
                      bMolPBC, state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      nullptr, bCalcVir ? &vir_con : nullptr, nrnb, econqVeloc);
        }
        else
        {
            constrain(nullptr, bLog, bEner, constr, idef,
                      inputrec, cr, step, 1, 1.0, md,
                      as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), nullptr,
                      bMolPBC, state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      as_rvec_array(state->v.data()), bCalcVir ? &vir_con : nullptr, nrnb, econqCoord);
        }
        wallcycle_stop(wcycle, ewcCONSTR);

        where();

        dump_it_all(fplog, "After Shake",
                    state->natoms, &state->x, &upd->xp, &state->v, force);

        if (bCalcVir)
        {
            m_add(vir_part, vir_con, vir_part);
            if (debug)
            {
                pr_rvecs(debug, 0, "constraint virial", vir_part, DIM);
            }
        }
    }

    where();

    if (inputrec->eI == eiSD1 && bDoConstr && !bFirstHalf)
    {
        wallcycle_start(wcycle, ewcUPDATE);

        nth = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nth) schedule(static)
        for (th = 0; th < nth; th++)
        {
            try
            {
                int start_th, end_th;
                getThreadAtomRange(nth, th, homenr, &start_th, &end_th);

                /* The second part of the SD integration */
                do_update_sd1(upd->sd,
                              start_th, end_th, dt,
                              inputrec->opts.acc, inputrec->opts.nFreeze,
                              md->invmass, md->ptype,
                              md->cFREEZE, md->cACC, md->cTC,
                              as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), as_rvec_array(state->v.data()), as_rvec_array(force->data()),
                              bDoConstr, FALSE,
                              step, inputrec->ld_seed,
                              DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
        inc_nrnb(nrnb, eNR_UPDATE, homenr);
        wallcycle_stop(wcycle, ewcUPDATE);

        if (bDoConstr)
        {
            /* Constrain the coordinates upd->xp for half a time step */
            wallcycle_start(wcycle, ewcCONSTR);
            constrain(nullptr, bLog, bEner, constr, idef,
                      inputrec, cr, step, 1, 0.5, md,
                      as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()), nullptr,
                      bMolPBC, state->box,
                      state->lambda[efptBONDED], dvdlambda,
                      as_rvec_array(state->v.data()), nullptr, nrnb, econqCoord);
            wallcycle_stop(wcycle, ewcCONSTR);
        }
    }

    /* We must always unshift after updating coordinates; if we did not shake
       x was shifted in do_force */

    if (!(bFirstHalf)) /* in the first half of vv, no shift. */
    {
        /* NOTE This part of the update actually does not belong with
         * the constraints, since we also call it without constraints.
         * But currently we always integrate to a temporary buffer and
         * then copy the results back here.
         */
        wallcycle_start_nocount(wcycle, ewcUPDATE);

        if (md->cFREEZE != nullptr && constr != nullptr)
        {
            /* If we have atoms that are frozen along some, but not all
             * dimensions, the constraints will have moved them also along
             * the frozen dimensions. To freeze such degrees of freedom
             * we copy them back here to later copy them forward. It would
             * be more elegant and slightly more efficient to copy zero
             * times instead of twice, but the graph case below prevents this.
             */
            const ivec *nFreeze                     = inputrec->opts.nFreeze;
            bool        partialFreezeAndConstraints = false;
            for (int g = 0; g < inputrec->opts.ngfrz; g++)
            {
                int numFreezeDim = nFreeze[g][XX] + nFreeze[g][YY] + nFreeze[g][ZZ];
                if (numFreezeDim > 0 && numFreezeDim < 3)
                {
                    partialFreezeAndConstraints = true;
                }
            }
            if (partialFreezeAndConstraints)
            {
                for (int i = 0; i < homenr; i++)
                {
                    int g = md->cFREEZE[i];

                    for (int d = 0; d < DIM; d++)
                    {
                        if (nFreeze[g][d])
                        {
                            upd->xp[i][d] = state->x[i][d];
                        }
                    }
                }
            }
        }

        if (graph && (graph->nnodes > 0))
        {
            unshift_x(graph, state->box, as_rvec_array(state->x.data()), as_rvec_array(upd->xp.data()));
            if (TRICLINIC(state->box))
            {
                inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
            }
            else
            {
                inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
            }
        }
        else
        {
            /* The copy is performance sensitive, so use a bare pointer */
            rvec *xp = as_rvec_array(upd->xp.data());
#ifndef __clang_analyzer__
            // cppcheck-suppress unreadVariable
            nth = gmx_omp_nthreads_get(emntUpdate);
#endif
#pragma omp parallel for num_threads(nth) schedule(static)
            for (int i = 0; i < homenr; i++)
            {
                // Trivial statement, does not throw
                copy_rvec(xp[i], state->x[i]);
            }
        }
        wallcycle_stop(wcycle, ewcUPDATE);

        dump_it_all(fplog, "After unshift",
                    state->natoms, &state->x, &upd->xp, &state->v, force);
    }
    /* ############# END the update of velocities and positions ######### */
}
void update_pcouple_after_coordinates(FILE             *fplog,
                                      gmx_int64_t       step,
                                      const t_inputrec *inputrec,
                                      const t_mdatoms  *md,
                                      const matrix      pressure,
                                      const matrix      forceVirial,
                                      const matrix      constraintVirial,
                                      const matrix      parrinellorahmanMu,
                                      t_state          *state,
                                      t_nrnb           *nrnb,
                                      gmx_update_t     *upd)
{
    int  start  = 0;
    int  homenr = md->homenr;

    /* Cast to real for faster code, no loss in precision (see comment above) */
    real dt     = inputrec->delta_t;

    where();

    /* now update boxes */
    switch (inputrec->epc)
    {
        case (epcNO):
            break;
        case (epcBERENDSEN):
            if (isPressureCouplingStep(step, inputrec))
            {
                real   dtpc = inputrec->nstpcouple*dt;
                matrix mu;
                berendsen_pcoupl(fplog, step, inputrec, dtpc,
                                 pressure, state->box,
                                 forceVirial, constraintVirial,
                                 mu, &state->baros_integral);
                berendsen_pscale(inputrec, mu, state->box, state->box_rel,
                                 start, homenr, as_rvec_array(state->x.data()),
                                 md->cFREEZE, nrnb);
            }
            break;
        case (epcPARRINELLORAHMAN):
            if (isPressureCouplingStep(step, inputrec))
            {
                /* The box velocities were updated in do_pr_pcoupl,
                 * but we don't change the box vectors until we get here
                 * since we need to be able to shift/unshift above.
                 */
                real dtpc = inputrec->nstpcouple*dt;
                for (int i = 0; i < DIM; i++)
                {
                    for (int m = 0; m <= i; m++)
                    {
                        state->box[i][m] += dtpc*state->boxv[i][m];
                    }
                }
                preserve_box_shape(inputrec, state->box_rel, state->box);

                /* Scale the coordinates */
                for (int n = start; n < start + homenr; n++)
                {
                    tmvmul_ur0(parrinellorahmanMu, state->x[n], state->x[n]);
                }
            }
            break;
        case (epcMTTK):
            switch (inputrec->epct)
            {
                case (epctISOTROPIC):
                    /* DIM * eta = ln V. so DIM*eta_new = DIM*eta_old + DIM*dt*veta =>
                       ln V_new = ln V_old + 3*dt*veta => V_new = V_old*exp(3*dt*veta) =>
                       Side length scales as exp(veta*dt) */

                    msmul(state->box, std::exp(state->veta*dt), state->box);

                    /* Relate veta to boxv. veta = d(eta)/dT = (1/DIM)*1/V dV/dT.
                       If we assume isotropic scaling, and box length scaling
                       factor L, then V = L^DIM (det(M)). So dV/dt = DIM
                       L^(DIM-1) dL/dt det(M), and veta = (1/L) dL/dt. The
                       determinant of B is L^DIM det(M), and the determinant
                       of dB/dt is (dL/dT)^DIM det (M). veta will be
                       (det(dB/dT)/det(B))^(1/3). Then since M =
                       B_new*(vol_new)^(1/3), dB/dT_new = (veta_new)*B(new). */

                    msmul(state->box, state->veta, state->boxv);
                    break;
                default:
                    break;
            }
            break;
        default:
            break;
    }

    if (inputrecDeform(inputrec))
    {
        deform(upd, start, homenr, as_rvec_array(state->x.data()), state->box, inputrec, step);
    }
    where();
    dump_it_all(fplog, "After update",
                state->natoms, &state->x, &upd->xp, &state->v, nullptr);
}
void update_coords(FILE             *fplog,
                   gmx_int64_t       step,
                   t_inputrec       *inputrec, /* input record and box stuff */
                   t_mdatoms        *md,
                   t_state          *state,
                   PaddedRVecVector *f,        /* forces on home particles */
                   t_fcdata         *fcd,
                   gmx_ekindata_t   *ekind,
                   matrix            M,
                   gmx_update_t     *upd,
                   int               UpdatePart,
                   t_commrec        *cr, /* these shouldn't be here -- need to think about it */
                   gmx_constr_t      constr)
{
    gmx_bool bDoConstr = (nullptr != constr);

    /* Running the velocity half does nothing except for velocity verlet */
    if ((UpdatePart == etrtVELOCITY1 || UpdatePart == etrtVELOCITY2) &&
        !EI_VV(inputrec->eI))
    {
        gmx_incons("update_coords called for velocity without VV integrator");
    }

    int  homenr = md->homenr;

    /* Cast to real for faster code, no loss in precision (see comment above) */
    real dt     = inputrec->delta_t;

    /* We need to update the NMR restraint history when time averaging is used */
    if (state->flags & (1<<estDISRE_RM3TAV))
    {
        update_disres_history(fcd, &state->hist);
    }
    if (state->flags & (1<<estORIRE_DTAV))
    {
        update_orires_history(fcd, &state->hist);
    }

    /* ############# START The update of velocities and positions ######### */
    where();
    dump_it_all(fplog, "Before update",
                state->natoms, &state->x, &upd->xp, &state->v, f);

    int nth = gmx_omp_nthreads_get(emntUpdate);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
    {
        try
        {
            int start_th, end_th;
            getThreadAtomRange(nth, th, homenr, &start_th, &end_th);

#ifndef NDEBUG
            /* Strictly speaking, we would only need this check with SIMD
             * and for the actual SIMD width. But since the code currently
             * always adds padding for GMX_REAL_MAX_SIMD_WIDTH, we check that.
             */
            size_t homenrSimdPadded = ((homenr + GMX_REAL_MAX_SIMD_WIDTH - 1)/GMX_REAL_MAX_SIMD_WIDTH)*GMX_REAL_MAX_SIMD_WIDTH;
            GMX_ASSERT(state->x.size() >= homenrSimdPadded, "state->x needs to be padded for SIMD access");
            GMX_ASSERT(upd->xp.size() >= homenrSimdPadded, "upd->xp needs to be padded for SIMD access");
            GMX_ASSERT(state->v.size() >= homenrSimdPadded, "state->v needs to be padded for SIMD access");
            GMX_ASSERT(f->size() >= homenrSimdPadded, "f needs to be padded for SIMD access");
#endif

            const rvec *x_rvec  = as_rvec_array(state->x.data());
            rvec       *xp_rvec = as_rvec_array(upd->xp.data());
            rvec       *v_rvec  = as_rvec_array(state->v.data());
            const rvec *f_rvec  = as_rvec_array(f->data());

            switch (inputrec->eI)
            {
                case (eiMD):
                    do_update_md(start_th, end_th, step, dt,
                                 inputrec, md, ekind, state->box,
                                 x_rvec, xp_rvec, v_rvec, f_rvec,
                                 state->nosehoover_vxi.data(), M);
                    break;
                case (eiSD1):
                    /* With constraints, the SD1 update is done in 2 parts */
                    do_update_sd1(upd->sd,
                                  start_th, end_th, dt,
                                  inputrec->opts.acc, inputrec->opts.nFreeze,
                                  md->invmass, md->ptype,
                                  md->cFREEZE, md->cACC, md->cTC,
                                  x_rvec, xp_rvec, v_rvec, f_rvec,
                                  bDoConstr, TRUE,
                                  step, inputrec->ld_seed, DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr);
                    break;
                case (eiBD):
                    do_update_bd(start_th, end_th, dt,
                                 inputrec->opts.nFreeze, md->invmass, md->ptype,
                                 md->cFREEZE, md->cTC,
                                 x_rvec, xp_rvec, v_rvec, f_rvec,
                                 inputrec->bd_fric,
                                 upd->sd->bd_rf,
                                 step, inputrec->ld_seed, DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr);
                    break;
                case (eiVV):
                case (eiVVAK):
                {
                    gmx_bool bExtended = (inputrec->etc == etcNOSEHOOVER ||
                                          inputrec->epc == epcPARRINELLORAHMAN ||
                                          inputrec->epc == epcMTTK);

                    /* assuming barostat coupled to group 0 */
                    real alpha = 1.0 + DIM/static_cast<real>(inputrec->opts.nrdf[0]);
                    switch (UpdatePart)
                    {
                        case etrtVELOCITY1:
                        case etrtVELOCITY2:
                            do_update_vv_vel(start_th, end_th, dt,
                                             inputrec->opts.acc, inputrec->opts.nFreeze,
                                             md->invmass, md->ptype,
                                             md->cFREEZE, md->cACC,
                                             v_rvec, f_rvec,
                                             bExtended, state->veta, alpha);
                            break;
                        case etrtPOSITION:
                            do_update_vv_pos(start_th, end_th, dt,
                                             inputrec->opts.nFreeze,
                                             md->ptype, md->cFREEZE,
                                             x_rvec, xp_rvec, v_rvec,
                                             bExtended, state->veta);
                            break;
                    }
                    break;
                }
                default:
                    gmx_fatal(FARGS, "Don't know how to update coordinates");
                    break;
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
void correct_ekin(FILE *log, int start, int end, rvec v[], rvec vcm, real mass[],
                  real tmass, tensor ekin)
{
    /*
     * This is a debugging routine. It should not be called for production code
     *
     * The kinetic energy should be calculated according to:
     *   Ekin = 1/2 m (v-vcm)^2
     * However the correction is not always applied, since vcm may not be
     * known in time and we compute
     *   Ekin' = 1/2 m v^2 instead
     * This can be corrected afterwards by computing
     *   Ekin = Ekin' + 1/2 m ( -2 v vcm + vcm^2)
     * or in shorthand:
     *   Ekin = Ekin' - m v vcm + 1/2 m vcm^2
     */
    int    i, j, k;
    real   m, tm;
    rvec   hvcm, mv;
    tensor dekin;

    /* Local particles */
    clear_rvec(mv);

    /* Processor dependent part. */
    tm = 0;
    for (i = start; (i < end); i++)
    {
        m   = mass[i];
        tm += m;
        for (j = 0; (j < DIM); j++)
        {
            mv[j] += m*v[i][j];
        }
    }
    /* Shortcut */
    svmul(1/tmass, vcm, vcm);
    svmul(0.5, vcm, hvcm);
    clear_mat(dekin);
    for (j = 0; (j < DIM); j++)
    {
        for (k = 0; (k < DIM); k++)
        {
            dekin[j][k] += vcm[k]*(tm*hvcm[j] - mv[j]);
        }
    }
    pr_rvecs(log, 0, "dekin", dekin, DIM);
    pr_rvecs(log, 0, " ekin", ekin, DIM);
    fprintf(log, "dekin = %g, ekin = %g  vcm = (%8.4f %8.4f %8.4f)\n",
            trace(dekin), trace(ekin), vcm[XX], vcm[YY], vcm[ZZ]);
    fprintf(log, "mv = (%8.4f %8.4f %8.4f)\n",
            mv[XX], mv[YY], mv[ZZ]);
}
extern gmx_bool update_randomize_velocities(t_inputrec *ir, gmx_int64_t step, const t_commrec *cr,
                                            t_mdatoms *md, t_state *state, gmx_update_t *upd, gmx_constr_t constr)
{

    real rate = (ir->delta_t)/ir->opts.tau_t[0];

    if (ir->etc == etcANDERSEN && constr != nullptr)
    {
        /* Currently, Andersen thermostat does not support constrained
           systems. Functionality exists in the andersen_tcoupl
           function in GROMACS 4.5.7 to allow this combination. That
           code could be ported to the current random-number
           generation approach, but has not yet been done because of
           lack of time and resources. */
        gmx_fatal(FARGS, "Normal Andersen is currently not supported with constraints, use massive Andersen instead");
    }

    /* proceed with andersen if 1) it's fixed probability per
       particle andersen or 2) it's massive andersen and it's tau_t/dt */
    if ((ir->etc == etcANDERSEN) || do_per_step(step, static_cast<int>(1.0/rate + 0.5)))
    {
        andersen_tcoupl(ir, step, cr, md, state, rate,
                        upd->sd->randomize_group, upd->sd->boltzfac);
        return TRUE;
    }
    return FALSE;
}