/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
46 #include "gromacs/ewald/pme.h"
47 #include "gromacs/gpu_utils/hostallocator.h"
48 #include "gromacs/math/functions.h"
49 #include "gromacs/mdlib/gmx_omp_nthreads.h"
50 #include "gromacs/mdtypes/inputrec.h"
51 #include "gromacs/mdtypes/md_enums.h"
52 #include "gromacs/mdtypes/mdatom.h"
53 #include "gromacs/topology/mtop_lookup.h"
54 #include "gromacs/topology/mtop_util.h"
55 #include "gromacs/topology/topology.h"
56 #include "gromacs/utility/exceptions.h"
57 #include "gromacs/utility/smalloc.h"
59 #define ALMOST_ZERO 1e-30
64 MDAtoms::MDAtoms() : mdatoms_(nullptr) {}
68 if (mdatoms_
== nullptr)
72 sfree(mdatoms_
->massA
);
73 sfree(mdatoms_
->massB
);
74 sfree(mdatoms_
->massT
);
75 gmx::AlignedAllocationPolicy::free(mdatoms_
->invmass
);
76 sfree(mdatoms_
->invMassPerDim
);
77 sfree(mdatoms_
->typeA
);
78 sfree(mdatoms_
->typeB
);
79 /* mdatoms->chargeA and mdatoms->chargeB point at chargeA_.data()
80 * and chargeB_.data() respectively. They get freed automatically. */
81 sfree(mdatoms_
->sqrt_c6A
);
82 sfree(mdatoms_
->sigmaA
);
83 sfree(mdatoms_
->sigma3A
);
84 sfree(mdatoms_
->sqrt_c6B
);
85 sfree(mdatoms_
->sigmaB
);
86 sfree(mdatoms_
->sigma3B
);
87 sfree(mdatoms_
->ptype
);
89 sfree(mdatoms_
->cENER
);
90 sfree(mdatoms_
->cACC
);
91 sfree(mdatoms_
->cFREEZE
);
92 sfree(mdatoms_
->cVCM
);
93 sfree(mdatoms_
->cORF
);
94 sfree(mdatoms_
->bPerturbed
);
99 void MDAtoms::resizeChargeA(const int newSize
)
101 chargeA_
.resizeWithPadding(newSize
);
102 mdatoms_
->chargeA
= chargeA_
.data();
105 void MDAtoms::resizeChargeB(const int newSize
)
107 chargeB_
.resizeWithPadding(newSize
);
108 mdatoms_
->chargeB
= chargeB_
.data();
111 void MDAtoms::reserveChargeA(const int newCapacity
)
113 chargeA_
.reserveWithPadding(newCapacity
);
114 mdatoms_
->chargeA
= chargeA_
.data();
117 void MDAtoms::reserveChargeB(const int newCapacity
)
119 chargeB_
.reserveWithPadding(newCapacity
);
120 mdatoms_
->chargeB
= chargeB_
.data();
123 std::unique_ptr
<MDAtoms
> makeMDAtoms(FILE* fp
, const gmx_mtop_t
& mtop
, const t_inputrec
& ir
, const bool rankHasPmeGpuTask
)
125 auto mdAtoms
= std::make_unique
<MDAtoms
>();
126 // GPU transfers may want to use a suitable pinning mode.
127 if (rankHasPmeGpuTask
)
129 changePinningPolicy(&mdAtoms
->chargeA_
, pme_get_pinning_policy());
130 changePinningPolicy(&mdAtoms
->chargeB_
, pme_get_pinning_policy());
134 mdAtoms
->mdatoms_
.reset(md
);
136 md
->nenergrp
= mtop
.groups
.groups
[SimulationAtomGroupType::EnergyOutput
].size();
137 md
->bVCMgrps
= FALSE
;
138 for (int i
= 0; i
< mtop
.natoms
; i
++)
140 if (getGroupType(mtop
.groups
, SimulationAtomGroupType::MassCenterVelocityRemoval
, i
) > 0)
146 /* Determine the total system mass and perturbed atom counts */
147 double totalMassA
= 0.0;
148 double totalMassB
= 0.0;
150 md
->haveVsites
= FALSE
;
151 gmx_mtop_atomloop_block_t aloop
= gmx_mtop_atomloop_block_init(&mtop
);
154 while (gmx_mtop_atomloop_block_next(aloop
, &atom
, &nmol
))
156 totalMassA
+= nmol
* atom
->m
;
157 totalMassB
+= nmol
* atom
->mB
;
159 if (atom
->ptype
== eptVSite
)
161 md
->haveVsites
= TRUE
;
164 if (ir
.efep
!= efepNO
&& PERTURBED(*atom
))
167 if (atom
->mB
!= atom
->m
)
169 md
->nMassPerturbed
+= nmol
;
171 if (atom
->qB
!= atom
->q
)
173 md
->nChargePerturbed
+= nmol
;
175 if (atom
->typeB
!= atom
->type
)
177 md
->nTypePerturbed
+= nmol
;
182 md
->tmassA
= totalMassA
;
183 md
->tmassB
= totalMassB
;
185 if (ir
.efep
!= efepNO
&& fp
)
187 fprintf(fp
, "There are %d atoms and %d charges for free energy perturbation\n",
188 md
->nPerturbed
, md
->nChargePerturbed
);
191 md
->havePartiallyFrozenAtoms
= FALSE
;
192 for (int g
= 0; g
< ir
.opts
.ngfrz
; g
++)
194 for (int d
= YY
; d
< DIM
; d
++)
196 if (ir
.opts
.nFreeze
[g
][d
] != ir
.opts
.nFreeze
[g
][XX
])
198 md
->havePartiallyFrozenAtoms
= TRUE
;
203 md
->bOrires
= (gmx_mtop_ftype_count(&mtop
, F_ORIRES
) != 0);
210 void atoms2md(const gmx_mtop_t
* mtop
,
211 const t_inputrec
* ir
,
213 gmx::ArrayRef
<int> index
,
215 gmx::MDAtoms
* mdAtoms
)
218 const t_grpopts
* opts
;
219 int nthreads gmx_unused
;
221 bLJPME
= EVDW_PME(ir
->vdwtype
);
225 const SimulationGroups
& groups
= mtop
->groups
;
227 auto md
= mdAtoms
->mdatoms();
228 /* nindex>=0 indicates DD where we use an index */
235 md
->nr
= mtop
->natoms
;
238 if (md
->nr
> md
->nalloc
)
240 md
->nalloc
= over_alloc_dd(md
->nr
);
242 if (md
->nMassPerturbed
)
244 srenew(md
->massA
, md
->nalloc
);
245 srenew(md
->massB
, md
->nalloc
);
247 srenew(md
->massT
, md
->nalloc
);
248 /* The SIMD version of the integrator needs this aligned and padded.
249 * The padding needs to be with zeros, which we set later below.
251 gmx::AlignedAllocationPolicy::free(md
->invmass
);
252 md
->invmass
= new (gmx::AlignedAllocationPolicy::malloc(
253 (md
->nalloc
+ GMX_REAL_MAX_SIMD_WIDTH
) * sizeof(*md
->invmass
))) real
;
254 srenew(md
->invMassPerDim
, md
->nalloc
);
255 // TODO eventually we will have vectors and just resize
256 // everything, but for now the semantics of md->nalloc being
257 // the capacity are preserved by keeping vectors within
258 // mdAtoms having the same properties as the other arrays.
259 mdAtoms
->reserveChargeA(md
->nalloc
);
260 mdAtoms
->resizeChargeA(md
->nr
);
261 if (md
->nPerturbed
> 0)
263 mdAtoms
->reserveChargeB(md
->nalloc
);
264 mdAtoms
->resizeChargeB(md
->nr
);
266 srenew(md
->typeA
, md
->nalloc
);
269 srenew(md
->typeB
, md
->nalloc
);
273 srenew(md
->sqrt_c6A
, md
->nalloc
);
274 srenew(md
->sigmaA
, md
->nalloc
);
275 srenew(md
->sigma3A
, md
->nalloc
);
278 srenew(md
->sqrt_c6B
, md
->nalloc
);
279 srenew(md
->sigmaB
, md
->nalloc
);
280 srenew(md
->sigma3B
, md
->nalloc
);
283 srenew(md
->ptype
, md
->nalloc
);
286 srenew(md
->cTC
, md
->nalloc
);
287 /* We always copy cTC with domain decomposition */
289 srenew(md
->cENER
, md
->nalloc
);
292 srenew(md
->cACC
, md
->nalloc
);
295 && (opts
->ngfrz
> 1 || opts
->nFreeze
[0][XX
] || opts
->nFreeze
[0][YY
] || opts
->nFreeze
[0][ZZ
]))
297 srenew(md
->cFREEZE
, md
->nalloc
);
301 srenew(md
->cVCM
, md
->nalloc
);
305 srenew(md
->cORF
, md
->nalloc
);
309 srenew(md
->bPerturbed
, md
->nalloc
);
312 /* Note that these user t_mdatoms array pointers are NULL
313 * when there is only one group present.
314 * Therefore, when adding code, the user should use something like:
315 * gprnrU1 = (md->cU1==NULL ? 0 : md->cU1[localatindex])
317 if (!mtop
->groups
.groupNumbers
[SimulationAtomGroupType::User1
].empty())
319 srenew(md
->cU1
, md
->nalloc
);
321 if (!mtop
->groups
.groupNumbers
[SimulationAtomGroupType::User2
].empty())
323 srenew(md
->cU2
, md
->nalloc
);
329 nthreads
= gmx_omp_nthreads_get(emntDefault
);
330 #pragma omp parallel for num_threads(nthreads) schedule(static) firstprivate(molb)
331 for (int i
= 0; i
< md
->nr
; i
++)
347 const t_atom
& atom
= mtopGetAtomParameters(mtop
, ag
, &molb
);
351 md
->cFREEZE
[i
] = getGroupType(groups
, SimulationAtomGroupType::Freeze
, ag
);
353 if (EI_ENERGY_MINIMIZATION(ir
->eI
))
355 /* Displacement is proportional to F, masses used for constraints */
359 else if (ir
->eI
== eiBD
)
361 /* With BD the physical masses are irrelevant.
362 * To keep the code simple we use most of the normal MD code path
363 * for BD. Thus for constraining the masses should be proportional
364 * to the friction coefficient. We set the absolute value such that
365 * m/2<(dx/dt)^2> = m/2*2kT/fric*dt = kT/2 => m=fric*dt/2
366 * Then if we set the (meaningless) velocity to v=dx/dt, we get the
367 * correct kinetic energy and temperature using the usual code path.
368 * Thus with BD v*dt will give the displacement and the reported
369 * temperature can signal bad integration (too large time step).
373 mA
= 0.5 * ir
->bd_fric
* ir
->delta_t
;
374 mB
= 0.5 * ir
->bd_fric
* ir
->delta_t
;
378 /* The friction coefficient is mass/tau_t */
380 / opts
->tau_t
[md
->cTC
? groups
.groupNumbers
[SimulationAtomGroupType::TemperatureCoupling
][ag
] : 0];
381 mA
= 0.5 * atom
.m
* fac
;
382 mB
= 0.5 * atom
.mB
* fac
;
390 if (md
->nMassPerturbed
)
400 md
->invMassPerDim
[i
][XX
] = 0;
401 md
->invMassPerDim
[i
][YY
] = 0;
402 md
->invMassPerDim
[i
][ZZ
] = 0;
404 else if (md
->cFREEZE
)
407 GMX_ASSERT(opts
->nFreeze
!= nullptr,
408 "Must have freeze groups to initialize masses");
409 if (opts
->nFreeze
[g
][XX
] && opts
->nFreeze
[g
][YY
] && opts
->nFreeze
[g
][ZZ
])
411 /* Set the mass of completely frozen particles to ALMOST_ZERO
412 * iso 0 to avoid div by zero in lincs or shake.
414 md
->invmass
[i
] = ALMOST_ZERO
;
418 /* Note: Partially frozen particles use the normal invmass.
419 * If such particles are constrained, the frozen dimensions
420 * should not be updated with the constrained coordinates.
422 md
->invmass
[i
] = 1.0 / mA
;
424 for (int d
= 0; d
< DIM
; d
++)
426 md
->invMassPerDim
[i
][d
] = (opts
->nFreeze
[g
][d
] ? 0 : 1.0 / mA
);
431 md
->invmass
[i
] = 1.0 / mA
;
432 for (int d
= 0; d
< DIM
; d
++)
434 md
->invMassPerDim
[i
][d
] = 1.0 / mA
;
438 md
->chargeA
[i
] = atom
.q
;
439 md
->typeA
[i
] = atom
.type
;
442 c6
= mtop
->ffparams
.iparams
[atom
.type
* (mtop
->ffparams
.atnr
+ 1)].lj
.c6
;
443 c12
= mtop
->ffparams
.iparams
[atom
.type
* (mtop
->ffparams
.atnr
+ 1)].lj
.c12
;
444 md
->sqrt_c6A
[i
] = sqrt(c6
);
445 if (c6
== 0.0 || c12
== 0)
451 md
->sigmaA
[i
] = gmx::sixthroot(c12
/ c6
);
453 md
->sigma3A
[i
] = 1 / (md
->sigmaA
[i
] * md
->sigmaA
[i
] * md
->sigmaA
[i
]);
457 md
->bPerturbed
[i
] = PERTURBED(atom
);
458 md
->chargeB
[i
] = atom
.qB
;
459 md
->typeB
[i
] = atom
.typeB
;
462 c6
= mtop
->ffparams
.iparams
[atom
.typeB
* (mtop
->ffparams
.atnr
+ 1)].lj
.c6
;
463 c12
= mtop
->ffparams
.iparams
[atom
.typeB
* (mtop
->ffparams
.atnr
+ 1)].lj
.c12
;
464 md
->sqrt_c6B
[i
] = sqrt(c6
);
465 if (c6
== 0.0 || c12
== 0)
471 md
->sigmaB
[i
] = gmx::sixthroot(c12
/ c6
);
473 md
->sigma3B
[i
] = 1 / (md
->sigmaB
[i
] * md
->sigmaB
[i
] * md
->sigmaB
[i
]);
476 md
->ptype
[i
] = atom
.ptype
;
479 md
->cTC
[i
] = groups
.groupNumbers
[SimulationAtomGroupType::TemperatureCoupling
][ag
];
481 md
->cENER
[i
] = getGroupType(groups
, SimulationAtomGroupType::EnergyOutput
, ag
);
484 md
->cACC
[i
] = groups
.groupNumbers
[SimulationAtomGroupType::Acceleration
][ag
];
488 md
->cVCM
[i
] = groups
.groupNumbers
[SimulationAtomGroupType::MassCenterVelocityRemoval
][ag
];
492 md
->cORF
[i
] = getGroupType(groups
, SimulationAtomGroupType::OrientationRestraintsFit
, ag
);
497 md
->cU1
[i
] = groups
.groupNumbers
[SimulationAtomGroupType::User1
][ag
];
501 md
->cU2
[i
] = groups
.groupNumbers
[SimulationAtomGroupType::User2
][ag
];
504 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
509 /* Pad invmass with 0 so a SIMD MD update does not change v and x */
510 for (int i
= md
->nr
; i
< md
->nr
+ GMX_REAL_MAX_SIMD_WIDTH
; i
++)
517 /* We set mass, invmass, invMassPerDim and tmass for lambda=0.
518 * For free-energy runs, these should be updated using update_mdatoms().
520 md
->tmass
= md
->tmassA
;
524 void update_mdatoms(t_mdatoms
* md
, real lambda
)
526 if (md
->nMassPerturbed
&& lambda
!= md
->lambda
)
528 real L1
= 1 - lambda
;
530 /* Update masses of perturbed atoms for the change in lambda */
531 int gmx_unused nthreads
= gmx_omp_nthreads_get(emntDefault
);
532 #pragma omp parallel for num_threads(nthreads) schedule(static)
533 for (int i
= 0; i
< md
->nr
; i
++)
535 if (md
->bPerturbed
[i
])
537 md
->massT
[i
] = L1
* md
->massA
[i
] + lambda
* md
->massB
[i
];
538 /* Atoms with invmass 0 or ALMOST_ZERO are massless or frozen
539 * and their invmass does not depend on lambda.
541 if (md
->invmass
[i
] > 1.1 * ALMOST_ZERO
)
543 md
->invmass
[i
] = 1.0 / md
->massT
[i
];
544 for (int d
= 0; d
< DIM
; d
++)
546 if (md
->invMassPerDim
[i
][d
] > 1.1 * ALMOST_ZERO
)
548 md
->invMassPerDim
[i
][d
] = md
->invmass
[i
];
555 /* Update the system mass for the change in lambda */
556 md
->tmass
= L1
* md
->tmassA
+ lambda
* md
->tmassB
;