/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* This file is completely threadsafe - keep it that way! */
#include "gmxpre.h"

#include "vcm.h"

#include "gromacs/math/functions.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"

t_vcm::t_vcm(const SimulationGroups& groups, const t_inputrec& ir) :
    integratorConservesMomentum(!EI_RANDOM(ir.eI))
{
    mode     = (ir.nstcomm > 0) ? ir.comm_mode : ecmNO;
    ndim     = ndof_com(&ir);
    timeStep = ir.nstcomm * ir.delta_t;

    if (mode == ecmANGULAR && ndim < 3)
    {
        gmx_fatal(FARGS, "Can not have angular comm removal with pbc=%s",
                  c_pbcTypeNames[ir.pbcType].c_str());
    }

    if (mode != ecmNO)
    {
        nr = groups.groups[SimulationAtomGroupType::MassCenterVelocityRemoval].size();
        /* Allocate one extra for a possible rest group */
        size = nr + 1;
        /* We need vcm->nr+1 elements per thread, but to avoid cache
         * invalidation we add 2 elements to get a 152 byte separation.
         */
        stride = nr + 3;
        if (mode == ecmANGULAR)
        {
            snew(group_i, size);

            group_j.resize(size);
            group_x.resize(size);
            group_w.resize(size);
        }

        group_name.resize(size);
        group_p.resize(size);
        group_v.resize(size);
        group_mass.resize(size);
        group_ndf.resize(size);
        for (int g = 0; (g < nr); g++)
        {
            group_ndf[g] = ir.opts.nrdf[g];
            group_name[g] =
                    *groups.groupNames[groups.groups[SimulationAtomGroupType::MassCenterVelocityRemoval][g]];
        }

        thread_vcm.resize(gmx_omp_nthreads_get(emntDefault) * stride);
    }

    nFreeze = ir.opts.nFreeze;
}

t_vcm::~t_vcm()
{
    if (mode == ecmANGULAR)
    {
        sfree(group_i);
    }
}

void reportComRemovalInfo(FILE* fp, const t_vcm& vcm)
{
    /* Copy pointer to group names and print it. */
    if (fp && vcm.mode != ecmNO)
    {
        fprintf(fp, "Center of mass motion removal mode is %s\n", ECOM(vcm.mode));
        fprintf(fp,
                "We have the following groups for center of"
                " mass motion removal:\n");

        for (int g = 0; (g < vcm.nr); g++)
        {
            fprintf(fp, "%3d: %s\n", g, vcm.group_name[g]);
        }
    }
}
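
/* Note on the tensor accumulated below: update_tensor() sums the products
 * m * x_a * x_b (diagonal and, symmetrically, off-diagonal terms) rather than
 * the conventional inertia tensor sum_i m_i (r_i^2 delta_ab - r_ia r_ib);
 * the conversion between the two forms is performed later in get_minv().
 */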
static void update_tensor(const rvec x, real m0, tensor I)
{
    real xy, xz, yz;

    /* Compute inertia tensor contribution due to this atom */
    xy = x[XX] * x[YY] * m0;
    xz = x[XX] * x[ZZ] * m0;
    yz = x[YY] * x[ZZ] * m0;
    I[XX][XX] += x[XX] * x[XX] * m0;
    I[YY][YY] += x[YY] * x[YY] * m0;
    I[ZZ][ZZ] += x[ZZ] * x[ZZ] * m0;
    I[XX][YY] += xy;
    I[YY][XX] += xy;
    I[XX][ZZ] += xz;
    I[ZZ][XX] += xz;
    I[YY][ZZ] += yz;
    I[ZZ][YY] += yz;
}

/* Center of mass code for groups */
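/* Each OpenMP thread accumulates mass, linear momentum and, for ecmANGULAR,
 * the mass-weighted position, angular momentum and the m*x_a*x_b products
 * into its own padded slice of vcm->thread_vcm (see the stride padding in the
 * constructor, which reduces cache-line sharing between threads); the slices
 * are then summed into the per-group totals. The angular quantities here are
 * taken about the coordinate origin; the shift to the group's center of mass
 * happens later in process_and_check_cm_grp().
 */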
void calc_vcm_grp(const t_mdatoms&               md,
                  gmx::ArrayRef<const gmx::RVec> x,
                  gmx::ArrayRef<const gmx::RVec> v,
                  t_vcm*                         vcm)
{
    if (vcm->mode == ecmNO)
    {
        return;
    }
    int nthreads = gmx_omp_nthreads_get(emntDefault);

#pragma omp parallel num_threads(nthreads) default(none) shared(x, v, vcm, md)
    {
        int t = gmx_omp_get_thread_num();
        for (int g = 0; g < vcm->size; g++)
        {
            /* Reset linear momentum */
            t_vcm_thread* vcm_t = &vcm->thread_vcm[t * vcm->stride + g];
            vcm_t->mass         = 0;
            clear_rvec(vcm_t->p);
            if (vcm->mode == ecmANGULAR)
            {
                /* Reset angular momentum */
                clear_rvec(vcm_t->j);
                clear_rvec(vcm_t->x);
                clear_mat(vcm_t->i);
            }
        }

#pragma omp for schedule(static)
        for (int i = 0; i < md.homenr; i++)
        {
            int  g  = 0;
            real m0 = md.massT[i];
            if (md.cVCM)
            {
                g = md.cVCM[i];
            }
            t_vcm_thread* vcm_t = &vcm->thread_vcm[t * vcm->stride + g];
            /* Calculate linear momentum */
            vcm_t->mass += m0;
            int m;
            for (m = 0; (m < DIM); m++)
            {
                vcm_t->p[m] += m0 * v[i][m];
            }

            if (vcm->mode == ecmANGULAR)
            {
                /* Calculate angular momentum */
                rvec j0;
                cprod(x[i], v[i], j0);

                for (m = 0; (m < DIM); m++)
                {
                    vcm_t->j[m] += m0 * j0[m];
                    vcm_t->x[m] += m0 * x[i][m];
                }
                /* Update inertia tensor */
                update_tensor(x[i], m0, vcm_t->i);
            }
        }
    }

    for (int g = 0; g < vcm->size; g++)
    {
        /* Reset linear momentum */
        vcm->group_mass[g] = 0;
        clear_rvec(vcm->group_p[g]);
        if (vcm->mode == ecmANGULAR)
        {
            /* Reset angular momentum */
            clear_rvec(vcm->group_j[g]);
            clear_rvec(vcm->group_x[g]);
            clear_rvec(vcm->group_w[g]);
            clear_mat(vcm->group_i[g]);
        }

        for (int t = 0; t < nthreads; t++)
        {
            t_vcm_thread* vcm_t = &vcm->thread_vcm[t * vcm->stride + g];
            vcm->group_mass[g] += vcm_t->mass;
            rvec_inc(vcm->group_p[g], vcm_t->p);
            if (vcm->mode == ecmANGULAR)
            {
                rvec_inc(vcm->group_j[g], vcm_t->j);
                rvec_inc(vcm->group_x[g], vcm_t->x);
                m_add(vcm_t->i, vcm->group_i[g], vcm->group_i[g]);
            }
        }
    }
}

/*! \brief Remove the COM motion velocity from the velocities
 *
 * \note This routine should be called from within an OpenMP parallel region.
 *
 * \tparam numDimensions    Correct dimensions 0 to \p numDimensions-1
 * \param[in]     mdatoms   The atom property and group information
 * \param[in,out] v         The velocities to correct
 * \param[in]     vcm       VCM data
 */
template<int numDimensions>
static void doStopComMotionLinear(const t_mdatoms& mdatoms, gmx::ArrayRef<gmx::RVec> v, const t_vcm& vcm)
{
    const int             homenr   = mdatoms.homenr;
    const unsigned short* group_id = mdatoms.cVCM;

    if (mdatoms.cFREEZE != nullptr)
    {
        GMX_RELEASE_ASSERT(vcm.nFreeze != nullptr, "Need freeze dimension info with freeze groups");
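        /* For atoms in a freeze group only the non-frozen dimensions are
         * corrected, so frozen degrees of freedom never acquire a velocity
         * from the COM removal.
         */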
#pragma omp for schedule(static)
        for (int i = 0; i < homenr; i++)
        {
            unsigned short vcmGroup    = (group_id == nullptr ? 0 : group_id[i]);
            unsigned short freezeGroup = mdatoms.cFREEZE[i];
            for (int d = 0; d < numDimensions; d++)
            {
                if (vcm.nFreeze[freezeGroup][d] == 0)
                {
                    v[i][d] -= vcm.group_v[vcmGroup][d];
                }
            }
        }
    }
    else if (group_id == nullptr)
    { // NOLINT bugprone-branch-clone This is actually a clang-tidy bug
#pragma omp for schedule(static)
        for (int i = 0; i < homenr; i++)
        {
            for (int d = 0; d < numDimensions; d++)
            {
                v[i][d] -= vcm.group_v[0][d];
            }
        }
    }
    else
    {
#pragma omp for schedule(static)
        for (int i = 0; i < homenr; i++)
        {
            const int g = group_id[i];
            for (int d = 0; d < numDimensions; d++)
            {
                v[i][d] -= vcm.group_v[g][d];
            }
        }
    }
}

/*! \brief Remove the COM motion velocity from the velocities, correct the coordinates assuming constant acceleration
 *
 * \note This routine should be called from within an OpenMP parallel region.
 *
 * \tparam numDimensions    Correct dimensions 0 to \p numDimensions-1
 * \param[in]     homenr    The number of atoms to correct
 * \param[in]     group_id  List of VCM group ids, when nullptr is passed all atoms are assumed to be in group 0
 * \param[in,out] x         The coordinates to correct
 * \param[in,out] v         The velocities to correct
 * \param[in]     vcm       VCM data
 */
template<int numDimensions>
static void doStopComMotionAccelerationCorrection(int                      homenr,
                                                  const unsigned short*    group_id,
                                                  gmx::ArrayRef<gmx::RVec> x,
                                                  gmx::ArrayRef<gmx::RVec> v,
                                                  const t_vcm&             vcm)
{
    const real xCorrectionFactor = 0.5 * vcm.timeStep;
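    /* The factor 0.5 * timeStep follows from assuming that the removed COM
     * velocity was built up by a constant acceleration over the nstcomm
     * interval: the accumulated position drift is then
     * 0.5 * a * dt^2 = 0.5 * v_com * dt, which is what gets subtracted from x
     * below.
     */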

    // NOLINTNEXTLINE bugprone-branch-clone This is actually a clang-tidy bug
    if (group_id == nullptr)
    {
#pragma omp for schedule(static)
        for (int i = 0; i < homenr; i++)
        {
            for (int d = 0; d < numDimensions; d++)
            {
                x[i][d] -= vcm.group_v[0][d] * xCorrectionFactor;
                v[i][d] -= vcm.group_v[0][d];
            }
        }
    }
    else
    {
#pragma omp for schedule(static)
        for (int i = 0; i < homenr; i++)
        {
            const int g = group_id[i];
            for (int d = 0; d < numDimensions; d++)
            {
                x[i][d] -= vcm.group_v[g][d] * xCorrectionFactor;
                v[i][d] -= vcm.group_v[g][d];
            }
        }
    }
}

static void do_stopcm_grp(const t_mdatoms&         mdatoms,
                          gmx::ArrayRef<gmx::RVec> x,
                          gmx::ArrayRef<gmx::RVec> v,
                          const t_vcm&             vcm)
{
    if (vcm.mode == ecmNO)
    {
        return;
    }

    const int             homenr   = mdatoms.homenr;
    const unsigned short* group_id = mdatoms.cVCM;

    int gmx_unused nth = gmx_omp_nthreads_get(emntDefault);
    // homenr could be shared, but gcc-8 & gcc-9 don't agree how to write that...
    // https://www.gnu.org/software/gcc/gcc-9/porting_to.html -> OpenMP data sharing
#pragma omp parallel num_threads(nth) default(none) shared(x, v, vcm, group_id, mdatoms) \
        firstprivate(homenr)
    {
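        /* With linear or angular removal, or when no coordinates are
         * available for the acceleration-correction mode, only the velocities
         * are corrected; otherwise both x and v are corrected below.
         */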
        if (vcm.mode == ecmLINEAR || vcm.mode == ecmANGULAR
            || (vcm.mode == ecmLINEAR_ACCELERATION_CORRECTION && x.empty()))
        {
            /* Subtract linear momentum for v */
            switch (vcm.ndim)
            {
                case 1: doStopComMotionLinear<1>(mdatoms, v, vcm); break;
                case 2: doStopComMotionLinear<2>(mdatoms, v, vcm); break;
                case 3: doStopComMotionLinear<3>(mdatoms, v, vcm); break;
            }
        }
        else
        {
            GMX_ASSERT(vcm.mode == ecmLINEAR_ACCELERATION_CORRECTION,
                       "When the mode is not linear or angular, it should be acceleration "
                       "correction");
            /* Subtract linear momentum for v and x */
            switch (vcm.ndim)
            {
                case 1:
                    doStopComMotionAccelerationCorrection<1>(homenr, group_id, x, v, vcm);
                    break;
                case 2:
                    doStopComMotionAccelerationCorrection<2>(homenr, group_id, x, v, vcm);
                    break;
                case 3:
                    doStopComMotionAccelerationCorrection<3>(homenr, group_id, x, v, vcm);
                    break;
            }
        }
        if (vcm.mode == ecmANGULAR)
        {
            /* Subtract angular momentum */
            GMX_ASSERT(!x.empty(), "Need x to compute angular momentum correction");

            int g = 0;
#pragma omp for schedule(static)
            for (int i = 0; i < homenr; i++)
            {
                if (group_id)
                {
                    g = group_id[i];
                }
                /* Compute the correction to the velocity for each atom */
                rvec dv, dx;
                rvec_sub(x[i], vcm.group_x[g], dx);
                cprod(vcm.group_w[g], dx, dv);
                rvec_dec(v[i], dv);
            }
        }
    }
}
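
/* Builds the conventional inertia tensor from the accumulated m*x_a*x_b sums
 * in A and returns its inverse in B. The scaling by rfac (a third of the
 * trace) before the inversion and back by fac afterwards cancels exactly; it
 * only keeps the determinant of the matrix passed to invertMatrix() of
 * order one.
 */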
static void get_minv(tensor A, tensor B)
{
    int    m, n;
    double fac, rfac;
    tensor tmp;

    tmp[XX][XX] = A[YY][YY] + A[ZZ][ZZ];
    tmp[YY][XX] = -A[XX][YY];
    tmp[ZZ][XX] = -A[XX][ZZ];
    tmp[XX][YY] = -A[XX][YY];
    tmp[YY][YY] = A[XX][XX] + A[ZZ][ZZ];
    tmp[ZZ][YY] = -A[YY][ZZ];
    tmp[XX][ZZ] = -A[XX][ZZ];
    tmp[YY][ZZ] = -A[YY][ZZ];
    tmp[ZZ][ZZ] = A[XX][XX] + A[YY][YY];

    /* This is a hack to prevent very large determinants */
    rfac = (tmp[XX][XX] + tmp[YY][YY] + tmp[ZZ][ZZ]) / 3;
    if (rfac == 0.0)
    {
        gmx_fatal(FARGS, "Can not stop center of mass: maybe 2dimensional system");
    }
    fac = 1.0 / rfac;
    for (m = 0; (m < DIM); m++)
    {
        for (n = 0; (n < DIM); n++)
        {
            tmp[m][n] *= fac;
        }
    }
    gmx::invertMatrix(tmp, B);
    for (m = 0; (m < DIM); m++)
    {
        for (n = 0; (n < DIM); n++)
        {
            B[m][n] *= fac;
        }
    }
}

/* Processes the VCM data after reduction over ranks and prints a warning when
 * the COM velocity is large and fp != nullptr */
static void process_and_check_cm_grp(FILE* fp, t_vcm* vcm, real Temp_Max)
{
    int    m, g;
    real   ekcm, ekrot, tm, tm_1, Temp_cm;
    rvec   jcm;
    tensor Icm;

    /* First analyse the total results */
    if (vcm->mode != ecmNO)
    {
        for (g = 0; (g < vcm->nr); g++)
        {
            tm = vcm->group_mass[g];
            if (tm != 0)
            {
                tm_1 = 1.0 / tm;
                svmul(tm_1, vcm->group_p[g], vcm->group_v[g]);
            }
            /* Else it's zero anyway! */
        }
        if (vcm->mode == ecmANGULAR)
        {
            for (g = 0; (g < vcm->nr); g++)
            {
                tm = vcm->group_mass[g];
                if (tm != 0)
                {
                    tm_1 = 1.0 / tm;

                    /* Compute center of mass for this group */
                    for (m = 0; (m < DIM); m++)
                    {
                        vcm->group_x[g][m] *= tm_1;
                    }

                    /* Subtract the center of mass contribution to the
                     * angular momentum
                     */
                    cprod(vcm->group_x[g], vcm->group_v[g], jcm);
                    for (m = 0; (m < DIM); m++)
                    {
                        vcm->group_j[g][m] -= tm * jcm[m];
                    }

                    /* Subtract the center of mass contribution from the inertia
                     * tensor (this is not as trivial as it seems, but due to
                     * some cancellation we can still do it, even in parallel).
                     */
                    clear_mat(Icm);
                    update_tensor(vcm->group_x[g], tm, Icm);
                    m_sub(vcm->group_i[g], Icm, vcm->group_i[g]);
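                    /* The cancellation referred to above: the accumulated
                     * tensor holds sum_i m_i x_ia x_ib about the origin and
                     * sum_i m_i x_ia = M X_a, so subtracting M X_a X_b (what
                     * update_tensor() adds for the COM X with total mass M)
                     * leaves sum_i m_i (x_ia - X_a)(x_ib - X_b), i.e. the
                     * same sums taken relative to the group's COM.
                     */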

                    /* Compute angular velocity, using matrix operation
                     * Since J = I w
                     * we have
                     * w = I^-1 J
                     */
                    get_minv(vcm->group_i[g], Icm);
                    mvmul(Icm, vcm->group_j[g], vcm->group_w[g]);
                }
                /* Else it's zero anyway! */
            }
        }
    }
    for (g = 0; (g < vcm->nr); g++)
    {
        ekcm = 0;
        if (vcm->group_mass[g] != 0 && vcm->group_ndf[g] > 0)
        {
            for (m = 0; m < vcm->ndim; m++)
            {
                ekcm += gmx::square(vcm->group_v[g][m]);
            }

            ekcm *= 0.5 * vcm->group_mass[g];
            Temp_cm = 2 * ekcm / vcm->group_ndf[g];

            if ((Temp_cm > Temp_Max) && fp)
            {
                fprintf(fp, "Large VCM(group %s): %12.5f, %12.5f, %12.5f, Temp-cm: %12.5e\n",
                        vcm->group_name[g], vcm->group_v[g][XX], vcm->group_v[g][YY],
                        vcm->group_v[g][ZZ], Temp_cm);
            }

            if (vcm->mode == ecmANGULAR)
            {
                ekrot = 0.5 * iprod(vcm->group_j[g], vcm->group_w[g]);
                // TODO: Change absolute energy comparison to relative
                if ((ekrot > 1) && fp && vcm->integratorConservesMomentum)
                {
                    /* if we have an integrator that may not conserve momenta, skip */
                    tm = vcm->group_mass[g];
                    fprintf(fp, "Group %s with mass %12.5e, Ekrot %12.5e Det(I) = %12.5e\n",
                            vcm->group_name[g], tm, ekrot, det(vcm->group_i[g]));
                    fprintf(fp, "  COM: %12.5f %12.5f %12.5f\n", vcm->group_x[g][XX],
                            vcm->group_x[g][YY], vcm->group_x[g][ZZ]);
                    fprintf(fp, "  P:   %12.5f %12.5f %12.5f\n", vcm->group_p[g][XX],
                            vcm->group_p[g][YY], vcm->group_p[g][ZZ]);
                    fprintf(fp, "  V:   %12.5f %12.5f %12.5f\n", vcm->group_v[g][XX],
                            vcm->group_v[g][YY], vcm->group_v[g][ZZ]);
                    fprintf(fp, "  J:   %12.5f %12.5f %12.5f\n", vcm->group_j[g][XX],
                            vcm->group_j[g][YY], vcm->group_j[g][ZZ]);
                    fprintf(fp, "  w:   %12.5f %12.5f %12.5f\n", vcm->group_w[g][XX],
                            vcm->group_w[g][YY], vcm->group_w[g][ZZ]);
                    pr_rvecs(fp, 0, "Inertia tensor", vcm->group_i[g], DIM);
                }
            }
        }
    }
}
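
/* Converts the (already rank-reduced) momentum sums into per-group COM
 * velocities (and angular velocities for ecmANGULAR) and then subtracts them
 * from the local atoms. The sums are expected to have been accumulated
 * beforehand, e.g. by calc_vcm_grp().
 */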
void process_and_stopcm_grp(FILE*                    fplog,
                            t_vcm*                   vcm,
                            const t_mdatoms&         mdatoms,
                            gmx::ArrayRef<gmx::RVec> x,
                            gmx::ArrayRef<gmx::RVec> v)
{
    if (vcm->mode != ecmNO)
    {
        // TODO: Replace fixed temperature of 1 by a system value
        process_and_check_cm_grp(fplog, vcm, 1);

        do_stopcm_grp(mdatoms, x, v, *vcm);
    }
}