More const correctness
[gromacs.git] / src / gromacs / mdlib / shellfc.cpp
blob 58f6f50bf9c1415f2be3a8eb12d0362ef0347708
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2008, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 #include "gmxpre.h"
39 #include "shellfc.h"
41 #include <stdlib.h>
42 #include <string.h>
44 #include <cstdint>
46 #include <algorithm>
47 #include <array>
49 #include "gromacs/domdec/dlbtiming.h"
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/domdec/domdec_struct.h"
52 #include "gromacs/gmxlib/chargegroup.h"
53 #include "gromacs/gmxlib/network.h"
54 #include "gromacs/math/functions.h"
55 #include "gromacs/math/units.h"
56 #include "gromacs/math/vec.h"
57 #include "gromacs/math/vecdump.h"
58 #include "gromacs/mdlib/constr.h"
59 #include "gromacs/mdlib/force.h"
60 #include "gromacs/mdlib/mdrun.h"
61 #include "gromacs/mdlib/sim_util.h"
62 #include "gromacs/mdlib/vsite.h"
63 #include "gromacs/mdtypes/commrec.h"
64 #include "gromacs/mdtypes/inputrec.h"
65 #include "gromacs/mdtypes/md_enums.h"
66 #include "gromacs/mdtypes/state.h"
67 #include "gromacs/pbcutil/mshift.h"
68 #include "gromacs/pbcutil/pbc.h"
69 #include "gromacs/topology/ifunc.h"
70 #include "gromacs/topology/mtop_lookup.h"
71 #include "gromacs/topology/mtop_util.h"
72 #include "gromacs/utility/arrayref.h"
73 #include "gromacs/utility/arraysize.h"
74 #include "gromacs/utility/cstringutil.h"
75 #include "gromacs/utility/fatalerror.h"
76 #include "gromacs/utility/smalloc.h"
78 typedef struct {
79 int nnucl;
80 int shell; /* The shell id */
81 int nucl1, nucl2, nucl3; /* The nuclei connected to the shell */
82 /* gmx_bool bInterCG; */ /* Coupled to nuclei outside cg? */
83 real k; /* force constant */
84 real k_1; /* 1 over force constant */
85 rvec xold;
86 rvec fold;
87 rvec step;
88 } t_shell;
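/* A shell is relaxed by moving it towards the point where the net force on it
 * vanishes. To first order a shell held by a harmonic spring with force
 * constant k obeys f = -k*dx, so the natural steepest-descent move is
 *   x_new = x_old + f/k,
 * which is why 1/k is cached in k_1 and used as the initial per-dimension step
 * in shell_pos_sd() below. xold, fold and step keep the previous position,
 * force and step size so later iterations can refine the step from finite
 * differences. */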
90 struct gmx_shellfc_t {
91 /* Shell counts, indices, parameters and working data */
92 int nshell_gl; /* The number of shells in the system */
93 t_shell *shell_gl; /* All the shells (for DD only) */
94 int *shell_index_gl; /* Global shell index (for DD only) */
95 gmx_bool bInterCG; /* Are there inter charge-group shells? */
96 int nshell; /* The number of local shells */
97 t_shell *shell; /* The local shells */
98 int shell_nalloc; /* The allocation size of shell */
99 gmx_bool bPredict; /* Predict shell positions */
100 gmx_bool bRequireInit; /* Require initialization of shell positions */
101 int nflexcon; /* The number of flexible constraints */
103 /* Temporary arrays, should be fixed size 2 when fully converted to C++ */
104 PaddedRVecVector *x; /* Array for iterative minimization */
105 PaddedRVecVector *f; /* Array for iterative minimization */
107 /* Flexible constraint working data */
108 rvec *acc_dir; /* Acceleration direction for flexcon */
109 rvec *x_old; /* Old coordinates for flexcon */
110 int flex_nalloc; /* The allocation size of acc_dir and x_old */
111 rvec *adir_xnold; /* Work space for init_adir */
112 rvec *adir_xnew; /* Work space for init_adir */
113 int adir_nalloc; /* Work space for init_adir */
114 std::int64_t numForceEvaluations; /* Total number of force evaluations */
115 int numConvergedIterations; /* Total number of iterations that converged */
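/* Sketch of the typical call sequence from the MD loop (arguments abbreviated;
 * this is an illustration, not code from this file):
 *
 *   gmx_shellfc_t *shfc = init_shell_flexcon(fplog, mtop, nflexcon,
 *                                            nstcalcenergy, usingDomainDecomposition);
 *   make_local_shells(cr, mdatoms, shfc);    // after each (re)partitioning
 *   // each MD step:
 *   relax_shell_flexcon(fplog, cr, ..., shfc, fr, ...);
 *   done_shellfc(fplog, shfc, numSteps);     // print convergence statistics
 */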
119 static void pr_shell(FILE *fplog, int ns, t_shell s[])
121 int i;
123 fprintf(fplog, "SHELL DATA\n");
124 fprintf(fplog, "%5s %8s %5s %5s %5s\n",
125 "Shell", "Force k", "Nucl1", "Nucl2", "Nucl3");
126 for (i = 0; (i < ns); i++)
128 fprintf(fplog, "%5d %8.3f %5d", s[i].shell, 1.0/s[i].k_1, s[i].nucl1);
129 if (s[i].nnucl == 2)
131 fprintf(fplog, " %5d\n", s[i].nucl2);
133 else if (s[i].nnucl == 3)
135 fprintf(fplog, " %5d %5d\n", s[i].nucl2, s[i].nucl3);
137 else
139 fprintf(fplog, "\n");
144 /* TODO The remaining call of this function passes non-NULL mass and NULL
145 * mtop, so this routine can be simplified.
147 * The other code path supported doing prediction before the MD loop
148 * started, but even when called, the prediction was always
149 * overwritten by a subsequent call in the MD loop, so it has been
150 * removed. */
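/* The prediction moves each shell along with its bound nuclei: when velocities
 * are available the shell displacement is dt times the mass-weighted mean
 * nuclear velocity,
 *   x_shell += dt * (sum_i m_i v_i) / (sum_i m_i),
 * scaled by the fudge factor below (currently 1.0). At initialization (bInit)
 * the shell is instead placed at the mass-weighted mean nuclear position. */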
151 static void predict_shells(FILE *fplog, rvec x[], rvec v[], real dt,
152 int ns, t_shell s[],
153 real mass[], gmx_mtop_t *mtop, gmx_bool bInit)
155 int i, m, s1, n1, n2, n3;
156 real dt_1, fudge, tm, m1, m2, m3;
157 rvec *ptr;
159 /* We introduce a fudge factor for performance reasons: with this choice
160 * the initial force on the shells is about a factor of two lower than
161 * without it
163 fudge = 1.0;
165 if (bInit)
167 if (fplog)
169 fprintf(fplog, "RELAX: Using prediction for initial shell placement\n");
171 ptr = x;
172 dt_1 = 1;
174 else
176 ptr = v;
177 dt_1 = fudge*dt;
180 int molb = 0;
181 for (i = 0; (i < ns); i++)
183 s1 = s[i].shell;
184 if (bInit)
186 clear_rvec(x[s1]);
188 switch (s[i].nnucl)
190 case 1:
191 n1 = s[i].nucl1;
192 for (m = 0; (m < DIM); m++)
194 x[s1][m] += ptr[n1][m]*dt_1;
196 break;
197 case 2:
198 n1 = s[i].nucl1;
199 n2 = s[i].nucl2;
200 if (mass)
202 m1 = mass[n1];
203 m2 = mass[n2];
205 else
207 /* Not the correct masses with FE, but it is just a prediction... */
208 m1 = mtopGetAtomMass(mtop, n1, &molb);
209 m2 = mtopGetAtomMass(mtop, n2, &molb);
211 tm = dt_1/(m1+m2);
212 for (m = 0; (m < DIM); m++)
214 x[s1][m] += (m1*ptr[n1][m]+m2*ptr[n2][m])*tm;
216 break;
217 case 3:
218 n1 = s[i].nucl1;
219 n2 = s[i].nucl2;
220 n3 = s[i].nucl3;
221 if (mass)
223 m1 = mass[n1];
224 m2 = mass[n2];
225 m3 = mass[n3];
227 else
229 /* Not the correct masses with FE, but it is just a prediction... */
230 m1 = mtopGetAtomMass(mtop, n1, &molb);
231 m2 = mtopGetAtomMass(mtop, n2, &molb);
232 m3 = mtopGetAtomMass(mtop, n3, &molb);
234 tm = dt_1/(m1+m2+m3);
235 for (m = 0; (m < DIM); m++)
237 x[s1][m] += (m1*ptr[n1][m]+m2*ptr[n2][m]+m3*ptr[n3][m])*tm;
239 break;
240 default:
241 gmx_fatal(FARGS, "Shell %d has %d nuclei!", i, s[i].nnucl);
246 /*! \brief Count the different particle types in a system
248 * Routine prints a warning to stderr in case an unknown particle type
249 * is encountered.
250 * \param[in] fplog Print what we have found if not NULL
251 * \param[in] mtop Molecular topology.
252 * \returns Array holding the number of particles of a type
254 static std::array<int, eptNR> countPtypes(FILE *fplog,
255 const gmx_mtop_t *mtop)
257 std::array<int, eptNR> nptype = { { 0 } };
258 /* Count number of shells, and find their indices */
259 for (int i = 0; (i < eptNR); i++)
261 nptype[i] = 0;
264 gmx_mtop_atomloop_block_t aloopb = gmx_mtop_atomloop_block_init(mtop);
265 int nmol;
266 const t_atom *atom;
267 while (gmx_mtop_atomloop_block_next(aloopb, &atom, &nmol))
269 switch (atom->ptype)
271 case eptAtom:
272 case eptVSite:
273 case eptShell:
274 nptype[atom->ptype] += nmol;
275 break;
276 default:
277 fprintf(stderr, "Warning: unsupported particle type %d in countPtypes\n",
278 static_cast<int>(atom->ptype));
281 if (fplog)
283 /* Print the number of each particle type */
284 int n = 0;
285 for (const auto &i : nptype)
287 if (i != 0)
289 fprintf(fplog, "There are: %d %ss\n", i, ptype_str[n]);
291 n++;
294 return nptype;
297 gmx_shellfc_t *init_shell_flexcon(FILE *fplog,
298 const gmx_mtop_t *mtop, int nflexcon,
299 int nstcalcenergy,
300 bool usingDomainDecomposition)
302 gmx_shellfc_t *shfc;
303 t_shell *shell;
304 int *shell_index = nullptr, *at2cg;
305 const t_atom *atom;
307 int ns, nshell, nsi;
308 int i, j, type, a_offset, cg, mol, ftype, nra;
309 real qS, alpha;
310 int aS, aN = 0; /* Shell and nucleus */
311 int bondtypes[] = { F_BONDS, F_HARMONIC, F_CUBICBONDS, F_POLARIZATION, F_ANHARM_POL, F_WATER_POL };
312 #define NBT asize(bondtypes)
313 t_iatom *ia;
314 gmx_mtop_atomloop_all_t aloop;
315 const gmx_ffparams_t *ffparams;
317 std::array<int, eptNR> n = countPtypes(fplog, mtop);
318 nshell = n[eptShell];
320 if (nshell == 0 && nflexcon == 0)
322 /* We're not doing shells or flexible constraints */
323 return nullptr;
326 snew(shfc, 1);
327 shfc->x = new PaddedRVecVector[2] {};
328 shfc->f = new PaddedRVecVector[2] {};
329 shfc->nflexcon = nflexcon;
331 if (nshell == 0)
333 /* Only flexible constraints, no shells.
334 * Note that make_local_shells() does not need to be called.
336 shfc->nshell = 0;
337 shfc->bPredict = FALSE;
339 return shfc;
342 if (nstcalcenergy != 1)
344 gmx_fatal(FARGS, "You have nstcalcenergy set to a value (%d) that is different from 1.\nThis is not supported in combination with shell particles.\nPlease make a new tpr file.", nstcalcenergy);
346 if (usingDomainDecomposition)
348 gmx_fatal(FARGS, "Shell particles are not implemented with domain decomposition, use a single rank");
351 /* We have shells: fill the shell data structure */
353 /* Global system sized array, this should be avoided */
354 snew(shell_index, mtop->natoms);
356 aloop = gmx_mtop_atomloop_all_init(mtop);
357 nshell = 0;
358 while (gmx_mtop_atomloop_all_next(aloop, &i, &atom))
360 if (atom->ptype == eptShell)
362 shell_index[i] = nshell++;
366 snew(shell, nshell);
368 /* Initiate the shell structures */
369 for (i = 0; (i < nshell); i++)
371 shell[i].shell = -1;
372 shell[i].nnucl = 0;
373 shell[i].nucl1 = -1;
374 shell[i].nucl2 = -1;
375 shell[i].nucl3 = -1;
376 /* shell[i].bInterCG=FALSE; */
377 shell[i].k_1 = 0;
378 shell[i].k = 0;
381 ffparams = &mtop->ffparams;
383 /* Now fill the structures */
384 shfc->bInterCG = FALSE;
385 ns = 0;
386 a_offset = 0;
387 for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
389 const gmx_molblock_t *molb = &mtop->molblock[mb];
390 const gmx_moltype_t *molt = &mtop->moltype[molb->type];
391 const t_block *cgs = &molt->cgs;
393 snew(at2cg, molt->atoms.nr);
394 for (cg = 0; cg < cgs->nr; cg++)
396 for (i = cgs->index[cg]; i < cgs->index[cg+1]; i++)
398 at2cg[i] = cg;
402 atom = molt->atoms.atom;
403 for (mol = 0; mol < molb->nmol; mol++)
405 for (j = 0; (j < NBT); j++)
407 ia = molt->ilist[bondtypes[j]].iatoms;
408 for (i = 0; (i < molt->ilist[bondtypes[j]].nr); )
410 type = ia[0];
411 ftype = ffparams->functype[type];
412 nra = interaction_function[ftype].nratoms;
414 /* Check whether we have a bond with a shell */
415 aS = -1;
417 switch (bondtypes[j])
419 case F_BONDS:
420 case F_HARMONIC:
421 case F_CUBICBONDS:
422 case F_POLARIZATION:
423 case F_ANHARM_POL:
424 if (atom[ia[1]].ptype == eptShell)
426 aS = ia[1];
427 aN = ia[2];
429 else if (atom[ia[2]].ptype == eptShell)
431 aS = ia[2];
432 aN = ia[1];
434 break;
435 case F_WATER_POL:
436 aN = ia[4]; /* Dummy */
437 aS = ia[5]; /* Shell */
438 break;
439 default:
440 gmx_fatal(FARGS, "Death Horror: %s, %d", __FILE__, __LINE__);
443 if (aS != -1)
445 qS = atom[aS].q;
447 /* Check whether one of the particles is a shell... */
448 nsi = shell_index[a_offset+aS];
449 if ((nsi < 0) || (nsi >= nshell))
451 gmx_fatal(FARGS, "nsi is %d should be within 0 - %d. aS = %d",
452 nsi, nshell, aS);
454 if (shell[nsi].shell == -1)
456 shell[nsi].shell = a_offset + aS;
457 ns++;
459 else if (shell[nsi].shell != a_offset+aS)
461 gmx_fatal(FARGS, "Weird stuff in %s, %d", __FILE__, __LINE__);
464 if (shell[nsi].nucl1 == -1)
466 shell[nsi].nucl1 = a_offset + aN;
468 else if (shell[nsi].nucl2 == -1)
470 shell[nsi].nucl2 = a_offset + aN;
472 else if (shell[nsi].nucl3 == -1)
474 shell[nsi].nucl3 = a_offset + aN;
476 else
478 if (fplog)
480 pr_shell(fplog, ns, shell);
482 gmx_fatal(FARGS, "Can not handle more than three bonds per shell\n");
484 if (at2cg[aS] != at2cg[aN])
486 /* shell[nsi].bInterCG = TRUE; */
487 shfc->bInterCG = TRUE;
490 switch (bondtypes[j])
492 case F_BONDS:
493 case F_HARMONIC:
494 shell[nsi].k += ffparams->iparams[type].harmonic.krA;
495 break;
496 case F_CUBICBONDS:
497 shell[nsi].k += ffparams->iparams[type].cubic.kb;
498 break;
499 case F_POLARIZATION:
500 case F_ANHARM_POL:
501 if (!gmx_within_tol(qS, atom[aS].qB, GMX_REAL_EPS*10))
503 gmx_fatal(FARGS, "polarize can not be used with qA(%e) != qB(%e) for atom %d of molecule block %d", qS, atom[aS].qB, aS+1, mb+1);
505 shell[nsi].k += gmx::square(qS)*ONE_4PI_EPS0/
506 ffparams->iparams[type].polarize.alpha;
507 break;
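/* For polarization terms the stored parameter is the isotropic polarizability
 * alpha rather than a spring constant; the equivalent harmonic force constant
 * of the charge-on-spring model is
 *   k = q_S^2 / (4 pi eps0 alpha),
 * which is what the expression above computes (ONE_4PI_EPS0 = 1/(4 pi eps0)
 * in GROMACS units). The water_pol case below uses the same relation with the
 * three polarizability components averaged. */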
508 case F_WATER_POL:
509 if (!gmx_within_tol(qS, atom[aS].qB, GMX_REAL_EPS*10))
511 gmx_fatal(FARGS, "water_pol can not be used with qA(%e) != qB(%e) for atom %d of molecule block %d", qS, atom[aS].qB, aS+1, mb+1);
513 alpha = (ffparams->iparams[type].wpol.al_x+
514 ffparams->iparams[type].wpol.al_y+
515 ffparams->iparams[type].wpol.al_z)/3.0;
516 shell[nsi].k += gmx::square(qS)*ONE_4PI_EPS0/alpha;
517 break;
518 default:
519 gmx_fatal(FARGS, "Death Horror: %s, %d", __FILE__, __LINE__);
521 shell[nsi].nnucl++;
523 ia += nra+1;
524 i += nra+1;
527 a_offset += molt->atoms.nr;
529 /* Done with this molecule type */
530 sfree(at2cg);
533 /* Verify whether it's all correct */
534 if (ns != nshell)
536 gmx_fatal(FARGS, "Something weird with shells. They may not be bonded to something");
539 for (i = 0; (i < ns); i++)
541 shell[i].k_1 = 1.0/shell[i].k;
544 if (debug)
546 pr_shell(debug, ns, shell);
550 shfc->nshell_gl = ns;
551 shfc->shell_gl = shell;
552 shfc->shell_index_gl = shell_index;
554 shfc->bPredict = (getenv("GMX_NOPREDICT") == nullptr);
555 shfc->bRequireInit = FALSE;
556 if (!shfc->bPredict)
558 if (fplog)
560 fprintf(fplog, "\nWill never predict shell positions\n");
563 else
565 shfc->bRequireInit = (getenv("GMX_REQUIRE_SHELL_INIT") != nullptr);
566 if (shfc->bRequireInit && fplog)
568 fprintf(fplog, "\nWill always initiate shell positions\n");
572 if (shfc->bPredict)
574 if (shfc->bInterCG)
576 if (fplog)
578 fprintf(fplog, "\nNOTE: there are shells that are connected to particles outside their own charge group; shell positions will not be predicted during the run\n\n");
580 /* Prediction improves performance, so we should implement either:
581 * 1. communication for the atoms needed for prediction
582 * 2. prediction using the velocities of shells; currently the
583 * shell velocities are zeroed, it's a bit tricky to keep
584 * track of the shell displacements and thus the velocity.
586 shfc->bPredict = FALSE;
590 return shfc;
593 void make_local_shells(const t_commrec *cr,
594 const t_mdatoms *md,
595 gmx_shellfc_t *shfc)
597 t_shell *shell;
598 int a0, a1, *ind, nshell, i;
599 gmx_domdec_t *dd = nullptr;
601 if (DOMAINDECOMP(cr))
603 dd = cr->dd;
604 a0 = 0;
605 a1 = dd->nat_home;
607 else
609 /* Single node: we need all shells, just copy the pointer */
610 shfc->nshell = shfc->nshell_gl;
611 shfc->shell = shfc->shell_gl;
613 return;
616 ind = shfc->shell_index_gl;
618 nshell = 0;
619 shell = shfc->shell;
620 for (i = a0; i < a1; i++)
622 if (md->ptype[i] == eptShell)
624 if (nshell+1 > shfc->shell_nalloc)
626 shfc->shell_nalloc = over_alloc_dd(nshell+1);
627 srenew(shell, shfc->shell_nalloc);
629 if (dd)
631 shell[nshell] = shfc->shell_gl[ind[dd->gatindex[i]]];
633 else
635 shell[nshell] = shfc->shell_gl[ind[i]];
638 /* With inter-cg shells we cannot do shell prediction,
639 * so we do not need the nuclei numbers.
641 if (!shfc->bInterCG)
643 shell[nshell].nucl1 = i + shell[nshell].nucl1 - shell[nshell].shell;
644 if (shell[nshell].nnucl > 1)
646 shell[nshell].nucl2 = i + shell[nshell].nucl2 - shell[nshell].shell;
648 if (shell[nshell].nnucl > 2)
650 shell[nshell].nucl3 = i + shell[nshell].nucl3 - shell[nshell].shell;
653 shell[nshell].shell = i;
654 nshell++;
658 shfc->nshell = nshell;
659 shfc->shell = shell;
662 static void do_1pos(rvec xnew, const rvec xold, const rvec f, real step)
664 real xo, yo, zo;
665 real dx, dy, dz;
667 xo = xold[XX];
668 yo = xold[YY];
669 zo = xold[ZZ];
671 dx = f[XX]*step;
672 dy = f[YY]*step;
673 dz = f[ZZ]*step;
675 xnew[XX] = xo+dx;
676 xnew[YY] = yo+dy;
677 xnew[ZZ] = zo+dz;
680 static void do_1pos3(rvec xnew, const rvec xold, const rvec f, const rvec step)
682 real xo, yo, zo;
683 real dx, dy, dz;
685 xo = xold[XX];
686 yo = xold[YY];
687 zo = xold[ZZ];
689 dx = f[XX]*step[XX];
690 dy = f[YY]*step[YY];
691 dz = f[ZZ]*step[ZZ];
693 xnew[XX] = xo+dx;
694 xnew[YY] = yo+dy;
695 xnew[ZZ] = zo+dz;
698 static void directional_sd(gmx::ArrayRef<const gmx::RVec> xold,
699 gmx::ArrayRef<gmx::RVec> xnew,
700 const rvec acc_dir[], int homenr, real step)
702 const rvec *xo = as_rvec_array(xold.data());
703 rvec *xn = as_rvec_array(xnew.data());
705 for (int i = 0; i < homenr; i++)
707 do_1pos(xn[i], xo[i], acc_dir[i], step);
711 static void shell_pos_sd(gmx::ArrayRef<const gmx::RVec> xcur,
712 gmx::ArrayRef<gmx::RVec> xnew,
713 gmx::ArrayRef<const gmx::RVec> f,
714 int ns, t_shell s[], int count)
716 const real step_scale_min = 0.8,
717 step_scale_increment = 0.2,
718 step_scale_max = 1.2,
719 step_scale_multiple = (step_scale_max - step_scale_min) / step_scale_increment;
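/* With these values (min 0.8, increment 0.2, max 1.2, multiple 2.0) the update
 * below, new_step = 0.8*step + 0.2*min(2*step, max(k_est, 0)), keeps the step
 * unchanged when the secant estimate k_est equals the current step, shrinks it
 * towards 0.8*step when k_est is small, and grows it by at most a factor 1.2
 * when k_est is large. */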
720 int i, shell, d;
721 real dx, df, k_est;
722 const real zero = 0;
723 #ifdef PRINT_STEP
724 real step_min, step_max;
726 step_min = 1e30;
727 step_max = 0;
728 #endif
729 for (i = 0; (i < ns); i++)
731 shell = s[i].shell;
732 if (count == 1)
734 for (d = 0; d < DIM; d++)
736 s[i].step[d] = s[i].k_1;
737 #ifdef PRINT_STEP
738 step_min = std::min(step_min, s[i].step[d]);
739 step_max = std::max(step_max, s[i].step[d]);
740 #endif
743 else
745 for (d = 0; d < DIM; d++)
747 dx = xcur[shell][d] - s[i].xold[d];
748 df = f[shell][d] - s[i].fold[d];
749 /* -dx/df gets used to generate an interpolated value, but would
750 * cause a NaN if df were binary-equal to zero. Values close to
751 * zero won't cause problems (because of the min() and max()), so
752 * just testing for binary inequality is OK. */
753 if (zero != df)
755 k_est = -dx/df;
756 /* Scale the step size by a factor interpolated from
757 * step_scale_min to step_scale_max, as k_est goes from 0 to
758 * step_scale_multiple * s[i].step[d] */
759 s[i].step[d] =
760 step_scale_min * s[i].step[d] +
761 step_scale_increment * std::min(step_scale_multiple * s[i].step[d], std::max(k_est, zero));
763 else
765 /* Here 0 == df */
766 if (gmx_numzero(dx)) /* 0 == dx */
768 /* Likely this will never happen, but if it does just
769 * don't scale the step. */
771 else /* 0 != dx */
773 s[i].step[d] *= step_scale_max;
776 #ifdef PRINT_STEP
777 step_min = std::min(step_min, s[i].step[d]);
778 step_max = std::max(step_max, s[i].step[d]);
779 #endif
782 copy_rvec(xcur [shell], s[i].xold);
783 copy_rvec(f[shell], s[i].fold);
785 do_1pos3(xnew[shell], xcur[shell], f[shell], s[i].step);
787 if (gmx_debug_at)
789 fprintf(debug, "shell[%d] = %d\n", i, shell);
790 pr_rvec(debug, 0, "fshell", f[shell], DIM, TRUE);
791 pr_rvec(debug, 0, "xold", xcur[shell], DIM, TRUE);
792 pr_rvec(debug, 0, "step", s[i].step, DIM, TRUE);
793 pr_rvec(debug, 0, "xnew", xnew[shell], DIM, TRUE);
796 #ifdef PRINT_STEP
797 printf("step %.3e %.3e\n", step_min, step_max);
798 #endif
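/* decrease_step_size() is the fall-back when a trial move increased the RMS
 * force: all per-shell step sizes are scaled down by 0.8 before retrying. */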
801 static void decrease_step_size(int nshell, t_shell s[])
803 int i;
805 for (i = 0; i < nshell; i++)
807 svmul(0.8, s[i].step, s[i].step);
811 static void print_epot(FILE *fp, gmx_int64_t mdstep, int count, real epot, real df,
812 int ndir, real sf_dir)
814 char buf[22];
816 fprintf(fp, "MDStep=%5s/%2d EPot: %12.8e, rmsF: %6.2e",
817 gmx_step_str(mdstep, buf), count, epot, df);
818 if (ndir)
820 fprintf(fp, ", dir. rmsF: %6.2e\n", std::sqrt(sf_dir/ndir));
822 else
824 fprintf(fp, "\n");
829 static real rms_force(const t_commrec *cr, gmx::ArrayRef<const gmx::RVec> force, int ns, t_shell s[],
830 int ndir, real *sf_dir, real *Epot)
832 double buf[4];
833 const rvec *f = as_rvec_array(force.data());
835 buf[0] = *sf_dir;
836 for (int i = 0; i < ns; i++)
838 int shell = s[i].shell;
839 buf[0] += norm2(f[shell]);
841 int ntot = ns;
843 if (PAR(cr))
845 buf[1] = ntot;
846 buf[2] = *sf_dir;
847 buf[3] = *Epot;
848 gmx_sumd(4, buf, cr);
849 ntot = (int)(buf[1] + 0.5);
850 *sf_dir = buf[2];
851 *Epot = buf[3];
853 ntot += ndir;
855 return (ntot ? std::sqrt(buf[0]/ntot) : 0);
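/* rms_force() returns sqrt( (sum over local shells |f|^2 + sf_dir) /
 * (nshell + ndir) ), where sf_dir accumulates the flexible-constraint
 * contribution. With more than one rank the force sum, the shell count,
 * sf_dir and Epot are reduced over the communicator first. */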
858 static void check_pbc(FILE *fp, gmx::ArrayRef<gmx::RVec> x, int shell)
860 int m, now;
862 now = shell-4;
863 for (m = 0; (m < DIM); m++)
865 if (fabs(x[shell][m]-x[now][m]) > 0.3)
867 pr_rvecs(fp, 0, "SHELL-X", as_rvec_array(x.data())+now, 5);
868 break;
873 static void dump_shells(FILE *fp, gmx::ArrayRef<gmx::RVec> x, gmx::ArrayRef<gmx::RVec> f, real ftol, int ns, t_shell s[])
875 int i, shell;
876 real ft2, ff2;
878 ft2 = gmx::square(ftol);
880 for (i = 0; (i < ns); i++)
882 shell = s[i].shell;
883 ff2 = iprod(f[shell], f[shell]);
884 if (ff2 > ft2)
886 fprintf(fp, "SHELL %5d, force %10.5f %10.5f %10.5f, |f| %10.5f\n",
887 shell, f[shell][XX], f[shell][YY], f[shell][ZZ], std::sqrt(ff2));
889 check_pbc(fp, x, shell);
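/* init_adir() computes the acceleration direction used for the directional
 * steepest descent on flexible constraints. Roughly: it builds a backward
 * position (xnold) and a forward, Verlet-like position (xnew), constrains
 * both, and from the difference extracts the acceleration component that is
 * compatible with the constraints; the final constrain() call projects it
 * onto the old bond directions (econqDeriv_FlexCon). */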
893 static void init_adir(FILE *log,
894 gmx_shellfc_t *shfc,
895 gmx::Constraints *constr,
896 const t_idef *idef,
897 const t_inputrec *ir,
898 const t_commrec *cr,
899 const gmx_multisim_t *ms,
900 int dd_ac1,
901 gmx_int64_t step,
902 const t_mdatoms *md,
903 int end,
904 rvec *x_old,
905 rvec *x_init,
906 rvec *x,
907 rvec *f,
908 rvec *acc_dir,
909 gmx_bool bMolPBC,
910 matrix box,
911 gmx::ArrayRef<const real> lambda,
912 real *dvdlambda,
913 t_nrnb *nrnb)
915 rvec *xnold, *xnew;
916 double dt, w_dt;
917 int n, d;
918 unsigned short *ptype;
920 if (DOMAINDECOMP(cr))
922 n = dd_ac1;
924 else
926 n = end;
928 if (n > shfc->adir_nalloc)
930 shfc->adir_nalloc = over_alloc_dd(n);
931 srenew(shfc->adir_xnold, shfc->adir_nalloc);
932 srenew(shfc->adir_xnew, shfc->adir_nalloc);
934 xnold = shfc->adir_xnold;
935 xnew = shfc->adir_xnew;
937 ptype = md->ptype;
939 dt = ir->delta_t;
941 /* Does NOT work with freeze or acceleration groups (yet) */
942 for (n = 0; n < end; n++)
944 w_dt = md->invmass[n]*dt;
946 for (d = 0; d < DIM; d++)
948 if ((ptype[n] != eptVSite) && (ptype[n] != eptShell))
950 xnold[n][d] = x[n][d] - (x_init[n][d] - x_old[n][d]);
951 xnew[n][d] = 2*x[n][d] - x_old[n][d] + f[n][d]*w_dt*dt;
953 else
955 xnold[n][d] = x[n][d];
956 xnew[n][d] = x[n][d];
960 constrain(log, FALSE, FALSE, constr, idef, ir, cr, ms, step, 0, 1.0, md,
961 x, xnold, nullptr, bMolPBC, box,
962 lambda[efptBONDED], &(dvdlambda[efptBONDED]),
963 nullptr, nullptr, nrnb, gmx::econqCoord);
964 constrain(log, FALSE, FALSE, constr, idef, ir, cr, ms, step, 0, 1.0, md,
965 x, xnew, nullptr, bMolPBC, box,
966 lambda[efptBONDED], &(dvdlambda[efptBONDED]),
967 nullptr, nullptr, nrnb, gmx::econqCoord);
969 for (n = 0; n < end; n++)
971 for (d = 0; d < DIM; d++)
973 xnew[n][d] =
974 -(2*x[n][d]-xnold[n][d]-xnew[n][d])/gmx::square(dt)
975 - f[n][d]*md->invmass[n];
977 clear_rvec(acc_dir[n]);
980 /* Project the acceleration on the old bond directions */
981 constrain(log, FALSE, FALSE, constr, idef, ir, cr, ms, step, 0, 1.0, md,
982 x_old, xnew, acc_dir, bMolPBC, box,
983 lambda[efptBONDED], &(dvdlambda[efptBONDED]),
984 nullptr, nullptr, nrnb, gmx::econqDeriv_FlexCon);
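/* relax_shell_flexcon() performs the per-step self-consistent minimization:
 * optionally predict shell positions, evaluate forces, then repeat
 * steepest-descent moves of the shells (and directional SD for the flexible
 * constraints) until the RMS force on the shells drops below inputrec->em_tol
 * or inputrec->niter iterations have been done. Two position/force buffers
 * (Min/Try) are kept: a trial that lowers the RMS force becomes the new Min,
 * otherwise the step sizes are reduced and the trial is repeated. */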
987 void relax_shell_flexcon(FILE *fplog,
988 const t_commrec *cr,
989 const gmx_multisim_t *ms,
990 gmx_bool bVerbose,
991 gmx_int64_t mdstep,
992 const t_inputrec *inputrec,
993 gmx_bool bDoNS,
994 int force_flags,
995 gmx_localtop_t *top,
996 gmx::Constraints *constr,
997 gmx_enerdata_t *enerd,
998 t_fcdata *fcd,
999 t_state *state,
1000 gmx::PaddedArrayRef<gmx::RVec> f,
1001 tensor force_vir,
1002 const t_mdatoms *md,
1003 t_nrnb *nrnb,
1004 gmx_wallcycle_t wcycle,
1005 t_graph *graph,
1006 const gmx_groups_t *groups,
1007 gmx_shellfc_t *shfc,
1008 t_forcerec *fr,
1009 double t,
1010 rvec mu_tot,
1011 const gmx_vsite_t *vsite,
1012 DdOpenBalanceRegionBeforeForceComputation ddOpenBalanceRegion,
1013 DdCloseBalanceRegionAfterForceComputation ddCloseBalanceRegion)
1015 int nshell;
1016 t_shell *shell;
1017 const t_idef *idef;
1018 rvec *acc_dir = nullptr, *x_old = nullptr;
1019 real Epot[2], df[2];
1020 real sf_dir, invdt;
1021 real ftol, dum = 0;
1022 char sbuf[22];
1023 gmx_bool bCont, bInit, bConverged;
1024 int nat, dd_ac0, dd_ac1 = 0, i;
1025 int homenr = md->homenr, end = homenr, cg0, cg1;
1026 int nflexcon, number_steps, d, Min = 0, count = 0;
1027 #define Try (1-Min) /* At start Try = 1 */
1029 bCont = (mdstep == inputrec->init_step) && inputrec->bContinuation;
1030 bInit = (mdstep == inputrec->init_step) || shfc->bRequireInit;
1031 ftol = inputrec->em_tol;
1032 number_steps = inputrec->niter;
1033 nshell = shfc->nshell;
1034 shell = shfc->shell;
1035 nflexcon = shfc->nflexcon;
1037 idef = &top->idef;
1039 if (DOMAINDECOMP(cr))
1041 nat = dd_natoms_vsite(cr->dd);
1042 if (nflexcon > 0)
1044 dd_get_constraint_range(cr->dd, &dd_ac0, &dd_ac1);
1045 nat = std::max(nat, dd_ac1);
1048 else
1050 nat = state->natoms;
1053 for (i = 0; (i < 2); i++)
1055 shfc->x[i].resize(gmx::paddedRVecVectorSize(nat));
1056 shfc->f[i].resize(gmx::paddedRVecVectorSize(nat));
1059 /* Create views that we can swap */
1060 gmx::PaddedArrayRef<gmx::RVec> pos[2];
1061 gmx::PaddedArrayRef<gmx::RVec> force[2];
1062 for (i = 0; (i < 2); i++)
1064 pos[i] = shfc->x[i];
1065 force[i] = shfc->f[i];
1068 if (bDoNS && inputrec->ePBC != epbcNONE && !DOMAINDECOMP(cr))
1070 /* This is the only time where the coordinates are used
1071 * before do_force is called, which normally puts all
1072 * charge groups in the box.
1074 if (inputrec->cutoff_scheme == ecutsVERLET)
1076 auto xRef = makeArrayRef(state->x);
1077 put_atoms_in_box_omp(fr->ePBC, state->box, xRef.subArray(0, md->homenr));
1079 else
1081 cg0 = 0;
1082 cg1 = top->cgs.nr;
1083 put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, state->box,
1084 &(top->cgs), as_rvec_array(state->x.data()), fr->cg_cm);
1087 if (graph)
1089 mk_mshift(fplog, graph, fr->ePBC, state->box, as_rvec_array(state->x.data()));
1093 /* After this all coordinate arrays will contain whole charge groups */
1094 if (graph)
1096 shift_self(graph, state->box, as_rvec_array(state->x.data()));
1099 if (nflexcon)
1101 if (nat > shfc->flex_nalloc)
1103 shfc->flex_nalloc = over_alloc_dd(nat);
1104 srenew(shfc->acc_dir, shfc->flex_nalloc);
1105 srenew(shfc->x_old, shfc->flex_nalloc);
1107 acc_dir = shfc->acc_dir;
1108 x_old = shfc->x_old;
1109 for (i = 0; i < homenr; i++)
1111 for (d = 0; d < DIM; d++)
1113 shfc->x_old[i][d] =
1114 state->x[i][d] - state->v[i][d]*inputrec->delta_t;
1119 /* Do a prediction of the shell positions, when appropriate.
1120 * Without velocities (EM, NM, BD) we only do initial prediction.
1122 if (shfc->bPredict && !bCont && (EI_STATE_VELOCITY(inputrec->eI) || bInit))
1124 predict_shells(fplog, as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), inputrec->delta_t, nshell, shell,
1125 md->massT, nullptr, bInit);
1128 /* do_force expects the charge groups to be in the box */
1129 if (graph)
1131 unshift_self(graph, state->box, as_rvec_array(state->x.data()));
1134 /* Calculate the forces first time around */
1135 if (gmx_debug_at)
1137 pr_rvecs(debug, 0, "x b4 do_force", as_rvec_array(state->x.data()), homenr);
1139 do_force(fplog, cr, ms, inputrec, mdstep, nrnb, wcycle, top, groups,
1140 state->box, state->x, &state->hist,
1141 force[Min], force_vir, md, enerd, fcd,
1142 state->lambda, graph,
1143 fr, vsite, mu_tot, t, nullptr,
1144 (bDoNS ? GMX_FORCE_NS : 0) | force_flags,
1145 ddOpenBalanceRegion, ddCloseBalanceRegion);
1147 sf_dir = 0;
1148 if (nflexcon)
1150 init_adir(fplog, shfc,
1151 constr, idef, inputrec, cr, ms, dd_ac1, mdstep, md, end,
1152 shfc->x_old, as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), as_rvec_array(force[Min].data()),
1153 shfc->acc_dir,
1154 fr->bMolPBC, state->box, state->lambda, &dum, nrnb);
1156 for (i = 0; i < end; i++)
1158 sf_dir += md->massT[i]*norm2(shfc->acc_dir[i]);
1162 Epot[Min] = enerd->term[F_EPOT];
1164 df[Min] = rms_force(cr, shfc->f[Min], nshell, shell, nflexcon, &sf_dir, &Epot[Min]);
1165 df[Try] = 0;
1166 if (debug)
1168 fprintf(debug, "df = %g %g\n", df[Min], df[Try]);
1171 if (gmx_debug_at)
1173 pr_rvecs(debug, 0, "force0", as_rvec_array(force[Min].data()), md->nr);
1176 if (nshell+nflexcon > 0)
1178 /* Copy x to pos[Min] & pos[Try]: during minimization only the
1179 * shell positions are updated, therefore the other particles must
1180 * be set here.
1182 pos[Min] = state->x;
1183 pos[Try] = state->x;
1186 if (bVerbose && MASTER(cr))
1188 print_epot(stdout, mdstep, 0, Epot[Min], df[Min], nflexcon, sf_dir);
1191 if (debug)
1193 fprintf(debug, "%17s: %14.10e\n",
1194 interaction_function[F_EKIN].longname, enerd->term[F_EKIN]);
1195 fprintf(debug, "%17s: %14.10e\n",
1196 interaction_function[F_EPOT].longname, enerd->term[F_EPOT]);
1197 fprintf(debug, "%17s: %14.10e\n",
1198 interaction_function[F_ETOT].longname, enerd->term[F_ETOT]);
1199 fprintf(debug, "SHELLSTEP %s\n", gmx_step_str(mdstep, sbuf));
1202 /* First check whether we should do shells, or whether the force is
1203 * low enough even without minimization.
1205 bConverged = (df[Min] < ftol);
1207 for (count = 1; (!(bConverged) && (count < number_steps)); count++)
1209 if (vsite)
1211 construct_vsites(vsite, as_rvec_array(pos[Min].data()),
1212 inputrec->delta_t, as_rvec_array(state->v.data()),
1213 idef->iparams, idef->il,
1214 fr->ePBC, fr->bMolPBC, cr, state->box);
1217 if (nflexcon)
1219 init_adir(fplog, shfc,
1220 constr, idef, inputrec, cr, ms, dd_ac1, mdstep, md, end,
1221 x_old, as_rvec_array(state->x.data()), as_rvec_array(pos[Min].data()), as_rvec_array(force[Min].data()), acc_dir,
1222 fr->bMolPBC, state->box, state->lambda, &dum, nrnb);
1224 directional_sd(pos[Min], pos[Try], acc_dir, end, fr->fc_stepsize);
1227 /* New positions, Steepest descent */
1228 shell_pos_sd(pos[Min], pos[Try], force[Min], nshell, shell, count);
1230 /* do_force expects the charge groups to be in the box */
1231 if (graph)
1233 unshift_self(graph, state->box, as_rvec_array(pos[Try].data()));
1236 if (gmx_debug_at)
1238 pr_rvecs(debug, 0, "RELAX: pos[Min] ", as_rvec_array(pos[Min].data()), homenr);
1239 pr_rvecs(debug, 0, "RELAX: pos[Try] ", as_rvec_array(pos[Try].data()), homenr);
1241 /* Try the new positions */
1242 do_force(fplog, cr, ms, inputrec, 1, nrnb, wcycle,
1243 top, groups, state->box, pos[Try], &state->hist,
1244 force[Try], force_vir,
1245 md, enerd, fcd, state->lambda, graph,
1246 fr, vsite, mu_tot, t, nullptr,
1247 force_flags,
1248 ddOpenBalanceRegion, ddCloseBalanceRegion);
1250 if (gmx_debug_at)
1252 pr_rvecs(debug, 0, "RELAX: force[Min]", as_rvec_array(force[Min].data()), homenr);
1253 pr_rvecs(debug, 0, "RELAX: force[Try]", as_rvec_array(force[Try].data()), homenr);
1255 sf_dir = 0;
1256 if (nflexcon)
1258 init_adir(fplog, shfc,
1259 constr, idef, inputrec, cr, ms, dd_ac1, mdstep, md, end,
1260 x_old, as_rvec_array(state->x.data()), as_rvec_array(pos[Try].data()), as_rvec_array(force[Try].data()), acc_dir,
1261 fr->bMolPBC, state->box, state->lambda, &dum, nrnb);
1263 for (i = 0; i < end; i++)
1265 sf_dir += md->massT[i]*norm2(acc_dir[i]);
1269 Epot[Try] = enerd->term[F_EPOT];
1271 df[Try] = rms_force(cr, force[Try], nshell, shell, nflexcon, &sf_dir, &Epot[Try]);
1273 if (debug)
1275 fprintf(debug, "df = %g %g\n", df[Min], df[Try]);
1278 if (debug)
1280 if (gmx_debug_at)
1282 pr_rvecs(debug, 0, "F na do_force", as_rvec_array(force[Try].data()), homenr);
1284 if (gmx_debug_at)
1286 fprintf(debug, "SHELL ITER %d\n", count);
1287 dump_shells(debug, pos[Try], force[Try], ftol, nshell, shell);
1291 if (bVerbose && MASTER(cr))
1293 print_epot(stdout, mdstep, count, Epot[Try], df[Try], nflexcon, sf_dir);
1296 bConverged = (df[Try] < ftol);
1298 if ((df[Try] < df[Min]))
1300 if (debug)
1302 fprintf(debug, "Swapping Min and Try\n");
1304 if (nflexcon)
1306 /* Correct the velocities for the flexible constraints */
1307 invdt = 1/inputrec->delta_t;
1308 for (i = 0; i < end; i++)
1310 for (d = 0; d < DIM; d++)
1312 state->v[i][d] += (pos[Try][i][d] - pos[Min][i][d])*invdt;
1316 Min = Try;
1318 else
1320 decrease_step_size(nshell, shell);
1323 shfc->numForceEvaluations += count;
1324 if (bConverged)
1326 shfc->numConvergedIterations++;
1328 if (MASTER(cr) && !(bConverged))
1330 /* Note that the energies and virial are incorrect when not converged */
1331 if (fplog)
1333 fprintf(fplog,
1334 "step %s: EM did not converge in %d iterations, RMS force %.3f\n",
1335 gmx_step_str(mdstep, sbuf), number_steps, df[Min]);
1337 fprintf(stderr,
1338 "step %s: EM did not converge in %d iterations, RMS force %.3f\n",
1339 gmx_step_str(mdstep, sbuf), number_steps, df[Min]);
1342 /* Copy back the coordinates and the forces */
1343 std::copy(pos[Min].begin(), pos[Min].end(), state->x.begin());
1344 std::copy(force[Min].begin(), force[Min].end(), f.begin());
1347 void done_shellfc(FILE *fplog, gmx_shellfc_t *shfc, gmx_int64_t numSteps)
1349 if (shfc && fplog && numSteps > 0)
1351 double numStepsAsDouble = static_cast<double>(numSteps);
1352 fprintf(fplog, "Fraction of iterations that converged: %.2f %%\n",
1353 (shfc->numConvergedIterations*100.0)/numStepsAsDouble);
1354 fprintf(fplog, "Average number of force evaluations per MD step: %.2f\n\n",
1355 shfc->numForceEvaluations/numStepsAsDouble);
1358 // TODO Deallocate memory in shfc