src/gromacs/mdrun/minimize.cpp
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
7 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
8 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9 * and including many others, as listed in the AUTHORS file in the
10 * top-level source directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
38 /*! \internal \file
40 * \brief This file defines integrators for energy minimization
42 * \author Berk Hess <hess@kth.se>
43 * \author Erik Lindahl <erik@kth.se>
44 * \ingroup module_mdrun
46 #include "gmxpre.h"
48 #include "config.h"
50 #include <cmath>
51 #include <cstring>
52 #include <ctime>
54 #include <algorithm>
55 #include <vector>
57 #include "gromacs/commandline/filenm.h"
58 #include "gromacs/domdec/collect.h"
59 #include "gromacs/domdec/dlbtiming.h"
60 #include "gromacs/domdec/domdec.h"
61 #include "gromacs/domdec/domdec_struct.h"
62 #include "gromacs/domdec/mdsetup.h"
63 #include "gromacs/domdec/partition.h"
64 #include "gromacs/ewald/pme.h"
65 #include "gromacs/fileio/confio.h"
66 #include "gromacs/fileio/mtxio.h"
67 #include "gromacs/gmxlib/network.h"
68 #include "gromacs/gmxlib/nrnb.h"
69 #include "gromacs/imd/imd.h"
70 #include "gromacs/linearalgebra/sparsematrix.h"
71 #include "gromacs/listed_forces/manage_threading.h"
72 #include "gromacs/math/functions.h"
73 #include "gromacs/math/vec.h"
74 #include "gromacs/mdlib/constr.h"
75 #include "gromacs/mdlib/dispersioncorrection.h"
76 #include "gromacs/mdlib/ebin.h"
77 #include "gromacs/mdlib/enerdata_utils.h"
78 #include "gromacs/mdlib/energyoutput.h"
79 #include "gromacs/mdlib/force.h"
80 #include "gromacs/mdlib/forcerec.h"
81 #include "gromacs/mdlib/gmx_omp_nthreads.h"
82 #include "gromacs/mdlib/md_support.h"
83 #include "gromacs/mdlib/mdatoms.h"
84 #include "gromacs/mdlib/stat.h"
85 #include "gromacs/mdlib/tgroup.h"
86 #include "gromacs/mdlib/trajectory_writing.h"
87 #include "gromacs/mdlib/update.h"
88 #include "gromacs/mdlib/vsite.h"
89 #include "gromacs/mdrunutility/handlerestart.h"
90 #include "gromacs/mdrunutility/printtime.h"
91 #include "gromacs/mdtypes/commrec.h"
92 #include "gromacs/mdtypes/inputrec.h"
93 #include "gromacs/mdtypes/md_enums.h"
94 #include "gromacs/mdtypes/mdrunoptions.h"
95 #include "gromacs/mdtypes/state.h"
96 #include "gromacs/pbcutil/mshift.h"
97 #include "gromacs/pbcutil/pbc.h"
98 #include "gromacs/timing/wallcycle.h"
99 #include "gromacs/timing/walltime_accounting.h"
100 #include "gromacs/topology/mtop_util.h"
101 #include "gromacs/topology/topology.h"
102 #include "gromacs/utility/cstringutil.h"
103 #include "gromacs/utility/exceptions.h"
104 #include "gromacs/utility/fatalerror.h"
105 #include "gromacs/utility/logger.h"
106 #include "gromacs/utility/smalloc.h"
108 #include "legacysimulator.h"
109 #include "shellfc.h"
111 using gmx::MdrunScheduleWorkload;
113 //! Utility structure for manipulating states during EM
114 typedef struct
116 //! Copy of the global state
117 t_state s;
118 //! Force array
119 PaddedHostVector<gmx::RVec> f;
120 //! Potential energy
121 real epot;
122 //! Norm of the force
123 real fnorm;
124 //! Maximum force
125 real fmax;
126 //! Atom index of the maximum force
127 int a_fmax;
128 } em_state_t;
130 //! Print the EM starting conditions
131 static void print_em_start(FILE* fplog,
132 const t_commrec* cr,
133 gmx_walltime_accounting_t walltime_accounting,
134 gmx_wallcycle_t wcycle,
135 const char* name)
137 walltime_accounting_start_time(walltime_accounting);
138 wallcycle_start(wcycle, ewcRUN);
139 print_start(fplog, cr, walltime_accounting, name);
142 //! Stop counting time for EM
143 static void em_time_end(gmx_walltime_accounting_t walltime_accounting, gmx_wallcycle_t wcycle)
145 wallcycle_stop(wcycle, ewcRUN);
147 walltime_accounting_end_time(walltime_accounting);
150 //! Print a header to the log file and console
151 static void sp_header(FILE* out, const char* minimizer, real ftol, int nsteps)
153 fprintf(out, "\n");
154 fprintf(out, "%s:\n", minimizer);
155 fprintf(out, " Tolerance (Fmax) = %12.5e\n", ftol);
156 fprintf(out, " Number of steps = %12d\n", nsteps);
159 //! Print warning message
160 static void warn_step(FILE* fp, real ftol, real fmax, gmx_bool bLastStep, gmx_bool bConstrain)
162 constexpr bool realIsDouble = GMX_DOUBLE;
163 char buffer[2048];
165 if (!std::isfinite(fmax))
167 sprintf(buffer,
168 "\nEnergy minimization has stopped because the force "
169 "on at least one atom is not finite. This usually means "
170 "atoms are overlapping. Modify the input coordinates to "
171 "remove atom overlap or use soft-core potentials with "
172 "the free energy code to avoid infinite forces.\n%s",
173 !realIsDouble ? "You might also find that switching to double precision "
174 "is sufficient to obtain finite forces.\n"
175 : "");
177 else if (bLastStep)
179 sprintf(buffer,
180 "\nEnergy minimization reached the maximum number "
181 "of steps before the forces reached the requested "
182 "precision Fmax < %g.\n",
183 ftol);
185 else
187 sprintf(buffer,
188 "\nEnergy minimization has stopped, but the forces have "
189 "not converged to the requested precision Fmax < %g (which "
190 "may not be possible for your system). It stopped "
191 "because the algorithm tried to make a new step whose size "
192 "was too small, or there was no change in the energy since "
193 "last step. Either way, we regard the minimization as "
194 "converged to within the available machine precision, "
195 "given your starting configuration and EM parameters.\n%s%s",
196 ftol,
197 !realIsDouble ? "\nDouble precision normally gives you higher accuracy, but "
198 "this is often not needed for preparing to run molecular "
199 "dynamics.\n"
200 : "",
201 bConstrain ? "You might need to increase your constraint accuracy, or turn\n"
202 "off constraints altogether (set constraints = none in mdp file)\n"
203 : "");
206 fputs(wrap_lines(buffer, 78, 0, FALSE), stderr);
207 fputs(wrap_lines(buffer, 78, 0, FALSE), fp);
210 //! Print message about convergence of the EM
211 static void print_converged(FILE* fp,
212 const char* alg,
213 real ftol,
214 int64_t count,
215 gmx_bool bDone,
216 int64_t nsteps,
217 const em_state_t* ems,
218 double sqrtNumAtoms)
220 char buf[STEPSTRSIZE];
222 if (bDone)
224 fprintf(fp, "\n%s converged to Fmax < %g in %s steps\n", alg, ftol, gmx_step_str(count, buf));
226 else if (count < nsteps)
228 fprintf(fp,
229 "\n%s converged to machine precision in %s steps,\n"
230 "but did not reach the requested Fmax < %g.\n",
231 alg, gmx_step_str(count, buf), ftol);
233 else
235 fprintf(fp, "\n%s did not converge to Fmax < %g in %s steps.\n", alg, ftol,
236 gmx_step_str(count, buf));
239 #if GMX_DOUBLE
240 fprintf(fp, "Potential Energy = %21.14e\n", ems->epot);
241 fprintf(fp, "Maximum force = %21.14e on atom %d\n", ems->fmax, ems->a_fmax + 1);
242 fprintf(fp, "Norm of force = %21.14e\n", ems->fnorm / sqrtNumAtoms);
243 #else
244 fprintf(fp, "Potential Energy = %14.7e\n", ems->epot);
245 fprintf(fp, "Maximum force = %14.7e on atom %d\n", ems->fmax, ems->a_fmax + 1);
246 fprintf(fp, "Norm of force = %14.7e\n", ems->fnorm / sqrtNumAtoms);
247 #endif
250 //! Compute the norm and max of the force array in parallel
251 static void get_f_norm_max(const t_commrec* cr,
252 t_grpopts* opts,
253 t_mdatoms* mdatoms,
254 const rvec* f,
255 real* fnorm,
256 real* fmax,
257 int* a_fmax)
259 double fnorm2, *sum;
260 real fmax2, fam;
261 int la_max, a_max, start, end, i, m, gf;
263 /* This routine finds the largest force and returns it.
264 * On parallel machines the global max is taken.
266 fnorm2 = 0;
267 fmax2 = 0;
268 la_max = -1;
269 start = 0;
270 end = mdatoms->homenr;
271 if (mdatoms->cFREEZE)
273 for (i = start; i < end; i++)
275 gf = mdatoms->cFREEZE[i];
276 fam = 0;
277 for (m = 0; m < DIM; m++)
279 if (!opts->nFreeze[gf][m])
281 fam += gmx::square(f[i][m]);
284 fnorm2 += fam;
285 if (fam > fmax2)
287 fmax2 = fam;
288 la_max = i;
292 else
294 for (i = start; i < end; i++)
296 fam = norm2(f[i]);
297 fnorm2 += fam;
298 if (fam > fmax2)
300 fmax2 = fam;
301 la_max = i;
306 if (la_max >= 0 && DOMAINDECOMP(cr))
308 a_max = cr->dd->globalAtomIndices[la_max];
310 else
312 a_max = la_max;
314 if (PAR(cr))
316 snew(sum, 2 * cr->nnodes + 1);
317 sum[2 * cr->nodeid] = fmax2;
318 sum[2 * cr->nodeid + 1] = a_max;
319 sum[2 * cr->nnodes] = fnorm2;
320 gmx_sumd(2 * cr->nnodes + 1, sum, cr);
321 fnorm2 = sum[2 * cr->nnodes];
322 /* Determine the global maximum */
323 for (i = 0; i < cr->nnodes; i++)
325 if (sum[2 * i] > fmax2)
327 fmax2 = sum[2 * i];
328 a_max = gmx::roundToInt(sum[2 * i + 1]);
331 sfree(sum);
334 if (fnorm)
336 *fnorm = sqrt(fnorm2);
338 if (fmax)
340 *fmax = sqrt(fmax2);
342 if (a_fmax)
344 *a_fmax = a_max;
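/* For illustration, a standalone sketch (not part of this file) of the packed
 * reduction used above: each rank fills only its own (fmax^2, winner index)
 * slots plus its partial |F|^2, snew() has zero-initialized everything else,
 * and the single element-wise gmx_sumd() then makes every rank's entries
 * visible everywhere, after which a linear scan yields the global maximum.
 * \code
 *   // summedBuffer layout: [fmax2_0, a_0, fmax2_1, a_1, ..., fnorm2_total]
 *   double globalFmax2(const std::vector<double>& summedBuffer, int nranks, int* aMax)
 *   {
 *       double fmax2 = 0;
 *       for (int r = 0; r < nranks; r++)
 *       {
 *           if (summedBuffer[2 * r] > fmax2)
 *           {
 *               fmax2 = summedBuffer[2 * r];
 *               *aMax = static_cast<int>(summedBuffer[2 * r + 1] + 0.5); // index stored as double
 *           }
 *       }
 *       return fmax2;
 *   }
 * \endcode
 */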
348 //! Compute the norm and max of the force for an EM state
349 static void get_state_f_norm_max(const t_commrec* cr, t_grpopts* opts, t_mdatoms* mdatoms, em_state_t* ems)
351 get_f_norm_max(cr, opts, mdatoms, ems->f.rvec_array(), &ems->fnorm, &ems->fmax, &ems->a_fmax);
354 //! Initialize the energy minimization
355 static void init_em(FILE* fplog,
356 const gmx::MDLogger& mdlog,
357 const char* title,
358 const t_commrec* cr,
359 t_inputrec* ir,
360 gmx::ImdSession* imdSession,
361 pull_t* pull_work,
362 t_state* state_global,
363 gmx_mtop_t* top_global,
364 em_state_t* ems,
365 gmx_localtop_t* top,
366 t_nrnb* nrnb,
367 t_forcerec* fr,
368 t_graph** graph,
369 gmx::MDAtoms* mdAtoms,
370 gmx_global_stat_t* gstat,
371 gmx_vsite_t* vsite,
372 gmx::Constraints* constr,
373 gmx_shellfc_t** shellfc)
375 real dvdl_constr;
377 if (fplog)
379 fprintf(fplog, "Initiating %s\n", title);
382 if (MASTER(cr))
384 state_global->ngtc = 0;
386 initialize_lambdas(fplog, *ir, MASTER(cr), &(state_global->fep_state), state_global->lambda, nullptr);
388 if (ir->eI == eiNM)
390 GMX_ASSERT(shellfc != nullptr, "With NM we always support shells");
392 *shellfc = init_shell_flexcon(stdout, top_global, constr ? constr->numFlexibleConstraints() : 0,
393 ir->nstcalcenergy, DOMAINDECOMP(cr));
395 else
397 GMX_ASSERT(EI_ENERGY_MINIMIZATION(ir->eI),
398 "This else currently only handles energy minimizers, consider if your algorithm "
399 "needs shell/flexible-constraint support");
401 /* With energy minimization, shells and flexible constraints are
402 * automatically minimized when treated like normal DOFs.
404 if (shellfc != nullptr)
406 *shellfc = nullptr;
410 auto mdatoms = mdAtoms->mdatoms();
411 if (DOMAINDECOMP(cr))
413 top->useInDomainDecomp_ = true;
414 dd_init_local_top(*top_global, top);
416 dd_init_local_state(cr->dd, state_global, &ems->s);
418 /* Distribute the charge groups over the nodes from the master node */
419 dd_partition_system(fplog, mdlog, ir->init_step, cr, TRUE, 1, state_global, *top_global, ir,
420 imdSession, pull_work, &ems->s, &ems->f, mdAtoms, top, fr, vsite,
421 constr, nrnb, nullptr, FALSE);
422 dd_store_state(cr->dd, &ems->s);
424 *graph = nullptr;
426 else
428 state_change_natoms(state_global, state_global->natoms);
429 /* Just copy the state */
430 ems->s = *state_global;
431 state_change_natoms(&ems->s, ems->s.natoms);
432 ems->f.resizeWithPadding(ems->s.natoms);
434 mdAlgorithmsSetupAtomData(cr, ir, *top_global, top, fr, graph, mdAtoms, constr, vsite,
435 shellfc ? *shellfc : nullptr);
437 if (vsite)
439 set_vsite_top(vsite, top, mdatoms);
443 update_mdatoms(mdAtoms->mdatoms(), ems->s.lambda[efptMASS]);
445 if (constr)
447 // TODO how should this cross-module support dependency be managed?
448 if (ir->eConstrAlg == econtSHAKE && gmx_mtop_ftype_count(top_global, F_CONSTR) > 0)
450 gmx_fatal(FARGS, "Cannot do energy minimization with %s, use %s\n",
451 econstr_names[econtSHAKE], econstr_names[econtLINCS]);
454 if (!ir->bContinuation)
456 /* Constrain the starting coordinates */
457 dvdl_constr = 0;
458 constr->apply(TRUE, TRUE, -1, 0, 1.0, ems->s.x.rvec_array(), ems->s.x.rvec_array(),
459 nullptr, ems->s.box, ems->s.lambda[efptFEP], &dvdl_constr, nullptr,
460 nullptr, gmx::ConstraintVariable::Positions);
464 if (PAR(cr))
466 *gstat = global_stat_init(ir);
468 else
470 *gstat = nullptr;
473 calc_shifts(ems->s.box, fr->shift_vec);
476 //! Finalize the minimization
477 static void finish_em(const t_commrec* cr,
478 gmx_mdoutf_t outf,
479 gmx_walltime_accounting_t walltime_accounting,
480 gmx_wallcycle_t wcycle)
482 if (!thisRankHasDuty(cr, DUTY_PME))
484 /* Tell the PME-only node to finish */
485 gmx_pme_send_finish(cr);
488 done_mdoutf(outf);
490 em_time_end(walltime_accounting, wcycle);
493 //! Swap two different EM states during minimization
494 static void swap_em_state(em_state_t** ems1, em_state_t** ems2)
496 em_state_t* tmp;
498 tmp = *ems1;
499 *ems1 = *ems2;
500 *ems2 = tmp;
503 //! Save the EM trajectory
504 static void write_em_traj(FILE* fplog,
505 const t_commrec* cr,
506 gmx_mdoutf_t outf,
507 gmx_bool bX,
508 gmx_bool bF,
509 const char* confout,
510 gmx_mtop_t* top_global,
511 t_inputrec* ir,
512 int64_t step,
513 em_state_t* state,
514 t_state* state_global,
515 ObservablesHistory* observablesHistory)
517 int mdof_flags = 0;
519 if (bX)
521 mdof_flags |= MDOF_X;
523 if (bF)
525 mdof_flags |= MDOF_F;
528 /* If we want IMD output, set appropriate MDOF flag */
529 if (ir->bIMD)
531 mdof_flags |= MDOF_IMD;
534 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags, top_global->natoms, step,
535 static_cast<double>(step), &state->s, state_global,
536 observablesHistory, state->f);
538 if (confout != nullptr)
540 if (DOMAINDECOMP(cr))
542 /* If bX=true, x was collected to state_global in the call above */
543 if (!bX)
545 auto globalXRef = MASTER(cr) ? state_global->x : gmx::ArrayRef<gmx::RVec>();
546 dd_collect_vec(cr->dd, &state->s, state->s.x, globalXRef);
549 else
551 /* Copy the local state pointer */
552 state_global = &state->s;
555 if (MASTER(cr))
557 if (ir->ePBC != epbcNONE && !ir->bPeriodicMols && DOMAINDECOMP(cr))
559 /* Make molecules whole only for confout writing */
560 do_pbc_mtop(ir->ePBC, state->s.box, top_global, state_global->x.rvec_array());
563 write_sto_conf_mtop(confout, *top_global->name, top_global,
564 state_global->x.rvec_array(), nullptr, ir->ePBC, state->s.box);
569 //! \brief Do one minimization step
571 //! \returns true when the step succeeded, false when a constraint error occurred
572 static bool do_em_step(const t_commrec* cr,
573 t_inputrec* ir,
574 t_mdatoms* md,
575 em_state_t* ems1,
576 real a,
577 const PaddedHostVector<gmx::RVec>* force,
578 em_state_t* ems2,
579 gmx::Constraints* constr,
580 int64_t count)
583 t_state *s1, *s2;
584 int start, end;
585 real dvdl_constr;
586 int nthreads gmx_unused;
588 bool validStep = true;
590 s1 = &ems1->s;
591 s2 = &ems2->s;
593 if (DOMAINDECOMP(cr) && s1->ddp_count != cr->dd->ddp_count)
595 gmx_incons("state mismatch in do_em_step");
598 s2->flags = s1->flags;
600 if (s2->natoms != s1->natoms)
602 state_change_natoms(s2, s1->natoms);
603 ems2->f.resizeWithPadding(s2->natoms);
605 if (DOMAINDECOMP(cr) && s2->cg_gl.size() != s1->cg_gl.size())
607 s2->cg_gl.resize(s1->cg_gl.size());
610 copy_mat(s1->box, s2->box);
611 /* Copy free energy state */
612 s2->lambda = s1->lambda;
613 copy_mat(s1->box, s2->box);
615 start = 0;
616 end = md->homenr;
618 nthreads = gmx_omp_nthreads_get(emntUpdate);
619 #pragma omp parallel num_threads(nthreads)
621 const rvec* x1 = s1->x.rvec_array();
622 rvec* x2 = s2->x.rvec_array();
623 const rvec* f = force->rvec_array();
625 int gf = 0;
626 #pragma omp for schedule(static) nowait
627 for (int i = start; i < end; i++)
631 if (md->cFREEZE)
633 gf = md->cFREEZE[i];
635 for (int m = 0; m < DIM; m++)
637 if (ir->opts.nFreeze[gf][m])
639 x2[i][m] = x1[i][m];
641 else
643 x2[i][m] = x1[i][m] + a * f[i][m];
647 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
650 if (s2->flags & (1 << estCGP))
652 /* Copy the CG p vector */
653 const rvec* p1 = s1->cg_p.rvec_array();
654 rvec* p2 = s2->cg_p.rvec_array();
655 #pragma omp for schedule(static) nowait
656 for (int i = start; i < end; i++)
658 // Trivial OpenMP block that does not throw
659 copy_rvec(p1[i], p2[i]);
663 if (DOMAINDECOMP(cr))
665 /* OpenMP does not support unsigned loop variables */
666 #pragma omp for schedule(static) nowait
667 for (gmx::index i = 0; i < gmx::ssize(s2->cg_gl); i++)
669 s2->cg_gl[i] = s1->cg_gl[i];
674 if (DOMAINDECOMP(cr))
676 s2->ddp_count = s1->ddp_count;
677 s2->ddp_count_cg_gl = s1->ddp_count_cg_gl;
680 if (constr)
682 dvdl_constr = 0;
683 validStep = constr->apply(TRUE, TRUE, count, 0, 1.0, s1->x.rvec_array(), s2->x.rvec_array(),
684 nullptr, s2->box, s2->lambda[efptBONDED], &dvdl_constr, nullptr,
685 nullptr, gmx::ConstraintVariable::Positions);
687 if (cr->nnodes > 1)
689 /* This global reduction will affect performance at high
690 * parallelization, but we cannot really avoid it.
691 * But usually EM is not run at high parallelization.
693 int reductionBuffer = static_cast<int>(!validStep);
694 gmx_sumi(1, &reductionBuffer, cr);
695 validStep = (reductionBuffer == 0);
698 // We should move this check to the different minimizers
699 if (!validStep && ir->eI != eiSteep)
701 gmx_fatal(FARGS,
702 "The coordinates could not be constrained. Minimizer '%s' can not handle "
703 "constraint failures, use minimizer '%s' before using '%s'.",
704 EI(ir->eI), EI(eiSteep), EI(ir->eI));
708 return validStep;
711 //! Prepare EM for using domain decomposition parallelization
712 static void em_dd_partition_system(FILE* fplog,
713 const gmx::MDLogger& mdlog,
714 int step,
715 const t_commrec* cr,
716 gmx_mtop_t* top_global,
717 t_inputrec* ir,
718 gmx::ImdSession* imdSession,
719 pull_t* pull_work,
720 em_state_t* ems,
721 gmx_localtop_t* top,
722 gmx::MDAtoms* mdAtoms,
723 t_forcerec* fr,
724 gmx_vsite_t* vsite,
725 gmx::Constraints* constr,
726 t_nrnb* nrnb,
727 gmx_wallcycle_t wcycle)
729 /* Repartition the domain decomposition */
730 dd_partition_system(fplog, mdlog, step, cr, FALSE, 1, nullptr, *top_global, ir, imdSession, pull_work,
731 &ems->s, &ems->f, mdAtoms, top, fr, vsite, constr, nrnb, wcycle, FALSE);
732 dd_store_state(cr->dd, &ems->s);
735 namespace
738 /*! \brief Class to handle the work of setting and doing an energy evaluation.
740 * This class is a mere aggregate of parameters to pass to evaluate an
741 * energy, so that future changes to names and types of them consume
742 * less time when refactoring other code.
744 * Aggregate initialization is used, for which the chief risk is that
745 * if a member is added at the end and not all initializer lists are
746 * updated, then the member will be value initialized, which will
747 * typically mean initialization to zero.
749 * Use a braced initializer list to construct one of these. */
750 class EnergyEvaluator
752 public:
753 /*! \brief Evaluates an energy on the state in \c ems.
755 * \todo In practice, the same objects mu_tot, vir, and pres
756 * are always passed to this function, so we would rather have
757 * them as data members. However, their C-array types are
758 * unsuited for aggregate initialization. When the types
759 * improve, the call signature of this method can be reduced.
761 void run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst);
762 //! Handles logging (deprecated).
763 FILE* fplog;
764 //! Handles logging.
765 const gmx::MDLogger& mdlog;
766 //! Handles communication.
767 const t_commrec* cr;
768 //! Coordinates multi-simulations.
769 const gmx_multisim_t* ms;
770 //! Holds the simulation topology.
771 gmx_mtop_t* top_global;
772 //! Holds the domain topology.
773 gmx_localtop_t* top;
774 //! User input options.
775 t_inputrec* inputrec;
776 //! The Interactive Molecular Dynamics session.
777 gmx::ImdSession* imdSession;
778 //! The pull work object.
779 pull_t* pull_work;
780 //! Manages flop accounting.
781 t_nrnb* nrnb;
782 //! Manages wall cycle accounting.
783 gmx_wallcycle_t wcycle;
784 //! Coordinates global reduction.
785 gmx_global_stat_t gstat;
786 //! Handles virtual sites.
787 gmx_vsite_t* vsite;
788 //! Handles constraints.
789 gmx::Constraints* constr;
791 //! Data for special listed interactions (e.g. restraints).
791 t_fcdata* fcd;
792 //! Molecular graph for SHAKE.
793 t_graph* graph;
794 //! Per-atom data for this domain.
795 gmx::MDAtoms* mdAtoms;
796 //! Handles how to calculate the forces.
797 t_forcerec* fr;
798 //! Schedule of force-calculation work each step for this task.
799 MdrunScheduleWorkload* runScheduleWork;
800 //! Stores the computed energies.
801 gmx_enerdata_t* enerd;
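/* For illustration, a minimal standalone example (not from this file) of the
 * aggregate-initialization pitfall described in the class documentation above:
 * members omitted at the end of a braced initializer list are value-initialized,
 * i.e. pointers become nullptr and numbers become zero.
 * \code
 *   struct Example
 *   {
 *       int*   a;
 *       double b;
 *   };
 *   Example e{ nullptr }; // e.b is value-initialized to 0.0
 * \endcode
 */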
804 void EnergyEvaluator::run(em_state_t* ems, rvec mu_tot, tensor vir, tensor pres, int64_t count, gmx_bool bFirst)
806 real t;
807 gmx_bool bNS;
808 tensor force_vir, shake_vir, ekin;
809 real dvdl_constr;
810 real terminate = 0;
812 /* Set the time to the initial time, the time does not change during EM */
813 t = inputrec->init_t;
815 if (bFirst || (DOMAINDECOMP(cr) && ems->s.ddp_count < cr->dd->ddp_count))
817 /* This is the first state or an old state used before the last ns */
818 bNS = TRUE;
820 else
822 bNS = FALSE;
823 if (inputrec->nstlist > 0)
825 bNS = TRUE;
829 if (vsite)
831 construct_vsites(vsite, ems->s.x.rvec_array(), 1, nullptr, top->idef.iparams, top->idef.il,
832 fr->ePBC, fr->bMolPBC, cr, ems->s.box);
835 if (DOMAINDECOMP(cr) && bNS)
837 /* Repartition the domain decomposition */
838 em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession, pull_work,
839 ems, top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
842 /* Calc force & energy on new trial position */
843 /* do_force always puts the charge groups in the box and shifts again
844 * We do not unshift, so molecules are always whole
846 do_force(fplog, cr, ms, inputrec, nullptr, nullptr, imdSession, pull_work, count, nrnb, wcycle,
847 top, ems->s.box, ems->s.x.arrayRefWithPadding(), &ems->s.hist,
848 ems->f.arrayRefWithPadding(), force_vir, mdAtoms->mdatoms(), enerd, fcd, ems->s.lambda,
849 graph, fr, runScheduleWork, vsite, mu_tot, t, nullptr,
850 GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES | GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY
851 | (bNS ? GMX_FORCE_NS : 0),
852 DDBalanceRegionHandler(cr));
854 /* Clear the unused shake virial and pressure */
855 clear_mat(shake_vir);
856 clear_mat(pres);
858 /* Communicate stuff when parallel */
859 if (PAR(cr) && inputrec->eI != eiNM)
861 wallcycle_start(wcycle, ewcMoveE);
863 global_stat(gstat, cr, enerd, force_vir, shake_vir, mu_tot, inputrec, nullptr, nullptr, nullptr,
864 1, &terminate, nullptr, FALSE, CGLO_ENERGY | CGLO_PRESSURE | CGLO_CONSTRAINT);
866 wallcycle_stop(wcycle, ewcMoveE);
869 if (fr->dispersionCorrection)
871 /* Calculate long range corrections to pressure and energy */
872 const DispersionCorrection::Correction correction =
873 fr->dispersionCorrection->calculate(ems->s.box, ems->s.lambda[efptVDW]);
875 enerd->term[F_DISPCORR] = correction.energy;
876 enerd->term[F_EPOT] += correction.energy;
877 enerd->term[F_PRES] += correction.pressure;
878 enerd->term[F_DVDL] += correction.dvdl;
880 else
882 enerd->term[F_DISPCORR] = 0;
885 ems->epot = enerd->term[F_EPOT];
887 if (constr)
889 /* Project out the constraint components of the force */
890 dvdl_constr = 0;
891 rvec* f_rvec = ems->f.rvec_array();
892 constr->apply(FALSE, FALSE, count, 0, 1.0, ems->s.x.rvec_array(), f_rvec, f_rvec,
893 ems->s.box, ems->s.lambda[efptBONDED], &dvdl_constr, nullptr, &shake_vir,
894 gmx::ConstraintVariable::ForceDispl);
895 enerd->term[F_DVDL_CONSTR] += dvdl_constr;
896 m_add(force_vir, shake_vir, vir);
898 else
900 copy_mat(force_vir, vir);
903 clear_mat(ekin);
904 enerd->term[F_PRES] = calc_pres(fr->ePBC, inputrec->nwall, ems->s.box, ekin, vir, pres);
906 sum_dhdl(enerd, ems->s.lambda, *inputrec->fepvals);
908 if (EI_ENERGY_MINIMIZATION(inputrec->eI))
910 get_state_f_norm_max(cr, &(inputrec->opts), mdAtoms->mdatoms(), ems);
914 } // namespace
916 //! Parallel utility computing the Polak-Ribiere partial sum with a consistent global atom ordering
917 static double reorder_partsum(const t_commrec* cr,
918 t_grpopts* opts,
919 gmx_mtop_t* top_global,
920 em_state_t* s_min,
921 em_state_t* s_b)
923 if (debug)
925 fprintf(debug, "Doing reorder_partsum\n");
928 const rvec* fm = s_min->f.rvec_array();
929 const rvec* fb = s_b->f.rvec_array();
931 /* Collect fm in a global vector fmg.
932 * This conflicts with the spirit of domain decomposition,
933 * but to fully optimize this a much more complicated algorithm is required.
935 const int natoms = top_global->natoms;
936 rvec* fmg;
937 snew(fmg, natoms);
939 gmx::ArrayRef<const int> indicesMin = s_min->s.cg_gl;
940 int i = 0;
941 for (int a : indicesMin)
943 copy_rvec(fm[i], fmg[a]);
944 i++;
946 gmx_sum(top_global->natoms * 3, fmg[0], cr);
948 /* Now we will determine the part of the sum for the cgs in state s_b */
949 gmx::ArrayRef<const int> indicesB = s_b->s.cg_gl;
951 double partsum = 0;
952 i = 0;
953 int gf = 0;
954 gmx::ArrayRef<unsigned char> grpnrFREEZE =
955 top_global->groups.groupNumbers[SimulationAtomGroupType::Freeze];
956 for (int a : indicesB)
958 if (!grpnrFREEZE.empty())
960 gf = grpnrFREEZE[i];
962 for (int m = 0; m < DIM; m++)
964 if (!opts->nFreeze[gf][m])
966 partsum += (fb[i][m] - fmg[a][m]) * fb[i][m];
969 i++;
972 sfree(fmg);
974 return partsum;
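/* For illustration, a standalone sketch of the reordering idea above: scatter
 * the local values into an array indexed by global atom number, let the
 * element-wise sum over ranks (gmx_sum above) complete the gather, then read
 * the result back in the other state's local ordering. This assumes, as in
 * the code, that each atom is home on exactly one rank.
 * \code
 *   std::vector<double> toGlobalOrder(const std::vector<double>& localValues,
 *                                     const std::vector<int>&    globalIndex,
 *                                     int                        numGlobalAtoms)
 *   {
 *       std::vector<double> global(numGlobalAtoms, 0.0); // other ranks' entries stay zero
 *       for (size_t i = 0; i < localValues.size(); i++)
 *       {
 *           global[globalIndex[i]] = localValues[i];
 *       }
 *       return global; // summing this over all ranks yields the full global array
 *   }
 * \endcode
 */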
977 //! Compute beta for the Polak-Ribiere conjugate-gradient update
978 static real pr_beta(const t_commrec* cr,
979 t_grpopts* opts,
980 t_mdatoms* mdatoms,
981 gmx_mtop_t* top_global,
982 em_state_t* s_min,
983 em_state_t* s_b)
985 double sum;
987 /* This is just the classical Polak-Ribiere calculation of beta;
988 * it looks a bit complicated since we take freeze groups into account,
989 * and might have to sum it in parallel runs.
992 if (!DOMAINDECOMP(cr)
993 || (s_min->s.ddp_count == cr->dd->ddp_count && s_b->s.ddp_count == cr->dd->ddp_count))
995 const rvec* fm = s_min->f.rvec_array();
996 const rvec* fb = s_b->f.rvec_array();
997 sum = 0;
998 int gf = 0;
999 /* This part of the code can be incorrect with DD,
1000 * since the atom ordering in s_b and s_min might differ.
1002 for (int i = 0; i < mdatoms->homenr; i++)
1004 if (mdatoms->cFREEZE)
1006 gf = mdatoms->cFREEZE[i];
1008 for (int m = 0; m < DIM; m++)
1010 if (!opts->nFreeze[gf][m])
1012 sum += (fb[i][m] - fm[i][m]) * fb[i][m];
1017 else
1019 /* We need to reorder cgs while summing */
1020 sum = reorder_partsum(cr, opts, top_global, s_min, s_b);
1022 if (PAR(cr))
1024 gmx_sumd(1, &sum, cr);
1027 return sum / gmx::square(s_min->fnorm);
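/* For illustration, a standalone sketch (ignoring freeze groups and the
 * parallel reduction) of the Polak-Ribiere coefficient computed above: with
 * fNew the force after the trial step and fOld the force at the previous
 * minimum, beta = fNew.(fNew - fOld) / |fOld|^2.
 * \code
 *   double prBeta(const std::vector<double>& fOld, const std::vector<double>& fNew)
 *   {
 *       double num = 0, den = 0;
 *       for (size_t i = 0; i < fOld.size(); i++)
 *       {
 *           num += fNew[i] * (fNew[i] - fOld[i]); // fNew.(fNew - fOld)
 *           den += fOld[i] * fOld[i];             // |fOld|^2
 *       }
 *       return num / den;
 *   }
 * \endcode
 */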
1030 namespace gmx
1033 void LegacySimulator::do_cg()
1035 const char* CG = "Polak-Ribiere Conjugate Gradients";
1037 gmx_localtop_t top;
1038 gmx_global_stat_t gstat;
1039 t_graph* graph;
1040 double tmp, minstep;
1041 real stepsize;
1042 real a, b, c, beta = 0.0;
1043 real epot_repl = 0;
1044 real pnorm;
1045 gmx_bool converged, foundlower;
1046 rvec mu_tot = { 0 };
1047 gmx_bool do_log = FALSE, do_ene = FALSE, do_x, do_f;
1048 tensor vir, pres;
1049 int number_steps, neval = 0, nstcg = inputrec->nstcgsteep;
1050 int m, step, nminstep;
1051 auto mdatoms = mdAtoms->mdatoms();
1053 GMX_LOG(mdlog.info)
1054 .asParagraph()
1055 .appendText(
1056 "Note that activating conjugate gradient energy minimization via the "
1057 "integrator .mdp option and the command gmx mdrun may "
1058 "be available in a different form in a future version of GROMACS, "
1059 "e.g. gmx minimize and an .mdp option.");
1061 step = 0;
1063 if (MASTER(cr))
1065 // In CG, the state is extended with a search direction
1066 state_global->flags |= (1 << estCGP);
1068 // Ensure the extra per-atom state array gets allocated
1069 state_change_natoms(state_global, state_global->natoms);
1071 // Initialize the search direction to zero
1072 for (RVec& cg_p : state_global->cg_p)
1074 cg_p = { 0, 0, 0 };
1078 /* Create 4 states on the stack and extract pointers that we will swap */
1079 em_state_t s0{}, s1{}, s2{}, s3{};
1080 em_state_t* s_min = &s0;
1081 em_state_t* s_a = &s1;
1082 em_state_t* s_b = &s2;
1083 em_state_t* s_c = &s3;
1085 /* Init em and store the local state in s_min */
1086 init_em(fplog, mdlog, CG, cr, inputrec, imdSession, pull_work, state_global, top_global, s_min,
1087 &top, nrnb, fr, &graph, mdAtoms, &gstat, vsite, constr, nullptr);
1088 gmx_mdoutf* outf =
1089 init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier,
1090 inputrec, top_global, nullptr, wcycle, StartingBehavior::NewSimulation);
1091 gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work, nullptr,
1092 false, StartingBehavior::NewSimulation, mdModulesNotifier);
1094 /* Print to log file */
1095 print_em_start(fplog, cr, walltime_accounting, wcycle, CG);
1097 /* Max number of steps */
1098 number_steps = inputrec->nsteps;
1100 if (MASTER(cr))
1102 sp_header(stderr, CG, inputrec->em_tol, number_steps);
1104 if (fplog)
1106 sp_header(fplog, CG, inputrec->em_tol, number_steps);
1109 EnergyEvaluator energyEvaluator{
1110 fplog, mdlog, cr, ms, top_global, &top, inputrec,
1111 imdSession, pull_work, nrnb, wcycle, gstat, vsite, constr,
1112 fcd, graph, mdAtoms, fr, runScheduleWork, enerd
1114 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1115 /* do_force always puts the charge groups in the box and shifts again
1116 * We do not unshift, so molecules are always whole
1118 energyEvaluator.run(s_min, mu_tot, vir, pres, -1, TRUE);
1120 if (MASTER(cr))
1122 /* Copy stuff to the energy bin for easy printing etc. */
1123 matrix nullBox = {};
1124 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
1125 enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
1126 nullptr, vir, pres, nullptr, mu_tot, constr);
1128 energyOutput.printHeader(fplog, step, step);
1129 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step,
1130 step, fcd, nullptr);
1133 /* Estimate/guess the initial stepsize */
1134 stepsize = inputrec->em_stepsize / s_min->fnorm;
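/* Written out: the first search direction is the force itself, whose norm is
 * s_min->fnorm, so the first trial displacement has norm
 * stepsize * fnorm = em_stepsize, i.e. the user-requested initial step size.
 */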
1136 if (MASTER(cr))
1138 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1139 fprintf(stderr, " F-max = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
1140 fprintf(stderr, " F-Norm = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
1141 fprintf(stderr, "\n");
1142 /* and copy to the log file too... */
1143 fprintf(fplog, " F-max = %12.5e on atom %d\n", s_min->fmax, s_min->a_fmax + 1);
1144 fprintf(fplog, " F-Norm = %12.5e\n", s_min->fnorm / sqrtNumAtoms);
1145 fprintf(fplog, "\n");
1147 /* Start the loop over CG steps.
1148 * Each successful step is counted, and we continue until
1149 * we either converge or reach the max number of steps.
1151 converged = FALSE;
1152 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1155 /* start taking steps in a new direction
1156 * First time we enter the routine, beta=0, and the direction is
1157 * simply the negative gradient.
1160 /* Calculate the new direction in p, and the gradient in this direction, gpa */
1161 rvec* pm = s_min->s.cg_p.rvec_array();
1162 const rvec* sfm = s_min->f.rvec_array();
1163 double gpa = 0;
1164 int gf = 0;
1165 for (int i = 0; i < mdatoms->homenr; i++)
1167 if (mdatoms->cFREEZE)
1169 gf = mdatoms->cFREEZE[i];
1171 for (m = 0; m < DIM; m++)
1173 if (!inputrec->opts.nFreeze[gf][m])
1175 pm[i][m] = sfm[i][m] + beta * pm[i][m];
1176 gpa -= pm[i][m] * sfm[i][m];
1177 /* f is negative gradient, thus the sign */
1179 else
1181 pm[i][m] = 0;
1186 /* Sum the gradient along the line across CPUs */
1187 if (PAR(cr))
1189 gmx_sumd(1, &gpa, cr);
1192 /* Calculate the norm of the search vector */
1193 get_f_norm_max(cr, &(inputrec->opts), mdatoms, pm, &pnorm, nullptr, nullptr);
1195 /* Just in case stepsize reaches zero due to numerical precision... */
1196 if (stepsize <= 0)
1198 stepsize = inputrec->em_stepsize / pnorm;
1202 * Double check the value of the derivative in the search direction.
1203 * If it is positive it must be due to the old information in the
1204 * CG formula, so just remove that and start over with beta=0.
1205 * This corresponds to a steepest descent step.
1207 if (gpa > 0)
1209 beta = 0;
1210 step--; /* Don't count this step since we are restarting */
1211 continue; /* Go back to the beginning of the big for-loop */
1214 /* Calculate the minimum allowed stepsize, below which the average (norm)
1215 * relative change in coordinates drops below machine precision
1217 minstep = 0;
1218 auto s_min_x = makeArrayRef(s_min->s.x);
1219 for (int i = 0; i < mdatoms->homenr; i++)
1221 for (m = 0; m < DIM; m++)
1223 tmp = fabs(s_min_x[i][m]);
1224 if (tmp < 1.0)
1226 tmp = 1.0;
1228 tmp = pm[i][m] / tmp;
1229 minstep += tmp * tmp;
1232 /* Add up from all CPUs */
1233 if (PAR(cr))
1235 gmx_sumd(1, &minstep, cr);
1238 minstep = GMX_REAL_EPS / sqrt(minstep / (3 * top_global->natoms));
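/* The criterion above, written out: with r_im = p_im / max(|x_im|, 1) the
 * relative displacement per unit step, the RMS over all 3N degrees of freedom
 * is rms = sqrt( sum_im r_im^2 / (3N) ), and
 *
 *   minstep = GMX_REAL_EPS / rms
 *
 * so any smaller step changes the coordinates, relative to their magnitude, by
 * less than machine precision on average, and the search is treated as
 * converged below.
 */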
1240 if (stepsize < minstep)
1242 converged = TRUE;
1243 break;
1246 /* Write coordinates if necessary */
1247 do_x = do_per_step(step, inputrec->nstxout);
1248 do_f = do_per_step(step, inputrec->nstfout);
1250 write_em_traj(fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, step, s_min,
1251 state_global, observablesHistory);
1253 /* Take a step downhill.
1254 * In theory, we should minimize the function along this direction.
1255 * That is quite possible, but it turns out to take 5-10 function evaluations
1256 * for each line. However, we don't really need to find the exact minimum -
1257 * it is much better to start a new CG step in a modified direction as soon
1258 * as we are close to it. This will save a lot of energy evaluations.
1260 * In practice, we just try to take a single step.
1261 * If it worked (i.e. lowered the energy), we increase the stepsize but
1262 * then continue straight to the next CG step without trying to find any minimum.
1263 * If it didn't work (higher energy), there must be a minimum somewhere between
1264 * the old position and the new one.
1266 * Due to the finite numerical accuracy, it turns out that it is a good idea
1267 * to even accept a SMALL increase in energy, if the derivative is still downhill.
1268 * This leads to lower final energies in the tests I've done. / Erik
1270 s_a->epot = s_min->epot;
1271 a = 0.0;
1272 c = a + stepsize; /* reference position along line is zero */
1274 if (DOMAINDECOMP(cr) && s_min->s.ddp_count < cr->dd->ddp_count)
1276 em_dd_partition_system(fplog, mdlog, step, cr, top_global, inputrec, imdSession,
1277 pull_work, s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
1280 /* Take a trial step (new coords in s_c) */
1281 do_em_step(cr, inputrec, mdatoms, s_min, c, &s_min->s.cg_p, s_c, constr, -1);
1283 neval++;
1284 /* Calculate energy for the trial step */
1285 energyEvaluator.run(s_c, mu_tot, vir, pres, -1, FALSE);
1287 /* Calc derivative along line */
1288 const rvec* pc = s_c->s.cg_p.rvec_array();
1289 const rvec* sfc = s_c->f.rvec_array();
1290 double gpc = 0;
1291 for (int i = 0; i < mdatoms->homenr; i++)
1293 for (m = 0; m < DIM; m++)
1295 gpc -= pc[i][m] * sfc[i][m]; /* f is negative gradient, thus the sign */
1298 /* Sum the gradient along the line across CPUs */
1299 if (PAR(cr))
1301 gmx_sumd(1, &gpc, cr);
1304 /* This is the max amount of increase in energy we tolerate */
1305 tmp = std::sqrt(GMX_REAL_EPS) * fabs(s_a->epot);
1307 /* Accept the step if the energy is lower, or if it is not significantly higher
1308 * and the line derivative is still negative.
1310 if (s_c->epot < s_a->epot || (gpc < 0 && s_c->epot < (s_a->epot + tmp)))
1312 foundlower = TRUE;
1313 /* Great, we found a better energy. Increase step for next iteration
1314 * if we are still going down, decrease it otherwise
1316 if (gpc < 0)
1318 stepsize *= 1.618034; /* The golden section */
1320 else
1322 stepsize *= 0.618034; /* 1/golden section */
1325 else
1327 /* New energy is the same or higher. We will have to do some work
1328 * to find a smaller value in the interval. Take smaller step next time!
1330 foundlower = FALSE;
1331 stepsize *= 0.618034;
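/* The scaling factors used above are the golden ratio and its inverse:
 * phi = (1 + sqrt(5)) / 2 ~= 1.618034 and 1/phi = phi - 1 ~= 0.618034,
 * so a step that is still going downhill grows the next step by phi, while an
 * overshoot or an uphill result shrinks it by 1/phi.
 */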
1335 /* OK, if we didn't find a lower value we will have to locate one now - there must
1336 * be one in the interval [a=0,c].
1337 * The same thing is valid here, though: Don't spend dozens of iterations to find
1338 * the line minimum. We try to interpolate based on the derivative at the endpoints,
1339 * and only continue until we find a lower value. In most cases this means 1-2 iterations.
1341 * I also have a safeguard for potentially really pathological functions so we never
1342 * take more than 20 steps before we give up ...
1344 * If we already found a lower value we just skip this step and continue to the update.
1346 double gpb;
1347 if (!foundlower)
1349 nminstep = 0;
1353 /* Select a new trial point.
1354 * If the derivatives at points a & c have different sign we interpolate to zero,
1355 * otherwise just do a bisection.
1357 if (gpa < 0 && gpc > 0)
1359 b = a + gpa * (a - c) / (gpc - gpa);
1361 else
1363 b = 0.5 * (a + c);
1366 /* safeguard if interpolation close to machine accuracy causes errors:
1367 * never go outside the interval
1369 if (b <= a || b >= c)
1371 b = 0.5 * (a + c);
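/* For illustration, a standalone sketch of the trial-point selection used in
 * this loop: interpolate the line derivative linearly to its zero when the
 * endpoint derivatives bracket it, otherwise bisect, and always keep the trial
 * point strictly inside (a, c).
 * \code
 *   double chooseTrialPoint(double a, double c, double gpa, double gpc)
 *   {
 *       double b = (gpa < 0 && gpc > 0) ? a + gpa * (a - c) / (gpc - gpa) // derivative zero
 *                                       : 0.5 * (a + c);                  // bisection
 *       if (b <= a || b >= c)
 *       {
 *           b = 0.5 * (a + c); // safeguard against interpolation round-off
 *       }
 *       return b;
 *   }
 * \endcode
 */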
1374 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
1376 /* Reload the old state */
1377 em_dd_partition_system(fplog, mdlog, -1, cr, top_global, inputrec, imdSession, pull_work,
1378 s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
1381 /* Take a trial step to this new point - new coords in s_b */
1382 do_em_step(cr, inputrec, mdatoms, s_min, b, &s_min->s.cg_p, s_b, constr, -1);
1384 neval++;
1385 /* Calculate energy for the trial step */
1386 energyEvaluator.run(s_b, mu_tot, vir, pres, -1, FALSE);
1388 /* p does not change within a step, but since the domain decomposition
1389 * might change, we have to use cg_p of s_b here.
1391 const rvec* pb = s_b->s.cg_p.rvec_array();
1392 const rvec* sfb = s_b->f.rvec_array();
1393 gpb = 0;
1394 for (int i = 0; i < mdatoms->homenr; i++)
1396 for (m = 0; m < DIM; m++)
1398 gpb -= pb[i][m] * sfb[i][m]; /* f is negative gradient, thus the sign */
1401 /* Sum the gradient along the line across CPUs */
1402 if (PAR(cr))
1404 gmx_sumd(1, &gpb, cr);
1407 if (debug)
1409 fprintf(debug, "CGE: EpotA %f EpotB %f EpotC %f gpb %f\n", s_a->epot, s_b->epot,
1410 s_c->epot, gpb);
1413 epot_repl = s_b->epot;
1415 /* Keep one of the intervals based on the value of the derivative at the new point */
1416 if (gpb > 0)
1418 /* Replace c endpoint with b */
1419 swap_em_state(&s_b, &s_c);
1420 c = b;
1421 gpc = gpb;
1423 else
1425 /* Replace a endpoint with b */
1426 swap_em_state(&s_b, &s_a);
1427 a = b;
1428 gpa = gpb;
1432 * Stop search as soon as we find a value smaller than the endpoints.
1433 * Never run more than 20 steps, no matter what.
1435 nminstep++;
1436 } while ((epot_repl > s_a->epot || epot_repl > s_c->epot) && (nminstep < 20));
1438 if (std::fabs(epot_repl - s_min->epot) < fabs(s_min->epot) * GMX_REAL_EPS || nminstep >= 20)
1440 /* OK. We couldn't find a significantly lower energy.
1441 * If beta==0 this was steepest descent, and then we give up.
1442 * If not, set beta=0 and restart with steepest descent before quitting.
1444 if (beta == 0.0)
1446 /* Converged */
1447 converged = TRUE;
1448 break;
1450 else
1452 /* Reset memory before giving up */
1453 beta = 0.0;
1454 continue;
1458 /* Select min energy state of A & C, put the best in B.
1460 if (s_c->epot < s_a->epot)
1462 if (debug)
1464 fprintf(debug, "CGE: C (%f) is lower than A (%f), moving C to B\n", s_c->epot,
1465 s_a->epot);
1467 swap_em_state(&s_b, &s_c);
1468 gpb = gpc;
1470 else
1472 if (debug)
1474 fprintf(debug, "CGE: A (%f) is lower than C (%f), moving A to B\n", s_a->epot,
1475 s_c->epot);
1477 swap_em_state(&s_b, &s_a);
1478 gpb = gpa;
1481 else
1483 if (debug)
1485 fprintf(debug, "CGE: Found a lower energy %f, moving C to B\n", s_c->epot);
1487 swap_em_state(&s_b, &s_c);
1488 gpb = gpc;
1491 /* new search direction */
1492 /* beta = 0 means forget all memory and restart with steepest descents. */
1493 if (nstcg && ((step % nstcg) == 0))
1495 beta = 0.0;
1497 else
1499 /* s_min->fnorm cannot be zero, because then we would have converged
1500 * and broken out.
1503 /* Polak-Ribiere update.
1504 * Change to fnorm2/fnorm2_old for Fletcher-Reeves
1506 beta = pr_beta(cr, &inputrec->opts, mdatoms, top_global, s_min, s_b);
1508 /* Limit beta to prevent oscillations */
1509 if (fabs(beta) > 5.0)
1511 beta = 0.0;
1515 /* update positions */
1516 swap_em_state(&s_min, &s_b);
1517 gpa = gpb;
1519 /* Print it if necessary */
1520 if (MASTER(cr))
1522 if (mdrunOptions.verbose)
1524 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1525 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n", step,
1526 s_min->epot, s_min->fnorm / sqrtNumAtoms, s_min->fmax, s_min->a_fmax + 1);
1527 fflush(stderr);
1529 /* Store the new (lower) energies */
1530 matrix nullBox = {};
1531 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
1532 enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
1533 nullptr, vir, pres, nullptr, mu_tot, constr);
1535 do_log = do_per_step(step, inputrec->nstlog);
1536 do_ene = do_per_step(step, inputrec->nstenergy);
1538 imdSession->fillEnergyRecord(step, TRUE);
1540 if (do_log)
1542 energyOutput.printHeader(fplog, step, step);
1544 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
1545 do_log ? fplog : nullptr, step, step, fcd, nullptr);
1548 /* Send energies and positions to the IMD client if bIMD is TRUE. */
1549 if (MASTER(cr) && imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0))
1551 imdSession->sendPositionsAndEnergies();
1554 /* Stop when the maximum force lies below tolerance.
1555 * If we have reached machine precision, converged is already set to true.
1557 converged = converged || (s_min->fmax < inputrec->em_tol);
1559 } /* End of the loop */
1561 if (converged)
1563 step--; /* we never took that last step in this case */
1565 if (s_min->fmax > inputrec->em_tol)
1567 if (MASTER(cr))
1569 warn_step(fplog, inputrec->em_tol, s_min->fmax, step - 1 == number_steps, FALSE);
1571 converged = FALSE;
1574 if (MASTER(cr))
1576 /* If we printed energy and/or logfile last step (which was the last step)
1577 * we don't have to do it again, but otherwise print the final values.
1579 if (!do_log)
1581 /* Write final value to log since we didn't do anything the last step */
1582 energyOutput.printHeader(fplog, step, step);
1584 if (!do_ene || !do_log)
1586 /* Write final energy file entries */
1587 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
1588 !do_log ? fplog : nullptr, step, step, fcd, nullptr);
1592 /* Print some stuff... */
1593 if (MASTER(cr))
1595 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
1598 /* IMPORTANT!
1599 * For accurate normal mode calculation it is imperative that we
1600 * store the last conformation into the full precision binary trajectory.
1602 * However, we should only do it if we did NOT already write this step
1603 * above (which we did if do_x or do_f was true).
1605 /* Note that with 0 < nstfout != nstxout we can end up with two frames
1606 * in the trajectory with the same step number.
1608 do_x = !do_per_step(step, inputrec->nstxout);
1609 do_f = (inputrec->nstfout > 0 && !do_per_step(step, inputrec->nstfout));
1611 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec,
1612 step, s_min, state_global, observablesHistory);
1615 if (MASTER(cr))
1617 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1618 print_converged(stderr, CG, inputrec->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
1619 print_converged(fplog, CG, inputrec->em_tol, step, converged, number_steps, s_min, sqrtNumAtoms);
1621 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
1624 finish_em(cr, outf, walltime_accounting, wcycle);
1626 /* To print the actual number of steps we needed somewhere */
1627 walltime_accounting_set_nsteps_done(walltime_accounting, step);
1631 void LegacySimulator::do_lbfgs()
1633 static const char* LBFGS = "Low-Memory BFGS Minimizer";
1634 em_state_t ems;
1635 gmx_localtop_t top;
1636 gmx_global_stat_t gstat;
1637 t_graph* graph;
1638 int ncorr, nmaxcorr, point, cp, neval, nminstep;
1639 double stepsize, step_taken, gpa, gpb, gpc, tmp, minstep;
1640 real * rho, *alpha, *p, *s, **dx, **dg;
1641 real a, b, c, maxdelta, delta;
1642 real diag, Epot0;
1643 real dgdx, dgdg, sq, yr, beta;
1644 gmx_bool converged;
1645 rvec mu_tot = { 0 };
1646 gmx_bool do_log, do_ene, do_x, do_f, foundlower, *frozen;
1647 tensor vir, pres;
1648 int start, end, number_steps;
1649 int i, k, m, n, gf, step;
1650 int mdof_flags;
1651 auto mdatoms = mdAtoms->mdatoms();
1653 GMX_LOG(mdlog.info)
1654 .asParagraph()
1655 .appendText(
1656 "Note that activating L-BFGS energy minimization via the "
1657 "integrator .mdp option and the command gmx mdrun may "
1658 "be available in a different form in a future version of GROMACS, "
1659 "e.g. gmx minimize and an .mdp option.");
1661 if (PAR(cr))
1663 gmx_fatal(FARGS, "L-BFGS minimization only supports a single rank");
1666 if (nullptr != constr)
1668 gmx_fatal(
1669 FARGS,
1670 "The combination of constraints and L-BFGS minimization is not implemented. Either "
1671 "do not use constraints, or use another minimizer (e.g. steepest descent).");
1674 n = 3 * state_global->natoms;
1675 nmaxcorr = inputrec->nbfgscorr;
1677 snew(frozen, n);
1679 snew(p, n);
1680 snew(rho, nmaxcorr);
1681 snew(alpha, nmaxcorr);
1683 snew(dx, nmaxcorr);
1684 for (i = 0; i < nmaxcorr; i++)
1686 snew(dx[i], n);
1689 snew(dg, nmaxcorr);
1690 for (i = 0; i < nmaxcorr; i++)
1692 snew(dg[i], n);
1695 step = 0;
1696 neval = 0;
1698 /* Init em */
1699 init_em(fplog, mdlog, LBFGS, cr, inputrec, imdSession, pull_work, state_global, top_global,
1700 &ems, &top, nrnb, fr, &graph, mdAtoms, &gstat, vsite, constr, nullptr);
1701 gmx_mdoutf* outf =
1702 init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier,
1703 inputrec, top_global, nullptr, wcycle, StartingBehavior::NewSimulation);
1704 gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work, nullptr,
1705 false, StartingBehavior::NewSimulation, mdModulesNotifier);
1707 start = 0;
1708 end = mdatoms->homenr;
1710 /* We need 4 working states */
1711 em_state_t s0{}, s1{}, s2{}, s3{};
1712 em_state_t* sa = &s0;
1713 em_state_t* sb = &s1;
1714 em_state_t* sc = &s2;
1715 em_state_t* last = &s3;
1716 /* Initialize by copying the state from ems (we could skip x and f here) */
1717 *sa = ems;
1718 *sb = ems;
1719 *sc = ems;
1721 /* Print to log file */
1722 print_em_start(fplog, cr, walltime_accounting, wcycle, LBFGS);
1724 do_log = do_ene = do_x = do_f = TRUE;
1726 /* Max number of steps */
1727 number_steps = inputrec->nsteps;
1729 /* Create a 3*natoms index to tell whether each degree of freedom is frozen */
1730 gf = 0;
1731 for (i = start; i < end; i++)
1733 if (mdatoms->cFREEZE)
1735 gf = mdatoms->cFREEZE[i];
1737 for (m = 0; m < DIM; m++)
1739 frozen[3 * i + m] = (inputrec->opts.nFreeze[gf][m] != 0);
1742 if (MASTER(cr))
1744 sp_header(stderr, LBFGS, inputrec->em_tol, number_steps);
1746 if (fplog)
1748 sp_header(fplog, LBFGS, inputrec->em_tol, number_steps);
1751 if (vsite)
1753 construct_vsites(vsite, state_global->x.rvec_array(), 1, nullptr, top.idef.iparams,
1754 top.idef.il, fr->ePBC, fr->bMolPBC, cr, state_global->box);
1757 /* Call the force routine and some auxiliary (neighboursearching etc.) */
1758 /* do_force always puts the charge groups in the box and shifts again
1759 * We do not unshift, so molecules are always whole
1761 neval++;
1762 EnergyEvaluator energyEvaluator{
1763 fplog, mdlog, cr, ms, top_global, &top, inputrec,
1764 imdSession, pull_work, nrnb, wcycle, gstat, vsite, constr,
1765 fcd, graph, mdAtoms, fr, runScheduleWork, enerd
1767 energyEvaluator.run(&ems, mu_tot, vir, pres, -1, TRUE);
1769 if (MASTER(cr))
1771 /* Copy stuff to the energy bin for easy printing etc. */
1772 matrix nullBox = {};
1773 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
1774 enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
1775 nullptr, vir, pres, nullptr, mu_tot, constr);
1777 energyOutput.printHeader(fplog, step, step);
1778 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, FALSE, FALSE, fplog, step,
1779 step, fcd, nullptr);
1782 /* Set the initial step.
1783 * since it will be multiplied by the non-normalized search direction
1784 * vector (force vector the first time), we scale it by the
1785 * norm of the force.
1788 if (MASTER(cr))
1790 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
1791 fprintf(stderr, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1792 fprintf(stderr, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1793 fprintf(stderr, " F-Norm = %12.5e\n", ems.fnorm / sqrtNumAtoms);
1794 fprintf(stderr, "\n");
1795 /* and copy to the log file too... */
1796 fprintf(fplog, "Using %d BFGS correction steps.\n\n", nmaxcorr);
1797 fprintf(fplog, " F-max = %12.5e on atom %d\n", ems.fmax, ems.a_fmax + 1);
1798 fprintf(fplog, " F-Norm = %12.5e\n", ems.fnorm / sqrtNumAtoms);
1799 fprintf(fplog, "\n");
1802 // Point is an index to the memory of search directions, where 0 is the first one.
1803 point = 0;
1805 // Set initial search direction to the force (-gradient), or 0 for frozen particles.
1806 real* fInit = static_cast<real*>(ems.f.rvec_array()[0]);
1807 for (i = 0; i < n; i++)
1809 if (!frozen[i])
1811 dx[point][i] = fInit[i]; /* Initial search direction */
1813 else
1815 dx[point][i] = 0;
1819 // Stepsize will be modified during the search, and actually it is not critical
1820 // (the main efficiency in the algorithm comes from changing directions), but
1821 // we still need an initial value, so estimate it as the inverse of the norm
1822 // so we take small steps where the potential fluctuates a lot.
1823 stepsize = 1.0 / ems.fnorm;
1825 /* Start the loop over BFGS steps.
1826 * Each successful step is counted, and we continue until
1827 * we either converge or reach the max number of steps.
1830 ncorr = 0;
1832 /* Set the gradient from the force */
1833 converged = FALSE;
1834 for (step = 0; (number_steps < 0 || step <= number_steps) && !converged; step++)
1837 /* Write coordinates if necessary */
1838 do_x = do_per_step(step, inputrec->nstxout);
1839 do_f = do_per_step(step, inputrec->nstfout);
1841 mdof_flags = 0;
1842 if (do_x)
1844 mdof_flags |= MDOF_X;
1847 if (do_f)
1849 mdof_flags |= MDOF_F;
1852 if (inputrec->bIMD)
1854 mdof_flags |= MDOF_IMD;
1857 mdoutf_write_to_trajectory_files(fplog, cr, outf, mdof_flags, top_global->natoms, step,
1858 static_cast<real>(step), &ems.s, state_global,
1859 observablesHistory, ems.f);
1861 /* Do the linesearching in the direction dx[point][0..(n-1)] */
1863 /* make s a pointer to current search direction - point=0 first time we get here */
1864 s = dx[point];
1866 real* xx = static_cast<real*>(ems.s.x.rvec_array()[0]);
1867 real* ff = static_cast<real*>(ems.f.rvec_array()[0]);
1869 // calculate line gradient in position A
1870 for (gpa = 0, i = 0; i < n; i++)
1872 gpa -= s[i] * ff[i];
1875 /* Calculate the minimum allowed stepsize along the line, below which the average (norm)
1876 * relative change in coordinates drops below machine precision
1878 for (minstep = 0, i = 0; i < n; i++)
1880 tmp = fabs(xx[i]);
1881 if (tmp < 1.0)
1883 tmp = 1.0;
1885 tmp = s[i] / tmp;
1886 minstep += tmp * tmp;
1888 minstep = GMX_REAL_EPS / sqrt(minstep / n);
1890 if (stepsize < minstep)
1892 converged = TRUE;
1893 break;
1896 // Before taking any steps along the line, store the old position
1897 *last = ems;
1898 real* lastx = static_cast<real*>(last->s.x.data()[0]);
1899 real* lastf = static_cast<real*>(last->f.data()[0]);
1900 Epot0 = ems.epot;
1902 *sa = ems;
1904 /* Take a step downhill.
1905 * In theory, we should find the actual minimum of the function in this
1906 * direction, somewhere along the line.
1907 * That is quite possible, but it turns out to take 5-10 function evaluations
1908 * for each line. However, we don't really need to find the exact minimum -
1909 * it is much better to start a new BFGS step in a modified direction as soon
1910 * as we are close to it. This will save a lot of energy evaluations.
1912 * In practice, we just try to take a single step.
1913 * If it worked (i.e. lowered the energy), we increase the stepsize but
1914 * continue straight to the next BFGS step without trying to find any minimum,
1915 * i.e. we change the search direction too. If the line was smooth, it is
1916 * likely we are in a smooth region, and then it makes sense to take longer
1917 * steps in the modified search direction too.
1919 * If it didn't work (higher energy), there must be a minimum somewhere between
1920 * the old position and the new one. Then we need to start by finding a lower
1921 * value before we change search direction. Since the energy was apparently
1922 * quite rough, we need to decrease the step size.
1924 * Due to the finite numerical accuracy, it turns out that it is a good idea
1925 * to accept a SMALL increase in energy, if the derivative is still downhill.
1926 * This leads to lower final energies in the tests I've done. / Erik
1929 // State "A" is the first position along the line.
1930 // reference position along line is initially zero
1931 a = 0.0;
1933 // Check stepsize first. We do not allow displacements
1934 // larger than emstep.
1938 // Pick a new position C by adding stepsize to A.
1939 c = a + stepsize;
1941 // Calculate what the largest change in any individual coordinate
1942 // would be (translation along line * gradient along line)
1943 maxdelta = 0;
1944 for (i = 0; i < n; i++)
1946 delta = c * s[i];
1947 if (delta > maxdelta)
1949 maxdelta = delta;
1952 // If any displacement is larger than the stepsize limit, reduce the step
1953 if (maxdelta > inputrec->em_stepsize)
1955 stepsize *= 0.1;
1957 } while (maxdelta > inputrec->em_stepsize);
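/* Worked example of the cap above (illustrative numbers only): with
 * em_stepsize = 0.01 and a largest search-direction component |s_i| = 5, an
 * initial stepsize of 0.01 gives maxdelta = 0.05, so the loop shrinks the
 * stepsize to 0.001 (maxdelta = 0.005) before the trial step is taken.
 */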
1959 // Take a trial step and move the coordinate array xc[] to position C
1960 real* xc = static_cast<real*>(sc->s.x.rvec_array()[0]);
1961 for (i = 0; i < n; i++)
1963 xc[i] = lastx[i] + c * s[i];
1966 neval++;
1967 // Calculate energy for the trial step in position C
1968 energyEvaluator.run(sc, mu_tot, vir, pres, step, FALSE);
1970 // Calc line gradient in position C
1971 real* fc = static_cast<real*>(sc->f.rvec_array()[0]);
1972 for (gpc = 0, i = 0; i < n; i++)
1974 gpc -= s[i] * fc[i]; /* f is negative gradient, thus the sign */
1976 /* Sum the gradient along the line across CPUs */
1977 if (PAR(cr))
1979 gmx_sumd(1, &gpc, cr);
1982 // This is the max amount of increase in energy we tolerate.
1983 // By allowing VERY small changes (close to numerical precision) we
1984 // frequently find even better (lower) final energies.
1985 tmp = std::sqrt(GMX_REAL_EPS) * fabs(sa->epot);
1987 // Accept the step if the energy is lower in the new position C (compared to A),
1988 // or if it is not significantly higher and the line derivative is still negative.
1989 foundlower = sc->epot < sa->epot || (gpc < 0 && sc->epot < (sa->epot + tmp));
1990 // If true, great, we found a better energy. We no longer try to alter the
1991 // stepsize, but simply accept this new better position. Then we select a new
1992 // search direction instead, which will be much more efficient than continuing
1993 // to take smaller steps along a line. Set fnorm based on the new C position,
1994 // which will be used to update the stepsize to 1/fnorm further down.
1996 // If false, the energy is NOT lower in point C, i.e. it will be the same
1997 // or higher than in point A. In this case it is pointless to move to point C,
1998 // so we will have to do more iterations along the same line to find a smaller
1999 // value in the interval [A=0.0,C].
2000 // Here, A is still 0.0, but that will change when we do a search in the interval
2001 // [0.0,C] below. That search we will do by interpolation or bisection rather
2002 // than with the stepsize, so no need to modify it. For the next search direction
2003 // it will be reset to 1/fnorm anyway.
2005 if (!foundlower)
2007 // OK, if we didn't find a lower value we will have to locate one now - there must
2008 // be one in the interval [a,c].
2009 // The same thing is valid here, though: Don't spend dozens of iterations to find
2010 // the line minimum. We try to interpolate based on the derivative at the endpoints,
2011 // and only continue until we find a lower value. In most cases this means 1-2 iterations.
2012 // I also have a safeguard for potentially really pathological functions so we never
2013 // take more than 20 steps before we give up.
2014 // If we already found a lower value we just skip this step and continue to the update.
2015 real fnorm = 0;
2016 nminstep = 0;
2019 // Select a new trial point B in the interval [A,C].
2020 // If the derivatives at points a & c have different sign we interpolate to zero,
2021 // otherwise just do a bisection since there might be multiple minima/maxima
2022 // inside the interval.
2023 if (gpa < 0 && gpc > 0)
2025 b = a + gpa * (a - c) / (gpc - gpa);
2027 else
2029 b = 0.5 * (a + c);
2032 /* safeguard if interpolation close to machine accuracy causes errors:
2033 * never go outside the interval
2035 if (b <= a || b >= c)
2037 b = 0.5 * (a + c);
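/* Editor's note: the choice of the new trial point B, as a standalone sketch. When the
 * directional derivatives at the endpoints bracket a sign change, the step is a secant
 * interpolation towards the zero of dE/dlambda; otherwise (and whenever round-off pushes B
 * outside [a,c]) it falls back to bisection. The function name selectTrialPoint is hypothetical.
 *
 *   static double selectTrialPoint(double a, double c, double gpa, double gpc)
 *   {
 *       double b;
 *       if (gpa < 0 && gpc > 0)
 *       {
 *           b = a + gpa * (a - c) / (gpc - gpa); // secant step towards dE/dlambda == 0
 *       }
 *       else
 *       {
 *           b = 0.5 * (a + c); // bisection: possibly several extrema in the interval
 *       }
 *       if (b <= a || b >= c)
 *       {
 *           b = 0.5 * (a + c); // safeguard: never leave the bracketing interval
 *       }
 *       return b;
 *   }
 */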
2040 // Take a trial step to point B
2041 real* xb = static_cast<real*>(sb->s.x.rvec_array()[0]);
2042 for (i = 0; i < n; i++)
2044 xb[i] = lastx[i] + b * s[i];
2047 neval++;
2048 // Calculate energy for the trial step in point B
2049 energyEvaluator.run(sb, mu_tot, vir, pres, step, FALSE);
2050 fnorm = sb->fnorm;
2052 // Calculate gradient in point B
2053 real* fb = static_cast<real*>(sb->f.rvec_array()[0]);
2054 for (gpb = 0, i = 0; i < n; i++)
2056 gpb -= s[i] * fb[i]; /* f is negative gradient, thus the sign */
2058 /* Sum the gradient along the line across CPUs */
2059 if (PAR(cr))
2061 gmx_sumd(1, &gpb, cr);
2064 // Keep one of the intervals [A,B] or [B,C] based on the value of the derivative
2065 // at the new point B, and rename the endpoints of this new interval A and C.
2066 if (gpb > 0)
2068 /* Replace c endpoint with b */
2069 c = b;
2070 /* copy state b to c */
2071 *sc = *sb;
2073 else
2075 /* Replace a endpoint with b */
2076 a = b;
2077 /* copy state b to a */
2078 *sa = *sb;
2082 * Stop search as soon as we find a value smaller than the endpoints,
2083 * or if the tolerance is below machine precision.
2084 * Never run more than 20 steps, no matter what.
2086 nminstep++;
2087 } while ((sb->epot > sa->epot || sb->epot > sc->epot) && (nminstep < 20));
2089 if (std::fabs(sb->epot - Epot0) < GMX_REAL_EPS || nminstep >= 20)
2091 /* OK. We couldn't find a significantly lower energy.
2092 * If ncorr==0 this was steepest descent, and then we give up.
2093 * If not, reset memory to restart as steepest descent before quitting.
2095 if (ncorr == 0)
2097 /* Converged */
2098 converged = TRUE;
2099 break;
2101 else
2103 /* Reset memory */
2104 ncorr = 0;
2105 /* Search in gradient direction */
2106 for (i = 0; i < n; i++)
2108 dx[point][i] = ff[i];
2110 /* Reset stepsize */
2111 stepsize = 1.0 / fnorm;
2112 continue;
2116 /* Select min energy state of A & C, put the best in xx/ff/Epot
2118 if (sc->epot < sa->epot)
2120 /* Use state C */
2121 ems = *sc;
2122 step_taken = c;
2124 else
2126 /* Use state A */
2127 ems = *sa;
2128 step_taken = a;
2131 else
2133 /* found lower */
2134 /* Use state C */
2135 ems = *sc;
2136 step_taken = c;
2139 /* Update the memory information, and calculate a new
2140 * approximation of the inverse hessian
2143 /* Have new data in Epot, xx, ff */
2144 if (ncorr < nmaxcorr)
2146 ncorr++;
2149 for (i = 0; i < n; i++)
2151 dg[point][i] = lastf[i] - ff[i];
2152 dx[point][i] *= step_taken;
2155 dgdg = 0;
2156 dgdx = 0;
2157 for (i = 0; i < n; i++)
2159 dgdg += dg[point][i] * dg[point][i];
2160 dgdx += dg[point][i] * dx[point][i];
2163 diag = dgdx / dgdg;
2165 rho[point] = 1.0 / dgdx;
2166 point++;
2168 if (point >= nmaxcorr)
2170 point = 0;
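/* Editor's note: in standard L-BFGS notation the quantities stored above are
 *
 *     s_k   = x_{k+1} - x_k          (dx[point], the displacement actually taken)
 *     y_k   = g_{k+1} - g_k          (dg[point]; since f = -g, this equals lastf - ff)
 *     rho_k = 1 / (y_k . s_k)
 *     diag  = (s_k . y_k) / (y_k . y_k)
 *
 * where diag is the usual scaling gamma_k of the initial inverse-Hessian guess
 * H_0 = gamma_k * I used in the two-loop recursion below.
 */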
2173 /* Update */
2174 for (i = 0; i < n; i++)
2176 p[i] = ff[i];
2179 cp = point;
2181 /* Recursive update. First go back over the memory points */
2182 for (k = 0; k < ncorr; k++)
2184 cp--;
2185 if (cp < 0)
2187 cp = ncorr - 1;
2190 sq = 0;
2191 for (i = 0; i < n; i++)
2193 sq += dx[cp][i] * p[i];
2196 alpha[cp] = rho[cp] * sq;
2198 for (i = 0; i < n; i++)
2200 p[i] -= alpha[cp] * dg[cp][i];
2204 for (i = 0; i < n; i++)
2206 p[i] *= diag;
2209 /* And then go forward again */
2210 for (k = 0; k < ncorr; k++)
2212 yr = 0;
2213 for (i = 0; i < n; i++)
2215 yr += p[i] * dg[cp][i];
2218 beta = rho[cp] * yr;
2219 beta = alpha[cp] - beta;
2221 for (i = 0; i < n; i++)
2223 p[i] += beta * dx[cp][i];
2226 cp++;
2227 if (cp >= ncorr)
2229 cp = 0;
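/* Editor's note: the loops above implement the classic L-BFGS two-loop recursion on a
 * circular buffer. A self-contained sketch with plain vectors (hypothetical function and
 * parameter names, assumes #include <vector>; an illustration, not the in-tree code):
 *
 *   // Returns approximately H^{-1} * f, i.e. the next search direction, given the last m
 *   // stored displacements s, gradient differences y, their rho values and the initial
 *   // inverse-Hessian scaling gamma. Index m-1 holds the most recent pair.
 *   static std::vector<double> lbfgsDirection(const std::vector<double>&              f,
 *                                             const std::vector<std::vector<double>>& s,
 *                                             const std::vector<std::vector<double>>& y,
 *                                             const std::vector<double>&              rho,
 *                                             double                                  gamma)
 *   {
 *       const int           m = static_cast<int>(s.size());
 *       const int           n = static_cast<int>(f.size());
 *       std::vector<double> p(f);
 *       std::vector<double> alpha(m);
 *       for (int k = m - 1; k >= 0; k--) // backward pass, newest to oldest
 *       {
 *           double sq = 0;
 *           for (int i = 0; i < n; i++) { sq += s[k][i] * p[i]; }
 *           alpha[k] = rho[k] * sq;
 *           for (int i = 0; i < n; i++) { p[i] -= alpha[k] * y[k][i]; }
 *       }
 *       for (int i = 0; i < n; i++) { p[i] *= gamma; } // apply H_0 = gamma * I
 *       for (int k = 0; k < m; k++) // forward pass, oldest to newest
 *       {
 *           double yr = 0;
 *           for (int i = 0; i < n; i++) { yr += y[k][i] * p[i]; }
 *           const double beta = alpha[k] - rho[k] * yr;
 *           for (int i = 0; i < n; i++) { p[i] += beta * s[k][i]; }
 *       }
 *       return p;
 *   }
 */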
2233 for (i = 0; i < n; i++)
2235 if (!frozen[i])
2237 dx[point][i] = p[i];
2239 else
2241 dx[point][i] = 0;
2245 /* Print it if necessary */
2246 if (MASTER(cr))
2248 if (mdrunOptions.verbose)
2250 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2251 fprintf(stderr, "\rStep %d, Epot=%12.6e, Fnorm=%9.3e, Fmax=%9.3e (atom %d)\n", step,
2252 ems.epot, ems.fnorm / sqrtNumAtoms, ems.fmax, ems.a_fmax + 1);
2253 fflush(stderr);
2255 /* Store the new (lower) energies */
2256 matrix nullBox = {};
2257 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(step), mdatoms->tmass,
2258 enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
2259 nullptr, vir, pres, nullptr, mu_tot, constr);
2261 do_log = do_per_step(step, inputrec->nstlog);
2262 do_ene = do_per_step(step, inputrec->nstenergy);
2264 imdSession->fillEnergyRecord(step, TRUE);
2266 if (do_log)
2268 energyOutput.printHeader(fplog, step, step);
2270 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), do_ene, FALSE, FALSE,
2271 do_log ? fplog : nullptr, step, step, fcd, nullptr);
2274 /* Send x and E to IMD client, if bIMD is TRUE. */
2275 if (imdSession->run(step, TRUE, state_global->box, state_global->x.rvec_array(), 0) && MASTER(cr))
2277 imdSession->sendPositionsAndEnergies();
2280 // Reset stepsize if we are doing more iterations
2281 stepsize = 1.0;
2283 /* Stop when the maximum force lies below tolerance.
2284 * If we have reached machine precision, converged is already set to true.
2286 converged = converged || (ems.fmax < inputrec->em_tol);
2288 } /* End of the loop */
2290 if (converged)
2292 step--; /* we never took that last step in this case */
2294 if (ems.fmax > inputrec->em_tol)
2296 if (MASTER(cr))
2298 warn_step(fplog, inputrec->em_tol, ems.fmax, step - 1 == number_steps, FALSE);
2300 converged = FALSE;
2303 /* If we printed energy and/or logfile last step (which was the last step)
2304 * we don't have to do it again, but otherwise print the final values.
2306 if (!do_log) /* Write final values to log since we didn't do anything last step */
2308 energyOutput.printHeader(fplog, step, step);
2310 if (!do_ene || !do_log) /* Write final energy file entries */
2312 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), !do_ene, FALSE, FALSE,
2313 !do_log ? fplog : nullptr, step, step, fcd, nullptr);
2316 /* Print some stuff... */
2317 if (MASTER(cr))
2319 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2322 /* IMPORTANT!
2323 * For accurate normal mode calculation it is imperative that we
2324 * store the last conformation into the full precision binary trajectory.
2326 * However, we should only do it if we did NOT already write this step
2327 * above (which we did if do_x or do_f was true).
2329 do_x = !do_per_step(step, inputrec->nstxout);
2330 do_f = !do_per_step(step, inputrec->nstfout);
2331 write_em_traj(fplog, cr, outf, do_x, do_f, ftp2fn(efSTO, nfile, fnm), top_global, inputrec,
2332 step, &ems, state_global, observablesHistory);
2334 if (MASTER(cr))
2336 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2337 print_converged(stderr, LBFGS, inputrec->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
2338 print_converged(fplog, LBFGS, inputrec->em_tol, step, converged, number_steps, &ems, sqrtNumAtoms);
2340 fprintf(fplog, "\nPerformed %d energy evaluations in total.\n", neval);
2343 finish_em(cr, outf, walltime_accounting, wcycle);
2345 /* To print the actual number of steps we needed somewhere */
2346 walltime_accounting_set_nsteps_done(walltime_accounting, step);
2349 void LegacySimulator::do_steep()
2351 const char* SD = "Steepest Descents";
2352 gmx_localtop_t top;
2353 gmx_global_stat_t gstat;
2354 t_graph* graph;
2355 real stepsize;
2356 real ustep;
2357 gmx_bool bDone, bAbort, do_x, do_f;
2358 tensor vir, pres;
2359 rvec mu_tot = { 0 };
2360 int nsteps;
2361 int count = 0;
2362 int steps_accepted = 0;
2363 auto mdatoms = mdAtoms->mdatoms();
2365 GMX_LOG(mdlog.info)
2366 .asParagraph()
2367 .appendText(
2368 "Note that activating steepest-descent energy minimization via the "
2369 "integrator .mdp option and the command gmx mdrun may "
2370 "be available in a different form in a future version of GROMACS, "
2371 "e.g. gmx minimize and an .mdp option.");
2373 /* Create 2 states on the stack and extract pointers that we will swap */
2374 em_state_t s0{}, s1{};
2375 em_state_t* s_min = &s0;
2376 em_state_t* s_try = &s1;
2378 /* Init em and store the local state in s_try */
2379 init_em(fplog, mdlog, SD, cr, inputrec, imdSession, pull_work, state_global, top_global, s_try,
2380 &top, nrnb, fr, &graph, mdAtoms, &gstat, vsite, constr, nullptr);
2381 gmx_mdoutf* outf =
2382 init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier,
2383 inputrec, top_global, nullptr, wcycle, StartingBehavior::NewSimulation);
2384 gmx::EnergyOutput energyOutput(mdoutf_get_fp_ene(outf), top_global, inputrec, pull_work, nullptr,
2385 false, StartingBehavior::NewSimulation, mdModulesNotifier);
2387 /* Print to log file */
2388 print_em_start(fplog, cr, walltime_accounting, wcycle, SD);
2390 /* Set variables for stepsize (in nm). This is the largest
2391 * step that we are going to make in any direction.
2393 ustep = inputrec->em_stepsize;
2394 stepsize = 0;
2396 /* Max number of steps */
2397 nsteps = inputrec->nsteps;
2399 if (MASTER(cr))
2401 /* Print to the screen */
2402 sp_header(stderr, SD, inputrec->em_tol, nsteps);
2404 if (fplog)
2406 sp_header(fplog, SD, inputrec->em_tol, nsteps);
2408 EnergyEvaluator energyEvaluator{
2409 fplog, mdlog, cr, ms, top_global, &top, inputrec,
2410 imdSession, pull_work, nrnb, wcycle, gstat, vsite, constr,
2411 fcd, graph, mdAtoms, fr, runScheduleWork, enerd
2414 /**** HERE STARTS THE LOOP ****
2415 * count is the counter for the number of steps
2416 * bDone will be TRUE when the minimization has converged
2417 * bAbort will be TRUE when nsteps steps have been performed or when
2418 * the stepsize becomes smaller than is reasonable for machine precision
2420 count = 0;
2421 bDone = FALSE;
2422 bAbort = FALSE;
2423 while (!bDone && !bAbort)
2425 bAbort = (nsteps >= 0) && (count == nsteps);
2427 /* set new coordinates, except for first step */
2428 bool validStep = true;
2429 if (count > 0)
2431 validStep = do_em_step(cr, inputrec, mdatoms, s_min, stepsize, &s_min->f, s_try, constr, count);
2434 if (validStep)
2436 energyEvaluator.run(s_try, mu_tot, vir, pres, count, count == 0);
2438 else
2440 // Signal constraint error during stepping with energy=inf
2441 s_try->epot = std::numeric_limits<real>::infinity();
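/* Editor's note: an infinite energy guarantees that the comparison s_try->epot < s_min->epot
 * below fails, so a step on which the constraints could not be satisfied is simply rejected
 * and the step size halved, which is how constraint failures are recovered from.
 */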
2444 if (MASTER(cr))
2446 energyOutput.printHeader(fplog, count, count);
2449 if (count == 0)
2451 s_min->epot = s_try->epot;
2454 /* Print it if necessary */
2455 if (MASTER(cr))
2457 if (mdrunOptions.verbose)
2459 fprintf(stderr, "Step=%5d, Dmax= %6.1e nm, Epot= %12.5e Fmax= %11.5e, atom= %d%c",
2460 count, ustep, s_try->epot, s_try->fmax, s_try->a_fmax + 1,
2461 ((count == 0) || (s_try->epot < s_min->epot)) ? '\n' : '\r');
2462 fflush(stderr);
2465 if ((count == 0) || (s_try->epot < s_min->epot))
2467 /* Store the new (lower) energies */
2468 matrix nullBox = {};
2469 energyOutput.addDataAtEnergyStep(false, false, static_cast<double>(count), mdatoms->tmass,
2470 enerd, nullptr, nullptr, nullptr, nullBox, nullptr,
2471 nullptr, vir, pres, nullptr, mu_tot, constr);
2473 imdSession->fillEnergyRecord(count, TRUE);
2475 const bool do_dr = do_per_step(steps_accepted, inputrec->nstdisreout);
2476 const bool do_or = do_per_step(steps_accepted, inputrec->nstorireout);
2477 energyOutput.printStepToEnergyFile(mdoutf_get_fp_ene(outf), TRUE, do_dr, do_or,
2478 fplog, count, count, fcd, nullptr);
2479 fflush(fplog);
2483 /* Now accept the step if the new energy is smaller than the previous one,
2484 * or if this is the first step.
2488 if ((count == 0) || (s_try->epot < s_min->epot))
2490 steps_accepted++;
2492 /* Test whether the convergence criterion is met... */
2493 bDone = (s_try->fmax < inputrec->em_tol);
2495 /* Copy the arrays for force, positions and energy */
2496 /* The 'Min' array always holds the coords and forces of the minimal
2497 sampled energy */
2498 swap_em_state(&s_min, &s_try);
2499 if (count > 0)
2501 ustep *= 1.2;
2504 /* Write to trn, if necessary */
2505 do_x = do_per_step(steps_accepted, inputrec->nstxout);
2506 do_f = do_per_step(steps_accepted, inputrec->nstfout);
2507 write_em_traj(fplog, cr, outf, do_x, do_f, nullptr, top_global, inputrec, count, s_min,
2508 state_global, observablesHistory);
2510 else
2512 /* If energy is not smaller make the step smaller... */
2513 ustep *= 0.5;
2515 if (DOMAINDECOMP(cr) && s_min->s.ddp_count != cr->dd->ddp_count)
2517 /* Reload the old state */
2518 em_dd_partition_system(fplog, mdlog, count, cr, top_global, inputrec, imdSession,
2519 pull_work, s_min, &top, mdAtoms, fr, vsite, constr, nrnb, wcycle);
2523 // If the force is very small after finishing minimization,
2524 // we risk dividing by zero when calculating the step size.
2525 // So we check first if the minimization has stopped before
2526 // trying to obtain a new step size.
2527 if (!bDone)
2529 /* Determine new step */
2530 stepsize = ustep / s_min->fmax;
2533 /* Check if stepsize is too small, with 1 nm as a characteristic length */
2534 #if GMX_DOUBLE
2535 if (count == nsteps || ustep < 1e-12)
2536 #else
2537 if (count == nsteps || ustep < 1e-6)
2538 #endif
2540 if (MASTER(cr))
2542 warn_step(fplog, inputrec->em_tol, s_min->fmax, count == nsteps, constr != nullptr);
2544 bAbort = TRUE;
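/* Editor's note: the adaptive step-size scheme of this integrator, collected in one place
 * (hypothetical variable names; a sketch of the logic rather than the in-tree control flow):
 *
 *   ustep *= accepted ? 1.2 : 0.5; // grow the maximum displacement after success, shrink after failure
 *   stepsize = ustep / fmax;       // scale forces so the most-strained atom moves exactly ustep (nm)
 *
 * The run aborts once ustep falls below ~1e-6 nm (1e-12 nm in double precision), since at
 * that point machine precision prevents any further decrease in energy.
 */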
2547 /* Send IMD energies and positions, if bIMD is TRUE. */
2548 if (imdSession->run(count, TRUE, state_global->box,
2549 MASTER(cr) ? state_global->x.rvec_array() : nullptr, 0)
2550 && MASTER(cr))
2552 imdSession->sendPositionsAndEnergies();
2555 count++;
2556 } /* End of the loop */
2558 /* Print some data... */
2559 if (MASTER(cr))
2561 fprintf(stderr, "\nwriting lowest energy coordinates.\n");
2563 write_em_traj(fplog, cr, outf, TRUE, inputrec->nstfout != 0, ftp2fn(efSTO, nfile, fnm),
2564 top_global, inputrec, count, s_min, state_global, observablesHistory);
2566 if (MASTER(cr))
2568 double sqrtNumAtoms = sqrt(static_cast<double>(state_global->natoms));
2570 print_converged(stderr, SD, inputrec->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
2571 print_converged(fplog, SD, inputrec->em_tol, count, bDone, nsteps, s_min, sqrtNumAtoms);
2574 finish_em(cr, outf, walltime_accounting, wcycle);
2576 /* To print the actual number of steps we needed somewhere */
2577 inputrec->nsteps = count;
2579 walltime_accounting_set_nsteps_done(walltime_accounting, count);
2582 void LegacySimulator::do_nm()
2584 const char* NM = "Normal Mode Analysis";
2585 int nnodes;
2586 gmx_localtop_t top;
2587 gmx_global_stat_t gstat;
2588 t_graph* graph;
2589 tensor vir, pres;
2590 rvec mu_tot = { 0 };
2591 rvec* dfdx;
2592 gmx_bool bSparse; /* use sparse matrix storage format */
2593 size_t sz;
2594 gmx_sparsematrix_t* sparse_matrix = nullptr;
2595 real* full_matrix = nullptr;
2597 /* added with respect to mdrun */
2598 int row, col;
2599 real der_range = 10.0 * std::sqrt(GMX_REAL_EPS);
2600 real x_min;
2601 bool bIsMaster = MASTER(cr);
2602 auto mdatoms = mdAtoms->mdatoms();
2604 GMX_LOG(mdlog.info)
2605 .asParagraph()
2606 .appendText(
2607 "Note that activating normal-mode analysis via the integrator "
2608 ".mdp option and the command gmx mdrun may "
2609 "be available in a different form in a future version of GROMACS, "
2610 "e.g. gmx normal-modes.");
2612 if (constr != nullptr)
2614 gmx_fatal(
2615 FARGS,
2616 "Constraints present with Normal Mode Analysis, this combination is not supported");
2619 gmx_shellfc_t* shellfc;
2621 em_state_t state_work{};
2623 /* Init em and store the local state in state_minimum */
2624 init_em(fplog, mdlog, NM, cr, inputrec, imdSession, pull_work, state_global, top_global,
2625 &state_work, &top, nrnb, fr, &graph, mdAtoms, &gstat, vsite, constr, &shellfc);
2626 gmx_mdoutf* outf =
2627 init_mdoutf(fplog, nfile, fnm, mdrunOptions, cr, outputProvider, mdModulesNotifier,
2628 inputrec, top_global, nullptr, wcycle, StartingBehavior::NewSimulation);
2630 std::vector<int> atom_index = get_atom_index(top_global);
2631 std::vector<gmx::RVec> fneg(atom_index.size(), { 0, 0, 0 });
2632 snew(dfdx, atom_index.size());
2634 #if !GMX_DOUBLE
2635 if (bIsMaster)
2637 fprintf(stderr,
2638 "NOTE: This version of GROMACS has been compiled in single precision,\n"
2639 " which MIGHT not be accurate enough for normal mode analysis.\n"
2640 " GROMACS now uses sparse matrix storage, so the memory requirements\n"
2641 " are fairly modest even if you recompile in double precision.\n\n");
2643 #endif
2645 /* Check if we can/should use sparse storage format.
2647 * Sparse format is only useful when the Hessian itself is sparse, which it
2648 * will be when we use a cutoff.
2649 * For small systems (n<1000) it is easier to always use full matrix format, though.
2651 if (EEL_FULL(fr->ic->eeltype) || fr->rlist == 0.0)
2653 GMX_LOG(mdlog.warning)
2654 .appendText("Non-cutoff electrostatics used, forcing full Hessian format.");
2655 bSparse = FALSE;
2657 else if (atom_index.size() < 1000)
2659 GMX_LOG(mdlog.warning)
2660 .appendTextFormatted("Small system size (N=%zu), using full Hessian format.",
2661 atom_index.size());
2662 bSparse = FALSE;
2664 else
2666 GMX_LOG(mdlog.warning).appendText("Using compressed symmetric sparse Hessian format.");
2667 bSparse = TRUE;
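/* Editor's note: memory is the motivation for the choice above. A full Hessian holds
 * (3N)^2 reals, e.g. N = 10000 atoms gives 9e8 elements (~3.6 GB in single precision),
 * whereas with cut-off interactions most of those elements are exactly zero, so the
 * compressed symmetric sparse format only needs to store the non-zero upper-triangle entries.
 */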
2670 /* Number of dimensions, based on real atoms, that is not vsites or shell */
2671 sz = DIM * atom_index.size();
2673 fprintf(stderr, "Allocating Hessian memory...\n\n");
2675 if (bSparse)
2677 sparse_matrix = gmx_sparsematrix_init(sz);
2678 sparse_matrix->compressed_symmetric = TRUE;
2680 else
2682 snew(full_matrix, sz * sz);
2685 /* Write start time and temperature */
2686 print_em_start(fplog, cr, walltime_accounting, wcycle, NM);
2688 /* fudge nr of steps to twice the nr of atoms */
2689 inputrec->nsteps = atom_index.size() * 2;
2691 if (bIsMaster)
2693 fprintf(stderr, "starting normal mode calculation '%s'\n%" PRId64 " steps.\n\n",
2694 *(top_global->name), inputrec->nsteps);
2697 nnodes = cr->nnodes;
2699 /* Make evaluate_energy do a single node force calculation */
2700 cr->nnodes = 1;
2701 EnergyEvaluator energyEvaluator{
2702 fplog, mdlog, cr, ms, top_global, &top, inputrec,
2703 imdSession, pull_work, nrnb, wcycle, gstat, vsite, constr,
2704 fcd, graph, mdAtoms, fr, runScheduleWork, enerd
2706 energyEvaluator.run(&state_work, mu_tot, vir, pres, -1, TRUE);
2707 cr->nnodes = nnodes;
2709 /* if forces are not small, warn user */
2710 get_state_f_norm_max(cr, &(inputrec->opts), mdatoms, &state_work);
2712 GMX_LOG(mdlog.warning).appendTextFormatted("Maximum force:%12.5e", state_work.fmax);
2713 if (state_work.fmax > 1.0e-3)
2715 GMX_LOG(mdlog.warning)
2716 .appendText(
2717 "The force is probably not small enough to "
2718 "ensure that you are at a minimum.\n"
2719 "Be aware that negative eigenvalues may occur\n"
2720 "when the resulting matrix is diagonalized.");
2723 /***********************************************************
2725 * Loop over all pairs in matrix
2727 * do_force called twice. Once with positive and
2728 * once with negative displacement
2730 ************************************************************/
2732 /* Steps are divided one by one over the nodes */
2733 bool bNS = true;
2734 auto state_work_x = makeArrayRef(state_work.s.x);
2735 auto state_work_f = makeArrayRef(state_work.f);
2736 for (index aid = cr->nodeid; aid < ssize(atom_index); aid += nnodes)
2738 size_t atom = atom_index[aid];
2739 for (size_t d = 0; d < DIM; d++)
2741 int64_t step = 0;
2742 int force_flags = GMX_FORCE_STATECHANGED | GMX_FORCE_ALLFORCES;
2743 double t = 0;
2745 x_min = state_work_x[atom][d];
2747 for (unsigned int dx = 0; (dx < 2); dx++)
2749 if (dx == 0)
2751 state_work_x[atom][d] = x_min - der_range;
2753 else
2755 state_work_x[atom][d] = x_min + der_range;
2758 /* Make evaluate_energy do a single node force calculation */
2759 cr->nnodes = 1;
2760 if (shellfc)
2762 /* Now is the time to relax the shells */
2763 relax_shell_flexcon(fplog, cr, ms, mdrunOptions.verbose, nullptr, step, inputrec,
2764 imdSession, pull_work, bNS, force_flags, &top, constr, enerd,
2765 fcd, state_work.s.natoms, state_work.s.x.arrayRefWithPadding(),
2766 state_work.s.v.arrayRefWithPadding(), state_work.s.box,
2767 state_work.s.lambda, &state_work.s.hist,
2768 state_work.f.arrayRefWithPadding(), vir, mdatoms, nrnb,
2769 wcycle, graph, shellfc, fr, runScheduleWork, t, mu_tot,
2770 vsite, DDBalanceRegionHandler(nullptr));
2771 bNS = false;
2772 step++;
2774 else
2776 energyEvaluator.run(&state_work, mu_tot, vir, pres, aid * 2 + dx, FALSE);
2779 cr->nnodes = nnodes;
2781 if (dx == 0)
2783 std::copy(state_work_f.begin(), state_work_f.begin() + atom_index.size(),
2784 fneg.begin());
2788 /* x is restored to original */
2789 state_work_x[atom][d] = x_min;
2791 for (size_t j = 0; j < atom_index.size(); j++)
2793 for (size_t k = 0; (k < DIM); k++)
2795 dfdx[j][k] = -(state_work_f[atom_index[j]][k] - fneg[j][k]) / (2 * der_range);
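/* Editor's note: this is the central-difference approximation of one row block of the
 * Hessian. With displacement h = der_range along coordinate (atom, d) and f = -grad(E),
 *
 *     H[row][col] = d2E / (dx_row dx_col) ~= -( f_col(x + h*e_row) - f_col(x - h*e_row) ) / (2h),
 *
 * which is exactly the expression accumulated in dfdx above (fneg holds the forces at the
 * negative displacement).
 */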
2799 if (!bIsMaster)
2801 #if GMX_MPI
2802 # define mpi_type GMX_MPI_REAL
2803 MPI_Send(dfdx[0], atom_index.size() * DIM, mpi_type, MASTER(cr), cr->nodeid,
2804 cr->mpi_comm_mygroup);
2805 #endif
2807 else
2809 for (index node = 0; (node < nnodes && aid + node < ssize(atom_index)); node++)
2811 if (node > 0)
2813 #if GMX_MPI
2814 MPI_Status stat;
2815 MPI_Recv(dfdx[0], atom_index.size() * DIM, mpi_type, node, node,
2816 cr->mpi_comm_mygroup, &stat);
2817 # undef mpi_type
2818 #endif
2821 row = (aid + node) * DIM + d;
2823 for (size_t j = 0; j < atom_index.size(); j++)
2825 for (size_t k = 0; k < DIM; k++)
2827 col = j * DIM + k;
2829 if (bSparse)
2831 if (col >= row && dfdx[j][k] != 0.0)
2833 gmx_sparsematrix_increment_value(sparse_matrix, row, col, dfdx[j][k]);
2836 else
2838 full_matrix[row * sz + col] = dfdx[j][k];
2845 if (mdrunOptions.verbose && fplog)
2847 fflush(fplog);
2850 /* write progress */
2851 if (bIsMaster && mdrunOptions.verbose)
2853 fprintf(stderr, "\rFinished step %d out of %td",
2854 std::min<int>(atom + nnodes, atom_index.size()), ssize(atom_index));
2855 fflush(stderr);
2859 if (bIsMaster)
2861 fprintf(stderr, "\n\nWriting Hessian...\n");
2862 gmx_mtxio_write(ftp2fn(efMTX, nfile, fnm), sz, sz, full_matrix, sparse_matrix);
2865 finish_em(cr, outf, walltime_accounting, wcycle);
2867 walltime_accounting_set_nsteps_done(walltime_accounting, atom_index.size() * 2);
2870 } // namespace gmx