More const correctness
[gromacs.git] / src / gromacs / mdlib / force.cpp
blob 416c95c3cd5225fdc943b512c964f363d733ccc6
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "force.h"

#include "config.h"

#include <assert.h>
#include <string.h>

#include <cmath>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald.h"
#include "gromacs/ewald/long-range-correction.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/listed-forces/listed-forces.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/forcerec-threading.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
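
/*! \brief Run neighbor searching for the group cut-off scheme.
 *
 * Initializes the neighbor list on the first call, then calls
 * search_neighbours(), and optionally dumps the resulting list
 * when fr->ns->dump_nl is set.
 */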
void ns(FILE               *fp,
        t_forcerec         *fr,
        matrix              box,
        const gmx_groups_t *groups,
        gmx_localtop_t     *top,
        const t_mdatoms    *md,
        const t_commrec    *cr,
        t_nrnb             *nrnb,
        gmx_bool            bFillGrid)
{
    int nsearch;

    if (!fr->ns->nblist_initialized)
    {
        init_neighbor_list(fp, fr, md->homenr);
    }

    nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
                                bFillGrid);
    if (debug)
    {
        fprintf(debug, "nsearch = %d\n", nsearch);
    }

    /* Check whether we have to do dynamic load balancing */
    /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
       count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
       &(top->idef),opts->ngener);
     */
    if (fr->ns->dump_nl > 0)
    {
        dump_nblist(fp, cr, fr, fr->ns->dump_nl);
    }
}
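
/*! \brief Zero the Ewald correction output (energies, dV/dlambda and
 * virials) accumulated by one thread. */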
static void clearEwaldThreadOutput(ewald_corr_thread_t *ewc_t)
{
    ewc_t->Vcorr_q        = 0;
    ewc_t->Vcorr_lj       = 0;
    ewc_t->dvdl[efptCOUL] = 0;
    ewc_t->dvdl[efptVDW]  = 0;
    clear_mat(ewc_t->vir_q);
    clear_mat(ewc_t->vir_lj);
}
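
/*! \brief Reduce the Ewald correction output of \p nthreads threads
 * into element 0 of \p ewc_t, summing the energies and dV/dlambda
 * terms and adding the virial matrices. */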
static void reduceEwaldThreadOuput(int nthreads, ewald_corr_thread_t *ewc_t)
{
    ewald_corr_thread_t &dest = ewc_t[0];

    for (int t = 1; t < nthreads; t++)
    {
        dest.Vcorr_q        += ewc_t[t].Vcorr_q;
        dest.Vcorr_lj       += ewc_t[t].Vcorr_lj;
        dest.dvdl[efptCOUL] += ewc_t[t].dvdl[efptCOUL];
        dest.dvdl[efptVDW]  += ewc_t[t].dvdl[efptVDW];
        m_add(dest.vir_q, ewc_t[t].vir_q, dest.vir_q);
        m_add(dest.vir_lj, ewc_t[t].vir_lj, dest.vir_lj);
    }
}
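
/*! \brief Compute the lower-level force contributions.
 *
 * Handles group-scheme non-bonded interactions, listed interactions,
 * and long-range electrostatics and/or LJ-PME together with their
 * short-range corrections, accumulating energies and dV/dlambda into
 * \p enerd and reciprocal-space virial contributions into
 * \p forceWithVirial.
 */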
void do_force_lowlevel(t_forcerec           *fr,
                       const t_inputrec     *ir,
                       const t_idef         *idef,
                       const t_commrec      *cr,
                       const gmx_multisim_t *ms,
                       t_nrnb               *nrnb,
                       gmx_wallcycle_t       wcycle,
                       const t_mdatoms      *md,
                       rvec                  x[],
                       history_t            *hist,
                       rvec                 *forceForUseWithShiftForces,
                       gmx::ForceWithVirial *forceWithVirial,
                       gmx_enerdata_t       *enerd,
                       t_fcdata             *fcd,
                       matrix                box,
                       t_lambda             *fepvals,
                       real                 *lambda,
                       const t_graph        *graph,
                       const t_blocka       *excl,
                       rvec                  mu_tot[],
                       int                   flags,
                       float                *cycles_pme)
{
    int     i, j;
    int     donb_flags;
    int     pme_flags;
    t_pbc   pbc;
    real    dvdl_dum[efptNR], dvdl_nb[efptNR];

#if GMX_MPI
    double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
        dvdl_dum[i] = 0;
    }

    /* do QMMM first if requested */
    if (fr->bQMMM)
    {
        enerd->term[F_EQM] = calculate_QMMM(cr, forceForUseWithShiftForces, fr);
    }

    /* Call the short range functions all in one go. */

#if GMX_MPI
    /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
    if (TAKETIME)
    {
        MPI_Barrier(cr->mpi_comm_mygroup);
        t0 = MPI_Wtime();
    }
#endif

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(ir, fr, box, md, x, forceForUseWithShiftForces, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR], nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;
    }

    /* We only do non-bonded calculation with the group scheme here, the Verlet
     * calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
    {
        donb_flags = 0;
        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
        {
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        }
        if (flags & GMX_FORCE_ENERGY)
        {
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
        }

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                     &enerd->grpp, nrnb,
                     lambda, dvdl_nb, -1, -1, donb_flags);
        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energy contributions.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        {
            for (i = 0; i < enerd->n_lambda; i++)
            {
                real lam_i[efptNR];

                for (j = 0; j < efptNR; j++)
                {
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
                }
                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
            }
        }
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }
#if GMX_MPI
    if (TAKETIME)
    {
        t1          = MPI_Wtime();
        fr->t_fnbf += t1-t0;
    }
#endif

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
    }

    if (fepvals->sc_alpha != 0)
    {
        /* even though the Coulomb part is linear, we already added it, because we
           need to go through the vdw calculation anyway */
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    }
    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT
     * go when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */

    /* Here sometimes we would not need to shift with NBFonly,
     * but we do so anyhow for consistency of the returned coordinates.
     */
    if (graph)
    {
        shift_self(graph, box, x);
        if (TRICLINIC(box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }

    /* Check whether we need to do listed interactions or correct for exclusions */
    if (fr->bMolPBC &&
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->ic->eeltype) || EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)))
    {
        /* TODO There are no electrostatics methods that require this
           transformation, when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr,
                   TRUE, box);
    }

    do_force_listed(wcycle, box, ir->fepvals, cr, ms,
                    idef, (const rvec *) x, hist,
                    forceForUseWithShiftForces, forceWithVirial,
                    fr, &pbc, graph, enerd, nrnb, lambda, md, fcd,
                    DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr,
                    flags);

    *cycles_pme = 0;
    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
    {
        int  status = 0;
        real Vlr_q  = 0, Vlr_lj = 0;

        /* We reduce all virial, dV/dlambda and energy contributions, except
         * for the reciprocal energies (Vlr_q, Vlr_lj) into the same struct.
         */
        ewald_corr_thread_t &ewaldOutput = fr->ewc_t[0];
        clearEwaldThreadOutput(&ewaldOutput);

        if (EEL_PME_EWALD(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)
            {
                int nthreads, t;

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                if (fr->n_tpi > 0)
                {
                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
                }

                nthreads = fr->nthread_ewc;
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)
                {
                    try
                    {
                        ewald_corr_thread_t &ewc_t = fr->ewc_t[t];
                        if (t > 0)
                        {
                            clearEwaldThreadOutput(&ewc_t);
                        }

                        /* Threading is only supported with the Verlet cut-off
                         * scheme and then only single particle forces (no
                         * exclusion forces) are calculated, so we can store
                         * the forces in the normal, single forceWithVirial->force_ array.
                         */
                        ewald_LRcorrection(md->homenr, cr, nthreads, t, fr, ir,
                                           md->chargeA, md->chargeB,
                                           md->sqrt_c6A, md->sqrt_c6B,
                                           md->sigmaA, md->sigmaB,
                                           md->sigma3A, md->sigma3B,
                                           md->nChargePerturbed || md->nTypePerturbed,
                                           ir->cutoff_scheme != ecutsVERLET,
                                           excl, x, box, mu_tot,
                                           ir->ewald_geometry,
                                           ir->epsilon_surface,
                                           as_rvec_array(forceWithVirial->force_.data()),
                                           ewc_t.vir_q, ewc_t.vir_lj,
                                           &ewc_t.Vcorr_q, &ewc_t.Vcorr_lj,
                                           lambda[efptCOUL], lambda[efptVDW],
                                           &ewc_t.dvdl[efptCOUL], &ewc_t.dvdl[efptVDW]);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                }
                if (nthreads > 1)
                {
                    reduceEwaldThreadOuput(nthreads, fr->ewc_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }

            if (EEL_PME_EWALD(fr->ic->eeltype) && fr->n_tpi == 0)
            {
                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                ewaldOutput.Vcorr_q +=
                    ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
                                            &ewaldOutput.dvdl[efptCOUL],
                                            ewaldOutput.vir_q);
            }
            if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) &&
                thisRankHasDuty(cr, DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))
            {
                /* Do reciprocal PME for Coulomb and/or LJ. */
                assert(fr->n_tpi >= 0);
                if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
                {
                    pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;

                    if (flags & GMX_FORCE_FORCES)
                    {
                        pme_flags |= GMX_PME_CALC_F;
                    }
                    if (flags & GMX_FORCE_VIRIAL)
                    {
                        pme_flags |= GMX_PME_CALC_ENER_VIR;
                    }
                    if (fr->n_tpi > 0)
                    {
                        /* We don't calculate f, but we do want the potential */
                        pme_flags |= GMX_PME_CALC_POT;
                    }

                    /* With domain decomposition we close the CPU side load
                     * balancing region here, because PME does global
                     * communication that acts as a global barrier.
                     */
                    if (DOMAINDECOMP(cr))
                    {
                        ddCloseBalanceRegionCpu(cr->dd);
                    }

                    wallcycle_start(wcycle, ewcPMEMESH);
                    status = gmx_pme_do(fr->pmedata,
                                        0, md->homenr - fr->n_tpi,
                                        x,
                                        as_rvec_array(forceWithVirial->force_.data()),
                                        md->chargeA, md->chargeB,
                                        md->sqrt_c6A, md->sqrt_c6B,
                                        md->sigmaA, md->sigmaB,
                                        box, cr,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
                                        nrnb, wcycle,
                                        ewaldOutput.vir_q, ewaldOutput.vir_lj,
                                        &Vlr_q, &Vlr_lj,
                                        lambda[efptCOUL], lambda[efptVDW],
                                        &ewaldOutput.dvdl[efptCOUL],
                                        &ewaldOutput.dvdl[efptVDW],
                                        pme_flags);
                    *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
                    if (status != 0)
                    {
                        gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
                    }

                    /* We should try to do as little computation after
                     * this as possible, because parallel PME synchronizes
                     * the nodes, so we want all load imbalance of the
                     * rest of the force calculation to be before the PME
                     * call. DD load balancing is done on the whole time
                     * of the force call (without PME).
                     */
                }
                if (fr->n_tpi > 0)
                {
                    if (EVDW_PME(ir->vdwtype))
                    {
                        gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
                    }
                    /* Determine the PME grid energy of the test molecule
                     * with the PME grid potential of the other charges.
                     */
                    gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
                                        x + md->homenr - fr->n_tpi,
                                        md->chargeA + md->homenr - fr->n_tpi,
                                        &Vlr_q);
                }
            }
        }
        if (!EEL_PME(fr->ic->eeltype) && EEL_PME_EWALD(fr->ic->eeltype))
        {
            Vlr_q = do_ewald(ir, x, as_rvec_array(forceWithVirial->force_.data()),
                             md->chargeA, md->chargeB,
                             box, cr, md->homenr,
                             ewaldOutput.vir_q, fr->ic->ewaldcoeff_q,
                             lambda[efptCOUL], &ewaldOutput.dvdl[efptCOUL],
                             fr->ewald_table);
        }

        /* Note that with separate PME nodes we get the real energies later */
        forceWithVirial->addVirialContribution(ewaldOutput.vir_q);
        forceWithVirial->addVirialContribution(ewaldOutput.vir_lj);
        enerd->dvdl_lin[efptCOUL] += ewaldOutput.dvdl[efptCOUL];
        enerd->dvdl_lin[efptVDW]  += ewaldOutput.dvdl[efptVDW];
        enerd->term[F_COUL_RECIP]  = Vlr_q + ewaldOutput.Vcorr_q;
        enerd->term[F_LJ_RECIP]    = Vlr_lj + ewaldOutput.Vcorr_lj;

        if (debug)
        {
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
                    Vlr_q, ewaldOutput.Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", ewaldOutput.vir_q, DIM);
            pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
                    Vlr_lj, ewaldOutput.Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", ewaldOutput.vir_lj, DIM);
        }
    }
    else
    {
        /* Is there a reaction-field exclusion correction needed?
         * With the Verlet scheme, exclusion forces are calculated
         * in the non-bonded kernel.
         */
        if (ir->cutoff_scheme != ecutsVERLET && EEL_RF(fr->ic->eeltype))
        {
            real dvdl_rf_excl      = 0;
            enerd->term[F_RF_EXCL] =
                RF_excl_correction(fr, graph, md, excl, x, forceForUseWithShiftForces,
                                   fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);

            enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
        }
    }

    if (debug)
    {
        print_nrnb(debug, nrnb);
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t2 = MPI_Wtime();
        MPI_Barrier(cr->mpi_comm_mygroup);
        t3 = MPI_Wtime();
        fr->t_wait += t3-t2;
        if (fr->timesteps == 11)
        {
            char buf[22];
            fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
                    cr->nodeid, gmx_step_str(fr->timesteps, buf),
                    100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
                    (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
        }
        fr->timesteps++;
    }
#endif

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
    }
}
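
/*! \brief Initialize \p enerd: zero all energy and dV/dlambda terms,
 * allocate the ngener*ngener group-pair energy matrices and, when
 * n_lambda > 0, the foreign-lambda energy array (sized 1 + n_lambda,
 * with element 0 holding the current lambda state). */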
void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
{
    int i, n2;

    for (i = 0; i < F_NRE; i++)
    {
        enerd->term[i]         = 0;
        enerd->foreign_term[i] = 0;
    }

    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0;
        enerd->dvdl_nonlin[i] = 0;
    }

    n2 = ngener*ngener;
    if (debug)
    {
        fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
    }
    enerd->grpp.nener         = n2;
    enerd->foreign_grpp.nener = n2;
    for (i = 0; (i < egNR); i++)
    {
        snew(enerd->grpp.ener[i], n2);
        snew(enerd->foreign_grpp.ener[i], n2);
    }

    if (n_lambda)
    {
        enerd->n_lambda = 1 + n_lambda;
        snew(enerd->enerpart_lambda, enerd->n_lambda);
    }
    else
    {
        enerd->n_lambda = 0;
    }
}
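
/*! \brief Free the group-pair energy matrices of \p enerd and, if
 * allocated, its foreign-lambda energy array. */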
void destroy_enerdata(gmx_enerdata_t *enerd)
{
    int i;

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->grpp.ener[i]);
    }

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->foreign_grpp.ener[i]);
    }

    if (enerd->n_lambda)
    {
        sfree(enerd->enerpart_lambda);
    }
}
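
/*! \brief Return the sum of the \p n values in \p v. */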
static real sum_v(int n, real v[])
{
    real t;
    int  i;

    t = 0.0;
    for (i = 0; (i < n); i++)
    {
        t = t + v[i];
    }

    return t;
}
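
/*! \brief Sum the group-pair energies of \p grpp into the matching
 * terms of \p epot and accumulate the total potential energy in
 * epot[F_EPOT], excluding the restraint-violation terms. */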
void sum_epot(gmx_grppairener_t *grpp, real *epot)
{
    int i;

    /* Accumulate energies */
    epot[F_COUL_SR] = sum_v(grpp->nener, grpp->ener[egCOULSR]);
    epot[F_LJ]      = sum_v(grpp->nener, grpp->ener[egLJSR]);
    epot[F_LJ14]    = sum_v(grpp->nener, grpp->ener[egLJ14]);
    epot[F_COUL14]  = sum_v(grpp->nener, grpp->ener[egCOUL14]);

    /* lattice part of LR doesn't belong to any group
     * and has been added earlier
     */
    epot[F_BHAM]    = sum_v(grpp->nener, grpp->ener[egBHAMSR]);

    epot[F_EPOT] = 0;
    for (i = 0; (i < F_EPOT); i++)
    {
        if (i != F_DISRESVIOL && i != F_ORIRESDEV)
        {
            epot[F_EPOT] += epot[i];
        }
    }
}
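
/*! \brief Sum the dV/dlambda contributions into the (optionally
 * separate) dV/dlambda energy terms and add the linear dV/dlambda
 * contributions to the foreign-lambda energy differences, i.e. for
 * each foreign state i and lambda component j:
 *
 *   enerpart_lambda[i+1] += (all_lambda[j][i] - lambda[j]) * dvdl_lin[j]
 */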
void sum_dhdl(gmx_enerdata_t *enerd, gmx::ArrayRef<const real> lambda, t_lambda *fepvals)
{
    int    index;
    double dlam;

    enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW]; /* include dispersion correction */
    enerd->term[F_DVDL]       = 0.0;
    for (int i = 0; i < efptNR; i++)
    {
        if (fepvals->separate_dvdl[i])
        {
            /* could this be done more readably/compactly? */
            switch (i)
            {
                case (efptMASS):
                    index = F_DKDL;
                    break;
                case (efptCOUL):
                    index = F_DVDL_COUL;
                    break;
                case (efptVDW):
                    index = F_DVDL_VDW;
                    break;
                case (efptBONDED):
                    index = F_DVDL_BONDED;
                    break;
                case (efptRESTRAINT):
                    index = F_DVDL_RESTRAINT;
                    break;
                default:
                    index = F_DVDL;
                    break;
            }
            enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
        else
        {
            enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
    }
    /* Notes on the foreign lambda free energy difference evaluation:
     * Adding the potential and ekin terms that depend linearly on lambda
     * as delta lam * dvdl to the energy differences is exact.
     * For the constraints this is not exact, but we have no other option
     * without literally changing the lengths and reevaluating the energies at each step.
     * (try to remedy this post 4.6 - MRS)
     */
    if (fepvals->separate_dvdl[efptBONDED])
    {
        enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
    }
    else
    {
        enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
    }
    enerd->term[F_DVDL_CONSTR] = 0;

    for (int i = 0; i < fepvals->n_lambda; i++)
    {
        /* note we are iterating over fepvals here!
           For the current lam, dlam = 0 automatically,
           so we don't need to add anything to the
           enerd->enerpart_lambda[0] */

        /* we don't need to worry about dvdl_lin contributions to dE at
           current lambda, because the contributions to the current
           lambda are automatically zeroed */

        for (size_t j = 0; j < lambda.size(); j++)
        {
            /* Note that this loop is over all dhdl components, not just the separated ones */
            dlam = (fepvals->all_lambda[j][i] - lambda[j]);
            enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
            if (debug)
            {
                fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
                        fepvals->all_lambda[j][i], efpt_names[j],
                        (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
                        dlam, enerd->dvdl_lin[j]);
            }
        }
    }
}
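
/*! \brief Zero all foreign-lambda group-pair energies and foreign
 * potential energy terms of \p enerd. */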
void reset_foreign_enerdata(gmx_enerdata_t *enerd)
{
    int i, j;

    /* First reset all foreign energy components. Foreign energies are always
       evaluated on neighbor search steps */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->foreign_grpp.ener[i][j] = 0.0;
        }
    }

    /* potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->foreign_term[i] = 0.0;
    }
}
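
/*! \brief Zero all group-pair energies, dV/dlambda terms, potential
 * energy terms and foreign-lambda energies of \p enerd. */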
void reset_enerdata(gmx_enerdata_t *enerd)
{
    int i, j;

    /* First reset all energy components. */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->grpp.ener[i][j] = 0.0;
        }
    }
    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0.0;
        enerd->dvdl_nonlin[i] = 0.0;
    }

    /* Normal potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->term[i] = 0.0;
    }
    enerd->term[F_DVDL]           = 0.0;
    enerd->term[F_DVDL_COUL]      = 0.0;
    enerd->term[F_DVDL_VDW]       = 0.0;
    enerd->term[F_DVDL_BONDED]    = 0.0;
    enerd->term[F_DVDL_RESTRAINT] = 0.0;
    enerd->term[F_DKDL]           = 0.0;
    if (enerd->n_lambda > 0)
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            enerd->enerpart_lambda[i] = 0.0;
        }
    }
    /* reset foreign energy data - separate function since we also call it elsewhere */
    reset_foreign_enerdata(enerd);
}