/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "force.h"

#include "config.h"

#include <assert.h>
#include <string.h>

#include <cmath>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald.h"
#include "gromacs/ewald/long-range-correction.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/listed-forces/listed-forces.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/forcerec-threading.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/rf_util.h"
#include "gromacs/mdlib/wall.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
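
/* Performs a neighbor search for the group cut-off scheme: initializes the
 * neighbor list on first use, then calls search_neighbours() and optionally
 * dumps the resulting neighbor lists for debugging.
 */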
void ns(FILE               *fp,
        t_forcerec         *fr,
        matrix              box,
        const gmx_groups_t *groups,
        gmx_localtop_t     *top,
        const t_mdatoms    *md,
        const t_commrec    *cr,
        t_nrnb             *nrnb,
        gmx_bool            bFillGrid)
{
    int nsearch;

    if (!fr->ns->nblist_initialized)
    {
        init_neighbor_list(fp, fr, md->homenr);
    }

    nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
                                bFillGrid);
    if (debug)
    {
        fprintf(debug, "nsearch = %d\n", nsearch);
    }

    /* Check whether we have to do dynamic load balancing */
    /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
       count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
       &(top->idef),opts->ngener);
     */
    if (fr->ns->dump_nl > 0)
    {
        dump_nblist(fp, cr, fr, fr->ns->dump_nl);
    }
}
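
/* Zeroes the energy, dV/dlambda and virial accumulators of one per-thread
 * Ewald correction output buffer. */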
static void clearEwaldThreadOutput(ewald_corr_thread_t *ewc_t)
{
    ewc_t->Vcorr_q        = 0;
    ewc_t->Vcorr_lj       = 0;
    ewc_t->dvdl[efptCOUL] = 0;
    ewc_t->dvdl[efptVDW]  = 0;
    clear_mat(ewc_t->vir_q);
    clear_mat(ewc_t->vir_lj);
}
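
/* Reduces the per-thread Ewald correction outputs of threads 1..nthreads-1
 * into the buffer of thread 0, which then holds the combined result. */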
static void reduceEwaldThreadOuput(int nthreads, ewald_corr_thread_t *ewc_t)
{
    ewald_corr_thread_t &dest = ewc_t[0];

    for (int t = 1; t < nthreads; t++)
    {
        dest.Vcorr_q        += ewc_t[t].Vcorr_q;
        dest.Vcorr_lj       += ewc_t[t].Vcorr_lj;
        dest.dvdl[efptCOUL] += ewc_t[t].dvdl[efptCOUL];
        dest.dvdl[efptVDW]  += ewc_t[t].dvdl[efptVDW];
        m_add(dest.vir_q,  ewc_t[t].vir_q,  dest.vir_q);
        m_add(dest.vir_lj, ewc_t[t].vir_lj, dest.vir_lj);
    }
}
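
/* Computes the lower-level force contributions for one step: walls,
 * group-scheme non-bonded interactions, listed (bonded) interactions and
 * long-range electrostatics/LJ-PME with their corrections, accumulating
 * energies, dV/dlambda and virial contributions into enerd and the
 * force/virial outputs. */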
void do_force_lowlevel(t_forcerec           *fr,
                       const t_inputrec     *ir,
                       const t_idef         *idef,
                       const t_commrec      *cr,
                       const gmx_multisim_t *ms,
                       t_nrnb               *nrnb,
                       gmx_wallcycle_t       wcycle,
                       const t_mdatoms      *md,
                       rvec                  x[],
                       history_t            *hist,
                       rvec                 *forceForUseWithShiftForces,
                       gmx::ForceWithVirial *forceWithVirial,
                       gmx_enerdata_t       *enerd,
                       t_fcdata             *fcd,
                       matrix                box,
                       t_lambda             *fepvals,
                       real                 *lambda,
                       const t_graph        *graph,
                       const t_blocka       *excl,
                       rvec                  mu_tot[],
                       int                   flags,
                       float                *cycles_pme)
{
    int     i, j;
    int     donb_flags;
    int     pme_flags;
    t_pbc   pbc;
    real    dvdl_dum[efptNR], dvdl_nb[efptNR];

#if GMX_MPI
    double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
        dvdl_dum[i] = 0;
    }

    /* do QMMM first if requested */
    if (fr->bQMMM)
    {
        enerd->term[F_EQM] = calculate_QMMM(cr, forceForUseWithShiftForces, fr);
    }

    /* Call the short range functions all in one go. */

#if GMX_MPI
    /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
    if (TAKETIME)
    {
        MPI_Barrier(cr->mpi_comm_mygroup);
        t0 = MPI_Wtime();
    }
#endif

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(ir, fr, box, md, x, forceForUseWithShiftForces, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR], nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;
    }

    /* We only do non-bonded calculation with group scheme here, the verlet
     * calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
    {
        donb_flags = 0;
        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
        {
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        }
        if (flags & GMX_FORCE_ENERGY)
        {
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
        }

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                     &enerd->grpp, nrnb,
                     lambda, dvdl_nb, -1, -1, donb_flags);

        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energy contributions.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        {
            for (i = 0; i < enerd->n_lambda; i++)
            {
                real lam_i[efptNR];

                for (j = 0; j < efptNR; j++)
                {
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
                }
                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
            }
        }

        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t1          = MPI_Wtime();
        fr->t_fnbf += t1-t0;
    }
#endif

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
    }

    if (fepvals->sc_alpha != 0)
    {
        /* Even though the Coulomb part is linear, we have already added it,
           because we need to go through the vdw calculation anyway. */
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    }

    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT
     * go when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */

    /* Here sometimes we would not need to shift with NBFonly,
     * but we do so anyhow for consistency of the returned coordinates.
     */
    if (graph)
    {
        shift_self(graph, box, x);
        if (TRICLINIC(box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }

    /* Check whether we need to do listed interactions or correct for exclusions */
    if (fr->bMolPBC &&
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->ic->eeltype) || EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)))
    {
        /* TODO There are no electrostatics methods that require this
           transformation, when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr,
                   TRUE, box);
    }

    do_force_listed(wcycle, box, ir->fepvals, cr, ms,
                    idef, (const rvec *) x, hist,
                    forceForUseWithShiftForces, forceWithVirial,
                    fr, &pbc, graph, enerd, nrnb, lambda, md, fcd,
                    DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr,
                    flags);

    *cycles_pme = 0;

    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
    {
        int  status = 0;
        real Vlr_q  = 0, Vlr_lj = 0;

        /* We reduce all virial, dV/dlambda and energy contributions, except
         * for the reciprocal energies (Vlr_q, Vlr_lj) into the same struct.
         */
        ewald_corr_thread_t &ewaldOutput = fr->ewc_t[0];
        clearEwaldThreadOutput(&ewaldOutput);

        if (EEL_PME_EWALD(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)
            {
                int nthreads, t;

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                if (fr->n_tpi > 0)
                {
                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
                }

                nthreads = fr->nthread_ewc;
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)
                {
                    try
                    {
                        ewald_corr_thread_t &ewc_t = fr->ewc_t[t];
                        if (t > 0)
                        {
                            clearEwaldThreadOutput(&ewc_t);
                        }

                        /* Threading is only supported with the Verlet cut-off
                         * scheme and then only single particle forces (no
                         * exclusion forces) are calculated, so we can store
                         * the forces in the normal, single forceWithVirial->force_ array.
                         */
                        ewald_LRcorrection(md->homenr, cr, nthreads, t, fr, ir,
                                           md->chargeA, md->chargeB,
                                           md->sqrt_c6A, md->sqrt_c6B,
                                           md->sigmaA, md->sigmaB,
                                           md->sigma3A, md->sigma3B,
                                           md->nChargePerturbed || md->nTypePerturbed,
                                           ir->cutoff_scheme != ecutsVERLET,
                                           excl, x, box, mu_tot,
                                           ir->ewald_geometry,
                                           ir->epsilon_surface,
                                           as_rvec_array(forceWithVirial->force_.data()),
                                           ewc_t.vir_q, ewc_t.vir_lj,
                                           &ewc_t.Vcorr_q, &ewc_t.Vcorr_lj,
                                           lambda[efptCOUL], lambda[efptVDW],
                                           &ewc_t.dvdl[efptCOUL], &ewc_t.dvdl[efptVDW]);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                }
                if (nthreads > 1)
                {
                    reduceEwaldThreadOuput(nthreads, fr->ewc_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }

            if (EEL_PME_EWALD(fr->ic->eeltype) && fr->n_tpi == 0)
            {
                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                ewaldOutput.Vcorr_q +=
                    ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
                                            &ewaldOutput.dvdl[efptCOUL],
                                            ewaldOutput.vir_q);
            }

            if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) &&
                thisRankHasDuty(cr, DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))
            {
                /* Do reciprocal PME for Coulomb and/or LJ. */
                assert(fr->n_tpi >= 0);
                if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
                {
                    pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;

                    if (flags & GMX_FORCE_FORCES)
                    {
                        pme_flags |= GMX_PME_CALC_F;
                    }
                    if (flags & GMX_FORCE_VIRIAL)
                    {
                        pme_flags |= GMX_PME_CALC_ENER_VIR;
                    }
                    if (fr->n_tpi > 0)
                    {
                        /* We don't calculate f, but we do want the potential */
                        pme_flags |= GMX_PME_CALC_POT;
                    }

                    /* With domain decomposition we close the CPU side load
                     * balancing region here, because PME does global
                     * communication that acts as a global barrier.
                     */
                    if (DOMAINDECOMP(cr))
                    {
                        ddCloseBalanceRegionCpu(cr->dd);
                    }

                    wallcycle_start(wcycle, ewcPMEMESH);
                    status = gmx_pme_do(fr->pmedata,
                                        0, md->homenr - fr->n_tpi,
                                        x,
                                        as_rvec_array(forceWithVirial->force_.data()),
                                        md->chargeA, md->chargeB,
                                        md->sqrt_c6A, md->sqrt_c6B,
                                        md->sigmaA, md->sigmaB,
                                        box, cr,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
                                        nrnb, wcycle,
                                        ewaldOutput.vir_q, ewaldOutput.vir_lj,
                                        &Vlr_q, &Vlr_lj,
                                        lambda[efptCOUL], lambda[efptVDW],
                                        &ewaldOutput.dvdl[efptCOUL],
                                        &ewaldOutput.dvdl[efptVDW],
                                        pme_flags);
                    *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
                    if (status != 0)
                    {
                        gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
                    }

                    /* We should try to do as little computation after
                     * this as possible, because parallel PME synchronizes
                     * the nodes, so we want all load imbalance of the
                     * rest of the force calculation to be before the PME
                     * call.  DD load balancing is done on the whole time
                     * of the force call (without PME).
                     */
                }
                if (fr->n_tpi > 0)
                {
                    if (EVDW_PME(ir->vdwtype))
                    {
                        gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
                    }
                    /* Determine the PME grid energy of the test molecule
                     * with the PME grid potential of the other charges.
                     */
                    gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
                                        x + md->homenr - fr->n_tpi,
                                        md->chargeA + md->homenr - fr->n_tpi,
                                        &Vlr_q);
                }
            }
        }

        if (!EEL_PME(fr->ic->eeltype) && EEL_PME_EWALD(fr->ic->eeltype))
        {
            Vlr_q = do_ewald(ir, x, as_rvec_array(forceWithVirial->force_.data()),
                             md->chargeA, md->chargeB,
                             box, cr, md->homenr,
                             ewaldOutput.vir_q, fr->ic->ewaldcoeff_q,
                             lambda[efptCOUL], &ewaldOutput.dvdl[efptCOUL],
                             fr->ewald_table);
        }

        /* Note that with separate PME nodes we get the real energies later */
        forceWithVirial->addVirialContribution(ewaldOutput.vir_q);
        forceWithVirial->addVirialContribution(ewaldOutput.vir_lj);
        enerd->dvdl_lin[efptCOUL] += ewaldOutput.dvdl[efptCOUL];
        enerd->dvdl_lin[efptVDW]  += ewaldOutput.dvdl[efptVDW];
        enerd->term[F_COUL_RECIP]  = Vlr_q + ewaldOutput.Vcorr_q;
        enerd->term[F_LJ_RECIP]    = Vlr_lj + ewaldOutput.Vcorr_lj;

        if (debug)
        {
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
                    Vlr_q, ewaldOutput.Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", ewaldOutput.vir_q, DIM);
            pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
                    Vlr_lj, ewaldOutput.Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", ewaldOutput.vir_lj, DIM);
        }
    }
    else
    {
        /* Is there a reaction-field exclusion correction needed?
         * With the Verlet scheme, exclusion forces are calculated
         * in the non-bonded kernel.
         */
        if (ir->cutoff_scheme != ecutsVERLET && EEL_RF(fr->ic->eeltype))
        {
            real dvdl_rf_excl      = 0;
            enerd->term[F_RF_EXCL] =
                RF_excl_correction(fr, graph, md, excl, DOMAINDECOMP(cr),
                                   x, forceForUseWithShiftForces,
                                   fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);

            enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
        }
    }

    if (debug)
    {
        print_nrnb(debug, nrnb);
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t2 = MPI_Wtime();
        MPI_Barrier(cr->mpi_comm_mygroup);
        t3 = MPI_Wtime();
        fr->t_wait += t3-t2;
        if (fr->timesteps == 11)
        {
            char buf[22];
            fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
                    cr->nodeid, gmx_step_str(fr->timesteps, buf),
                    100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
                    (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
        }
        fr->timesteps++;
    }
#endif

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
    }
}
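
/* Allocates and zeroes the energy data: the ngener*ngener group-pair energy
 * matrices for both normal and foreign-lambda energies, the dV/dlambda
 * accumulators, and the foreign-lambda energy array when n_lambda > 0. */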
void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
{
    int i, n2;

    for (i = 0; i < F_NRE; i++)
    {
        enerd->term[i]         = 0;
        enerd->foreign_term[i] = 0;
    }

    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0;
        enerd->dvdl_nonlin[i] = 0;
    }

    n2 = ngener*ngener;
    if (debug)
    {
        fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
    }
    enerd->grpp.nener         = n2;
    enerd->foreign_grpp.nener = n2;
    for (i = 0; (i < egNR); i++)
    {
        snew(enerd->grpp.ener[i], n2);
        snew(enerd->foreign_grpp.ener[i], n2);
    }

    if (n_lambda)
    {
        enerd->n_lambda = 1 + n_lambda;
        snew(enerd->enerpart_lambda, enerd->n_lambda);
    }
    else
    {
        enerd->n_lambda = 0;
    }
}
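
/* Frees the group-pair energy matrices and the foreign-lambda energy array
 * allocated by init_enerdata(). */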
void destroy_enerdata(gmx_enerdata_t *enerd)
{
    int i;

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->grpp.ener[i]);
    }
    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->foreign_grpp.ener[i]);
    }
    if (enerd->n_lambda)
    {
        sfree(enerd->enerpart_lambda);
    }
}
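
/* Returns the sum of the first n elements of v. */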
static real sum_v(int n, real v[])
{
    real t;
    int  i;

    t = 0.0;
    for (i = 0; (i < n); i++)
    {
        t = t + v[i];
    }

    return t;
}
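
/* Accumulates the group-pair energies into the per-term potential energy
 * array and sums all potential terms (excluding the distance and orientation
 * restraint violation measures) into epot[F_EPOT]. */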
void sum_epot(gmx_grppairener_t *grpp, real *epot)
{
    int i;

    /* Accumulate energies */
    epot[F_COUL_SR] = sum_v(grpp->nener, grpp->ener[egCOULSR]);
    epot[F_LJ]      = sum_v(grpp->nener, grpp->ener[egLJSR]);
    epot[F_LJ14]    = sum_v(grpp->nener, grpp->ener[egLJ14]);
    epot[F_COUL14]  = sum_v(grpp->nener, grpp->ener[egCOUL14]);

    /* The lattice part of the long-range energy doesn't belong to any group
     * and has been added earlier.
     */
    epot[F_BHAM]    = sum_v(grpp->nener, grpp->ener[egBHAMSR]);

    epot[F_EPOT] = 0;
    for (i = 0; (i < F_EPOT); i++)
    {
        if (i != F_DISRESVIOL && i != F_ORIRESDEV)
        {
            epot[F_EPOT] += epot[i];
        }
    }
}
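
/* Assembles the dV/dlambda terms (either into separate energy terms or into
 * the combined F_DVDL term) and adds the linear dV/dlambda contributions to
 * the foreign-lambda energy differences. */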
void sum_dhdl(gmx_enerdata_t *enerd, gmx::ArrayRef<const real> lambda, t_lambda *fepvals)
{
    int    index;
    double dlam;

    enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW];  /* include dispersion correction */
    enerd->term[F_DVDL]       = 0.0;
    for (int i = 0; i < efptNR; i++)
    {
        if (fepvals->separate_dvdl[i])
        {
            /* could this be done more readably/compactly? */
            switch (i)
            {
                case (efptMASS):
                    index = F_DKDL;
                    break;
                case (efptCOUL):
                    index = F_DVDL_COUL;
                    break;
                case (efptVDW):
                    index = F_DVDL_VDW;
                    break;
                case (efptBONDED):
                    index = F_DVDL_BONDED;
                    break;
                case (efptRESTRAINT):
                    index = F_DVDL_RESTRAINT;
                    break;
                default:
                    index = F_DVDL;
                    break;
            }
            enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
        else
        {
            enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
    }

    /* Notes on the foreign lambda free energy difference evaluation:
     * Adding the potential and ekin terms that depend linearly on lambda
     * as delta lam * dvdl to the energy differences is exact.
     * For the constraints this is not exact, but we have no other option
     * without literally changing the lengths and reevaluating the energies at each step.
     * (try to remedy this post 4.6 - MRS)
     */
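    /* Concretely, the loop over foreign lambdas below adds, for each
     * component j:
     *   enerpart_lambda[i+1] += (all_lambda[j][i] - lambda[j]) * dvdl_lin[j]
     * i.e. the linear part of the energy difference for foreign lambda i.
     */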
    if (fepvals->separate_dvdl[efptBONDED])
    {
        enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
    }
    else
    {
        enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
    }
    enerd->term[F_DVDL_CONSTR] = 0;

    for (int i = 0; i < fepvals->n_lambda; i++)
    {
        /* note we are iterating over fepvals here!
           For the current lam, dlam = 0 automatically,
           so we don't need to add anything to the
           enerd->enerpart_lambda[0] */

        /* we don't need to worry about dvdl_lin contributions to dE at
           current lambda, because the contributions to the current
           lambda are automatically zeroed */

        for (size_t j = 0; j < lambda.size(); j++)
        {
            /* Note that this loop is over all dhdl components, not just the separated ones */
            dlam = (fepvals->all_lambda[j][i] - lambda[j]);
            enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
            if (debug)
            {
                fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
                        fepvals->all_lambda[j][i], efpt_names[j],
                        (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
                        dlam, enerd->dvdl_lin[j]);
            }
        }
    }
}
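
/* Zeroes the foreign-lambda group-pair energies and the foreign potential
 * energy terms. */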
void reset_foreign_enerdata(gmx_enerdata_t *enerd)
{
    int  i, j;

    /* First reset all foreign energy components; the foreign energies are
       always recomputed on neighbor search steps. */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->foreign_grpp.ener[i][j] = 0.0;
        }
    }

    /* potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->foreign_term[i] = 0.0;
    }
}
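
/* Zeroes all group-pair energies, dV/dlambda accumulators, potential energy
 * terms and foreign-lambda energy data before a new evaluation. */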
void reset_enerdata(gmx_enerdata_t *enerd)
{
    int i, j;

    /* First reset all energy components. */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->grpp.ener[i][j] = 0.0;
        }
    }
    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0.0;
        enerd->dvdl_nonlin[i] = 0.0;
    }

    /* Normal potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->term[i] = 0.0;
    }
    enerd->term[F_DVDL]           = 0.0;
    enerd->term[F_DVDL_COUL]      = 0.0;
    enerd->term[F_DVDL_VDW]       = 0.0;
    enerd->term[F_DVDL_BONDED]    = 0.0;
    enerd->term[F_DVDL_RESTRAINT] = 0.0;
    enerd->term[F_DKDL]           = 0.0;
    if (enerd->n_lambda > 0)
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            enerd->enerpart_lambda[i] = 0.0;
        }
    }
    /* reset foreign energy data - separate function since we also call it elsewhere */
    reset_foreign_enerdata(enerd);
}