[gromacs.git] / src / gromacs / mdlib / force.cpp
blob 0850c34602761acea93a0cb0dbefc9b779875531
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "force.h"

#include "config.h"

#include <assert.h>
#include <string.h>

#include <cmath>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald.h"
#include "gromacs/ewald/long-range-correction.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/listed-forces/listed-forces.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/forcerec-threading.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"
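
/* Group-scheme neighbor searching: initializes the neighbor lists on first
 * use, runs the search, and optionally dumps the resulting lists for
 * debugging.
 */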
void ns(FILE *fp,
        t_forcerec *fr,
        matrix box,
        gmx_groups_t *groups,
        gmx_localtop_t *top,
        t_mdatoms *md,
        const t_commrec *cr,
        t_nrnb *nrnb,
        gmx_bool bFillGrid)
{
    int     nsearch;

    if (!fr->ns->nblist_initialized)
    {
        init_neighbor_list(fp, fr, md->homenr);
    }

    nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
                                bFillGrid);
    if (debug)
    {
        fprintf(debug, "nsearch = %d\n", nsearch);
    }

    /* Check whether we have to do dynamic load balancing */
    /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
       count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
       &(top->idef),opts->ngener);
     */
    if (fr->ns->dump_nl > 0)
    {
        dump_nblist(fp, cr, fr, fr->ns->dump_nl);
    }
}
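
/* Zero the energy, dV/dlambda and virial accumulators of one Ewald-correction
 * thread-output struct.
 */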
static void clearEwaldThreadOutput(ewald_corr_thread_t *ewc_t)
{
    ewc_t->Vcorr_q        = 0;
    ewc_t->Vcorr_lj       = 0;
    ewc_t->dvdl[efptCOUL] = 0;
    ewc_t->dvdl[efptVDW]  = 0;
    clear_mat(ewc_t->vir_q);
    clear_mat(ewc_t->vir_lj);
}
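
/* Reduce the Ewald-correction output of all threads into the struct of
 * thread 0.
 */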
static void reduceEwaldThreadOuput(int nthreads, ewald_corr_thread_t *ewc_t)
{
    ewald_corr_thread_t &dest = ewc_t[0];

    for (int t = 1; t < nthreads; t++)
    {
        dest.Vcorr_q        += ewc_t[t].Vcorr_q;
        dest.Vcorr_lj       += ewc_t[t].Vcorr_lj;
        dest.dvdl[efptCOUL] += ewc_t[t].dvdl[efptCOUL];
        dest.dvdl[efptVDW]  += ewc_t[t].dvdl[efptVDW];
        m_add(dest.vir_q, ewc_t[t].vir_q, dest.vir_q);
        m_add(dest.vir_lj, ewc_t[t].vir_lj, dest.vir_lj);
    }
}
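
/* Compute the lower-level contributions to forces and energies: group-scheme
 * non-bonded interactions, walls, listed interactions, and the long-range
 * electrostatics/LJ-PME parts with their corrections, accumulating energies
 * and dV/dlambda into enerd.
 */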
void do_force_lowlevel(t_forcerec *fr, t_inputrec *ir,
                       t_idef *idef, const t_commrec *cr,
                       const gmx_multisim_t *ms,
                       t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                       t_mdatoms *md,
                       rvec x[], history_t *hist,
                       rvec *forceForUseWithShiftForces,
                       gmx::ForceWithVirial *forceWithVirial,
                       gmx_enerdata_t *enerd,
                       t_fcdata *fcd,
                       matrix box,
                       t_lambda *fepvals,
                       real *lambda,
                       t_graph *graph,
                       t_blocka *excl,
                       rvec mu_tot[],
                       int flags,
                       float *cycles_pme)
{
    int     i, j;
    int     donb_flags;
    int     pme_flags;
    t_pbc   pbc;
    real    dvdl_dum[efptNR], dvdl_nb[efptNR];

#if GMX_MPI
    double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
        dvdl_dum[i] = 0;
    }

    /* do QMMM first if requested */
    if (fr->bQMMM)
    {
        enerd->term[F_EQM] = calculate_QMMM(cr, forceForUseWithShiftForces, fr);
    }

    /* Call the short range functions all in one go. */

#if GMX_MPI
    /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
    if (TAKETIME)
    {
        MPI_Barrier(cr->mpi_comm_mygroup);
        t0 = MPI_Wtime();
    }
#endif

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(ir, fr, box, md, x, forceForUseWithShiftForces, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR], nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;
    }

    /* We only do non-bonded calculation with group scheme here, the verlet
     * calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
    {
        donb_flags = 0;
        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
        {
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        }
        if (flags & GMX_FORCE_ENERGY)
        {
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
        }

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                     &enerd->grpp, nrnb,
                     lambda, dvdl_nb, -1, -1, donb_flags);

        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energy contributions.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        {
            for (i = 0; i < enerd->n_lambda; i++)
            {
                real lam_i[efptNR];

                for (j = 0; j < efptNR; j++)
                {
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
                }
                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
            }
        }
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t1          = MPI_Wtime();
        fr->t_fnbf += t1-t0;
    }
#endif

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
    }

    if (fepvals->sc_alpha != 0)
    {
        /* even though coulomb part is linear, we already added it, because we
           need to go through the vdw calculation anyway */
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    }

    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT
     * go when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */

    /* Here sometimes we would not need to shift with NBFonly,
     * but we do so anyhow for consistency of the returned coordinates.
     */
    if (graph)
    {
        shift_self(graph, box, x);
        if (TRICLINIC(box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }

    /* Check whether we need to do listed interactions or correct for exclusions */
    if (fr->bMolPBC &&
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->ic->eeltype) || EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)))
    {
        /* TODO There are no electrostatics methods that require this
           transformation, when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr,
                   TRUE, box);
    }

    do_force_listed(wcycle, box, ir->fepvals, cr, ms,
                    idef, (const rvec *) x, hist,
                    forceForUseWithShiftForces, forceWithVirial,
                    fr, &pbc, graph, enerd, nrnb, lambda, md, fcd,
                    DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr,
                    flags);

    *cycles_pme = 0;

    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
    {
        int  status = 0;
        real Vlr_q  = 0, Vlr_lj = 0;

        /* We reduce all virial, dV/dlambda and energy contributions, except
         * for the reciprocal energies (Vlr_q, Vlr_lj) into the same struct.
         */
        ewald_corr_thread_t &ewaldOutput = fr->ewc_t[0];
        clearEwaldThreadOutput(&ewaldOutput);

        if (EEL_PME_EWALD(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)
            {
                int nthreads, t;

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                if (fr->n_tpi > 0)
                {
                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
                }

                nthreads = fr->nthread_ewc;
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)
                {
                    try
                    {
                        ewald_corr_thread_t &ewc_t = fr->ewc_t[t];
                        if (t > 0)
                        {
                            clearEwaldThreadOutput(&ewc_t);
                        }

                        /* Threading is only supported with the Verlet cut-off
                         * scheme and then only single particle forces (no
                         * exclusion forces) are calculated, so we can store
                         * the forces in the normal, single forceWithVirial->force_ array.
                         */
                        ewald_LRcorrection(md->homenr, cr, nthreads, t, fr, ir,
                                           md->chargeA, md->chargeB,
                                           md->sqrt_c6A, md->sqrt_c6B,
                                           md->sigmaA, md->sigmaB,
                                           md->sigma3A, md->sigma3B,
                                           md->nChargePerturbed || md->nTypePerturbed,
                                           ir->cutoff_scheme != ecutsVERLET,
                                           excl, x, box, mu_tot,
                                           ir->ewald_geometry,
                                           ir->epsilon_surface,
                                           as_rvec_array(forceWithVirial->force_.data()),
                                           ewc_t.vir_q, ewc_t.vir_lj,
                                           &ewc_t.Vcorr_q, &ewc_t.Vcorr_lj,
                                           lambda[efptCOUL], lambda[efptVDW],
                                           &ewc_t.dvdl[efptCOUL], &ewc_t.dvdl[efptVDW]);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                }
                if (nthreads > 1)
                {
                    reduceEwaldThreadOuput(nthreads, fr->ewc_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }

            if (EEL_PME_EWALD(fr->ic->eeltype) && fr->n_tpi == 0)
            {
                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                ewaldOutput.Vcorr_q +=
                    ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
                                            &ewaldOutput.dvdl[efptCOUL],
                                            ewaldOutput.vir_q);
            }

            if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) &&
                thisRankHasDuty(cr, DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))
            {
                /* Do reciprocal PME for Coulomb and/or LJ. */
                assert(fr->n_tpi >= 0);
                if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
                {
                    pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;

                    if (flags & GMX_FORCE_FORCES)
                    {
                        pme_flags |= GMX_PME_CALC_F;
                    }
                    if (flags & GMX_FORCE_VIRIAL)
                    {
                        pme_flags |= GMX_PME_CALC_ENER_VIR;
                    }
                    if (fr->n_tpi > 0)
                    {
                        /* We don't calculate f, but we do want the potential */
                        pme_flags |= GMX_PME_CALC_POT;
                    }

                    /* With domain decomposition we close the CPU side load
                     * balancing region here, because PME does global
                     * communication that acts as a global barrier.
                     */
                    if (DOMAINDECOMP(cr))
                    {
                        ddCloseBalanceRegionCpu(cr->dd);
                    }

                    wallcycle_start(wcycle, ewcPMEMESH);
                    status = gmx_pme_do(fr->pmedata,
                                        0, md->homenr - fr->n_tpi,
                                        x,
                                        as_rvec_array(forceWithVirial->force_.data()),
                                        md->chargeA, md->chargeB,
                                        md->sqrt_c6A, md->sqrt_c6B,
                                        md->sigmaA, md->sigmaB,
                                        box, cr,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
                                        nrnb, wcycle,
                                        ewaldOutput.vir_q, ewaldOutput.vir_lj,
                                        &Vlr_q, &Vlr_lj,
                                        lambda[efptCOUL], lambda[efptVDW],
                                        &ewaldOutput.dvdl[efptCOUL],
                                        &ewaldOutput.dvdl[efptVDW],
                                        pme_flags);
                    *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
                    if (status != 0)
                    {
                        gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
                    }

                    /* We should try to do as little computation after
                     * this as possible, because parallel PME synchronizes
                     * the nodes, so we want all load imbalance of the
                     * rest of the force calculation to be before the PME
                     * call.  DD load balancing is done on the whole time
                     * of the force call (without PME).
                     */

                    if (fr->n_tpi > 0)
                    {
                        if (EVDW_PME(ir->vdwtype))
                        {
                            gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
                        }
                        /* Determine the PME grid energy of the test molecule
                         * with the PME grid potential of the other charges.
                         */
                        gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
                                            x + md->homenr - fr->n_tpi,
                                            md->chargeA + md->homenr - fr->n_tpi,
                                            &Vlr_q);
                    }
                }
            }
        }

        if (!EEL_PME(fr->ic->eeltype) && EEL_PME_EWALD(fr->ic->eeltype))
        {
            Vlr_q = do_ewald(ir, x, as_rvec_array(forceWithVirial->force_.data()),
                             md->chargeA, md->chargeB,
                             box, cr, md->homenr,
                             ewaldOutput.vir_q, fr->ic->ewaldcoeff_q,
                             lambda[efptCOUL], &ewaldOutput.dvdl[efptCOUL],
                             fr->ewald_table);
        }

        /* Note that with separate PME nodes we get the real energies later */
        forceWithVirial->addVirialContribution(ewaldOutput.vir_q);
        forceWithVirial->addVirialContribution(ewaldOutput.vir_lj);
        enerd->dvdl_lin[efptCOUL] += ewaldOutput.dvdl[efptCOUL];
        enerd->dvdl_lin[efptVDW]  += ewaldOutput.dvdl[efptVDW];
        enerd->term[F_COUL_RECIP]  = Vlr_q + ewaldOutput.Vcorr_q;
        enerd->term[F_LJ_RECIP]    = Vlr_lj + ewaldOutput.Vcorr_lj;

        if (debug)
        {
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
                    Vlr_q, ewaldOutput.Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", ewaldOutput.vir_q, DIM);
            pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
                    Vlr_lj, ewaldOutput.Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", ewaldOutput.vir_lj, DIM);
        }
    }
    else
    {
        /* Is there a reaction-field exclusion correction needed?
         * With the Verlet scheme, exclusion forces are calculated
         * in the non-bonded kernel.
         */
        if (ir->cutoff_scheme != ecutsVERLET && EEL_RF(fr->ic->eeltype))
        {
            real dvdl_rf_excl      = 0;
            enerd->term[F_RF_EXCL] =
                RF_excl_correction(fr, graph, md, excl, x, forceForUseWithShiftForces,
                                   fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);

            enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
        }
    }

    if (debug)
    {
        print_nrnb(debug, nrnb);
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t2 = MPI_Wtime();
        MPI_Barrier(cr->mpi_comm_mygroup);
        t3 = MPI_Wtime();
        fr->t_wait += t3-t2;
        if (fr->timesteps == 11)
        {
            char buf[22];
            fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
                    cr->nodeid, gmx_step_str(fr->timesteps, buf),
                    100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
                    (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
        }
        fr->timesteps++;
    }
#endif

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
    }
}
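
/* Allocate and zero the per-group-pair energy matrices and the
 * foreign-lambda energy buffer in enerd for ngener energy groups and
 * n_lambda foreign lambda values.
 */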
void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
{
    int i, n2;

    for (i = 0; i < F_NRE; i++)
    {
        enerd->term[i]         = 0;
        enerd->foreign_term[i] = 0;
    }

    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0;
        enerd->dvdl_nonlin[i] = 0;
    }

    n2 = ngener*ngener;
    if (debug)
    {
        fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
    }
    enerd->grpp.nener         = n2;
    enerd->foreign_grpp.nener = n2;
    for (i = 0; (i < egNR); i++)
    {
        snew(enerd->grpp.ener[i], n2);
        snew(enerd->foreign_grpp.ener[i], n2);
    }

    if (n_lambda)
    {
        enerd->n_lambda = 1 + n_lambda;
        snew(enerd->enerpart_lambda, enerd->n_lambda);
    }
    else
    {
        enerd->n_lambda = 0;
    }
}
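
/* Free the group-pair energy arrays and foreign-lambda buffer allocated by
 * init_enerdata().
 */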
void destroy_enerdata(gmx_enerdata_t *enerd)
{
    int i;

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->grpp.ener[i]);
    }

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->foreign_grpp.ener[i]);
    }

    if (enerd->n_lambda)
    {
        sfree(enerd->enerpart_lambda);
    }
}
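
/* Return the sum of the first n entries of v */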
static real sum_v(int n, real v[])
{
    real t;
    int  i;

    t = 0.0;
    for (i = 0; (i < n); i++)
    {
        t = t + v[i];
    }

    return t;
}
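
/* Sum the group-pair energies into the corresponding potential-energy terms
 * and accumulate the total potential energy in epot[F_EPOT].
 */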
void sum_epot(gmx_grppairener_t *grpp, real *epot)
{
    int i;

    /* Accumulate energies */
    epot[F_COUL_SR] = sum_v(grpp->nener, grpp->ener[egCOULSR]);
    epot[F_LJ]      = sum_v(grpp->nener, grpp->ener[egLJSR]);
    epot[F_LJ14]    = sum_v(grpp->nener, grpp->ener[egLJ14]);
    epot[F_COUL14]  = sum_v(grpp->nener, grpp->ener[egCOUL14]);

    /* lattice part of LR doesn't belong to any group
     * and has been added earlier
     */
    epot[F_BHAM]    = sum_v(grpp->nener, grpp->ener[egBHAMSR]);

    epot[F_EPOT] = 0;
    for (i = 0; (i < F_EPOT); i++)
    {
        if (i != F_DISRESVIOL && i != F_ORIRESDEV)
        {
            epot[F_EPOT] += epot[i];
        }
    }
}
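
/* Sum the dV/dlambda contributions into the (optionally separated)
 * dH/dlambda energy terms and accumulate the foreign-lambda energy
 * differences.
 */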
void sum_dhdl(gmx_enerdata_t *enerd, gmx::ArrayRef<const real> lambda, t_lambda *fepvals)
{
    int    index;
    double dlam;

    enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW]; /* include dispersion correction */
    enerd->term[F_DVDL]       = 0.0;
    for (int i = 0; i < efptNR; i++)
    {
        if (fepvals->separate_dvdl[i])
        {
            /* could this be done more readably/compactly? */
            switch (i)
            {
                case (efptMASS):
                    index = F_DKDL;
                    break;
                case (efptCOUL):
                    index = F_DVDL_COUL;
                    break;
                case (efptVDW):
                    index = F_DVDL_VDW;
                    break;
                case (efptBONDED):
                    index = F_DVDL_BONDED;
                    break;
                case (efptRESTRAINT):
                    index = F_DVDL_RESTRAINT;
                    break;
                default:
                    index = F_DVDL;
                    break;
            }
            enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
        else
        {
            enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
    }

    /* Notes on the foreign lambda free energy difference evaluation:
     * Adding the potential and ekin terms that depend linearly on lambda
     * as delta lam * dvdl to the energy differences is exact.
     * For the constraints this is not exact, but we have no other option
     * without literally changing the lengths and reevaluating the energies at each step.
     * (try to remedy this post 4.6 - MRS)
     */
    if (fepvals->separate_dvdl[efptBONDED])
    {
        enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
    }
    else
    {
        enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
    }
    enerd->term[F_DVDL_CONSTR] = 0;

    for (int i = 0; i < fepvals->n_lambda; i++)
    {
        /* note we are iterating over fepvals here!
           For the current lam, dlam = 0 automatically,
           so we don't need to add anything to the
           enerd->enerpart_lambda[0] */

        /* we don't need to worry about dvdl_lin contributions to dE at
           current lambda, because the contributions to the current
           lambda are automatically zeroed */

        for (size_t j = 0; j < lambda.size(); j++)
        {
            /* Note that this loop is over all dhdl components, not just the separated ones */
            dlam = (fepvals->all_lambda[j][i] - lambda[j]);
            enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
            if (debug)
            {
                fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
                        fepvals->all_lambda[j][i], efpt_names[j],
                        (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
                        dlam, enerd->dvdl_lin[j]);
            }
        }
    }
}
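
/* Zero all foreign-lambda group-pair energies and potential-energy terms */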
void reset_foreign_enerdata(gmx_enerdata_t *enerd)
{
    int i, j;

    /* First reset all foreign energy components. Foreign energies always called on
       neighbor search steps */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->foreign_grpp.ener[i][j] = 0.0;
        }
    }

    /* potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->foreign_term[i] = 0.0;
    }
}
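
/* Zero all group-pair energies, dV/dlambda accumulators and potential-energy
 * terms, including the foreign-lambda data.
 */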
void reset_enerdata(gmx_enerdata_t *enerd)
{
    int i, j;

    /* First reset all energy components. */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->grpp.ener[i][j] = 0.0;
        }
    }
    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0.0;
        enerd->dvdl_nonlin[i] = 0.0;
    }

    /* Normal potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->term[i] = 0.0;
    }
    enerd->term[F_DVDL]           = 0.0;
    enerd->term[F_DVDL_COUL]      = 0.0;
    enerd->term[F_DVDL_VDW]       = 0.0;
    enerd->term[F_DVDL_BONDED]    = 0.0;
    enerd->term[F_DVDL_RESTRAINT] = 0.0;
    enerd->term[F_DKDL]           = 0.0;
    if (enerd->n_lambda > 0)
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            enerd->enerpart_lambda[i] = 0.0;
        }
    }
    /* reset foreign energy data - separate function since we also call it elsewhere */
    reset_foreign_enerdata(enerd);
}