Fix small leak in forcerec
[gromacs.git] / src / gromacs / mdlib / forcerec.cpp
blob 6ad3940728610c6721371311ab322daea376708f
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 #include "gmxpre.h"
39 #include "forcerec.h"
41 #include "config.h"
43 #include <cassert>
44 #include <cmath>
45 #include <cstdlib>
46 #include <cstring>
48 #include <algorithm>
50 #include "gromacs/commandline/filenm.h"
51 #include "gromacs/compat/make_unique.h"
52 #include "gromacs/domdec/domdec.h"
53 #include "gromacs/domdec/domdec_struct.h"
54 #include "gromacs/ewald/ewald.h"
55 #include "gromacs/ewald/ewald-utils.h"
56 #include "gromacs/fileio/filetypes.h"
57 #include "gromacs/gmxlib/network.h"
58 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
59 #include "gromacs/gpu_utils/gpu_utils.h"
60 #include "gromacs/hardware/hw_info.h"
61 #include "gromacs/listed-forces/gpubonded.h"
62 #include "gromacs/listed-forces/manage-threading.h"
63 #include "gromacs/listed-forces/pairs.h"
64 #include "gromacs/math/functions.h"
65 #include "gromacs/math/units.h"
66 #include "gromacs/math/utilities.h"
67 #include "gromacs/math/vec.h"
68 #include "gromacs/mdlib/force.h"
69 #include "gromacs/mdlib/forcerec-threading.h"
70 #include "gromacs/mdlib/gmx_omp_nthreads.h"
71 #include "gromacs/mdlib/md_support.h"
72 #include "gromacs/mdlib/nb_verlet.h"
73 #include "gromacs/mdlib/nbnxn_atomdata.h"
74 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
75 #include "gromacs/mdlib/nbnxn_grid.h"
76 #include "gromacs/mdlib/nbnxn_internal.h"
77 #include "gromacs/mdlib/nbnxn_search.h"
78 #include "gromacs/mdlib/nbnxn_simd.h"
79 #include "gromacs/mdlib/nbnxn_tuning.h"
80 #include "gromacs/mdlib/nbnxn_util.h"
81 #include "gromacs/mdlib/ns.h"
82 #include "gromacs/mdlib/qmmm.h"
83 #include "gromacs/mdlib/rf_util.h"
84 #include "gromacs/mdlib/sim_util.h"
85 #include "gromacs/mdlib/wall.h"
86 #include "gromacs/mdtypes/commrec.h"
87 #include "gromacs/mdtypes/fcdata.h"
88 #include "gromacs/mdtypes/group.h"
89 #include "gromacs/mdtypes/iforceprovider.h"
90 #include "gromacs/mdtypes/inputrec.h"
91 #include "gromacs/mdtypes/md_enums.h"
92 #include "gromacs/pbcutil/ishift.h"
93 #include "gromacs/pbcutil/pbc.h"
94 #include "gromacs/simd/simd.h"
95 #include "gromacs/tables/forcetable.h"
96 #include "gromacs/topology/mtop_util.h"
97 #include "gromacs/trajectory/trajectoryframe.h"
98 #include "gromacs/utility/cstringutil.h"
99 #include "gromacs/utility/exceptions.h"
100 #include "gromacs/utility/fatalerror.h"
101 #include "gromacs/utility/gmxassert.h"
102 #include "gromacs/utility/logger.h"
103 #include "gromacs/utility/physicalnodecommunicator.h"
104 #include "gromacs/utility/pleasecite.h"
105 #include "gromacs/utility/smalloc.h"
106 #include "gromacs/utility/strconvert.h"
108 #include "nbnxn_gpu_jit_support.h"
110 t_forcerec *mk_forcerec()
112 t_forcerec *fr;
114 snew(fr, 1);
116 return fr;
119 static real *mk_nbfp(const gmx_ffparams_t *idef, gmx_bool bBHAM)
121 real *nbfp;
122 int i, j, k, atnr;
124 atnr = idef->atnr;
125 if (bBHAM)
127 snew(nbfp, 3*atnr*atnr);
128 for (i = k = 0; (i < atnr); i++)
130 for (j = 0; (j < atnr); j++, k++)
132 BHAMA(nbfp, atnr, i, j) = idef->iparams[k].bham.a;
133 BHAMB(nbfp, atnr, i, j) = idef->iparams[k].bham.b;
134 /* nbfp now includes the 6.0 derivative prefactor */
135 BHAMC(nbfp, atnr, i, j) = idef->iparams[k].bham.c*6.0;
139 else
141 snew(nbfp, 2*atnr*atnr);
142 for (i = k = 0; (i < atnr); i++)
144 for (j = 0; (j < atnr); j++, k++)
146 /* nbfp now includes the 6.0/12.0 derivative prefactors */
147 C6(nbfp, atnr, i, j) = idef->iparams[k].lj.c6*6.0;
148 C12(nbfp, atnr, i, j) = idef->iparams[k].lj.c12*12.0;
153 return nbfp;
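/* Editor's note (layout sketch, not part of the original file): mk_nbfp()
 * builds one flat array with the derivative prefactors folded in. The
 * relative positions match those used by make_ljpme_c6grid() below
 * (grid[2*(atnr*i+j)]); the exact ordering within each type pair is an
 * assumption here, since the C6/C12/BHAM* accessor macros are defined
 * elsewhere:
 *
 *   LJ:          nbfp[2*(atnr*i + j) + 0] = 6.0  * c6(i,j)
 *                nbfp[2*(atnr*i + j) + 1] = 12.0 * c12(i,j)
 *   Buckingham:  nbfp[3*(atnr*i + j) + 0..2] = a(i,j), b(i,j), 6.0*c(i,j)
 */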
156 static real *make_ljpme_c6grid(const gmx_ffparams_t *idef, t_forcerec *fr)
158 int i, j, k, atnr;
159 real c6, c6i, c6j, c12i, c12j, epsi, epsj, sigmai, sigmaj;
160 real *grid;
162 /* For LJ-PME simulations, we correct the energies with the reciprocal space
163 * inside of the cut-off. To do this the non-bonded kernels need to have
164 * access to the C6-values used on the reciprocal grid in pme.c
167 atnr = idef->atnr;
168 snew(grid, 2*atnr*atnr);
169 for (i = k = 0; (i < atnr); i++)
171 for (j = 0; (j < atnr); j++, k++)
173 c6i = idef->iparams[i*(atnr+1)].lj.c6;
174 c12i = idef->iparams[i*(atnr+1)].lj.c12;
175 c6j = idef->iparams[j*(atnr+1)].lj.c6;
176 c12j = idef->iparams[j*(atnr+1)].lj.c12;
177 c6 = std::sqrt(c6i * c6j);
178 if (fr->ljpme_combination_rule == eljpmeLB
179 && !gmx_numzero(c6) && !gmx_numzero(c12i) && !gmx_numzero(c12j))
181 sigmai = gmx::sixthroot(c12i / c6i);
182 sigmaj = gmx::sixthroot(c12j / c6j);
183 epsi = c6i * c6i / c12i;
184 epsj = c6j * c6j / c12j;
185 c6 = std::sqrt(epsi * epsj) * gmx::power6(0.5*(sigmai+sigmaj));
187 /* Store the elements at the same relative positions as C6 in nbfp in order
188 * to simplify access in the kernels
190 grid[2*(atnr*i+j)] = c6*6.0;
193 return grid;
196 static real *mk_nbfp_combination_rule(const gmx_ffparams_t *idef, int comb_rule)
198 real *nbfp;
199 int i, j, atnr;
200 real c6i, c6j, c12i, c12j, epsi, epsj, sigmai, sigmaj;
201 real c6, c12;
203 atnr = idef->atnr;
204 snew(nbfp, 2*atnr*atnr);
205 for (i = 0; i < atnr; ++i)
207 for (j = 0; j < atnr; ++j)
209 c6i = idef->iparams[i*(atnr+1)].lj.c6;
210 c12i = idef->iparams[i*(atnr+1)].lj.c12;
211 c6j = idef->iparams[j*(atnr+1)].lj.c6;
212 c12j = idef->iparams[j*(atnr+1)].lj.c12;
213 c6 = std::sqrt(c6i * c6j);
214 c12 = std::sqrt(c12i * c12j);
215 if (comb_rule == eCOMB_ARITHMETIC
216 && !gmx_numzero(c6) && !gmx_numzero(c12))
218 sigmai = gmx::sixthroot(c12i / c6i);
219 sigmaj = gmx::sixthroot(c12j / c6j);
220 epsi = c6i * c6i / c12i;
221 epsj = c6j * c6j / c12j;
222 c6 = std::sqrt(epsi * epsj) * gmx::power6(0.5*(sigmai+sigmaj));
223 c12 = std::sqrt(epsi * epsj) * gmx::power12(0.5*(sigmai+sigmaj));
225 C6(nbfp, atnr, i, j) = c6*6.0;
226 C12(nbfp, atnr, i, j) = c12*12.0;
229 return nbfp;
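/* Editor's note (illustrative sketch, not part of the original file): the
 * Lorentz-Berthelot branch above in isolation, using plain doubles and
 * std::pow/std::sqrt instead of the gmx:: helpers. The helper name is
 * hypothetical. Note that eps as computed here equals 4*epsilon in the usual
 * 4*eps*((sigma/r)^12 - (sigma/r)^6) convention; the factor cancels when the
 * pair C6/C12 are rebuilt, so the result matches the geometric/LB values
 * stored above. Non-zero c6/c12 are assumed, as guarded by gmx_numzero()
 * in the code above.
 */
static void exampleLorentzBerthelot(double c6i, double c12i,
                                    double c6j, double c12j,
                                    double *c6, double *c12)
{
    const double sigmai = std::pow(c12i / c6i, 1.0/6.0);
    const double sigmaj = std::pow(c12j / c6j, 1.0/6.0);
    const double epsi   = c6i * c6i / c12i;   /* = 4*epsilon_i */
    const double epsj   = c6j * c6j / c12j;
    const double sigma  = 0.5*(sigmai + sigmaj);
    const double eps    = std::sqrt(epsi * epsj);
    *c6  = eps * std::pow(sigma,  6.0);       /* combined C6  */
    *c12 = eps * std::pow(sigma, 12.0);       /* combined C12 */
}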
232 /* This routine sets fr->solvent_opt to the most common solvent in the
233 * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in
234 * the fr->solvent_type array with the correct type (or esolNO).
236 * Charge groups that fulfill the conditions but are not identical to the
237 * most common one will be marked as esolNO in the solvent_type array.
239 * TIP3P is identical to SPC for these purposes, so we call it
240 * SPC in the arrays (Apologies to Bill Jorgensen ;-)
242 * NOTE: QM particles should not
243 * become an optimized solvent. Not even if there is only one charge
244 * group in the QM.
247 typedef struct
249 int model;
250 int count;
251 int vdwtype[4];
252 real charge[4];
253 } solvent_parameters_t;
255 static void
256 check_solvent_cg(const gmx_moltype_t *molt,
257 int cg0,
258 int nmol,
259 const unsigned char *qm_grpnr,
260 const t_grps *qm_grps,
261 t_forcerec * fr,
262 int *n_solvent_parameters,
263 solvent_parameters_t **solvent_parameters_p,
264 int cginfo,
265 int *cg_sp)
267 t_atom *atom;
268 int j, k;
269 int j0, j1, nj;
270 gmx_bool perturbed;
271 gmx_bool has_vdw[4];
272 gmx_bool match;
273 real tmp_charge[4] = { 0.0 }; /* init to zero to make gcc4.8 happy */
274 int tmp_vdwtype[4] = { 0 }; /* init to zero to make gcc4.8 happy */
275 int tjA;
276 gmx_bool qm;
277 solvent_parameters_t *solvent_parameters;
279 /* We use a list with parameters for each solvent type.
280 * Every time we discover a new molecule that fulfills the basic
281 * conditions for a solvent we compare with the previous entries
282 * in these lists. If the parameters are the same we just increment
283 * the counter for that type, and otherwise we create a new type
284 * based on the current molecule.
286 * Once we've finished going through all molecules we check which
287 * solvent is most common, and mark all those molecules while we
288 * clear the flag on all others.
291 solvent_parameters = *solvent_parameters_p;
293 /* Mark the cg first as non optimized */
294 *cg_sp = -1;
296 /* Check if this cg has no exclusions with atoms in other charge groups
297 * and that all atoms inside the charge group are mutually excluded.
298 * We only have 3 or 4 atom solvent loops.
300 if (GET_CGINFO_EXCL_INTER(cginfo) ||
301 !GET_CGINFO_EXCL_INTRA(cginfo))
303 return;
306 /* Get the atom index range of this charge group */
307 j0 = molt->cgs.index[cg0];
308 j1 = molt->cgs.index[cg0+1];
310 /* Number of atoms in our molecule */
311 nj = j1 - j0;
313 if (debug)
315 fprintf(debug,
316 "Moltype '%s': there are %d atoms in this charge group\n",
317 *molt->name, nj);
320 /* Check if it could be an SPC (3 atoms) or TIP4P (4) water,
321 * otherwise skip it.
323 if (nj < 3 || nj > 4)
325 return;
328 /* Check if we are doing QM on this group */
329 qm = FALSE;
330 if (qm_grpnr != nullptr)
332 for (j = j0; j < j1 && !qm; j++)
334 qm = (qm_grpnr[j] < qm_grps->nr - 1);
337 /* Cannot use solvent optimization with QM */
338 if (qm)
340 return;
343 atom = molt->atoms.atom;
345 /* Still looks like a solvent, time to check parameters */
347 /* If it is perturbed (free energy) we can't use the solvent loops,
348 * so then we just skip to the next molecule.
350 perturbed = FALSE;
352 for (j = j0; j < j1 && !perturbed; j++)
354 perturbed = PERTURBED(atom[j]);
357 if (perturbed)
359 return;
362 /* Now it's only a question if the VdW and charge parameters
363 * are OK. Before doing the check we compare and see if they are
364 * identical to a possible previous solvent type.
365 * First we assign the current types and charges.
367 for (j = 0; j < nj; j++)
369 tmp_vdwtype[j] = atom[j0+j].type;
370 tmp_charge[j] = atom[j0+j].q;
373 /* Does it match any previous solvent type? */
374 for (k = 0; k < *n_solvent_parameters; k++)
376 match = TRUE;
379 /* We can only match SPC with 3 atoms and TIP4P with 4 atoms */
380 if ( (solvent_parameters[k].model == esolSPC && nj != 3) ||
381 (solvent_parameters[k].model == esolTIP4P && nj != 4) )
383 match = FALSE;
386 /* Check that types & charges match for all atoms in molecule */
387 for (j = 0; j < nj && match; j++)
389 if (tmp_vdwtype[j] != solvent_parameters[k].vdwtype[j])
391 match = FALSE;
393 if (tmp_charge[j] != solvent_parameters[k].charge[j])
395 match = FALSE;
398 if (match)
400 /* Congratulations! We have a matched solvent.
401 * Flag it with this type for later processing.
403 *cg_sp = k;
404 solvent_parameters[k].count += nmol;
406 /* We are done with this charge group */
407 return;
411 /* If we get here, we have a tentative new solvent type.
412 * Before we add it we must check that it fulfills the requirements
413 * of the solvent optimized loops. First determine which atoms have
414 * VdW interactions.
416 for (j = 0; j < nj; j++)
418 has_vdw[j] = FALSE;
419 tjA = tmp_vdwtype[j];
421 /* Go through all other types and see if any have non-zero
422 * VdW parameters when combined with this one.
424 for (k = 0; k < fr->ntype && (!has_vdw[j]); k++)
426 /* We already checked that the atoms weren't perturbed,
427 * so we only need to check state A now.
429 if (fr->bBHAM)
431 has_vdw[j] = (has_vdw[j] ||
432 (BHAMA(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
433 (BHAMB(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
434 (BHAMC(fr->nbfp, fr->ntype, tjA, k) != 0.0));
436 else
438 /* Standard LJ */
439 has_vdw[j] = (has_vdw[j] ||
440 (C6(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
441 (C12(fr->nbfp, fr->ntype, tjA, k) != 0.0));
446 /* Now we know all we need to make the final check and assignment. */
447 if (nj == 3)
449 /* So, is it an SPC?
450 * For this we require that all atoms have charge,
451 * the charges on atom 2 & 3 should be the same, and only
452 * atom 1 might have VdW.
454 if (!has_vdw[1] &&
455 !has_vdw[2] &&
456 tmp_charge[0] != 0 &&
457 tmp_charge[1] != 0 &&
458 tmp_charge[2] == tmp_charge[1])
460 srenew(solvent_parameters, *n_solvent_parameters+1);
461 solvent_parameters[*n_solvent_parameters].model = esolSPC;
462 solvent_parameters[*n_solvent_parameters].count = nmol;
463 for (k = 0; k < 3; k++)
465 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
466 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
469 *cg_sp = *n_solvent_parameters;
470 (*n_solvent_parameters)++;
473 else if (nj == 4)
475 /* Or could it be a TIP4P?
476 * For this we require that atoms 2,3,4 have charge, but not atom 1.
477 * Only atom 1 might have VdW.
479 if (!has_vdw[1] &&
480 !has_vdw[2] &&
481 !has_vdw[3] &&
482 tmp_charge[0] == 0 &&
483 tmp_charge[1] != 0 &&
484 tmp_charge[2] == tmp_charge[1] &&
485 tmp_charge[3] != 0)
487 srenew(solvent_parameters, *n_solvent_parameters+1);
488 solvent_parameters[*n_solvent_parameters].model = esolTIP4P;
489 solvent_parameters[*n_solvent_parameters].count = nmol;
490 for (k = 0; k < 4; k++)
492 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
493 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
496 *cg_sp = *n_solvent_parameters;
497 (*n_solvent_parameters)++;
501 *solvent_parameters_p = solvent_parameters;
504 static void
505 check_solvent(FILE * fp,
506 const gmx_mtop_t * mtop,
507 t_forcerec * fr,
508 cginfo_mb_t *cginfo_mb)
510 const t_block * cgs;
511 const gmx_moltype_t *molt;
512 int mol, cg_mol, at_offset, am, cgm, i, nmol_ch, nmol;
513 int n_solvent_parameters;
514 solvent_parameters_t *solvent_parameters;
515 int **cg_sp;
516 int bestsp, bestsol;
518 if (debug)
520 fprintf(debug, "Going to determine what solvent types we have.\n");
523 n_solvent_parameters = 0;
524 solvent_parameters = nullptr;
525 /* Allocate temporary array for solvent type */
526 snew(cg_sp, mtop->molblock.size());
528 at_offset = 0;
529 for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
531 molt = &mtop->moltype[mtop->molblock[mb].type];
532 cgs = &molt->cgs;
533 /* Here we have to loop over all individual molecules
534 * because we need to check for QMMM particles.
536 snew(cg_sp[mb], cginfo_mb[mb].cg_mod);
537 nmol_ch = cginfo_mb[mb].cg_mod/cgs->nr;
538 nmol = mtop->molblock[mb].nmol/nmol_ch;
539 for (mol = 0; mol < nmol_ch; mol++)
541 cgm = mol*cgs->nr;
542 am = mol*cgs->index[cgs->nr];
543 for (cg_mol = 0; cg_mol < cgs->nr; cg_mol++)
545 check_solvent_cg(molt, cg_mol, nmol,
546 mtop->groups.grpnr[egcQMMM] ?
547 mtop->groups.grpnr[egcQMMM]+at_offset+am : nullptr,
548 &mtop->groups.grps[egcQMMM],
550 &n_solvent_parameters, &solvent_parameters,
551 cginfo_mb[mb].cginfo[cgm+cg_mol],
552 &cg_sp[mb][cgm+cg_mol]);
555 at_offset += cgs->index[cgs->nr];
558 /* Puh! We finished going through all charge groups.
559 * Now find the most common solvent model.
562 /* Most common solvent this far */
563 bestsp = -2;
564 for (i = 0; i < n_solvent_parameters; i++)
566 if (bestsp == -2 ||
567 solvent_parameters[i].count > solvent_parameters[bestsp].count)
569 bestsp = i;
573 if (bestsp >= 0)
575 bestsol = solvent_parameters[bestsp].model;
577 else
579 bestsol = esolNO;
582 fr->nWatMol = 0;
583 for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
585 cgs = &mtop->moltype[mtop->molblock[mb].type].cgs;
586 nmol = (mtop->molblock[mb].nmol*cgs->nr)/cginfo_mb[mb].cg_mod;
587 for (i = 0; i < cginfo_mb[mb].cg_mod; i++)
589 if (cg_sp[mb][i] == bestsp)
591 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i], bestsol);
592 fr->nWatMol += nmol;
594 else
596 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i], esolNO);
599 sfree(cg_sp[mb]);
601 sfree(cg_sp);
603 if (bestsol != esolNO && fp != nullptr)
605 fprintf(fp, "\nEnabling %s-like water optimization for %d molecules.\n\n",
606 esol_names[bestsol],
607 solvent_parameters[bestsp].count);
610 sfree(solvent_parameters);
611 fr->solvent_opt = bestsol;
614 enum {
615 acNONE = 0, acCONSTRAINT, acSETTLE
618 static cginfo_mb_t *init_cginfo_mb(FILE *fplog, const gmx_mtop_t *mtop,
619 t_forcerec *fr, gmx_bool bNoSolvOpt,
620 gmx_bool *bFEP_NonBonded,
621 gmx_bool *bExcl_IntraCGAll_InterCGNone)
623 const t_block *cgs;
624 const t_blocka *excl;
625 const gmx_moltype_t *molt;
626 const gmx_molblock_t *molb;
627 cginfo_mb_t *cginfo_mb;
628 gmx_bool *type_VDW;
629 int *cginfo;
630 int cg_offset, a_offset;
631 int m, cg, a0, a1, gid, ai, j, aj, excl_nalloc;
632 int *a_con;
633 int ftype;
634 int ia;
635 gmx_bool bId, *bExcl, bExclIntraAll, bExclInter, bHaveVDW, bHaveQ, bHavePerturbedAtoms;
637 snew(cginfo_mb, mtop->molblock.size());
639 snew(type_VDW, fr->ntype);
640 for (ai = 0; ai < fr->ntype; ai++)
642 type_VDW[ai] = FALSE;
643 for (j = 0; j < fr->ntype; j++)
645 type_VDW[ai] = type_VDW[ai] ||
646 fr->bBHAM ||
647 C6(fr->nbfp, fr->ntype, ai, j) != 0 ||
648 C12(fr->nbfp, fr->ntype, ai, j) != 0;
652 *bFEP_NonBonded = FALSE;
653 *bExcl_IntraCGAll_InterCGNone = TRUE;
655 excl_nalloc = 10;
656 snew(bExcl, excl_nalloc);
657 cg_offset = 0;
658 a_offset = 0;
659 for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
661 molb = &mtop->molblock[mb];
662 molt = &mtop->moltype[molb->type];
663 cgs = &molt->cgs;
664 excl = &molt->excls;
666 /* Check if the cginfo is identical for all molecules in this block.
667 * If so, we only need an array of the size of one molecule.
668 * Otherwise we make an array of #mol times #cgs per molecule.
670 bId = TRUE;
671 for (m = 0; m < molb->nmol; m++)
673 int am = m*cgs->index[cgs->nr];
674 for (cg = 0; cg < cgs->nr; cg++)
676 a0 = cgs->index[cg];
677 a1 = cgs->index[cg+1];
678 if (getGroupType(mtop->groups, egcENER, a_offset+am+a0) !=
679 getGroupType(mtop->groups, egcENER, a_offset +a0))
681 bId = FALSE;
683 if (mtop->groups.grpnr[egcQMMM] != nullptr)
685 for (ai = a0; ai < a1; ai++)
687 if (mtop->groups.grpnr[egcQMMM][a_offset+am+ai] !=
688 mtop->groups.grpnr[egcQMMM][a_offset +ai])
690 bId = FALSE;
697 cginfo_mb[mb].cg_start = cg_offset;
698 cginfo_mb[mb].cg_end = cg_offset + molb->nmol*cgs->nr;
699 cginfo_mb[mb].cg_mod = (bId ? 1 : molb->nmol)*cgs->nr;
700 snew(cginfo_mb[mb].cginfo, cginfo_mb[mb].cg_mod);
701 cginfo = cginfo_mb[mb].cginfo;
703 /* Set constraints flags for constrained atoms */
704 snew(a_con, molt->atoms.nr);
705 for (ftype = 0; ftype < F_NRE; ftype++)
707 if (interaction_function[ftype].flags & IF_CONSTRAINT)
709 int nral;
711 nral = NRAL(ftype);
712 for (ia = 0; ia < molt->ilist[ftype].size(); ia += 1+nral)
714 int a;
716 for (a = 0; a < nral; a++)
718 a_con[molt->ilist[ftype].iatoms[ia+1+a]] =
719 (ftype == F_SETTLE ? acSETTLE : acCONSTRAINT);
725 for (m = 0; m < (bId ? 1 : molb->nmol); m++)
727 int cgm = m*cgs->nr;
728 int am = m*cgs->index[cgs->nr];
729 for (cg = 0; cg < cgs->nr; cg++)
731 a0 = cgs->index[cg];
732 a1 = cgs->index[cg+1];
734 /* Store the energy group in cginfo */
735 gid = getGroupType(mtop->groups, egcENER, a_offset+am+a0);
736 SET_CGINFO_GID(cginfo[cgm+cg], gid);
738 /* Check the intra/inter charge group exclusions */
739 if (a1-a0 > excl_nalloc)
741 excl_nalloc = a1 - a0;
742 srenew(bExcl, excl_nalloc);
744 /* bExclIntraAll: all intra cg interactions excluded
745 * bExclInter: any inter cg interactions excluded
747 bExclIntraAll = TRUE;
748 bExclInter = FALSE;
749 bHaveVDW = FALSE;
750 bHaveQ = FALSE;
751 bHavePerturbedAtoms = FALSE;
752 for (ai = a0; ai < a1; ai++)
754 /* Check VDW and electrostatic interactions */
755 bHaveVDW = bHaveVDW || (type_VDW[molt->atoms.atom[ai].type] ||
756 type_VDW[molt->atoms.atom[ai].typeB]);
757 bHaveQ = bHaveQ || (molt->atoms.atom[ai].q != 0 ||
758 molt->atoms.atom[ai].qB != 0);
760 bHavePerturbedAtoms = bHavePerturbedAtoms || (PERTURBED(molt->atoms.atom[ai]) != 0);
762 /* Clear the exclusion list for atom ai */
763 for (aj = a0; aj < a1; aj++)
765 bExcl[aj-a0] = FALSE;
767 /* Loop over all the exclusions of atom ai */
768 for (j = excl->index[ai]; j < excl->index[ai+1]; j++)
770 aj = excl->a[j];
771 if (aj < a0 || aj >= a1)
773 bExclInter = TRUE;
775 else
777 bExcl[aj-a0] = TRUE;
780 /* Check if ai excludes a0 to a1 */
781 for (aj = a0; aj < a1; aj++)
783 if (!bExcl[aj-a0])
785 bExclIntraAll = FALSE;
789 switch (a_con[ai])
791 case acCONSTRAINT:
792 SET_CGINFO_CONSTR(cginfo[cgm+cg]);
793 break;
794 case acSETTLE:
795 SET_CGINFO_SETTLE(cginfo[cgm+cg]);
796 break;
797 default:
798 break;
801 if (bExclIntraAll)
803 SET_CGINFO_EXCL_INTRA(cginfo[cgm+cg]);
805 if (bExclInter)
807 SET_CGINFO_EXCL_INTER(cginfo[cgm+cg]);
809 if (a1 - a0 > MAX_CHARGEGROUP_SIZE)
811 /* The size in cginfo is currently only read with DD */
812 gmx_fatal(FARGS, "A charge group has size %d which is larger than the limit of %d atoms", a1-a0, MAX_CHARGEGROUP_SIZE);
814 if (bHaveVDW)
816 SET_CGINFO_HAS_VDW(cginfo[cgm+cg]);
818 if (bHaveQ)
820 SET_CGINFO_HAS_Q(cginfo[cgm+cg]);
822 if (bHavePerturbedAtoms && fr->efep != efepNO)
824 SET_CGINFO_FEP(cginfo[cgm+cg]);
825 *bFEP_NonBonded = TRUE;
827 /* Store the charge group size */
828 SET_CGINFO_NATOMS(cginfo[cgm+cg], a1-a0);
830 if (!bExclIntraAll || bExclInter)
832 *bExcl_IntraCGAll_InterCGNone = FALSE;
837 sfree(a_con);
839 cg_offset += molb->nmol*cgs->nr;
840 a_offset += molb->nmol*cgs->index[cgs->nr];
842 sfree(type_VDW);
843 sfree(bExcl);
845 /* The solvent optimizer is called after the QM is initialized,
846 * because we don't want the QM subsystem to become an
847 * optimized solvent
850 check_solvent(fplog, mtop, fr, cginfo_mb);
852 if (getenv("GMX_NO_SOLV_OPT"))
854 if (fplog)
856 fprintf(fplog, "Found environment variable GMX_NO_SOLV_OPT.\n"
857 "Disabling all solvent optimization\n");
859 fr->solvent_opt = esolNO;
861 if (bNoSolvOpt)
863 fr->solvent_opt = esolNO;
865 if (!fr->solvent_opt)
867 for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
869 for (cg = 0; cg < cginfo_mb[mb].cg_mod; cg++)
871 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[cg], esolNO);
876 return cginfo_mb;
879 static int *cginfo_expand(int nmb, cginfo_mb_t *cgi_mb)
881 int ncg, mb, cg;
882 int *cginfo;
884 ncg = cgi_mb[nmb-1].cg_end;
885 snew(cginfo, ncg);
886 mb = 0;
887 for (cg = 0; cg < ncg; cg++)
889 while (cg >= cgi_mb[mb].cg_end)
891 mb++;
893 cginfo[cg] =
894 cgi_mb[mb].cginfo[(cg - cgi_mb[mb].cg_start) % cgi_mb[mb].cg_mod];
897 return cginfo;
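/* Editor's note (worked example, not part of the original file): when all
 * molecules in a block are identical, cg_mod holds only one molecule's worth
 * of cginfo entries and the modulo in cginfo_expand() maps every copy back
 * onto it. E.g. a block of 1000 identical one-charge-group molecules starting
 * at cg_start = 0 has cg_end = 1000 and cg_mod = 1, so global charge groups
 * 0..999 all read cginfo[(cg - 0) % 1] = cginfo[0].
 */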
900 static void done_cginfo_mb(cginfo_mb_t *cginfo_mb, int numMolBlocks)
902 if (cginfo_mb == nullptr)
904 return;
906 for (int mb = 0; mb < numMolBlocks; ++mb)
908 sfree(cginfo_mb[mb].cginfo);
910 sfree(cginfo_mb);
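/* Editor's note (not part of the original file): done_cginfo_mb() frees the
 * per-molblock cginfo arrays allocated by init_cginfo_mb() above, and then
 * the outer array itself -- presumably the small leak referenced in the
 * commit message. A minimal usage sketch; the actual call site in the
 * forcerec cleanup path is not shown in this blob, and fr->cginfo_mb is
 * assumed to hold the pointer:
 *
 *     done_cginfo_mb(fr->cginfo_mb, mtop->molblock.size());
 *     fr->cginfo_mb = nullptr;
 */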
913 /* Sets the sum of charges (squared) and C6 in the system in fr.
914 * Returns whether the system has a net charge.
916 static bool set_chargesum(FILE *log, t_forcerec *fr, const gmx_mtop_t *mtop)
918 /* This now calculates the sums for q and c6 */
919 double qsum, q2sum, q, c6sum, c6;
921 qsum = 0;
922 q2sum = 0;
923 c6sum = 0;
924 for (const gmx_molblock_t &molb : mtop->molblock)
926 int nmol = molb.nmol;
927 const t_atoms *atoms = &mtop->moltype[molb.type].atoms;
928 for (int i = 0; i < atoms->nr; i++)
930 q = atoms->atom[i].q;
931 qsum += nmol*q;
932 q2sum += nmol*q*q;
933 c6 = mtop->ffparams.iparams[atoms->atom[i].type*(mtop->ffparams.atnr+1)].lj.c6;
934 c6sum += nmol*c6;
937 fr->qsum[0] = qsum;
938 fr->q2sum[0] = q2sum;
939 fr->c6sum[0] = c6sum;
941 if (fr->efep != efepNO)
943 qsum = 0;
944 q2sum = 0;
945 c6sum = 0;
946 for (const gmx_molblock_t &molb : mtop->molblock)
948 int nmol = molb.nmol;
949 const t_atoms *atoms = &mtop->moltype[molb.type].atoms;
950 for (int i = 0; i < atoms->nr; i++)
952 q = atoms->atom[i].qB;
953 qsum += nmol*q;
954 q2sum += nmol*q*q;
955 c6 = mtop->ffparams.iparams[atoms->atom[i].typeB*(mtop->ffparams.atnr+1)].lj.c6;
956 c6sum += nmol*c6;
958 fr->qsum[1] = qsum;
959 fr->q2sum[1] = q2sum;
960 fr->c6sum[1] = c6sum;
963 else
965 fr->qsum[1] = fr->qsum[0];
966 fr->q2sum[1] = fr->q2sum[0];
967 fr->c6sum[1] = fr->c6sum[0];
969 if (log)
971 if (fr->efep == efepNO)
973 fprintf(log, "System total charge: %.3f\n", fr->qsum[0]);
975 else
977 fprintf(log, "System total charge, top. A: %.3f top. B: %.3f\n",
978 fr->qsum[0], fr->qsum[1]);
982 /* A cut-off of 1e-4 is used to catch rounding errors due to ASCII input */
983 return (std::abs(fr->qsum[0]) > 1e-4 ||
984 std::abs(fr->qsum[1]) > 1e-4);
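/* Editor's note (worked example, not part of the original file): for a box
 * of pure SPC water (q_O = -0.82 e, q_H = +0.41 e) each molecule contributes
 *
 *   qsum  += -0.82 + 2*0.41        = 0
 *   q2sum += 0.82^2 + 2*0.41^2     = 1.0086 e^2
 *
 * so set_chargesum() reports a total charge of 0.000 and returns false
 * (no net charge), while q2sum stays non-zero for the Ewald/RF corrections.
 */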
987 void update_forcerec(t_forcerec *fr, matrix box)
989 if (fr->ic->eeltype == eelGRF)
991 calc_rffac(nullptr, fr->ic->eeltype, fr->ic->epsilon_r, fr->ic->epsilon_rf,
992 fr->ic->rcoulomb, fr->temp, fr->zsquare, box,
993 &fr->ic->k_rf, &fr->ic->c_rf);
997 void set_avcsixtwelve(FILE *fplog, t_forcerec *fr, const gmx_mtop_t *mtop)
999 const t_atoms *atoms, *atoms_tpi;
1000 const t_blocka *excl;
1001 int nmolc, i, j, tpi, tpj, j1, j2, k, nexcl, q;
1002 int64_t npair, npair_ij, tmpi, tmpj;
1003 double csix, ctwelve;
1004 int ntp, *typecount;
1005 gmx_bool bBHAM;
1006 real *nbfp;
1007 real *nbfp_comb = nullptr;
1009 ntp = fr->ntype;
1010 bBHAM = fr->bBHAM;
1011 nbfp = fr->nbfp;
1013 /* For LJ-PME, we want to correct for the difference between the
1014 * actual C6 values and the C6 values used by the LJ-PME based on
1015 * combination rules. */
1017 if (EVDW_PME(fr->ic->vdwtype))
1019 nbfp_comb = mk_nbfp_combination_rule(&mtop->ffparams,
1020 (fr->ljpme_combination_rule == eljpmeLB) ? eCOMB_ARITHMETIC : eCOMB_GEOMETRIC);
1021 for (tpi = 0; tpi < ntp; ++tpi)
1023 for (tpj = 0; tpj < ntp; ++tpj)
1025 C6(nbfp_comb, ntp, tpi, tpj) =
1026 C6(nbfp, ntp, tpi, tpj) - C6(nbfp_comb, ntp, tpi, tpj);
1027 C12(nbfp_comb, ntp, tpi, tpj) = C12(nbfp, ntp, tpi, tpj);
1030 nbfp = nbfp_comb;
1032 for (q = 0; q < (fr->efep == efepNO ? 1 : 2); q++)
1034 csix = 0;
1035 ctwelve = 0;
1036 npair = 0;
1037 nexcl = 0;
1038 if (!fr->n_tpi)
1040 /* Count the types so we avoid natoms^2 operations */
1041 snew(typecount, ntp);
1042 gmx_mtop_count_atomtypes(mtop, q, typecount);
1044 for (tpi = 0; tpi < ntp; tpi++)
1046 for (tpj = tpi; tpj < ntp; tpj++)
1048 tmpi = typecount[tpi];
1049 tmpj = typecount[tpj];
1050 if (tpi != tpj)
1052 npair_ij = tmpi*tmpj;
1054 else
1056 npair_ij = tmpi*(tmpi - 1)/2;
1058 if (bBHAM)
1060 /* nbfp now includes the 6.0 derivative prefactor */
1061 csix += npair_ij*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1063 else
1065 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1066 csix += npair_ij* C6(nbfp, ntp, tpi, tpj)/6.0;
1067 ctwelve += npair_ij* C12(nbfp, ntp, tpi, tpj)/12.0;
1069 npair += npair_ij;
1072 sfree(typecount);
1073 /* Subtract the excluded pairs.
1074 * The main reason for subtracting exclusions is that in some cases
1075 * some combinations might never occur and the parameters could have
1076 * any value. These unused values should not influence the dispersion
1077 * correction.
1079 for (const gmx_molblock_t &molb : mtop->molblock)
1081 int nmol = molb.nmol;
1082 atoms = &mtop->moltype[molb.type].atoms;
1083 excl = &mtop->moltype[molb.type].excls;
1084 for (int i = 0; (i < atoms->nr); i++)
1086 if (q == 0)
1088 tpi = atoms->atom[i].type;
1090 else
1092 tpi = atoms->atom[i].typeB;
1094 j1 = excl->index[i];
1095 j2 = excl->index[i+1];
1096 for (j = j1; j < j2; j++)
1098 k = excl->a[j];
1099 if (k > i)
1101 if (q == 0)
1103 tpj = atoms->atom[k].type;
1105 else
1107 tpj = atoms->atom[k].typeB;
1109 if (bBHAM)
1111 /* nbfp now includes the 6.0 derivative prefactor */
1112 csix -= nmol*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1114 else
1116 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1117 csix -= nmol*C6 (nbfp, ntp, tpi, tpj)/6.0;
1118 ctwelve -= nmol*C12(nbfp, ntp, tpi, tpj)/12.0;
1120 nexcl += molb.nmol;
1126 else
1128 /* Only correct for the interaction of the test particle
1129 * with the rest of the system.
1131 atoms_tpi =
1132 &mtop->moltype[mtop->molblock.back().type].atoms;
1134 npair = 0;
1135 for (size_t mb = 0; mb < mtop->molblock.size(); mb++)
1137 const gmx_molblock_t &molb = mtop->molblock[mb];
1138 atoms = &mtop->moltype[molb.type].atoms;
1139 for (j = 0; j < atoms->nr; j++)
1141 nmolc = molb.nmol;
1142 /* Remove the interaction of the test charge group
1143 * with itself.
1145 if (mb == mtop->molblock.size() - 1)
1147 nmolc--;
1149 if (mb == 0 && molb.nmol == 1)
1151 gmx_fatal(FARGS, "Old format tpr with TPI, please generate a new tpr file");
1154 if (q == 0)
1156 tpj = atoms->atom[j].type;
1158 else
1160 tpj = atoms->atom[j].typeB;
1162 for (i = 0; i < fr->n_tpi; i++)
1164 if (q == 0)
1166 tpi = atoms_tpi->atom[i].type;
1168 else
1170 tpi = atoms_tpi->atom[i].typeB;
1172 if (bBHAM)
1174 /* nbfp now includes the 6.0 derivative prefactor */
1175 csix += nmolc*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1177 else
1179 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1180 csix += nmolc*C6 (nbfp, ntp, tpi, tpj)/6.0;
1181 ctwelve += nmolc*C12(nbfp, ntp, tpi, tpj)/12.0;
1183 npair += nmolc;
1188 if (npair - nexcl <= 0 && fplog)
1190 fprintf(fplog, "\nWARNING: There are no atom pairs for dispersion correction\n\n");
1191 csix = 0;
1192 ctwelve = 0;
1194 else
1196 csix /= npair - nexcl;
1197 ctwelve /= npair - nexcl;
1199 if (debug)
1201 fprintf(debug, "Counted %d exclusions\n", nexcl);
1202 fprintf(debug, "Average C6 parameter is: %10g\n", csix);
1203 fprintf(debug, "Average C12 parameter is: %10g\n", ctwelve);
1205 fr->avcsix[q] = csix;
1206 fr->avctwelve[q] = ctwelve;
1209 if (EVDW_PME(fr->ic->vdwtype))
1211 sfree(nbfp_comb);
1214 if (fplog != nullptr)
1216 if (fr->eDispCorr == edispcAllEner ||
1217 fr->eDispCorr == edispcAllEnerPres)
1219 fprintf(fplog, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1220 fr->avcsix[0], fr->avctwelve[0]);
1222 else
1224 fprintf(fplog, "Long Range LJ corr.: <C6> %10.4e\n", fr->avcsix[0]);
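/* Editor's note (summary of the averaging above, not part of the original
 * file): the dispersion-correction averages computed by set_avcsixtwelve()
 * are
 *
 *   <C6> = ( sum_{i<=j} n_ij*C6_ij  -  sum_excluded C6_ij ) / (npair - nexcl)
 *   with n_ij = n_i*n_j for i != j and n_i*(n_i - 1)/2 for i == j,
 *
 * and likewise for <C12>. The 6.0/12.0 prefactors stored in nbfp are divided
 * back out, and for LJ-PME the C6 used is the difference between the real C6
 * and the combination-rule C6 used on the PME grid.
 */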
1230 static real calcBuckinghamBMax(FILE *fplog, const gmx_mtop_t *mtop)
1232 const t_atoms *at1, *at2;
1233 int i, j, tpi, tpj, ntypes;
1234 real b, bmin;
1236 if (fplog)
1238 fprintf(fplog, "Determining largest Buckingham b parameter for table\n");
1240 ntypes = mtop->ffparams.atnr;
1242 bmin = -1;
1243 real bham_b_max = 0;
1244 for (size_t mt1 = 0; mt1 < mtop->moltype.size(); mt1++)
1246 at1 = &mtop->moltype[mt1].atoms;
1247 for (i = 0; (i < at1->nr); i++)
1249 tpi = at1->atom[i].type;
1250 if (tpi >= ntypes)
1252 gmx_fatal(FARGS, "Atomtype[%d] = %d, maximum = %d", i, tpi, ntypes);
1255 for (size_t mt2 = mt1; mt2 < mtop->moltype.size(); mt2++)
1257 at2 = &mtop->moltype[mt2].atoms;
1258 for (j = 0; (j < at2->nr); j++)
1260 tpj = at2->atom[j].type;
1261 if (tpj >= ntypes)
1263 gmx_fatal(FARGS, "Atomtype[%d] = %d, maximum = %d", j, tpj, ntypes);
1265 b = mtop->ffparams.iparams[tpi*ntypes + tpj].bham.b;
1266 if (b > bham_b_max)
1268 bham_b_max = b;
1270 if ((b < bmin) || (bmin == -1))
1272 bmin = b;
1278 if (fplog)
1280 fprintf(fplog, "Buckingham b parameters, min: %g, max: %g\n",
1281 bmin, bham_b_max);
1284 return bham_b_max;
1287 static void make_nbf_tables(FILE *fp,
1288 const interaction_const_t *ic, real rtab,
1289 const char *tabfn, char *eg1, char *eg2,
1290 t_nblists *nbl)
1292 char buf[STRLEN];
1293 int i, j;
1295 if (tabfn == nullptr)
1297 if (debug)
1299 fprintf(debug, "No table file name passed, can not read table, can not do non-bonded interactions\n");
1301 return;
1304 sprintf(buf, "%s", tabfn);
1305 if (eg1 && eg2)
1307 /* Append the two energy group names */
1308 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1, "_%s_%s.%s",
1309 eg1, eg2, ftp2ext(efXVG));
1311 nbl->table_elec_vdw = make_tables(fp, ic, buf, rtab, 0);
1312 /* Copy the contents of the table to separate coulomb and LJ tables too,
1313 * to improve cache performance.
1315 /* For performance reasons we want
1316 * the table data to be aligned to 16-byte. The pointers could be freed
1317 * but currently aren't.
1319 snew(nbl->table_elec, 1);
1320 nbl->table_elec->interaction = GMX_TABLE_INTERACTION_ELEC;
1321 nbl->table_elec->format = nbl->table_elec_vdw->format;
1322 nbl->table_elec->r = nbl->table_elec_vdw->r;
1323 nbl->table_elec->n = nbl->table_elec_vdw->n;
1324 nbl->table_elec->scale = nbl->table_elec_vdw->scale;
1325 nbl->table_elec->formatsize = nbl->table_elec_vdw->formatsize;
1326 nbl->table_elec->ninteractions = 1;
1327 nbl->table_elec->stride = nbl->table_elec->formatsize * nbl->table_elec->ninteractions;
1328 snew_aligned(nbl->table_elec->data, nbl->table_elec->stride*(nbl->table_elec->n+1), 32);
1330 snew(nbl->table_vdw, 1);
1331 nbl->table_vdw->interaction = GMX_TABLE_INTERACTION_VDWREP_VDWDISP;
1332 nbl->table_vdw->format = nbl->table_elec_vdw->format;
1333 nbl->table_vdw->r = nbl->table_elec_vdw->r;
1334 nbl->table_vdw->n = nbl->table_elec_vdw->n;
1335 nbl->table_vdw->scale = nbl->table_elec_vdw->scale;
1336 nbl->table_vdw->formatsize = nbl->table_elec_vdw->formatsize;
1337 nbl->table_vdw->ninteractions = 2;
1338 nbl->table_vdw->stride = nbl->table_vdw->formatsize * nbl->table_vdw->ninteractions;
1339 snew_aligned(nbl->table_vdw->data, nbl->table_vdw->stride*(nbl->table_vdw->n+1), 32);
1341 /* NOTE: Using a single i-loop here leads to mix-up of data in table_vdw
1342 * with (at least) gcc 6.2, 6.3 and 6.4 when compiled with -O3 and AVX
1344 for (i = 0; i <= nbl->table_elec_vdw->n; i++)
1346 for (j = 0; j < 4; j++)
1348 nbl->table_elec->data[4*i+j] = nbl->table_elec_vdw->data[12*i+j];
1351 for (i = 0; i <= nbl->table_elec_vdw->n; i++)
1353 for (j = 0; j < 8; j++)
1355 nbl->table_vdw->data[8*i+j] = nbl->table_elec_vdw->data[12*i+4+j];
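/* Editor's note (layout summary, not part of the original file): the combined
 * table holds 12 reals per table point, which the copy loops above split as
 *
 *   elec_vdw[12*i + 0 .. 3 ]  ->  table_elec[4*i + 0 .. 3]   (Coulomb)
 *   elec_vdw[12*i + 4 .. 11]  ->  table_vdw [8*i + 0 .. 7]   (VdW, presumably
 *                                  4 dispersion + 4 repulsion entries)
 *
 * i.e. formatsize is 4 reals per interaction and the combined table stores
 * three interactions per point.
 */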
1360 /*!\brief If there are bonded interactions of type \c ftype1 or \c
1361 * ftype2 present in the topology, build an array of the number of
1362 * interactions present for each bonded interaction index found in the
1363 * topology.
1365 * \c ftype1 or \c ftype2 may be set to -1 to disable seeking for a
1366 * valid type with that parameter.
1368 * \c count will be reallocated as necessary to fit the largest bonded
1369 * interaction index found, and its current size will be returned in
1370 * \c ncount. It will contain zero for every bonded interaction index
1371 * for which no interactions are present in the topology.
1373 static void count_tables(int ftype1, int ftype2, const gmx_mtop_t *mtop,
1374 int *ncount, int **count)
1376 int ftype, i, j, tabnr;
1378 // Loop over all moleculetypes
1379 for (const gmx_moltype_t &molt : mtop->moltype)
1381 // Loop over all interaction types
1382 for (ftype = 0; ftype < F_NRE; ftype++)
1384 // If the current interaction type is one of the types whose tables we're trying to count...
1385 if (ftype == ftype1 || ftype == ftype2)
1387 const InteractionList &il = molt.ilist[ftype];
1388 const int stride = 1 + NRAL(ftype);
1389 // ... and there are actually some interactions for this type
1390 for (i = 0; i < il.size(); i += stride)
1392 // Find out which table index the user wanted
1393 tabnr = mtop->ffparams.iparams[il.iatoms[i]].tab.table;
1394 if (tabnr < 0)
1396 gmx_fatal(FARGS, "A bonded table number is smaller than 0: %d\n", tabnr);
1398 // Make room for this index in the data structure
1399 if (tabnr >= *ncount)
1401 srenew(*count, tabnr+1);
1402 for (j = *ncount; j < tabnr+1; j++)
1404 (*count)[j] = 0;
1406 *ncount = tabnr+1;
1408 // Record that this table index is used and must have a valid file
1409 (*count)[tabnr]++;
1416 /*!\brief If there are bonded interactions of flavour \c tabext and type
1417 * \c ftype1 or \c ftype2 present in the topology, seek them in the
1418 * list of filenames passed to mdrun, and make bonded tables from
1419 * those files.
1421 * \c ftype1 or \c ftype2 may be set to -1 to disable seeking for a
1422 * valid type with that parameter.
1424 * A fatal error occurs if no matching filename is found.
1426 static bondedtable_t *make_bonded_tables(FILE *fplog,
1427 int ftype1, int ftype2,
1428 const gmx_mtop_t *mtop,
1429 gmx::ArrayRef<const std::string> tabbfnm,
1430 const char *tabext)
1432 int ncount, *count;
1433 bondedtable_t *tab;
1435 tab = nullptr;
1437 ncount = 0;
1438 count = nullptr;
1439 count_tables(ftype1, ftype2, mtop, &ncount, &count);
1441 // Are there any relevant tabulated bond interactions?
1442 if (ncount > 0)
1444 snew(tab, ncount);
1445 for (int i = 0; i < ncount; i++)
1447 // Do any interactions exist that require this table?
1448 if (count[i] > 0)
1450 // This pattern enforces the current requirement that
1451 // table filenames end in a characteristic sequence
1452 // before the file type extension, and avoids table 13
1453 // being recognized and used for table 1.
1454 std::string patternToFind = gmx::formatString("_%s%d.%s", tabext, i, ftp2ext(efXVG));
1455 bool madeTable = false;
1456 for (gmx::index j = 0; j < tabbfnm.size() && !madeTable; ++j)
1458 if (gmx::endsWith(tabbfnm[j], patternToFind))
1460 // Finally read the table from the file found
1461 tab[i] = make_bonded_table(fplog, tabbfnm[j].c_str(), NRAL(ftype1)-2);
1462 madeTable = true;
1465 if (!madeTable)
1467 bool isPlural = (ftype2 != -1);
1468 gmx_fatal(FARGS, "Tabulated interaction of type '%s%s%s' with index %d cannot be used because no table file whose name matched '%s' was passed via the gmx mdrun -tableb command-line option.",
1469 interaction_function[ftype1].longname,
1470 isPlural ? "' or '" : "",
1471 isPlural ? interaction_function[ftype2].longname : "",
1473 patternToFind.c_str());
1477 sfree(count);
1480 return tab;
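/* Editor's note (usage illustration, not part of the original file): the
 * pattern built above is "_<tabext><index>.xvg", so for tabulated bonds
 * (typically tabext "b") with table index 0 the file passed via
 * "gmx mdrun -tableb" must end in "_b0.xvg", e.g. "table_b0.xvg". Because
 * the full "_b0.xvg" suffix is matched, a file for table 13 ("..._b13.xvg")
 * can no longer be picked up by accident for table 1.
 */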
1483 void forcerec_set_ranges(t_forcerec *fr,
1484 int ncg_home, int ncg_force,
1485 int natoms_force,
1486 int natoms_force_constr, int natoms_f_novirsum)
1488 fr->cg0 = 0;
1489 fr->hcg = ncg_home;
1491 /* fr->ncg_force is unused in the standard code,
1492 * but it can be useful for modified code dealing with charge groups.
1494 fr->ncg_force = ncg_force;
1495 fr->natoms_force = natoms_force;
1496 fr->natoms_force_constr = natoms_force_constr;
1498 if (fr->natoms_force_constr > fr->nalloc_force)
1500 fr->nalloc_force = over_alloc_dd(fr->natoms_force_constr);
1503 if (fr->haveDirectVirialContributions)
1505 fr->forceBufferForDirectVirialContributions->resize(natoms_f_novirsum);
1509 static real cutoff_inf(real cutoff)
1511 if (cutoff == 0)
1513 cutoff = GMX_CUTOFF_INF;
1516 return cutoff;
1519 gmx_bool can_use_allvsall(const t_inputrec *ir, gmx_bool bPrintNote, const t_commrec *cr, FILE *fp)
1521 gmx_bool bAllvsAll;
1523 bAllvsAll =
1525 ir->rlist == 0 &&
1526 ir->rcoulomb == 0 &&
1527 ir->rvdw == 0 &&
1528 ir->ePBC == epbcNONE &&
1529 ir->vdwtype == evdwCUT &&
1530 ir->coulombtype == eelCUT &&
1531 ir->efep == efepNO &&
1532 getenv("GMX_NO_ALLVSALL") == nullptr
1535 if (bAllvsAll && ir->opts.ngener > 1)
1537 const char *note = "NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
1539 if (bPrintNote)
1541 if (fp != nullptr)
1543 fprintf(fp, "\n%s\n", note);
1546 bAllvsAll = FALSE;
1549 if (bAllvsAll && fp && MASTER(cr))
1551 fprintf(fp, "\nUsing SIMD all-vs-all kernels.\n\n");
1554 return bAllvsAll;
1558 gmx_bool nbnxn_simd_supported(const gmx::MDLogger &mdlog,
1559 const t_inputrec *ir)
1561 if (ir->vdwtype == evdwPME && ir->ljpme_combination_rule == eljpmeLB)
1563 /* LJ PME with LB combination rule does 7 mesh operations.
1564 * This is so slow that we don't compile SIMD non-bonded kernels
1565 * for that. */
1566 GMX_LOG(mdlog.warning).asParagraph().appendText("LJ-PME with Lorentz-Berthelot is not supported with SIMD kernels, falling back to plain C kernels");
1567 return FALSE;
1570 return TRUE;
1574 static void pick_nbnxn_kernel_cpu(const t_inputrec gmx_unused *ir,
1575 int *kernel_type,
1576 int *ewald_excl,
1577 const gmx_hw_info_t gmx_unused &hardwareInfo)
1579 *kernel_type = nbnxnk4x4_PlainC;
1580 *ewald_excl = ewaldexclTable;
1582 #if GMX_SIMD
1584 #ifdef GMX_NBNXN_SIMD_4XN
1585 *kernel_type = nbnxnk4xN_SIMD_4xN;
1586 #endif
1587 #ifdef GMX_NBNXN_SIMD_2XNN
1588 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1589 #endif
1591 #if defined GMX_NBNXN_SIMD_2XNN && defined GMX_NBNXN_SIMD_4XN
1592 /* We need to choose if we want 2x(N+N) or 4xN kernels.
1593 * This is based on the SIMD acceleration choice and CPU information
1594 * detected at runtime.
1596 * 4xN calculates more (zero) interactions, but has less pair-search
1597 * work and much better kernel instruction scheduling.
1599 * Up till now we have only seen that on Intel Sandy/Ivy Bridge,
1600 * which doesn't have FMA, both the analytical and tabulated Ewald
1601 * kernels have similar pair rates for 4x8 and 2x(4+4), so we choose
1602 * 2x(4+4) because it results in significantly fewer pairs.
1603 * For RF, the raw pair rate of the 4x8 kernel is higher than 2x(4+4),
1604 * 10% with HT, 50% without HT. As we currently don't detect the actual
1605 * use of HT, use 4x8 to avoid a potential performance hit.
1606 * On Intel Haswell 4x8 is always faster.
1608 *kernel_type = nbnxnk4xN_SIMD_4xN;
1610 #if !GMX_SIMD_HAVE_FMA
1611 if (EEL_PME_EWALD(ir->coulombtype) ||
1612 EVDW_PME(ir->vdwtype))
1614 /* We have Ewald kernels without FMA (Intel Sandy/Ivy Bridge).
1615 * There are enough instructions to make 2x(4+4) efficient.
1617 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1619 #endif
1620 if (hardwareInfo.haveAmdZenCpu)
1622 /* One 256-bit FMA per cycle makes 2xNN faster */
1623 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1625 #endif /* GMX_NBNXN_SIMD_2XNN && GMX_NBNXN_SIMD_4XN */
1628 if (getenv("GMX_NBNXN_SIMD_4XN") != nullptr)
1630 #ifdef GMX_NBNXN_SIMD_4XN
1631 *kernel_type = nbnxnk4xN_SIMD_4xN;
1632 #else
1633 gmx_fatal(FARGS, "SIMD 4xN kernels requested, but GROMACS has been compiled without support for these kernels");
1634 #endif
1636 if (getenv("GMX_NBNXN_SIMD_2XNN") != nullptr)
1638 #ifdef GMX_NBNXN_SIMD_2XNN
1639 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1640 #else
1641 gmx_fatal(FARGS, "SIMD 2x(N+N) kernels requested, but GROMACS has been compiled without support for these kernels");
1642 #endif
1645 /* Analytical Ewald exclusion correction is only an option in
1646 * the SIMD kernel.
1647 * Since table lookups don't parallelize with SIMD, analytical
1648 * will probably always be faster for a SIMD width of 8 or more.
1649 * With FMA, analytical is sometimes faster for a width of 4 as well.
1650 * In single precision, this is faster on Bulldozer.
1652 #if GMX_SIMD_REAL_WIDTH >= 8 || \
1653 (GMX_SIMD_REAL_WIDTH >= 4 && GMX_SIMD_HAVE_FMA && !GMX_DOUBLE)
1654 /* On AMD Zen, tabulated Ewald kernels are faster on all 4 combinations
1655 * of single or double precision and 128 or 256-bit AVX2.
1657 if (!hardwareInfo.haveAmdZenCpu)
1659 *ewald_excl = ewaldexclAnalytical;
1661 #endif
1662 if (getenv("GMX_NBNXN_EWALD_TABLE") != nullptr)
1664 *ewald_excl = ewaldexclTable;
1666 if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != nullptr)
1668 *ewald_excl = ewaldexclAnalytical;
1672 #endif // GMX_SIMD
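/* Editor's note (summary of the overrides handled above, not part of the
 * original file): the kernel flavour picked here can be forced from the
 * environment, e.g.
 *
 *   GMX_NBNXN_SIMD_4XN=1          force the 4xN SIMD kernels
 *   GMX_NBNXN_SIMD_2XNN=1         force the 2x(N+N) SIMD kernels
 *   GMX_NBNXN_EWALD_TABLE=1       force tabulated Ewald exclusion correction
 *   GMX_NBNXN_EWALD_ANALYTICAL=1  force analytical Ewald exclusion correction
 *
 * (only the presence of the variable is checked, not its value).
 */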
1676 const char *lookup_nbnxn_kernel_name(int kernel_type)
1678 const char *returnvalue = nullptr;
1679 switch (kernel_type)
1681 case nbnxnkNotSet:
1682 returnvalue = "not set";
1683 break;
1684 case nbnxnk4x4_PlainC:
1685 returnvalue = "plain C";
1686 break;
1687 case nbnxnk4xN_SIMD_4xN:
1688 case nbnxnk4xN_SIMD_2xNN:
1689 #if GMX_SIMD
1690 returnvalue = "SIMD";
1691 #else // GMX_SIMD
1692 returnvalue = "not available";
1693 #endif // GMX_SIMD
1694 break;
1695 case nbnxnk8x8x8_GPU: returnvalue = "GPU"; break;
1696 case nbnxnk8x8x8_PlainC: returnvalue = "plain C"; break;
1698 case nbnxnkNR:
1699 default:
1700 gmx_fatal(FARGS, "Illegal kernel type selected");
1702 return returnvalue;
1705 static void pick_nbnxn_kernel(const gmx::MDLogger &mdlog,
1706 gmx_bool use_simd_kernels,
1707 const gmx_hw_info_t &hardwareInfo,
1708 gmx_bool bUseGPU,
1709 EmulateGpuNonbonded emulateGpu,
1710 const t_inputrec *ir,
1711 int *kernel_type,
1712 int *ewald_excl,
1713 gmx_bool bDoNonbonded)
1715 assert(kernel_type);
1717 *kernel_type = nbnxnkNotSet;
1718 *ewald_excl = ewaldexclTable;
1720 if (emulateGpu == EmulateGpuNonbonded::Yes)
1722 *kernel_type = nbnxnk8x8x8_PlainC;
1724 if (bDoNonbonded)
1726 GMX_LOG(mdlog.warning).asParagraph().appendText("Emulating a GPU run on the CPU (slow)");
1729 else if (bUseGPU)
1731 *kernel_type = nbnxnk8x8x8_GPU;
1734 if (*kernel_type == nbnxnkNotSet)
1736 if (use_simd_kernels &&
1737 nbnxn_simd_supported(mdlog, ir))
1739 pick_nbnxn_kernel_cpu(ir, kernel_type, ewald_excl, hardwareInfo);
1741 else
1743 *kernel_type = nbnxnk4x4_PlainC;
1747 if (bDoNonbonded)
1749 GMX_LOG(mdlog.info).asParagraph().appendTextFormatted(
1750 "Using %s %dx%d nonbonded short-range kernels",
1751 lookup_nbnxn_kernel_name(*kernel_type),
1752 nbnxn_kernel_to_cluster_i_size(*kernel_type),
1753 nbnxn_kernel_to_cluster_j_size(*kernel_type));
1755 if (nbnxnk4x4_PlainC == *kernel_type ||
1756 nbnxnk8x8x8_PlainC == *kernel_type)
1758 GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
1759 "WARNING: Using the slow %s kernels. This should\n"
1760 "not happen during routine usage on supported platforms.",
1761 lookup_nbnxn_kernel_name(*kernel_type));
1766 /*! \brief Print Coulomb Ewald citations and set ewald coefficients */
1767 static void initCoulombEwaldParameters(FILE *fp, const t_inputrec *ir,
1768 bool systemHasNetCharge,
1769 interaction_const_t *ic)
1771 if (!EEL_PME_EWALD(ir->coulombtype))
1773 return;
1776 if (fp)
1778 fprintf(fp, "Will do PME sum in reciprocal space for electrostatic interactions.\n");
1780 if (ir->coulombtype == eelP3M_AD)
1782 please_cite(fp, "Hockney1988");
1783 please_cite(fp, "Ballenegger2012");
1785 else
1787 please_cite(fp, "Essmann95a");
1790 if (ir->ewald_geometry == eewg3DC)
1792 if (fp)
1794 fprintf(fp, "Using the Ewald3DC correction for systems with a slab geometry%s.\n",
1795 systemHasNetCharge ? " and net charge" : "");
1797 please_cite(fp, "In-Chul99a");
1798 if (systemHasNetCharge)
1800 please_cite(fp, "Ballenegger2009");
1805 ic->ewaldcoeff_q = calc_ewaldcoeff_q(ir->rcoulomb, ir->ewald_rtol);
1806 if (fp)
1808 fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for Ewald\n",
1809 1/ic->ewaldcoeff_q);
1812 if (ic->coulomb_modifier == eintmodPOTSHIFT)
1814 GMX_RELEASE_ASSERT(ic->rcoulomb != 0, "Cutoff radius cannot be zero");
1815 ic->sh_ewald = std::erfc(ic->ewaldcoeff_q*ic->rcoulomb) / ic->rcoulomb;
1817 else
1819 ic->sh_ewald = 0;
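/* Editor's note (not part of the original file): with the potential-shift
 * modifier, the real-space Ewald pair potential q_i*q_j*erfc(beta*r)/r is
 * shifted down by the constant sh_ewald = erfc(beta*rc)/rc computed above,
 * so that it goes exactly to zero at r = rcoulomb.
 */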
1823 /*! \brief Print Van der Waals Ewald citations and set ewald coefficients */
1824 static void initVdwEwaldParameters(FILE *fp, const t_inputrec *ir,
1825 interaction_const_t *ic)
1827 if (!EVDW_PME(ir->vdwtype))
1829 return;
1832 if (fp)
1834 fprintf(fp, "Will do PME sum in reciprocal space for LJ dispersion interactions.\n");
1835 please_cite(fp, "Essmann95a");
1837 ic->ewaldcoeff_lj = calc_ewaldcoeff_lj(ir->rvdw, ir->ewald_rtol_lj);
1838 if (fp)
1840 fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for LJ Ewald\n",
1841 1/ic->ewaldcoeff_lj);
1844 if (ic->vdw_modifier == eintmodPOTSHIFT)
1846 real crc2 = gmx::square(ic->ewaldcoeff_lj*ic->rvdw);
1847 ic->sh_lj_ewald = (std::exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)/gmx::power6(ic->rvdw);
1849 else
1851 ic->sh_lj_ewald = 0;
1855 gmx_bool uses_simple_tables(int cutoff_scheme,
1856 nonbonded_verlet_t *nbv,
1857 int group)
1859 gmx_bool bUsesSimpleTables = TRUE;
1860 int grp_index;
1862 switch (cutoff_scheme)
1864 case ecutsGROUP:
1865 bUsesSimpleTables = TRUE;
1866 break;
1867 case ecutsVERLET:
1868 assert(nullptr != nbv);
1869 grp_index = (group < 0) ? 0 : (nbv->ngrp - 1);
1870 bUsesSimpleTables = nbnxn_kernel_pairlist_simple(nbv->grp[grp_index].kernel_type);
1871 break;
1872 default:
1873 gmx_incons("unimplemented");
1875 return bUsesSimpleTables;
1878 static void init_ewald_f_table(interaction_const_t *ic,
1879 real rtab)
1881 real maxr;
1883 /* Get the Ewald table spacing based on Coulomb and/or LJ
1884 * Ewald coefficients and rtol.
1886 ic->tabq_scale = ewald_spline3_table_scale(ic);
1888 if (ic->cutoff_scheme == ecutsVERLET)
1890 maxr = ic->rcoulomb;
1892 else
1894 maxr = std::max(ic->rcoulomb, rtab);
1896 ic->tabq_size = static_cast<int>(maxr*ic->tabq_scale) + 2;
1898 sfree_aligned(ic->tabq_coul_FDV0);
1899 sfree_aligned(ic->tabq_coul_F);
1900 sfree_aligned(ic->tabq_coul_V);
1902 sfree_aligned(ic->tabq_vdw_FDV0);
1903 sfree_aligned(ic->tabq_vdw_F);
1904 sfree_aligned(ic->tabq_vdw_V);
1906 if (EEL_PME_EWALD(ic->eeltype))
1908 /* Create the original table data in FDV0 */
1909 snew_aligned(ic->tabq_coul_FDV0, ic->tabq_size*4, 32);
1910 snew_aligned(ic->tabq_coul_F, ic->tabq_size, 32);
1911 snew_aligned(ic->tabq_coul_V, ic->tabq_size, 32);
1912 table_spline3_fill_ewald_lr(ic->tabq_coul_F, ic->tabq_coul_V, ic->tabq_coul_FDV0,
1913 ic->tabq_size, 1/ic->tabq_scale, ic->ewaldcoeff_q, v_q_ewald_lr);
1916 if (EVDW_PME(ic->vdwtype))
1918 snew_aligned(ic->tabq_vdw_FDV0, ic->tabq_size*4, 32);
1919 snew_aligned(ic->tabq_vdw_F, ic->tabq_size, 32);
1920 snew_aligned(ic->tabq_vdw_V, ic->tabq_size, 32);
1921 table_spline3_fill_ewald_lr(ic->tabq_vdw_F, ic->tabq_vdw_V, ic->tabq_vdw_FDV0,
1922 ic->tabq_size, 1/ic->tabq_scale, ic->ewaldcoeff_lj, v_lj_ewald_lr);
1926 void init_interaction_const_tables(FILE *fp,
1927 interaction_const_t *ic,
1928 real rtab)
1930 if (EEL_PME_EWALD(ic->eeltype) || EVDW_PME(ic->vdwtype))
1932 init_ewald_f_table(ic, rtab);
1934 if (fp != nullptr)
1936 fprintf(fp, "Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
1937 1/ic->tabq_scale, ic->tabq_size);
1942 static void clear_force_switch_constants(shift_consts_t *sc)
1944 sc->c2 = 0;
1945 sc->c3 = 0;
1946 sc->cpot = 0;
1949 static void force_switch_constants(real p,
1950 real rsw, real rc,
1951 shift_consts_t *sc)
1953 /* Here we determine the coefficients for shifting the force to zero
1954 * between distance rsw and the cut-off rc.
1955 * For a potential of r^-p, we have force p*r^-(p+1).
1956 * But to save flops we absorb p in the coefficient.
1957 * Thus we get:
1958 * force/p = r^-(p+1) + c2*r^2 + c3*r^3
1959 * potential = r^-p + c2/3*r^3 + c3/4*r^4 + cpot
1961 sc->c2 = ((p + 1)*rsw - (p + 4)*rc)/(pow(rc, p + 2)*gmx::square(rc - rsw));
1962 sc->c3 = -((p + 1)*rsw - (p + 3)*rc)/(pow(rc, p + 2)*gmx::power3(rc - rsw));
1963 sc->cpot = -pow(rc, -p) + p*sc->c2/3*gmx::power3(rc - rsw) + p*sc->c3/4*gmx::power4(rc - rsw);
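/* Editor's note (worked check, not part of the original file): reading the
 * r^2 and r^3 terms in the comment above as powers of (r - rsw), the
 * constants chosen here make the switched force vanish smoothly at the
 * cut-off:
 *
 *   force(rc)/p = rc^-(p+1) + c2*(rc-rsw)^2 + c3*(rc-rsw)^3
 *               = rc^-(p+1) + [ ((p+1)*rsw - (p+4)*rc)
 *                             - ((p+1)*rsw - (p+3)*rc) ] / rc^(p+2)
 *               = rc^-(p+1) - rc^-(p+1) = 0
 *
 * and its derivative at rc also cancels: -(p+1)/rc^(p+2) + (p+1)/rc^(p+2) = 0.
 * cpot is then the constant that brings the switched potential to zero at rc.
 */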
1966 static void potential_switch_constants(real rsw, real rc,
1967 switch_consts_t *sc)
1969 /* The switch function is 1 at rsw and 0 at rc.
1970 * The derivative and second derivative are zero at both ends.
1971 * rsw = max(r - r_switch, 0)
1972 * sw = 1 + c3*rsw^3 + c4*rsw^4 + c5*rsw^5
1973 * dsw = 3*c3*rsw^2 + 4*c4*rsw^3 + 5*c5*rsw^4
1974 * force = force*sw - potential*dsw
1975 * potential *= sw
1977 sc->c3 = -10/gmx::power3(rc - rsw);
1978 sc->c4 = 15/gmx::power4(rc - rsw);
1979 sc->c5 = -6/gmx::power5(rc - rsw);
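/* Editor's note (worked check, not part of the original file): writing
 * t = r - rsw and T = rc - rsw, the constants above give, at the cut-off
 * (t = T):
 *
 *   sw   = 1 + c3*T^3 + c4*T^4 + c5*T^5  = 1 - 10 + 15 - 6        = 0
 *   dsw  = 3*c3*T^2 + 4*c4*T^3 + 5*c5*T^4 = (-30 + 60 - 30)/T     = 0
 *   d2sw = 6*c3*T + 12*c4*T^2 + 20*c5*T^3 = (-60 + 180 - 120)/T^2 = 0
 *
 * while at t = 0 the switch is exactly 1 with zero first and second
 * derivative, as stated in the comment above.
 */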
1982 /*! \brief Construct interaction constants
1984 * This data is used (particularly) by search and force code for
1985 * short-range interactions. Many of these are constant for the whole
1986 * simulation; some are constant only after PME tuning completes.
1988 static void
1989 init_interaction_const(FILE *fp,
1990 interaction_const_t **interaction_const,
1991 const t_inputrec *ir,
1992 const gmx_mtop_t *mtop,
1993 bool systemHasNetCharge)
1995 interaction_const_t *ic;
1997 snew(ic, 1);
1999 ic->cutoff_scheme = ir->cutoff_scheme;
2001 /* Just allocate something so we can free it */
2002 snew_aligned(ic->tabq_coul_FDV0, 16, 32);
2003 snew_aligned(ic->tabq_coul_F, 16, 32);
2004 snew_aligned(ic->tabq_coul_V, 16, 32);
2006 /* Lennard-Jones */
2007 ic->vdwtype = ir->vdwtype;
2008 ic->vdw_modifier = ir->vdw_modifier;
2009 ic->reppow = mtop->ffparams.reppow;
2010 ic->rvdw = cutoff_inf(ir->rvdw);
2011 ic->rvdw_switch = ir->rvdw_switch;
2012 ic->ljpme_comb_rule = ir->ljpme_combination_rule;
2013 ic->useBuckingham = (mtop->ffparams.functype[0] == F_BHAM);
2014 if (ic->useBuckingham)
2016 ic->buckinghamBMax = calcBuckinghamBMax(fp, mtop);
2019 initVdwEwaldParameters(fp, ir, ic);
2021 clear_force_switch_constants(&ic->dispersion_shift);
2022 clear_force_switch_constants(&ic->repulsion_shift);
2024 switch (ic->vdw_modifier)
2026 case eintmodPOTSHIFT:
2027 /* Only shift the potential, don't touch the force */
2028 ic->dispersion_shift.cpot = -1.0/gmx::power6(ic->rvdw);
2029 ic->repulsion_shift.cpot = -1.0/gmx::power12(ic->rvdw);
2030 break;
2031 case eintmodFORCESWITCH:
2032 /* Switch the force, switch and shift the potential */
2033 force_switch_constants(6.0, ic->rvdw_switch, ic->rvdw,
2034 &ic->dispersion_shift);
2035 force_switch_constants(12.0, ic->rvdw_switch, ic->rvdw,
2036 &ic->repulsion_shift);
2037 break;
2038 case eintmodPOTSWITCH:
2039 /* Switch the potential and force */
2040 potential_switch_constants(ic->rvdw_switch, ic->rvdw,
2041 &ic->vdw_switch);
2042 break;
2043 case eintmodNONE:
2044 case eintmodEXACTCUTOFF:
2045 /* Nothing to do here */
2046 break;
2047 default:
2048 gmx_incons("unimplemented potential modifier");
2051 ic->sh_invrc6 = -ic->dispersion_shift.cpot;
2053 /* Electrostatics */
2054 ic->eeltype = ir->coulombtype;
2055 ic->coulomb_modifier = ir->coulomb_modifier;
2056 ic->rcoulomb = cutoff_inf(ir->rcoulomb);
2057 ic->rcoulomb_switch = ir->rcoulomb_switch;
2058 ic->epsilon_r = ir->epsilon_r;
2060 /* Set the Coulomb energy conversion factor */
2061 if (ic->epsilon_r != 0)
2063 ic->epsfac = ONE_4PI_EPS0/ic->epsilon_r;
2065 else
2067 /* eps = 0 is infinite dielectric: no Coulomb interactions */
2068 ic->epsfac = 0;
2071 /* Reaction-field */
2072 if (EEL_RF(ic->eeltype))
2074 ic->epsilon_rf = ir->epsilon_rf;
2075 /* Generalized reaction field parameters are updated every step */
2076 if (ic->eeltype != eelGRF)
2078 calc_rffac(fp, ic->eeltype, ic->epsilon_r, ic->epsilon_rf,
2079 ic->rcoulomb, 0, 0, nullptr,
2080 &ic->k_rf, &ic->c_rf);
2083 if (ir->cutoff_scheme == ecutsGROUP && ic->eeltype == eelRF_ZERO)
2085 /* grompp should have done this, but this scheme is obsolete */
2086 ic->coulomb_modifier = eintmodEXACTCUTOFF;
2089 else
2091 /* For plain cut-off we might use the reaction-field kernels */
2092 ic->epsilon_rf = ic->epsilon_r;
2093 ic->k_rf = 0;
2094 if (ir->coulomb_modifier == eintmodPOTSHIFT)
2096 ic->c_rf = 1/ic->rcoulomb;
2098 else
2100 ic->c_rf = 0;
2104 initCoulombEwaldParameters(fp, ir, systemHasNetCharge, ic);
2106 if (fp != nullptr)
2108 real dispersion_shift;
2110 dispersion_shift = ic->dispersion_shift.cpot;
2111 if (EVDW_PME(ic->vdwtype))
2113 dispersion_shift -= ic->sh_lj_ewald;
2115 fprintf(fp, "Potential shift: LJ r^-12: %.3e r^-6: %.3e",
2116 ic->repulsion_shift.cpot, dispersion_shift);
2118 if (ic->eeltype == eelCUT)
2120 fprintf(fp, ", Coulomb %.3e", -ic->c_rf);
2122 else if (EEL_PME(ic->eeltype))
2124 fprintf(fp, ", Ewald %.3e", -ic->sh_ewald);
2126 fprintf(fp, "\n");
2129 *interaction_const = ic;
2132 static void
2133 done_interaction_const(interaction_const_t *interaction_const)
2135 sfree_aligned(interaction_const->tabq_coul_FDV0);
2136 sfree_aligned(interaction_const->tabq_coul_F);
2137 sfree_aligned(interaction_const->tabq_coul_V);
2138 sfree(interaction_const);
2141 static void init_nb_verlet(const gmx::MDLogger &mdlog,
2142 nonbonded_verlet_t **nb_verlet,
2143 gmx_bool bFEP_NonBonded,
2144 const t_inputrec *ir,
2145 const t_forcerec *fr,
2146 const t_commrec *cr,
2147 const gmx_hw_info_t &hardwareInfo,
2148 const gmx_device_info_t *deviceInfo,
2149 const gmx_mtop_t *mtop,
2150 matrix box)
2152 nonbonded_verlet_t *nbv;
2153 char *env;
2155 nbnxn_alloc_t *nb_alloc;
2156 nbnxn_free_t *nb_free;
2158 nbv = new nonbonded_verlet_t();
2160 nbv->emulateGpu = ((getenv("GMX_EMULATE_GPU") != nullptr) ? EmulateGpuNonbonded::Yes : EmulateGpuNonbonded::No);
2161 nbv->bUseGPU = deviceInfo != nullptr;
2163 GMX_RELEASE_ASSERT(!(nbv->emulateGpu == EmulateGpuNonbonded::Yes && nbv->bUseGPU), "When GPU emulation is active, there cannot be a GPU assignment");
2165 nbv->nbs = nullptr;
2166 nbv->min_ci_balanced = 0;
2168 nbv->ngrp = (DOMAINDECOMP(cr) ? 2 : 1);
2169 for (int i = 0; i < nbv->ngrp; i++)
2171 nbv->grp[i].nbl_lists.nnbl = 0;
2172 nbv->grp[i].kernel_type = nbnxnkNotSet;
2174 if (i == 0) /* local */
2176 pick_nbnxn_kernel(mdlog, fr->use_simd_kernels, hardwareInfo,
2177 nbv->bUseGPU, nbv->emulateGpu, ir,
2178 &nbv->grp[i].kernel_type,
2179 &nbv->grp[i].ewald_excl,
2180 fr->bNonbonded);
2182 else /* non-local */
2184 /* Use the same kernel for local and non-local interactions */
2185 nbv->grp[i].kernel_type = nbv->grp[0].kernel_type;
2186 nbv->grp[i].ewald_excl = nbv->grp[0].ewald_excl;
2190 nbv->listParams = gmx::compat::make_unique<NbnxnListParameters>(ir->rlist);
2191 setupDynamicPairlistPruning(mdlog, ir, mtop, box, nbv->grp[0].kernel_type, fr->ic,
2192 nbv->listParams.get());
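/* With dynamic pruning the pair list is built with the buffered radius stored
 * in listParams and is then expected to be pruned to a shorter inner radius
 * every few MD steps, so the full pair search can run less frequently.
 */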
2194 nbv->nbs = gmx::compat::make_unique<nbnxn_search>(DOMAINDECOMP(cr) ? &cr->dd->nc : nullptr,
2195 DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : nullptr,
2196 bFEP_NonBonded,
2197 gmx_omp_nthreads_get(emntPairsearch));
2199 gpu_set_host_malloc_and_free(nbv->grp[0].kernel_type == nbnxnk8x8x8_GPU,
2200 &nb_alloc, &nb_free);
2202 for (int i = 0; i < nbv->ngrp; i++)
2204 nbnxn_init_pairlist_set(&nbv->grp[i].nbl_lists,
2205 nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
2206 /* 8x8x8 "non-simple" lists are currently always combined */
2207 !nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
2208 nb_alloc, nb_free);
2211 int enbnxninitcombrule;
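/* For reference: GEOM is the geometric rule, roughly C6_ij = sqrt(C6_i*C6_j)
 * (and likewise for C12); LB is Lorentz-Berthelot, sigma_ij = (sigma_i+sigma_j)/2
 * with epsilon_ij = sqrt(epsilon_i*epsilon_j); DETECT asks nbnxn_atomdata_init()
 * to check whether the full nbfp matrix actually follows one of these rules.
 */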
2212 if (fr->ic->vdwtype == evdwCUT &&
2213 (fr->ic->vdw_modifier == eintmodNONE ||
2214 fr->ic->vdw_modifier == eintmodPOTSHIFT) &&
2215 getenv("GMX_NO_LJ_COMB_RULE") == nullptr)
2217 /* Plain LJ cut-off: we can optimize with combination rules */
2218 enbnxninitcombrule = enbnxninitcombruleDETECT;
2220 else if (fr->ic->vdwtype == evdwPME)
2222 /* LJ-PME: we need to use a combination rule for the grid */
2223 if (fr->ljpme_combination_rule == eljpmeGEOM)
2225 enbnxninitcombrule = enbnxninitcombruleGEOM;
2227 else
2229 enbnxninitcombrule = enbnxninitcombruleLB;
2232 else
2234 /* We use a full combination matrix: no rule required */
2235 enbnxninitcombrule = enbnxninitcombruleNONE;
2238 snew(nbv->nbat, 1);
2239 int minimumNumEnergyGroupNonbonded = ir->opts.ngener;
2240 if (ir->opts.ngener - ir->nwall == 1)
2242 /* We have only one non-wall energy group, so we do not need energy-group
2243 * support in the non-bonded kernels, since all non-bonded energy
2244 * contributions go to the first element of the energy group matrix. */
2246 minimumNumEnergyGroupNonbonded = 1;
2248 bool bSimpleList = nbnxn_kernel_pairlist_simple(nbv->grp[0].kernel_type);
2249 nbnxn_atomdata_init(mdlog,
2250 nbv->nbat,
2251 nbv->grp[0].kernel_type,
2252 enbnxninitcombrule,
2253 fr->ntype, fr->nbfp,
2254 minimumNumEnergyGroupNonbonded,
2255 bSimpleList ? gmx_omp_nthreads_get(emntNonbonded) : 1,
2256 nb_alloc, nb_free);
2258 if (nbv->bUseGPU)
2260 /* init the NxN GPU data; the last argument tells whether we'll have
2261 * both local and non-local NB calculation on GPU */
2262 nbnxn_gpu_init(&nbv->gpu_nbv,
2263 deviceInfo,
2264 fr->ic,
2265 nbv->listParams.get(),
2266 nbv->nbat,
2267 cr->nodeid,
2268 (nbv->ngrp > 1));
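/* min_ci_balanced is the minimum number of pair-list work units the lists
 * should be split into so that all GPU multiprocessors stay busy; it can be
 * overridden by setting GMX_NB_MIN_CI in the environment, e.g. GMX_NB_MIN_CI=60.
 */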
2270 if ((env = getenv("GMX_NB_MIN_CI")) != nullptr)
2272 char *end;
2274 nbv->min_ci_balanced = strtol(env, &end, 10);
2275 if (!end || (*end != 0) || nbv->min_ci_balanced < 0)
2277 gmx_fatal(FARGS, "Invalid value passed in GMX_NB_MIN_CI=%s, non-negative integer required", env);
2280 if (debug)
2282 fprintf(debug, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
2283 nbv->min_ci_balanced);
2286 else
2288 nbv->min_ci_balanced = nbnxn_gpu_min_ci_balanced(nbv->gpu_nbv);
2289 if (debug)
2291 fprintf(debug, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
2292 nbv->min_ci_balanced);
2298 *nb_verlet = nbv;
2301 gmx_bool usingGpu(nonbonded_verlet_t *nbv)
2303 return nbv != nullptr && nbv->bUseGPU;
2306 void init_forcerec(FILE *fp,
2307 const gmx::MDLogger &mdlog,
2308 t_forcerec *fr,
2309 t_fcdata *fcd,
2310 const t_inputrec *ir,
2311 const gmx_mtop_t *mtop,
2312 const t_commrec *cr,
2313 matrix box,
2314 const char *tabfn,
2315 const char *tabpfn,
2316 gmx::ArrayRef<const std::string> tabbfnm,
2317 const gmx_hw_info_t &hardwareInfo,
2318 const gmx_device_info_t *deviceInfo,
2319 const bool useGpuForBonded,
2320 gmx_bool bNoSolvOpt,
2321 real print_force)
2323 int m, negp_pp, negptable, egi, egj;
2324 real rtab;
2325 char *env;
2326 double dbl;
2327 const t_block *cgs;
2328 gmx_bool bGenericKernelOnly;
2329 gmx_bool needGroupSchemeTables, bSomeNormalNbListsAreInUse;
2330 gmx_bool bFEP_NonBonded;
2331 int *nm_ind, egp_flags;
2333 /* By default we turn SIMD kernels on, but it might be turned off further down... */
2334 fr->use_simd_kernels = TRUE;
2336 if (check_box(ir->ePBC, box))
2338 gmx_fatal(FARGS, "%s", check_box(ir->ePBC, box));
2341 /* Test particle insertion ? */
2342 if (EI_TPI(ir->eI))
2344 /* Set to the size of the molecule to be inserted (the last one) */
2345 /* Because of old style topologies, we have to use the last cg
2346 * instead of the last molecule type.
2348 cgs = &mtop->moltype[mtop->molblock.back().type].cgs;
2349 fr->n_tpi = cgs->index[cgs->nr] - cgs->index[cgs->nr-1];
2350 gmx::RangePartitioning molecules = gmx_mtop_molecules(*mtop);
2351 if (fr->n_tpi != molecules.block(molecules.numBlocks() - 1).size())
2353 gmx_fatal(FARGS, "The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
2356 else
2358 fr->n_tpi = 0;
2361 if (ir->coulombtype == eelRF_NEC_UNSUPPORTED)
2363 gmx_fatal(FARGS, "%s electrostatics is no longer supported",
2364 eel_names[ir->coulombtype]);
2367 if (ir->bAdress)
2369 gmx_fatal(FARGS, "AdResS simulations are no longer supported");
2371 if (ir->useTwinRange)
2373 gmx_fatal(FARGS, "Twin-range simulations are no longer supported");
2375 /* Copy the user determined parameters */
2376 fr->userint1 = ir->userint1;
2377 fr->userint2 = ir->userint2;
2378 fr->userint3 = ir->userint3;
2379 fr->userint4 = ir->userint4;
2380 fr->userreal1 = ir->userreal1;
2381 fr->userreal2 = ir->userreal2;
2382 fr->userreal3 = ir->userreal3;
2383 fr->userreal4 = ir->userreal4;
2385 /* Shell stuff */
2386 fr->fc_stepsize = ir->fc_stepsize;
2388 /* Free energy */
2389 fr->efep = ir->efep;
2390 fr->sc_alphavdw = ir->fepvals->sc_alpha;
2391 if (ir->fepvals->bScCoul)
2393 fr->sc_alphacoul = ir->fepvals->sc_alpha;
2394 fr->sc_sigma6_min = gmx::power6(ir->fepvals->sc_sigma_min);
2396 else
2398 fr->sc_alphacoul = 0;
2399 fr->sc_sigma6_min = 0; /* only needed when bScCoul is on */
2401 fr->sc_power = ir->fepvals->sc_power;
2402 fr->sc_r_power = ir->fepvals->sc_r_power;
2403 fr->sc_sigma6_def = gmx::power6(ir->fepvals->sc_sigma);
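/* These feed the Beutler-style soft-core interactions, roughly
 * V_sc(r) = (1-lambda)*V_A(r_A) + lambda*V_B(r_B) with
 * r_A = (sc_alpha*sigma_A^6*lambda^sc_power + r^6)^(1/6);
 * sc_sigma6_def is the sigma^6 used for atom pairs whose own parameters
 * give sigma = 0.
 */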
2405 env = getenv("GMX_SCSIGMA_MIN");
2406 if (env != nullptr)
2408 dbl = 0;
2409 sscanf(env, "%20lf", &dbl);
2410 fr->sc_sigma6_min = gmx::power6(dbl);
2411 if (fp)
2413 fprintf(fp, "Setting the minimum soft core sigma to %g nm\n", dbl);
2417 fr->bNonbonded = TRUE;
2418 if (getenv("GMX_NO_NONBONDED") != nullptr)
2420 /* turn off non-bonded calculations */
2421 fr->bNonbonded = FALSE;
2422 GMX_LOG(mdlog.warning).asParagraph().appendText(
2423 "Found environment variable GMX_NO_NONBONDED.\n"
2424 "Disabling nonbonded calculations.");
2427 bGenericKernelOnly = FALSE;
2429 /* We now check in the NS code whether a particular combination of interactions
2430 * can be used with water optimization, and disable it if that is not the case.
2433 if (getenv("GMX_NB_GENERIC") != nullptr)
2435 if (fp != nullptr)
2437 fprintf(fp,
2438 "Found environment variable GMX_NB_GENERIC.\n"
2439 "Disabling all interaction-specific nonbonded kernels, will only\n"
2440 "use the slow generic ones in src/gmxlib/nonbonded/nb_generic.c\n\n");
2442 bGenericKernelOnly = TRUE;
2445 if (bGenericKernelOnly)
2447 bNoSolvOpt = TRUE;
2450 if ( (getenv("GMX_DISABLE_SIMD_KERNELS") != nullptr) || (getenv("GMX_NOOPTIMIZEDKERNELS") != nullptr) )
2452 fr->use_simd_kernels = FALSE;
2453 if (fp != nullptr)
2455 fprintf(fp,
2456 "\nFound environment variable GMX_DISABLE_SIMD_KERNELS.\n"
2457 "Disabling the usage of any SIMD-specific non-bonded & bonded kernel routines\n"
2458 "(e.g. SSE2/SSE4.1/AVX).\n\n");
2462 fr->bBHAM = (mtop->ffparams.functype[0] == F_BHAM);
2464 /* Check if we can/should do all-vs-all kernels */
2465 fr->bAllvsAll = can_use_allvsall(ir, FALSE, nullptr, nullptr);
2466 fr->AllvsAll_work = nullptr;
2468 /* All-vs-all kernels have not been implemented in 4.6 and later.
2469 * See Redmine #1249. */
2470 if (fr->bAllvsAll)
2472 fr->bAllvsAll = FALSE;
2473 if (fp != nullptr)
2475 fprintf(fp,
2476 "\nYour simulation settings would have triggered the efficient all-vs-all\n"
2477 "kernels in GROMACS 4.5, but these have not been implemented in GROMACS\n"
2478 "4.6 and 5.x. If performance is important, please use GROMACS 4.5.7\n"
2479 "or try cutoff-scheme = Verlet.\n\n");
2483 /* Neighbour searching stuff */
2484 fr->cutoff_scheme = ir->cutoff_scheme;
2485 fr->bGrid = (ir->ns_type == ensGRID);
2486 fr->ePBC = ir->ePBC;
2488 if (fr->cutoff_scheme == ecutsGROUP)
2490 const char *note = "NOTE: This file uses the deprecated 'group' cutoff_scheme. This will be\n"
2491 "removed in a future release when 'verlet' supports all interaction forms.\n";
2493 if (MASTER(cr))
2495 fprintf(stderr, "\n%s\n", note);
2497 if (fp != nullptr)
2499 fprintf(fp, "\n%s\n", note);
2503 /* Determine if we will do PBC for distances in bonded interactions */
2504 if (fr->ePBC == epbcNONE)
2506 fr->bMolPBC = FALSE;
2508 else
2510 if (!DOMAINDECOMP(cr))
2512 gmx_bool bSHAKE;
2514 bSHAKE = (ir->eConstrAlg == econtSHAKE &&
2515 (gmx_mtop_ftype_count(mtop, F_CONSTR) > 0 ||
2516 gmx_mtop_ftype_count(mtop, F_CONSTRNC) > 0));
2518 /* The group cut-off scheme and SHAKE assume charge groups
2519 * are whole, but not using molpbc is faster in most cases.
2520 * With intermolecular interactions we need PBC for calculating
2521 * distances between atoms in different molecules.
2523 if ((fr->cutoff_scheme == ecutsGROUP || bSHAKE) &&
2524 !mtop->bIntermolecularInteractions)
2526 fr->bMolPBC = ir->bPeriodicMols;
2528 if (bSHAKE && fr->bMolPBC)
2530 gmx_fatal(FARGS, "SHAKE is not supported with periodic molecules");
2533 else
2535 /* Not making molecules whole is faster in most cases,
2536 * but with orientation restraints we need whole molecules. */
2538 fr->bMolPBC = (fcd->orires.nr == 0);
2540 if (getenv("GMX_USE_GRAPH") != nullptr)
2542 fr->bMolPBC = FALSE;
2543 if (fp)
2545 GMX_LOG(mdlog.warning).asParagraph().appendText("GMX_USE_GRAPH is set, using the graph for bonded interactions");
2548 if (mtop->bIntermolecularInteractions)
2550 GMX_LOG(mdlog.warning).asParagraph().appendText("WARNING: Molecules linked by intermolecular interactions have to reside in the same periodic image, otherwise artifacts will occur!");
2554 GMX_RELEASE_ASSERT(fr->bMolPBC || !mtop->bIntermolecularInteractions, "We need to use PBC within molecules with inter-molecular interactions");
2556 if (bSHAKE && fr->bMolPBC)
2558 gmx_fatal(FARGS, "SHAKE is not properly supported with intermolecular interactions. For short simulations where linked molecules remain in the same periodic image, the environment variable GMX_USE_GRAPH can be used to override this check.\n");
2562 else
2564 fr->bMolPBC = dd_bonded_molpbc(cr->dd, fr->ePBC);
2568 fr->rc_scaling = ir->refcoord_scaling;
2569 copy_rvec(ir->posres_com, fr->posres_com);
2570 copy_rvec(ir->posres_comB, fr->posres_comB);
2571 fr->rlist = cutoff_inf(ir->rlist);
2572 fr->ljpme_combination_rule = ir->ljpme_combination_rule;
2574 /* This now calculates the sums of q and c6 */
2575 bool systemHasNetCharge = set_chargesum(fp, fr, mtop);
2577 /* fr->ic is used both by verlet and group kernels (to some extent) now */
2578 init_interaction_const(fp, &fr->ic, ir, mtop, systemHasNetCharge);
2579 init_interaction_const_tables(fp, fr->ic, ir->rlist + ir->tabext);
2581 const interaction_const_t *ic = fr->ic;
2583 /* TODO: Replace this Ewald table or move it into interaction_const_t */
2584 if (ir->coulombtype == eelEWALD)
2586 init_ewald_tab(&(fr->ewald_table), ir, fp);
2589 /* Electrostatics: Translate from interaction-setting-in-mdp-file to kernel interaction format */
2590 switch (ic->eeltype)
2592 case eelCUT:
2593 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_COULOMB;
2594 break;
2596 case eelRF:
2597 case eelGRF:
2598 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2599 break;
2601 case eelRF_ZERO:
2602 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2603 GMX_RELEASE_ASSERT(ic->coulomb_modifier == eintmodEXACTCUTOFF, "With the group scheme RF-zero needs the exact cut-off modifier");
2604 break;
2606 case eelSWITCH:
2607 case eelSHIFT:
2608 case eelUSER:
2609 case eelENCADSHIFT:
2610 case eelPMESWITCH:
2611 case eelPMEUSER:
2612 case eelPMEUSERSWITCH:
2613 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2614 break;
2616 case eelPME:
2617 case eelP3M_AD:
2618 case eelEWALD:
2619 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_EWALD;
2620 break;
2622 default:
2623 gmx_fatal(FARGS, "Unsupported electrostatic interaction: %s", eel_names[ic->eeltype]);
2625 fr->nbkernel_elec_modifier = ic->coulomb_modifier;
2627 /* Vdw: Translate from mdp settings to kernel format */
2628 switch (ic->vdwtype)
2630 case evdwCUT:
2631 if (fr->bBHAM)
2633 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_BUCKINGHAM;
2635 else
2637 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LENNARDJONES;
2639 break;
2640 case evdwPME:
2641 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LJEWALD;
2642 break;
2644 case evdwSWITCH:
2645 case evdwSHIFT:
2646 case evdwUSER:
2647 case evdwENCADSHIFT:
2648 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2649 break;
2651 default:
2652 gmx_fatal(FARGS, "Unsupported vdw interaction: %s", evdw_names[ic->vdwtype]);
2654 fr->nbkernel_vdw_modifier = ic->vdw_modifier;
2656 if (ir->cutoff_scheme == ecutsGROUP)
2658 fr->bvdwtab = ((ic->vdwtype != evdwCUT || !gmx_within_tol(ic->reppow, 12.0, 10*GMX_DOUBLE_EPS))
2659 && !EVDW_PME(ic->vdwtype));
2660 /* We have special kernels for standard Ewald and PME, but the pme-switch ones are tabulated above */
2661 fr->bcoultab = !(ic->eeltype == eelCUT ||
2662 ic->eeltype == eelEWALD ||
2663 ic->eeltype == eelPME ||
2664 ic->eeltype == eelP3M_AD ||
2665 ic->eeltype == eelRF ||
2666 ic->eeltype == eelRF_ZERO);
2668 /* If the user absolutely wants different switch/shift settings for coul/vdw, it is likely
2669 * going to be faster to tabulate the interaction than calling the generic kernel.
2670 * However, if generic kernels have been requested we keep things analytical. */
2672 if (fr->nbkernel_elec_modifier == eintmodPOTSWITCH &&
2673 fr->nbkernel_vdw_modifier == eintmodPOTSWITCH &&
2674 !bGenericKernelOnly)
2676 if ((ic->rcoulomb_switch != ic->rvdw_switch) || (ic->rcoulomb != ic->rvdw))
2678 fr->bcoultab = TRUE;
2679 /* Once we tabulate electrostatics, we can use the switch function for LJ,
2680 * which would otherwise need two tables.
2684 else if ((fr->nbkernel_elec_modifier == eintmodPOTSHIFT && fr->nbkernel_vdw_modifier == eintmodPOTSHIFT) ||
2685 ((fr->nbkernel_elec_interaction == GMX_NBKERNEL_ELEC_REACTIONFIELD &&
2686 fr->nbkernel_elec_modifier == eintmodEXACTCUTOFF &&
2687 (fr->nbkernel_vdw_modifier == eintmodPOTSWITCH || fr->nbkernel_vdw_modifier == eintmodPOTSHIFT))))
2689 if ((ic->rcoulomb != ic->rvdw) && (!bGenericKernelOnly))
2691 fr->bcoultab = TRUE;
2695 if (fr->nbkernel_elec_modifier == eintmodFORCESWITCH)
2697 fr->bcoultab = TRUE;
2699 if (fr->nbkernel_vdw_modifier == eintmodFORCESWITCH)
2701 fr->bvdwtab = TRUE;
2704 if (getenv("GMX_REQUIRE_TABLES"))
2706 fr->bvdwtab = TRUE;
2707 fr->bcoultab = TRUE;
2710 if (fp)
2712 fprintf(fp, "Table routines are used for coulomb: %s\n",
2713 gmx::boolToString(fr->bcoultab));
2714 fprintf(fp, "Table routines are used for vdw: %s\n",
2715 gmx::boolToString(fr->bvdwtab));
2718 if (fr->bvdwtab)
2720 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2721 fr->nbkernel_vdw_modifier = eintmodNONE;
2723 if (fr->bcoultab)
2725 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2726 fr->nbkernel_elec_modifier = eintmodNONE;
2730 if (ir->cutoff_scheme == ecutsVERLET)
2732 if (!gmx_within_tol(ic->reppow, 12.0, 10*GMX_DOUBLE_EPS))
2734 gmx_fatal(FARGS, "Cut-off scheme %s only supports LJ repulsion power 12", ecutscheme_names[ir->cutoff_scheme]);
2736 /* Older tpr files can contain Coulomb user tables with the Verlet cutoff-scheme,
2737 * while mdrun does not (and never did) support this.
2739 if (EEL_USER(fr->ic->eeltype))
2741 gmx_fatal(FARGS, "Combination of %s and cutoff scheme %s is not supported",
2742 eel_names[ir->coulombtype], ecutscheme_names[ir->cutoff_scheme]);
2745 fr->bvdwtab = FALSE;
2746 fr->bcoultab = FALSE;
2749 /* 1-4 interaction electrostatics */
2750 fr->fudgeQQ = mtop->ffparams.fudgeQQ;
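/* fudgeQQ scales the Coulomb part of 1-4 pair interactions; typical
 * force-field values are 0.5 (OPLS-AA) or 1/1.2 (AMBER-derived).
 */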
2752 /* Parameters for generalized RF */
2753 fr->zsquare = 0.0;
2754 fr->temp = 0.0;
2756 if (ic->eeltype == eelGRF)
2758 init_generalized_rf(fp, mtop, ir, fr);
2761 fr->haveDirectVirialContributions =
2762 (EEL_FULL(ic->eeltype) || EVDW_PME(ic->vdwtype) ||
2763 fr->forceProviders->hasForceProvider() ||
2764 gmx_mtop_ftype_count(mtop, F_POSRES) > 0 ||
2765 gmx_mtop_ftype_count(mtop, F_FBPOSRES) > 0 ||
2766 ir->nwall > 0 ||
2767 ir->bPull ||
2768 ir->bRot ||
2769 ir->bIMD);
2771 if (fr->haveDirectVirialContributions)
2773 fr->forceBufferForDirectVirialContributions = new std::vector<gmx::RVec>;
2776 if (fr->cutoff_scheme == ecutsGROUP &&
2777 ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr))
2779 /* Count the total number of charge groups */
2780 fr->cg_nalloc = ncg_mtop(mtop);
2781 srenew(fr->cg_cm, fr->cg_nalloc);
2783 if (fr->shift_vec == nullptr)
2785 snew(fr->shift_vec, SHIFTS);
2788 if (fr->fshift == nullptr)
2790 snew(fr->fshift, SHIFTS);
2793 if (fr->nbfp == nullptr)
2795 fr->ntype = mtop->ffparams.atnr;
2796 fr->nbfp = mk_nbfp(&mtop->ffparams, fr->bBHAM);
2797 if (EVDW_PME(ic->vdwtype))
2799 fr->ljpme_c6grid = make_ljpme_c6grid(&mtop->ffparams, fr);
2803 /* Copy the energy group exclusions */
2804 fr->egp_flags = ir->opts.egp_flags;
2806 /* Van der Waals stuff */
2807 if ((ic->vdwtype != evdwCUT) && (ic->vdwtype != evdwUSER) && !fr->bBHAM)
2809 if (ic->rvdw_switch >= ic->rvdw)
2811 gmx_fatal(FARGS, "rvdw_switch (%f) must be < rvdw (%f)",
2812 ic->rvdw_switch, ic->rvdw);
2814 if (fp)
2816 fprintf(fp, "Using %s Lennard-Jones, switch between %g and %g nm\n",
2817 (ic->vdwtype == evdwSWITCH) ? "switched" : "shifted",
2818 ic->rvdw_switch, ic->rvdw);
2822 if (fr->bBHAM && EVDW_PME(ic->vdwtype))
2824 gmx_fatal(FARGS, "LJ PME not supported with Buckingham");
2827 if (fr->bBHAM && (ic->vdwtype == evdwSHIFT || ic->vdwtype == evdwSWITCH))
2829 gmx_fatal(FARGS, "Switch/shift interaction not supported with Buckingham");
2832 if (fr->bBHAM && fr->cutoff_scheme == ecutsVERLET)
2834 gmx_fatal(FARGS, "Verlet cutoff-scheme is not supported with Buckingham");
2837 if (fp && fr->cutoff_scheme == ecutsGROUP)
2839 fprintf(fp, "Cut-off's: NS: %g Coulomb: %g %s: %g\n",
2840 fr->rlist, ic->rcoulomb, fr->bBHAM ? "BHAM" : "LJ", ic->rvdw);
2843 fr->eDispCorr = ir->eDispCorr;
2844 fr->numAtomsForDispersionCorrection = mtop->natoms;
2845 if (ir->eDispCorr != edispcNO)
2847 set_avcsixtwelve(fp, fr, mtop);
2850 if (ir->implicit_solvent)
2852 gmx_fatal(FARGS, "Implict solvation is no longer supported.");
2855 /* Construct tables for the group scheme. Making both vdw and coul
2856 * tables is sometimes unnecessary, but it does no harm.
2857 * Note that both cutoff schemes construct Ewald tables in
2858 * init_interaction_const_tables. */
2859 needGroupSchemeTables = (ir->cutoff_scheme == ecutsGROUP &&
2860 (fr->bcoultab || fr->bvdwtab));
2862 negp_pp = ir->opts.ngener - ir->nwall;
2863 negptable = 0;
2864 if (!needGroupSchemeTables)
2866 bSomeNormalNbListsAreInUse = TRUE;
2867 fr->nnblists = 1;
2869 else
2871 bSomeNormalNbListsAreInUse = FALSE;
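/* Count the energy-group pairs that request their own table (EGP_TABLE)
 * and are not fully excluded (EGP_EXCL); each such pair gets its own
 * nblist with its own set of tables below.
 */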
2872 for (egi = 0; egi < negp_pp; egi++)
2874 for (egj = egi; egj < negp_pp; egj++)
2876 egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
2877 if (!(egp_flags & EGP_EXCL))
2879 if (egp_flags & EGP_TABLE)
2881 negptable++;
2883 else
2885 bSomeNormalNbListsAreInUse = TRUE;
2890 if (bSomeNormalNbListsAreInUse)
2892 fr->nnblists = negptable + 1;
2894 else
2896 fr->nnblists = negptable;
2898 if (fr->nnblists > 1)
2900 snew(fr->gid2nblists, ir->opts.ngener*ir->opts.ngener);
2904 snew(fr->nblists, fr->nnblists);
2906 /* Without cut-offs this code automatically gives a table length of tabext;
2907 * in that case grompp should already have checked that we do not need
2908 * normal tables and we only generate tables for 1-4 interactions. */
2910 rtab = ir->rlist + ir->tabext;
2912 if (needGroupSchemeTables)
2914 /* make tables for ordinary interactions */
2915 if (bSomeNormalNbListsAreInUse)
2917 make_nbf_tables(fp, ic, rtab, tabfn, nullptr, nullptr, &fr->nblists[0]);
2918 m = 1;
2920 else
2922 m = 0;
2924 if (negptable > 0)
2926 /* Read the special tables for certain energy group pairs */
2927 nm_ind = mtop->groups.grps[egcENER].nm_ind;
2928 for (egi = 0; egi < negp_pp; egi++)
2930 for (egj = egi; egj < negp_pp; egj++)
2932 egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
2933 if ((egp_flags & EGP_TABLE) && !(egp_flags & EGP_EXCL))
2935 if (fr->nnblists > 1)
2937 fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = m;
2939 /* Read the table file with the two energy-group names appended */
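/* e.g. with tabfn "table.xvg" and energy groups named "A" and "B",
 * this reads a file like "table_A_B.xvg".
 */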
2940 make_nbf_tables(fp, ic, rtab, tabfn,
2941 *mtop->groups.grpname[nm_ind[egi]],
2942 *mtop->groups.grpname[nm_ind[egj]],
2943 &fr->nblists[m]);
2944 m++;
2946 else if (fr->nnblists > 1)
2948 fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = 0;
2955 /* Tables might not be used for the potential modifier
2956 * interactions per se, but we still need them to evaluate
2957 * switch/shift dispersion corrections in this case. */
2958 if (fr->eDispCorr != edispcNO)
2960 fr->dispersionCorrectionTable = makeDispersionCorrectionTable(fp, ic, rtab, tabfn);
2963 /* We want to use unmodified tables for 1-4 coulombic
2964 * interactions, so we must in general have an extra set of
2965 * tables. */
2966 if (gmx_mtop_ftype_count(mtop, F_LJ14) > 0 ||
2967 gmx_mtop_ftype_count(mtop, F_LJC14_Q) > 0 ||
2968 gmx_mtop_ftype_count(mtop, F_LJC_PAIRS_NB) > 0)
2970 fr->pairsTable = make_tables(fp, ic, tabpfn, rtab,
2971 GMX_MAKETABLES_14ONLY);
2974 /* Wall stuff */
2975 fr->nwall = ir->nwall;
2976 if (ir->nwall && ir->wall_type == ewtTABLE)
2978 make_wall_tables(fp, ir, tabfn, &mtop->groups, fr);
2981 if (fcd && !tabbfnm.empty())
2983 // Need to catch std::bad_alloc
2984 // TODO Don't need to catch this here, when merging with master branch
2987 fcd->bondtab = make_bonded_tables(fp,
2988 F_TABBONDS, F_TABBONDSNC,
2989 mtop, tabbfnm, "b");
2990 fcd->angletab = make_bonded_tables(fp,
2991 F_TABANGLES, -1,
2992 mtop, tabbfnm, "a");
2993 fcd->dihtab = make_bonded_tables(fp,
2994 F_TABDIHS, -1,
2995 mtop, tabbfnm, "d");
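/* The "b", "a" and "d" suffixes select the per-type table files, e.g.
 * table_b0.xvg, table_a0.xvg and table_d0.xvg, where the number is the
 * table index given in the topology.
 */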
2997 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2999 else
3001 if (debug)
3003 fprintf(debug, "No fcdata or table file name passed, cannot read table, cannot do bonded interactions\n");
3007 // QM/MM initialization if requested
3008 fr->bQMMM = ir->bQMMM;
3009 if (fr->bQMMM)
3011 // Initialize QM/MM if supported
3012 if (GMX_QMMM)
3014 GMX_LOG(mdlog.info).asParagraph().
3015 appendText("Large parts of the QM/MM support is deprecated, and may be removed in a future "
3016 "version. Please get in touch with the developers if you find the support useful, "
3017 "as help is needed if the functionality is to continue to be available.");
3018 fr->qr = mk_QMMMrec();
3019 init_QMMMrec(cr, mtop, ir, fr);
3021 else
3023 gmx_incons("QM/MM was requested, but is only available when GROMACS "
3024 "is configured with QM/MM support");
3028 /* Set all the static charge group info */
3029 fr->cginfo_mb = init_cginfo_mb(fp, mtop, fr, bNoSolvOpt,
3030 &bFEP_NonBonded,
3031 &fr->bExcl_IntraCGAll_InterCGNone);
3032 if (DOMAINDECOMP(cr))
3034 fr->cginfo = nullptr;
3036 else
3038 fr->cginfo = cginfo_expand(mtop->molblock.size(), fr->cginfo_mb);
3041 if (!DOMAINDECOMP(cr))
3043 forcerec_set_ranges(fr, ncg_mtop(mtop), ncg_mtop(mtop),
3044 mtop->natoms, mtop->natoms, mtop->natoms);
3047 fr->print_force = print_force;
3050 /* coarse load balancing vars */
3051 fr->t_fnbf = 0.;
3052 fr->t_wait = 0.;
3053 fr->timesteps = 0;
3055 /* Initialize neighbor search */
3056 snew(fr->ns, 1);
3057 init_ns(fp, cr, fr->ns, fr, mtop);
3059 if (thisRankHasDuty(cr, DUTY_PP))
3061 gmx_nonbonded_setup(fr, bGenericKernelOnly);
3064 /* Initialize the thread working data for bonded interactions */
3065 init_bonded_threading(fp, mtop->groups.grps[egcENER].nr,
3066 &fr->bondedThreading);
3068 fr->nthread_ewc = gmx_omp_nthreads_get(emntBonded);
3069 snew(fr->ewc_t, fr->nthread_ewc);
3071 if (fr->cutoff_scheme == ecutsVERLET)
3073 // We checked the cut-offs in grompp, but double-check here.
3074 // We have PME+LJcutoff kernels for rcoulomb>rvdw.
3075 if (EEL_PME_EWALD(ir->coulombtype) && ir->vdwtype == evdwCUT)
3077 GMX_RELEASE_ASSERT(ir->rcoulomb >= ir->rvdw, "With Verlet lists and PME we should have rcoulomb>=rvdw");
3079 else
3081 GMX_RELEASE_ASSERT(ir->rcoulomb == ir->rvdw, "With Verlet lists and no PME rcoulomb and rvdw should be identical");
3084 init_nb_verlet(mdlog, &fr->nbv, bFEP_NonBonded, ir, fr,
3085 cr, hardwareInfo, deviceInfo,
3086 mtop, box);
3088 if (useGpuForBonded)
3090 auto stream = DOMAINDECOMP(cr) ?
3091 nbnxn_gpu_get_command_stream(fr->nbv->gpu_nbv, eintNonlocal) :
3092 nbnxn_gpu_get_command_stream(fr->nbv->gpu_nbv, eintLocal);
3093 // TODO the heap allocation is only needed while
3094 // t_forcerec lacks a constructor.
3095 fr->gpuBonded = new gmx::GpuBonded(mtop->ffparams,
3096 stream);
3100 if (fp != nullptr)
3102 /* Here we switch from using mdlog, which prints the newline before
3103 * the paragraph, to our old fprintf logging, which prints the newline
3104 * after the paragraph, so we should add a newline here.
3106 fprintf(fp, "\n");
3109 if (ir->eDispCorr != edispcNO)
3111 calc_enervirdiff(fp, ir->eDispCorr, fr);
3115 /* Frees GPU memory and issues a physical-node barrier (with thread-MPI).
3117 * Note that this function needs to be called even if GPUs are not used
3118 * in this run because the PME ranks have no knowledge of whether GPUs
3119 * are used or not, but all ranks need to enter the barrier below.
3120 * \todo Remove physical node barrier from this function after making sure
3121 * that it's not needed anymore (with a shared GPU run).
3123 void free_gpu_resources(t_forcerec *fr,
3124 const gmx::PhysicalNodeCommunicator &physicalNodeCommunicator)
3126 bool isPPrankUsingGPU = (fr != nullptr) && (fr->nbv != nullptr) && fr->nbv->bUseGPU;
3128 /* stop the GPU profiler (only CUDA) */
3129 stopGpuProfiler();
3131 if (isPPrankUsingGPU)
3133 /* free nbnxn data in GPU memory */
3134 nbnxn_gpu_free(fr->nbv->gpu_nbv);
3135 delete fr->gpuBonded;
3136 fr->gpuBonded = nullptr;
3139 /* With tMPI we need to wait for all ranks to finish deallocation before
3140 * destroying the CUDA context in free_gpu() as some tMPI ranks may be sharing
3141 * GPU and context.
3143 * This is not a concern in OpenCL where we use one context per rank which
3144 * is freed in nbnxn_gpu_free().
3146 * Note: it is safe to not call the barrier on the ranks which do not use GPU,
3147 * but it is easier and more future-proof to call it on the whole node.
3149 if (GMX_THREAD_MPI)
3151 physicalNodeCommunicator.barrier();
3155 void done_forcerec(t_forcerec *fr, int numMolBlocks, int numEnergyGroups)
3157 if (fr == nullptr)
3159 // PME-only ranks don't have a forcerec
3160 return;
3162 // cginfo is dynamically allocated if no domain decomposition
3163 if (fr->cginfo != nullptr)
3165 sfree(fr->cginfo);
3167 done_cginfo_mb(fr->cginfo_mb, numMolBlocks);
3168 sfree(fr->nbfp);
3169 done_interaction_const(fr->ic);
3170 sfree(fr->shift_vec);
3171 sfree(fr->fshift);
3172 sfree(fr->nblists);
3173 done_ns(fr->ns, numEnergyGroups);
3174 sfree(fr->ewc_t);
3175 tear_down_bonded_threading(fr->bondedThreading);
3176 GMX_RELEASE_ASSERT(fr->gpuBonded == nullptr, "Should have been deleted earlier, when used");
3177 fr->bondedThreading = nullptr;
3178 sfree(fr);