1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 #include "gmxpre.h"
39 #include "forcerec.h"
41 #include "config.h"
43 #include <assert.h>
44 #include <stdlib.h>
45 #include <string.h>
47 #include <cmath>
49 #include <algorithm>
51 #include "gromacs/commandline/filenm.h"
52 #include "gromacs/domdec/domdec.h"
53 #include "gromacs/domdec/domdec_struct.h"
54 #include "gromacs/ewald/ewald.h"
55 #include "gromacs/fileio/filetypes.h"
56 #include "gromacs/gmxlib/network.h"
57 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
58 #include "gromacs/gpu_utils/gpu_utils.h"
59 #include "gromacs/hardware/detecthardware.h"
60 #include "gromacs/listed-forces/manage-threading.h"
61 #include "gromacs/listed-forces/pairs.h"
62 #include "gromacs/math/calculate-ewald-splitting-coefficient.h"
63 #include "gromacs/math/functions.h"
64 #include "gromacs/math/units.h"
65 #include "gromacs/math/utilities.h"
66 #include "gromacs/math/vec.h"
67 #include "gromacs/mdlib/force.h"
68 #include "gromacs/mdlib/forcerec-threading.h"
69 #include "gromacs/mdlib/gmx_omp_nthreads.h"
70 #include "gromacs/mdlib/md_support.h"
71 #include "gromacs/mdlib/nb_verlet.h"
72 #include "gromacs/mdlib/nbnxn_atomdata.h"
73 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
74 #include "gromacs/mdlib/nbnxn_search.h"
75 #include "gromacs/mdlib/nbnxn_simd.h"
76 #include "gromacs/mdlib/nbnxn_util.h"
77 #include "gromacs/mdlib/ns.h"
78 #include "gromacs/mdlib/qmmm.h"
79 #include "gromacs/mdlib/sim_util.h"
80 #include "gromacs/mdtypes/commrec.h"
81 #include "gromacs/mdtypes/fcdata.h"
82 #include "gromacs/mdtypes/group.h"
83 #include "gromacs/mdtypes/iforceprovider.h"
84 #include "gromacs/mdtypes/inputrec.h"
85 #include "gromacs/mdtypes/md_enums.h"
86 #include "gromacs/pbcutil/ishift.h"
87 #include "gromacs/pbcutil/pbc.h"
88 #include "gromacs/simd/simd.h"
89 #include "gromacs/tables/forcetable.h"
90 #include "gromacs/topology/mtop_util.h"
91 #include "gromacs/trajectory/trajectoryframe.h"
92 #include "gromacs/utility/cstringutil.h"
93 #include "gromacs/utility/exceptions.h"
94 #include "gromacs/utility/fatalerror.h"
95 #include "gromacs/utility/gmxassert.h"
96 #include "gromacs/utility/logger.h"
97 #include "gromacs/utility/pleasecite.h"
98 #include "gromacs/utility/smalloc.h"
99 #include "gromacs/utility/strconvert.h"
101 #include "nbnxn_gpu_jit_support.h"
103 const char *egrp_nm[egNR+1] = {
104 "Coul-SR", "LJ-SR", "Buck-SR",
105 "Coul-14", "LJ-14", nullptr
108 t_forcerec *mk_forcerec(void)
110 t_forcerec *fr;
112 snew(fr, 1);
114 return fr;
117 #ifdef DEBUG
118 static void pr_nbfp(FILE *fp, real *nbfp, gmx_bool bBHAM, int atnr)
120 int i, j;
122 for (i = 0; (i < atnr); i++)
124 for (j = 0; (j < atnr); j++)
126 fprintf(fp, "%2d - %2d", i, j);
127 if (bBHAM)
129 fprintf(fp, " a=%10g, b=%10g, c=%10g\n", BHAMA(nbfp, atnr, i, j),
130 BHAMB(nbfp, atnr, i, j), BHAMC(nbfp, atnr, i, j)/6.0);
132 else
134 fprintf(fp, " c6=%10g, c12=%10g\n", C6(nbfp, atnr, i, j)/6.0,
135 C12(nbfp, atnr, i, j)/12.0);
140 #endif
142 static real *mk_nbfp(const gmx_ffparams_t *idef, gmx_bool bBHAM)
144 real *nbfp;
145 int i, j, k, atnr;
147 atnr = idef->atnr;
148 if (bBHAM)
150 snew(nbfp, 3*atnr*atnr);
151 for (i = k = 0; (i < atnr); i++)
153 for (j = 0; (j < atnr); j++, k++)
155 BHAMA(nbfp, atnr, i, j) = idef->iparams[k].bham.a;
156 BHAMB(nbfp, atnr, i, j) = idef->iparams[k].bham.b;
157 /* nbfp now includes the 6.0 derivative prefactor */
158 BHAMC(nbfp, atnr, i, j) = idef->iparams[k].bham.c*6.0;
162 else
164 snew(nbfp, 2*atnr*atnr);
165 for (i = k = 0; (i < atnr); i++)
167 for (j = 0; (j < atnr); j++, k++)
169 /* nbfp now includes the 6.0/12.0 derivative prefactors */
170 C6(nbfp, atnr, i, j) = idef->iparams[k].lj.c6*6.0;
171 C12(nbfp, atnr, i, j) = idef->iparams[k].lj.c12*12.0;
176 return nbfp;
179 static real *make_ljpme_c6grid(const gmx_ffparams_t *idef, t_forcerec *fr)
181 int i, j, k, atnr;
182 real c6, c6i, c6j, c12i, c12j, epsi, epsj, sigmai, sigmaj;
183 real *grid;
185 /* For LJ-PME simulations, we correct the energies with the reciprocal space
186 * inside of the cut-off. To do this the non-bonded kernels need to have
187 * access to the C6-values used on the reciprocal grid in pme.c
190 atnr = idef->atnr;
191 snew(grid, 2*atnr*atnr);
192 for (i = k = 0; (i < atnr); i++)
194 for (j = 0; (j < atnr); j++, k++)
196 c6i = idef->iparams[i*(atnr+1)].lj.c6;
197 c12i = idef->iparams[i*(atnr+1)].lj.c12;
198 c6j = idef->iparams[j*(atnr+1)].lj.c6;
199 c12j = idef->iparams[j*(atnr+1)].lj.c12;
200 c6 = std::sqrt(c6i * c6j);
201 if (fr->ljpme_combination_rule == eljpmeLB
202 && !gmx_numzero(c6) && !gmx_numzero(c12i) && !gmx_numzero(c12j))
204 sigmai = gmx::sixthroot(c12i / c6i);
205 sigmaj = gmx::sixthroot(c12j / c6j);
206 epsi = c6i * c6i / c12i;
207 epsj = c6j * c6j / c12j;
208 c6 = std::sqrt(epsi * epsj) * gmx::power6(0.5*(sigmai+sigmaj));
210 /* Store the elements at the same relative positions as C6 in nbfp in order
211 * to simplify access in the kernels
213 grid[2*(atnr*i+j)] = c6*6.0;
216 return grid;
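/* Worked example for the Lorentz-Berthelot branch above (an added sketch,
 * not part of the original source): with the conventional c6 = 4*eps*sigma^6
 * and c12 = 4*eps*sigma^12 one gets
 *   sigma    = (c12/c6)^(1/6)
 *   c6^2/c12 = 4*eps
 * so the epsi/epsj variables above actually hold 4*eps. The combined value
 *   sqrt(4*eps_i*4*eps_j) * (0.5*(sigma_i+sigma_j))^6 = 4*sqrt(eps_i*eps_j)*sigma_LB^6
 * is therefore directly the C6 of the Lorentz-Berthelot pair, stored with the
 * usual 6.0 derivative prefactor.
 */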
219 static real *mk_nbfp_combination_rule(const gmx_ffparams_t *idef, int comb_rule)
221 real *nbfp;
222 int i, j, atnr;
223 real c6i, c6j, c12i, c12j, epsi, epsj, sigmai, sigmaj;
224 real c6, c12;
226 atnr = idef->atnr;
227 snew(nbfp, 2*atnr*atnr);
228 for (i = 0; i < atnr; ++i)
230 for (j = 0; j < atnr; ++j)
232 c6i = idef->iparams[i*(atnr+1)].lj.c6;
233 c12i = idef->iparams[i*(atnr+1)].lj.c12;
234 c6j = idef->iparams[j*(atnr+1)].lj.c6;
235 c12j = idef->iparams[j*(atnr+1)].lj.c12;
236 c6 = std::sqrt(c6i * c6j);
237 c12 = std::sqrt(c12i * c12j);
238 if (comb_rule == eCOMB_ARITHMETIC
239 && !gmx_numzero(c6) && !gmx_numzero(c12))
241 sigmai = gmx::sixthroot(c12i / c6i);
242 sigmaj = gmx::sixthroot(c12j / c6j);
243 epsi = c6i * c6i / c12i;
244 epsj = c6j * c6j / c12j;
245 c6 = std::sqrt(epsi * epsj) * gmx::power6(0.5*(sigmai+sigmaj));
246 c12 = std::sqrt(epsi * epsj) * gmx::power12(0.5*(sigmai+sigmaj));
248 C6(nbfp, atnr, i, j) = c6*6.0;
249 C12(nbfp, atnr, i, j) = c12*12.0;
252 return nbfp;
255 /* This routine sets fr->solvent_opt to the most common solvent in the
256 * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in
257 * the fr->solvent_type array with the correct type (or esolNO).
259 * Charge groups that fulfill the conditions but are not identical to the
260 * most common one will be marked as esolNO in the solvent_type array.
262 * TIP3P is identical to SPC for these purposes, so we call it
263 * SPC in the arrays (Apologies to Bill Jorgensen ;-)
265 * NOTE: A QM particle should not
266 * become an optimized solvent, not even if there is only one charge
267 * group in the QM system.
270 typedef struct
272 int model;
273 int count;
274 int vdwtype[4];
275 real charge[4];
276 } solvent_parameters_t;
278 static void
279 check_solvent_cg(const gmx_moltype_t *molt,
280 int cg0,
281 int nmol,
282 const unsigned char *qm_grpnr,
283 const t_grps *qm_grps,
284 t_forcerec * fr,
285 int *n_solvent_parameters,
286 solvent_parameters_t **solvent_parameters_p,
287 int cginfo,
288 int *cg_sp)
290 t_atom *atom;
291 int j, k;
292 int j0, j1, nj;
293 gmx_bool perturbed;
294 gmx_bool has_vdw[4];
295 gmx_bool match;
296 real tmp_charge[4] = { 0.0 }; /* init to zero to make gcc4.8 happy */
297 int tmp_vdwtype[4] = { 0 }; /* init to zero to make gcc4.8 happy */
298 int tjA;
299 gmx_bool qm;
300 solvent_parameters_t *solvent_parameters;
302 /* We use a list with parameters for each solvent type.
303 * Every time we discover a new molecule that fulfills the basic
304 * conditions for a solvent we compare with the previous entries
305 * in these lists. If the parameters are the same we just increment
306 * the counter for that type, and otherwise we create a new type
307 * based on the current molecule.
309 * Once we've finished going through all molecules we check which
310 * solvent is most common, and mark all those molecules while we
311 * clear the flag on all others.
314 solvent_parameters = *solvent_parameters_p;
316 /* Mark the cg first as non optimized */
317 *cg_sp = -1;
319 /* Check if this cg has no exclusions with atoms in other charge groups
320 * and all atoms inside the charge group excluded.
321 * We only have 3 or 4 atom solvent loops.
323 if (GET_CGINFO_EXCL_INTER(cginfo) ||
324 !GET_CGINFO_EXCL_INTRA(cginfo))
326 return;
329 /* Get the indices of the first atom in this charge group */
330 j0 = molt->cgs.index[cg0];
331 j1 = molt->cgs.index[cg0+1];
333 /* Number of atoms in our molecule */
334 nj = j1 - j0;
336 if (debug)
338 fprintf(debug,
339 "Moltype '%s': there are %d atoms in this charge group\n",
340 *molt->name, nj);
343 /* Check if it could be an SPC (3 atoms) or TIP4p (4) water,
344 * otherwise skip it.
346 if (nj < 3 || nj > 4)
348 return;
351 /* Check if we are doing QM on this group */
352 qm = FALSE;
353 if (qm_grpnr != nullptr)
355 for (j = j0; j < j1 && !qm; j++)
357 qm = (qm_grpnr[j] < qm_grps->nr - 1);
360 /* Cannot use solvent optimization with QM */
361 if (qm)
363 return;
366 atom = molt->atoms.atom;
368 /* Still looks like a solvent, time to check parameters */
370 /* If it is perturbed (free energy) we can't use the solvent loops,
371 * so then we just skip to the next molecule.
373 perturbed = FALSE;
375 for (j = j0; j < j1 && !perturbed; j++)
377 perturbed = PERTURBED(atom[j]);
380 if (perturbed)
382 return;
385 /* Now it's only a question if the VdW and charge parameters
386 * are OK. Before doing the check we compare and see if they are
387 * identical to a possible previous solvent type.
388 * First we assign the current types and charges.
390 for (j = 0; j < nj; j++)
392 tmp_vdwtype[j] = atom[j0+j].type;
393 tmp_charge[j] = atom[j0+j].q;
396 /* Does it match any previous solvent type? */
397 for (k = 0; k < *n_solvent_parameters; k++)
399 match = TRUE;
402 /* We can only match SPC with 3 atoms and TIP4p with 4 atoms */
403 if ( (solvent_parameters[k].model == esolSPC && nj != 3) ||
404 (solvent_parameters[k].model == esolTIP4P && nj != 4) )
406 match = FALSE;
409 /* Check that types & charges match for all atoms in molecule */
410 for (j = 0; j < nj && match == TRUE; j++)
412 if (tmp_vdwtype[j] != solvent_parameters[k].vdwtype[j])
414 match = FALSE;
416 if (tmp_charge[j] != solvent_parameters[k].charge[j])
418 match = FALSE;
421 if (match == TRUE)
423 /* Congratulations! We have a matched solvent.
424 * Flag it with this type for later processing.
426 *cg_sp = k;
427 solvent_parameters[k].count += nmol;
429 /* We are done with this charge group */
430 return;
434 /* If we get here, we have a tentative new solvent type.
435 * Before we add it we must check that it fulfills the requirements
436 * of the solvent optimized loops. First determine which atoms have
437 * VdW interactions.
439 for (j = 0; j < nj; j++)
441 has_vdw[j] = FALSE;
442 tjA = tmp_vdwtype[j];
444 /* Go through all other types and see if any have non-zero
445 * VdW parameters when combined with this one.
447 for (k = 0; k < fr->ntype && (has_vdw[j] == FALSE); k++)
449 /* We already checked that the atoms weren't perturbed,
450 * so we only need to check state A now.
452 if (fr->bBHAM)
454 has_vdw[j] = (has_vdw[j] ||
455 (BHAMA(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
456 (BHAMB(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
457 (BHAMC(fr->nbfp, fr->ntype, tjA, k) != 0.0));
459 else
461 /* Standard LJ */
462 has_vdw[j] = (has_vdw[j] ||
463 (C6(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
464 (C12(fr->nbfp, fr->ntype, tjA, k) != 0.0));
469 /* Now we know all we need to make the final check and assignment. */
470 if (nj == 3)
472 /* So, is it an SPC?
473 * For this we require that all atoms have charge,
474 * the charges on atom 2 & 3 should be the same, and only
475 * atom 1 might have VdW.
477 if (has_vdw[1] == FALSE &&
478 has_vdw[2] == FALSE &&
479 tmp_charge[0] != 0 &&
480 tmp_charge[1] != 0 &&
481 tmp_charge[2] == tmp_charge[1])
483 srenew(solvent_parameters, *n_solvent_parameters+1);
484 solvent_parameters[*n_solvent_parameters].model = esolSPC;
485 solvent_parameters[*n_solvent_parameters].count = nmol;
486 for (k = 0; k < 3; k++)
488 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
489 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
492 *cg_sp = *n_solvent_parameters;
493 (*n_solvent_parameters)++;
496 else if (nj == 4)
498 /* Or could it be a TIP4P?
499 * For this we require that atoms 2,3,4 have charge, but not atom 1.
500 * Only atom 1 might have VdW.
502 if (has_vdw[1] == FALSE &&
503 has_vdw[2] == FALSE &&
504 has_vdw[3] == FALSE &&
505 tmp_charge[0] == 0 &&
506 tmp_charge[1] != 0 &&
507 tmp_charge[2] == tmp_charge[1] &&
508 tmp_charge[3] != 0)
510 srenew(solvent_parameters, *n_solvent_parameters+1);
511 solvent_parameters[*n_solvent_parameters].model = esolTIP4P;
512 solvent_parameters[*n_solvent_parameters].count = nmol;
513 for (k = 0; k < 4; k++)
515 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
516 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
519 *cg_sp = *n_solvent_parameters;
520 (*n_solvent_parameters)++;
524 *solvent_parameters_p = solvent_parameters;
527 static void
528 check_solvent(FILE * fp,
529 const gmx_mtop_t * mtop,
530 t_forcerec * fr,
531 cginfo_mb_t *cginfo_mb)
533 const t_block * cgs;
534 const gmx_moltype_t *molt;
535 int mb, mol, cg_mol, at_offset, am, cgm, i, nmol_ch, nmol;
536 int n_solvent_parameters;
537 solvent_parameters_t *solvent_parameters;
538 int **cg_sp;
539 int bestsp, bestsol;
541 if (debug)
543 fprintf(debug, "Going to determine what solvent types we have.\n");
546 n_solvent_parameters = 0;
547 solvent_parameters = nullptr;
548 /* Allocate temporary array for solvent type */
549 snew(cg_sp, mtop->nmolblock);
551 at_offset = 0;
552 for (mb = 0; mb < mtop->nmolblock; mb++)
554 molt = &mtop->moltype[mtop->molblock[mb].type];
555 cgs = &molt->cgs;
556 /* Here we have to loop over all individual molecules
557 * because we need to check for QMMM particles.
559 snew(cg_sp[mb], cginfo_mb[mb].cg_mod);
560 nmol_ch = cginfo_mb[mb].cg_mod/cgs->nr;
561 nmol = mtop->molblock[mb].nmol/nmol_ch;
562 for (mol = 0; mol < nmol_ch; mol++)
564 cgm = mol*cgs->nr;
565 am = mol*cgs->index[cgs->nr];
566 for (cg_mol = 0; cg_mol < cgs->nr; cg_mol++)
568 check_solvent_cg(molt, cg_mol, nmol,
569 mtop->groups.grpnr[egcQMMM] ?
570 mtop->groups.grpnr[egcQMMM]+at_offset+am : nullptr,
571 &mtop->groups.grps[egcQMMM],
573 &n_solvent_parameters, &solvent_parameters,
574 cginfo_mb[mb].cginfo[cgm+cg_mol],
575 &cg_sp[mb][cgm+cg_mol]);
578 at_offset += cgs->index[cgs->nr];
581 /* Puh! We finished going through all charge groups.
582 * Now find the most common solvent model.
585 /* Most common solvent so far */
586 bestsp = -2;
587 for (i = 0; i < n_solvent_parameters; i++)
589 if (bestsp == -2 ||
590 solvent_parameters[i].count > solvent_parameters[bestsp].count)
592 bestsp = i;
596 if (bestsp >= 0)
598 bestsol = solvent_parameters[bestsp].model;
600 else
602 bestsol = esolNO;
605 fr->nWatMol = 0;
606 for (mb = 0; mb < mtop->nmolblock; mb++)
608 cgs = &mtop->moltype[mtop->molblock[mb].type].cgs;
609 nmol = (mtop->molblock[mb].nmol*cgs->nr)/cginfo_mb[mb].cg_mod;
610 for (i = 0; i < cginfo_mb[mb].cg_mod; i++)
612 if (cg_sp[mb][i] == bestsp)
614 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i], bestsol);
615 fr->nWatMol += nmol;
617 else
619 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i], esolNO);
622 sfree(cg_sp[mb]);
624 sfree(cg_sp);
626 if (bestsol != esolNO && fp != nullptr)
628 fprintf(fp, "\nEnabling %s-like water optimization for %d molecules.\n\n",
629 esol_names[bestsol],
630 solvent_parameters[bestsp].count);
633 sfree(solvent_parameters);
634 fr->solvent_opt = bestsol;
637 enum {
638 acNONE = 0, acCONSTRAINT, acSETTLE
641 static cginfo_mb_t *init_cginfo_mb(FILE *fplog, const gmx_mtop_t *mtop,
642 t_forcerec *fr, gmx_bool bNoSolvOpt,
643 gmx_bool *bFEP_NonBonded,
644 gmx_bool *bExcl_IntraCGAll_InterCGNone)
646 const t_block *cgs;
647 const t_blocka *excl;
648 const gmx_moltype_t *molt;
649 const gmx_molblock_t *molb;
650 cginfo_mb_t *cginfo_mb;
651 gmx_bool *type_VDW;
652 int *cginfo;
653 int cg_offset, a_offset;
654 int mb, m, cg, a0, a1, gid, ai, j, aj, excl_nalloc;
655 int *a_con;
656 int ftype;
657 int ia;
658 gmx_bool bId, *bExcl, bExclIntraAll, bExclInter, bHaveVDW, bHaveQ, bHavePerturbedAtoms;
660 snew(cginfo_mb, mtop->nmolblock);
662 snew(type_VDW, fr->ntype);
663 for (ai = 0; ai < fr->ntype; ai++)
665 type_VDW[ai] = FALSE;
666 for (j = 0; j < fr->ntype; j++)
668 type_VDW[ai] = type_VDW[ai] ||
669 fr->bBHAM ||
670 C6(fr->nbfp, fr->ntype, ai, j) != 0 ||
671 C12(fr->nbfp, fr->ntype, ai, j) != 0;
675 *bFEP_NonBonded = FALSE;
676 *bExcl_IntraCGAll_InterCGNone = TRUE;
678 excl_nalloc = 10;
679 snew(bExcl, excl_nalloc);
680 cg_offset = 0;
681 a_offset = 0;
682 for (mb = 0; mb < mtop->nmolblock; mb++)
684 molb = &mtop->molblock[mb];
685 molt = &mtop->moltype[molb->type];
686 cgs = &molt->cgs;
687 excl = &molt->excls;
689 /* Check if the cginfo is identical for all molecules in this block.
690 * If so, we only need an array of the size of one molecule.
691 * Otherwise we make an array of #mol times #cgs per molecule.
693 bId = TRUE;
694 for (m = 0; m < molb->nmol; m++)
696 int am = m*cgs->index[cgs->nr];
697 for (cg = 0; cg < cgs->nr; cg++)
699 a0 = cgs->index[cg];
700 a1 = cgs->index[cg+1];
701 if (ggrpnr(&mtop->groups, egcENER, a_offset+am+a0) !=
702 ggrpnr(&mtop->groups, egcENER, a_offset +a0))
704 bId = FALSE;
706 if (mtop->groups.grpnr[egcQMMM] != nullptr)
708 for (ai = a0; ai < a1; ai++)
710 if (mtop->groups.grpnr[egcQMMM][a_offset+am+ai] !=
711 mtop->groups.grpnr[egcQMMM][a_offset +ai])
713 bId = FALSE;
720 cginfo_mb[mb].cg_start = cg_offset;
721 cginfo_mb[mb].cg_end = cg_offset + molb->nmol*cgs->nr;
722 cginfo_mb[mb].cg_mod = (bId ? 1 : molb->nmol)*cgs->nr;
723 snew(cginfo_mb[mb].cginfo, cginfo_mb[mb].cg_mod);
724 cginfo = cginfo_mb[mb].cginfo;
726 /* Set constraints flags for constrained atoms */
727 snew(a_con, molt->atoms.nr);
728 for (ftype = 0; ftype < F_NRE; ftype++)
730 if (interaction_function[ftype].flags & IF_CONSTRAINT)
732 int nral;
734 nral = NRAL(ftype);
735 for (ia = 0; ia < molt->ilist[ftype].nr; ia += 1+nral)
737 int a;
739 for (a = 0; a < nral; a++)
741 a_con[molt->ilist[ftype].iatoms[ia+1+a]] =
742 (ftype == F_SETTLE ? acSETTLE : acCONSTRAINT);
748 for (m = 0; m < (bId ? 1 : molb->nmol); m++)
750 int cgm = m*cgs->nr;
751 int am = m*cgs->index[cgs->nr];
752 for (cg = 0; cg < cgs->nr; cg++)
754 a0 = cgs->index[cg];
755 a1 = cgs->index[cg+1];
757 /* Store the energy group in cginfo */
758 gid = ggrpnr(&mtop->groups, egcENER, a_offset+am+a0);
759 SET_CGINFO_GID(cginfo[cgm+cg], gid);
761 /* Check the intra/inter charge group exclusions */
762 if (a1-a0 > excl_nalloc)
764 excl_nalloc = a1 - a0;
765 srenew(bExcl, excl_nalloc);
767 /* bExclIntraAll: all intra cg interactions excluded
768 * bExclInter: any inter cg interactions excluded
770 bExclIntraAll = TRUE;
771 bExclInter = FALSE;
772 bHaveVDW = FALSE;
773 bHaveQ = FALSE;
774 bHavePerturbedAtoms = FALSE;
775 for (ai = a0; ai < a1; ai++)
777 /* Check VDW and electrostatic interactions */
778 bHaveVDW = bHaveVDW || (type_VDW[molt->atoms.atom[ai].type] ||
779 type_VDW[molt->atoms.atom[ai].typeB]);
780 bHaveQ = bHaveQ || (molt->atoms.atom[ai].q != 0 ||
781 molt->atoms.atom[ai].qB != 0);
783 bHavePerturbedAtoms = bHavePerturbedAtoms || (PERTURBED(molt->atoms.atom[ai]) != 0);
785 /* Clear the exclusion list for atom ai */
786 for (aj = a0; aj < a1; aj++)
788 bExcl[aj-a0] = FALSE;
790 /* Loop over all the exclusions of atom ai */
791 for (j = excl->index[ai]; j < excl->index[ai+1]; j++)
793 aj = excl->a[j];
794 if (aj < a0 || aj >= a1)
796 bExclInter = TRUE;
798 else
800 bExcl[aj-a0] = TRUE;
803 /* Check if ai excludes a0 to a1 */
804 for (aj = a0; aj < a1; aj++)
806 if (!bExcl[aj-a0])
808 bExclIntraAll = FALSE;
812 switch (a_con[ai])
814 case acCONSTRAINT:
815 SET_CGINFO_CONSTR(cginfo[cgm+cg]);
816 break;
817 case acSETTLE:
818 SET_CGINFO_SETTLE(cginfo[cgm+cg]);
819 break;
820 default:
821 break;
824 if (bExclIntraAll)
826 SET_CGINFO_EXCL_INTRA(cginfo[cgm+cg]);
828 if (bExclInter)
830 SET_CGINFO_EXCL_INTER(cginfo[cgm+cg]);
832 if (a1 - a0 > MAX_CHARGEGROUP_SIZE)
834 /* The size in cginfo is currently only read with DD */
835 gmx_fatal(FARGS, "A charge group has size %d which is larger than the limit of %d atoms", a1-a0, MAX_CHARGEGROUP_SIZE);
837 if (bHaveVDW)
839 SET_CGINFO_HAS_VDW(cginfo[cgm+cg]);
841 if (bHaveQ)
843 SET_CGINFO_HAS_Q(cginfo[cgm+cg]);
845 if (bHavePerturbedAtoms && fr->efep != efepNO)
847 SET_CGINFO_FEP(cginfo[cgm+cg]);
848 *bFEP_NonBonded = TRUE;
850 /* Store the charge group size */
851 SET_CGINFO_NATOMS(cginfo[cgm+cg], a1-a0);
853 if (!bExclIntraAll || bExclInter)
855 *bExcl_IntraCGAll_InterCGNone = FALSE;
860 sfree(a_con);
862 cg_offset += molb->nmol*cgs->nr;
863 a_offset += molb->nmol*cgs->index[cgs->nr];
865 sfree(bExcl);
867 /* the solvent optimizer is called after the QM is initialized,
868 * because we don't want to have the QM subsystem to become an
869 * optimized solvent
872 check_solvent(fplog, mtop, fr, cginfo_mb);
874 if (getenv("GMX_NO_SOLV_OPT"))
876 if (fplog)
878 fprintf(fplog, "Found environment variable GMX_NO_SOLV_OPT.\n"
879 "Disabling all solvent optimization\n");
881 fr->solvent_opt = esolNO;
883 if (bNoSolvOpt)
885 fr->solvent_opt = esolNO;
887 if (!fr->solvent_opt)
889 for (mb = 0; mb < mtop->nmolblock; mb++)
891 for (cg = 0; cg < cginfo_mb[mb].cg_mod; cg++)
893 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[cg], esolNO);
898 return cginfo_mb;
901 static int *cginfo_expand(int nmb, cginfo_mb_t *cgi_mb)
903 int ncg, mb, cg;
904 int *cginfo;
906 ncg = cgi_mb[nmb-1].cg_end;
907 snew(cginfo, ncg);
908 mb = 0;
909 for (cg = 0; cg < ncg; cg++)
911 while (cg >= cgi_mb[mb].cg_end)
913 mb++;
915 cginfo[cg] =
916 cgi_mb[mb].cginfo[(cg - cgi_mb[mb].cg_start) % cgi_mb[mb].cg_mod];
919 return cginfo;
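/* Indexing illustration (added note, not part of the original source):
 * when all molecules in a block are identical, cg_mod holds only the charge
 * groups of a single molecule, otherwise one entry per charge group of every
 * molecule. The lookup
 *   cgi_mb[mb].cginfo[(cg - cgi_mb[mb].cg_start) % cgi_mb[mb].cg_mod]
 * therefore subtracts the block offset and wraps identical molecules onto
 * the same compressed entry, so the expanded array can be filled in a single
 * pass over all global charge-group indices.
 */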
922 static void set_chargesum(FILE *log, t_forcerec *fr, const gmx_mtop_t *mtop)
924 /* This now calculates the sums for q and c6 */
925 double qsum, q2sum, q, c6sum, c6;
926 int mb, nmol, i;
927 const t_atoms *atoms;
929 qsum = 0;
930 q2sum = 0;
931 c6sum = 0;
932 for (mb = 0; mb < mtop->nmolblock; mb++)
934 nmol = mtop->molblock[mb].nmol;
935 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
936 for (i = 0; i < atoms->nr; i++)
938 q = atoms->atom[i].q;
939 qsum += nmol*q;
940 q2sum += nmol*q*q;
941 c6 = mtop->ffparams.iparams[atoms->atom[i].type*(mtop->ffparams.atnr+1)].lj.c6;
942 c6sum += nmol*c6;
945 fr->qsum[0] = qsum;
946 fr->q2sum[0] = q2sum;
947 fr->c6sum[0] = c6sum;
949 if (fr->efep != efepNO)
951 qsum = 0;
952 q2sum = 0;
953 c6sum = 0;
954 for (mb = 0; mb < mtop->nmolblock; mb++)
956 nmol = mtop->molblock[mb].nmol;
957 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
958 for (i = 0; i < atoms->nr; i++)
960 q = atoms->atom[i].qB;
961 qsum += nmol*q;
962 q2sum += nmol*q*q;
963 c6 = mtop->ffparams.iparams[atoms->atom[i].typeB*(mtop->ffparams.atnr+1)].lj.c6;
964 c6sum += nmol*c6;
966 fr->qsum[1] = qsum;
967 fr->q2sum[1] = q2sum;
968 fr->c6sum[1] = c6sum;
971 else
973 fr->qsum[1] = fr->qsum[0];
974 fr->q2sum[1] = fr->q2sum[0];
975 fr->c6sum[1] = fr->c6sum[0];
977 if (log)
979 if (fr->efep == efepNO)
981 fprintf(log, "System total charge: %.3f\n", fr->qsum[0]);
983 else
985 fprintf(log, "System total charge, top. A: %.3f top. B: %.3f\n",
986 fr->qsum[0], fr->qsum[1]);
991 void update_forcerec(t_forcerec *fr, matrix box)
993 if (fr->eeltype == eelGRF)
995 calc_rffac(nullptr, fr->eeltype, fr->epsilon_r, fr->epsilon_rf,
996 fr->rcoulomb, fr->temp, fr->zsquare, box,
997 &fr->kappa, &fr->k_rf, &fr->c_rf);
1001 void set_avcsixtwelve(FILE *fplog, t_forcerec *fr, const gmx_mtop_t *mtop)
1003 const t_atoms *atoms, *atoms_tpi;
1004 const t_blocka *excl;
1005 int mb, nmol, nmolc, i, j, tpi, tpj, j1, j2, k, nexcl, q;
1006 gmx_int64_t npair, npair_ij, tmpi, tmpj;
1007 double csix, ctwelve;
1008 int ntp, *typecount;
1009 gmx_bool bBHAM;
1010 real *nbfp;
1011 real *nbfp_comb = nullptr;
1013 ntp = fr->ntype;
1014 bBHAM = fr->bBHAM;
1015 nbfp = fr->nbfp;
1017 /* For LJ-PME, we want to correct for the difference between the
1018 * actual C6 values and the C6 values used by the LJ-PME based on
1019 * combination rules. */
1021 if (EVDW_PME(fr->vdwtype))
1023 nbfp_comb = mk_nbfp_combination_rule(&mtop->ffparams,
1024 (fr->ljpme_combination_rule == eljpmeLB) ? eCOMB_ARITHMETIC : eCOMB_GEOMETRIC);
1025 for (tpi = 0; tpi < ntp; ++tpi)
1027 for (tpj = 0; tpj < ntp; ++tpj)
1029 C6(nbfp_comb, ntp, tpi, tpj) =
1030 C6(nbfp, ntp, tpi, tpj) - C6(nbfp_comb, ntp, tpi, tpj);
1031 C12(nbfp_comb, ntp, tpi, tpj) = C12(nbfp, ntp, tpi, tpj);
1034 nbfp = nbfp_comb;
1036 for (q = 0; q < (fr->efep == efepNO ? 1 : 2); q++)
1038 csix = 0;
1039 ctwelve = 0;
1040 npair = 0;
1041 nexcl = 0;
1042 if (!fr->n_tpi)
1044 /* Count the types so we avoid natoms^2 operations */
1045 snew(typecount, ntp);
1046 gmx_mtop_count_atomtypes(mtop, q, typecount);
1048 for (tpi = 0; tpi < ntp; tpi++)
1050 for (tpj = tpi; tpj < ntp; tpj++)
1052 tmpi = typecount[tpi];
1053 tmpj = typecount[tpj];
1054 if (tpi != tpj)
1056 npair_ij = tmpi*tmpj;
1058 else
1060 npair_ij = tmpi*(tmpi - 1)/2;
1062 if (bBHAM)
1064 /* nbfp now includes the 6.0 derivative prefactor */
1065 csix += npair_ij*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1067 else
1069 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1070 csix += npair_ij* C6(nbfp, ntp, tpi, tpj)/6.0;
1071 ctwelve += npair_ij* C12(nbfp, ntp, tpi, tpj)/12.0;
1073 npair += npair_ij;
1076 sfree(typecount);
1077 /* Subtract the excluded pairs.
1078 * The main reason for subtracting exclusions is that in some cases
1079 * some combinations might never occur and the parameters could have
1080 * any value. These unused values should not influence the dispersion
1081 * correction.
1083 for (mb = 0; mb < mtop->nmolblock; mb++)
1085 nmol = mtop->molblock[mb].nmol;
1086 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
1087 excl = &mtop->moltype[mtop->molblock[mb].type].excls;
1088 for (i = 0; (i < atoms->nr); i++)
1090 if (q == 0)
1092 tpi = atoms->atom[i].type;
1094 else
1096 tpi = atoms->atom[i].typeB;
1098 j1 = excl->index[i];
1099 j2 = excl->index[i+1];
1100 for (j = j1; j < j2; j++)
1102 k = excl->a[j];
1103 if (k > i)
1105 if (q == 0)
1107 tpj = atoms->atom[k].type;
1109 else
1111 tpj = atoms->atom[k].typeB;
1113 if (bBHAM)
1115 /* nbfp now includes the 6.0 derivative prefactor */
1116 csix -= nmol*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1118 else
1120 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1121 csix -= nmol*C6 (nbfp, ntp, tpi, tpj)/6.0;
1122 ctwelve -= nmol*C12(nbfp, ntp, tpi, tpj)/12.0;
1124 nexcl += nmol;
1130 else
1132 /* Only correct for the interaction of the test particle
1133 * with the rest of the system.
1135 atoms_tpi =
1136 &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].atoms;
1138 npair = 0;
1139 for (mb = 0; mb < mtop->nmolblock; mb++)
1141 nmol = mtop->molblock[mb].nmol;
1142 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
1143 for (j = 0; j < atoms->nr; j++)
1145 nmolc = nmol;
1146 /* Remove the interaction of the test charge group
1147 * with itself.
1149 if (mb == mtop->nmolblock-1)
1151 nmolc--;
1153 if (mb == 0 && nmol == 1)
1155 gmx_fatal(FARGS, "Old format tpr with TPI, please generate a new tpr file");
1158 if (q == 0)
1160 tpj = atoms->atom[j].type;
1162 else
1164 tpj = atoms->atom[j].typeB;
1166 for (i = 0; i < fr->n_tpi; i++)
1168 if (q == 0)
1170 tpi = atoms_tpi->atom[i].type;
1172 else
1174 tpi = atoms_tpi->atom[i].typeB;
1176 if (bBHAM)
1178 /* nbfp now includes the 6.0 derivative prefactor */
1179 csix += nmolc*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1181 else
1183 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1184 csix += nmolc*C6 (nbfp, ntp, tpi, tpj)/6.0;
1185 ctwelve += nmolc*C12(nbfp, ntp, tpi, tpj)/12.0;
1187 npair += nmolc;
1192 if (npair - nexcl <= 0 && fplog)
1194 fprintf(fplog, "\nWARNING: There are no atom pairs for dispersion correction\n\n");
1195 csix = 0;
1196 ctwelve = 0;
1198 else
1200 csix /= npair - nexcl;
1201 ctwelve /= npair - nexcl;
1203 if (debug)
1205 fprintf(debug, "Counted %d exclusions\n", nexcl);
1206 fprintf(debug, "Average C6 parameter is: %10g\n", (double)csix);
1207 fprintf(debug, "Average C12 parameter is: %10g\n", (double)ctwelve);
1209 fr->avcsix[q] = csix;
1210 fr->avctwelve[q] = ctwelve;
1213 if (EVDW_PME(fr->vdwtype))
1215 sfree(nbfp_comb);
1218 if (fplog != nullptr)
1220 if (fr->eDispCorr == edispcAllEner ||
1221 fr->eDispCorr == edispcAllEnerPres)
1223 fprintf(fplog, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1224 fr->avcsix[0], fr->avctwelve[0]);
1226 else
1228 fprintf(fplog, "Long Range LJ corr.: <C6> %10.4e\n", fr->avcsix[0]);
1234 static void set_bham_b_max(FILE *fplog, t_forcerec *fr,
1235 const gmx_mtop_t *mtop)
1237 const t_atoms *at1, *at2;
1238 int mt1, mt2, i, j, tpi, tpj, ntypes;
1239 real b, bmin;
1240 real *nbfp;
1242 if (fplog)
1244 fprintf(fplog, "Determining largest Buckingham b parameter for table\n");
1246 nbfp = fr->nbfp;
1247 ntypes = fr->ntype;
1249 bmin = -1;
1250 fr->bham_b_max = 0;
1251 for (mt1 = 0; mt1 < mtop->nmoltype; mt1++)
1253 at1 = &mtop->moltype[mt1].atoms;
1254 for (i = 0; (i < at1->nr); i++)
1256 tpi = at1->atom[i].type;
1257 if (tpi >= ntypes)
1259 gmx_fatal(FARGS, "Atomtype[%d] = %d, maximum = %d", i, tpi, ntypes);
1262 for (mt2 = mt1; mt2 < mtop->nmoltype; mt2++)
1264 at2 = &mtop->moltype[mt2].atoms;
1265 for (j = 0; (j < at2->nr); j++)
1267 tpj = at2->atom[j].type;
1268 if (tpj >= ntypes)
1270 gmx_fatal(FARGS, "Atomtype[%d] = %d, maximum = %d", j, tpj, ntypes);
1272 b = BHAMB(nbfp, ntypes, tpi, tpj);
1273 if (b > fr->bham_b_max)
1275 fr->bham_b_max = b;
1277 if ((b < bmin) || (bmin == -1))
1279 bmin = b;
1285 if (fplog)
1287 fprintf(fplog, "Buckingham b parameters, min: %g, max: %g\n",
1288 bmin, fr->bham_b_max);
1292 static void make_nbf_tables(FILE *fp,
1293 t_forcerec *fr, real rtab,
1294 const char *tabfn, char *eg1, char *eg2,
1295 t_nblists *nbl)
1297 char buf[STRLEN];
1298 int i, j;
1300 if (tabfn == nullptr)
1302 if (debug)
1304 fprintf(debug, "No table file name passed, can not read table, can not do non-bonded interactions\n");
1306 return;
1309 sprintf(buf, "%s", tabfn);
1310 if (eg1 && eg2)
1312 /* Append the two energy group names */
1313 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1, "_%s_%s.%s",
1314 eg1, eg2, ftp2ext(efXVG));
1316 nbl->table_elec_vdw = make_tables(fp, fr, buf, rtab, 0);
1317 /* Copy the contents of the table to separate coulomb and LJ tables too,
1318 * to improve cache performance.
1320 /* For performance reasons we want
1321 * the table data to be aligned to 16-byte. The pointers could be freed
1322 * but currently aren't.
1324 snew(nbl->table_elec, 1);
1325 nbl->table_elec->interaction = GMX_TABLE_INTERACTION_ELEC;
1326 nbl->table_elec->format = nbl->table_elec_vdw->format;
1327 nbl->table_elec->r = nbl->table_elec_vdw->r;
1328 nbl->table_elec->n = nbl->table_elec_vdw->n;
1329 nbl->table_elec->scale = nbl->table_elec_vdw->scale;
1330 nbl->table_elec->formatsize = nbl->table_elec_vdw->formatsize;
1331 nbl->table_elec->ninteractions = 1;
1332 nbl->table_elec->stride = nbl->table_elec->formatsize * nbl->table_elec->ninteractions;
1333 snew_aligned(nbl->table_elec->data, nbl->table_elec->stride*(nbl->table_elec->n+1), 32);
1335 snew(nbl->table_vdw, 1);
1336 nbl->table_vdw->interaction = GMX_TABLE_INTERACTION_VDWREP_VDWDISP;
1337 nbl->table_vdw->format = nbl->table_elec_vdw->format;
1338 nbl->table_vdw->r = nbl->table_elec_vdw->r;
1339 nbl->table_vdw->n = nbl->table_elec_vdw->n;
1340 nbl->table_vdw->scale = nbl->table_elec_vdw->scale;
1341 nbl->table_vdw->formatsize = nbl->table_elec_vdw->formatsize;
1342 nbl->table_vdw->ninteractions = 2;
1343 nbl->table_vdw->stride = nbl->table_vdw->formatsize * nbl->table_vdw->ninteractions;
1344 snew_aligned(nbl->table_vdw->data, nbl->table_vdw->stride*(nbl->table_vdw->n+1), 32);
1346 for (i = 0; i <= nbl->table_elec_vdw->n; i++)
1348 for (j = 0; j < 4; j++)
1350 nbl->table_elec->data[4*i+j] = nbl->table_elec_vdw->data[12*i+j];
1352 for (j = 0; j < 8; j++)
1354 nbl->table_vdw->data[8*i+j] = nbl->table_elec_vdw->data[12*i+4+j];
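/* Layout note (added, inferred from the copy loop above rather than stated in
 * the original comments): each point of the combined table_elec_vdw occupies
 * 12 reals, of which the first 4 belong to the single Coulomb interaction and
 * the remaining 8 to the dispersion and repulsion interactions (2 x 4 reals),
 * which matches table_elec using a stride of formatsize*1 and table_vdw a
 * stride of formatsize*2.
 */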
1359 /*!\brief If there are bonded interactions of type \c ftype1 or \c
1360 * ftype2 present in the topology, build an array of the number of
1361 * interactions present for each bonded interaction index found in the
1362 * topology.
1364 * \c ftype1 or \c ftype2 may be set to -1 to disable seeking for a
1365 * valid type with that parameter.
1367 * \c count will be reallocated as necessary to fit the largest bonded
1368 * interaction index found, and its current size will be returned in
1369 * \c ncount. It will contain zero for every bonded interaction index
1370 * for which no interactions are present in the topology.
1372 static void count_tables(int ftype1, int ftype2, const gmx_mtop_t *mtop,
1373 int *ncount, int **count)
1375 const gmx_moltype_t *molt;
1376 const t_ilist *il;
1377 int mt, ftype, stride, i, j, tabnr;
1379 // Loop over all moleculetypes
1380 for (mt = 0; mt < mtop->nmoltype; mt++)
1382 molt = &mtop->moltype[mt];
1383 // Loop over all interaction types
1384 for (ftype = 0; ftype < F_NRE; ftype++)
1386 // If the current interaction type is one of the types whose tables we're trying to count...
1387 if (ftype == ftype1 || ftype == ftype2)
1389 il = &molt->ilist[ftype];
1390 stride = 1 + NRAL(ftype);
1391 // ... and there are actually some interactions for this type
1392 for (i = 0; i < il->nr; i += stride)
1394 // Find out which table index the user wanted
1395 tabnr = mtop->ffparams.iparams[il->iatoms[i]].tab.table;
1396 if (tabnr < 0)
1398 gmx_fatal(FARGS, "A bonded table number is smaller than 0: %d\n", tabnr);
1400 // Make room for this index in the data structure
1401 if (tabnr >= *ncount)
1403 srenew(*count, tabnr+1);
1404 for (j = *ncount; j < tabnr+1; j++)
1406 (*count)[j] = 0;
1408 *ncount = tabnr+1;
1410 // Record that this table index is used and must have a valid file
1411 (*count)[tabnr]++;
1418 /*!\brief If there are bonded interactions of flavour \c tabext and type
1419 * \c ftype1 or \c ftype2 present in the topology, seek them in the
1420 * list of filenames passed to mdrun, and make bonded tables from
1421 * those files.
1423 * \c ftype1 or \c ftype2 may be set to -1 to disable seeking for a
1424 * valid type with that parameter.
1426 * A fatal error occurs if no matching filename is found.
1428 static bondedtable_t *make_bonded_tables(FILE *fplog,
1429 int ftype1, int ftype2,
1430 const gmx_mtop_t *mtop,
1431 const t_filenm *tabbfnm,
1432 const char *tabext)
1434 int ncount, *count;
1435 bondedtable_t *tab;
1437 tab = nullptr;
1439 ncount = 0;
1440 count = nullptr;
1441 count_tables(ftype1, ftype2, mtop, &ncount, &count);
1443 // Are there any relevant tabulated bond interactions?
1444 if (ncount > 0)
1446 snew(tab, ncount);
1447 for (int i = 0; i < ncount; i++)
1449 // Do any interactions exist that requires this table?
1450 if (count[i] > 0)
1452 // This pattern enforces the current requirement that
1453 // table filenames end in a characteristic sequence
1454 // before the file type extension, and avoids table 13
1455 // being recognized and used for table 1.
1456 std::string patternToFind = gmx::formatString("_%s%d.%s", tabext, i, ftp2ext(efXVG));
1457 bool madeTable = false;
1458 for (int j = 0; j < tabbfnm->nfiles && !madeTable; ++j)
1460 std::string filename(tabbfnm->fns[j]);
1461 if (gmx::endsWith(filename, patternToFind))
1463 // Finally read the table from the file found
1464 tab[i] = make_bonded_table(fplog, tabbfnm->fns[j], NRAL(ftype1)-2);
1465 madeTable = true;
1468 if (!madeTable)
1470 bool isPlural = (ftype2 != -1);
1471 gmx_fatal(FARGS, "Tabulated interaction of type '%s%s%s' with index %d cannot be used because no table file whose name matched '%s' was passed via the gmx mdrun -tableb command-line option.",
1472 interaction_function[ftype1].longname,
1473 isPlural ? "' or '" : "",
1474 isPlural ? interaction_function[ftype2].longname : "",
1476 patternToFind.c_str());
1480 sfree(count);
1483 return tab;
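/* Filename example (added illustration, not part of the original source):
 * assuming a bonded flavour string such as "b" and table index 1, the pattern
 * built above is "_b1.xvg", so a file passed to gmx mdrun -tableb as e.g.
 * mytable_b1.xvg is matched, while mytable_b13.xvg is not, because endsWith()
 * compares the full "_b1.xvg" suffix.
 */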
1486 void forcerec_set_ranges(t_forcerec *fr,
1487 int ncg_home, int ncg_force,
1488 int natoms_force,
1489 int natoms_force_constr, int natoms_f_novirsum)
1491 fr->cg0 = 0;
1492 fr->hcg = ncg_home;
1494 /* fr->ncg_force is unused in the standard code,
1495 * but it can be useful for modified code dealing with charge groups.
1497 fr->ncg_force = ncg_force;
1498 fr->natoms_force = natoms_force;
1499 fr->natoms_force_constr = natoms_force_constr;
1501 if (fr->natoms_force_constr > fr->nalloc_force)
1503 fr->nalloc_force = over_alloc_dd(fr->natoms_force_constr);
1506 if (fr->bF_NoVirSum)
1508 /* TODO: remove this + 1 when padding is properly implemented */
1509 fr->forceBufferNoVirialSummation->resize(natoms_f_novirsum + 1);
1513 static real cutoff_inf(real cutoff)
1515 if (cutoff == 0)
1517 cutoff = GMX_CUTOFF_INF;
1520 return cutoff;
1523 gmx_bool can_use_allvsall(const t_inputrec *ir, gmx_bool bPrintNote, t_commrec *cr, FILE *fp)
1525 gmx_bool bAllvsAll;
1527 bAllvsAll =
1529 ir->rlist == 0 &&
1530 ir->rcoulomb == 0 &&
1531 ir->rvdw == 0 &&
1532 ir->ePBC == epbcNONE &&
1533 ir->vdwtype == evdwCUT &&
1534 ir->coulombtype == eelCUT &&
1535 ir->efep == efepNO &&
1536 (ir->implicit_solvent == eisNO ||
1537 (ir->implicit_solvent == eisGBSA && (ir->gb_algorithm == egbSTILL ||
1538 ir->gb_algorithm == egbHCT ||
1539 ir->gb_algorithm == egbOBC))) &&
1540 getenv("GMX_NO_ALLVSALL") == nullptr
1543 if (bAllvsAll && ir->opts.ngener > 1)
1545 const char *note = "NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
1547 if (bPrintNote)
1549 if (fp != nullptr)
1551 fprintf(fp, "\n%s\n", note);
1554 bAllvsAll = FALSE;
1557 if (bAllvsAll && fp && MASTER(cr))
1559 fprintf(fp, "\nUsing SIMD all-vs-all kernels.\n\n");
1562 return bAllvsAll;
1566 gmx_bool nbnxn_gpu_acceleration_supported(const gmx::MDLogger &mdlog,
1567 const t_inputrec *ir,
1568 gmx_bool bRerunMD)
1570 if (bRerunMD && ir->opts.ngener > 1)
1572 /* Rerun execution time is dominated by I/O and pair search,
1573 * so GPUs are not very useful, plus they do not support more
1574 * than one energy group. If the user requested GPUs
1575 * explicitly, a fatal error is given later. With non-reruns,
1576 * we fall back to a single whole-of system energy group
1577 * (which runs much faster than a multiple-energy-groups
1578 * implementation would), and issue a note in the .log
1579 * file. Users can re-run if they want the information. */
1580 GMX_LOG(mdlog.warning).asParagraph().appendText("Rerun with energy groups is not implemented for GPUs, falling back to the CPU");
1581 return FALSE;
1584 return TRUE;
1587 gmx_bool nbnxn_simd_supported(const gmx::MDLogger &mdlog,
1588 const t_inputrec *ir)
1590 if (ir->vdwtype == evdwPME && ir->ljpme_combination_rule == eljpmeLB)
1592 /* LJ PME with LB combination rule does 7 mesh operations.
1593 * This is so slow that we don't compile SIMD non-bonded kernels
1594 * for that. */
1595 GMX_LOG(mdlog.warning).asParagraph().appendText("LJ-PME with Lorentz-Berthelot is not supported with SIMD kernels, falling back to plain C kernels");
1596 return FALSE;
1599 return TRUE;
1603 static void pick_nbnxn_kernel_cpu(const t_inputrec gmx_unused *ir,
1604 int *kernel_type,
1605 int *ewald_excl)
1607 *kernel_type = nbnxnk4x4_PlainC;
1608 *ewald_excl = ewaldexclTable;
1610 #if GMX_SIMD
1612 #ifdef GMX_NBNXN_SIMD_4XN
1613 *kernel_type = nbnxnk4xN_SIMD_4xN;
1614 #endif
1615 #ifdef GMX_NBNXN_SIMD_2XNN
1616 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1617 #endif
1619 #if defined GMX_NBNXN_SIMD_2XNN && defined GMX_NBNXN_SIMD_4XN
1620 /* We need to choose if we want 2x(N+N) or 4xN kernels.
1621 * Currently this is based on the SIMD acceleration choice,
1622 * but it might be better to decide this at runtime based on CPU.
1624 * 4xN calculates more (zero) interactions, but has less pair-search
1625 * work and much better kernel instruction scheduling.
1627 * Up till now we have only seen that on Intel Sandy/Ivy Bridge,
1628 * which doesn't have FMA, both the analytical and tabulated Ewald
1629 * kernels have similar pair rates for 4x8 and 2x(4+4), so we choose
1630 * 2x(4+4) because it results in significantly fewer pairs.
1631 * For RF, the raw pair rate of the 4x8 kernel is higher than 2x(4+4),
1632 * 10% with HT, 50% without HT. As we currently don't detect the actual
1633 * use of HT, use 4x8 to avoid a potential performance hit.
1634 * On Intel Haswell 4x8 is always faster.
1636 *kernel_type = nbnxnk4xN_SIMD_4xN;
1638 #if !GMX_SIMD_HAVE_FMA
1639 if (EEL_PME_EWALD(ir->coulombtype) ||
1640 EVDW_PME(ir->vdwtype))
1642 /* We have Ewald kernels without FMA (Intel Sandy/Ivy Bridge).
1643 * There are enough instructions to make 2x(4+4) efficient.
1645 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1647 #endif
1648 #endif /* GMX_NBNXN_SIMD_2XNN && GMX_NBNXN_SIMD_4XN */
1651 if (getenv("GMX_NBNXN_SIMD_4XN") != nullptr)
1653 #ifdef GMX_NBNXN_SIMD_4XN
1654 *kernel_type = nbnxnk4xN_SIMD_4xN;
1655 #else
1656 gmx_fatal(FARGS, "SIMD 4xN kernels requested, but GROMACS has been compiled without support for these kernels");
1657 #endif
1659 if (getenv("GMX_NBNXN_SIMD_2XNN") != nullptr)
1661 #ifdef GMX_NBNXN_SIMD_2XNN
1662 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1663 #else
1664 gmx_fatal(FARGS, "SIMD 2x(N+N) kernels requested, but GROMACS has been compiled without support for these kernels");
1665 #endif
1668 /* Analytical Ewald exclusion correction is only an option in
1669 * the SIMD kernel.
1670 * Since table lookups don't parallelize with SIMD, analytical
1671 * will probably always be faster for a SIMD width of 8 or more.
1672 * With FMA analytical is sometimes faster for a width of 4 as well.
1673 * On BlueGene/Q, this is faster regardless of precision.
1674 * In single precision, this is faster on Bulldozer.
1676 #if GMX_SIMD_REAL_WIDTH >= 8 || \
1677 (GMX_SIMD_REAL_WIDTH >= 4 && GMX_SIMD_HAVE_FMA && !GMX_DOUBLE) || GMX_SIMD_IBM_QPX
1678 *ewald_excl = ewaldexclAnalytical;
1679 #endif
1680 if (getenv("GMX_NBNXN_EWALD_TABLE") != nullptr)
1682 *ewald_excl = ewaldexclTable;
1684 if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != nullptr)
1686 *ewald_excl = ewaldexclAnalytical;
1690 #endif // GMX_SIMD
1694 const char *lookup_nbnxn_kernel_name(int kernel_type)
1696 const char *returnvalue = nullptr;
1697 switch (kernel_type)
1699 case nbnxnkNotSet:
1700 returnvalue = "not set";
1701 break;
1702 case nbnxnk4x4_PlainC:
1703 returnvalue = "plain C";
1704 break;
1705 case nbnxnk4xN_SIMD_4xN:
1706 case nbnxnk4xN_SIMD_2xNN:
1707 #if GMX_SIMD
1708 returnvalue = "SIMD";
1709 #else // GMX_SIMD
1710 returnvalue = "not available";
1711 #endif // GMX_SIMD
1712 break;
1713 case nbnxnk8x8x8_GPU: returnvalue = "GPU"; break;
1714 case nbnxnk8x8x8_PlainC: returnvalue = "plain C"; break;
1716 case nbnxnkNR:
1717 default:
1718 gmx_fatal(FARGS, "Illegal kernel type selected");
1719 returnvalue = nullptr;
1720 break;
1722 return returnvalue;
1725 static void pick_nbnxn_kernel(FILE *fp,
1726 const gmx::MDLogger &mdlog,
1727 gmx_bool use_simd_kernels,
1728 gmx_bool bUseGPU,
1729 gmx_bool bEmulateGPU,
1730 const t_inputrec *ir,
1731 int *kernel_type,
1732 int *ewald_excl,
1733 gmx_bool bDoNonbonded)
1735 assert(kernel_type);
1737 *kernel_type = nbnxnkNotSet;
1738 *ewald_excl = ewaldexclTable;
1740 if (bEmulateGPU)
1742 *kernel_type = nbnxnk8x8x8_PlainC;
1744 if (bDoNonbonded)
1746 GMX_LOG(mdlog.warning).asParagraph().appendText("Emulating a GPU run on the CPU (slow)");
1749 else if (bUseGPU)
1751 *kernel_type = nbnxnk8x8x8_GPU;
1754 if (*kernel_type == nbnxnkNotSet)
1756 if (use_simd_kernels &&
1757 nbnxn_simd_supported(mdlog, ir))
1759 pick_nbnxn_kernel_cpu(ir, kernel_type, ewald_excl);
1761 else
1763 *kernel_type = nbnxnk4x4_PlainC;
1767 if (bDoNonbonded && fp != nullptr)
1769 fprintf(fp, "\nUsing %s %dx%d non-bonded kernels\n\n",
1770 lookup_nbnxn_kernel_name(*kernel_type),
1771 nbnxn_kernel_to_cluster_i_size(*kernel_type),
1772 nbnxn_kernel_to_cluster_j_size(*kernel_type));
1774 if (nbnxnk4x4_PlainC == *kernel_type ||
1775 nbnxnk8x8x8_PlainC == *kernel_type)
1777 GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
1778 "WARNING: Using the slow %s kernels. This should\n"
1779 "not happen during routine usage on supported platforms.",
1780 lookup_nbnxn_kernel_name(*kernel_type));
1785 static void pick_nbnxn_resources(const gmx::MDLogger &mdlog,
1786 const t_commrec *cr,
1787 const gmx_hw_info_t *hwinfo,
1788 gmx_bool bDoNonbonded,
1789 gmx_bool *bUseGPU,
1790 gmx_bool *bEmulateGPU,
1791 const gmx_gpu_opt_t *gpu_opt)
1793 gmx_bool bEmulateGPUEnvVarSet;
1794 char gpu_err_str[STRLEN];
1796 *bUseGPU = FALSE;
1798 bEmulateGPUEnvVarSet = (getenv("GMX_EMULATE_GPU") != nullptr);
1800 /* Run GPU emulation mode if GMX_EMULATE_GPU is defined. Because
1801 * GPUs (currently) only handle non-bonded calculations, we will
1802 * automatically switch to emulation if non-bonded calculations are
1803 * turned off via GMX_NO_NONBONDED - this is the simple and elegant
1804 * way to turn off GPU initialization, data movement, and cleanup.
1806 * GPU emulation can be useful to assess the performance one can expect by
1807 * adding GPU(s) to the machine. The conditional below allows this even
1808 * if mdrun is compiled without GPU acceleration support.
1809 * Note that you should freeze the system, as otherwise it will explode.
1811 *bEmulateGPU = (bEmulateGPUEnvVarSet ||
1812 (!bDoNonbonded && gpu_opt->n_dev_use > 0));
1814 /* Enable GPU mode when GPUs are available and no GPU emulation is requested.
1816 if (gpu_opt->n_dev_use > 0 && !(*bEmulateGPU))
1818 /* Each PP node will use the intra-node id-th device from the
1819 * list of detected/selected GPUs. */
1820 if (!init_gpu(mdlog, cr->rank_pp_intranode, gpu_err_str,
1821 &hwinfo->gpu_info, gpu_opt))
1823 /* At this point the init should never fail as we made sure that
1824 * we have all the GPUs we need. If it still does, we'll bail. */
1825 /* TODO the decorating of gpu_err_str is nicer if it
1826 happens inside init_gpu. Out here, the decorating with
1827 the MPI rank makes sense. */
1828 gmx_fatal(FARGS, "On rank %d failed to initialize GPU #%d: %s",
1829 cr->nodeid,
1830 get_gpu_device_id(&hwinfo->gpu_info, gpu_opt,
1831 cr->rank_pp_intranode),
1832 gpu_err_str);
1835 /* Here we actually turn on hardware GPU acceleration */
1836 *bUseGPU = TRUE;
1840 gmx_bool uses_simple_tables(int cutoff_scheme,
1841 nonbonded_verlet_t *nbv,
1842 int group)
1844 gmx_bool bUsesSimpleTables = TRUE;
1845 int grp_index;
1847 switch (cutoff_scheme)
1849 case ecutsGROUP:
1850 bUsesSimpleTables = TRUE;
1851 break;
1852 case ecutsVERLET:
1853 assert(NULL != nbv && NULL != nbv->grp);
1854 grp_index = (group < 0) ? 0 : (nbv->ngrp - 1);
1855 bUsesSimpleTables = nbnxn_kernel_pairlist_simple(nbv->grp[grp_index].kernel_type);
1856 break;
1857 default:
1858 gmx_incons("unimplemented");
1860 return bUsesSimpleTables;
1863 static void init_ewald_f_table(interaction_const_t *ic,
1864 real rtab)
1866 real maxr;
1868 /* Get the Ewald table spacing based on Coulomb and/or LJ
1869 * Ewald coefficients and rtol.
1871 ic->tabq_scale = ewald_spline3_table_scale(ic);
1873 if (ic->cutoff_scheme == ecutsVERLET)
1875 maxr = ic->rcoulomb;
1877 else
1879 maxr = std::max(ic->rcoulomb, rtab);
1881 ic->tabq_size = static_cast<int>(maxr*ic->tabq_scale) + 2;
1883 sfree_aligned(ic->tabq_coul_FDV0);
1884 sfree_aligned(ic->tabq_coul_F);
1885 sfree_aligned(ic->tabq_coul_V);
1887 sfree_aligned(ic->tabq_vdw_FDV0);
1888 sfree_aligned(ic->tabq_vdw_F);
1889 sfree_aligned(ic->tabq_vdw_V);
1891 if (EEL_PME_EWALD(ic->eeltype))
1893 /* Create the original table data in FDV0 */
1894 snew_aligned(ic->tabq_coul_FDV0, ic->tabq_size*4, 32);
1895 snew_aligned(ic->tabq_coul_F, ic->tabq_size, 32);
1896 snew_aligned(ic->tabq_coul_V, ic->tabq_size, 32);
1897 table_spline3_fill_ewald_lr(ic->tabq_coul_F, ic->tabq_coul_V, ic->tabq_coul_FDV0,
1898 ic->tabq_size, 1/ic->tabq_scale, ic->ewaldcoeff_q, v_q_ewald_lr);
1901 if (EVDW_PME(ic->vdwtype))
1903 snew_aligned(ic->tabq_vdw_FDV0, ic->tabq_size*4, 32);
1904 snew_aligned(ic->tabq_vdw_F, ic->tabq_size, 32);
1905 snew_aligned(ic->tabq_vdw_V, ic->tabq_size, 32);
1906 table_spline3_fill_ewald_lr(ic->tabq_vdw_F, ic->tabq_vdw_V, ic->tabq_vdw_FDV0,
1907 ic->tabq_size, 1/ic->tabq_scale, ic->ewaldcoeff_lj, v_lj_ewald_lr);
1911 void init_interaction_const_tables(FILE *fp,
1912 interaction_const_t *ic,
1913 real rtab)
1915 if (EEL_PME_EWALD(ic->eeltype) || EVDW_PME(ic->vdwtype))
1917 init_ewald_f_table(ic, rtab);
1919 if (fp != nullptr)
1921 fprintf(fp, "Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
1922 1/ic->tabq_scale, ic->tabq_size);
1927 static void clear_force_switch_constants(shift_consts_t *sc)
1929 sc->c2 = 0;
1930 sc->c3 = 0;
1931 sc->cpot = 0;
1934 static void force_switch_constants(real p,
1935 real rsw, real rc,
1936 shift_consts_t *sc)
1938 /* Here we determine the coefficient for shifting the force to zero
1939 * between distance rsw and the cut-off rc.
1940 * For a potential of r^-p, we have force p*r^-(p+1).
1941 * But to save flops we absorb p in the coefficient.
1942 * Thus we get:
1943 * force/p = r^-(p+1) + c2*r^2 + c3*r^3
1944 * potential = r^-p + c2/3*r^3 + c3/4*r^4 + cpot
1946 sc->c2 = ((p + 1)*rsw - (p + 4)*rc)/(pow(rc, p + 2)*gmx::square(rc - rsw));
1947 sc->c3 = -((p + 1)*rsw - (p + 3)*rc)/(pow(rc, p + 2)*gmx::power3(rc - rsw));
1948 sc->cpot = -pow(rc, -p) + p*sc->c2/3*gmx::power3(rc - rsw) + p*sc->c3/4*gmx::power4(rc - rsw);
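/* Derivation sketch (added, not part of the original source), writing
 * d = r - rsw and D = rc - rsw, for a switched force of the form
 *   force/p = r^-(p+1) + c2*d^2 + c3*d^3      for rsw < r < rc.
 * Requiring the force and its derivative to vanish at r = rc gives
 *   c2 =  ((p+1)*rsw - (p+4)*rc) / (rc^(p+2) * D^2)
 *   c3 = -((p+1)*rsw - (p+3)*rc) / (rc^(p+2) * D^3)
 * which are exactly the values assigned above; cpot then shifts the
 * corresponding potential so that it is zero at the cut-off rc.
 */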
1951 static void potential_switch_constants(real rsw, real rc,
1952 switch_consts_t *sc)
1954 /* The switch function is 1 at rsw and 0 at rc.
1955 * The derivative and second derivative are zero at both ends.
1956 * rsw = max(r - r_switch, 0)
1957 * sw = 1 + c3*rsw^3 + c4*rsw^4 + c5*rsw^5
1958 * dsw = 3*c3*rsw^2 + 4*c4*rsw^3 + 5*c5*rsw^4
1959 * force = force*dsw - potential*sw
1960 * potential *= sw
1962 sc->c3 = -10/gmx::power3(rc - rsw);
1963 sc->c4 = 15/gmx::power4(rc - rsw);
1964 sc->c5 = -6/gmx::power5(rc - rsw);
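/* Quick check of the coefficients above (added, not part of the original
 * source): writing D = rc - rsw for the function arguments and evaluating
 * the switch at the shifted distance D (i.e. at r = rc):
 *   sw   = 1 + c3*D^3 + c4*D^4 + c5*D^5     = 1 - 10 + 15 - 6         = 0
 *   sw'  = 3*c3*D^2 + 4*c4*D^3 + 5*c5*D^4   = (-30 + 60 - 30)/D       = 0
 *   sw'' = 6*c3*D + 12*c4*D^2 + 20*c5*D^3   = (-60 + 180 - 120)/D^2   = 0
 * while at r = rsw the shifted distance is 0, so sw = 1 with vanishing first
 * and second derivatives, as stated in the comment.
 */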
1967 /*! \brief Construct interaction constants
1969 * This data is used (particularly) by search and force code for
1970 * short-range interactions. Many of these are constant for the whole
1971 * simulation; some are constant only after PME tuning completes.
1973 static void
1974 init_interaction_const(FILE *fp,
1975 interaction_const_t **interaction_const,
1976 const t_forcerec *fr)
1978 interaction_const_t *ic;
1980 snew(ic, 1);
1982 ic->cutoff_scheme = fr->cutoff_scheme;
1984 /* Just allocate something so we can free it */
1985 snew_aligned(ic->tabq_coul_FDV0, 16, 32);
1986 snew_aligned(ic->tabq_coul_F, 16, 32);
1987 snew_aligned(ic->tabq_coul_V, 16, 32);
1989 ic->rlist = fr->rlist;
1991 /* Lennard-Jones */
1992 ic->vdwtype = fr->vdwtype;
1993 ic->vdw_modifier = fr->vdw_modifier;
1994 ic->rvdw = fr->rvdw;
1995 ic->rvdw_switch = fr->rvdw_switch;
1996 ic->ewaldcoeff_lj = fr->ewaldcoeff_lj;
1997 ic->ljpme_comb_rule = fr->ljpme_combination_rule;
1998 ic->sh_lj_ewald = 0;
1999 clear_force_switch_constants(&ic->dispersion_shift);
2000 clear_force_switch_constants(&ic->repulsion_shift);
2002 switch (ic->vdw_modifier)
2004 case eintmodPOTSHIFT:
2005 /* Only shift the potential, don't touch the force */
2006 ic->dispersion_shift.cpot = -1.0/gmx::power6(ic->rvdw);
2007 ic->repulsion_shift.cpot = -1.0/gmx::power12(ic->rvdw);
2008 if (EVDW_PME(ic->vdwtype))
2010 real crc2;
2012 crc2 = gmx::square(ic->ewaldcoeff_lj*ic->rvdw);
2013 ic->sh_lj_ewald = (std::exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)/gmx::power6(ic->rvdw);
2015 break;
2016 case eintmodFORCESWITCH:
2017 /* Switch the force, switch and shift the potential */
2018 force_switch_constants(6.0, ic->rvdw_switch, ic->rvdw,
2019 &ic->dispersion_shift);
2020 force_switch_constants(12.0, ic->rvdw_switch, ic->rvdw,
2021 &ic->repulsion_shift);
2022 break;
2023 case eintmodPOTSWITCH:
2024 /* Switch the potential and force */
2025 potential_switch_constants(ic->rvdw_switch, ic->rvdw,
2026 &ic->vdw_switch);
2027 break;
2028 case eintmodNONE:
2029 case eintmodEXACTCUTOFF:
2030 /* Nothing to do here */
2031 break;
2032 default:
2033 gmx_incons("unimplemented potential modifier");
2036 ic->sh_invrc6 = -ic->dispersion_shift.cpot;
2038 /* Electrostatics */
2039 ic->eeltype = fr->eeltype;
2040 ic->coulomb_modifier = fr->coulomb_modifier;
2041 ic->rcoulomb = fr->rcoulomb;
2042 ic->epsilon_r = fr->epsilon_r;
2043 ic->epsfac = fr->epsfac;
2044 ic->ewaldcoeff_q = fr->ewaldcoeff_q;
2046 if (fr->coulomb_modifier == eintmodPOTSHIFT)
2048 ic->sh_ewald = std::erfc(ic->ewaldcoeff_q*ic->rcoulomb);
2050 else
2052 ic->sh_ewald = 0;
2055 /* Reaction-field */
2056 if (EEL_RF(ic->eeltype))
2058 ic->epsilon_rf = fr->epsilon_rf;
2059 ic->k_rf = fr->k_rf;
2060 ic->c_rf = fr->c_rf;
2062 else
2064 /* For plain cut-off we might use the reaction-field kernels */
2065 ic->epsilon_rf = ic->epsilon_r;
2066 ic->k_rf = 0;
2067 if (fr->coulomb_modifier == eintmodPOTSHIFT)
2069 ic->c_rf = 1/ic->rcoulomb;
2071 else
2073 ic->c_rf = 0;
2077 if (fp != nullptr)
2079 real dispersion_shift;
2081 dispersion_shift = ic->dispersion_shift.cpot;
2082 if (EVDW_PME(ic->vdwtype))
2084 dispersion_shift -= ic->sh_lj_ewald;
2086 fprintf(fp, "Potential shift: LJ r^-12: %.3e r^-6: %.3e",
2087 ic->repulsion_shift.cpot, dispersion_shift);
2089 if (ic->eeltype == eelCUT)
2091 fprintf(fp, ", Coulomb %.e", -ic->c_rf);
2093 else if (EEL_PME(ic->eeltype))
2095 fprintf(fp, ", Ewald %.3e", -ic->sh_ewald);
2097 fprintf(fp, "\n");
2100 *interaction_const = ic;
2103 static void init_nb_verlet(FILE *fp,
2104 const gmx::MDLogger &mdlog,
2105 nonbonded_verlet_t **nb_verlet,
2106 gmx_bool bFEP_NonBonded,
2107 const t_inputrec *ir,
2108 const t_forcerec *fr,
2109 const t_commrec *cr,
2110 const char *nbpu_opt)
2112 nonbonded_verlet_t *nbv;
2113 int i;
2114 char *env;
2115 gmx_bool bEmulateGPU, bHybridGPURun = FALSE;
2117 nbnxn_alloc_t *nb_alloc;
2118 nbnxn_free_t *nb_free;
2120 snew(nbv, 1);
2122 pick_nbnxn_resources(mdlog, cr, fr->hwinfo,
2123 fr->bNonbonded,
2124 &nbv->bUseGPU,
2125 &bEmulateGPU,
2126 fr->gpu_opt);
2128 nbv->nbs = nullptr;
2129 nbv->min_ci_balanced = 0;
2131 nbv->ngrp = (DOMAINDECOMP(cr) ? 2 : 1);
2132 for (i = 0; i < nbv->ngrp; i++)
2134 nbv->grp[i].nbl_lists.nnbl = 0;
2135 nbv->grp[i].nbat = nullptr;
2136 nbv->grp[i].kernel_type = nbnxnkNotSet;
2138 if (i == 0) /* local */
2140 pick_nbnxn_kernel(fp, mdlog, fr->use_simd_kernels,
2141 nbv->bUseGPU, bEmulateGPU, ir,
2142 &nbv->grp[i].kernel_type,
2143 &nbv->grp[i].ewald_excl,
2144 fr->bNonbonded);
2146 else /* non-local */
2148 if (nbpu_opt != nullptr && strcmp(nbpu_opt, "gpu_cpu") == 0)
2150 /* Use GPU for local, select a CPU kernel for non-local */
2151 pick_nbnxn_kernel(fp, mdlog, fr->use_simd_kernels,
2152 FALSE, FALSE, ir,
2153 &nbv->grp[i].kernel_type,
2154 &nbv->grp[i].ewald_excl,
2155 fr->bNonbonded);
2157 bHybridGPURun = TRUE;
2159 else
2161 /* Use the same kernel for local and non-local interactions */
2162 nbv->grp[i].kernel_type = nbv->grp[0].kernel_type;
2163 nbv->grp[i].ewald_excl = nbv->grp[0].ewald_excl;
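/* Usage sketch (option name as used by mdrun of this generation; check the
 * documentation of your version): "gmx mdrun -nb gpu_cpu" selects the hybrid
 * mode handled above, running local pair interactions on the GPU and
 * non-local ones on a CPU kernel, which can help when the GPU is the bottleneck.
 */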
2168 nbnxn_init_search(&nbv->nbs,
2169 DOMAINDECOMP(cr) ? &cr->dd->nc : nullptr,
2170 DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : nullptr,
2171 bFEP_NonBonded,
2172 gmx_omp_nthreads_get(emntPairsearch));
2174 for (i = 0; i < nbv->ngrp; i++)
2176 gpu_set_host_malloc_and_free(nbv->grp[0].kernel_type == nbnxnk8x8x8_GPU,
2177 &nb_alloc, &nb_free);
2179 nbnxn_init_pairlist_set(&nbv->grp[i].nbl_lists,
2180 nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
2181 /* 8x8x8 "non-simple" lists are currently always combined */
2182 !nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
2183 nb_alloc, nb_free);
2185 if (i == 0 ||
2186 nbv->grp[0].kernel_type != nbv->grp[i].kernel_type)
2188 gmx_bool bSimpleList;
2189 int enbnxninitcombrule;
2191 bSimpleList = nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type);
2193 if (fr->vdwtype == evdwCUT &&
2194 (fr->vdw_modifier == eintmodNONE ||
2195 fr->vdw_modifier == eintmodPOTSHIFT) &&
2196 getenv("GMX_NO_LJ_COMB_RULE") == nullptr)
2198 /* Plain LJ cut-off: we can optimize with combination rules */
2199 enbnxninitcombrule = enbnxninitcombruleDETECT;
2201 else if (fr->vdwtype == evdwPME)
2203 /* LJ-PME: we need to use a combination rule for the grid */
2204 if (fr->ljpme_combination_rule == eljpmeGEOM)
2206 enbnxninitcombrule = enbnxninitcombruleGEOM;
2208 else
2210 enbnxninitcombrule = enbnxninitcombruleLB;
2213 else
2215 /* We use a full combination matrix: no rule required */
2216 enbnxninitcombrule = enbnxninitcombruleNONE;
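/* For reference (illustration only, the actual detection happens in
 * nbnxn_atomdata_init): a geometric rule means C6_ij = sqrt(C6_i*C6_j),
 * Lorentz-Berthelot combines sigma arithmetically and epsilon geometrically,
 * and enbnxninitcombruleNONE falls back to the full C6/C12 matrix in fr->nbfp.
 */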
2220 snew(nbv->grp[i].nbat, 1);
2221 nbnxn_atomdata_init(fp,
2222 nbv->grp[i].nbat,
2223 nbv->grp[i].kernel_type,
2224 enbnxninitcombrule,
2225 fr->ntype, fr->nbfp,
2226 ir->opts.ngener,
2227 bSimpleList ? gmx_omp_nthreads_get(emntNonbonded) : 1,
2228 nb_alloc, nb_free);
2230 else
2232 nbv->grp[i].nbat = nbv->grp[0].nbat;
2236 if (nbv->bUseGPU)
2238 /* init the NxN GPU data; the last argument tells whether we'll have
2239 * both local and non-local NB calculation on GPU */
2240 nbnxn_gpu_init(&nbv->gpu_nbv,
2241 &fr->hwinfo->gpu_info,
2242 fr->gpu_opt,
2243 fr->ic,
2244 nbv->grp,
2245 cr->rank_pp_intranode,
2246 cr->nodeid,
2247 (nbv->ngrp > 1) && !bHybridGPURun);
2249 /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
2250 * also sharing texture references. To keep the code simple, we don't
2251 * treat texture references as shared resources, but this means that
2252 * the coulomb_tab and nbfp texture refs will get updated by multiple threads.
2253 * Hence, to ensure that the non-bonded kernels don't start before all
2254 * texture binding operations are finished, we need to wait for all ranks
2255 * to arrive here before continuing.
2257 * Note that we could omit this barrier if GPUs are not shared (or
2258 * texture objects are used), but as this is initialization code, there
2259 * is no point in complicating things.
2261 #if GMX_THREAD_MPI
2262 if (PAR(cr))
2264 gmx_barrier(cr);
2266 #endif /* GMX_THREAD_MPI */
2268 if ((env = getenv("GMX_NB_MIN_CI")) != nullptr)
2270 char *end;
2272 nbv->min_ci_balanced = strtol(env, &end, 10);
2273 if (!end || (*end != 0) || nbv->min_ci_balanced < 0)
2275 gmx_fatal(FARGS, "Invalid value passed in GMX_NB_MIN_CI=%s, non-negative integer required", env);
2278 if (debug)
2280 fprintf(debug, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
2281 nbv->min_ci_balanced);
2284 else
2286 nbv->min_ci_balanced = nbnxn_gpu_min_ci_balanced(nbv->gpu_nbv);
2287 if (debug)
2289 fprintf(debug, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
2290 nbv->min_ci_balanced);
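/* Usage sketch (the variable is the one read above, the value is just an
 * example): running with GMX_NB_MIN_CI=64 set in the environment overrides
 * the pair-list balancing target that is otherwise derived from the number
 * of GPU multiprocessors.
 */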
2296 *nb_verlet = nbv;
2299 gmx_bool usingGpu(nonbonded_verlet_t *nbv)
2301 return nbv != nullptr && nbv->bUseGPU;
2304 void init_forcerec(FILE *fp,
2305 const gmx::MDLogger &mdlog,
2306 t_forcerec *fr,
2307 t_fcdata *fcd,
2308 IForceProvider *forceProviders,
2309 const t_inputrec *ir,
2310 const gmx_mtop_t *mtop,
2311 const t_commrec *cr,
2312 matrix box,
2313 const char *tabfn,
2314 const char *tabpfn,
2315 const t_filenm *tabbfnm,
2316 const char *nbpu_opt,
2317 gmx_bool bNoSolvOpt,
2318 real print_force)
2320 int i, m, negp_pp, negptable, egi, egj;
2321 real rtab;
2322 char *env;
2323 double dbl;
2324 const t_block *cgs;
2325 gmx_bool bGenericKernelOnly;
2326 gmx_bool needGroupSchemeTables, bSomeNormalNbListsAreInUse;
2327 gmx_bool bFEP_NonBonded;
2328 int *nm_ind, egp_flags;
2330 if (fr->hwinfo == nullptr)
2332 /* Detect hardware, gather information.
2333 * In mdrun, hwinfo has already been set before calling init_forcerec.
2334 * Here we ignore GPUs, as tools will not use them anyhow.
2336 fr->hwinfo = gmx_detect_hardware(mdlog, cr, FALSE);
2339 /* By default we turn SIMD kernels on, but it might be turned off further down... */
2340 fr->use_simd_kernels = TRUE;
2342 fr->bDomDec = DOMAINDECOMP(cr);
2344 if (check_box(ir->ePBC, box))
2346 gmx_fatal(FARGS, check_box(ir->ePBC, box));
2349 /* Test particle insertion ? */
2350 if (EI_TPI(ir->eI))
2352 /* Set to the size of the molecule to be inserted (the last one) */
2353 /* Because of old style topologies, we have to use the last cg
2354 * instead of the last molecule type.
2356 cgs = &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].cgs;
2357 fr->n_tpi = cgs->index[cgs->nr] - cgs->index[cgs->nr-1];
2358 if (fr->n_tpi != mtop->mols.index[mtop->mols.nr] - mtop->mols.index[mtop->mols.nr-1])
2360 gmx_fatal(FARGS, "The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
2363 else
2365 fr->n_tpi = 0;
2368 if (ir->coulombtype == eelRF_NEC_UNSUPPORTED)
2370 gmx_fatal(FARGS, "%s electrostatics is no longer supported",
2371 eel_names[ir->coulombtype]);
2374 if (ir->bAdress)
2376 gmx_fatal(FARGS, "AdResS simulations are no longer supported");
2378 if (ir->useTwinRange)
2380 gmx_fatal(FARGS, "Twin-range simulations are no longer supported");
2382 /* Copy the user determined parameters */
2383 fr->userint1 = ir->userint1;
2384 fr->userint2 = ir->userint2;
2385 fr->userint3 = ir->userint3;
2386 fr->userint4 = ir->userint4;
2387 fr->userreal1 = ir->userreal1;
2388 fr->userreal2 = ir->userreal2;
2389 fr->userreal3 = ir->userreal3;
2390 fr->userreal4 = ir->userreal4;
2392 /* Shell stuff */
2393 fr->fc_stepsize = ir->fc_stepsize;
2395 /* Free energy */
2396 fr->efep = ir->efep;
2397 fr->sc_alphavdw = ir->fepvals->sc_alpha;
2398 if (ir->fepvals->bScCoul)
2400 fr->sc_alphacoul = ir->fepvals->sc_alpha;
2401 fr->sc_sigma6_min = gmx::power6(ir->fepvals->sc_sigma_min);
2403 else
2405 fr->sc_alphacoul = 0;
2406 fr->sc_sigma6_min = 0; /* only needed when bScCoul is on */
2408 fr->sc_power = ir->fepvals->sc_power;
2409 fr->sc_r_power = ir->fepvals->sc_r_power;
2410 fr->sc_sigma6_def = gmx::power6(ir->fepvals->sc_sigma);
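/* For orientation (standard soft-core form with the default sc-r-power = 6;
 * numbers only illustrative): the pair distance enters as
 *   r_sc^6 = sc_alpha*sigma^6*lambda^sc_power + r^6
 * so sigma^6 is what gets stored here; sc_sigma = 0.3 nm gives 7.29e-4 nm^6.
 */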
2412 env = getenv("GMX_SCSIGMA_MIN");
2413 if (env != nullptr)
2415 dbl = 0;
2416 sscanf(env, "%20lf", &dbl);
2417 fr->sc_sigma6_min = gmx::power6(dbl);
2418 if (fp)
2420 fprintf(fp, "Setting the minimum soft core sigma to %g nm\n", dbl);
2424 fr->bNonbonded = TRUE;
2425 if (getenv("GMX_NO_NONBONDED") != nullptr)
2427 /* turn off non-bonded calculations */
2428 fr->bNonbonded = FALSE;
2429 GMX_LOG(mdlog.warning).asParagraph().appendText(
2430 "Found environment variable GMX_NO_NONBONDED.\n"
2431 "Disabling nonbonded calculations.");
2434 bGenericKernelOnly = FALSE;
2436 /* We now check in the NS code whether a particular combination of interactions
2437 * can be used with water optimization, and disable it if that is not the case.
2440 if (getenv("GMX_NB_GENERIC") != nullptr)
2442 if (fp != nullptr)
2444 fprintf(fp,
2445 "Found environment variable GMX_NB_GENERIC.\n"
2446 "Disabling all interaction-specific nonbonded kernels, will only\n"
2447 "use the slow generic ones in src/gmxlib/nonbonded/nb_generic.c\n\n");
2449 bGenericKernelOnly = TRUE;
2452 if (bGenericKernelOnly == TRUE)
2454 bNoSolvOpt = TRUE;
2457 if ( (getenv("GMX_DISABLE_SIMD_KERNELS") != nullptr) || (getenv("GMX_NOOPTIMIZEDKERNELS") != nullptr) )
2459 fr->use_simd_kernels = FALSE;
2460 if (fp != nullptr)
2462 fprintf(fp,
2463 "\nFound environment variable GMX_DISABLE_SIMD_KERNELS.\n"
2464 "Disabling the usage of any SIMD-specific non-bonded & bonded kernel routines\n"
2465 "(e.g. SSE2/SSE4.1/AVX).\n\n");
2469 fr->bBHAM = (mtop->ffparams.functype[0] == F_BHAM);
2471 /* Check if we can/should do all-vs-all kernels */
2472 fr->bAllvsAll = can_use_allvsall(ir, FALSE, nullptr, nullptr);
2473 fr->AllvsAll_work = nullptr;
2474 fr->AllvsAll_workgb = nullptr;
2476 /* All-vs-all kernels have not been implemented in 4.6 and later.
2477 * See Redmine #1249. */
2478 if (fr->bAllvsAll)
2480 fr->bAllvsAll = FALSE;
2481 if (fp != nullptr)
2483 fprintf(fp,
2484 "\nYour simulation settings would have triggered the efficient all-vs-all\n"
2485 "kernels in GROMACS 4.5, but these have not been implemented in GROMACS\n"
2486 "4.6 and 5.x. If performance is important, please use GROMACS 4.5.7\n"
2487 "or try cutoff-scheme = Verlet.\n\n");
2491 /* Neighbour searching stuff */
2492 fr->cutoff_scheme = ir->cutoff_scheme;
2493 fr->bGrid = (ir->ns_type == ensGRID);
2494 fr->ePBC = ir->ePBC;
2496 if (fr->cutoff_scheme == ecutsGROUP)
2498 const char *note = "NOTE: This file uses the deprecated 'group' cutoff_scheme. This will be\n"
2499 "removed in a future release when 'verlet' supports all interaction forms.\n";
2501 if (MASTER(cr))
2503 fprintf(stderr, "\n%s\n", note);
2505 if (fp != nullptr)
2507 fprintf(fp, "\n%s\n", note);
2511 /* Determine if we will do PBC for distances in bonded interactions */
2512 if (fr->ePBC == epbcNONE)
2514 fr->bMolPBC = FALSE;
2516 else
2518 if (!DOMAINDECOMP(cr))
2520 gmx_bool bSHAKE;
2522 bSHAKE = (ir->eConstrAlg == econtSHAKE &&
2523 (gmx_mtop_ftype_count(mtop, F_CONSTR) > 0 ||
2524 gmx_mtop_ftype_count(mtop, F_CONSTRNC) > 0));
2526 /* The group cut-off scheme and SHAKE assume charge groups
2527 * are whole, but not using molpbc is faster in most cases.
2528 * With intermolecular interactions we need PBC for calculating
2529 * distances between atoms in different molecules.
2531 if ((fr->cutoff_scheme == ecutsGROUP || bSHAKE) &&
2532 !mtop->bIntermolecularInteractions)
2534 fr->bMolPBC = ir->bPeriodicMols;
2536 if (bSHAKE && fr->bMolPBC)
2538 gmx_fatal(FARGS, "SHAKE is not supported with periodic molecules");
2541 else
2543 fr->bMolPBC = TRUE;
2545 if (getenv("GMX_USE_GRAPH") != nullptr)
2547 fr->bMolPBC = FALSE;
2548 if (fp)
2550 GMX_LOG(mdlog.warning).asParagraph().appendText("GMX_USE_GRAPH is set, using the graph for bonded interactions");
2553 if (mtop->bIntermolecularInteractions)
2555 GMX_LOG(mdlog.warning).asParagraph().appendText("WARNING: Molecules linked by intermolecular interactions have to reside in the same periodic image, otherwise artifacts will occur!");
2559 if (bSHAKE && fr->bMolPBC)
2561 gmx_fatal(FARGS, "SHAKE is not properly supported with intermolecular interactions. For short simulations where linked molecules remain in the same periodic image, the environment variable GMX_USE_GRAPH can be used to override this check.\n");
2565 else
2567 fr->bMolPBC = dd_bonded_molpbc(cr->dd, fr->ePBC);
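/* Summary of the choice above (restatement only):
 *  - no PBC:                                             bMolPBC = FALSE
 *  - no DD, group scheme or SHAKE, no intermolecular
 *    interactions:                                       bMolPBC = ir->bPeriodicMols
 *  - no DD otherwise:                                    bMolPBC = TRUE
 *    (unless GMX_USE_GRAPH is set)
 *  - with DD:                                            dd_bonded_molpbc() decides
 */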
2570 fr->bGB = (ir->implicit_solvent == eisGBSA);
2572 fr->rc_scaling = ir->refcoord_scaling;
2573 copy_rvec(ir->posres_com, fr->posres_com);
2574 copy_rvec(ir->posres_comB, fr->posres_comB);
2575 fr->rlist = cutoff_inf(ir->rlist);
2576 fr->eeltype = ir->coulombtype;
2577 fr->vdwtype = ir->vdwtype;
2578 fr->ljpme_combination_rule = ir->ljpme_combination_rule;
2580 fr->coulomb_modifier = ir->coulomb_modifier;
2581 fr->vdw_modifier = ir->vdw_modifier;
2583 /* Electrostatics: Translate from interaction-setting-in-mdp-file to kernel interaction format */
2584 switch (fr->eeltype)
2586 case eelCUT:
2587 fr->nbkernel_elec_interaction = (fr->bGB) ? GMX_NBKERNEL_ELEC_GENERALIZEDBORN : GMX_NBKERNEL_ELEC_COULOMB;
2588 break;
2590 case eelRF:
2591 case eelGRF:
2592 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2593 break;
2595 case eelRF_ZERO:
2596 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2597 fr->coulomb_modifier = eintmodEXACTCUTOFF;
2598 break;
2600 case eelSWITCH:
2601 case eelSHIFT:
2602 case eelUSER:
2603 case eelENCADSHIFT:
2604 case eelPMESWITCH:
2605 case eelPMEUSER:
2606 case eelPMEUSERSWITCH:
2607 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2608 break;
2610 case eelPME:
2611 case eelP3M_AD:
2612 case eelEWALD:
2613 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_EWALD;
2614 break;
2616 default:
2617 gmx_fatal(FARGS, "Unsupported electrostatic interaction: %s", eel_names[fr->eeltype]);
2618 break;
2621 /* Vdw: Translate from mdp settings to kernel format */
2622 switch (fr->vdwtype)
2624 case evdwCUT:
2625 if (fr->bBHAM)
2627 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_BUCKINGHAM;
2629 else
2631 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LENNARDJONES;
2633 break;
2634 case evdwPME:
2635 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LJEWALD;
2636 break;
2638 case evdwSWITCH:
2639 case evdwSHIFT:
2640 case evdwUSER:
2641 case evdwENCADSHIFT:
2642 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2643 break;
2645 default:
2646 gmx_fatal(FARGS, "Unsupported vdw interaction: %s", evdw_names[fr->vdwtype]);
2647 break;
2650 /* These start out identical to ir, but might be altered if we e.g. tabulate the interaction in the kernel */
2651 fr->nbkernel_elec_modifier = fr->coulomb_modifier;
2652 fr->nbkernel_vdw_modifier = fr->vdw_modifier;
2654 fr->rvdw = cutoff_inf(ir->rvdw);
2655 fr->rvdw_switch = ir->rvdw_switch;
2656 fr->rcoulomb = cutoff_inf(ir->rcoulomb);
2657 fr->rcoulomb_switch = ir->rcoulomb_switch;
2659 fr->bEwald = EEL_PME_EWALD(fr->eeltype);
2661 fr->reppow = mtop->ffparams.reppow;
2663 if (ir->cutoff_scheme == ecutsGROUP)
2665 fr->bvdwtab = ((fr->vdwtype != evdwCUT || !gmx_within_tol(fr->reppow, 12.0, 10*GMX_DOUBLE_EPS))
2666 && !EVDW_PME(fr->vdwtype));
2667 /* We have special kernels for standard Ewald and PME, but the pme-switch ones are tabulated above */
2668 fr->bcoultab = !(fr->eeltype == eelCUT ||
2669 fr->eeltype == eelEWALD ||
2670 fr->eeltype == eelPME ||
2671 fr->eeltype == eelRF ||
2672 fr->eeltype == eelRF_ZERO);
2674 /* If the user absolutely wants different switch/shift settings for coul/vdw, it is likely
2675 * going to be faster to tabulate the interaction than calling the generic kernel.
2676 * However, if generic kernels have been requested we keep things analytically.
2678 if (fr->nbkernel_elec_modifier == eintmodPOTSWITCH &&
2679 fr->nbkernel_vdw_modifier == eintmodPOTSWITCH &&
2680 bGenericKernelOnly == FALSE)
2682 if ((fr->rcoulomb_switch != fr->rvdw_switch) || (fr->rcoulomb != fr->rvdw))
2684 fr->bcoultab = TRUE;
2685 /* Once we tabulate electrostatics, we can use the switch function for LJ,
2686 * which would otherwise need two tables.
2690 else if ((fr->nbkernel_elec_modifier == eintmodPOTSHIFT && fr->nbkernel_vdw_modifier == eintmodPOTSHIFT) ||
2691 ((fr->nbkernel_elec_interaction == GMX_NBKERNEL_ELEC_REACTIONFIELD &&
2692 fr->nbkernel_elec_modifier == eintmodEXACTCUTOFF &&
2693 (fr->nbkernel_vdw_modifier == eintmodPOTSWITCH || fr->nbkernel_vdw_modifier == eintmodPOTSHIFT))))
2695 if ((fr->rcoulomb != fr->rvdw) && (bGenericKernelOnly == FALSE))
2697 fr->bcoultab = TRUE;
2701 if (fr->nbkernel_elec_modifier == eintmodFORCESWITCH)
2703 fr->bcoultab = TRUE;
2705 if (fr->nbkernel_vdw_modifier == eintmodFORCESWITCH)
2707 fr->bvdwtab = TRUE;
2710 if (getenv("GMX_REQUIRE_TABLES"))
2712 fr->bvdwtab = TRUE;
2713 fr->bcoultab = TRUE;
2716 if (fp)
2718 fprintf(fp, "Table routines are used for coulomb: %s\n",
2719 gmx::boolToString(fr->bcoultab));
2720 fprintf(fp, "Table routines are used for vdw: %s\n",
2721 gmx::boolToString(fr->bvdwtab));
2724 if (fr->bvdwtab == TRUE)
2726 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2727 fr->nbkernel_vdw_modifier = eintmodNONE;
2729 if (fr->bcoultab == TRUE)
2731 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2732 fr->nbkernel_elec_modifier = eintmodNONE;
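/* Once an interaction is tabulated, any potential modifier is baked into the
 * table itself, which is why the kernel modifier is reset to eintmodNONE here.
 */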
2736 if (ir->cutoff_scheme == ecutsVERLET)
2738 if (!gmx_within_tol(fr->reppow, 12.0, 10*GMX_DOUBLE_EPS))
2740 gmx_fatal(FARGS, "Cut-off scheme %s only supports LJ repulsion power 12", ecutscheme_names[ir->cutoff_scheme]);
2742 fr->bvdwtab = FALSE;
2743 fr->bcoultab = FALSE;
2746 /* This now calculates sum for q and C6 */
2747 set_chargesum(fp, fr, mtop);
2749 /* Tables are used for direct ewald sum */
2750 if (fr->bEwald)
2752 if (EEL_PME(ir->coulombtype))
2754 if (fp)
2756 fprintf(fp, "Will do PME sum in reciprocal space for electrostatic interactions.\n");
2758 if (ir->coulombtype == eelP3M_AD)
2760 please_cite(fp, "Hockney1988");
2761 please_cite(fp, "Ballenegger2012");
2763 else
2765 please_cite(fp, "Essmann95a");
2768 if (ir->ewald_geometry == eewg3DC)
2770 bool haveNetCharge = (fabs(fr->qsum[0]) > 1e-4 ||
2771 fabs(fr->qsum[1]) > 1e-4);
2772 if (fp)
2774 fprintf(fp, "Using the Ewald3DC correction for systems with a slab geometry%s.\n",
2775 haveNetCharge ? " and net charge" : "");
2777 please_cite(fp, "In-Chul99a");
2778 if (haveNetCharge)
2780 please_cite(fp, "Ballenegger2009");
2784 fr->ewaldcoeff_q = calc_ewaldcoeff_q(ir->rcoulomb, ir->ewald_rtol);
2785 init_ewald_tab(&(fr->ewald_table), ir, fp);
2786 if (fp)
2788 fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for Ewald\n",
2789 1/fr->ewaldcoeff_q);
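/* Worked example (values assumed for illustration): calc_ewaldcoeff_q()
 * chooses beta such that erfc(beta*rcoulomb) = ewald_rtol; with
 * rcoulomb = 1.0 nm and ewald_rtol = 1e-5 this gives beta ~= 3.12 nm^-1,
 * i.e. a Gaussian width 1/beta of about 0.32 nm.
 */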
2793 if (EVDW_PME(ir->vdwtype))
2795 if (fp)
2797 fprintf(fp, "Will do PME sum in reciprocal space for LJ dispersion interactions.\n");
2799 please_cite(fp, "Essmann95a");
2800 fr->ewaldcoeff_lj = calc_ewaldcoeff_lj(ir->rvdw, ir->ewald_rtol_lj);
2801 if (fp)
2803 fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for LJ Ewald\n",
2804 1/fr->ewaldcoeff_lj);
2808 /* Electrostatics */
2809 fr->epsilon_r = ir->epsilon_r;
2810 fr->epsilon_rf = ir->epsilon_rf;
2811 fr->fudgeQQ = mtop->ffparams.fudgeQQ;
2813 /* Parameters for generalized RF */
2814 fr->zsquare = 0.0;
2815 fr->temp = 0.0;
2817 if (fr->eeltype == eelGRF)
2819 init_generalized_rf(fp, mtop, ir, fr);
2822 fr->bF_NoVirSum = (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype) ||
2823 gmx_mtop_ftype_count(mtop, F_POSRES) > 0 ||
2824 gmx_mtop_ftype_count(mtop, F_FBPOSRES) > 0);
2826 /* Initialization call after setting bF_NoVirSum,
2827 * since the electric-field force provider's initForcerec() can also set it to true.
2829 forceProviders->initForcerec(fr);
2831 if (fr->bF_NoVirSum)
2833 fr->forceBufferNoVirialSummation = new PaddedRVecVector;
2836 if (fr->cutoff_scheme == ecutsGROUP &&
2837 ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr))
2839 /* Count the total number of charge groups */
2840 fr->cg_nalloc = ncg_mtop(mtop);
2841 srenew(fr->cg_cm, fr->cg_nalloc);
2843 if (fr->shift_vec == nullptr)
2845 snew(fr->shift_vec, SHIFTS);
2848 if (fr->fshift == nullptr)
2850 snew(fr->fshift, SHIFTS);
2853 if (fr->nbfp == nullptr)
2855 fr->ntype = mtop->ffparams.atnr;
2856 fr->nbfp = mk_nbfp(&mtop->ffparams, fr->bBHAM);
2857 if (EVDW_PME(fr->vdwtype))
2859 fr->ljpme_c6grid = make_ljpme_c6grid(&mtop->ffparams, fr);
2863 /* Copy the energy group exclusions */
2864 fr->egp_flags = ir->opts.egp_flags;
2866 /* Van der Waals stuff */
2867 if ((fr->vdwtype != evdwCUT) && (fr->vdwtype != evdwUSER) && !fr->bBHAM)
2869 if (fr->rvdw_switch >= fr->rvdw)
2871 gmx_fatal(FARGS, "rvdw_switch (%f) must be < rvdw (%f)",
2872 fr->rvdw_switch, fr->rvdw);
2874 if (fp)
2876 fprintf(fp, "Using %s Lennard-Jones, switch between %g and %g nm\n",
2877 (fr->vdwtype == evdwSWITCH) ? "switched" : "shifted",
2878 fr->rvdw_switch, fr->rvdw);
2882 if (fr->bBHAM && EVDW_PME(fr->vdwtype))
2884 gmx_fatal(FARGS, "LJ PME not supported with Buckingham");
2887 if (fr->bBHAM && (fr->vdwtype == evdwSHIFT || fr->vdwtype == evdwSWITCH))
2889 gmx_fatal(FARGS, "Switch/shift interaction not supported with Buckingham");
2892 if (fr->bBHAM && fr->cutoff_scheme == ecutsVERLET)
2894 gmx_fatal(FARGS, "Verlet cutoff-scheme is not supported with Buckingham");
2897 if (fp)
2899 fprintf(fp, "Cut-offs: NS: %g Coulomb: %g %s: %g\n",
2900 fr->rlist, fr->rcoulomb, fr->bBHAM ? "BHAM" : "LJ", fr->rvdw);
2903 fr->eDispCorr = ir->eDispCorr;
2904 fr->numAtomsForDispersionCorrection = mtop->natoms;
2905 if (ir->eDispCorr != edispcNO)
2907 set_avcsixtwelve(fp, fr, mtop);
2910 if (fr->bBHAM)
2912 set_bham_b_max(fp, fr, mtop);
2915 fr->gb_epsilon_solvent = ir->gb_epsilon_solvent;
2917 /* Copy the GBSA data (radius, volume and surftens for each
2918 * atomtype) from the topology atomtype section to forcerec.
2920 snew(fr->atype_radius, fr->ntype);
2921 snew(fr->atype_vol, fr->ntype);
2922 snew(fr->atype_surftens, fr->ntype);
2923 snew(fr->atype_gb_radius, fr->ntype);
2924 snew(fr->atype_S_hct, fr->ntype);
2926 if (mtop->atomtypes.nr > 0)
2928 for (i = 0; i < fr->ntype; i++)
2930 fr->atype_radius[i] = mtop->atomtypes.radius[i];
2932 for (i = 0; i < fr->ntype; i++)
2934 fr->atype_vol[i] = mtop->atomtypes.vol[i];
2936 for (i = 0; i < fr->ntype; i++)
2938 fr->atype_surftens[i] = mtop->atomtypes.surftens[i];
2940 for (i = 0; i < fr->ntype; i++)
2942 fr->atype_gb_radius[i] = mtop->atomtypes.gb_radius[i];
2944 for (i = 0; i < fr->ntype; i++)
2946 fr->atype_S_hct[i] = mtop->atomtypes.S_hct[i];
2950 /* Generate the GB table if needed */
2951 if (fr->bGB)
2953 #if GMX_DOUBLE
2954 fr->gbtabscale = 2000;
2955 #else
2956 fr->gbtabscale = 500;
2957 #endif
2959 fr->gbtabr = 100;
2960 fr->gbtab = make_gb_table(fr);
2962 init_gb(&fr->born, fr, ir, mtop, ir->gb_algorithm);
2964 /* Copy local gb data (for dd, this is done in dd_partition_system) */
2965 if (!DOMAINDECOMP(cr))
2967 make_local_gb(cr, fr->born, ir->gb_algorithm);
2971 /* Set the charge scaling */
2972 if (fr->epsilon_r != 0)
2974 fr->epsfac = ONE_4PI_EPS0/fr->epsilon_r;
2976 else
2978 /* eps = 0 is infinite dielectric: no Coulomb interactions */
2979 fr->epsfac = 0;
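/* In GROMACS units ONE_4PI_EPS0 is ~138.935 kJ mol^-1 nm e^-2, so with
 * epsilon_r = 1 the Coulomb prefactor epsfac is ~138.935; epsilon_r = 0 is
 * the conventional way to switch Coulomb interactions off entirely.
 */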
2982 /* Reaction field constants */
2983 if (EEL_RF(fr->eeltype))
2985 calc_rffac(fp, fr->eeltype, fr->epsilon_r, fr->epsilon_rf,
2986 fr->rcoulomb, fr->temp, fr->zsquare, box,
2987 &fr->kappa, &fr->k_rf, &fr->c_rf);
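/* For the common case without the generalized (ionic-strength/temperature)
 * corrections, calc_rffac() reduces to (illustration only):
 *   k_rf = (epsilon_rf - epsilon_r)/((2*epsilon_rf + epsilon_r)*rcoulomb^3)
 *   c_rf = 1/rcoulomb + k_rf*rcoulomb^2
 * and in the conducting limit (epsilon_rf = 0 in the mdp file, meaning
 * infinity) k_rf becomes 1/(2*rcoulomb^3).
 */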
2990 /* Construct tables for the group scheme. A little unnecessary to
2991 * make both vdw and coul tables sometimes, but what the
2992 * heck. Note that both cutoff schemes construct Ewald tables in
2993 * init_interaction_const_tables. */
2994 needGroupSchemeTables = (ir->cutoff_scheme == ecutsGROUP &&
2995 (fr->bcoultab || fr->bvdwtab));
2997 negp_pp = ir->opts.ngener - ir->nwall;
2998 negptable = 0;
2999 if (!needGroupSchemeTables)
3001 bSomeNormalNbListsAreInUse = TRUE;
3002 fr->nnblists = 1;
3004 else
3006 bSomeNormalNbListsAreInUse = FALSE;
3007 for (egi = 0; egi < negp_pp; egi++)
3009 for (egj = egi; egj < negp_pp; egj++)
3011 egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
3012 if (!(egp_flags & EGP_EXCL))
3014 if (egp_flags & EGP_TABLE)
3016 negptable++;
3018 else
3020 bSomeNormalNbListsAreInUse = TRUE;
3025 if (bSomeNormalNbListsAreInUse)
3027 fr->nnblists = negptable + 1;
3029 else
3031 fr->nnblists = negptable;
3033 if (fr->nnblists > 1)
3035 snew(fr->gid2nblists, ir->opts.ngener*ir->opts.ngener);
3039 snew(fr->nblists, fr->nnblists);
3041 /* Without cut-offs this automatically gives a table length of tabext;
3042 * in that case grompp should already have checked that we do not need
3043 * normal tables, and we only generate tables for 1-4 interactions.
3045 rtab = ir->rlist + ir->tabext;
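/* Example (mdp defaults assumed): with rlist = 1.0 nm and the default
 * table-extension of 1 nm, the group-scheme and 1-4 tables extend to 2.0 nm.
 */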
3047 if (needGroupSchemeTables)
3049 /* make tables for ordinary interactions */
3050 if (bSomeNormalNbListsAreInUse)
3052 make_nbf_tables(fp, fr, rtab, tabfn, nullptr, nullptr, &fr->nblists[0]);
3053 m = 1;
3055 else
3057 m = 0;
3059 if (negptable > 0)
3061 /* Read the special tables for certain energy group pairs */
3062 nm_ind = mtop->groups.grps[egcENER].nm_ind;
3063 for (egi = 0; egi < negp_pp; egi++)
3065 for (egj = egi; egj < negp_pp; egj++)
3067 egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
3068 if ((egp_flags & EGP_TABLE) && !(egp_flags & EGP_EXCL))
3070 if (fr->nnblists > 1)
3072 fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = m;
3074 /* Read the table file with the two energy groups names appended */
3075 make_nbf_tables(fp, fr, rtab, tabfn,
3076 *mtop->groups.grpname[nm_ind[egi]],
3077 *mtop->groups.grpname[nm_ind[egj]],
3078 &fr->nblists[m]);
3079 m++;
3081 else if (fr->nnblists > 1)
3083 fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = 0;
3090 /* Tables might not be used for the potential modifier
3091 * interactions per se, but we still need them to evaluate
3092 * switch/shift dispersion corrections in this case. */
3093 if (fr->eDispCorr != edispcNO)
3095 fr->dispersionCorrectionTable = makeDispersionCorrectionTable(fp, fr, rtab, tabfn);
3098 /* We want to use unmodified tables for 1-4 coulombic
3099 * interactions, so we must in general have an extra set of
3100 * tables. */
3101 if (gmx_mtop_ftype_count(mtop, F_LJ14) > 0 ||
3102 gmx_mtop_ftype_count(mtop, F_LJC14_Q) > 0 ||
3103 gmx_mtop_ftype_count(mtop, F_LJC_PAIRS_NB) > 0)
3105 fr->pairsTable = make_tables(fp, fr, tabpfn, rtab,
3106 GMX_MAKETABLES_14ONLY);
3109 /* Wall stuff */
3110 fr->nwall = ir->nwall;
3111 if (ir->nwall && ir->wall_type == ewtTABLE)
3113 make_wall_tables(fp, ir, tabfn, &mtop->groups, fr);
3116 if (fcd && tabbfnm)
3118 // Need to catch std::bad_alloc
3119 // TODO Don't need to catch this here, when merging with master branch
3122 fcd->bondtab = make_bonded_tables(fp,
3123 F_TABBONDS, F_TABBONDSNC,
3124 mtop, tabbfnm, "b");
3125 fcd->angletab = make_bonded_tables(fp,
3126 F_TABANGLES, -1,
3127 mtop, tabbfnm, "a");
3128 fcd->dihtab = make_bonded_tables(fp,
3129 F_TABDIHS, -1,
3130 mtop, tabbfnm, "d");
3132 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
3134 else
3136 if (debug)
3138 fprintf(debug, "No fcdata or table file name passed, can not read table, can not do bonded interactions\n");
3142 /* QM/MM initialization if requested
3144 if (ir->bQMMM)
3146 fprintf(stderr, "QM/MM calculation requested.\n");
3149 fr->bQMMM = ir->bQMMM;
3150 fr->qr = mk_QMMMrec();
3152 /* Set all the static charge group info */
3153 fr->cginfo_mb = init_cginfo_mb(fp, mtop, fr, bNoSolvOpt,
3154 &bFEP_NonBonded,
3155 &fr->bExcl_IntraCGAll_InterCGNone);
3156 if (DOMAINDECOMP(cr))
3158 fr->cginfo = nullptr;
3160 else
3162 fr->cginfo = cginfo_expand(mtop->nmolblock, fr->cginfo_mb);
3165 if (!DOMAINDECOMP(cr))
3167 forcerec_set_ranges(fr, ncg_mtop(mtop), ncg_mtop(mtop),
3168 mtop->natoms, mtop->natoms, mtop->natoms);
3171 fr->print_force = print_force;
3174 /* coarse load balancing vars */
3175 fr->t_fnbf = 0.;
3176 fr->t_wait = 0.;
3177 fr->timesteps = 0;
3179 /* Initialize neighbor search */
3180 snew(fr->ns, 1);
3181 init_ns(fp, cr, fr->ns, fr, mtop);
3183 if (cr->duty & DUTY_PP)
3185 gmx_nonbonded_setup(fr, bGenericKernelOnly);
3188 /* Initialize the thread working data for bonded interactions */
3189 init_bonded_threading(fp, mtop->groups.grps[egcENER].nr,
3190 &fr->bonded_threading);
3192 fr->nthread_ewc = gmx_omp_nthreads_get(emntBonded);
3193 snew(fr->ewc_t, fr->nthread_ewc);
3195 /* fr->ic is used both by verlet and group kernels (to some extent) now */
3196 init_interaction_const(fp, &fr->ic, fr);
3197 init_interaction_const_tables(fp, fr->ic, rtab);
3199 if (fr->cutoff_scheme == ecutsVERLET)
3201 // We checked the cut-offs in grompp, but double-check here.
3202 // We have PME+LJcutoff kernels for rcoulomb>rvdw.
3203 if (EEL_PME_EWALD(ir->coulombtype) && ir->vdwtype == evdwCUT)
3205 GMX_RELEASE_ASSERT(ir->rcoulomb >= ir->rvdw, "With Verlet lists and PME we should have rcoulomb>=rvdw");
3207 else
3209 GMX_RELEASE_ASSERT(ir->rcoulomb == ir->rvdw, "With Verlet lists and no PME rcoulomb and rvdw should be identical");
3212 init_nb_verlet(fp, mdlog, &fr->nbv, bFEP_NonBonded, ir, fr, cr, nbpu_opt);
3215 if (ir->eDispCorr != edispcNO)
3217 calc_enervirdiff(fp, ir->eDispCorr, fr);
3221 #define pr_real(fp, r) fprintf(fp, "%s: %e\n",#r, r)
3222 #define pr_int(fp, i) fprintf((fp), "%s: %d\n",#i, i)
3223 #define pr_bool(fp, b) fprintf((fp), "%s: %s\n",#b, gmx::boolToString(b))
3225 void pr_forcerec(FILE *fp, t_forcerec *fr)
3227 int i;
3229 pr_real(fp, fr->rlist);
3230 pr_real(fp, fr->rcoulomb);
3231 pr_real(fp, fr->fudgeQQ);
3232 pr_bool(fp, fr->bGrid);
3233 /*pr_int(fp,fr->cg0);
3234 pr_int(fp,fr->hcg);*/
3235 for (i = 0; i < fr->nnblists; i++)
3237 pr_int(fp, fr->nblists[i].table_elec_vdw->n);
3239 pr_real(fp, fr->rcoulomb_switch);
3240 pr_real(fp, fr->rcoulomb);
3242 fflush(fp);
3245 /* Frees GPU memory and destroys the GPU context.
3247 * Note that this function needs to be called even if GPUs are not used
3248 * in this run because the PME ranks have no knowledge of whether GPUs
3249 * are used or not, but all ranks need to enter the barrier below.
3251 void free_gpu_resources(const t_forcerec *fr,
3252 const t_commrec *cr,
3253 const gmx_gpu_info_t *gpu_info,
3254 const gmx_gpu_opt_t *gpu_opt)
3256 gmx_bool bIsPPrankUsingGPU;
3257 char gpu_err_str[STRLEN];
3259 bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr && fr->nbv && fr->nbv->bUseGPU;
3261 if (bIsPPrankUsingGPU)
3263 /* free nbnxn data in GPU memory */
3264 nbnxn_gpu_free(fr->nbv->gpu_nbv);
3265 /* stop the GPU profiler (only CUDA) */
3266 stopGpuProfiler();
3268 /* With tMPI we need to wait for all ranks to finish deallocation before
3269 * destroying the CUDA context in free_gpu() as some tMPI ranks may be sharing
3270 * GPU and context.
3272 * This is not a concern in OpenCL where we use one context per rank which
3273 * is freed in nbnxn_gpu_free().
3275 * Note: as only PP ranks need to free GPU resources, it is safe to
3276 * not call the barrier on PME ranks.
3278 #if GMX_THREAD_MPI
3279 if (PAR(cr))
3281 gmx_barrier(cr);
3283 #endif /* GMX_THREAD_MPI */
3285 /* uninitialize GPU (by destroying the context) */
3286 if (!free_cuda_gpu(cr->rank_pp_intranode, gpu_err_str, gpu_info, gpu_opt))
3288 gmx_warning("On rank %d failed to free GPU #%d: %s",
3289 cr->nodeid, get_current_cuda_gpu_device_id(), gpu_err_str);