Removed four includes from legacyheaders/typedefs.h
[gromacs.git] / src / gromacs / mdlib / forcerec.cpp
blob809125189b68e1b35e23c66b5ac3b71b666697dc
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 #include "gmxpre.h"
39 #include "forcerec.h"
41 #include "config.h"
43 #include <assert.h>
44 #include <math.h>
45 #include <stdlib.h>
46 #include <string.h>
48 #include <algorithm>
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/ewald/ewald.h"
52 #include "gromacs/fileio/trx.h"
53 #include "gromacs/gmxlib/gpu_utils/gpu_utils.h"
54 #include "gromacs/legacyheaders/copyrite.h"
55 #include "gromacs/legacyheaders/force.h"
56 #include "gromacs/legacyheaders/gmx_detect_hardware.h"
57 #include "gromacs/legacyheaders/gmx_omp_nthreads.h"
58 #include "gromacs/legacyheaders/inputrec.h"
59 #include "gromacs/legacyheaders/md_logging.h"
60 #include "gromacs/legacyheaders/md_support.h"
61 #include "gromacs/legacyheaders/names.h"
62 #include "gromacs/legacyheaders/network.h"
63 #include "gromacs/legacyheaders/nonbonded.h"
64 #include "gromacs/legacyheaders/ns.h"
65 #include "gromacs/legacyheaders/qmmm.h"
66 #include "gromacs/legacyheaders/tables.h"
67 #include "gromacs/legacyheaders/txtdump.h"
68 #include "gromacs/legacyheaders/typedefs.h"
69 #include "gromacs/legacyheaders/types/commrec.h"
70 #include "gromacs/legacyheaders/types/group.h"
71 #include "gromacs/listed-forces/manage-threading.h"
72 #include "gromacs/math/calculate-ewald-splitting-coefficient.h"
73 #include "gromacs/math/units.h"
74 #include "gromacs/math/utilities.h"
75 #include "gromacs/math/vec.h"
76 #include "gromacs/mdlib/forcerec-threading.h"
77 #include "gromacs/mdlib/nb_verlet.h"
78 #include "gromacs/mdlib/nbnxn_atomdata.h"
79 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
80 #include "gromacs/mdlib/nbnxn_search.h"
81 #include "gromacs/mdlib/nbnxn_simd.h"
82 #include "gromacs/mdlib/nbnxn_util.h"
83 #include "gromacs/pbcutil/ishift.h"
84 #include "gromacs/pbcutil/pbc.h"
85 #include "gromacs/simd/simd.h"
86 #include "gromacs/topology/mtop_util.h"
87 #include "gromacs/utility/cstringutil.h"
88 #include "gromacs/utility/fatalerror.h"
89 #include "gromacs/utility/smalloc.h"
91 #include "nbnxn_gpu_jit_support.h"
93 t_forcerec *mk_forcerec(void)
95 t_forcerec *fr;
97 snew(fr, 1);
99 return fr;
#ifdef DEBUG
/* Debug helper: dump the full pair non-bonded parameter matrix to fp.
 * The stored values carry the 6.0/12.0 derivative prefactors, so they are
 * divided out again here to print the plain c6/c12 (or Buckingham c) values.
 */
static void pr_nbfp(FILE *fp, real *nbfp, gmx_bool bBHAM, int atnr)
{
    for (int i = 0; i < atnr; i++)
    {
        for (int j = 0; j < atnr; j++)
        {
            fprintf(fp, "%2d - %2d", i, j);
            if (bBHAM)
            {
                fprintf(fp, " a=%10g, b=%10g, c=%10g\n", BHAMA(nbfp, atnr, i, j),
                        BHAMB(nbfp, atnr, i, j), BHAMC(nbfp, atnr, i, j)/6.0);
            }
            else
            {
                fprintf(fp, " c6=%10g, c12=%10g\n", C6(nbfp, atnr, i, j)/6.0,
                        C12(nbfp, atnr, i, j)/12.0);
            }
        }
    }
}
#endif
127 static real *mk_nbfp(const gmx_ffparams_t *idef, gmx_bool bBHAM)
129 real *nbfp;
130 int i, j, k, atnr;
132 atnr = idef->atnr;
133 if (bBHAM)
135 snew(nbfp, 3*atnr*atnr);
136 for (i = k = 0; (i < atnr); i++)
138 for (j = 0; (j < atnr); j++, k++)
140 BHAMA(nbfp, atnr, i, j) = idef->iparams[k].bham.a;
141 BHAMB(nbfp, atnr, i, j) = idef->iparams[k].bham.b;
142 /* nbfp now includes the 6.0 derivative prefactor */
143 BHAMC(nbfp, atnr, i, j) = idef->iparams[k].bham.c*6.0;
147 else
149 snew(nbfp, 2*atnr*atnr);
150 for (i = k = 0; (i < atnr); i++)
152 for (j = 0; (j < atnr); j++, k++)
154 /* nbfp now includes the 6.0/12.0 derivative prefactors */
155 C6(nbfp, atnr, i, j) = idef->iparams[k].lj.c6*6.0;
156 C12(nbfp, atnr, i, j) = idef->iparams[k].lj.c12*12.0;
161 return nbfp;
164 static real *make_ljpme_c6grid(const gmx_ffparams_t *idef, t_forcerec *fr)
166 int i, j, k, atnr;
167 real c6, c6i, c6j, c12i, c12j, epsi, epsj, sigmai, sigmaj;
168 real *grid;
169 const real oneOverSix = 1.0 / 6.0;
171 /* For LJ-PME simulations, we correct the energies with the reciprocal space
172 * inside of the cut-off. To do this the non-bonded kernels needs to have
173 * access to the C6-values used on the reciprocal grid in pme.c
176 atnr = idef->atnr;
177 snew(grid, 2*atnr*atnr);
178 for (i = k = 0; (i < atnr); i++)
180 for (j = 0; (j < atnr); j++, k++)
182 c6i = idef->iparams[i*(atnr+1)].lj.c6;
183 c12i = idef->iparams[i*(atnr+1)].lj.c12;
184 c6j = idef->iparams[j*(atnr+1)].lj.c6;
185 c12j = idef->iparams[j*(atnr+1)].lj.c12;
186 c6 = sqrt(c6i * c6j);
187 if (fr->ljpme_combination_rule == eljpmeLB
188 && !gmx_numzero(c6) && !gmx_numzero(c12i) && !gmx_numzero(c12j))
190 sigmai = pow(c12i / c6i, oneOverSix);
191 sigmaj = pow(c12j / c6j, oneOverSix);
192 epsi = c6i * c6i / c12i;
193 epsj = c6j * c6j / c12j;
194 c6 = sqrt(epsi * epsj) * pow(0.5*(sigmai+sigmaj), 6);
196 /* Store the elements at the same relative positions as C6 in nbfp in order
197 * to simplify access in the kernels
199 grid[2*(atnr*i+j)] = c6*6.0;
202 return grid;
205 static real *mk_nbfp_combination_rule(const gmx_ffparams_t *idef, int comb_rule)
207 real *nbfp;
208 int i, j, atnr;
209 real c6i, c6j, c12i, c12j, epsi, epsj, sigmai, sigmaj;
210 real c6, c12;
211 const real oneOverSix = 1.0 / 6.0;
213 atnr = idef->atnr;
214 snew(nbfp, 2*atnr*atnr);
215 for (i = 0; i < atnr; ++i)
217 for (j = 0; j < atnr; ++j)
219 c6i = idef->iparams[i*(atnr+1)].lj.c6;
220 c12i = idef->iparams[i*(atnr+1)].lj.c12;
221 c6j = idef->iparams[j*(atnr+1)].lj.c6;
222 c12j = idef->iparams[j*(atnr+1)].lj.c12;
223 c6 = sqrt(c6i * c6j);
224 c12 = sqrt(c12i * c12j);
225 if (comb_rule == eCOMB_ARITHMETIC
226 && !gmx_numzero(c6) && !gmx_numzero(c12))
228 sigmai = pow(c12i / c6i, oneOverSix);
229 sigmaj = pow(c12j / c6j, oneOverSix);
230 epsi = c6i * c6i / c12i;
231 epsj = c6j * c6j / c12j;
232 c6 = sqrt(epsi * epsj) * pow(0.5*(sigmai+sigmaj), 6);
233 c12 = sqrt(epsi * epsj) * pow(0.5*(sigmai+sigmaj), 12);
235 C6(nbfp, atnr, i, j) = c6*6.0;
236 C12(nbfp, atnr, i, j) = c12*12.0;
239 return nbfp;
242 /* This routine sets fr->solvent_opt to the most common solvent in the
243 * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in
244 * the fr->solvent_type array with the correct type (or esolNO).
246 * Charge groups that fulfill the conditions but are not identical to the
247 * most common one will be marked as esolNO in the solvent_type array.
249 * TIP3p is identical to SPC for these purposes, so we call it
250 * SPC in the arrays (Apologies to Bill Jorgensen ;-)
252  * NOTE: A QM particle should never
253  * become an optimized solvent, not even if there is only one charge
254  * group in the QM system.
257 typedef struct
259 int model;
260 int count;
261 int vdwtype[4];
262 real charge[4];
263 } solvent_parameters_t;
265 static void
266 check_solvent_cg(const gmx_moltype_t *molt,
267 int cg0,
268 int nmol,
269 const unsigned char *qm_grpnr,
270 const t_grps *qm_grps,
271 t_forcerec * fr,
272 int *n_solvent_parameters,
273 solvent_parameters_t **solvent_parameters_p,
274 int cginfo,
275 int *cg_sp)
277 t_atom *atom;
278 int j, k;
279 int j0, j1, nj;
280 gmx_bool perturbed;
281 gmx_bool has_vdw[4];
282 gmx_bool match;
283 real tmp_charge[4] = { 0.0 }; /* init to zero to make gcc4.8 happy */
284 int tmp_vdwtype[4] = { 0 }; /* init to zero to make gcc4.8 happy */
285 int tjA;
286 gmx_bool qm;
287 solvent_parameters_t *solvent_parameters;
289 /* We use a list with parameters for each solvent type.
290 * Every time we discover a new molecule that fulfills the basic
291 * conditions for a solvent we compare with the previous entries
292 * in these lists. If the parameters are the same we just increment
293 * the counter for that type, and otherwise we create a new type
294 * based on the current molecule.
296 * Once we've finished going through all molecules we check which
297 * solvent is most common, and mark all those molecules while we
298 * clear the flag on all others.
301 solvent_parameters = *solvent_parameters_p;
303 /* Mark the cg first as non optimized */
304 *cg_sp = -1;
306 /* Check if this cg has no exclusions with atoms in other charge groups
307 * and all atoms inside the charge group excluded.
308 * We only have 3 or 4 atom solvent loops.
310 if (GET_CGINFO_EXCL_INTER(cginfo) ||
311 !GET_CGINFO_EXCL_INTRA(cginfo))
313 return;
316 /* Get the indices of the first atom in this charge group */
317 j0 = molt->cgs.index[cg0];
318 j1 = molt->cgs.index[cg0+1];
320 /* Number of atoms in our molecule */
321 nj = j1 - j0;
323 if (debug)
325 fprintf(debug,
326 "Moltype '%s': there are %d atoms in this charge group\n",
327 *molt->name, nj);
330 /* Check if it could be an SPC (3 atoms) or TIP4p (4) water,
331 * otherwise skip it.
333 if (nj < 3 || nj > 4)
335 return;
338 /* Check if we are doing QM on this group */
339 qm = FALSE;
340 if (qm_grpnr != NULL)
342 for (j = j0; j < j1 && !qm; j++)
344 qm = (qm_grpnr[j] < qm_grps->nr - 1);
347 /* Cannot use solvent optimization with QM */
348 if (qm)
350 return;
353 atom = molt->atoms.atom;
355 /* Still looks like a solvent, time to check parameters */
357 /* If it is perturbed (free energy) we can't use the solvent loops,
358 * so then we just skip to the next molecule.
360 perturbed = FALSE;
362 for (j = j0; j < j1 && !perturbed; j++)
364 perturbed = PERTURBED(atom[j]);
367 if (perturbed)
369 return;
372 /* Now it's only a question if the VdW and charge parameters
373 * are OK. Before doing the check we compare and see if they are
374 * identical to a possible previous solvent type.
375 * First we assign the current types and charges.
377 for (j = 0; j < nj; j++)
379 tmp_vdwtype[j] = atom[j0+j].type;
380 tmp_charge[j] = atom[j0+j].q;
383 /* Does it match any previous solvent type? */
384 for (k = 0; k < *n_solvent_parameters; k++)
386 match = TRUE;
389 /* We can only match SPC with 3 atoms and TIP4p with 4 atoms */
390 if ( (solvent_parameters[k].model == esolSPC && nj != 3) ||
391 (solvent_parameters[k].model == esolTIP4P && nj != 4) )
393 match = FALSE;
396 /* Check that types & charges match for all atoms in molecule */
397 for (j = 0; j < nj && match == TRUE; j++)
399 if (tmp_vdwtype[j] != solvent_parameters[k].vdwtype[j])
401 match = FALSE;
403 if (tmp_charge[j] != solvent_parameters[k].charge[j])
405 match = FALSE;
408 if (match == TRUE)
410 /* Congratulations! We have a matched solvent.
411 * Flag it with this type for later processing.
413 *cg_sp = k;
414 solvent_parameters[k].count += nmol;
416 /* We are done with this charge group */
417 return;
421 /* If we get here, we have a tentative new solvent type.
422 * Before we add it we must check that it fulfills the requirements
423 * of the solvent optimized loops. First determine which atoms have
424 * VdW interactions.
426 for (j = 0; j < nj; j++)
428 has_vdw[j] = FALSE;
429 tjA = tmp_vdwtype[j];
431 /* Go through all other tpes and see if any have non-zero
432 * VdW parameters when combined with this one.
434 for (k = 0; k < fr->ntype && (has_vdw[j] == FALSE); k++)
436 /* We already checked that the atoms weren't perturbed,
437 * so we only need to check state A now.
439 if (fr->bBHAM)
441 has_vdw[j] = (has_vdw[j] ||
442 (BHAMA(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
443 (BHAMB(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
444 (BHAMC(fr->nbfp, fr->ntype, tjA, k) != 0.0));
446 else
448 /* Standard LJ */
449 has_vdw[j] = (has_vdw[j] ||
450 (C6(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
451 (C12(fr->nbfp, fr->ntype, tjA, k) != 0.0));
456 /* Now we know all we need to make the final check and assignment. */
457 if (nj == 3)
459 /* So, is it an SPC?
460 * For this we require thatn all atoms have charge,
461 * the charges on atom 2 & 3 should be the same, and only
462 * atom 1 might have VdW.
464 if (has_vdw[1] == FALSE &&
465 has_vdw[2] == FALSE &&
466 tmp_charge[0] != 0 &&
467 tmp_charge[1] != 0 &&
468 tmp_charge[2] == tmp_charge[1])
470 srenew(solvent_parameters, *n_solvent_parameters+1);
471 solvent_parameters[*n_solvent_parameters].model = esolSPC;
472 solvent_parameters[*n_solvent_parameters].count = nmol;
473 for (k = 0; k < 3; k++)
475 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
476 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
479 *cg_sp = *n_solvent_parameters;
480 (*n_solvent_parameters)++;
483 else if (nj == 4)
485 /* Or could it be a TIP4P?
486 * For this we require thatn atoms 2,3,4 have charge, but not atom 1.
487 * Only atom 1 mght have VdW.
489 if (has_vdw[1] == FALSE &&
490 has_vdw[2] == FALSE &&
491 has_vdw[3] == FALSE &&
492 tmp_charge[0] == 0 &&
493 tmp_charge[1] != 0 &&
494 tmp_charge[2] == tmp_charge[1] &&
495 tmp_charge[3] != 0)
497 srenew(solvent_parameters, *n_solvent_parameters+1);
498 solvent_parameters[*n_solvent_parameters].model = esolTIP4P;
499 solvent_parameters[*n_solvent_parameters].count = nmol;
500 for (k = 0; k < 4; k++)
502 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
503 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
506 *cg_sp = *n_solvent_parameters;
507 (*n_solvent_parameters)++;
511 *solvent_parameters_p = solvent_parameters;
514 static void
515 check_solvent(FILE * fp,
516 const gmx_mtop_t * mtop,
517 t_forcerec * fr,
518 cginfo_mb_t *cginfo_mb)
520 const t_block * cgs;
521 const gmx_moltype_t *molt;
522 int mb, mol, cg_mol, at_offset, am, cgm, i, nmol_ch, nmol;
523 int n_solvent_parameters;
524 solvent_parameters_t *solvent_parameters;
525 int **cg_sp;
526 int bestsp, bestsol;
528 if (debug)
530 fprintf(debug, "Going to determine what solvent types we have.\n");
533 n_solvent_parameters = 0;
534 solvent_parameters = NULL;
535 /* Allocate temporary array for solvent type */
536 snew(cg_sp, mtop->nmolblock);
538 at_offset = 0;
539 for (mb = 0; mb < mtop->nmolblock; mb++)
541 molt = &mtop->moltype[mtop->molblock[mb].type];
542 cgs = &molt->cgs;
543 /* Here we have to loop over all individual molecules
544 * because we need to check for QMMM particles.
546 snew(cg_sp[mb], cginfo_mb[mb].cg_mod);
547 nmol_ch = cginfo_mb[mb].cg_mod/cgs->nr;
548 nmol = mtop->molblock[mb].nmol/nmol_ch;
549 for (mol = 0; mol < nmol_ch; mol++)
551 cgm = mol*cgs->nr;
552 am = mol*cgs->index[cgs->nr];
553 for (cg_mol = 0; cg_mol < cgs->nr; cg_mol++)
555 check_solvent_cg(molt, cg_mol, nmol,
556 mtop->groups.grpnr[egcQMMM] ?
557 mtop->groups.grpnr[egcQMMM]+at_offset+am : 0,
558 &mtop->groups.grps[egcQMMM],
560 &n_solvent_parameters, &solvent_parameters,
561 cginfo_mb[mb].cginfo[cgm+cg_mol],
562 &cg_sp[mb][cgm+cg_mol]);
565 at_offset += cgs->index[cgs->nr];
568 /* Puh! We finished going through all charge groups.
569 * Now find the most common solvent model.
572 /* Most common solvent this far */
573 bestsp = -2;
574 for (i = 0; i < n_solvent_parameters; i++)
576 if (bestsp == -2 ||
577 solvent_parameters[i].count > solvent_parameters[bestsp].count)
579 bestsp = i;
583 if (bestsp >= 0)
585 bestsol = solvent_parameters[bestsp].model;
587 else
589 bestsol = esolNO;
592 fr->nWatMol = 0;
593 for (mb = 0; mb < mtop->nmolblock; mb++)
595 cgs = &mtop->moltype[mtop->molblock[mb].type].cgs;
596 nmol = (mtop->molblock[mb].nmol*cgs->nr)/cginfo_mb[mb].cg_mod;
597 for (i = 0; i < cginfo_mb[mb].cg_mod; i++)
599 if (cg_sp[mb][i] == bestsp)
601 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i], bestsol);
602 fr->nWatMol += nmol;
604 else
606 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i], esolNO);
609 sfree(cg_sp[mb]);
611 sfree(cg_sp);
613 if (bestsol != esolNO && fp != NULL)
615 fprintf(fp, "\nEnabling %s-like water optimization for %d molecules.\n\n",
616 esol_names[bestsol],
617 solvent_parameters[bestsp].count);
620 sfree(solvent_parameters);
621 fr->solvent_opt = bestsol;
/* Per-atom constraint treatment, recorded per atom while building cginfo */
enum {
    acNONE = 0, acCONSTRAINT, acSETTLE
};
628 static cginfo_mb_t *init_cginfo_mb(FILE *fplog, const gmx_mtop_t *mtop,
629 t_forcerec *fr, gmx_bool bNoSolvOpt,
630 gmx_bool *bFEP_NonBonded,
631 gmx_bool *bExcl_IntraCGAll_InterCGNone)
633 const t_block *cgs;
634 const t_blocka *excl;
635 const gmx_moltype_t *molt;
636 const gmx_molblock_t *molb;
637 cginfo_mb_t *cginfo_mb;
638 gmx_bool *type_VDW;
639 int *cginfo;
640 int cg_offset, a_offset;
641 int mb, m, cg, a0, a1, gid, ai, j, aj, excl_nalloc;
642 int *a_con;
643 int ftype;
644 int ia;
645 gmx_bool bId, *bExcl, bExclIntraAll, bExclInter, bHaveVDW, bHaveQ, bHavePerturbedAtoms;
647 snew(cginfo_mb, mtop->nmolblock);
649 snew(type_VDW, fr->ntype);
650 for (ai = 0; ai < fr->ntype; ai++)
652 type_VDW[ai] = FALSE;
653 for (j = 0; j < fr->ntype; j++)
655 type_VDW[ai] = type_VDW[ai] ||
656 fr->bBHAM ||
657 C6(fr->nbfp, fr->ntype, ai, j) != 0 ||
658 C12(fr->nbfp, fr->ntype, ai, j) != 0;
662 *bFEP_NonBonded = FALSE;
663 *bExcl_IntraCGAll_InterCGNone = TRUE;
665 excl_nalloc = 10;
666 snew(bExcl, excl_nalloc);
667 cg_offset = 0;
668 a_offset = 0;
669 for (mb = 0; mb < mtop->nmolblock; mb++)
671 molb = &mtop->molblock[mb];
672 molt = &mtop->moltype[molb->type];
673 cgs = &molt->cgs;
674 excl = &molt->excls;
676 /* Check if the cginfo is identical for all molecules in this block.
677 * If so, we only need an array of the size of one molecule.
678 * Otherwise we make an array of #mol times #cgs per molecule.
680 bId = TRUE;
681 for (m = 0; m < molb->nmol; m++)
683 int am = m*cgs->index[cgs->nr];
684 for (cg = 0; cg < cgs->nr; cg++)
686 a0 = cgs->index[cg];
687 a1 = cgs->index[cg+1];
688 if (ggrpnr(&mtop->groups, egcENER, a_offset+am+a0) !=
689 ggrpnr(&mtop->groups, egcENER, a_offset +a0))
691 bId = FALSE;
693 if (mtop->groups.grpnr[egcQMMM] != NULL)
695 for (ai = a0; ai < a1; ai++)
697 if (mtop->groups.grpnr[egcQMMM][a_offset+am+ai] !=
698 mtop->groups.grpnr[egcQMMM][a_offset +ai])
700 bId = FALSE;
707 cginfo_mb[mb].cg_start = cg_offset;
708 cginfo_mb[mb].cg_end = cg_offset + molb->nmol*cgs->nr;
709 cginfo_mb[mb].cg_mod = (bId ? 1 : molb->nmol)*cgs->nr;
710 snew(cginfo_mb[mb].cginfo, cginfo_mb[mb].cg_mod);
711 cginfo = cginfo_mb[mb].cginfo;
713 /* Set constraints flags for constrained atoms */
714 snew(a_con, molt->atoms.nr);
715 for (ftype = 0; ftype < F_NRE; ftype++)
717 if (interaction_function[ftype].flags & IF_CONSTRAINT)
719 int nral;
721 nral = NRAL(ftype);
722 for (ia = 0; ia < molt->ilist[ftype].nr; ia += 1+nral)
724 int a;
726 for (a = 0; a < nral; a++)
728 a_con[molt->ilist[ftype].iatoms[ia+1+a]] =
729 (ftype == F_SETTLE ? acSETTLE : acCONSTRAINT);
735 for (m = 0; m < (bId ? 1 : molb->nmol); m++)
737 int cgm = m*cgs->nr;
738 int am = m*cgs->index[cgs->nr];
739 for (cg = 0; cg < cgs->nr; cg++)
741 a0 = cgs->index[cg];
742 a1 = cgs->index[cg+1];
744 /* Store the energy group in cginfo */
745 gid = ggrpnr(&mtop->groups, egcENER, a_offset+am+a0);
746 SET_CGINFO_GID(cginfo[cgm+cg], gid);
748 /* Check the intra/inter charge group exclusions */
749 if (a1-a0 > excl_nalloc)
751 excl_nalloc = a1 - a0;
752 srenew(bExcl, excl_nalloc);
754 /* bExclIntraAll: all intra cg interactions excluded
755 * bExclInter: any inter cg interactions excluded
757 bExclIntraAll = TRUE;
758 bExclInter = FALSE;
759 bHaveVDW = FALSE;
760 bHaveQ = FALSE;
761 bHavePerturbedAtoms = FALSE;
762 for (ai = a0; ai < a1; ai++)
764 /* Check VDW and electrostatic interactions */
765 bHaveVDW = bHaveVDW || (type_VDW[molt->atoms.atom[ai].type] ||
766 type_VDW[molt->atoms.atom[ai].typeB]);
767 bHaveQ = bHaveQ || (molt->atoms.atom[ai].q != 0 ||
768 molt->atoms.atom[ai].qB != 0);
770 bHavePerturbedAtoms = bHavePerturbedAtoms || (PERTURBED(molt->atoms.atom[ai]) != 0);
772 /* Clear the exclusion list for atom ai */
773 for (aj = a0; aj < a1; aj++)
775 bExcl[aj-a0] = FALSE;
777 /* Loop over all the exclusions of atom ai */
778 for (j = excl->index[ai]; j < excl->index[ai+1]; j++)
780 aj = excl->a[j];
781 if (aj < a0 || aj >= a1)
783 bExclInter = TRUE;
785 else
787 bExcl[aj-a0] = TRUE;
790 /* Check if ai excludes a0 to a1 */
791 for (aj = a0; aj < a1; aj++)
793 if (!bExcl[aj-a0])
795 bExclIntraAll = FALSE;
799 switch (a_con[ai])
801 case acCONSTRAINT:
802 SET_CGINFO_CONSTR(cginfo[cgm+cg]);
803 break;
804 case acSETTLE:
805 SET_CGINFO_SETTLE(cginfo[cgm+cg]);
806 break;
807 default:
808 break;
811 if (bExclIntraAll)
813 SET_CGINFO_EXCL_INTRA(cginfo[cgm+cg]);
815 if (bExclInter)
817 SET_CGINFO_EXCL_INTER(cginfo[cgm+cg]);
819 if (a1 - a0 > MAX_CHARGEGROUP_SIZE)
821 /* The size in cginfo is currently only read with DD */
822 gmx_fatal(FARGS, "A charge group has size %d which is larger than the limit of %d atoms", a1-a0, MAX_CHARGEGROUP_SIZE);
824 if (bHaveVDW)
826 SET_CGINFO_HAS_VDW(cginfo[cgm+cg]);
828 if (bHaveQ)
830 SET_CGINFO_HAS_Q(cginfo[cgm+cg]);
832 if (bHavePerturbedAtoms && fr->efep != efepNO)
834 SET_CGINFO_FEP(cginfo[cgm+cg]);
835 *bFEP_NonBonded = TRUE;
837 /* Store the charge group size */
838 SET_CGINFO_NATOMS(cginfo[cgm+cg], a1-a0);
840 if (!bExclIntraAll || bExclInter)
842 *bExcl_IntraCGAll_InterCGNone = FALSE;
847 sfree(a_con);
849 cg_offset += molb->nmol*cgs->nr;
850 a_offset += molb->nmol*cgs->index[cgs->nr];
852 sfree(bExcl);
854 /* the solvent optimizer is called after the QM is initialized,
855 * because we don't want to have the QM subsystemto become an
856 * optimized solvent
859 check_solvent(fplog, mtop, fr, cginfo_mb);
861 if (getenv("GMX_NO_SOLV_OPT"))
863 if (fplog)
865 fprintf(fplog, "Found environment variable GMX_NO_SOLV_OPT.\n"
866 "Disabling all solvent optimization\n");
868 fr->solvent_opt = esolNO;
870 if (bNoSolvOpt)
872 fr->solvent_opt = esolNO;
874 if (!fr->solvent_opt)
876 for (mb = 0; mb < mtop->nmolblock; mb++)
878 for (cg = 0; cg < cginfo_mb[mb].cg_mod; cg++)
880 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[cg], esolNO);
885 return cginfo_mb;
888 static int *cginfo_expand(int nmb, cginfo_mb_t *cgi_mb)
890 int ncg, mb, cg;
891 int *cginfo;
893 ncg = cgi_mb[nmb-1].cg_end;
894 snew(cginfo, ncg);
895 mb = 0;
896 for (cg = 0; cg < ncg; cg++)
898 while (cg >= cgi_mb[mb].cg_end)
900 mb++;
902 cginfo[cg] =
903 cgi_mb[mb].cginfo[(cg - cgi_mb[mb].cg_start) % cgi_mb[mb].cg_mod];
906 return cginfo;
909 static void set_chargesum(FILE *log, t_forcerec *fr, const gmx_mtop_t *mtop)
911 /*This now calculates sum for q and c6*/
912 double qsum, q2sum, q, c6sum, c6;
913 int mb, nmol, i;
914 const t_atoms *atoms;
916 qsum = 0;
917 q2sum = 0;
918 c6sum = 0;
919 for (mb = 0; mb < mtop->nmolblock; mb++)
921 nmol = mtop->molblock[mb].nmol;
922 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
923 for (i = 0; i < atoms->nr; i++)
925 q = atoms->atom[i].q;
926 qsum += nmol*q;
927 q2sum += nmol*q*q;
928 c6 = mtop->ffparams.iparams[atoms->atom[i].type*(mtop->ffparams.atnr+1)].lj.c6;
929 c6sum += nmol*c6;
932 fr->qsum[0] = qsum;
933 fr->q2sum[0] = q2sum;
934 fr->c6sum[0] = c6sum;
936 if (fr->efep != efepNO)
938 qsum = 0;
939 q2sum = 0;
940 c6sum = 0;
941 for (mb = 0; mb < mtop->nmolblock; mb++)
943 nmol = mtop->molblock[mb].nmol;
944 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
945 for (i = 0; i < atoms->nr; i++)
947 q = atoms->atom[i].qB;
948 qsum += nmol*q;
949 q2sum += nmol*q*q;
950 c6 = mtop->ffparams.iparams[atoms->atom[i].typeB*(mtop->ffparams.atnr+1)].lj.c6;
951 c6sum += nmol*c6;
953 fr->qsum[1] = qsum;
954 fr->q2sum[1] = q2sum;
955 fr->c6sum[1] = c6sum;
958 else
960 fr->qsum[1] = fr->qsum[0];
961 fr->q2sum[1] = fr->q2sum[0];
962 fr->c6sum[1] = fr->c6sum[0];
964 if (log)
966 if (fr->efep == efepNO)
968 fprintf(log, "System total charge: %.3f\n", fr->qsum[0]);
970 else
972 fprintf(log, "System total charge, top. A: %.3f top. B: %.3f\n",
973 fr->qsum[0], fr->qsum[1]);
978 void update_forcerec(t_forcerec *fr, matrix box)
980 if (fr->eeltype == eelGRF)
982 calc_rffac(NULL, fr->eeltype, fr->epsilon_r, fr->epsilon_rf,
983 fr->rcoulomb, fr->temp, fr->zsquare, box,
984 &fr->kappa, &fr->k_rf, &fr->c_rf);
988 void set_avcsixtwelve(FILE *fplog, t_forcerec *fr, const gmx_mtop_t *mtop)
990 const t_atoms *atoms, *atoms_tpi;
991 const t_blocka *excl;
992 int mb, nmol, nmolc, i, j, tpi, tpj, j1, j2, k, nexcl, q;
993 gmx_int64_t npair, npair_ij, tmpi, tmpj;
994 double csix, ctwelve;
995 int ntp, *typecount;
996 gmx_bool bBHAM;
997 real *nbfp;
998 real *nbfp_comb = NULL;
1000 ntp = fr->ntype;
1001 bBHAM = fr->bBHAM;
1002 nbfp = fr->nbfp;
1004 /* For LJ-PME, we want to correct for the difference between the
1005 * actual C6 values and the C6 values used by the LJ-PME based on
1006 * combination rules. */
1008 if (EVDW_PME(fr->vdwtype))
1010 nbfp_comb = mk_nbfp_combination_rule(&mtop->ffparams,
1011 (fr->ljpme_combination_rule == eljpmeLB) ? eCOMB_ARITHMETIC : eCOMB_GEOMETRIC);
1012 for (tpi = 0; tpi < ntp; ++tpi)
1014 for (tpj = 0; tpj < ntp; ++tpj)
1016 C6(nbfp_comb, ntp, tpi, tpj) =
1017 C6(nbfp, ntp, tpi, tpj) - C6(nbfp_comb, ntp, tpi, tpj);
1018 C12(nbfp_comb, ntp, tpi, tpj) = C12(nbfp, ntp, tpi, tpj);
1021 nbfp = nbfp_comb;
1023 for (q = 0; q < (fr->efep == efepNO ? 1 : 2); q++)
1025 csix = 0;
1026 ctwelve = 0;
1027 npair = 0;
1028 nexcl = 0;
1029 if (!fr->n_tpi)
1031 /* Count the types so we avoid natoms^2 operations */
1032 snew(typecount, ntp);
1033 gmx_mtop_count_atomtypes(mtop, q, typecount);
1035 for (tpi = 0; tpi < ntp; tpi++)
1037 for (tpj = tpi; tpj < ntp; tpj++)
1039 tmpi = typecount[tpi];
1040 tmpj = typecount[tpj];
1041 if (tpi != tpj)
1043 npair_ij = tmpi*tmpj;
1045 else
1047 npair_ij = tmpi*(tmpi - 1)/2;
1049 if (bBHAM)
1051 /* nbfp now includes the 6.0 derivative prefactor */
1052 csix += npair_ij*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1054 else
1056 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1057 csix += npair_ij* C6(nbfp, ntp, tpi, tpj)/6.0;
1058 ctwelve += npair_ij* C12(nbfp, ntp, tpi, tpj)/12.0;
1060 npair += npair_ij;
1063 sfree(typecount);
1064 /* Subtract the excluded pairs.
1065 * The main reason for substracting exclusions is that in some cases
1066 * some combinations might never occur and the parameters could have
1067 * any value. These unused values should not influence the dispersion
1068 * correction.
1070 for (mb = 0; mb < mtop->nmolblock; mb++)
1072 nmol = mtop->molblock[mb].nmol;
1073 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
1074 excl = &mtop->moltype[mtop->molblock[mb].type].excls;
1075 for (i = 0; (i < atoms->nr); i++)
1077 if (q == 0)
1079 tpi = atoms->atom[i].type;
1081 else
1083 tpi = atoms->atom[i].typeB;
1085 j1 = excl->index[i];
1086 j2 = excl->index[i+1];
1087 for (j = j1; j < j2; j++)
1089 k = excl->a[j];
1090 if (k > i)
1092 if (q == 0)
1094 tpj = atoms->atom[k].type;
1096 else
1098 tpj = atoms->atom[k].typeB;
1100 if (bBHAM)
1102 /* nbfp now includes the 6.0 derivative prefactor */
1103 csix -= nmol*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1105 else
1107 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1108 csix -= nmol*C6 (nbfp, ntp, tpi, tpj)/6.0;
1109 ctwelve -= nmol*C12(nbfp, ntp, tpi, tpj)/12.0;
1111 nexcl += nmol;
1117 else
1119 /* Only correct for the interaction of the test particle
1120 * with the rest of the system.
1122 atoms_tpi =
1123 &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].atoms;
1125 npair = 0;
1126 for (mb = 0; mb < mtop->nmolblock; mb++)
1128 nmol = mtop->molblock[mb].nmol;
1129 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
1130 for (j = 0; j < atoms->nr; j++)
1132 nmolc = nmol;
1133 /* Remove the interaction of the test charge group
1134 * with itself.
1136 if (mb == mtop->nmolblock-1)
1138 nmolc--;
1140 if (mb == 0 && nmol == 1)
1142 gmx_fatal(FARGS, "Old format tpr with TPI, please generate a new tpr file");
1145 if (q == 0)
1147 tpj = atoms->atom[j].type;
1149 else
1151 tpj = atoms->atom[j].typeB;
1153 for (i = 0; i < fr->n_tpi; i++)
1155 if (q == 0)
1157 tpi = atoms_tpi->atom[i].type;
1159 else
1161 tpi = atoms_tpi->atom[i].typeB;
1163 if (bBHAM)
1165 /* nbfp now includes the 6.0 derivative prefactor */
1166 csix += nmolc*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1168 else
1170 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1171 csix += nmolc*C6 (nbfp, ntp, tpi, tpj)/6.0;
1172 ctwelve += nmolc*C12(nbfp, ntp, tpi, tpj)/12.0;
1174 npair += nmolc;
1179 if (npair - nexcl <= 0 && fplog)
1181 fprintf(fplog, "\nWARNING: There are no atom pairs for dispersion correction\n\n");
1182 csix = 0;
1183 ctwelve = 0;
1185 else
1187 csix /= npair - nexcl;
1188 ctwelve /= npair - nexcl;
1190 if (debug)
1192 fprintf(debug, "Counted %d exclusions\n", nexcl);
1193 fprintf(debug, "Average C6 parameter is: %10g\n", (double)csix);
1194 fprintf(debug, "Average C12 parameter is: %10g\n", (double)ctwelve);
1196 fr->avcsix[q] = csix;
1197 fr->avctwelve[q] = ctwelve;
1200 if (EVDW_PME(fr->vdwtype))
1202 sfree(nbfp_comb);
1205 if (fplog != NULL)
1207 if (fr->eDispCorr == edispcAllEner ||
1208 fr->eDispCorr == edispcAllEnerPres)
1210 fprintf(fplog, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1211 fr->avcsix[0], fr->avctwelve[0]);
1213 else
1215 fprintf(fplog, "Long Range LJ corr.: <C6> %10.4e\n", fr->avcsix[0]);
1221 static void set_bham_b_max(FILE *fplog, t_forcerec *fr,
1222 const gmx_mtop_t *mtop)
1224 const t_atoms *at1, *at2;
1225 int mt1, mt2, i, j, tpi, tpj, ntypes;
1226 real b, bmin;
1227 real *nbfp;
1229 if (fplog)
1231 fprintf(fplog, "Determining largest Buckingham b parameter for table\n");
1233 nbfp = fr->nbfp;
1234 ntypes = fr->ntype;
1236 bmin = -1;
1237 fr->bham_b_max = 0;
1238 for (mt1 = 0; mt1 < mtop->nmoltype; mt1++)
1240 at1 = &mtop->moltype[mt1].atoms;
1241 for (i = 0; (i < at1->nr); i++)
1243 tpi = at1->atom[i].type;
1244 if (tpi >= ntypes)
1246 gmx_fatal(FARGS, "Atomtype[%d] = %d, maximum = %d", i, tpi, ntypes);
1249 for (mt2 = mt1; mt2 < mtop->nmoltype; mt2++)
1251 at2 = &mtop->moltype[mt2].atoms;
1252 for (j = 0; (j < at2->nr); j++)
1254 tpj = at2->atom[j].type;
1255 if (tpj >= ntypes)
1257 gmx_fatal(FARGS, "Atomtype[%d] = %d, maximum = %d", j, tpj, ntypes);
1259 b = BHAMB(nbfp, ntypes, tpi, tpj);
1260 if (b > fr->bham_b_max)
1262 fr->bham_b_max = b;
1264 if ((b < bmin) || (bmin == -1))
1266 bmin = b;
1272 if (fplog)
1274 fprintf(fplog, "Buckingham b parameters, min: %g, max: %g\n",
1275 bmin, fr->bham_b_max);
1279 static void make_nbf_tables(FILE *fp, const output_env_t oenv,
1280 t_forcerec *fr, real rtab,
1281 const t_commrec *cr,
1282 const char *tabfn, char *eg1, char *eg2,
1283 t_nblists *nbl)
1285 char buf[STRLEN];
1286 int i, j;
1288 if (tabfn == NULL)
1290 if (debug)
1292 fprintf(debug, "No table file name passed, can not read table, can not do non-bonded interactions\n");
1294 return;
1297 sprintf(buf, "%s", tabfn);
1298 if (eg1 && eg2)
1300 /* Append the two energy group names */
1301 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1, "_%s_%s.%s",
1302 eg1, eg2, ftp2ext(efXVG));
1304 nbl->table_elec_vdw = make_tables(fp, oenv, fr, MASTER(cr), buf, rtab, 0);
1305 /* Copy the contents of the table to separate coulomb and LJ tables too,
1306 * to improve cache performance.
1308 /* For performance reasons we want
1309 * the table data to be aligned to 16-byte. The pointers could be freed
1310 * but currently aren't.
1312 nbl->table_elec.interaction = GMX_TABLE_INTERACTION_ELEC;
1313 nbl->table_elec.format = nbl->table_elec_vdw.format;
1314 nbl->table_elec.r = nbl->table_elec_vdw.r;
1315 nbl->table_elec.n = nbl->table_elec_vdw.n;
1316 nbl->table_elec.scale = nbl->table_elec_vdw.scale;
1317 nbl->table_elec.scale_exp = nbl->table_elec_vdw.scale_exp;
1318 nbl->table_elec.formatsize = nbl->table_elec_vdw.formatsize;
1319 nbl->table_elec.ninteractions = 1;
1320 nbl->table_elec.stride = nbl->table_elec.formatsize * nbl->table_elec.ninteractions;
1321 snew_aligned(nbl->table_elec.data, nbl->table_elec.stride*(nbl->table_elec.n+1), 32);
1323 nbl->table_vdw.interaction = GMX_TABLE_INTERACTION_VDWREP_VDWDISP;
1324 nbl->table_vdw.format = nbl->table_elec_vdw.format;
1325 nbl->table_vdw.r = nbl->table_elec_vdw.r;
1326 nbl->table_vdw.n = nbl->table_elec_vdw.n;
1327 nbl->table_vdw.scale = nbl->table_elec_vdw.scale;
1328 nbl->table_vdw.scale_exp = nbl->table_elec_vdw.scale_exp;
1329 nbl->table_vdw.formatsize = nbl->table_elec_vdw.formatsize;
1330 nbl->table_vdw.ninteractions = 2;
1331 nbl->table_vdw.stride = nbl->table_vdw.formatsize * nbl->table_vdw.ninteractions;
1332 snew_aligned(nbl->table_vdw.data, nbl->table_vdw.stride*(nbl->table_vdw.n+1), 32);
1334 for (i = 0; i <= nbl->table_elec_vdw.n; i++)
1336 for (j = 0; j < 4; j++)
1338 nbl->table_elec.data[4*i+j] = nbl->table_elec_vdw.data[12*i+j];
1340 for (j = 0; j < 8; j++)
1342 nbl->table_vdw.data[8*i+j] = nbl->table_elec_vdw.data[12*i+4+j];
1347 static void count_tables(int ftype1, int ftype2, const gmx_mtop_t *mtop,
1348 int *ncount, int **count)
1350 const gmx_moltype_t *molt;
1351 const t_ilist *il;
1352 int mt, ftype, stride, i, j, tabnr;
1354 for (mt = 0; mt < mtop->nmoltype; mt++)
1356 molt = &mtop->moltype[mt];
1357 for (ftype = 0; ftype < F_NRE; ftype++)
1359 if (ftype == ftype1 || ftype == ftype2)
1361 il = &molt->ilist[ftype];
1362 stride = 1 + NRAL(ftype);
1363 for (i = 0; i < il->nr; i += stride)
1365 tabnr = mtop->ffparams.iparams[il->iatoms[i]].tab.table;
1366 if (tabnr < 0)
1368 gmx_fatal(FARGS, "A bonded table number is smaller than 0: %d\n", tabnr);
1370 if (tabnr >= *ncount)
1372 srenew(*count, tabnr+1);
1373 for (j = *ncount; j < tabnr+1; j++)
1375 (*count)[j] = 0;
1377 *ncount = tabnr+1;
1379 (*count)[tabnr]++;
1386 static bondedtable_t *make_bonded_tables(FILE *fplog,
1387 int ftype1, int ftype2,
1388 const gmx_mtop_t *mtop,
1389 const char *basefn, const char *tabext)
1391 int i, ncount, *count;
1392 char tabfn[STRLEN];
1393 bondedtable_t *tab;
1395 tab = NULL;
1397 ncount = 0;
1398 count = NULL;
1399 count_tables(ftype1, ftype2, mtop, &ncount, &count);
1401 if (ncount > 0)
1403 snew(tab, ncount);
1404 for (i = 0; i < ncount; i++)
1406 if (count[i] > 0)
1408 sprintf(tabfn, "%s", basefn);
1409 sprintf(tabfn + strlen(basefn) - strlen(ftp2ext(efXVG)) - 1, "_%s%d.%s",
1410 tabext, i, ftp2ext(efXVG));
1411 tab[i] = make_bonded_table(fplog, tabfn, NRAL(ftype1)-2);
1414 sfree(count);
1417 return tab;
1420 void forcerec_set_ranges(t_forcerec *fr,
1421 int ncg_home, int ncg_force,
1422 int natoms_force,
1423 int natoms_force_constr, int natoms_f_novirsum)
1425 fr->cg0 = 0;
1426 fr->hcg = ncg_home;
1428 /* fr->ncg_force is unused in the standard code,
1429 * but it can be useful for modified code dealing with charge groups.
1431 fr->ncg_force = ncg_force;
1432 fr->natoms_force = natoms_force;
1433 fr->natoms_force_constr = natoms_force_constr;
1435 if (fr->natoms_force_constr > fr->nalloc_force)
1437 fr->nalloc_force = over_alloc_dd(fr->natoms_force_constr);
1439 if (fr->bTwinRange)
1441 srenew(fr->f_twin, fr->nalloc_force);
1445 if (fr->bF_NoVirSum)
1447 fr->f_novirsum_n = natoms_f_novirsum;
1448 if (fr->f_novirsum_n > fr->f_novirsum_nalloc)
1450 fr->f_novirsum_nalloc = over_alloc_dd(fr->f_novirsum_n);
1451 srenew(fr->f_novirsum_alloc, fr->f_novirsum_nalloc);
1454 else
1456 fr->f_novirsum_n = 0;
1460 static real cutoff_inf(real cutoff)
1462 if (cutoff == 0)
1464 cutoff = GMX_CUTOFF_INF;
1467 return cutoff;
1470 static void make_adress_tf_tables(FILE *fp, const output_env_t oenv,
1471 t_forcerec *fr, const t_inputrec *ir,
1472 const char *tabfn, const gmx_mtop_t *mtop,
1473 matrix box)
1475 char buf[STRLEN];
1476 int i, j;
1478 if (tabfn == NULL)
1480 gmx_fatal(FARGS, "No thermoforce table file given. Use -tabletf to specify a file\n");
1481 return;
1484 snew(fr->atf_tabs, ir->adress->n_tf_grps);
1486 sprintf(buf, "%s", tabfn);
1487 for (i = 0; i < ir->adress->n_tf_grps; i++)
1489 j = ir->adress->tf_table_index[i]; /* get energy group index */
1490 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1, "tf_%s.%s",
1491 *(mtop->groups.grpname[mtop->groups.grps[egcENER].nm_ind[j]]), ftp2ext(efXVG));
1492 if (fp)
1494 fprintf(fp, "loading tf table for energygrp index %d from %s\n", ir->adress->tf_table_index[i], buf);
1496 fr->atf_tabs[i] = make_atf_table(fp, oenv, fr, buf, box);
1501 gmx_bool can_use_allvsall(const t_inputrec *ir, gmx_bool bPrintNote, t_commrec *cr, FILE *fp)
1503 gmx_bool bAllvsAll;
1505 bAllvsAll =
1507 ir->rlist == 0 &&
1508 ir->rcoulomb == 0 &&
1509 ir->rvdw == 0 &&
1510 ir->ePBC == epbcNONE &&
1511 ir->vdwtype == evdwCUT &&
1512 ir->coulombtype == eelCUT &&
1513 ir->efep == efepNO &&
1514 (ir->implicit_solvent == eisNO ||
1515 (ir->implicit_solvent == eisGBSA && (ir->gb_algorithm == egbSTILL ||
1516 ir->gb_algorithm == egbHCT ||
1517 ir->gb_algorithm == egbOBC))) &&
1518 getenv("GMX_NO_ALLVSALL") == NULL
1521 if (bAllvsAll && ir->opts.ngener > 1)
1523 const char *note = "NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
1525 if (bPrintNote)
1527 if (MASTER(cr))
1529 fprintf(stderr, "\n%s\n", note);
1531 if (fp != NULL)
1533 fprintf(fp, "\n%s\n", note);
1536 bAllvsAll = FALSE;
1539 if (bAllvsAll && fp && MASTER(cr))
1541 fprintf(fp, "\nUsing SIMD all-vs-all kernels.\n\n");
1544 return bAllvsAll;
1548 gmx_bool nbnxn_acceleration_supported(FILE *fplog,
1549 const t_commrec *cr,
1550 const t_inputrec *ir,
1551 gmx_bool bGPU)
1553 if (!bGPU && (ir->vdwtype == evdwPME && ir->ljpme_combination_rule == eljpmeLB))
1555 md_print_warn(cr, fplog, "LJ-PME with Lorentz-Berthelot is not supported with %s, falling back to %s\n",
1556 bGPU ? "GPUs" : "SIMD kernels",
1557 bGPU ? "CPU only" : "plain-C kernels");
1558 return FALSE;
1561 return TRUE;
1565 static void pick_nbnxn_kernel_cpu(const t_inputrec gmx_unused *ir,
1566 int *kernel_type,
1567 int *ewald_excl)
1569 *kernel_type = nbnxnk4x4_PlainC;
1570 *ewald_excl = ewaldexclTable;
1572 #ifdef GMX_NBNXN_SIMD
1574 #ifdef GMX_NBNXN_SIMD_4XN
1575 *kernel_type = nbnxnk4xN_SIMD_4xN;
1576 #endif
1577 #ifdef GMX_NBNXN_SIMD_2XNN
1578 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1579 #endif
1581 #if defined GMX_NBNXN_SIMD_2XNN && defined GMX_NBNXN_SIMD_4XN
1582 /* We need to choose if we want 2x(N+N) or 4xN kernels.
1583 * Currently this is based on the SIMD acceleration choice,
1584 * but it might be better to decide this at runtime based on CPU.
1586 * 4xN calculates more (zero) interactions, but has less pair-search
1587 * work and much better kernel instruction scheduling.
1589 * Up till now we have only seen that on Intel Sandy/Ivy Bridge,
1590 * which doesn't have FMA, both the analytical and tabulated Ewald
1591 * kernels have similar pair rates for 4x8 and 2x(4+4), so we choose
1592 * 2x(4+4) because it results in significantly fewer pairs.
1593 * For RF, the raw pair rate of the 4x8 kernel is higher than 2x(4+4),
1594 * 10% with HT, 50% without HT. As we currently don't detect the actual
1595 * use of HT, use 4x8 to avoid a potential performance hit.
1596 * On Intel Haswell 4x8 is always faster.
1598 *kernel_type = nbnxnk4xN_SIMD_4xN;
1600 #ifndef GMX_SIMD_HAVE_FMA
1601 if (EEL_PME_EWALD(ir->coulombtype) ||
1602 EVDW_PME(ir->vdwtype))
1604 /* We have Ewald kernels without FMA (Intel Sandy/Ivy Bridge).
1605 * There are enough instructions to make 2x(4+4) efficient.
1607 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1609 #endif
1610 #endif /* GMX_NBNXN_SIMD_2XNN && GMX_NBNXN_SIMD_4XN */
1613 if (getenv("GMX_NBNXN_SIMD_4XN") != NULL)
1615 #ifdef GMX_NBNXN_SIMD_4XN
1616 *kernel_type = nbnxnk4xN_SIMD_4xN;
1617 #else
1618 gmx_fatal(FARGS, "SIMD 4xN kernels requested, but GROMACS has been compiled without support for these kernels");
1619 #endif
1621 if (getenv("GMX_NBNXN_SIMD_2XNN") != NULL)
1623 #ifdef GMX_NBNXN_SIMD_2XNN
1624 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1625 #else
1626 gmx_fatal(FARGS, "SIMD 2x(N+N) kernels requested, but GROMACS has been compiled without support for these kernels");
1627 #endif
1630 /* Analytical Ewald exclusion correction is only an option in
1631 * the SIMD kernel.
1632 * Since table lookup's don't parallelize with SIMD, analytical
1633 * will probably always be faster for a SIMD width of 8 or more.
1634 * With FMA analytical is sometimes faster for a width if 4 as well.
1635 * On BlueGene/Q, this is faster regardless of precision.
1636 * In single precision, this is faster on Bulldozer.
1638 #if GMX_SIMD_REAL_WIDTH >= 8 || \
1639 (GMX_SIMD_REAL_WIDTH >= 4 && defined GMX_SIMD_HAVE_FMA && !defined GMX_DOUBLE) || \
1640 defined GMX_SIMD_IBM_QPX
1641 *ewald_excl = ewaldexclAnalytical;
1642 #endif
1643 if (getenv("GMX_NBNXN_EWALD_TABLE") != NULL)
1645 *ewald_excl = ewaldexclTable;
1647 if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != NULL)
1649 *ewald_excl = ewaldexclAnalytical;
1653 #endif /* GMX_NBNXN_SIMD */
1657 const char *lookup_nbnxn_kernel_name(int kernel_type)
1659 const char *returnvalue = NULL;
1660 switch (kernel_type)
1662 case nbnxnkNotSet:
1663 returnvalue = "not set";
1664 break;
1665 case nbnxnk4x4_PlainC:
1666 returnvalue = "plain C";
1667 break;
1668 case nbnxnk4xN_SIMD_4xN:
1669 case nbnxnk4xN_SIMD_2xNN:
1670 #ifdef GMX_NBNXN_SIMD
1671 #if defined GMX_SIMD_X86_SSE2
1672 returnvalue = "SSE2";
1673 #elif defined GMX_SIMD_X86_SSE4_1
1674 returnvalue = "SSE4.1";
1675 #elif defined GMX_SIMD_X86_AVX_128_FMA
1676 returnvalue = "AVX_128_FMA";
1677 #elif defined GMX_SIMD_X86_AVX_256
1678 returnvalue = "AVX_256";
1679 #elif defined GMX_SIMD_X86_AVX2_256
1680 returnvalue = "AVX2_256";
1681 #else
1682 returnvalue = "SIMD";
1683 #endif
1684 #else /* GMX_NBNXN_SIMD */
1685 returnvalue = "not available";
1686 #endif /* GMX_NBNXN_SIMD */
1687 break;
1688 case nbnxnk8x8x8_GPU: returnvalue = "GPU"; break;
1689 case nbnxnk8x8x8_PlainC: returnvalue = "plain C"; break;
1691 case nbnxnkNR:
1692 default:
1693 gmx_fatal(FARGS, "Illegal kernel type selected");
1694 returnvalue = NULL;
1695 break;
1697 return returnvalue;
1700 static void pick_nbnxn_kernel(FILE *fp,
1701 const t_commrec *cr,
1702 gmx_bool use_simd_kernels,
1703 gmx_bool bUseGPU,
1704 gmx_bool bEmulateGPU,
1705 const t_inputrec *ir,
1706 int *kernel_type,
1707 int *ewald_excl,
1708 gmx_bool bDoNonbonded)
1710 assert(kernel_type);
1712 *kernel_type = nbnxnkNotSet;
1713 *ewald_excl = ewaldexclTable;
1715 if (bEmulateGPU)
1717 *kernel_type = nbnxnk8x8x8_PlainC;
1719 if (bDoNonbonded)
1721 md_print_warn(cr, fp, "Emulating a GPU run on the CPU (slow)");
1724 else if (bUseGPU)
1726 *kernel_type = nbnxnk8x8x8_GPU;
1729 if (*kernel_type == nbnxnkNotSet)
1731 /* LJ PME with LB combination rule does 7 mesh operations.
1732 * This so slow that we don't compile SIMD non-bonded kernels for that.
1734 if (use_simd_kernels &&
1735 nbnxn_acceleration_supported(fp, cr, ir, FALSE))
1737 pick_nbnxn_kernel_cpu(ir, kernel_type, ewald_excl);
1739 else
1741 *kernel_type = nbnxnk4x4_PlainC;
1745 if (bDoNonbonded && fp != NULL)
1747 fprintf(fp, "\nUsing %s %dx%d non-bonded kernels\n\n",
1748 lookup_nbnxn_kernel_name(*kernel_type),
1749 nbnxn_kernel_to_cluster_i_size(*kernel_type),
1750 nbnxn_kernel_to_cluster_j_size(*kernel_type));
1752 if (nbnxnk4x4_PlainC == *kernel_type ||
1753 nbnxnk8x8x8_PlainC == *kernel_type)
1755 md_print_warn(cr, fp,
1756 "WARNING: Using the slow %s kernels. This should\n"
1757 "not happen during routine usage on supported platforms.\n\n",
1758 lookup_nbnxn_kernel_name(*kernel_type));
1763 static void pick_nbnxn_resources(FILE *fp,
1764 const t_commrec *cr,
1765 const gmx_hw_info_t *hwinfo,
1766 gmx_bool bDoNonbonded,
1767 gmx_bool *bUseGPU,
1768 gmx_bool *bEmulateGPU,
1769 const gmx_gpu_opt_t *gpu_opt)
1771 gmx_bool bEmulateGPUEnvVarSet;
1772 char gpu_err_str[STRLEN];
1774 *bUseGPU = FALSE;
1776 bEmulateGPUEnvVarSet = (getenv("GMX_EMULATE_GPU") != NULL);
1778 /* Run GPU emulation mode if GMX_EMULATE_GPU is defined. Because
1779 * GPUs (currently) only handle non-bonded calculations, we will
1780 * automatically switch to emulation if non-bonded calculations are
1781 * turned off via GMX_NO_NONBONDED - this is the simple and elegant
1782 * way to turn off GPU initialization, data movement, and cleanup.
1784 * GPU emulation can be useful to assess the performance one can expect by
1785 * adding GPU(s) to the machine. The conditional below allows this even
1786 * if mdrun is compiled without GPU acceleration support.
1787 * Note that you should freezing the system as otherwise it will explode.
1789 *bEmulateGPU = (bEmulateGPUEnvVarSet ||
1790 (!bDoNonbonded && gpu_opt->n_dev_use > 0));
1792 /* Enable GPU mode when GPUs are available or no GPU emulation is requested.
1794 if (gpu_opt->n_dev_use > 0 && !(*bEmulateGPU))
1796 /* Each PP node will use the intra-node id-th device from the
1797 * list of detected/selected GPUs. */
1798 if (!init_gpu(fp, cr->rank_pp_intranode, gpu_err_str,
1799 &hwinfo->gpu_info, gpu_opt))
1801 /* At this point the init should never fail as we made sure that
1802 * we have all the GPUs we need. If it still does, we'll bail. */
1803 /* TODO the decorating of gpu_err_str is nicer if it
1804 happens inside init_gpu. Out here, the decorating with
1805 the MPI rank makes sense. */
1806 gmx_fatal(FARGS, "On rank %d failed to initialize GPU #%d: %s",
1807 cr->nodeid,
1808 get_gpu_device_id(&hwinfo->gpu_info, gpu_opt,
1809 cr->rank_pp_intranode),
1810 gpu_err_str);
1813 /* Here we actually turn on hardware GPU acceleration */
1814 *bUseGPU = TRUE;
1818 gmx_bool uses_simple_tables(int cutoff_scheme,
1819 nonbonded_verlet_t *nbv,
1820 int group)
1822 gmx_bool bUsesSimpleTables = TRUE;
1823 int grp_index;
1825 switch (cutoff_scheme)
1827 case ecutsGROUP:
1828 bUsesSimpleTables = TRUE;
1829 break;
1830 case ecutsVERLET:
1831 assert(NULL != nbv && NULL != nbv->grp);
1832 grp_index = (group < 0) ? 0 : (nbv->ngrp - 1);
1833 bUsesSimpleTables = nbnxn_kernel_pairlist_simple(nbv->grp[grp_index].kernel_type);
1834 break;
1835 default:
1836 gmx_incons("unimplemented");
1838 return bUsesSimpleTables;
1841 static void init_ewald_f_table(interaction_const_t *ic,
1842 real rtab)
1844 real maxr;
1846 /* Get the Ewald table spacing based on Coulomb and/or LJ
1847 * Ewald coefficients and rtol.
1849 ic->tabq_scale = ewald_spline3_table_scale(ic);
1851 if (ic->cutoff_scheme == ecutsVERLET)
1853 maxr = ic->rcoulomb;
1855 else
1857 maxr = std::max(ic->rcoulomb, rtab);
1859 ic->tabq_size = static_cast<int>(maxr*ic->tabq_scale) + 2;
1861 sfree_aligned(ic->tabq_coul_FDV0);
1862 sfree_aligned(ic->tabq_coul_F);
1863 sfree_aligned(ic->tabq_coul_V);
1865 sfree_aligned(ic->tabq_vdw_FDV0);
1866 sfree_aligned(ic->tabq_vdw_F);
1867 sfree_aligned(ic->tabq_vdw_V);
1869 if (ic->eeltype == eelEWALD || EEL_PME(ic->eeltype))
1871 /* Create the original table data in FDV0 */
1872 snew_aligned(ic->tabq_coul_FDV0, ic->tabq_size*4, 32);
1873 snew_aligned(ic->tabq_coul_F, ic->tabq_size, 32);
1874 snew_aligned(ic->tabq_coul_V, ic->tabq_size, 32);
1875 table_spline3_fill_ewald_lr(ic->tabq_coul_F, ic->tabq_coul_V, ic->tabq_coul_FDV0,
1876 ic->tabq_size, 1/ic->tabq_scale, ic->ewaldcoeff_q, v_q_ewald_lr);
1879 if (EVDW_PME(ic->vdwtype))
1881 snew_aligned(ic->tabq_vdw_FDV0, ic->tabq_size*4, 32);
1882 snew_aligned(ic->tabq_vdw_F, ic->tabq_size, 32);
1883 snew_aligned(ic->tabq_vdw_V, ic->tabq_size, 32);
1884 table_spline3_fill_ewald_lr(ic->tabq_vdw_F, ic->tabq_vdw_V, ic->tabq_vdw_FDV0,
1885 ic->tabq_size, 1/ic->tabq_scale, ic->ewaldcoeff_lj, v_lj_ewald_lr);
1889 void init_interaction_const_tables(FILE *fp,
1890 interaction_const_t *ic,
1891 real rtab)
1893 if (ic->eeltype == eelEWALD || EEL_PME(ic->eeltype) || EVDW_PME(ic->vdwtype))
1895 init_ewald_f_table(ic, rtab);
1897 if (fp != NULL)
1899 fprintf(fp, "Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
1900 1/ic->tabq_scale, ic->tabq_size);
1905 static void clear_force_switch_constants(shift_consts_t *sc)
1907 sc->c2 = 0;
1908 sc->c3 = 0;
1909 sc->cpot = 0;
1912 static void force_switch_constants(real p,
1913 real rsw, real rc,
1914 shift_consts_t *sc)
1916 /* Here we determine the coefficient for shifting the force to zero
1917 * between distance rsw and the cut-off rc.
1918 * For a potential of r^-p, we have force p*r^-(p+1).
1919 * But to save flops we absorb p in the coefficient.
1920 * Thus we get:
1921 * force/p = r^-(p+1) + c2*r^2 + c3*r^3
1922 * potential = r^-p + c2/3*r^3 + c3/4*r^4 + cpot
1924 sc->c2 = ((p + 1)*rsw - (p + 4)*rc)/(pow(rc, p + 2)*pow(rc - rsw, 2));
1925 sc->c3 = -((p + 1)*rsw - (p + 3)*rc)/(pow(rc, p + 2)*pow(rc - rsw, 3));
1926 sc->cpot = -pow(rc, -p) + p*sc->c2/3*pow(rc - rsw, 3) + p*sc->c3/4*pow(rc - rsw, 4);
1929 static void potential_switch_constants(real rsw, real rc,
1930 switch_consts_t *sc)
1932 /* The switch function is 1 at rsw and 0 at rc.
1933 * The derivative and second derivate are zero at both ends.
1934 * rsw = max(r - r_switch, 0)
1935 * sw = 1 + c3*rsw^3 + c4*rsw^4 + c5*rsw^5
1936 * dsw = 3*c3*rsw^2 + 4*c4*rsw^3 + 5*c5*rsw^4
1937 * force = force*dsw - potential*sw
1938 * potential *= sw
1940 sc->c3 = -10*pow(rc - rsw, -3);
1941 sc->c4 = 15*pow(rc - rsw, -4);
1942 sc->c5 = -6*pow(rc - rsw, -5);
1945 /*! \brief Construct interaction constants
1947 * This data is used (particularly) by search and force code for
1948 * short-range interactions. Many of these are constant for the whole
1949 * simulation; some are constant only after PME tuning completes.
1951 static void
1952 init_interaction_const(FILE *fp,
1953 interaction_const_t **interaction_const,
1954 const t_forcerec *fr)
1956 interaction_const_t *ic;
1957 const real minusSix = -6.0;
1958 const real minusTwelve = -12.0;
1960 snew(ic, 1);
1962 ic->cutoff_scheme = fr->cutoff_scheme;
1964 /* Just allocate something so we can free it */
1965 snew_aligned(ic->tabq_coul_FDV0, 16, 32);
1966 snew_aligned(ic->tabq_coul_F, 16, 32);
1967 snew_aligned(ic->tabq_coul_V, 16, 32);
1969 ic->rlist = fr->rlist;
1970 ic->rlistlong = fr->rlistlong;
1972 /* Lennard-Jones */
1973 ic->vdwtype = fr->vdwtype;
1974 ic->vdw_modifier = fr->vdw_modifier;
1975 ic->rvdw = fr->rvdw;
1976 ic->rvdw_switch = fr->rvdw_switch;
1977 ic->ewaldcoeff_lj = fr->ewaldcoeff_lj;
1978 ic->ljpme_comb_rule = fr->ljpme_combination_rule;
1979 ic->sh_lj_ewald = 0;
1980 clear_force_switch_constants(&ic->dispersion_shift);
1981 clear_force_switch_constants(&ic->repulsion_shift);
1983 switch (ic->vdw_modifier)
1985 case eintmodPOTSHIFT:
1986 /* Only shift the potential, don't touch the force */
1987 ic->dispersion_shift.cpot = -pow(ic->rvdw, minusSix);
1988 ic->repulsion_shift.cpot = -pow(ic->rvdw, minusTwelve);
1989 if (EVDW_PME(ic->vdwtype))
1991 real crc2;
1993 crc2 = sqr(ic->ewaldcoeff_lj*ic->rvdw);
1994 ic->sh_lj_ewald = (exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)*pow(ic->rvdw, minusSix);
1996 break;
1997 case eintmodFORCESWITCH:
1998 /* Switch the force, switch and shift the potential */
1999 force_switch_constants(6.0, ic->rvdw_switch, ic->rvdw,
2000 &ic->dispersion_shift);
2001 force_switch_constants(12.0, ic->rvdw_switch, ic->rvdw,
2002 &ic->repulsion_shift);
2003 break;
2004 case eintmodPOTSWITCH:
2005 /* Switch the potential and force */
2006 potential_switch_constants(ic->rvdw_switch, ic->rvdw,
2007 &ic->vdw_switch);
2008 break;
2009 case eintmodNONE:
2010 case eintmodEXACTCUTOFF:
2011 /* Nothing to do here */
2012 break;
2013 default:
2014 gmx_incons("unimplemented potential modifier");
2017 ic->sh_invrc6 = -ic->dispersion_shift.cpot;
2019 /* Electrostatics */
2020 ic->eeltype = fr->eeltype;
2021 ic->coulomb_modifier = fr->coulomb_modifier;
2022 ic->rcoulomb = fr->rcoulomb;
2023 ic->epsilon_r = fr->epsilon_r;
2024 ic->epsfac = fr->epsfac;
2025 ic->ewaldcoeff_q = fr->ewaldcoeff_q;
2027 if (fr->coulomb_modifier == eintmodPOTSHIFT)
2029 ic->sh_ewald = gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb);
2031 else
2033 ic->sh_ewald = 0;
2036 /* Reaction-field */
2037 if (EEL_RF(ic->eeltype))
2039 ic->epsilon_rf = fr->epsilon_rf;
2040 ic->k_rf = fr->k_rf;
2041 ic->c_rf = fr->c_rf;
2043 else
2045 /* For plain cut-off we might use the reaction-field kernels */
2046 ic->epsilon_rf = ic->epsilon_r;
2047 ic->k_rf = 0;
2048 if (fr->coulomb_modifier == eintmodPOTSHIFT)
2050 ic->c_rf = 1/ic->rcoulomb;
2052 else
2054 ic->c_rf = 0;
2058 if (fp != NULL)
2060 real dispersion_shift;
2062 dispersion_shift = ic->dispersion_shift.cpot;
2063 if (EVDW_PME(ic->vdwtype))
2065 dispersion_shift -= ic->sh_lj_ewald;
2067 fprintf(fp, "Potential shift: LJ r^-12: %.3e r^-6: %.3e",
2068 ic->repulsion_shift.cpot, dispersion_shift);
2070 if (ic->eeltype == eelCUT)
2072 fprintf(fp, ", Coulomb %.e", -ic->c_rf);
2074 else if (EEL_PME(ic->eeltype))
2076 fprintf(fp, ", Ewald %.3e", -ic->sh_ewald);
2078 fprintf(fp, "\n");
2081 *interaction_const = ic;
2084 static void init_nb_verlet(FILE *fp,
2085 nonbonded_verlet_t **nb_verlet,
2086 gmx_bool bFEP_NonBonded,
2087 const t_inputrec *ir,
2088 const t_forcerec *fr,
2089 const t_commrec *cr,
2090 const char *nbpu_opt)
2092 nonbonded_verlet_t *nbv;
2093 int i;
2094 char *env;
2095 gmx_bool bEmulateGPU, bHybridGPURun = FALSE;
2097 nbnxn_alloc_t *nb_alloc;
2098 nbnxn_free_t *nb_free;
2100 snew(nbv, 1);
2102 pick_nbnxn_resources(fp, cr, fr->hwinfo,
2103 fr->bNonbonded,
2104 &nbv->bUseGPU,
2105 &bEmulateGPU,
2106 fr->gpu_opt);
2108 nbv->nbs = NULL;
2109 nbv->min_ci_balanced = 0;
2111 nbv->ngrp = (DOMAINDECOMP(cr) ? 2 : 1);
2112 for (i = 0; i < nbv->ngrp; i++)
2114 nbv->grp[i].nbl_lists.nnbl = 0;
2115 nbv->grp[i].nbat = NULL;
2116 nbv->grp[i].kernel_type = nbnxnkNotSet;
2118 if (i == 0) /* local */
2120 pick_nbnxn_kernel(fp, cr, fr->use_simd_kernels,
2121 nbv->bUseGPU, bEmulateGPU, ir,
2122 &nbv->grp[i].kernel_type,
2123 &nbv->grp[i].ewald_excl,
2124 fr->bNonbonded);
2126 else /* non-local */
2128 if (nbpu_opt != NULL && strcmp(nbpu_opt, "gpu_cpu") == 0)
2130 /* Use GPU for local, select a CPU kernel for non-local */
2131 pick_nbnxn_kernel(fp, cr, fr->use_simd_kernels,
2132 FALSE, FALSE, ir,
2133 &nbv->grp[i].kernel_type,
2134 &nbv->grp[i].ewald_excl,
2135 fr->bNonbonded);
2137 bHybridGPURun = TRUE;
2139 else
2141 /* Use the same kernel for local and non-local interactions */
2142 nbv->grp[i].kernel_type = nbv->grp[0].kernel_type;
2143 nbv->grp[i].ewald_excl = nbv->grp[0].ewald_excl;
2148 nbnxn_init_search(&nbv->nbs,
2149 DOMAINDECOMP(cr) ? &cr->dd->nc : NULL,
2150 DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : NULL,
2151 bFEP_NonBonded,
2152 gmx_omp_nthreads_get(emntPairsearch));
2154 for (i = 0; i < nbv->ngrp; i++)
2156 gpu_set_host_malloc_and_free(nbv->grp[0].kernel_type == nbnxnk8x8x8_GPU,
2157 &nb_alloc, &nb_free);
2159 nbnxn_init_pairlist_set(&nbv->grp[i].nbl_lists,
2160 nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
2161 /* 8x8x8 "non-simple" lists are ATM always combined */
2162 !nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
2163 nb_alloc, nb_free);
2165 if (i == 0 ||
2166 nbv->grp[0].kernel_type != nbv->grp[i].kernel_type)
2168 gmx_bool bSimpleList;
2169 int enbnxninitcombrule;
2171 bSimpleList = nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type);
2173 if (bSimpleList && (fr->vdwtype == evdwCUT && (fr->vdw_modifier == eintmodNONE || fr->vdw_modifier == eintmodPOTSHIFT)))
2175 /* Plain LJ cut-off: we can optimize with combination rules */
2176 enbnxninitcombrule = enbnxninitcombruleDETECT;
2178 else if (fr->vdwtype == evdwPME)
2180 /* LJ-PME: we need to use a combination rule for the grid */
2181 if (fr->ljpme_combination_rule == eljpmeGEOM)
2183 enbnxninitcombrule = enbnxninitcombruleGEOM;
2185 else
2187 enbnxninitcombrule = enbnxninitcombruleLB;
2190 else
2192 /* We use a full combination matrix: no rule required */
2193 enbnxninitcombrule = enbnxninitcombruleNONE;
2197 snew(nbv->grp[i].nbat, 1);
2198 nbnxn_atomdata_init(fp,
2199 nbv->grp[i].nbat,
2200 nbv->grp[i].kernel_type,
2201 enbnxninitcombrule,
2202 fr->ntype, fr->nbfp,
2203 ir->opts.ngener,
2204 bSimpleList ? gmx_omp_nthreads_get(emntNonbonded) : 1,
2205 nb_alloc, nb_free);
2207 else
2209 nbv->grp[i].nbat = nbv->grp[0].nbat;
2213 if (nbv->bUseGPU)
2215 /* init the NxN GPU data; the last argument tells whether we'll have
2216 * both local and non-local NB calculation on GPU */
2217 nbnxn_gpu_init(fp, &nbv->gpu_nbv,
2218 &fr->hwinfo->gpu_info,
2219 fr->gpu_opt,
2220 fr->ic,
2221 nbv->grp,
2222 cr->rank_pp_intranode,
2223 cr->nodeid,
2224 (nbv->ngrp > 1) && !bHybridGPURun);
2226 /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
2227 * also sharing texture references. To keep the code simple, we don't
2228 * treat texture references as shared resources, but this means that
2229 * the coulomb_tab and nbfp texture refs will get updated by multiple threads.
2230 * Hence, to ensure that the non-bonded kernels don't start before all
2231 * texture binding operations are finished, we need to wait for all ranks
2232 * to arrive here before continuing.
2234 * Note that we could omit this barrier if GPUs are not shared (or
2235 * texture objects are used), but as this is initialization code, there
2236 * is no point in complicating things.
2238 #ifdef GMX_THREAD_MPI
2239 if (PAR(cr))
2241 gmx_barrier(cr);
2243 #endif /* GMX_THREAD_MPI */
2245 if ((env = getenv("GMX_NB_MIN_CI")) != NULL)
2247 char *end;
2249 nbv->min_ci_balanced = strtol(env, &end, 10);
2250 if (!end || (*end != 0) || nbv->min_ci_balanced <= 0)
2252 gmx_fatal(FARGS, "Invalid value passed in GMX_NB_MIN_CI=%s, positive integer required", env);
2255 if (debug)
2257 fprintf(debug, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
2258 nbv->min_ci_balanced);
2261 else
2263 nbv->min_ci_balanced = nbnxn_gpu_min_ci_balanced(nbv->gpu_nbv);
2264 if (debug)
2266 fprintf(debug, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
2267 nbv->min_ci_balanced);
2273 *nb_verlet = nbv;
2276 gmx_bool usingGpu(nonbonded_verlet_t *nbv)
2278 return nbv != NULL && nbv->bUseGPU;
2281 void init_forcerec(FILE *fp,
2282 const output_env_t oenv,
2283 t_forcerec *fr,
2284 t_fcdata *fcd,
2285 const t_inputrec *ir,
2286 const gmx_mtop_t *mtop,
2287 const t_commrec *cr,
2288 matrix box,
2289 const char *tabfn,
2290 const char *tabafn,
2291 const char *tabpfn,
2292 const char *tabbfn,
2293 const char *nbpu_opt,
2294 gmx_bool bNoSolvOpt,
2295 real print_force)
2297 int i, m, negp_pp, negptable, egi, egj;
2298 real rtab;
2299 char *env;
2300 double dbl;
2301 const t_block *cgs;
2302 gmx_bool bGenericKernelOnly;
2303 gmx_bool bMakeTables, bMakeSeparate14Table, bSomeNormalNbListsAreInUse;
2304 gmx_bool bFEP_NonBonded;
2305 int *nm_ind, egp_flags;
2307 if (fr->hwinfo == NULL)
2309 /* Detect hardware, gather information.
2310 * In mdrun, hwinfo has already been set before calling init_forcerec.
2311 * Here we ignore GPUs, as tools will not use them anyhow.
2313 fr->hwinfo = gmx_detect_hardware(fp, cr, FALSE);
2316 /* By default we turn SIMD kernels on, but it might be turned off further down... */
2317 fr->use_simd_kernels = TRUE;
2319 fr->bDomDec = DOMAINDECOMP(cr);
2321 if (check_box(ir->ePBC, box))
2323 gmx_fatal(FARGS, check_box(ir->ePBC, box));
2326 /* Test particle insertion ? */
2327 if (EI_TPI(ir->eI))
2329 /* Set to the size of the molecule to be inserted (the last one) */
2330 /* Because of old style topologies, we have to use the last cg
2331 * instead of the last molecule type.
2333 cgs = &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].cgs;
2334 fr->n_tpi = cgs->index[cgs->nr] - cgs->index[cgs->nr-1];
2335 if (fr->n_tpi != mtop->mols.index[mtop->mols.nr] - mtop->mols.index[mtop->mols.nr-1])
2337 gmx_fatal(FARGS, "The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
2340 else
2342 fr->n_tpi = 0;
2345 /* Copy AdResS parameters */
2346 if (ir->bAdress)
2348 fr->adress_type = ir->adress->type;
2349 fr->adress_const_wf = ir->adress->const_wf;
2350 fr->adress_ex_width = ir->adress->ex_width;
2351 fr->adress_hy_width = ir->adress->hy_width;
2352 fr->adress_icor = ir->adress->icor;
2353 fr->adress_site = ir->adress->site;
2354 fr->adress_ex_forcecap = ir->adress->ex_forcecap;
2355 fr->adress_do_hybridpairs = ir->adress->do_hybridpairs;
2358 snew(fr->adress_group_explicit, ir->adress->n_energy_grps);
2359 for (i = 0; i < ir->adress->n_energy_grps; i++)
2361 fr->adress_group_explicit[i] = ir->adress->group_explicit[i];
2364 fr->n_adress_tf_grps = ir->adress->n_tf_grps;
2365 snew(fr->adress_tf_table_index, fr->n_adress_tf_grps);
2366 for (i = 0; i < fr->n_adress_tf_grps; i++)
2368 fr->adress_tf_table_index[i] = ir->adress->tf_table_index[i];
2370 copy_rvec(ir->adress->refs, fr->adress_refs);
2372 else
2374 fr->adress_type = eAdressOff;
2375 fr->adress_do_hybridpairs = FALSE;
2378 /* Copy the user determined parameters */
2379 fr->userint1 = ir->userint1;
2380 fr->userint2 = ir->userint2;
2381 fr->userint3 = ir->userint3;
2382 fr->userint4 = ir->userint4;
2383 fr->userreal1 = ir->userreal1;
2384 fr->userreal2 = ir->userreal2;
2385 fr->userreal3 = ir->userreal3;
2386 fr->userreal4 = ir->userreal4;
2388 /* Shell stuff */
2389 fr->fc_stepsize = ir->fc_stepsize;
2391 /* Free energy */
2392 fr->efep = ir->efep;
2393 fr->sc_alphavdw = ir->fepvals->sc_alpha;
2394 if (ir->fepvals->bScCoul)
2396 fr->sc_alphacoul = ir->fepvals->sc_alpha;
2397 fr->sc_sigma6_min = pow(ir->fepvals->sc_sigma_min, 6);
2399 else
2401 fr->sc_alphacoul = 0;
2402 fr->sc_sigma6_min = 0; /* only needed when bScCoul is on */
2404 fr->sc_power = ir->fepvals->sc_power;
2405 fr->sc_r_power = ir->fepvals->sc_r_power;
2406 fr->sc_sigma6_def = pow(ir->fepvals->sc_sigma, 6);
2408 env = getenv("GMX_SCSIGMA_MIN");
2409 if (env != NULL)
2411 dbl = 0;
2412 sscanf(env, "%20lf", &dbl);
2413 fr->sc_sigma6_min = pow(dbl, 6);
2414 if (fp)
2416 fprintf(fp, "Setting the minimum soft core sigma to %g nm\n", dbl);
2420 fr->bNonbonded = TRUE;
2421 if (getenv("GMX_NO_NONBONDED") != NULL)
2423 /* turn off non-bonded calculations */
2424 fr->bNonbonded = FALSE;
2425 md_print_warn(cr, fp,
2426 "Found environment variable GMX_NO_NONBONDED.\n"
2427 "Disabling nonbonded calculations.\n");
2430 bGenericKernelOnly = FALSE;
2432 /* We now check in the NS code whether a particular combination of interactions
2433 * can be used with water optimization, and disable it if that is not the case.
2436 if (getenv("GMX_NB_GENERIC") != NULL)
2438 if (fp != NULL)
2440 fprintf(fp,
2441 "Found environment variable GMX_NB_GENERIC.\n"
2442 "Disabling all interaction-specific nonbonded kernels, will only\n"
2443 "use the slow generic ones in src/gmxlib/nonbonded/nb_generic.c\n\n");
2445 bGenericKernelOnly = TRUE;
2448 if (bGenericKernelOnly == TRUE)
2450 bNoSolvOpt = TRUE;
2453 if ( (getenv("GMX_DISABLE_SIMD_KERNELS") != NULL) || (getenv("GMX_NOOPTIMIZEDKERNELS") != NULL) )
2455 fr->use_simd_kernels = FALSE;
2456 if (fp != NULL)
2458 fprintf(fp,
2459 "\nFound environment variable GMX_DISABLE_SIMD_KERNELS.\n"
2460 "Disabling the usage of any SIMD-specific non-bonded & bonded kernel routines\n"
2461 "(e.g. SSE2/SSE4.1/AVX).\n\n");
2465 fr->bBHAM = (mtop->ffparams.functype[0] == F_BHAM);
2467 /* Check if we can/should do all-vs-all kernels */
2468 fr->bAllvsAll = can_use_allvsall(ir, FALSE, NULL, NULL);
2469 fr->AllvsAll_work = NULL;
2470 fr->AllvsAll_workgb = NULL;
2472 /* All-vs-all kernels have not been implemented in 4.6 and later.
2473 * See Redmine #1249. */
2474 if (fr->bAllvsAll)
2476 fr->bAllvsAll = FALSE;
2477 if (fp != NULL)
2479 fprintf(fp,
2480 "\nYour simulation settings would have triggered the efficient all-vs-all\n"
2481 "kernels in GROMACS 4.5, but these have not been implemented in GROMACS\n"
2482 "4.6 and 5.x. If performance is important, please use GROMACS 4.5.7\n"
2483 "or try cutoff-scheme = Verlet.\n\n");
2487 /* Neighbour searching stuff */
2488 fr->cutoff_scheme = ir->cutoff_scheme;
2489 fr->bGrid = (ir->ns_type == ensGRID);
2490 fr->ePBC = ir->ePBC;
2492 if (fr->cutoff_scheme == ecutsGROUP)
2494 const char *note = "NOTE: This file uses the deprecated 'group' cutoff_scheme. This will be\n"
2495 "removed in a future release when 'verlet' supports all interaction forms.\n";
2497 if (MASTER(cr))
2499 fprintf(stderr, "\n%s\n", note);
2501 if (fp != NULL)
2503 fprintf(fp, "\n%s\n", note);
2507 /* Determine if we will do PBC for distances in bonded interactions */
2508 if (fr->ePBC == epbcNONE)
2510 fr->bMolPBC = FALSE;
2512 else
2514 if (!DOMAINDECOMP(cr))
2516 gmx_bool bSHAKE;
2518 bSHAKE = (ir->eConstrAlg == econtSHAKE &&
2519 (gmx_mtop_ftype_count(mtop, F_CONSTR) > 0 ||
2520 gmx_mtop_ftype_count(mtop, F_CONSTRNC) > 0));
2522 /* The group cut-off scheme and SHAKE assume charge groups
2523 * are whole, but not using molpbc is faster in most cases.
2524 * With intermolecular interactions we need PBC for calculating
2525 * distances between atoms in different molecules.
2527 if ((fr->cutoff_scheme == ecutsGROUP || bSHAKE) &&
2528 !mtop->bIntermolecularInteractions)
2530 fr->bMolPBC = ir->bPeriodicMols;
2532 if (bSHAKE && fr->bMolPBC)
2534 gmx_fatal(FARGS, "SHAKE is not supported with periodic molecules");
2537 else
2539 fr->bMolPBC = TRUE;
2541 if (getenv("GMX_USE_GRAPH") != NULL)
2543 fr->bMolPBC = FALSE;
2544 if (fp)
2546 md_print_warn(cr, fp, "GMX_USE_GRAPH is set, using the graph for bonded interactions\n");
2549 if (mtop->bIntermolecularInteractions)
2551 md_print_warn(cr, fp, "WARNING: Molecules linked by intermolecular interactions have to reside in the same periodic image, otherwise artifacts will occur!\n");
2555 if (bSHAKE && fr->bMolPBC)
2557 gmx_fatal(FARGS, "SHAKE is not properly supported with intermolecular interactions. For short simulations where linked molecules remain in the same periodic image, the environment variable GMX_USE_GRAPH can be used to override this check.\n");
2561 else
2563 fr->bMolPBC = dd_bonded_molpbc(cr->dd, fr->ePBC);
2566 fr->bGB = (ir->implicit_solvent == eisGBSA);
2568 fr->rc_scaling = ir->refcoord_scaling;
2569 copy_rvec(ir->posres_com, fr->posres_com);
2570 copy_rvec(ir->posres_comB, fr->posres_comB);
2571 fr->rlist = cutoff_inf(ir->rlist);
2572 fr->rlistlong = cutoff_inf(ir->rlistlong);
2573 fr->eeltype = ir->coulombtype;
2574 fr->vdwtype = ir->vdwtype;
2575 fr->ljpme_combination_rule = ir->ljpme_combination_rule;
2577 fr->coulomb_modifier = ir->coulomb_modifier;
2578 fr->vdw_modifier = ir->vdw_modifier;
2580 /* Electrostatics: Translate from interaction-setting-in-mdp-file to kernel interaction format */
2581 switch (fr->eeltype)
2583 case eelCUT:
2584 fr->nbkernel_elec_interaction = (fr->bGB) ? GMX_NBKERNEL_ELEC_GENERALIZEDBORN : GMX_NBKERNEL_ELEC_COULOMB;
2585 break;
2587 case eelRF:
2588 case eelGRF:
2589 case eelRF_NEC:
2590 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2591 break;
2593 case eelRF_ZERO:
2594 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2595 fr->coulomb_modifier = eintmodEXACTCUTOFF;
2596 break;
2598 case eelSWITCH:
2599 case eelSHIFT:
2600 case eelUSER:
2601 case eelENCADSHIFT:
2602 case eelPMESWITCH:
2603 case eelPMEUSER:
2604 case eelPMEUSERSWITCH:
2605 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2606 break;
2608 case eelPME:
2609 case eelEWALD:
2610 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_EWALD;
2611 break;
2613 default:
2614 gmx_fatal(FARGS, "Unsupported electrostatic interaction: %s", eel_names[fr->eeltype]);
2615 break;
2618 /* Vdw: Translate from mdp settings to kernel format */
2619 switch (fr->vdwtype)
2621 case evdwCUT:
2622 if (fr->bBHAM)
2624 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_BUCKINGHAM;
2626 else
2628 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LENNARDJONES;
2630 break;
2631 case evdwPME:
2632 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LJEWALD;
2633 break;
2635 case evdwSWITCH:
2636 case evdwSHIFT:
2637 case evdwUSER:
2638 case evdwENCADSHIFT:
2639 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2640 break;
2642 default:
2643 gmx_fatal(FARGS, "Unsupported vdw interaction: %s", evdw_names[fr->vdwtype]);
2644 break;
2647 /* These start out identical to ir, but might be altered if we e.g. tabulate the interaction in the kernel */
2648 fr->nbkernel_elec_modifier = fr->coulomb_modifier;
2649 fr->nbkernel_vdw_modifier = fr->vdw_modifier;
2651 fr->rvdw = cutoff_inf(ir->rvdw);
2652 fr->rvdw_switch = ir->rvdw_switch;
2653 fr->rcoulomb = cutoff_inf(ir->rcoulomb);
2654 fr->rcoulomb_switch = ir->rcoulomb_switch;
2656 fr->bTwinRange = fr->rlistlong > fr->rlist;
2657 fr->bEwald = (EEL_PME(fr->eeltype) || fr->eeltype == eelEWALD);
2659 fr->reppow = mtop->ffparams.reppow;
2661 if (ir->cutoff_scheme == ecutsGROUP)
2663 fr->bvdwtab = ((fr->vdwtype != evdwCUT || !gmx_within_tol(fr->reppow, 12.0, 10*GMX_DOUBLE_EPS))
2664 && !EVDW_PME(fr->vdwtype));
2665 /* We have special kernels for standard Ewald and PME, but the pme-switch ones are tabulated above */
2666 fr->bcoultab = !(fr->eeltype == eelCUT ||
2667 fr->eeltype == eelEWALD ||
2668 fr->eeltype == eelPME ||
2669 fr->eeltype == eelRF ||
2670 fr->eeltype == eelRF_ZERO);
2672 /* If the user absolutely wants different switch/shift settings for coul/vdw, it is likely
2673 * going to be faster to tabulate the interaction than calling the generic kernel.
2674 * However, if generic kernels have been requested we keep things analytically.
2676 if (fr->nbkernel_elec_modifier == eintmodPOTSWITCH &&
2677 fr->nbkernel_vdw_modifier == eintmodPOTSWITCH &&
2678 bGenericKernelOnly == FALSE)
2680 if ((fr->rcoulomb_switch != fr->rvdw_switch) || (fr->rcoulomb != fr->rvdw))
2682 fr->bcoultab = TRUE;
2683 /* Once we tabulate electrostatics, we can use the switch function for LJ,
2684 * which would otherwise need two tables.
2688 else if ((fr->nbkernel_elec_modifier == eintmodPOTSHIFT && fr->nbkernel_vdw_modifier == eintmodPOTSHIFT) ||
2689 ((fr->nbkernel_elec_interaction == GMX_NBKERNEL_ELEC_REACTIONFIELD &&
2690 fr->nbkernel_elec_modifier == eintmodEXACTCUTOFF &&
2691 (fr->nbkernel_vdw_modifier == eintmodPOTSWITCH || fr->nbkernel_vdw_modifier == eintmodPOTSHIFT))))
2693 if ((fr->rcoulomb != fr->rvdw) && (bGenericKernelOnly == FALSE))
2695 fr->bcoultab = TRUE;
2699 if (fr->nbkernel_elec_modifier == eintmodFORCESWITCH)
2701 fr->bcoultab = TRUE;
2703 if (fr->nbkernel_vdw_modifier == eintmodFORCESWITCH)
2705 fr->bvdwtab = TRUE;
2708 if (getenv("GMX_REQUIRE_TABLES"))
2710 fr->bvdwtab = TRUE;
2711 fr->bcoultab = TRUE;
2714 if (fp)
2716 fprintf(fp, "Table routines are used for coulomb: %s\n", bool_names[fr->bcoultab]);
2717 fprintf(fp, "Table routines are used for vdw: %s\n", bool_names[fr->bvdwtab ]);
2720 if (fr->bvdwtab == TRUE)
2722 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2723 fr->nbkernel_vdw_modifier = eintmodNONE;
2725 if (fr->bcoultab == TRUE)
2727 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2728 fr->nbkernel_elec_modifier = eintmodNONE;
2732 if (ir->cutoff_scheme == ecutsVERLET)
2734 if (!gmx_within_tol(fr->reppow, 12.0, 10*GMX_DOUBLE_EPS))
2736 gmx_fatal(FARGS, "Cut-off scheme %S only supports LJ repulsion power 12", ecutscheme_names[ir->cutoff_scheme]);
2738 fr->bvdwtab = FALSE;
2739 fr->bcoultab = FALSE;
2742 /* Tables are used for direct ewald sum */
2743 if (fr->bEwald)
2745 if (EEL_PME(ir->coulombtype))
2747 if (fp)
2749 fprintf(fp, "Will do PME sum in reciprocal space for electrostatic interactions.\n");
2751 if (ir->coulombtype == eelP3M_AD)
2753 please_cite(fp, "Hockney1988");
2754 please_cite(fp, "Ballenegger2012");
2756 else
2758 please_cite(fp, "Essmann95a");
2761 if (ir->ewald_geometry == eewg3DC)
2763 if (fp)
2765 fprintf(fp, "Using the Ewald3DC correction for systems with a slab geometry.\n");
2767 please_cite(fp, "In-Chul99a");
2770 fr->ewaldcoeff_q = calc_ewaldcoeff_q(ir->rcoulomb, ir->ewald_rtol);
2771 init_ewald_tab(&(fr->ewald_table), ir, fp);
2772 if (fp)
2774 fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for Ewald\n",
2775 1/fr->ewaldcoeff_q);
2779 if (EVDW_PME(ir->vdwtype))
2781 if (fp)
2783 fprintf(fp, "Will do PME sum in reciprocal space for LJ dispersion interactions.\n");
2785 please_cite(fp, "Essmann95a");
2786 fr->ewaldcoeff_lj = calc_ewaldcoeff_lj(ir->rvdw, ir->ewald_rtol_lj);
2787 if (fp)
2789 fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for LJ Ewald\n",
2790 1/fr->ewaldcoeff_lj);
2794 /* Electrostatics */
2795 fr->epsilon_r = ir->epsilon_r;
2796 fr->epsilon_rf = ir->epsilon_rf;
2797 fr->fudgeQQ = mtop->ffparams.fudgeQQ;
2799 /* Parameters for generalized RF */
2800 fr->zsquare = 0.0;
2801 fr->temp = 0.0;
2803 if (fr->eeltype == eelGRF)
2805 init_generalized_rf(fp, mtop, ir, fr);
2808 fr->bF_NoVirSum = (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype) ||
2809 gmx_mtop_ftype_count(mtop, F_POSRES) > 0 ||
2810 gmx_mtop_ftype_count(mtop, F_FBPOSRES) > 0 ||
2811 IR_ELEC_FIELD(*ir) ||
2812 (fr->adress_icor != eAdressICOff)
2815 if (fr->cutoff_scheme == ecutsGROUP &&
2816 ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr))
2818 /* Count the total number of charge groups */
2819 fr->cg_nalloc = ncg_mtop(mtop);
2820 srenew(fr->cg_cm, fr->cg_nalloc);
2822 if (fr->shift_vec == NULL)
2824 snew(fr->shift_vec, SHIFTS);
2827 if (fr->fshift == NULL)
2829 snew(fr->fshift, SHIFTS);
2832 if (fr->nbfp == NULL)
2834 fr->ntype = mtop->ffparams.atnr;
2835 fr->nbfp = mk_nbfp(&mtop->ffparams, fr->bBHAM);
2836 if (EVDW_PME(fr->vdwtype))
2838 fr->ljpme_c6grid = make_ljpme_c6grid(&mtop->ffparams, fr);
2842 /* Copy the energy group exclusions */
2843 fr->egp_flags = ir->opts.egp_flags;
2845 /* Van der Waals stuff */
2846 if ((fr->vdwtype != evdwCUT) && (fr->vdwtype != evdwUSER) && !fr->bBHAM)
2848 if (fr->rvdw_switch >= fr->rvdw)
2850 gmx_fatal(FARGS, "rvdw_switch (%f) must be < rvdw (%f)",
2851 fr->rvdw_switch, fr->rvdw);
2853 if (fp)
2855 fprintf(fp, "Using %s Lennard-Jones, switch between %g and %g nm\n",
2856 (fr->eeltype == eelSWITCH) ? "switched" : "shifted",
2857 fr->rvdw_switch, fr->rvdw);
2861 if (fr->bBHAM && EVDW_PME(fr->vdwtype))
2863 gmx_fatal(FARGS, "LJ PME not supported with Buckingham");
2866 if (fr->bBHAM && (fr->vdwtype == evdwSHIFT || fr->vdwtype == evdwSWITCH))
2868 gmx_fatal(FARGS, "Switch/shift interaction not supported with Buckingham");
2871 if (fr->bBHAM && fr->cutoff_scheme == ecutsVERLET)
2873 gmx_fatal(FARGS, "Verlet cutoff-scheme is not supported with Buckingham");
2876 if (fp)
2878 fprintf(fp, "Cut-off's: NS: %g Coulomb: %g %s: %g\n",
2879 fr->rlist, fr->rcoulomb, fr->bBHAM ? "BHAM" : "LJ", fr->rvdw);
2882 fr->eDispCorr = ir->eDispCorr;
2883 if (ir->eDispCorr != edispcNO)
2885 set_avcsixtwelve(fp, fr, mtop);
2888 if (fr->bBHAM)
2890 set_bham_b_max(fp, fr, mtop);
2893 fr->gb_epsilon_solvent = ir->gb_epsilon_solvent;
2895 /* Copy the GBSA data (radius, volume and surftens for each
2896 * atomtype) from the topology atomtype section to forcerec.
2898 snew(fr->atype_radius, fr->ntype);
2899 snew(fr->atype_vol, fr->ntype);
2900 snew(fr->atype_surftens, fr->ntype);
2901 snew(fr->atype_gb_radius, fr->ntype);
2902 snew(fr->atype_S_hct, fr->ntype);
2904 if (mtop->atomtypes.nr > 0)
2906 for (i = 0; i < fr->ntype; i++)
2908 fr->atype_radius[i] = mtop->atomtypes.radius[i];
2910 for (i = 0; i < fr->ntype; i++)
2912 fr->atype_vol[i] = mtop->atomtypes.vol[i];
2914 for (i = 0; i < fr->ntype; i++)
2916 fr->atype_surftens[i] = mtop->atomtypes.surftens[i];
2918 for (i = 0; i < fr->ntype; i++)
2920 fr->atype_gb_radius[i] = mtop->atomtypes.gb_radius[i];
2922 for (i = 0; i < fr->ntype; i++)
2924 fr->atype_S_hct[i] = mtop->atomtypes.S_hct[i];
2928 /* Generate the GB table if needed */
2929 if (fr->bGB)
2931 #ifdef GMX_DOUBLE
2932 fr->gbtabscale = 2000;
2933 #else
2934 fr->gbtabscale = 500;
2935 #endif
2937 fr->gbtabr = 100;
2938 fr->gbtab = make_gb_table(oenv, fr);
2940 init_gb(&fr->born, fr, ir, mtop, ir->gb_algorithm);
2942 /* Copy local gb data (for dd, this is done in dd_partition_system) */
2943 if (!DOMAINDECOMP(cr))
2945 make_local_gb(cr, fr->born, ir->gb_algorithm);
2949 /* Set the charge scaling */
2950 if (fr->epsilon_r != 0)
2952 fr->epsfac = ONE_4PI_EPS0/fr->epsilon_r;
2954 else
2956 /* eps = 0 is infinite dieletric: no coulomb interactions */
2957 fr->epsfac = 0;
2960 /* Reaction field constants */
2961 if (EEL_RF(fr->eeltype))
2963 calc_rffac(fp, fr->eeltype, fr->epsilon_r, fr->epsilon_rf,
2964 fr->rcoulomb, fr->temp, fr->zsquare, box,
2965 &fr->kappa, &fr->k_rf, &fr->c_rf);
2968 /*This now calculates sum for q and c6*/
2969 set_chargesum(fp, fr, mtop);
2971 /* if we are using LR electrostatics, and they are tabulated,
2972 * the tables will contain modified coulomb interactions.
2973 * Since we want to use the non-shifted ones for 1-4
2974 * coulombic interactions, we must have an extra set of tables.
2977 /* Construct tables.
2978 * A little unnecessary to make both vdw and coul tables sometimes,
2979 * but what the heck... */
2981 bMakeTables = fr->bcoultab || fr->bvdwtab || fr->bEwald ||
2982 (ir->eDispCorr != edispcNO && ir_vdw_switched(ir));
2984 bMakeSeparate14Table = ((!bMakeTables || fr->eeltype != eelCUT || fr->vdwtype != evdwCUT ||
2985 fr->coulomb_modifier != eintmodNONE ||
2986 fr->vdw_modifier != eintmodNONE ||
2987 fr->bBHAM || fr->bEwald) &&
2988 (gmx_mtop_ftype_count(mtop, F_LJ14) > 0 ||
2989 gmx_mtop_ftype_count(mtop, F_LJC14_Q) > 0 ||
2990 gmx_mtop_ftype_count(mtop, F_LJC_PAIRS_NB) > 0));
2992 negp_pp = ir->opts.ngener - ir->nwall;
2993 negptable = 0;
2994 if (!bMakeTables)
2996 bSomeNormalNbListsAreInUse = TRUE;
2997 fr->nnblists = 1;
2999 else
3001 bSomeNormalNbListsAreInUse = (ir->eDispCorr != edispcNO);
3002 for (egi = 0; egi < negp_pp; egi++)
3004 for (egj = egi; egj < negp_pp; egj++)
3006 egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
3007 if (!(egp_flags & EGP_EXCL))
3009 if (egp_flags & EGP_TABLE)
3011 negptable++;
3013 else
3015 bSomeNormalNbListsAreInUse = TRUE;
3020 if (bSomeNormalNbListsAreInUse)
3022 fr->nnblists = negptable + 1;
3024 else
3026 fr->nnblists = negptable;
3028 if (fr->nnblists > 1)
3030 snew(fr->gid2nblists, ir->opts.ngener*ir->opts.ngener);
3034 if (ir->adress)
3036 fr->nnblists *= 2;
3039 snew(fr->nblists, fr->nnblists);
3041 /* This code automatically gives table length tabext without cut-off's,
3042 * in that case grompp should already have checked that we do not need
3043 * normal tables and we only generate tables for 1-4 interactions.
3045 rtab = ir->rlistlong + ir->tabext;
3047 if (bMakeTables)
3049 /* make tables for ordinary interactions */
3050 if (bSomeNormalNbListsAreInUse)
3052 make_nbf_tables(fp, oenv, fr, rtab, cr, tabfn, NULL, NULL, &fr->nblists[0]);
3053 if (ir->adress)
3055 make_nbf_tables(fp, oenv, fr, rtab, cr, tabfn, NULL, NULL, &fr->nblists[fr->nnblists/2]);
3057 if (!bMakeSeparate14Table)
3059 fr->tab14 = fr->nblists[0].table_elec_vdw;
3061 m = 1;
3063 else
3065 m = 0;
3067 if (negptable > 0)
3069 /* Read the special tables for certain energy group pairs */
3070 nm_ind = mtop->groups.grps[egcENER].nm_ind;
3071 for (egi = 0; egi < negp_pp; egi++)
3073 for (egj = egi; egj < negp_pp; egj++)
3075 egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
3076 if ((egp_flags & EGP_TABLE) && !(egp_flags & EGP_EXCL))
3078 if (fr->nnblists > 1)
3080 fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = m;
3082 /* Read the table file with the two energy groups names appended */
3083 make_nbf_tables(fp, oenv, fr, rtab, cr, tabfn,
3084 *mtop->groups.grpname[nm_ind[egi]],
3085 *mtop->groups.grpname[nm_ind[egj]],
3086 &fr->nblists[m]);
3087 if (ir->adress)
3089 make_nbf_tables(fp, oenv, fr, rtab, cr, tabfn,
3090 *mtop->groups.grpname[nm_ind[egi]],
3091 *mtop->groups.grpname[nm_ind[egj]],
3092 &fr->nblists[fr->nnblists/2+m]);
3094 m++;
3096 else if (fr->nnblists > 1)
3098 fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = 0;
3104 else if ((fr->eDispCorr != edispcNO) &&
3105 ((fr->vdw_modifier == eintmodPOTSWITCH) ||
3106 (fr->vdw_modifier == eintmodFORCESWITCH) ||
3107 (fr->vdw_modifier == eintmodPOTSHIFT)))
3109 /* Tables might not be used for the potential modifier interactions per se, but
3110 * we still need them to evaluate switch/shift dispersion corrections in this case.
3112 make_nbf_tables(fp, oenv, fr, rtab, cr, tabfn, NULL, NULL, &fr->nblists[0]);
3115 if (bMakeSeparate14Table)
3117 /* generate extra tables with plain Coulomb for 1-4 interactions only */
3118 fr->tab14 = make_tables(fp, oenv, fr, MASTER(cr), tabpfn, rtab,
3119 GMX_MAKETABLES_14ONLY);
3122 /* Read AdResS Thermo Force table if needed */
3123 if (fr->adress_icor == eAdressICThermoForce)
3125 /* old todo replace */
3127 if (ir->adress->n_tf_grps > 0)
3129 make_adress_tf_tables(fp, oenv, fr, ir, tabfn, mtop, box);
3132 else
3134 /* load the default table */
3135 snew(fr->atf_tabs, 1);
3136 fr->atf_tabs[DEFAULT_TF_TABLE] = make_atf_table(fp, oenv, fr, tabafn, box);
3140 /* Wall stuff */
3141 fr->nwall = ir->nwall;
3142 if (ir->nwall && ir->wall_type == ewtTABLE)
3144 make_wall_tables(fp, oenv, ir, tabfn, &mtop->groups, fr);
3147 if (fcd && tabbfn)
3149 fcd->bondtab = make_bonded_tables(fp,
3150 F_TABBONDS, F_TABBONDSNC,
3151 mtop, tabbfn, "b");
3152 fcd->angletab = make_bonded_tables(fp,
3153 F_TABANGLES, -1,
3154 mtop, tabbfn, "a");
3155 fcd->dihtab = make_bonded_tables(fp,
3156 F_TABDIHS, -1,
3157 mtop, tabbfn, "d");
3159 else
3161 if (debug)
3163 fprintf(debug, "No fcdata or table file name passed, can not read table, can not do bonded interactions\n");
3167 /* QM/MM initialization if requested
3169 if (ir->bQMMM)
3171 fprintf(stderr, "QM/MM calculation requested.\n");
3174 fr->bQMMM = ir->bQMMM;
3175 fr->qr = mk_QMMMrec();
3177 /* Set all the static charge group info */
3178 fr->cginfo_mb = init_cginfo_mb(fp, mtop, fr, bNoSolvOpt,
3179 &bFEP_NonBonded,
3180 &fr->bExcl_IntraCGAll_InterCGNone);
3181 if (DOMAINDECOMP(cr))
3183 fr->cginfo = NULL;
3185 else
3187 fr->cginfo = cginfo_expand(mtop->nmolblock, fr->cginfo_mb);
3190 if (!DOMAINDECOMP(cr))
3192 forcerec_set_ranges(fr, ncg_mtop(mtop), ncg_mtop(mtop),
3193 mtop->natoms, mtop->natoms, mtop->natoms);
3196 fr->print_force = print_force;
3199 /* coarse load balancing vars */
3200 fr->t_fnbf = 0.;
3201 fr->t_wait = 0.;
3202 fr->timesteps = 0;
3204 /* Initialize neighbor search */
3205 init_ns(fp, cr, &fr->ns, fr, mtop);
3207 if (cr->duty & DUTY_PP)
3209 gmx_nonbonded_setup(fr, bGenericKernelOnly);
3211 if (ir->bAdress)
3213 gmx_setup_adress_kernels(fp,bGenericKernelOnly);
3218 /* Initialize the thread working data for bonded interactions */
3219 init_bonded_threading(fp, mtop->groups.grps[egcENER].nr,
3220 &fr->bonded_threading);
3222 fr->nthread_ewc = gmx_omp_nthreads_get(emntBonded);
3223 snew(fr->ewc_t, fr->nthread_ewc);
3224 snew(fr->excl_load, fr->nthread_ewc + 1);
3226 /* fr->ic is used both by verlet and group kernels (to some extent) now */
3227 init_interaction_const(fp, &fr->ic, fr);
3228 init_interaction_const_tables(fp, fr->ic, rtab);
3230 if (fr->cutoff_scheme == ecutsVERLET)
3232 if (ir->rcoulomb != ir->rvdw)
3234 gmx_fatal(FARGS, "With Verlet lists rcoulomb and rvdw should be identical");
3237 init_nb_verlet(fp, &fr->nbv, bFEP_NonBonded, ir, fr, cr, nbpu_opt);
3240 if (ir->eDispCorr != edispcNO)
3242 calc_enervirdiff(fp, ir->eDispCorr, fr);
3246 #define pr_real(fp, r) fprintf(fp, "%s: %e\n",#r, r)
3247 #define pr_int(fp, i) fprintf((fp), "%s: %d\n",#i, i)
3248 #define pr_bool(fp, b) fprintf((fp), "%s: %s\n",#b, bool_names[b])
3250 void pr_forcerec(FILE *fp, t_forcerec *fr)
3252 int i;
3254 pr_real(fp, fr->rlist);
3255 pr_real(fp, fr->rcoulomb);
3256 pr_real(fp, fr->fudgeQQ);
3257 pr_bool(fp, fr->bGrid);
3258 pr_bool(fp, fr->bTwinRange);
3259 /*pr_int(fp,fr->cg0);
3260 pr_int(fp,fr->hcg);*/
3261 for (i = 0; i < fr->nnblists; i++)
3263 pr_int(fp, fr->nblists[i].table_elec_vdw.n);
3265 pr_real(fp, fr->rcoulomb_switch);
3266 pr_real(fp, fr->rcoulomb);
3268 fflush(fp);
3271 void forcerec_set_excl_load(t_forcerec *fr,
3272 const gmx_localtop_t *top)
3274 const int *ind, *a;
3275 int t, i, j, ntot, n, ntarget;
3277 ind = top->excls.index;
3278 a = top->excls.a;
3280 ntot = 0;
3281 for (i = 0; i < top->excls.nr; i++)
3283 for (j = ind[i]; j < ind[i+1]; j++)
3285 if (a[j] > i)
3287 ntot++;
3292 fr->excl_load[0] = 0;
3293 n = 0;
3294 i = 0;
3295 for (t = 1; t <= fr->nthread_ewc; t++)
3297 ntarget = (ntot*t)/fr->nthread_ewc;
3298 while (i < top->excls.nr && n < ntarget)
3300 for (j = ind[i]; j < ind[i+1]; j++)
3302 if (a[j] > i)
3304 n++;
3307 i++;
3309 fr->excl_load[t] = i;
3313 /* Frees GPU memory and destroys the GPU context.
3315 * Note that this function needs to be called even if GPUs are not used
3316 * in this run because the PME ranks have no knowledge of whether GPUs
3317 * are used or not, but all ranks need to enter the barrier below.
3319 void free_gpu_resources(const t_forcerec *fr,
3320 const t_commrec *cr,
3321 const gmx_gpu_info_t *gpu_info,
3322 const gmx_gpu_opt_t *gpu_opt)
3324 gmx_bool bIsPPrankUsingGPU;
3325 char gpu_err_str[STRLEN];
3327 bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr && fr->nbv && fr->nbv->bUseGPU;
3329 if (bIsPPrankUsingGPU)
3331 /* free nbnxn data in GPU memory */
3332 nbnxn_gpu_free(fr->nbv->gpu_nbv);
3334 /* With tMPI we need to wait for all ranks to finish deallocation before
3335 * destroying the context in free_gpu() as some ranks may be sharing
3336 * GPU and context.
3337 * Note: as only PP ranks need to free GPU resources, so it is safe to
3338 * not call the barrier on PME ranks.
3340 #ifdef GMX_THREAD_MPI
3341 if (PAR(cr))
3343 gmx_barrier(cr);
3345 #endif /* GMX_THREAD_MPI */
3347 /* uninitialize GPU (by destroying the context) */
3348 if (!free_cuda_gpu(cr->rank_pp_intranode, gpu_err_str, gpu_info, gpu_opt))
3350 gmx_warning("On rank %d failed to free GPU #%d: %s",
3351 cr->nodeid, get_current_cuda_gpu_device_id(), gpu_err_str);