2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
51 #include "gromacs/commandline/filenm.h"
52 #include "gromacs/domdec/domdec.h"
53 #include "gromacs/domdec/domdec_struct.h"
54 #include "gromacs/ewald/ewald.h"
55 #include "gromacs/fileio/filetypes.h"
56 #include "gromacs/gmxlib/network.h"
57 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
58 #include "gromacs/gpu_utils/gpu_utils.h"
59 #include "gromacs/hardware/detecthardware.h"
60 #include "gromacs/listed-forces/manage-threading.h"
61 #include "gromacs/listed-forces/pairs.h"
62 #include "gromacs/math/calculate-ewald-splitting-coefficient.h"
63 #include "gromacs/math/functions.h"
64 #include "gromacs/math/units.h"
65 #include "gromacs/math/utilities.h"
66 #include "gromacs/math/vec.h"
67 #include "gromacs/mdlib/force.h"
68 #include "gromacs/mdlib/forcerec-threading.h"
69 #include "gromacs/mdlib/gmx_omp_nthreads.h"
70 #include "gromacs/mdlib/md_support.h"
71 #include "gromacs/mdlib/nb_verlet.h"
72 #include "gromacs/mdlib/nbnxn_atomdata.h"
73 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
74 #include "gromacs/mdlib/nbnxn_search.h"
75 #include "gromacs/mdlib/nbnxn_simd.h"
76 #include "gromacs/mdlib/nbnxn_util.h"
77 #include "gromacs/mdlib/ns.h"
78 #include "gromacs/mdlib/qmmm.h"
79 #include "gromacs/mdlib/sim_util.h"
80 #include "gromacs/mdtypes/commrec.h"
81 #include "gromacs/mdtypes/fcdata.h"
82 #include "gromacs/mdtypes/group.h"
83 #include "gromacs/mdtypes/iforceprovider.h"
84 #include "gromacs/mdtypes/inputrec.h"
85 #include "gromacs/mdtypes/md_enums.h"
86 #include "gromacs/pbcutil/ishift.h"
87 #include "gromacs/pbcutil/pbc.h"
88 #include "gromacs/simd/simd.h"
89 #include "gromacs/tables/forcetable.h"
90 #include "gromacs/topology/mtop_util.h"
91 #include "gromacs/trajectory/trajectoryframe.h"
92 #include "gromacs/utility/cstringutil.h"
93 #include "gromacs/utility/exceptions.h"
94 #include "gromacs/utility/fatalerror.h"
95 #include "gromacs/utility/gmxassert.h"
96 #include "gromacs/utility/logger.h"
97 #include "gromacs/utility/pleasecite.h"
98 #include "gromacs/utility/smalloc.h"
99 #include "gromacs/utility/strconvert.h"
101 #include "nbnxn_gpu_jit_support.h"
103 const char *egrp_nm
[egNR
+1] = {
104 "Coul-SR", "LJ-SR", "Buck-SR",
105 "Coul-14", "LJ-14", nullptr
108 t_forcerec
*mk_forcerec(void)
118 static void pr_nbfp(FILE *fp
, real
*nbfp
, gmx_bool bBHAM
, int atnr
)
122 for (i
= 0; (i
< atnr
); i
++)
124 for (j
= 0; (j
< atnr
); j
++)
126 fprintf(fp
, "%2d - %2d", i
, j
);
129 fprintf(fp
, " a=%10g, b=%10g, c=%10g\n", BHAMA(nbfp
, atnr
, i
, j
),
130 BHAMB(nbfp
, atnr
, i
, j
), BHAMC(nbfp
, atnr
, i
, j
)/6.0);
134 fprintf(fp
, " c6=%10g, c12=%10g\n", C6(nbfp
, atnr
, i
, j
)/6.0,
135 C12(nbfp
, atnr
, i
, j
)/12.0);
142 static real
*mk_nbfp(const gmx_ffparams_t
*idef
, gmx_bool bBHAM
)
150 snew(nbfp
, 3*atnr
*atnr
);
151 for (i
= k
= 0; (i
< atnr
); i
++)
153 for (j
= 0; (j
< atnr
); j
++, k
++)
155 BHAMA(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].bham
.a
;
156 BHAMB(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].bham
.b
;
157 /* nbfp now includes the 6.0 derivative prefactor */
158 BHAMC(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].bham
.c
*6.0;
164 snew(nbfp
, 2*atnr
*atnr
);
165 for (i
= k
= 0; (i
< atnr
); i
++)
167 for (j
= 0; (j
< atnr
); j
++, k
++)
169 /* nbfp now includes the 6.0/12.0 derivative prefactors */
170 C6(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].lj
.c6
*6.0;
171 C12(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].lj
.c12
*12.0;
179 static real
*make_ljpme_c6grid(const gmx_ffparams_t
*idef
, t_forcerec
*fr
)
182 real c6
, c6i
, c6j
, c12i
, c12j
, epsi
, epsj
, sigmai
, sigmaj
;
185 /* For LJ-PME simulations, we correct the energies with the reciprocal space
186 * inside of the cut-off. To do this the non-bonded kernels needs to have
187 * access to the C6-values used on the reciprocal grid in pme.c
191 snew(grid
, 2*atnr
*atnr
);
192 for (i
= k
= 0; (i
< atnr
); i
++)
194 for (j
= 0; (j
< atnr
); j
++, k
++)
196 c6i
= idef
->iparams
[i
*(atnr
+1)].lj
.c6
;
197 c12i
= idef
->iparams
[i
*(atnr
+1)].lj
.c12
;
198 c6j
= idef
->iparams
[j
*(atnr
+1)].lj
.c6
;
199 c12j
= idef
->iparams
[j
*(atnr
+1)].lj
.c12
;
200 c6
= std::sqrt(c6i
* c6j
);
201 if (fr
->ljpme_combination_rule
== eljpmeLB
202 && !gmx_numzero(c6
) && !gmx_numzero(c12i
) && !gmx_numzero(c12j
))
204 sigmai
= gmx::sixthroot(c12i
/ c6i
);
205 sigmaj
= gmx::sixthroot(c12j
/ c6j
);
206 epsi
= c6i
* c6i
/ c12i
;
207 epsj
= c6j
* c6j
/ c12j
;
208 c6
= std::sqrt(epsi
* epsj
) * gmx::power6(0.5*(sigmai
+sigmaj
));
210 /* Store the elements at the same relative positions as C6 in nbfp in order
211 * to simplify access in the kernels
213 grid
[2*(atnr
*i
+j
)] = c6
*6.0;
219 static real
*mk_nbfp_combination_rule(const gmx_ffparams_t
*idef
, int comb_rule
)
223 real c6i
, c6j
, c12i
, c12j
, epsi
, epsj
, sigmai
, sigmaj
;
227 snew(nbfp
, 2*atnr
*atnr
);
228 for (i
= 0; i
< atnr
; ++i
)
230 for (j
= 0; j
< atnr
; ++j
)
232 c6i
= idef
->iparams
[i
*(atnr
+1)].lj
.c6
;
233 c12i
= idef
->iparams
[i
*(atnr
+1)].lj
.c12
;
234 c6j
= idef
->iparams
[j
*(atnr
+1)].lj
.c6
;
235 c12j
= idef
->iparams
[j
*(atnr
+1)].lj
.c12
;
236 c6
= std::sqrt(c6i
* c6j
);
237 c12
= std::sqrt(c12i
* c12j
);
238 if (comb_rule
== eCOMB_ARITHMETIC
239 && !gmx_numzero(c6
) && !gmx_numzero(c12
))
241 sigmai
= gmx::sixthroot(c12i
/ c6i
);
242 sigmaj
= gmx::sixthroot(c12j
/ c6j
);
243 epsi
= c6i
* c6i
/ c12i
;
244 epsj
= c6j
* c6j
/ c12j
;
245 c6
= std::sqrt(epsi
* epsj
) * gmx::power6(0.5*(sigmai
+sigmaj
));
246 c12
= std::sqrt(epsi
* epsj
) * gmx::power12(0.5*(sigmai
+sigmaj
));
248 C6(nbfp
, atnr
, i
, j
) = c6
*6.0;
249 C12(nbfp
, atnr
, i
, j
) = c12
*12.0;
255 /* This routine sets fr->solvent_opt to the most common solvent in the
256 * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in
257 * the fr->solvent_type array with the correct type (or esolNO).
259 * Charge groups that fulfill the conditions but are not identical to the
260 * most common one will be marked as esolNO in the solvent_type array.
262 * TIP3p is identical to SPC for these purposes, so we call it
263 * SPC in the arrays (Apologies to Bill Jorgensen ;-)
265 * NOTE: QM particle should not
266 * become an optimized solvent. Not even if there is only one charge
276 } solvent_parameters_t
;
279 check_solvent_cg(const gmx_moltype_t
*molt
,
282 const unsigned char *qm_grpnr
,
283 const t_grps
*qm_grps
,
285 int *n_solvent_parameters
,
286 solvent_parameters_t
**solvent_parameters_p
,
296 real tmp_charge
[4] = { 0.0 }; /* init to zero to make gcc4.8 happy */
297 int tmp_vdwtype
[4] = { 0 }; /* init to zero to make gcc4.8 happy */
300 solvent_parameters_t
*solvent_parameters
;
302 /* We use a list with parameters for each solvent type.
303 * Every time we discover a new molecule that fulfills the basic
304 * conditions for a solvent we compare with the previous entries
305 * in these lists. If the parameters are the same we just increment
306 * the counter for that type, and otherwise we create a new type
307 * based on the current molecule.
309 * Once we've finished going through all molecules we check which
310 * solvent is most common, and mark all those molecules while we
311 * clear the flag on all others.
314 solvent_parameters
= *solvent_parameters_p
;
316 /* Mark the cg first as non optimized */
319 /* Check if this cg has no exclusions with atoms in other charge groups
320 * and all atoms inside the charge group excluded.
321 * We only have 3 or 4 atom solvent loops.
323 if (GET_CGINFO_EXCL_INTER(cginfo
) ||
324 !GET_CGINFO_EXCL_INTRA(cginfo
))
329 /* Get the indices of the first atom in this charge group */
330 j0
= molt
->cgs
.index
[cg0
];
331 j1
= molt
->cgs
.index
[cg0
+1];
333 /* Number of atoms in our molecule */
339 "Moltype '%s': there are %d atoms in this charge group\n",
343 /* Check if it could be an SPC (3 atoms) or TIP4p (4) water,
346 if (nj
< 3 || nj
> 4)
351 /* Check if we are doing QM on this group */
353 if (qm_grpnr
!= nullptr)
355 for (j
= j0
; j
< j1
&& !qm
; j
++)
357 qm
= (qm_grpnr
[j
] < qm_grps
->nr
- 1);
360 /* Cannot use solvent optimization with QM */
366 atom
= molt
->atoms
.atom
;
368 /* Still looks like a solvent, time to check parameters */
370 /* If it is perturbed (free energy) we can't use the solvent loops,
371 * so then we just skip to the next molecule.
375 for (j
= j0
; j
< j1
&& !perturbed
; j
++)
377 perturbed
= PERTURBED(atom
[j
]);
385 /* Now it's only a question if the VdW and charge parameters
386 * are OK. Before doing the check we compare and see if they are
387 * identical to a possible previous solvent type.
388 * First we assign the current types and charges.
390 for (j
= 0; j
< nj
; j
++)
392 tmp_vdwtype
[j
] = atom
[j0
+j
].type
;
393 tmp_charge
[j
] = atom
[j0
+j
].q
;
396 /* Does it match any previous solvent type? */
397 for (k
= 0; k
< *n_solvent_parameters
; k
++)
402 /* We can only match SPC with 3 atoms and TIP4p with 4 atoms */
403 if ( (solvent_parameters
[k
].model
== esolSPC
&& nj
!= 3) ||
404 (solvent_parameters
[k
].model
== esolTIP4P
&& nj
!= 4) )
409 /* Check that types & charges match for all atoms in molecule */
410 for (j
= 0; j
< nj
&& match
== TRUE
; j
++)
412 if (tmp_vdwtype
[j
] != solvent_parameters
[k
].vdwtype
[j
])
416 if (tmp_charge
[j
] != solvent_parameters
[k
].charge
[j
])
423 /* Congratulations! We have a matched solvent.
424 * Flag it with this type for later processing.
427 solvent_parameters
[k
].count
+= nmol
;
429 /* We are done with this charge group */
434 /* If we get here, we have a tentative new solvent type.
435 * Before we add it we must check that it fulfills the requirements
436 * of the solvent optimized loops. First determine which atoms have
439 for (j
= 0; j
< nj
; j
++)
442 tjA
= tmp_vdwtype
[j
];
444 /* Go through all other tpes and see if any have non-zero
445 * VdW parameters when combined with this one.
447 for (k
= 0; k
< fr
->ntype
&& (has_vdw
[j
] == FALSE
); k
++)
449 /* We already checked that the atoms weren't perturbed,
450 * so we only need to check state A now.
454 has_vdw
[j
] = (has_vdw
[j
] ||
455 (BHAMA(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0) ||
456 (BHAMB(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0) ||
457 (BHAMC(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0));
462 has_vdw
[j
] = (has_vdw
[j
] ||
463 (C6(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0) ||
464 (C12(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0));
469 /* Now we know all we need to make the final check and assignment. */
473 * For this we require thatn all atoms have charge,
474 * the charges on atom 2 & 3 should be the same, and only
475 * atom 1 might have VdW.
477 if (has_vdw
[1] == FALSE
&&
478 has_vdw
[2] == FALSE
&&
479 tmp_charge
[0] != 0 &&
480 tmp_charge
[1] != 0 &&
481 tmp_charge
[2] == tmp_charge
[1])
483 srenew(solvent_parameters
, *n_solvent_parameters
+1);
484 solvent_parameters
[*n_solvent_parameters
].model
= esolSPC
;
485 solvent_parameters
[*n_solvent_parameters
].count
= nmol
;
486 for (k
= 0; k
< 3; k
++)
488 solvent_parameters
[*n_solvent_parameters
].vdwtype
[k
] = tmp_vdwtype
[k
];
489 solvent_parameters
[*n_solvent_parameters
].charge
[k
] = tmp_charge
[k
];
492 *cg_sp
= *n_solvent_parameters
;
493 (*n_solvent_parameters
)++;
498 /* Or could it be a TIP4P?
499 * For this we require thatn atoms 2,3,4 have charge, but not atom 1.
500 * Only atom 1 mght have VdW.
502 if (has_vdw
[1] == FALSE
&&
503 has_vdw
[2] == FALSE
&&
504 has_vdw
[3] == FALSE
&&
505 tmp_charge
[0] == 0 &&
506 tmp_charge
[1] != 0 &&
507 tmp_charge
[2] == tmp_charge
[1] &&
510 srenew(solvent_parameters
, *n_solvent_parameters
+1);
511 solvent_parameters
[*n_solvent_parameters
].model
= esolTIP4P
;
512 solvent_parameters
[*n_solvent_parameters
].count
= nmol
;
513 for (k
= 0; k
< 4; k
++)
515 solvent_parameters
[*n_solvent_parameters
].vdwtype
[k
] = tmp_vdwtype
[k
];
516 solvent_parameters
[*n_solvent_parameters
].charge
[k
] = tmp_charge
[k
];
519 *cg_sp
= *n_solvent_parameters
;
520 (*n_solvent_parameters
)++;
524 *solvent_parameters_p
= solvent_parameters
;
528 check_solvent(FILE * fp
,
529 const gmx_mtop_t
* mtop
,
531 cginfo_mb_t
*cginfo_mb
)
534 const gmx_moltype_t
*molt
;
535 int mb
, mol
, cg_mol
, at_offset
, am
, cgm
, i
, nmol_ch
, nmol
;
536 int n_solvent_parameters
;
537 solvent_parameters_t
*solvent_parameters
;
543 fprintf(debug
, "Going to determine what solvent types we have.\n");
546 n_solvent_parameters
= 0;
547 solvent_parameters
= nullptr;
548 /* Allocate temporary array for solvent type */
549 snew(cg_sp
, mtop
->nmolblock
);
552 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
554 molt
= &mtop
->moltype
[mtop
->molblock
[mb
].type
];
556 /* Here we have to loop over all individual molecules
557 * because we need to check for QMMM particles.
559 snew(cg_sp
[mb
], cginfo_mb
[mb
].cg_mod
);
560 nmol_ch
= cginfo_mb
[mb
].cg_mod
/cgs
->nr
;
561 nmol
= mtop
->molblock
[mb
].nmol
/nmol_ch
;
562 for (mol
= 0; mol
< nmol_ch
; mol
++)
565 am
= mol
*cgs
->index
[cgs
->nr
];
566 for (cg_mol
= 0; cg_mol
< cgs
->nr
; cg_mol
++)
568 check_solvent_cg(molt
, cg_mol
, nmol
,
569 mtop
->groups
.grpnr
[egcQMMM
] ?
570 mtop
->groups
.grpnr
[egcQMMM
]+at_offset
+am
: nullptr,
571 &mtop
->groups
.grps
[egcQMMM
],
573 &n_solvent_parameters
, &solvent_parameters
,
574 cginfo_mb
[mb
].cginfo
[cgm
+cg_mol
],
575 &cg_sp
[mb
][cgm
+cg_mol
]);
578 at_offset
+= cgs
->index
[cgs
->nr
];
581 /* Puh! We finished going through all charge groups.
582 * Now find the most common solvent model.
585 /* Most common solvent this far */
587 for (i
= 0; i
< n_solvent_parameters
; i
++)
590 solvent_parameters
[i
].count
> solvent_parameters
[bestsp
].count
)
598 bestsol
= solvent_parameters
[bestsp
].model
;
606 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
608 cgs
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].cgs
;
609 nmol
= (mtop
->molblock
[mb
].nmol
*cgs
->nr
)/cginfo_mb
[mb
].cg_mod
;
610 for (i
= 0; i
< cginfo_mb
[mb
].cg_mod
; i
++)
612 if (cg_sp
[mb
][i
] == bestsp
)
614 SET_CGINFO_SOLOPT(cginfo_mb
[mb
].cginfo
[i
], bestsol
);
619 SET_CGINFO_SOLOPT(cginfo_mb
[mb
].cginfo
[i
], esolNO
);
626 if (bestsol
!= esolNO
&& fp
!= nullptr)
628 fprintf(fp
, "\nEnabling %s-like water optimization for %d molecules.\n\n",
630 solvent_parameters
[bestsp
].count
);
633 sfree(solvent_parameters
);
634 fr
->solvent_opt
= bestsol
;
638 acNONE
= 0, acCONSTRAINT
, acSETTLE
641 static cginfo_mb_t
*init_cginfo_mb(FILE *fplog
, const gmx_mtop_t
*mtop
,
642 t_forcerec
*fr
, gmx_bool bNoSolvOpt
,
643 gmx_bool
*bFEP_NonBonded
,
644 gmx_bool
*bExcl_IntraCGAll_InterCGNone
)
647 const t_blocka
*excl
;
648 const gmx_moltype_t
*molt
;
649 const gmx_molblock_t
*molb
;
650 cginfo_mb_t
*cginfo_mb
;
653 int cg_offset
, a_offset
;
654 int mb
, m
, cg
, a0
, a1
, gid
, ai
, j
, aj
, excl_nalloc
;
658 gmx_bool bId
, *bExcl
, bExclIntraAll
, bExclInter
, bHaveVDW
, bHaveQ
, bHavePerturbedAtoms
;
660 snew(cginfo_mb
, mtop
->nmolblock
);
662 snew(type_VDW
, fr
->ntype
);
663 for (ai
= 0; ai
< fr
->ntype
; ai
++)
665 type_VDW
[ai
] = FALSE
;
666 for (j
= 0; j
< fr
->ntype
; j
++)
668 type_VDW
[ai
] = type_VDW
[ai
] ||
670 C6(fr
->nbfp
, fr
->ntype
, ai
, j
) != 0 ||
671 C12(fr
->nbfp
, fr
->ntype
, ai
, j
) != 0;
675 *bFEP_NonBonded
= FALSE
;
676 *bExcl_IntraCGAll_InterCGNone
= TRUE
;
679 snew(bExcl
, excl_nalloc
);
682 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
684 molb
= &mtop
->molblock
[mb
];
685 molt
= &mtop
->moltype
[molb
->type
];
689 /* Check if the cginfo is identical for all molecules in this block.
690 * If so, we only need an array of the size of one molecule.
691 * Otherwise we make an array of #mol times #cgs per molecule.
694 for (m
= 0; m
< molb
->nmol
; m
++)
696 int am
= m
*cgs
->index
[cgs
->nr
];
697 for (cg
= 0; cg
< cgs
->nr
; cg
++)
700 a1
= cgs
->index
[cg
+1];
701 if (ggrpnr(&mtop
->groups
, egcENER
, a_offset
+am
+a0
) !=
702 ggrpnr(&mtop
->groups
, egcENER
, a_offset
+a0
))
706 if (mtop
->groups
.grpnr
[egcQMMM
] != nullptr)
708 for (ai
= a0
; ai
< a1
; ai
++)
710 if (mtop
->groups
.grpnr
[egcQMMM
][a_offset
+am
+ai
] !=
711 mtop
->groups
.grpnr
[egcQMMM
][a_offset
+ai
])
720 cginfo_mb
[mb
].cg_start
= cg_offset
;
721 cginfo_mb
[mb
].cg_end
= cg_offset
+ molb
->nmol
*cgs
->nr
;
722 cginfo_mb
[mb
].cg_mod
= (bId
? 1 : molb
->nmol
)*cgs
->nr
;
723 snew(cginfo_mb
[mb
].cginfo
, cginfo_mb
[mb
].cg_mod
);
724 cginfo
= cginfo_mb
[mb
].cginfo
;
726 /* Set constraints flags for constrained atoms */
727 snew(a_con
, molt
->atoms
.nr
);
728 for (ftype
= 0; ftype
< F_NRE
; ftype
++)
730 if (interaction_function
[ftype
].flags
& IF_CONSTRAINT
)
735 for (ia
= 0; ia
< molt
->ilist
[ftype
].nr
; ia
+= 1+nral
)
739 for (a
= 0; a
< nral
; a
++)
741 a_con
[molt
->ilist
[ftype
].iatoms
[ia
+1+a
]] =
742 (ftype
== F_SETTLE
? acSETTLE
: acCONSTRAINT
);
748 for (m
= 0; m
< (bId
? 1 : molb
->nmol
); m
++)
751 int am
= m
*cgs
->index
[cgs
->nr
];
752 for (cg
= 0; cg
< cgs
->nr
; cg
++)
755 a1
= cgs
->index
[cg
+1];
757 /* Store the energy group in cginfo */
758 gid
= ggrpnr(&mtop
->groups
, egcENER
, a_offset
+am
+a0
);
759 SET_CGINFO_GID(cginfo
[cgm
+cg
], gid
);
761 /* Check the intra/inter charge group exclusions */
762 if (a1
-a0
> excl_nalloc
)
764 excl_nalloc
= a1
- a0
;
765 srenew(bExcl
, excl_nalloc
);
767 /* bExclIntraAll: all intra cg interactions excluded
768 * bExclInter: any inter cg interactions excluded
770 bExclIntraAll
= TRUE
;
774 bHavePerturbedAtoms
= FALSE
;
775 for (ai
= a0
; ai
< a1
; ai
++)
777 /* Check VDW and electrostatic interactions */
778 bHaveVDW
= bHaveVDW
|| (type_VDW
[molt
->atoms
.atom
[ai
].type
] ||
779 type_VDW
[molt
->atoms
.atom
[ai
].typeB
]);
780 bHaveQ
= bHaveQ
|| (molt
->atoms
.atom
[ai
].q
!= 0 ||
781 molt
->atoms
.atom
[ai
].qB
!= 0);
783 bHavePerturbedAtoms
= bHavePerturbedAtoms
|| (PERTURBED(molt
->atoms
.atom
[ai
]) != 0);
785 /* Clear the exclusion list for atom ai */
786 for (aj
= a0
; aj
< a1
; aj
++)
788 bExcl
[aj
-a0
] = FALSE
;
790 /* Loop over all the exclusions of atom ai */
791 for (j
= excl
->index
[ai
]; j
< excl
->index
[ai
+1]; j
++)
794 if (aj
< a0
|| aj
>= a1
)
803 /* Check if ai excludes a0 to a1 */
804 for (aj
= a0
; aj
< a1
; aj
++)
808 bExclIntraAll
= FALSE
;
815 SET_CGINFO_CONSTR(cginfo
[cgm
+cg
]);
818 SET_CGINFO_SETTLE(cginfo
[cgm
+cg
]);
826 SET_CGINFO_EXCL_INTRA(cginfo
[cgm
+cg
]);
830 SET_CGINFO_EXCL_INTER(cginfo
[cgm
+cg
]);
832 if (a1
- a0
> MAX_CHARGEGROUP_SIZE
)
834 /* The size in cginfo is currently only read with DD */
835 gmx_fatal(FARGS
, "A charge group has size %d which is larger than the limit of %d atoms", a1
-a0
, MAX_CHARGEGROUP_SIZE
);
839 SET_CGINFO_HAS_VDW(cginfo
[cgm
+cg
]);
843 SET_CGINFO_HAS_Q(cginfo
[cgm
+cg
]);
845 if (bHavePerturbedAtoms
&& fr
->efep
!= efepNO
)
847 SET_CGINFO_FEP(cginfo
[cgm
+cg
]);
848 *bFEP_NonBonded
= TRUE
;
850 /* Store the charge group size */
851 SET_CGINFO_NATOMS(cginfo
[cgm
+cg
], a1
-a0
);
853 if (!bExclIntraAll
|| bExclInter
)
855 *bExcl_IntraCGAll_InterCGNone
= FALSE
;
862 cg_offset
+= molb
->nmol
*cgs
->nr
;
863 a_offset
+= molb
->nmol
*cgs
->index
[cgs
->nr
];
867 /* the solvent optimizer is called after the QM is initialized,
868 * because we don't want to have the QM subsystemto become an
872 check_solvent(fplog
, mtop
, fr
, cginfo_mb
);
874 if (getenv("GMX_NO_SOLV_OPT"))
878 fprintf(fplog
, "Found environment variable GMX_NO_SOLV_OPT.\n"
879 "Disabling all solvent optimization\n");
881 fr
->solvent_opt
= esolNO
;
885 fr
->solvent_opt
= esolNO
;
887 if (!fr
->solvent_opt
)
889 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
891 for (cg
= 0; cg
< cginfo_mb
[mb
].cg_mod
; cg
++)
893 SET_CGINFO_SOLOPT(cginfo_mb
[mb
].cginfo
[cg
], esolNO
);
901 static int *cginfo_expand(int nmb
, cginfo_mb_t
*cgi_mb
)
906 ncg
= cgi_mb
[nmb
-1].cg_end
;
909 for (cg
= 0; cg
< ncg
; cg
++)
911 while (cg
>= cgi_mb
[mb
].cg_end
)
916 cgi_mb
[mb
].cginfo
[(cg
- cgi_mb
[mb
].cg_start
) % cgi_mb
[mb
].cg_mod
];
922 static void set_chargesum(FILE *log
, t_forcerec
*fr
, const gmx_mtop_t
*mtop
)
924 /*This now calculates sum for q and c6*/
925 double qsum
, q2sum
, q
, c6sum
, c6
;
927 const t_atoms
*atoms
;
932 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
934 nmol
= mtop
->molblock
[mb
].nmol
;
935 atoms
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].atoms
;
936 for (i
= 0; i
< atoms
->nr
; i
++)
938 q
= atoms
->atom
[i
].q
;
941 c6
= mtop
->ffparams
.iparams
[atoms
->atom
[i
].type
*(mtop
->ffparams
.atnr
+1)].lj
.c6
;
946 fr
->q2sum
[0] = q2sum
;
947 fr
->c6sum
[0] = c6sum
;
949 if (fr
->efep
!= efepNO
)
954 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
956 nmol
= mtop
->molblock
[mb
].nmol
;
957 atoms
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].atoms
;
958 for (i
= 0; i
< atoms
->nr
; i
++)
960 q
= atoms
->atom
[i
].qB
;
963 c6
= mtop
->ffparams
.iparams
[atoms
->atom
[i
].typeB
*(mtop
->ffparams
.atnr
+1)].lj
.c6
;
967 fr
->q2sum
[1] = q2sum
;
968 fr
->c6sum
[1] = c6sum
;
973 fr
->qsum
[1] = fr
->qsum
[0];
974 fr
->q2sum
[1] = fr
->q2sum
[0];
975 fr
->c6sum
[1] = fr
->c6sum
[0];
979 if (fr
->efep
== efepNO
)
981 fprintf(log
, "System total charge: %.3f\n", fr
->qsum
[0]);
985 fprintf(log
, "System total charge, top. A: %.3f top. B: %.3f\n",
986 fr
->qsum
[0], fr
->qsum
[1]);
991 void update_forcerec(t_forcerec
*fr
, matrix box
)
993 if (fr
->eeltype
== eelGRF
)
995 calc_rffac(nullptr, fr
->eeltype
, fr
->epsilon_r
, fr
->epsilon_rf
,
996 fr
->rcoulomb
, fr
->temp
, fr
->zsquare
, box
,
997 &fr
->kappa
, &fr
->k_rf
, &fr
->c_rf
);
1001 void set_avcsixtwelve(FILE *fplog
, t_forcerec
*fr
, const gmx_mtop_t
*mtop
)
1003 const t_atoms
*atoms
, *atoms_tpi
;
1004 const t_blocka
*excl
;
1005 int mb
, nmol
, nmolc
, i
, j
, tpi
, tpj
, j1
, j2
, k
, nexcl
, q
;
1006 gmx_int64_t npair
, npair_ij
, tmpi
, tmpj
;
1007 double csix
, ctwelve
;
1008 int ntp
, *typecount
;
1011 real
*nbfp_comb
= nullptr;
1017 /* For LJ-PME, we want to correct for the difference between the
1018 * actual C6 values and the C6 values used by the LJ-PME based on
1019 * combination rules. */
1021 if (EVDW_PME(fr
->vdwtype
))
1023 nbfp_comb
= mk_nbfp_combination_rule(&mtop
->ffparams
,
1024 (fr
->ljpme_combination_rule
== eljpmeLB
) ? eCOMB_ARITHMETIC
: eCOMB_GEOMETRIC
);
1025 for (tpi
= 0; tpi
< ntp
; ++tpi
)
1027 for (tpj
= 0; tpj
< ntp
; ++tpj
)
1029 C6(nbfp_comb
, ntp
, tpi
, tpj
) =
1030 C6(nbfp
, ntp
, tpi
, tpj
) - C6(nbfp_comb
, ntp
, tpi
, tpj
);
1031 C12(nbfp_comb
, ntp
, tpi
, tpj
) = C12(nbfp
, ntp
, tpi
, tpj
);
1036 for (q
= 0; q
< (fr
->efep
== efepNO
? 1 : 2); q
++)
1044 /* Count the types so we avoid natoms^2 operations */
1045 snew(typecount
, ntp
);
1046 gmx_mtop_count_atomtypes(mtop
, q
, typecount
);
1048 for (tpi
= 0; tpi
< ntp
; tpi
++)
1050 for (tpj
= tpi
; tpj
< ntp
; tpj
++)
1052 tmpi
= typecount
[tpi
];
1053 tmpj
= typecount
[tpj
];
1056 npair_ij
= tmpi
*tmpj
;
1060 npair_ij
= tmpi
*(tmpi
- 1)/2;
1064 /* nbfp now includes the 6.0 derivative prefactor */
1065 csix
+= npair_ij
*BHAMC(nbfp
, ntp
, tpi
, tpj
)/6.0;
1069 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1070 csix
+= npair_ij
* C6(nbfp
, ntp
, tpi
, tpj
)/6.0;
1071 ctwelve
+= npair_ij
* C12(nbfp
, ntp
, tpi
, tpj
)/12.0;
1077 /* Subtract the excluded pairs.
1078 * The main reason for substracting exclusions is that in some cases
1079 * some combinations might never occur and the parameters could have
1080 * any value. These unused values should not influence the dispersion
1083 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
1085 nmol
= mtop
->molblock
[mb
].nmol
;
1086 atoms
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].atoms
;
1087 excl
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].excls
;
1088 for (i
= 0; (i
< atoms
->nr
); i
++)
1092 tpi
= atoms
->atom
[i
].type
;
1096 tpi
= atoms
->atom
[i
].typeB
;
1098 j1
= excl
->index
[i
];
1099 j2
= excl
->index
[i
+1];
1100 for (j
= j1
; j
< j2
; j
++)
1107 tpj
= atoms
->atom
[k
].type
;
1111 tpj
= atoms
->atom
[k
].typeB
;
1115 /* nbfp now includes the 6.0 derivative prefactor */
1116 csix
-= nmol
*BHAMC(nbfp
, ntp
, tpi
, tpj
)/6.0;
1120 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1121 csix
-= nmol
*C6 (nbfp
, ntp
, tpi
, tpj
)/6.0;
1122 ctwelve
-= nmol
*C12(nbfp
, ntp
, tpi
, tpj
)/12.0;
1132 /* Only correct for the interaction of the test particle
1133 * with the rest of the system.
1136 &mtop
->moltype
[mtop
->molblock
[mtop
->nmolblock
-1].type
].atoms
;
1139 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
1141 nmol
= mtop
->molblock
[mb
].nmol
;
1142 atoms
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].atoms
;
1143 for (j
= 0; j
< atoms
->nr
; j
++)
1146 /* Remove the interaction of the test charge group
1149 if (mb
== mtop
->nmolblock
-1)
1153 if (mb
== 0 && nmol
== 1)
1155 gmx_fatal(FARGS
, "Old format tpr with TPI, please generate a new tpr file");
1160 tpj
= atoms
->atom
[j
].type
;
1164 tpj
= atoms
->atom
[j
].typeB
;
1166 for (i
= 0; i
< fr
->n_tpi
; i
++)
1170 tpi
= atoms_tpi
->atom
[i
].type
;
1174 tpi
= atoms_tpi
->atom
[i
].typeB
;
1178 /* nbfp now includes the 6.0 derivative prefactor */
1179 csix
+= nmolc
*BHAMC(nbfp
, ntp
, tpi
, tpj
)/6.0;
1183 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1184 csix
+= nmolc
*C6 (nbfp
, ntp
, tpi
, tpj
)/6.0;
1185 ctwelve
+= nmolc
*C12(nbfp
, ntp
, tpi
, tpj
)/12.0;
1192 if (npair
- nexcl
<= 0 && fplog
)
1194 fprintf(fplog
, "\nWARNING: There are no atom pairs for dispersion correction\n\n");
1200 csix
/= npair
- nexcl
;
1201 ctwelve
/= npair
- nexcl
;
1205 fprintf(debug
, "Counted %d exclusions\n", nexcl
);
1206 fprintf(debug
, "Average C6 parameter is: %10g\n", (double)csix
);
1207 fprintf(debug
, "Average C12 parameter is: %10g\n", (double)ctwelve
);
1209 fr
->avcsix
[q
] = csix
;
1210 fr
->avctwelve
[q
] = ctwelve
;
1213 if (EVDW_PME(fr
->vdwtype
))
1218 if (fplog
!= nullptr)
1220 if (fr
->eDispCorr
== edispcAllEner
||
1221 fr
->eDispCorr
== edispcAllEnerPres
)
1223 fprintf(fplog
, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1224 fr
->avcsix
[0], fr
->avctwelve
[0]);
1228 fprintf(fplog
, "Long Range LJ corr.: <C6> %10.4e\n", fr
->avcsix
[0]);
1234 static void set_bham_b_max(FILE *fplog
, t_forcerec
*fr
,
1235 const gmx_mtop_t
*mtop
)
1237 const t_atoms
*at1
, *at2
;
1238 int mt1
, mt2
, i
, j
, tpi
, tpj
, ntypes
;
1244 fprintf(fplog
, "Determining largest Buckingham b parameter for table\n");
1251 for (mt1
= 0; mt1
< mtop
->nmoltype
; mt1
++)
1253 at1
= &mtop
->moltype
[mt1
].atoms
;
1254 for (i
= 0; (i
< at1
->nr
); i
++)
1256 tpi
= at1
->atom
[i
].type
;
1259 gmx_fatal(FARGS
, "Atomtype[%d] = %d, maximum = %d", i
, tpi
, ntypes
);
1262 for (mt2
= mt1
; mt2
< mtop
->nmoltype
; mt2
++)
1264 at2
= &mtop
->moltype
[mt2
].atoms
;
1265 for (j
= 0; (j
< at2
->nr
); j
++)
1267 tpj
= at2
->atom
[j
].type
;
1270 gmx_fatal(FARGS
, "Atomtype[%d] = %d, maximum = %d", j
, tpj
, ntypes
);
1272 b
= BHAMB(nbfp
, ntypes
, tpi
, tpj
);
1273 if (b
> fr
->bham_b_max
)
1277 if ((b
< bmin
) || (bmin
== -1))
1287 fprintf(fplog
, "Buckingham b parameters, min: %g, max: %g\n",
1288 bmin
, fr
->bham_b_max
);
1292 static void make_nbf_tables(FILE *fp
,
1293 t_forcerec
*fr
, real rtab
,
1294 const char *tabfn
, char *eg1
, char *eg2
,
1300 if (tabfn
== nullptr)
1304 fprintf(debug
, "No table file name passed, can not read table, can not do non-bonded interactions\n");
1309 sprintf(buf
, "%s", tabfn
);
1312 /* Append the two energy group names */
1313 sprintf(buf
+ strlen(tabfn
) - strlen(ftp2ext(efXVG
)) - 1, "_%s_%s.%s",
1314 eg1
, eg2
, ftp2ext(efXVG
));
1316 nbl
->table_elec_vdw
= make_tables(fp
, fr
, buf
, rtab
, 0);
1317 /* Copy the contents of the table to separate coulomb and LJ tables too,
1318 * to improve cache performance.
1320 /* For performance reasons we want
1321 * the table data to be aligned to 16-byte. The pointers could be freed
1322 * but currently aren't.
1324 snew(nbl
->table_elec
, 1);
1325 nbl
->table_elec
->interaction
= GMX_TABLE_INTERACTION_ELEC
;
1326 nbl
->table_elec
->format
= nbl
->table_elec_vdw
->format
;
1327 nbl
->table_elec
->r
= nbl
->table_elec_vdw
->r
;
1328 nbl
->table_elec
->n
= nbl
->table_elec_vdw
->n
;
1329 nbl
->table_elec
->scale
= nbl
->table_elec_vdw
->scale
;
1330 nbl
->table_elec
->formatsize
= nbl
->table_elec_vdw
->formatsize
;
1331 nbl
->table_elec
->ninteractions
= 1;
1332 nbl
->table_elec
->stride
= nbl
->table_elec
->formatsize
* nbl
->table_elec
->ninteractions
;
1333 snew_aligned(nbl
->table_elec
->data
, nbl
->table_elec
->stride
*(nbl
->table_elec
->n
+1), 32);
1335 snew(nbl
->table_vdw
, 1);
1336 nbl
->table_vdw
->interaction
= GMX_TABLE_INTERACTION_VDWREP_VDWDISP
;
1337 nbl
->table_vdw
->format
= nbl
->table_elec_vdw
->format
;
1338 nbl
->table_vdw
->r
= nbl
->table_elec_vdw
->r
;
1339 nbl
->table_vdw
->n
= nbl
->table_elec_vdw
->n
;
1340 nbl
->table_vdw
->scale
= nbl
->table_elec_vdw
->scale
;
1341 nbl
->table_vdw
->formatsize
= nbl
->table_elec_vdw
->formatsize
;
1342 nbl
->table_vdw
->ninteractions
= 2;
1343 nbl
->table_vdw
->stride
= nbl
->table_vdw
->formatsize
* nbl
->table_vdw
->ninteractions
;
1344 snew_aligned(nbl
->table_vdw
->data
, nbl
->table_vdw
->stride
*(nbl
->table_vdw
->n
+1), 32);
1346 for (i
= 0; i
<= nbl
->table_elec_vdw
->n
; i
++)
1348 for (j
= 0; j
< 4; j
++)
1350 nbl
->table_elec
->data
[4*i
+j
] = nbl
->table_elec_vdw
->data
[12*i
+j
];
1352 for (j
= 0; j
< 8; j
++)
1354 nbl
->table_vdw
->data
[8*i
+j
] = nbl
->table_elec_vdw
->data
[12*i
+4+j
];
1359 /*!\brief If there's bonded interactions of type \c ftype1 or \c
1360 * ftype2 present in the topology, build an array of the number of
1361 * interactions present for each bonded interaction index found in the
1364 * \c ftype1 or \c ftype2 may be set to -1 to disable seeking for a
1365 * valid type with that parameter.
1367 * \c count will be reallocated as necessary to fit the largest bonded
1368 * interaction index found, and its current size will be returned in
1369 * \c ncount. It will contain zero for every bonded interaction index
1370 * for which no interactions are present in the topology.
1372 static void count_tables(int ftype1
, int ftype2
, const gmx_mtop_t
*mtop
,
1373 int *ncount
, int **count
)
1375 const gmx_moltype_t
*molt
;
1377 int mt
, ftype
, stride
, i
, j
, tabnr
;
1379 // Loop over all moleculetypes
1380 for (mt
= 0; mt
< mtop
->nmoltype
; mt
++)
1382 molt
= &mtop
->moltype
[mt
];
1383 // Loop over all interaction types
1384 for (ftype
= 0; ftype
< F_NRE
; ftype
++)
1386 // If the current interaction type is one of the types whose tables we're trying to count...
1387 if (ftype
== ftype1
|| ftype
== ftype2
)
1389 il
= &molt
->ilist
[ftype
];
1390 stride
= 1 + NRAL(ftype
);
1391 // ... and there are actually some interactions for this type
1392 for (i
= 0; i
< il
->nr
; i
+= stride
)
1394 // Find out which table index the user wanted
1395 tabnr
= mtop
->ffparams
.iparams
[il
->iatoms
[i
]].tab
.table
;
1398 gmx_fatal(FARGS
, "A bonded table number is smaller than 0: %d\n", tabnr
);
1400 // Make room for this index in the data structure
1401 if (tabnr
>= *ncount
)
1403 srenew(*count
, tabnr
+1);
1404 for (j
= *ncount
; j
< tabnr
+1; j
++)
1410 // Record that this table index is used and must have a valid file
1418 /*!\brief If there's bonded interactions of flavour \c tabext and type
1419 * \c ftype1 or \c ftype2 present in the topology, seek them in the
1420 * list of filenames passed to mdrun, and make bonded tables from
1423 * \c ftype1 or \c ftype2 may be set to -1 to disable seeking for a
1424 * valid type with that parameter.
1426 * A fatal error occurs if no matching filename is found.
1428 static bondedtable_t
*make_bonded_tables(FILE *fplog
,
1429 int ftype1
, int ftype2
,
1430 const gmx_mtop_t
*mtop
,
1431 const t_filenm
*tabbfnm
,
1441 count_tables(ftype1
, ftype2
, mtop
, &ncount
, &count
);
1443 // Are there any relevant tabulated bond interactions?
1447 for (int i
= 0; i
< ncount
; i
++)
1449 // Do any interactions exist that requires this table?
1452 // This pattern enforces the current requirement that
1453 // table filenames end in a characteristic sequence
1454 // before the file type extension, and avoids table 13
1455 // being recognized and used for table 1.
1456 std::string patternToFind
= gmx::formatString("_%s%d.%s", tabext
, i
, ftp2ext(efXVG
));
1457 bool madeTable
= false;
1458 for (int j
= 0; j
< tabbfnm
->nfiles
&& !madeTable
; ++j
)
1460 std::string
filename(tabbfnm
->fns
[j
]);
1461 if (gmx::endsWith(filename
, patternToFind
))
1463 // Finally read the table from the file found
1464 tab
[i
] = make_bonded_table(fplog
, tabbfnm
->fns
[j
], NRAL(ftype1
)-2);
1470 bool isPlural
= (ftype2
!= -1);
1471 gmx_fatal(FARGS
, "Tabulated interaction of type '%s%s%s' with index %d cannot be used because no table file whose name matched '%s' was passed via the gmx mdrun -tableb command-line option.",
1472 interaction_function
[ftype1
].longname
,
1473 isPlural
? "' or '" : "",
1474 isPlural
? interaction_function
[ftype2
].longname
: "",
1476 patternToFind
.c_str());
1486 void forcerec_set_ranges(t_forcerec
*fr
,
1487 int ncg_home
, int ncg_force
,
1489 int natoms_force_constr
, int natoms_f_novirsum
)
1494 /* fr->ncg_force is unused in the standard code,
1495 * but it can be useful for modified code dealing with charge groups.
1497 fr
->ncg_force
= ncg_force
;
1498 fr
->natoms_force
= natoms_force
;
1499 fr
->natoms_force_constr
= natoms_force_constr
;
1501 if (fr
->natoms_force_constr
> fr
->nalloc_force
)
1503 fr
->nalloc_force
= over_alloc_dd(fr
->natoms_force_constr
);
1506 if (fr
->bF_NoVirSum
)
1508 /* TODO: remove this + 1 when padding is properly implemented */
1509 fr
->forceBufferNoVirialSummation
->resize(natoms_f_novirsum
+ 1);
1513 static real
cutoff_inf(real cutoff
)
1517 cutoff
= GMX_CUTOFF_INF
;
1523 gmx_bool
can_use_allvsall(const t_inputrec
*ir
, gmx_bool bPrintNote
, t_commrec
*cr
, FILE *fp
)
1530 ir
->rcoulomb
== 0 &&
1532 ir
->ePBC
== epbcNONE
&&
1533 ir
->vdwtype
== evdwCUT
&&
1534 ir
->coulombtype
== eelCUT
&&
1535 ir
->efep
== efepNO
&&
1536 (ir
->implicit_solvent
== eisNO
||
1537 (ir
->implicit_solvent
== eisGBSA
&& (ir
->gb_algorithm
== egbSTILL
||
1538 ir
->gb_algorithm
== egbHCT
||
1539 ir
->gb_algorithm
== egbOBC
))) &&
1540 getenv("GMX_NO_ALLVSALL") == nullptr
1543 if (bAllvsAll
&& ir
->opts
.ngener
> 1)
1545 const char *note
= "NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
1551 fprintf(fp
, "\n%s\n", note
);
1557 if (bAllvsAll
&& fp
&& MASTER(cr
))
1559 fprintf(fp
, "\nUsing SIMD all-vs-all kernels.\n\n");
1566 gmx_bool
nbnxn_simd_supported(const gmx::MDLogger
&mdlog
,
1567 const t_inputrec
*ir
)
1569 if (ir
->vdwtype
== evdwPME
&& ir
->ljpme_combination_rule
== eljpmeLB
)
1571 /* LJ PME with LB combination rule does 7 mesh operations.
1572 * This so slow that we don't compile SIMD non-bonded kernels
1574 GMX_LOG(mdlog
.warning
).asParagraph().appendText("LJ-PME with Lorentz-Berthelot is not supported with SIMD kernels, falling back to plain C kernels");
1582 static void pick_nbnxn_kernel_cpu(const t_inputrec gmx_unused
*ir
,
1586 *kernel_type
= nbnxnk4x4_PlainC
;
1587 *ewald_excl
= ewaldexclTable
;
1591 #ifdef GMX_NBNXN_SIMD_4XN
1592 *kernel_type
= nbnxnk4xN_SIMD_4xN
;
1594 #ifdef GMX_NBNXN_SIMD_2XNN
1595 *kernel_type
= nbnxnk4xN_SIMD_2xNN
;
1598 #if defined GMX_NBNXN_SIMD_2XNN && defined GMX_NBNXN_SIMD_4XN
1599 /* We need to choose if we want 2x(N+N) or 4xN kernels.
1600 * Currently this is based on the SIMD acceleration choice,
1601 * but it might be better to decide this at runtime based on CPU.
1603 * 4xN calculates more (zero) interactions, but has less pair-search
1604 * work and much better kernel instruction scheduling.
1606 * Up till now we have only seen that on Intel Sandy/Ivy Bridge,
1607 * which doesn't have FMA, both the analytical and tabulated Ewald
1608 * kernels have similar pair rates for 4x8 and 2x(4+4), so we choose
1609 * 2x(4+4) because it results in significantly fewer pairs.
1610 * For RF, the raw pair rate of the 4x8 kernel is higher than 2x(4+4),
1611 * 10% with HT, 50% without HT. As we currently don't detect the actual
1612 * use of HT, use 4x8 to avoid a potential performance hit.
1613 * On Intel Haswell 4x8 is always faster.
1615 *kernel_type
= nbnxnk4xN_SIMD_4xN
;
1617 #if !GMX_SIMD_HAVE_FMA
1618 if (EEL_PME_EWALD(ir
->coulombtype
) ||
1619 EVDW_PME(ir
->vdwtype
))
1621 /* We have Ewald kernels without FMA (Intel Sandy/Ivy Bridge).
1622 * There are enough instructions to make 2x(4+4) efficient.
1624 *kernel_type
= nbnxnk4xN_SIMD_2xNN
;
1627 #endif /* GMX_NBNXN_SIMD_2XNN && GMX_NBNXN_SIMD_4XN */
1630 if (getenv("GMX_NBNXN_SIMD_4XN") != nullptr)
1632 #ifdef GMX_NBNXN_SIMD_4XN
1633 *kernel_type
= nbnxnk4xN_SIMD_4xN
;
1635 gmx_fatal(FARGS
, "SIMD 4xN kernels requested, but GROMACS has been compiled without support for these kernels");
1638 if (getenv("GMX_NBNXN_SIMD_2XNN") != nullptr)
1640 #ifdef GMX_NBNXN_SIMD_2XNN
1641 *kernel_type
= nbnxnk4xN_SIMD_2xNN
;
1643 gmx_fatal(FARGS
, "SIMD 2x(N+N) kernels requested, but GROMACS has been compiled without support for these kernels");
1647 /* Analytical Ewald exclusion correction is only an option in
1649 * Since table lookup's don't parallelize with SIMD, analytical
1650 * will probably always be faster for a SIMD width of 8 or more.
1651 * With FMA analytical is sometimes faster for a width if 4 as well.
1652 * On BlueGene/Q, this is faster regardless of precision.
1653 * In single precision, this is faster on Bulldozer.
1654 * On Skylake table is faster in single and double. TODO: Test 5xxx series.
1656 #if ((GMX_SIMD_REAL_WIDTH >= 8 || (GMX_SIMD_REAL_WIDTH >= 4 && GMX_SIMD_HAVE_FMA && !GMX_DOUBLE)) \
1657 && !GMX_SIMD_X86_AVX_512) || GMX_SIMD_IBM_QPX
1658 *ewald_excl
= ewaldexclAnalytical
;
1660 if (getenv("GMX_NBNXN_EWALD_TABLE") != nullptr)
1662 *ewald_excl
= ewaldexclTable
;
1664 if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != nullptr)
1666 *ewald_excl
= ewaldexclAnalytical
;
1674 const char *lookup_nbnxn_kernel_name(int kernel_type
)
1676 const char *returnvalue
= nullptr;
1677 switch (kernel_type
)
1680 returnvalue
= "not set";
1682 case nbnxnk4x4_PlainC
:
1683 returnvalue
= "plain C";
1685 case nbnxnk4xN_SIMD_4xN
:
1686 case nbnxnk4xN_SIMD_2xNN
:
1688 returnvalue
= "SIMD";
1690 returnvalue
= "not available";
1693 case nbnxnk8x8x8_GPU
: returnvalue
= "GPU"; break;
1694 case nbnxnk8x8x8_PlainC
: returnvalue
= "plain C"; break;
1698 gmx_fatal(FARGS
, "Illegal kernel type selected");
1699 returnvalue
= nullptr;
1705 static void pick_nbnxn_kernel(FILE *fp
,
1706 const gmx::MDLogger
&mdlog
,
1707 gmx_bool use_simd_kernels
,
1710 const t_inputrec
*ir
,
1713 gmx_bool bDoNonbonded
)
1715 assert(kernel_type
);
1717 *kernel_type
= nbnxnkNotSet
;
1718 *ewald_excl
= ewaldexclTable
;
1722 *kernel_type
= nbnxnk8x8x8_PlainC
;
1726 GMX_LOG(mdlog
.warning
).asParagraph().appendText("Emulating a GPU run on the CPU (slow)");
1731 *kernel_type
= nbnxnk8x8x8_GPU
;
1734 if (*kernel_type
== nbnxnkNotSet
)
1736 if (use_simd_kernels
&&
1737 nbnxn_simd_supported(mdlog
, ir
))
1739 pick_nbnxn_kernel_cpu(ir
, kernel_type
, ewald_excl
);
1743 *kernel_type
= nbnxnk4x4_PlainC
;
1747 if (bDoNonbonded
&& fp
!= nullptr)
1749 fprintf(fp
, "\nUsing %s %dx%d non-bonded kernels\n\n",
1750 lookup_nbnxn_kernel_name(*kernel_type
),
1751 nbnxn_kernel_to_cluster_i_size(*kernel_type
),
1752 nbnxn_kernel_to_cluster_j_size(*kernel_type
));
1754 if (nbnxnk4x4_PlainC
== *kernel_type
||
1755 nbnxnk8x8x8_PlainC
== *kernel_type
)
1757 GMX_LOG(mdlog
.warning
).asParagraph().appendTextFormatted(
1758 "WARNING: Using the slow %s kernels. This should\n"
1759 "not happen during routine usage on supported platforms.",
1760 lookup_nbnxn_kernel_name(*kernel_type
));
1765 gmx_bool
uses_simple_tables(int cutoff_scheme
,
1766 nonbonded_verlet_t
*nbv
,
1769 gmx_bool bUsesSimpleTables
= TRUE
;
1772 switch (cutoff_scheme
)
1775 bUsesSimpleTables
= TRUE
;
1778 assert(NULL
!= nbv
&& NULL
!= nbv
->grp
);
1779 grp_index
= (group
< 0) ? 0 : (nbv
->ngrp
- 1);
1780 bUsesSimpleTables
= nbnxn_kernel_pairlist_simple(nbv
->grp
[grp_index
].kernel_type
);
1783 gmx_incons("unimplemented");
1785 return bUsesSimpleTables
;
1788 static void init_ewald_f_table(interaction_const_t
*ic
,
1793 /* Get the Ewald table spacing based on Coulomb and/or LJ
1794 * Ewald coefficients and rtol.
1796 ic
->tabq_scale
= ewald_spline3_table_scale(ic
);
1798 if (ic
->cutoff_scheme
== ecutsVERLET
)
1800 maxr
= ic
->rcoulomb
;
1804 maxr
= std::max(ic
->rcoulomb
, rtab
);
1806 ic
->tabq_size
= static_cast<int>(maxr
*ic
->tabq_scale
) + 2;
1808 sfree_aligned(ic
->tabq_coul_FDV0
);
1809 sfree_aligned(ic
->tabq_coul_F
);
1810 sfree_aligned(ic
->tabq_coul_V
);
1812 sfree_aligned(ic
->tabq_vdw_FDV0
);
1813 sfree_aligned(ic
->tabq_vdw_F
);
1814 sfree_aligned(ic
->tabq_vdw_V
);
1816 if (EEL_PME_EWALD(ic
->eeltype
))
1818 /* Create the original table data in FDV0 */
1819 snew_aligned(ic
->tabq_coul_FDV0
, ic
->tabq_size
*4, 32);
1820 snew_aligned(ic
->tabq_coul_F
, ic
->tabq_size
, 32);
1821 snew_aligned(ic
->tabq_coul_V
, ic
->tabq_size
, 32);
1822 table_spline3_fill_ewald_lr(ic
->tabq_coul_F
, ic
->tabq_coul_V
, ic
->tabq_coul_FDV0
,
1823 ic
->tabq_size
, 1/ic
->tabq_scale
, ic
->ewaldcoeff_q
, v_q_ewald_lr
);
1826 if (EVDW_PME(ic
->vdwtype
))
1828 snew_aligned(ic
->tabq_vdw_FDV0
, ic
->tabq_size
*4, 32);
1829 snew_aligned(ic
->tabq_vdw_F
, ic
->tabq_size
, 32);
1830 snew_aligned(ic
->tabq_vdw_V
, ic
->tabq_size
, 32);
1831 table_spline3_fill_ewald_lr(ic
->tabq_vdw_F
, ic
->tabq_vdw_V
, ic
->tabq_vdw_FDV0
,
1832 ic
->tabq_size
, 1/ic
->tabq_scale
, ic
->ewaldcoeff_lj
, v_lj_ewald_lr
);
1836 void init_interaction_const_tables(FILE *fp
,
1837 interaction_const_t
*ic
,
1840 if (EEL_PME_EWALD(ic
->eeltype
) || EVDW_PME(ic
->vdwtype
))
1842 init_ewald_f_table(ic
, rtab
);
1846 fprintf(fp
, "Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
1847 1/ic
->tabq_scale
, ic
->tabq_size
);
1852 static void clear_force_switch_constants(shift_consts_t
*sc
)
1859 static void force_switch_constants(real p
,
1863 /* Here we determine the coefficient for shifting the force to zero
1864 * between distance rsw and the cut-off rc.
1865 * For a potential of r^-p, we have force p*r^-(p+1).
1866 * But to save flops we absorb p in the coefficient.
1868 * force/p = r^-(p+1) + c2*r^2 + c3*r^3
1869 * potential = r^-p + c2/3*r^3 + c3/4*r^4 + cpot
1871 sc
->c2
= ((p
+ 1)*rsw
- (p
+ 4)*rc
)/(pow(rc
, p
+ 2)*gmx::square(rc
- rsw
));
1872 sc
->c3
= -((p
+ 1)*rsw
- (p
+ 3)*rc
)/(pow(rc
, p
+ 2)*gmx::power3(rc
- rsw
));
1873 sc
->cpot
= -pow(rc
, -p
) + p
*sc
->c2
/3*gmx::power3(rc
- rsw
) + p
*sc
->c3
/4*gmx::power4(rc
- rsw
);
1876 static void potential_switch_constants(real rsw
, real rc
,
1877 switch_consts_t
*sc
)
1879 /* The switch function is 1 at rsw and 0 at rc.
1880 * The derivative and second derivate are zero at both ends.
1881 * rsw = max(r - r_switch, 0)
1882 * sw = 1 + c3*rsw^3 + c4*rsw^4 + c5*rsw^5
1883 * dsw = 3*c3*rsw^2 + 4*c4*rsw^3 + 5*c5*rsw^4
1884 * force = force*dsw - potential*sw
1887 sc
->c3
= -10/gmx::power3(rc
- rsw
);
1888 sc
->c4
= 15/gmx::power4(rc
- rsw
);
1889 sc
->c5
= -6/gmx::power5(rc
- rsw
);
1892 /*! \brief Construct interaction constants
1894 * This data is used (particularly) by search and force code for
1895 * short-range interactions. Many of these are constant for the whole
1896 * simulation; some are constant only after PME tuning completes.
1899 init_interaction_const(FILE *fp
,
1900 interaction_const_t
**interaction_const
,
1901 const t_forcerec
*fr
)
1903 interaction_const_t
*ic
;
1907 ic
->cutoff_scheme
= fr
->cutoff_scheme
;
1909 /* Just allocate something so we can free it */
1910 snew_aligned(ic
->tabq_coul_FDV0
, 16, 32);
1911 snew_aligned(ic
->tabq_coul_F
, 16, 32);
1912 snew_aligned(ic
->tabq_coul_V
, 16, 32);
1914 ic
->rlist
= fr
->rlist
;
1917 ic
->vdwtype
= fr
->vdwtype
;
1918 ic
->vdw_modifier
= fr
->vdw_modifier
;
1919 ic
->rvdw
= fr
->rvdw
;
1920 ic
->rvdw_switch
= fr
->rvdw_switch
;
1921 ic
->ewaldcoeff_lj
= fr
->ewaldcoeff_lj
;
1922 ic
->ljpme_comb_rule
= fr
->ljpme_combination_rule
;
1923 ic
->sh_lj_ewald
= 0;
1924 clear_force_switch_constants(&ic
->dispersion_shift
);
1925 clear_force_switch_constants(&ic
->repulsion_shift
);
1927 switch (ic
->vdw_modifier
)
1929 case eintmodPOTSHIFT
:
1930 /* Only shift the potential, don't touch the force */
1931 ic
->dispersion_shift
.cpot
= -1.0/gmx::power6(ic
->rvdw
);
1932 ic
->repulsion_shift
.cpot
= -1.0/gmx::power12(ic
->rvdw
);
1933 if (EVDW_PME(ic
->vdwtype
))
1937 crc2
= gmx::square(ic
->ewaldcoeff_lj
*ic
->rvdw
);
1938 ic
->sh_lj_ewald
= (std::exp(-crc2
)*(1 + crc2
+ 0.5*crc2
*crc2
) - 1)/gmx::power6(ic
->rvdw
);
1941 case eintmodFORCESWITCH
:
1942 /* Switch the force, switch and shift the potential */
1943 force_switch_constants(6.0, ic
->rvdw_switch
, ic
->rvdw
,
1944 &ic
->dispersion_shift
);
1945 force_switch_constants(12.0, ic
->rvdw_switch
, ic
->rvdw
,
1946 &ic
->repulsion_shift
);
1948 case eintmodPOTSWITCH
:
1949 /* Switch the potential and force */
1950 potential_switch_constants(ic
->rvdw_switch
, ic
->rvdw
,
1954 case eintmodEXACTCUTOFF
:
1955 /* Nothing to do here */
1958 gmx_incons("unimplemented potential modifier");
1961 ic
->sh_invrc6
= -ic
->dispersion_shift
.cpot
;
1963 /* Electrostatics */
1964 ic
->eeltype
= fr
->eeltype
;
1965 ic
->coulomb_modifier
= fr
->coulomb_modifier
;
1966 ic
->rcoulomb
= fr
->rcoulomb
;
1967 ic
->epsilon_r
= fr
->epsilon_r
;
1968 ic
->epsfac
= fr
->epsfac
;
1969 ic
->ewaldcoeff_q
= fr
->ewaldcoeff_q
;
1971 if (fr
->coulomb_modifier
== eintmodPOTSHIFT
)
1973 ic
->sh_ewald
= std::erfc(ic
->ewaldcoeff_q
*ic
->rcoulomb
);
1980 /* Reaction-field */
1981 if (EEL_RF(ic
->eeltype
))
1983 ic
->epsilon_rf
= fr
->epsilon_rf
;
1984 ic
->k_rf
= fr
->k_rf
;
1985 ic
->c_rf
= fr
->c_rf
;
1989 /* For plain cut-off we might use the reaction-field kernels */
1990 ic
->epsilon_rf
= ic
->epsilon_r
;
1992 if (fr
->coulomb_modifier
== eintmodPOTSHIFT
)
1994 ic
->c_rf
= 1/ic
->rcoulomb
;
2004 real dispersion_shift
;
2006 dispersion_shift
= ic
->dispersion_shift
.cpot
;
2007 if (EVDW_PME(ic
->vdwtype
))
2009 dispersion_shift
-= ic
->sh_lj_ewald
;
2011 fprintf(fp
, "Potential shift: LJ r^-12: %.3e r^-6: %.3e",
2012 ic
->repulsion_shift
.cpot
, dispersion_shift
);
2014 if (ic
->eeltype
== eelCUT
)
2016 fprintf(fp
, ", Coulomb %.e", -ic
->c_rf
);
2018 else if (EEL_PME(ic
->eeltype
))
2020 fprintf(fp
, ", Ewald %.3e", -ic
->sh_ewald
);
2025 *interaction_const
= ic
;
2028 /* TODO deviceInfo should be logically const, but currently
2029 * init_gpu modifies it to set up NVML support. This could
2030 * happen during the detection phase, and deviceInfo could
2031 * the become const. */
2032 static void init_nb_verlet(FILE *fp
,
2033 const gmx::MDLogger
&mdlog
,
2034 nonbonded_verlet_t
**nb_verlet
,
2035 gmx_bool bFEP_NonBonded
,
2036 const t_inputrec
*ir
,
2037 const t_forcerec
*fr
,
2038 const t_commrec
*cr
,
2039 const char *nbpu_opt
,
2040 gmx_device_info_t
*deviceInfo
)
2042 nonbonded_verlet_t
*nbv
;
2045 gmx_bool bHybridGPURun
= FALSE
;
2047 nbnxn_alloc_t
*nb_alloc
;
2048 nbnxn_free_t
*nb_free
;
2052 nbv
->emulateGpu
= (getenv("GMX_EMULATE_GPU") != nullptr);
2053 nbv
->bUseGPU
= deviceInfo
!= nullptr;
2055 GMX_RELEASE_ASSERT(!(nbv
->emulateGpu
&& nbv
->bUseGPU
), "When GPU emulation is active, there cannot be a GPU assignment");
2059 /* Use the assigned GPU. */
2060 init_gpu(mdlog
, cr
->nodeid
, deviceInfo
);
2064 nbv
->min_ci_balanced
= 0;
2066 nbv
->ngrp
= (DOMAINDECOMP(cr
) ? 2 : 1);
2067 for (i
= 0; i
< nbv
->ngrp
; i
++)
2069 nbv
->grp
[i
].nbl_lists
.nnbl
= 0;
2070 nbv
->grp
[i
].nbat
= nullptr;
2071 nbv
->grp
[i
].kernel_type
= nbnxnkNotSet
;
2073 if (i
== 0) /* local */
2075 pick_nbnxn_kernel(fp
, mdlog
, fr
->use_simd_kernels
,
2076 nbv
->bUseGPU
, nbv
->emulateGpu
, ir
,
2077 &nbv
->grp
[i
].kernel_type
,
2078 &nbv
->grp
[i
].ewald_excl
,
2081 else /* non-local */
2083 if (nbpu_opt
!= nullptr && strcmp(nbpu_opt
, "gpu_cpu") == 0)
2085 /* Use GPU for local, select a CPU kernel for non-local */
2086 pick_nbnxn_kernel(fp
, mdlog
, fr
->use_simd_kernels
,
2088 &nbv
->grp
[i
].kernel_type
,
2089 &nbv
->grp
[i
].ewald_excl
,
2092 bHybridGPURun
= TRUE
;
2096 /* Use the same kernel for local and non-local interactions */
2097 nbv
->grp
[i
].kernel_type
= nbv
->grp
[0].kernel_type
;
2098 nbv
->grp
[i
].ewald_excl
= nbv
->grp
[0].ewald_excl
;
2103 nbnxn_init_search(&nbv
->nbs
,
2104 DOMAINDECOMP(cr
) ? &cr
->dd
->nc
: nullptr,
2105 DOMAINDECOMP(cr
) ? domdec_zones(cr
->dd
) : nullptr,
2107 gmx_omp_nthreads_get(emntPairsearch
));
2109 for (i
= 0; i
< nbv
->ngrp
; i
++)
2111 gpu_set_host_malloc_and_free(nbv
->grp
[0].kernel_type
== nbnxnk8x8x8_GPU
,
2112 &nb_alloc
, &nb_free
);
2114 nbnxn_init_pairlist_set(&nbv
->grp
[i
].nbl_lists
,
2115 nbnxn_kernel_pairlist_simple(nbv
->grp
[i
].kernel_type
),
2116 /* 8x8x8 "non-simple" lists are ATM always combined */
2117 !nbnxn_kernel_pairlist_simple(nbv
->grp
[i
].kernel_type
),
2121 nbv
->grp
[0].kernel_type
!= nbv
->grp
[i
].kernel_type
)
2123 gmx_bool bSimpleList
;
2124 int enbnxninitcombrule
;
2126 bSimpleList
= nbnxn_kernel_pairlist_simple(nbv
->grp
[i
].kernel_type
);
2128 if (fr
->vdwtype
== evdwCUT
&&
2129 (fr
->vdw_modifier
== eintmodNONE
||
2130 fr
->vdw_modifier
== eintmodPOTSHIFT
) &&
2131 getenv("GMX_NO_LJ_COMB_RULE") == nullptr)
2133 /* Plain LJ cut-off: we can optimize with combination rules */
2134 enbnxninitcombrule
= enbnxninitcombruleDETECT
;
2136 else if (fr
->vdwtype
== evdwPME
)
2138 /* LJ-PME: we need to use a combination rule for the grid */
2139 if (fr
->ljpme_combination_rule
== eljpmeGEOM
)
2141 enbnxninitcombrule
= enbnxninitcombruleGEOM
;
2145 enbnxninitcombrule
= enbnxninitcombruleLB
;
2150 /* We use a full combination matrix: no rule required */
2151 enbnxninitcombrule
= enbnxninitcombruleNONE
;
2155 snew(nbv
->grp
[i
].nbat
, 1);
2156 nbnxn_atomdata_init(fp
,
2158 nbv
->grp
[i
].kernel_type
,
2160 fr
->ntype
, fr
->nbfp
,
2162 bSimpleList
? gmx_omp_nthreads_get(emntNonbonded
) : 1,
2167 nbv
->grp
[i
].nbat
= nbv
->grp
[0].nbat
;
2173 /* init the NxN GPU data; the last argument tells whether we'll have
2174 * both local and non-local NB calculation on GPU */
2175 nbnxn_gpu_init(&nbv
->gpu_nbv
,
2180 (nbv
->ngrp
> 1) && !bHybridGPURun
);
2182 /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
2183 * also sharing texture references. To keep the code simple, we don't
2184 * treat texture references as shared resources, but this means that
2185 * the coulomb_tab and nbfp texture refs will get updated by multiple threads.
2186 * Hence, to ensure that the non-bonded kernels don't start before all
2187 * texture binding operations are finished, we need to wait for all ranks
2188 * to arrive here before continuing.
2190 * Note that we could omit this barrier if GPUs are not shared (or
2191 * texture objects are used), but as this is initialization code, there
2192 * is no point in complicating things.
2199 #endif /* GMX_THREAD_MPI */
2201 if ((env
= getenv("GMX_NB_MIN_CI")) != nullptr)
2205 nbv
->min_ci_balanced
= strtol(env
, &end
, 10);
2206 if (!end
|| (*end
!= 0) || nbv
->min_ci_balanced
< 0)
2208 gmx_fatal(FARGS
, "Invalid value passed in GMX_NB_MIN_CI=%s, non-negative integer required", env
);
2213 fprintf(debug
, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
2214 nbv
->min_ci_balanced
);
2219 nbv
->min_ci_balanced
= nbnxn_gpu_min_ci_balanced(nbv
->gpu_nbv
);
2222 fprintf(debug
, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
2223 nbv
->min_ci_balanced
);
2232 gmx_bool
usingGpu(nonbonded_verlet_t
*nbv
)
2234 return nbv
!= nullptr && nbv
->bUseGPU
;
2237 void init_forcerec(FILE *fp
,
2238 const gmx::MDLogger
&mdlog
,
2241 const t_inputrec
*ir
,
2242 const gmx_mtop_t
*mtop
,
2243 const t_commrec
*cr
,
2247 const t_filenm
*tabbfnm
,
2248 const char *nbpu_opt
,
2249 gmx_device_info_t
*deviceInfo
,
2250 gmx_bool bNoSolvOpt
,
2253 int i
, m
, negp_pp
, negptable
, egi
, egj
;
2258 gmx_bool bGenericKernelOnly
;
2259 gmx_bool needGroupSchemeTables
, bSomeNormalNbListsAreInUse
;
2260 gmx_bool bFEP_NonBonded
;
2261 int *nm_ind
, egp_flags
;
2263 /* By default we turn SIMD kernels on, but it might be turned off further down... */
2264 fr
->use_simd_kernels
= TRUE
;
2266 fr
->bDomDec
= DOMAINDECOMP(cr
);
2268 if (check_box(ir
->ePBC
, box
))
2270 gmx_fatal(FARGS
, check_box(ir
->ePBC
, box
));
2273 /* Test particle insertion ? */
2276 /* Set to the size of the molecule to be inserted (the last one) */
2277 /* Because of old style topologies, we have to use the last cg
2278 * instead of the last molecule type.
2280 cgs
= &mtop
->moltype
[mtop
->molblock
[mtop
->nmolblock
-1].type
].cgs
;
2281 fr
->n_tpi
= cgs
->index
[cgs
->nr
] - cgs
->index
[cgs
->nr
-1];
2282 if (fr
->n_tpi
!= mtop
->mols
.index
[mtop
->mols
.nr
] - mtop
->mols
.index
[mtop
->mols
.nr
-1])
2284 gmx_fatal(FARGS
, "The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
2292 if (ir
->coulombtype
== eelRF_NEC_UNSUPPORTED
)
2294 gmx_fatal(FARGS
, "%s electrostatics is no longer supported",
2295 eel_names
[ir
->coulombtype
]);
2300 gmx_fatal(FARGS
, "AdResS simulations are no longer supported");
2302 if (ir
->useTwinRange
)
2304 gmx_fatal(FARGS
, "Twin-range simulations are no longer supported");
2306 /* Copy the user determined parameters */
2307 fr
->userint1
= ir
->userint1
;
2308 fr
->userint2
= ir
->userint2
;
2309 fr
->userint3
= ir
->userint3
;
2310 fr
->userint4
= ir
->userint4
;
2311 fr
->userreal1
= ir
->userreal1
;
2312 fr
->userreal2
= ir
->userreal2
;
2313 fr
->userreal3
= ir
->userreal3
;
2314 fr
->userreal4
= ir
->userreal4
;
2317 fr
->fc_stepsize
= ir
->fc_stepsize
;
2320 fr
->efep
= ir
->efep
;
2321 fr
->sc_alphavdw
= ir
->fepvals
->sc_alpha
;
2322 if (ir
->fepvals
->bScCoul
)
2324 fr
->sc_alphacoul
= ir
->fepvals
->sc_alpha
;
2325 fr
->sc_sigma6_min
= gmx::power6(ir
->fepvals
->sc_sigma_min
);
2329 fr
->sc_alphacoul
= 0;
2330 fr
->sc_sigma6_min
= 0; /* only needed when bScCoul is on */
2332 fr
->sc_power
= ir
->fepvals
->sc_power
;
2333 fr
->sc_r_power
= ir
->fepvals
->sc_r_power
;
2334 fr
->sc_sigma6_def
= gmx::power6(ir
->fepvals
->sc_sigma
);
2336 env
= getenv("GMX_SCSIGMA_MIN");
2340 sscanf(env
, "%20lf", &dbl
);
2341 fr
->sc_sigma6_min
= gmx::power6(dbl
);
2344 fprintf(fp
, "Setting the minimum soft core sigma to %g nm\n", dbl
);
2348 fr
->bNonbonded
= TRUE
;
2349 if (getenv("GMX_NO_NONBONDED") != nullptr)
2351 /* turn off non-bonded calculations */
2352 fr
->bNonbonded
= FALSE
;
2353 GMX_LOG(mdlog
.warning
).asParagraph().appendText(
2354 "Found environment variable GMX_NO_NONBONDED.\n"
2355 "Disabling nonbonded calculations.");
    bGenericKernelOnly = FALSE;

    /* We now check in the NS code whether a particular combination of interactions
     * can be used with water optimization, and disable it if that is not the case.
     */

    if (getenv("GMX_NB_GENERIC") != nullptr)
    {
        if (fp != nullptr)
        {
            fprintf(fp,
                    "Found environment variable GMX_NB_GENERIC.\n"
                    "Disabling all interaction-specific nonbonded kernels, will only\n"
                    "use the slow generic ones in src/gmxlib/nonbonded/nb_generic.c\n\n");
        }
        bGenericKernelOnly = TRUE;
    }
    if (bGenericKernelOnly == TRUE)
    {
        bNoSolvOpt = TRUE;
    }
    if ( (getenv("GMX_DISABLE_SIMD_KERNELS") != nullptr) || (getenv("GMX_NOOPTIMIZEDKERNELS") != nullptr) )
    {
        fr->use_simd_kernels = FALSE;
        if (fp != nullptr)
        {
            fprintf(fp,
                    "\nFound environment variable GMX_DISABLE_SIMD_KERNELS.\n"
                    "Disabling the usage of any SIMD-specific non-bonded & bonded kernel routines\n"
                    "(e.g. SSE2/SSE4.1/AVX).\n\n");
        }
    }
    fr->bBHAM = (mtop->ffparams.functype[0] == F_BHAM);

    /* Check if we can/should do all-vs-all kernels */
    fr->bAllvsAll       = can_use_allvsall(ir, FALSE, nullptr, nullptr);
    fr->AllvsAll_work   = nullptr;
    fr->AllvsAll_workgb = nullptr;

    /* All-vs-all kernels have not been implemented in 4.6 and later.
     * See Redmine #1249. */
    if (fr->bAllvsAll)
    {
        fr->bAllvsAll = FALSE;
        if (fp != nullptr)
        {
            fprintf(fp,
                    "\nYour simulation settings would have triggered the efficient all-vs-all\n"
                    "kernels in GROMACS 4.5, but these have not been implemented in GROMACS\n"
                    "4.6 and 5.x. If performance is important, please use GROMACS 4.5.7\n"
                    "or try cutoff-scheme = Verlet.\n\n");
        }
    }
    /* Neighbour searching stuff */
    fr->cutoff_scheme = ir->cutoff_scheme;
    fr->bGrid         = (ir->ns_type == ensGRID);
    fr->ePBC          = ir->ePBC;

    if (fr->cutoff_scheme == ecutsGROUP)
    {
        const char *note = "NOTE: This file uses the deprecated 'group' cutoff_scheme. This will be\n"
            "removed in a future release when 'verlet' supports all interaction forms.\n";

        if (MASTER(cr))
        {
            fprintf(stderr, "\n%s\n", note);
        }
        if (fp != nullptr)
        {
            fprintf(fp, "\n%s\n", note);
        }
    }
    /* Determine if we will do PBC for distances in bonded interactions */
    if (fr->ePBC == epbcNONE)
    {
        fr->bMolPBC = FALSE;
    }
    else
    {
        if (!DOMAINDECOMP(cr))
        {
            gmx_bool bSHAKE;

            bSHAKE = (ir->eConstrAlg == econtSHAKE &&
                      (gmx_mtop_ftype_count(mtop, F_CONSTR) > 0 ||
                       gmx_mtop_ftype_count(mtop, F_CONSTRNC) > 0));

            /* The group cut-off scheme and SHAKE assume charge groups
             * are whole, but not using molpbc is faster in most cases.
             * With intermolecular interactions we need PBC for calculating
             * distances between atoms in different molecules.
             */
            if ((fr->cutoff_scheme == ecutsGROUP || bSHAKE) &&
                !mtop->bIntermolecularInteractions)
            {
                fr->bMolPBC = ir->bPeriodicMols;

                if (bSHAKE && fr->bMolPBC)
                {
                    gmx_fatal(FARGS, "SHAKE is not supported with periodic molecules");
                }
            }

            if (getenv("GMX_USE_GRAPH") != nullptr)
            {
                fr->bMolPBC = FALSE;
                GMX_LOG(mdlog.warning).asParagraph().appendText("GMX_USE_GRAPH is set, using the graph for bonded interactions");

                if (mtop->bIntermolecularInteractions)
                {
                    GMX_LOG(mdlog.warning).asParagraph().appendText("WARNING: Molecules linked by intermolecular interactions have to reside in the same periodic image, otherwise artifacts will occur!");
                }
            }

            if (bSHAKE && fr->bMolPBC)
            {
                gmx_fatal(FARGS, "SHAKE is not properly supported with intermolecular interactions. For short simulations where linked molecules remain in the same periodic image, the environment variable GMX_USE_GRAPH can be used to override this check.\n");
            }
        }
        else
        {
            fr->bMolPBC = dd_bonded_molpbc(cr->dd, fr->ePBC);
        }
    }
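    /* At this point fr->bMolPBC == TRUE means that bonded interactions will
     * compute distances with PBC applied, rather than relying on a
     * connectivity graph that keeps each molecule whole. */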
    fr->bGB = (ir->implicit_solvent == eisGBSA);

    fr->rc_scaling = ir->refcoord_scaling;
    copy_rvec(ir->posres_com, fr->posres_com);
    copy_rvec(ir->posres_comB, fr->posres_comB);
    fr->rlist                  = cutoff_inf(ir->rlist);
    fr->eeltype                = ir->coulombtype;
    fr->vdwtype                = ir->vdwtype;
    fr->ljpme_combination_rule = ir->ljpme_combination_rule;

    fr->coulomb_modifier = ir->coulomb_modifier;
    fr->vdw_modifier     = ir->vdw_modifier;
    /* Electrostatics: Translate from interaction-setting-in-mdp-file to kernel interaction format */
    switch (fr->eeltype)
    {
        case eelCUT:
            fr->nbkernel_elec_interaction = (fr->bGB) ? GMX_NBKERNEL_ELEC_GENERALIZEDBORN : GMX_NBKERNEL_ELEC_COULOMB;
            break;

        case eelRF:
        case eelGRF:
            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
            break;

        case eelRF_ZERO:
            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
            fr->coulomb_modifier          = eintmodEXACTCUTOFF;
            break;

        case eelSWITCH:
        case eelSHIFT:
        case eelUSER:
        case eelENCADSHIFT:
        case eelPMESWITCH:
        case eelPMEUSER:
        case eelPMEUSERSWITCH:
            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
            break;

        case eelPME:
        case eelP3M_AD:
        case eelEWALD:
            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_EWALD;
            break;

        default:
            gmx_fatal(FARGS, "Unsupported electrostatic interaction: %s", eel_names[fr->eeltype]);
            break;
    }
    /* Vdw: Translate from mdp settings to kernel format */
    switch (fr->vdwtype)
    {
        case evdwCUT:
            if (fr->bBHAM)
            {
                fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_BUCKINGHAM;
            }
            else
            {
                fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LENNARDJONES;
            }
            break;
        case evdwPME:
            fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LJEWALD;
            break;

        case evdwSWITCH:
        case evdwSHIFT:
        case evdwUSER:
        case evdwENCADSHIFT:
            fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
            break;

        default:
            gmx_fatal(FARGS, "Unsupported vdw interaction: %s", evdw_names[fr->vdwtype]);
            break;
    }
    /* These start out identical to ir, but might be altered if we e.g. tabulate the interaction in the kernel */
    fr->nbkernel_elec_modifier = fr->coulomb_modifier;
    fr->nbkernel_vdw_modifier  = fr->vdw_modifier;

    fr->rvdw            = cutoff_inf(ir->rvdw);
    fr->rvdw_switch     = ir->rvdw_switch;
    fr->rcoulomb        = cutoff_inf(ir->rcoulomb);
    fr->rcoulomb_switch = ir->rcoulomb_switch;

    fr->bEwald = EEL_PME_EWALD(fr->eeltype);

    fr->reppow = mtop->ffparams.reppow;
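    /* reppow is the Lennard-Jones repulsion power taken from the force field;
     * it is 12 for standard LJ, and several of the analytical kernels below
     * only support that value. */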
    if (ir->cutoff_scheme == ecutsGROUP)
    {
        fr->bvdwtab  = ((fr->vdwtype != evdwCUT || !gmx_within_tol(fr->reppow, 12.0, 10*GMX_DOUBLE_EPS))
                        && !EVDW_PME(fr->vdwtype));
        /* We have special kernels for standard Ewald and PME, but the pme-switch ones are tabulated above */
        fr->bcoultab = !(fr->eeltype == eelCUT ||
                         fr->eeltype == eelEWALD ||
                         fr->eeltype == eelPME ||
                         fr->eeltype == eelRF ||
                         fr->eeltype == eelRF_ZERO);

        /* If the user absolutely wants different switch/shift settings for coul/vdw, it is likely
         * going to be faster to tabulate the interaction than calling the generic kernel.
         * However, if generic kernels have been requested we keep things analytically.
         */
        if (fr->nbkernel_elec_modifier == eintmodPOTSWITCH &&
            fr->nbkernel_vdw_modifier == eintmodPOTSWITCH &&
            bGenericKernelOnly == FALSE)
        {
            if ((fr->rcoulomb_switch != fr->rvdw_switch) || (fr->rcoulomb != fr->rvdw))
            {
                fr->bcoultab = TRUE;
                /* Once we tabulate electrostatics, we can use the switch function for LJ,
                 * which would otherwise need two tables.
                 */
            }
        }
        else if ((fr->nbkernel_elec_modifier == eintmodPOTSHIFT && fr->nbkernel_vdw_modifier == eintmodPOTSHIFT) ||
                 ((fr->nbkernel_elec_interaction == GMX_NBKERNEL_ELEC_REACTIONFIELD &&
                   fr->nbkernel_elec_modifier == eintmodEXACTCUTOFF &&
                   (fr->nbkernel_vdw_modifier == eintmodPOTSWITCH || fr->nbkernel_vdw_modifier == eintmodPOTSHIFT))))
        {
            if ((fr->rcoulomb != fr->rvdw) && (bGenericKernelOnly == FALSE))
            {
                fr->bcoultab = TRUE;
            }
        }

        if (fr->nbkernel_elec_modifier == eintmodFORCESWITCH)
        {
            fr->bcoultab = TRUE;
        }
        if (fr->nbkernel_vdw_modifier == eintmodFORCESWITCH)
        {
            fr->bvdwtab = TRUE;
        }

        if (getenv("GMX_REQUIRE_TABLES"))
        {
            fr->bvdwtab  = TRUE;
            fr->bcoultab = TRUE;
        }

        if (fp)
        {
            fprintf(fp, "Table routines are used for coulomb: %s\n",
                    gmx::boolToString(fr->bcoultab));
            fprintf(fp, "Table routines are used for vdw:     %s\n",
                    gmx::boolToString(fr->bvdwtab));
        }

        if (fr->bvdwtab == TRUE)
        {
            fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
            fr->nbkernel_vdw_modifier    = eintmodNONE;
        }
        if (fr->bcoultab == TRUE)
        {
            fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
            fr->nbkernel_elec_modifier    = eintmodNONE;
        }
    }
    if (ir->cutoff_scheme == ecutsVERLET)
    {
        if (!gmx_within_tol(fr->reppow, 12.0, 10*GMX_DOUBLE_EPS))
        {
            gmx_fatal(FARGS, "Cut-off scheme %s only supports LJ repulsion power 12", ecutscheme_names[ir->cutoff_scheme]);
        }
        fr->bvdwtab  = FALSE;
        fr->bcoultab = FALSE;
    }
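    /* With the Verlet scheme the pair interactions are evaluated analytically
     * (Ewald contributions use the tables set up later in
     * init_interaction_const_tables), so the group-scheme pair tables are
     * explicitly disabled here. */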
    /* This now calculates sum for q and C6 */
    set_chargesum(fp, fr, mtop);
    /* Tables are used for direct ewald sum */
    if (fr->bEwald)
    {
        if (EEL_PME(ir->coulombtype))
        {
            if (fp)
            {
                fprintf(fp, "Will do PME sum in reciprocal space for electrostatic interactions.\n");
            }
            if (ir->coulombtype == eelP3M_AD)
            {
                please_cite(fp, "Hockney1988");
                please_cite(fp, "Ballenegger2012");
            }
            else
            {
                please_cite(fp, "Essmann95a");
            }

            if (ir->ewald_geometry == eewg3DC)
            {
                bool haveNetCharge = (fabs(fr->qsum[0]) > 1e-4 ||
                                      fabs(fr->qsum[1]) > 1e-4);
                if (fp)
                {
                    fprintf(fp, "Using the Ewald3DC correction for systems with a slab geometry%s.\n",
                            haveNetCharge ? " and net charge" : "");
                }
                please_cite(fp, "In-Chul99a");
                if (haveNetCharge)
                {
                    please_cite(fp, "Ballenegger2009");
                }
            }
        }

        fr->ewaldcoeff_q = calc_ewaldcoeff_q(ir->rcoulomb, ir->ewald_rtol);
        init_ewald_tab(&(fr->ewald_table), ir, fp);
        if (fp)
        {
            fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for Ewald\n",
                    1/fr->ewaldcoeff_q);
        }
    }
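    /* The Ewald splitting coefficient ewaldcoeff_q (beta) is chosen so that
     * the real-space Ewald error at rcoulomb matches ewald_rtol; 1/beta is
     * the width of the screening Gaussians reported above. */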
    if (EVDW_PME(ir->vdwtype))
    {
        if (fp)
        {
            fprintf(fp, "Will do PME sum in reciprocal space for LJ dispersion interactions.\n");
        }
        please_cite(fp, "Essmann95a");
        fr->ewaldcoeff_lj = calc_ewaldcoeff_lj(ir->rvdw, ir->ewald_rtol_lj);
        if (fp)
        {
            fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for LJ Ewald\n",
                    1/fr->ewaldcoeff_lj);
        }
    }
    /* Electrostatics */
    fr->epsilon_r  = ir->epsilon_r;
    fr->epsilon_rf = ir->epsilon_rf;
    fr->fudgeQQ    = mtop->ffparams.fudgeQQ;
    /* Parameters for generalized RF */
    fr->zsquare = 0.0;
    fr->temp    = 0.0;

    if (fr->eeltype == eelGRF)
    {
        init_generalized_rf(fp, mtop, ir, fr);
    }
    fr->bF_NoVirSum = (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype) ||
                       fr->forceProviders->hasForcesWithoutVirialContribution() ||
                       gmx_mtop_ftype_count(mtop, F_POSRES) > 0 ||
                       gmx_mtop_ftype_count(mtop, F_FBPOSRES) > 0);

    if (fr->bF_NoVirSum)
    {
        fr->forceBufferNoVirialSummation = new PaddedRVecVector;
    }
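    /* Forces whose virial contribution is not obtained from the plain
     * position-times-force summation (e.g. PME reciprocal space and position
     * restraints) are accumulated in this separate, padded buffer. */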
    if (fr->cutoff_scheme == ecutsGROUP &&
        ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr))
    {
        /* Count the total number of charge groups */
        fr->cg_nalloc = ncg_mtop(mtop);
        srenew(fr->cg_cm, fr->cg_nalloc);
    }
    if (fr->shift_vec == nullptr)
    {
        snew(fr->shift_vec, SHIFTS);
    }

    if (fr->fshift == nullptr)
    {
        snew(fr->fshift, SHIFTS);
    }
    if (fr->nbfp == nullptr)
    {
        fr->ntype = mtop->ffparams.atnr;
        fr->nbfp  = mk_nbfp(&mtop->ffparams, fr->bBHAM);
        if (EVDW_PME(fr->vdwtype))
        {
            fr->ljpme_c6grid = make_ljpme_c6grid(&mtop->ffparams, fr);
        }
    }
    /* Copy the energy group exclusions */
    fr->egp_flags = ir->opts.egp_flags;
    /* Van der Waals stuff */
    if ((fr->vdwtype != evdwCUT) && (fr->vdwtype != evdwUSER) && !fr->bBHAM)
    {
        if (fr->rvdw_switch >= fr->rvdw)
        {
            gmx_fatal(FARGS, "rvdw_switch (%f) must be < rvdw (%f)",
                      fr->rvdw_switch, fr->rvdw);
        }
        if (fp)
        {
            fprintf(fp, "Using %s Lennard-Jones, switch between %g and %g nm\n",
                    (fr->eeltype == eelSWITCH) ? "switched" : "shifted",
                    fr->rvdw_switch, fr->rvdw);
        }
    }
    if (fr->bBHAM && EVDW_PME(fr->vdwtype))
    {
        gmx_fatal(FARGS, "LJ PME not supported with Buckingham");
    }

    if (fr->bBHAM && (fr->vdwtype == evdwSHIFT || fr->vdwtype == evdwSWITCH))
    {
        gmx_fatal(FARGS, "Switch/shift interaction not supported with Buckingham");
    }

    if (fr->bBHAM && fr->cutoff_scheme == ecutsVERLET)
    {
        gmx_fatal(FARGS, "Verlet cutoff-scheme is not supported with Buckingham");
    }
    if (fp)
    {
        fprintf(fp, "Cut-offs: NS: %g Coulomb: %g %s: %g\n",
                fr->rlist, fr->rcoulomb, fr->bBHAM ? "BHAM" : "LJ", fr->rvdw);
    }
    fr->eDispCorr                       = ir->eDispCorr;
    fr->numAtomsForDispersionCorrection = mtop->natoms;
    if (ir->eDispCorr != edispcNO)
    {
        set_avcsixtwelve(fp, fr, mtop);
    }

    if (fr->bBHAM)
    {
        set_bham_b_max(fp, fr, mtop);
    }
    fr->gb_epsilon_solvent = ir->gb_epsilon_solvent;

    /* Copy the GBSA data (radius, volume and surftens for each
     * atomtype) from the topology atomtype section to forcerec.
     */
    snew(fr->atype_radius, fr->ntype);
    snew(fr->atype_vol, fr->ntype);
    snew(fr->atype_surftens, fr->ntype);
    snew(fr->atype_gb_radius, fr->ntype);
    snew(fr->atype_S_hct, fr->ntype);

    if (mtop->atomtypes.nr > 0)
    {
        for (i = 0; i < fr->ntype; i++)
        {
            fr->atype_radius[i] = mtop->atomtypes.radius[i];
        }
        for (i = 0; i < fr->ntype; i++)
        {
            fr->atype_vol[i] = mtop->atomtypes.vol[i];
        }
        for (i = 0; i < fr->ntype; i++)
        {
            fr->atype_surftens[i] = mtop->atomtypes.surftens[i];
        }
        for (i = 0; i < fr->ntype; i++)
        {
            fr->atype_gb_radius[i] = mtop->atomtypes.gb_radius[i];
        }
        for (i = 0; i < fr->ntype; i++)
        {
            fr->atype_S_hct[i] = mtop->atomtypes.S_hct[i];
        }
    }

    /* Generate the GB table if needed */
    if (fr->bGB)
    {
#if GMX_DOUBLE
        fr->gbtabscale = 2000;
#else
        fr->gbtabscale = 500;
#endif

        fr->gbtab = make_gb_table(fr);

        init_gb(&fr->born, fr, ir, mtop, ir->gb_algorithm);

        /* Copy local gb data (for dd, this is done in dd_partition_system) */
        if (!DOMAINDECOMP(cr))
        {
            make_local_gb(cr, fr->born, ir->gb_algorithm);
        }
    }
    /* Set the charge scaling */
    if (fr->epsilon_r != 0)
    {
        fr->epsfac = ONE_4PI_EPS0/fr->epsilon_r;
    }
    else
    {
        /* eps = 0 is infinite dielectric: no coulomb interactions */
        fr->epsfac = 0;
    }
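    /* ONE_4PI_EPS0 is 1/(4 pi eps_0) in GROMACS units: it converts q_i*q_j/r
     * with charges in e and distances in nm into an energy in kJ/mol;
     * dividing by epsilon_r applies the relative dielectric constant. */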
    /* Reaction field constants */
    if (EEL_RF(fr->eeltype))
    {
        calc_rffac(fp, fr->eeltype, fr->epsilon_r, fr->epsilon_rf,
                   fr->rcoulomb, fr->temp, fr->zsquare, box,
                   &fr->kappa, &fr->k_rf, &fr->c_rf);
    }
    /* Construct tables for the group scheme. A little unnecessary to
     * make both vdw and coul tables sometimes, but what the
     * heck. Note that both cutoff schemes construct Ewald tables in
     * init_interaction_const_tables. */
    needGroupSchemeTables = (ir->cutoff_scheme == ecutsGROUP &&
                             (fr->bcoultab || fr->bvdwtab));

    negp_pp   = ir->opts.ngener - ir->nwall;
    negptable = 0;
    if (!needGroupSchemeTables)
    {
        bSomeNormalNbListsAreInUse = TRUE;
        fr->nnblists               = 1;
    }
    else
    {
        bSomeNormalNbListsAreInUse = FALSE;
        for (egi = 0; egi < negp_pp; egi++)
        {
            for (egj = egi; egj < negp_pp; egj++)
            {
                egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
                if (!(egp_flags & EGP_EXCL))
                {
                    if (egp_flags & EGP_TABLE)
                    {
                        negptable++;
                    }
                    else
                    {
                        bSomeNormalNbListsAreInUse = TRUE;
                    }
                }
            }
        }
        if (bSomeNormalNbListsAreInUse)
        {
            fr->nnblists = negptable + 1;
        }
        else
        {
            fr->nnblists = negptable;
        }
        if (fr->nnblists > 1)
        {
            snew(fr->gid2nblists, ir->opts.ngener*ir->opts.ngener);
        }
    }
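    /* nnblists counts the pair tables needed: one per tabulated energy-group
     * pair, plus one default list when any non-tabulated pair remains;
     * gid2nblists maps each energy-group pair id to its table index. */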
    snew(fr->nblists, fr->nnblists);

    /* This code automatically gives table length tabext without cut-off's,
     * in that case grompp should already have checked that we do not need
     * normal tables and we only generate tables for 1-4 interactions.
     */
    rtab = ir->rlist + ir->tabext;
    if (needGroupSchemeTables)
    {
        /* make tables for ordinary interactions */
        if (bSomeNormalNbListsAreInUse)
        {
            make_nbf_tables(fp, fr, rtab, tabfn, nullptr, nullptr, &fr->nblists[0]);
            m = 1;
        }
        else
        {
            m = 0;
        }

        /* Read the special tables for certain energy group pairs */
        nm_ind = mtop->groups.grps[egcENER].nm_ind;
        for (egi = 0; egi < negp_pp; egi++)
        {
            for (egj = egi; egj < negp_pp; egj++)
            {
                egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
                if ((egp_flags & EGP_TABLE) && !(egp_flags & EGP_EXCL))
                {
                    if (fr->nnblists > 1)
                    {
                        fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = m;
                    }
                    /* Read the table file with the two energy groups names appended */
                    make_nbf_tables(fp, fr, rtab, tabfn,
                                    *mtop->groups.grpname[nm_ind[egi]],
                                    *mtop->groups.grpname[nm_ind[egj]],
                                    &fr->nblists[m]);
                    m++;
                }
                else if (fr->nnblists > 1)
                {
                    fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = 0;
                }
            }
        }
    }
    /* Tables might not be used for the potential modifier
     * interactions per se, but we still need them to evaluate
     * switch/shift dispersion corrections in this case. */
    if (fr->eDispCorr != edispcNO)
    {
        fr->dispersionCorrectionTable = makeDispersionCorrectionTable(fp, fr, rtab, tabfn);
    }
    /* We want to use unmodified tables for 1-4 coulombic
     * interactions, so we must in general have an extra set of
     * tables. */
    if (gmx_mtop_ftype_count(mtop, F_LJ14) > 0 ||
        gmx_mtop_ftype_count(mtop, F_LJC14_Q) > 0 ||
        gmx_mtop_ftype_count(mtop, F_LJC_PAIRS_NB) > 0)
    {
        fr->pairsTable = make_tables(fp, fr, tabpfn, rtab,
                                     GMX_MAKETABLES_14ONLY);
    }
    fr->nwall = ir->nwall;
    if (ir->nwall && ir->wall_type == ewtTABLE)
    {
        make_wall_tables(fp, ir, tabfn, &mtop->groups, fr);
    }
    if (fcd && tabbfnm)
    {
        // Need to catch std::bad_alloc
        // TODO Don't need to catch this here, when merging with master branch
        try
        {
            fcd->bondtab  = make_bonded_tables(fp,
                                               F_TABBONDS, F_TABBONDSNC,
                                               mtop, tabbfnm, "b");
            fcd->angletab = make_bonded_tables(fp,
                                               F_TABANGLES, -1,
                                               mtop, tabbfnm, "a");
            fcd->dihtab   = make_bonded_tables(fp,
                                               F_TABDIHS, -1,
                                               mtop, tabbfnm, "d");
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
    else
    {
        if (debug)
        {
            fprintf(debug, "No fcdata or table file name passed, can not read table, can not do bonded interactions\n");
        }
    }
    /* QM/MM initialization if requested */
    if (ir->bQMMM)
    {
        fprintf(stderr, "QM/MM calculation requested.\n");
    }

    fr->bQMMM = ir->bQMMM;
    fr->qr    = mk_QMMMrec();
    /* Set all the static charge group info */
    fr->cginfo_mb = init_cginfo_mb(fp, mtop, fr, bNoSolvOpt,
                                   &fr->bExcl_IntraCGAll_InterCGNone);
    if (DOMAINDECOMP(cr))
    {
        fr->cginfo = nullptr;
    }
    else
    {
        fr->cginfo = cginfo_expand(mtop->nmolblock, fr->cginfo_mb);
    }

    if (!DOMAINDECOMP(cr))
    {
        forcerec_set_ranges(fr, ncg_mtop(mtop), ncg_mtop(mtop),
                            mtop->natoms, mtop->natoms, mtop->natoms);
    }

    fr->print_force = print_force;
    /* coarse load balancing vars */

    /* Initialize neighbor search */
    init_ns(fp, cr, fr->ns, fr, mtop);

    if (cr->duty & DUTY_PP)
    {
        gmx_nonbonded_setup(fr, bGenericKernelOnly);
    }

    /* Initialize the thread working data for bonded interactions */
    init_bonded_threading(fp, mtop->groups.grps[egcENER].nr,
                          &fr->bonded_threading);

    fr->nthread_ewc = gmx_omp_nthreads_get(emntBonded);
    snew(fr->ewc_t, fr->nthread_ewc);
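    /* ewc_t holds one Ewald charge-correction work buffer per bonded OpenMP
     * thread, so the threads can accumulate their corrections without
     * write conflicts. */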
    /* fr->ic is used both by verlet and group kernels (to some extent) now */
    init_interaction_const(fp, &fr->ic, fr);
    init_interaction_const_tables(fp, fr->ic, rtab);
    if (fr->cutoff_scheme == ecutsVERLET)
    {
        // We checked the cut-offs in grompp, but double-check here.
        // We have PME+LJcutoff kernels for rcoulomb>rvdw.
        if (EEL_PME_EWALD(ir->coulombtype) && ir->vdwtype == evdwCUT)
        {
            GMX_RELEASE_ASSERT(ir->rcoulomb >= ir->rvdw, "With Verlet lists and PME we should have rcoulomb>=rvdw");
        }
        else
        {
            GMX_RELEASE_ASSERT(ir->rcoulomb == ir->rvdw, "With Verlet lists and no PME rcoulomb and rvdw should be identical");
        }

        init_nb_verlet(fp, mdlog, &fr->nbv, bFEP_NonBonded, ir, fr, cr, nbpu_opt, deviceInfo);
    }

    if (ir->eDispCorr != edispcNO)
    {
        calc_enervirdiff(fp, ir->eDispCorr, fr);
    }
}
#define pr_real(fp, r) fprintf(fp, "%s: %e\n", #r, r)
#define pr_int(fp, i)  fprintf((fp), "%s: %d\n", #i, i)
#define pr_bool(fp, b) fprintf((fp), "%s: %s\n", #b, gmx::boolToString(b))

void pr_forcerec(FILE *fp, t_forcerec *fr)
{
    int i;

    pr_real(fp, fr->rlist);
    pr_real(fp, fr->rcoulomb);
    pr_real(fp, fr->fudgeQQ);
    pr_bool(fp, fr->bGrid);
    /*pr_int(fp,fr->cg0);
       pr_int(fp,fr->hcg);*/
    for (i = 0; i < fr->nnblists; i++)
    {
        pr_int(fp, fr->nblists[i].table_elec_vdw->n);
    }
    pr_real(fp, fr->rcoulomb_switch);
    pr_real(fp, fr->rcoulomb);
}
/* Frees GPU memory and destroys the GPU context.
 *
 * Note that this function needs to be called even if GPUs are not used
 * in this run because the PME ranks have no knowledge of whether GPUs
 * are used or not, but all ranks need to enter the barrier below.
 */
void free_gpu_resources(const t_forcerec        *fr,
                        const t_commrec         *cr,
                        const gmx_device_info_t *deviceInfo)
{
    gmx_bool bIsPPrankUsingGPU;
    char     gpu_err_str[STRLEN];

    bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr && fr->nbv && fr->nbv->bUseGPU;

    if (bIsPPrankUsingGPU)
    {
        /* free nbnxn data in GPU memory */
        nbnxn_gpu_free(fr->nbv->gpu_nbv);
        /* stop the GPU profiler (only CUDA) */
        stopGpuProfiler();
    }

#if GMX_THREAD_MPI
    /* With tMPI we need to wait for all ranks to finish deallocation before
     * destroying the CUDA context in free_gpu() as some tMPI ranks may be sharing
     * a GPU and its context.
     *
     * This is not a concern in OpenCL where we use one context per rank which
     * is freed in nbnxn_gpu_free().
     *
     * Note: it is safe to not call the barrier on the ranks which do not use GPU,
     * but it is easier and more futureproof to call it on the whole node.
     */
    if (PAR(cr) || MULTISIM(cr))
    {
        gmx_barrier_physical_node(cr);
    }
#endif  /* GMX_THREAD_MPI */

    if (bIsPPrankUsingGPU)
    {
        /* uninitialize GPU (by destroying the context) */
        if (!free_cuda_gpu(deviceInfo, gpu_err_str))
        {
            gmx_warning("On rank %d failed to free GPU #%d: %s",
                        cr->nodeid, get_current_cuda_gpu_device_id(), gpu_err_str);
        }
    }
}