 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
#include <math.h>
#include <stdio.h>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/ewald.h"
#include "gromacs/fileio/trx.h"
#include "gromacs/gmxlib/gpu_utils/gpu_utils.h"
#include "gromacs/legacyheaders/copyrite.h"
#include "gromacs/legacyheaders/force.h"
#include "gromacs/legacyheaders/gmx_detect_hardware.h"
#include "gromacs/legacyheaders/gmx_omp_nthreads.h"
#include "gromacs/legacyheaders/inputrec.h"
#include "gromacs/legacyheaders/md_logging.h"
#include "gromacs/legacyheaders/md_support.h"
#include "gromacs/legacyheaders/names.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/nonbonded.h"
#include "gromacs/legacyheaders/ns.h"
#include "gromacs/legacyheaders/qmmm.h"
#include "gromacs/legacyheaders/tables.h"
#include "gromacs/legacyheaders/txtdump.h"
#include "gromacs/legacyheaders/typedefs.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/legacyheaders/types/group.h"
#include "gromacs/listed-forces/manage-threading.h"
#include "gromacs/math/calculate-ewald-splitting-coefficient.h"
#include "gromacs/math/units.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/forcerec-threading.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_simd.h"
#include "gromacs/mdlib/nbnxn_util.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/simd/simd.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"

#include "nbnxn_gpu_jit_support.h"
93 t_forcerec
*mk_forcerec(void)
103 static void pr_nbfp(FILE *fp
, real
*nbfp
, gmx_bool bBHAM
, int atnr
)
107 for (i
= 0; (i
< atnr
); i
++)
109 for (j
= 0; (j
< atnr
); j
++)
111 fprintf(fp
, "%2d - %2d", i
, j
);
114 fprintf(fp
, " a=%10g, b=%10g, c=%10g\n", BHAMA(nbfp
, atnr
, i
, j
),
115 BHAMB(nbfp
, atnr
, i
, j
), BHAMC(nbfp
, atnr
, i
, j
)/6.0);
119 fprintf(fp
, " c6=%10g, c12=%10g\n", C6(nbfp
, atnr
, i
, j
)/6.0,
120 C12(nbfp
, atnr
, i
, j
)/12.0);
127 static real
*mk_nbfp(const gmx_ffparams_t
*idef
, gmx_bool bBHAM
)
135 snew(nbfp
, 3*atnr
*atnr
);
136 for (i
= k
= 0; (i
< atnr
); i
++)
138 for (j
= 0; (j
< atnr
); j
++, k
++)
140 BHAMA(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].bham
.a
;
141 BHAMB(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].bham
.b
;
142 /* nbfp now includes the 6.0 derivative prefactor */
143 BHAMC(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].bham
.c
*6.0;
149 snew(nbfp
, 2*atnr
*atnr
);
150 for (i
= k
= 0; (i
< atnr
); i
++)
152 for (j
= 0; (j
< atnr
); j
++, k
++)
154 /* nbfp now includes the 6.0/12.0 derivative prefactors */
155 C6(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].lj
.c6
*6.0;
156 C12(nbfp
, atnr
, i
, j
) = idef
->iparams
[k
].lj
.c12
*12.0;
164 static real
*make_ljpme_c6grid(const gmx_ffparams_t
*idef
, t_forcerec
*fr
)
167 real c6
, c6i
, c6j
, c12i
, c12j
, epsi
, epsj
, sigmai
, sigmaj
;
169 const real oneOverSix
= 1.0 / 6.0;
171 /* For LJ-PME simulations, we correct the energies with the reciprocal space
172 * inside of the cut-off. To do this the non-bonded kernels needs to have
173 * access to the C6-values used on the reciprocal grid in pme.c
177 snew(grid
, 2*atnr
*atnr
);
178 for (i
= k
= 0; (i
< atnr
); i
++)
180 for (j
= 0; (j
< atnr
); j
++, k
++)
182 c6i
= idef
->iparams
[i
*(atnr
+1)].lj
.c6
;
183 c12i
= idef
->iparams
[i
*(atnr
+1)].lj
.c12
;
184 c6j
= idef
->iparams
[j
*(atnr
+1)].lj
.c6
;
185 c12j
= idef
->iparams
[j
*(atnr
+1)].lj
.c12
;
186 c6
= sqrt(c6i
* c6j
);
187 if (fr
->ljpme_combination_rule
== eljpmeLB
188 && !gmx_numzero(c6
) && !gmx_numzero(c12i
) && !gmx_numzero(c12j
))
190 sigmai
= pow(c12i
/ c6i
, oneOverSix
);
191 sigmaj
= pow(c12j
/ c6j
, oneOverSix
);
192 epsi
= c6i
* c6i
/ c12i
;
193 epsj
= c6j
* c6j
/ c12j
;
194 c6
= sqrt(epsi
* epsj
) * pow(0.5*(sigmai
+sigmaj
), 6);
196 /* Store the elements at the same relative positions as C6 in nbfp in order
197 * to simplify access in the kernels
199 grid
[2*(atnr
*i
+j
)] = c6
*6.0;
205 static real
*mk_nbfp_combination_rule(const gmx_ffparams_t
*idef
, int comb_rule
)
209 real c6i
, c6j
, c12i
, c12j
, epsi
, epsj
, sigmai
, sigmaj
;
211 const real oneOverSix
= 1.0 / 6.0;
214 snew(nbfp
, 2*atnr
*atnr
);
215 for (i
= 0; i
< atnr
; ++i
)
217 for (j
= 0; j
< atnr
; ++j
)
219 c6i
= idef
->iparams
[i
*(atnr
+1)].lj
.c6
;
220 c12i
= idef
->iparams
[i
*(atnr
+1)].lj
.c12
;
221 c6j
= idef
->iparams
[j
*(atnr
+1)].lj
.c6
;
222 c12j
= idef
->iparams
[j
*(atnr
+1)].lj
.c12
;
223 c6
= sqrt(c6i
* c6j
);
224 c12
= sqrt(c12i
* c12j
);
225 if (comb_rule
== eCOMB_ARITHMETIC
226 && !gmx_numzero(c6
) && !gmx_numzero(c12
))
228 sigmai
= pow(c12i
/ c6i
, oneOverSix
);
229 sigmaj
= pow(c12j
/ c6j
, oneOverSix
);
230 epsi
= c6i
* c6i
/ c12i
;
231 epsj
= c6j
* c6j
/ c12j
;
232 c6
= sqrt(epsi
* epsj
) * pow(0.5*(sigmai
+sigmaj
), 6);
233 c12
= sqrt(epsi
* epsj
) * pow(0.5*(sigmai
+sigmaj
), 12);
235 C6(nbfp
, atnr
, i
, j
) = c6
*6.0;
236 C12(nbfp
, atnr
, i
, j
) = c12
*12.0;
242 /* This routine sets fr->solvent_opt to the most common solvent in the
243 * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in
244 * the fr->solvent_type array with the correct type (or esolNO).
246 * Charge groups that fulfill the conditions but are not identical to the
247 * most common one will be marked as esolNO in the solvent_type array.
249 * TIP3p is identical to SPC for these purposes, so we call it
250 * SPC in the arrays (Apologies to Bill Jorgensen ;-)
252 * NOTE: QM particle should not
253 * become an optimized solvent. Not even if there is only one charge
263 } solvent_parameters_t
;
266 check_solvent_cg(const gmx_moltype_t
*molt
,
269 const unsigned char *qm_grpnr
,
270 const t_grps
*qm_grps
,
272 int *n_solvent_parameters
,
273 solvent_parameters_t
**solvent_parameters_p
,
283 real tmp_charge
[4] = { 0.0 }; /* init to zero to make gcc4.8 happy */
284 int tmp_vdwtype
[4] = { 0 }; /* init to zero to make gcc4.8 happy */
287 solvent_parameters_t
*solvent_parameters
;
289 /* We use a list with parameters for each solvent type.
290 * Every time we discover a new molecule that fulfills the basic
291 * conditions for a solvent we compare with the previous entries
292 * in these lists. If the parameters are the same we just increment
293 * the counter for that type, and otherwise we create a new type
294 * based on the current molecule.
296 * Once we've finished going through all molecules we check which
297 * solvent is most common, and mark all those molecules while we
298 * clear the flag on all others.
301 solvent_parameters
= *solvent_parameters_p
;
303 /* Mark the cg first as non optimized */
306 /* Check if this cg has no exclusions with atoms in other charge groups
307 * and all atoms inside the charge group excluded.
308 * We only have 3 or 4 atom solvent loops.
310 if (GET_CGINFO_EXCL_INTER(cginfo
) ||
311 !GET_CGINFO_EXCL_INTRA(cginfo
))
316 /* Get the indices of the first atom in this charge group */
317 j0
= molt
->cgs
.index
[cg0
];
318 j1
= molt
->cgs
.index
[cg0
+1];
320 /* Number of atoms in our molecule */
326 "Moltype '%s': there are %d atoms in this charge group\n",
330 /* Check if it could be an SPC (3 atoms) or TIP4p (4) water,
333 if (nj
< 3 || nj
> 4)
338 /* Check if we are doing QM on this group */
340 if (qm_grpnr
!= NULL
)
342 for (j
= j0
; j
< j1
&& !qm
; j
++)
344 qm
= (qm_grpnr
[j
] < qm_grps
->nr
- 1);
347 /* Cannot use solvent optimization with QM */
353 atom
= molt
->atoms
.atom
;
355 /* Still looks like a solvent, time to check parameters */
357 /* If it is perturbed (free energy) we can't use the solvent loops,
358 * so then we just skip to the next molecule.
362 for (j
= j0
; j
< j1
&& !perturbed
; j
++)
364 perturbed
= PERTURBED(atom
[j
]);
372 /* Now it's only a question if the VdW and charge parameters
373 * are OK. Before doing the check we compare and see if they are
374 * identical to a possible previous solvent type.
375 * First we assign the current types and charges.
377 for (j
= 0; j
< nj
; j
++)
379 tmp_vdwtype
[j
] = atom
[j0
+j
].type
;
380 tmp_charge
[j
] = atom
[j0
+j
].q
;
383 /* Does it match any previous solvent type? */
384 for (k
= 0; k
< *n_solvent_parameters
; k
++)
389 /* We can only match SPC with 3 atoms and TIP4p with 4 atoms */
390 if ( (solvent_parameters
[k
].model
== esolSPC
&& nj
!= 3) ||
391 (solvent_parameters
[k
].model
== esolTIP4P
&& nj
!= 4) )
396 /* Check that types & charges match for all atoms in molecule */
397 for (j
= 0; j
< nj
&& match
== TRUE
; j
++)
399 if (tmp_vdwtype
[j
] != solvent_parameters
[k
].vdwtype
[j
])
403 if (tmp_charge
[j
] != solvent_parameters
[k
].charge
[j
])
410 /* Congratulations! We have a matched solvent.
411 * Flag it with this type for later processing.
414 solvent_parameters
[k
].count
+= nmol
;
416 /* We are done with this charge group */
421 /* If we get here, we have a tentative new solvent type.
422 * Before we add it we must check that it fulfills the requirements
423 * of the solvent optimized loops. First determine which atoms have
426 for (j
= 0; j
< nj
; j
++)
429 tjA
= tmp_vdwtype
[j
];
431 /* Go through all other tpes and see if any have non-zero
432 * VdW parameters when combined with this one.
434 for (k
= 0; k
< fr
->ntype
&& (has_vdw
[j
] == FALSE
); k
++)
436 /* We already checked that the atoms weren't perturbed,
437 * so we only need to check state A now.
441 has_vdw
[j
] = (has_vdw
[j
] ||
442 (BHAMA(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0) ||
443 (BHAMB(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0) ||
444 (BHAMC(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0));
449 has_vdw
[j
] = (has_vdw
[j
] ||
450 (C6(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0) ||
451 (C12(fr
->nbfp
, fr
->ntype
, tjA
, k
) != 0.0));
456 /* Now we know all we need to make the final check and assignment. */
460 * For this we require thatn all atoms have charge,
461 * the charges on atom 2 & 3 should be the same, and only
462 * atom 1 might have VdW.
464 if (has_vdw
[1] == FALSE
&&
465 has_vdw
[2] == FALSE
&&
466 tmp_charge
[0] != 0 &&
467 tmp_charge
[1] != 0 &&
468 tmp_charge
[2] == tmp_charge
[1])
470 srenew(solvent_parameters
, *n_solvent_parameters
+1);
471 solvent_parameters
[*n_solvent_parameters
].model
= esolSPC
;
472 solvent_parameters
[*n_solvent_parameters
].count
= nmol
;
473 for (k
= 0; k
< 3; k
++)
475 solvent_parameters
[*n_solvent_parameters
].vdwtype
[k
] = tmp_vdwtype
[k
];
476 solvent_parameters
[*n_solvent_parameters
].charge
[k
] = tmp_charge
[k
];
479 *cg_sp
= *n_solvent_parameters
;
480 (*n_solvent_parameters
)++;
485 /* Or could it be a TIP4P?
486 * For this we require thatn atoms 2,3,4 have charge, but not atom 1.
487 * Only atom 1 mght have VdW.
489 if (has_vdw
[1] == FALSE
&&
490 has_vdw
[2] == FALSE
&&
491 has_vdw
[3] == FALSE
&&
492 tmp_charge
[0] == 0 &&
493 tmp_charge
[1] != 0 &&
494 tmp_charge
[2] == tmp_charge
[1] &&
497 srenew(solvent_parameters
, *n_solvent_parameters
+1);
498 solvent_parameters
[*n_solvent_parameters
].model
= esolTIP4P
;
499 solvent_parameters
[*n_solvent_parameters
].count
= nmol
;
500 for (k
= 0; k
< 4; k
++)
502 solvent_parameters
[*n_solvent_parameters
].vdwtype
[k
] = tmp_vdwtype
[k
];
503 solvent_parameters
[*n_solvent_parameters
].charge
[k
] = tmp_charge
[k
];
506 *cg_sp
= *n_solvent_parameters
;
507 (*n_solvent_parameters
)++;
511 *solvent_parameters_p
= solvent_parameters
;
515 check_solvent(FILE * fp
,
516 const gmx_mtop_t
* mtop
,
518 cginfo_mb_t
*cginfo_mb
)
521 const gmx_moltype_t
*molt
;
522 int mb
, mol
, cg_mol
, at_offset
, am
, cgm
, i
, nmol_ch
, nmol
;
523 int n_solvent_parameters
;
524 solvent_parameters_t
*solvent_parameters
;
530 fprintf(debug
, "Going to determine what solvent types we have.\n");
533 n_solvent_parameters
= 0;
534 solvent_parameters
= NULL
;
535 /* Allocate temporary array for solvent type */
536 snew(cg_sp
, mtop
->nmolblock
);
539 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
541 molt
= &mtop
->moltype
[mtop
->molblock
[mb
].type
];
543 /* Here we have to loop over all individual molecules
544 * because we need to check for QMMM particles.
546 snew(cg_sp
[mb
], cginfo_mb
[mb
].cg_mod
);
547 nmol_ch
= cginfo_mb
[mb
].cg_mod
/cgs
->nr
;
548 nmol
= mtop
->molblock
[mb
].nmol
/nmol_ch
;
549 for (mol
= 0; mol
< nmol_ch
; mol
++)
552 am
= mol
*cgs
->index
[cgs
->nr
];
553 for (cg_mol
= 0; cg_mol
< cgs
->nr
; cg_mol
++)
555 check_solvent_cg(molt
, cg_mol
, nmol
,
556 mtop
->groups
.grpnr
[egcQMMM
] ?
557 mtop
->groups
.grpnr
[egcQMMM
]+at_offset
+am
: 0,
558 &mtop
->groups
.grps
[egcQMMM
],
560 &n_solvent_parameters
, &solvent_parameters
,
561 cginfo_mb
[mb
].cginfo
[cgm
+cg_mol
],
562 &cg_sp
[mb
][cgm
+cg_mol
]);
565 at_offset
+= cgs
->index
[cgs
->nr
];
568 /* Puh! We finished going through all charge groups.
569 * Now find the most common solvent model.
572 /* Most common solvent this far */
574 for (i
= 0; i
< n_solvent_parameters
; i
++)
577 solvent_parameters
[i
].count
> solvent_parameters
[bestsp
].count
)
585 bestsol
= solvent_parameters
[bestsp
].model
;
593 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
595 cgs
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].cgs
;
596 nmol
= (mtop
->molblock
[mb
].nmol
*cgs
->nr
)/cginfo_mb
[mb
].cg_mod
;
597 for (i
= 0; i
< cginfo_mb
[mb
].cg_mod
; i
++)
599 if (cg_sp
[mb
][i
] == bestsp
)
601 SET_CGINFO_SOLOPT(cginfo_mb
[mb
].cginfo
[i
], bestsol
);
606 SET_CGINFO_SOLOPT(cginfo_mb
[mb
].cginfo
[i
], esolNO
);
613 if (bestsol
!= esolNO
&& fp
!= NULL
)
615 fprintf(fp
, "\nEnabling %s-like water optimization for %d molecules.\n\n",
617 solvent_parameters
[bestsp
].count
);
620 sfree(solvent_parameters
);
621 fr
->solvent_opt
= bestsol
;
625 acNONE
= 0, acCONSTRAINT
, acSETTLE
628 static cginfo_mb_t
*init_cginfo_mb(FILE *fplog
, const gmx_mtop_t
*mtop
,
629 t_forcerec
*fr
, gmx_bool bNoSolvOpt
,
630 gmx_bool
*bFEP_NonBonded
,
631 gmx_bool
*bExcl_IntraCGAll_InterCGNone
)
634 const t_blocka
*excl
;
635 const gmx_moltype_t
*molt
;
636 const gmx_molblock_t
*molb
;
637 cginfo_mb_t
*cginfo_mb
;
640 int cg_offset
, a_offset
;
641 int mb
, m
, cg
, a0
, a1
, gid
, ai
, j
, aj
, excl_nalloc
;
645 gmx_bool bId
, *bExcl
, bExclIntraAll
, bExclInter
, bHaveVDW
, bHaveQ
, bHavePerturbedAtoms
;
647 snew(cginfo_mb
, mtop
->nmolblock
);
649 snew(type_VDW
, fr
->ntype
);
650 for (ai
= 0; ai
< fr
->ntype
; ai
++)
652 type_VDW
[ai
] = FALSE
;
653 for (j
= 0; j
< fr
->ntype
; j
++)
655 type_VDW
[ai
] = type_VDW
[ai
] ||
657 C6(fr
->nbfp
, fr
->ntype
, ai
, j
) != 0 ||
658 C12(fr
->nbfp
, fr
->ntype
, ai
, j
) != 0;
662 *bFEP_NonBonded
= FALSE
;
663 *bExcl_IntraCGAll_InterCGNone
= TRUE
;
666 snew(bExcl
, excl_nalloc
);
669 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
671 molb
= &mtop
->molblock
[mb
];
672 molt
= &mtop
->moltype
[molb
->type
];
676 /* Check if the cginfo is identical for all molecules in this block.
677 * If so, we only need an array of the size of one molecule.
678 * Otherwise we make an array of #mol times #cgs per molecule.
681 for (m
= 0; m
< molb
->nmol
; m
++)
683 int am
= m
*cgs
->index
[cgs
->nr
];
684 for (cg
= 0; cg
< cgs
->nr
; cg
++)
687 a1
= cgs
->index
[cg
+1];
688 if (ggrpnr(&mtop
->groups
, egcENER
, a_offset
+am
+a0
) !=
689 ggrpnr(&mtop
->groups
, egcENER
, a_offset
+a0
))
693 if (mtop
->groups
.grpnr
[egcQMMM
] != NULL
)
695 for (ai
= a0
; ai
< a1
; ai
++)
697 if (mtop
->groups
.grpnr
[egcQMMM
][a_offset
+am
+ai
] !=
698 mtop
->groups
.grpnr
[egcQMMM
][a_offset
+ai
])
707 cginfo_mb
[mb
].cg_start
= cg_offset
;
708 cginfo_mb
[mb
].cg_end
= cg_offset
+ molb
->nmol
*cgs
->nr
;
709 cginfo_mb
[mb
].cg_mod
= (bId
? 1 : molb
->nmol
)*cgs
->nr
;
710 snew(cginfo_mb
[mb
].cginfo
, cginfo_mb
[mb
].cg_mod
);
711 cginfo
= cginfo_mb
[mb
].cginfo
;
713 /* Set constraints flags for constrained atoms */
714 snew(a_con
, molt
->atoms
.nr
);
715 for (ftype
= 0; ftype
< F_NRE
; ftype
++)
717 if (interaction_function
[ftype
].flags
& IF_CONSTRAINT
)
722 for (ia
= 0; ia
< molt
->ilist
[ftype
].nr
; ia
+= 1+nral
)
726 for (a
= 0; a
< nral
; a
++)
728 a_con
[molt
->ilist
[ftype
].iatoms
[ia
+1+a
]] =
729 (ftype
== F_SETTLE
? acSETTLE
: acCONSTRAINT
);
735 for (m
= 0; m
< (bId
? 1 : molb
->nmol
); m
++)
738 int am
= m
*cgs
->index
[cgs
->nr
];
739 for (cg
= 0; cg
< cgs
->nr
; cg
++)
742 a1
= cgs
->index
[cg
+1];
744 /* Store the energy group in cginfo */
745 gid
= ggrpnr(&mtop
->groups
, egcENER
, a_offset
+am
+a0
);
746 SET_CGINFO_GID(cginfo
[cgm
+cg
], gid
);
748 /* Check the intra/inter charge group exclusions */
749 if (a1
-a0
> excl_nalloc
)
751 excl_nalloc
= a1
- a0
;
752 srenew(bExcl
, excl_nalloc
);
754 /* bExclIntraAll: all intra cg interactions excluded
755 * bExclInter: any inter cg interactions excluded
757 bExclIntraAll
= TRUE
;
761 bHavePerturbedAtoms
= FALSE
;
762 for (ai
= a0
; ai
< a1
; ai
++)
764 /* Check VDW and electrostatic interactions */
765 bHaveVDW
= bHaveVDW
|| (type_VDW
[molt
->atoms
.atom
[ai
].type
] ||
766 type_VDW
[molt
->atoms
.atom
[ai
].typeB
]);
767 bHaveQ
= bHaveQ
|| (molt
->atoms
.atom
[ai
].q
!= 0 ||
768 molt
->atoms
.atom
[ai
].qB
!= 0);
770 bHavePerturbedAtoms
= bHavePerturbedAtoms
|| (PERTURBED(molt
->atoms
.atom
[ai
]) != 0);
772 /* Clear the exclusion list for atom ai */
773 for (aj
= a0
; aj
< a1
; aj
++)
775 bExcl
[aj
-a0
] = FALSE
;
777 /* Loop over all the exclusions of atom ai */
778 for (j
= excl
->index
[ai
]; j
< excl
->index
[ai
+1]; j
++)
781 if (aj
< a0
|| aj
>= a1
)
790 /* Check if ai excludes a0 to a1 */
791 for (aj
= a0
; aj
< a1
; aj
++)
795 bExclIntraAll
= FALSE
;
802 SET_CGINFO_CONSTR(cginfo
[cgm
+cg
]);
805 SET_CGINFO_SETTLE(cginfo
[cgm
+cg
]);
813 SET_CGINFO_EXCL_INTRA(cginfo
[cgm
+cg
]);
817 SET_CGINFO_EXCL_INTER(cginfo
[cgm
+cg
]);
819 if (a1
- a0
> MAX_CHARGEGROUP_SIZE
)
821 /* The size in cginfo is currently only read with DD */
822 gmx_fatal(FARGS
, "A charge group has size %d which is larger than the limit of %d atoms", a1
-a0
, MAX_CHARGEGROUP_SIZE
);
826 SET_CGINFO_HAS_VDW(cginfo
[cgm
+cg
]);
830 SET_CGINFO_HAS_Q(cginfo
[cgm
+cg
]);
832 if (bHavePerturbedAtoms
&& fr
->efep
!= efepNO
)
834 SET_CGINFO_FEP(cginfo
[cgm
+cg
]);
835 *bFEP_NonBonded
= TRUE
;
837 /* Store the charge group size */
838 SET_CGINFO_NATOMS(cginfo
[cgm
+cg
], a1
-a0
);
840 if (!bExclIntraAll
|| bExclInter
)
842 *bExcl_IntraCGAll_InterCGNone
= FALSE
;
849 cg_offset
+= molb
->nmol
*cgs
->nr
;
850 a_offset
+= molb
->nmol
*cgs
->index
[cgs
->nr
];
854 /* the solvent optimizer is called after the QM is initialized,
855 * because we don't want to have the QM subsystemto become an
859 check_solvent(fplog
, mtop
, fr
, cginfo_mb
);
861 if (getenv("GMX_NO_SOLV_OPT"))
865 fprintf(fplog
, "Found environment variable GMX_NO_SOLV_OPT.\n"
866 "Disabling all solvent optimization\n");
868 fr
->solvent_opt
= esolNO
;
872 fr
->solvent_opt
= esolNO
;
874 if (!fr
->solvent_opt
)
876 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
878 for (cg
= 0; cg
< cginfo_mb
[mb
].cg_mod
; cg
++)
880 SET_CGINFO_SOLOPT(cginfo_mb
[mb
].cginfo
[cg
], esolNO
);
888 static int *cginfo_expand(int nmb
, cginfo_mb_t
*cgi_mb
)
893 ncg
= cgi_mb
[nmb
-1].cg_end
;
896 for (cg
= 0; cg
< ncg
; cg
++)
898 while (cg
>= cgi_mb
[mb
].cg_end
)
903 cgi_mb
[mb
].cginfo
[(cg
- cgi_mb
[mb
].cg_start
) % cgi_mb
[mb
].cg_mod
];
909 static void set_chargesum(FILE *log
, t_forcerec
*fr
, const gmx_mtop_t
*mtop
)
911 /*This now calculates sum for q and c6*/
912 double qsum
, q2sum
, q
, c6sum
, c6
;
914 const t_atoms
*atoms
;
919 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
921 nmol
= mtop
->molblock
[mb
].nmol
;
922 atoms
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].atoms
;
923 for (i
= 0; i
< atoms
->nr
; i
++)
925 q
= atoms
->atom
[i
].q
;
928 c6
= mtop
->ffparams
.iparams
[atoms
->atom
[i
].type
*(mtop
->ffparams
.atnr
+1)].lj
.c6
;
933 fr
->q2sum
[0] = q2sum
;
934 fr
->c6sum
[0] = c6sum
;
936 if (fr
->efep
!= efepNO
)
941 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
943 nmol
= mtop
->molblock
[mb
].nmol
;
944 atoms
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].atoms
;
945 for (i
= 0; i
< atoms
->nr
; i
++)
947 q
= atoms
->atom
[i
].qB
;
950 c6
= mtop
->ffparams
.iparams
[atoms
->atom
[i
].typeB
*(mtop
->ffparams
.atnr
+1)].lj
.c6
;
954 fr
->q2sum
[1] = q2sum
;
955 fr
->c6sum
[1] = c6sum
;
960 fr
->qsum
[1] = fr
->qsum
[0];
961 fr
->q2sum
[1] = fr
->q2sum
[0];
962 fr
->c6sum
[1] = fr
->c6sum
[0];
966 if (fr
->efep
== efepNO
)
968 fprintf(log
, "System total charge: %.3f\n", fr
->qsum
[0]);
972 fprintf(log
, "System total charge, top. A: %.3f top. B: %.3f\n",
973 fr
->qsum
[0], fr
->qsum
[1]);
978 void update_forcerec(t_forcerec
*fr
, matrix box
)
980 if (fr
->eeltype
== eelGRF
)
982 calc_rffac(NULL
, fr
->eeltype
, fr
->epsilon_r
, fr
->epsilon_rf
,
983 fr
->rcoulomb
, fr
->temp
, fr
->zsquare
, box
,
984 &fr
->kappa
, &fr
->k_rf
, &fr
->c_rf
);
988 void set_avcsixtwelve(FILE *fplog
, t_forcerec
*fr
, const gmx_mtop_t
*mtop
)
990 const t_atoms
*atoms
, *atoms_tpi
;
991 const t_blocka
*excl
;
992 int mb
, nmol
, nmolc
, i
, j
, tpi
, tpj
, j1
, j2
, k
, nexcl
, q
;
993 gmx_int64_t npair
, npair_ij
, tmpi
, tmpj
;
994 double csix
, ctwelve
;
998 real
*nbfp_comb
= NULL
;
1004 /* For LJ-PME, we want to correct for the difference between the
1005 * actual C6 values and the C6 values used by the LJ-PME based on
1006 * combination rules. */
1008 if (EVDW_PME(fr
->vdwtype
))
1010 nbfp_comb
= mk_nbfp_combination_rule(&mtop
->ffparams
,
1011 (fr
->ljpme_combination_rule
== eljpmeLB
) ? eCOMB_ARITHMETIC
: eCOMB_GEOMETRIC
);
1012 for (tpi
= 0; tpi
< ntp
; ++tpi
)
1014 for (tpj
= 0; tpj
< ntp
; ++tpj
)
1016 C6(nbfp_comb
, ntp
, tpi
, tpj
) =
1017 C6(nbfp
, ntp
, tpi
, tpj
) - C6(nbfp_comb
, ntp
, tpi
, tpj
);
1018 C12(nbfp_comb
, ntp
, tpi
, tpj
) = C12(nbfp
, ntp
, tpi
, tpj
);
1023 for (q
= 0; q
< (fr
->efep
== efepNO
? 1 : 2); q
++)
1031 /* Count the types so we avoid natoms^2 operations */
1032 snew(typecount
, ntp
);
1033 gmx_mtop_count_atomtypes(mtop
, q
, typecount
);
1035 for (tpi
= 0; tpi
< ntp
; tpi
++)
1037 for (tpj
= tpi
; tpj
< ntp
; tpj
++)
1039 tmpi
= typecount
[tpi
];
1040 tmpj
= typecount
[tpj
];
1043 npair_ij
= tmpi
*tmpj
;
1047 npair_ij
= tmpi
*(tmpi
- 1)/2;
1051 /* nbfp now includes the 6.0 derivative prefactor */
1052 csix
+= npair_ij
*BHAMC(nbfp
, ntp
, tpi
, tpj
)/6.0;
1056 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1057 csix
+= npair_ij
* C6(nbfp
, ntp
, tpi
, tpj
)/6.0;
1058 ctwelve
+= npair_ij
* C12(nbfp
, ntp
, tpi
, tpj
)/12.0;
1064 /* Subtract the excluded pairs.
1065 * The main reason for substracting exclusions is that in some cases
1066 * some combinations might never occur and the parameters could have
1067 * any value. These unused values should not influence the dispersion
1070 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
1072 nmol
= mtop
->molblock
[mb
].nmol
;
1073 atoms
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].atoms
;
1074 excl
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].excls
;
1075 for (i
= 0; (i
< atoms
->nr
); i
++)
1079 tpi
= atoms
->atom
[i
].type
;
1083 tpi
= atoms
->atom
[i
].typeB
;
1085 j1
= excl
->index
[i
];
1086 j2
= excl
->index
[i
+1];
1087 for (j
= j1
; j
< j2
; j
++)
1094 tpj
= atoms
->atom
[k
].type
;
1098 tpj
= atoms
->atom
[k
].typeB
;
1102 /* nbfp now includes the 6.0 derivative prefactor */
1103 csix
-= nmol
*BHAMC(nbfp
, ntp
, tpi
, tpj
)/6.0;
1107 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1108 csix
-= nmol
*C6 (nbfp
, ntp
, tpi
, tpj
)/6.0;
1109 ctwelve
-= nmol
*C12(nbfp
, ntp
, tpi
, tpj
)/12.0;
1119 /* Only correct for the interaction of the test particle
1120 * with the rest of the system.
1123 &mtop
->moltype
[mtop
->molblock
[mtop
->nmolblock
-1].type
].atoms
;
1126 for (mb
= 0; mb
< mtop
->nmolblock
; mb
++)
1128 nmol
= mtop
->molblock
[mb
].nmol
;
1129 atoms
= &mtop
->moltype
[mtop
->molblock
[mb
].type
].atoms
;
1130 for (j
= 0; j
< atoms
->nr
; j
++)
1133 /* Remove the interaction of the test charge group
1136 if (mb
== mtop
->nmolblock
-1)
1140 if (mb
== 0 && nmol
== 1)
1142 gmx_fatal(FARGS
, "Old format tpr with TPI, please generate a new tpr file");
1147 tpj
= atoms
->atom
[j
].type
;
1151 tpj
= atoms
->atom
[j
].typeB
;
1153 for (i
= 0; i
< fr
->n_tpi
; i
++)
1157 tpi
= atoms_tpi
->atom
[i
].type
;
1161 tpi
= atoms_tpi
->atom
[i
].typeB
;
1165 /* nbfp now includes the 6.0 derivative prefactor */
1166 csix
+= nmolc
*BHAMC(nbfp
, ntp
, tpi
, tpj
)/6.0;
1170 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1171 csix
+= nmolc
*C6 (nbfp
, ntp
, tpi
, tpj
)/6.0;
1172 ctwelve
+= nmolc
*C12(nbfp
, ntp
, tpi
, tpj
)/12.0;
1179 if (npair
- nexcl
<= 0 && fplog
)
1181 fprintf(fplog
, "\nWARNING: There are no atom pairs for dispersion correction\n\n");
1187 csix
/= npair
- nexcl
;
1188 ctwelve
/= npair
- nexcl
;
1192 fprintf(debug
, "Counted %d exclusions\n", nexcl
);
1193 fprintf(debug
, "Average C6 parameter is: %10g\n", (double)csix
);
1194 fprintf(debug
, "Average C12 parameter is: %10g\n", (double)ctwelve
);
1196 fr
->avcsix
[q
] = csix
;
1197 fr
->avctwelve
[q
] = ctwelve
;
1200 if (EVDW_PME(fr
->vdwtype
))
1207 if (fr
->eDispCorr
== edispcAllEner
||
1208 fr
->eDispCorr
== edispcAllEnerPres
)
1210 fprintf(fplog
, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1211 fr
->avcsix
[0], fr
->avctwelve
[0]);
1215 fprintf(fplog
, "Long Range LJ corr.: <C6> %10.4e\n", fr
->avcsix
[0]);
1221 static void set_bham_b_max(FILE *fplog
, t_forcerec
*fr
,
1222 const gmx_mtop_t
*mtop
)
1224 const t_atoms
*at1
, *at2
;
1225 int mt1
, mt2
, i
, j
, tpi
, tpj
, ntypes
;
1231 fprintf(fplog
, "Determining largest Buckingham b parameter for table\n");
1238 for (mt1
= 0; mt1
< mtop
->nmoltype
; mt1
++)
1240 at1
= &mtop
->moltype
[mt1
].atoms
;
1241 for (i
= 0; (i
< at1
->nr
); i
++)
1243 tpi
= at1
->atom
[i
].type
;
1246 gmx_fatal(FARGS
, "Atomtype[%d] = %d, maximum = %d", i
, tpi
, ntypes
);
1249 for (mt2
= mt1
; mt2
< mtop
->nmoltype
; mt2
++)
1251 at2
= &mtop
->moltype
[mt2
].atoms
;
1252 for (j
= 0; (j
< at2
->nr
); j
++)
1254 tpj
= at2
->atom
[j
].type
;
1257 gmx_fatal(FARGS
, "Atomtype[%d] = %d, maximum = %d", j
, tpj
, ntypes
);
1259 b
= BHAMB(nbfp
, ntypes
, tpi
, tpj
);
1260 if (b
> fr
->bham_b_max
)
1264 if ((b
< bmin
) || (bmin
== -1))
1274 fprintf(fplog
, "Buckingham b parameters, min: %g, max: %g\n",
1275 bmin
, fr
->bham_b_max
);
1279 static void make_nbf_tables(FILE *fp
, const output_env_t oenv
,
1280 t_forcerec
*fr
, real rtab
,
1281 const t_commrec
*cr
,
1282 const char *tabfn
, char *eg1
, char *eg2
,
1292 fprintf(debug
, "No table file name passed, can not read table, can not do non-bonded interactions\n");
1297 sprintf(buf
, "%s", tabfn
);
1300 /* Append the two energy group names */
1301 sprintf(buf
+ strlen(tabfn
) - strlen(ftp2ext(efXVG
)) - 1, "_%s_%s.%s",
1302 eg1
, eg2
, ftp2ext(efXVG
));
1304 nbl
->table_elec_vdw
= make_tables(fp
, oenv
, fr
, MASTER(cr
), buf
, rtab
, 0);
1305 /* Copy the contents of the table to separate coulomb and LJ tables too,
1306 * to improve cache performance.
1308 /* For performance reasons we want
1309 * the table data to be aligned to 16-byte. The pointers could be freed
1310 * but currently aren't.
1312 nbl
->table_elec
.interaction
= GMX_TABLE_INTERACTION_ELEC
;
1313 nbl
->table_elec
.format
= nbl
->table_elec_vdw
.format
;
1314 nbl
->table_elec
.r
= nbl
->table_elec_vdw
.r
;
1315 nbl
->table_elec
.n
= nbl
->table_elec_vdw
.n
;
1316 nbl
->table_elec
.scale
= nbl
->table_elec_vdw
.scale
;
1317 nbl
->table_elec
.scale_exp
= nbl
->table_elec_vdw
.scale_exp
;
1318 nbl
->table_elec
.formatsize
= nbl
->table_elec_vdw
.formatsize
;
1319 nbl
->table_elec
.ninteractions
= 1;
1320 nbl
->table_elec
.stride
= nbl
->table_elec
.formatsize
* nbl
->table_elec
.ninteractions
;
1321 snew_aligned(nbl
->table_elec
.data
, nbl
->table_elec
.stride
*(nbl
->table_elec
.n
+1), 32);
1323 nbl
->table_vdw
.interaction
= GMX_TABLE_INTERACTION_VDWREP_VDWDISP
;
1324 nbl
->table_vdw
.format
= nbl
->table_elec_vdw
.format
;
1325 nbl
->table_vdw
.r
= nbl
->table_elec_vdw
.r
;
1326 nbl
->table_vdw
.n
= nbl
->table_elec_vdw
.n
;
1327 nbl
->table_vdw
.scale
= nbl
->table_elec_vdw
.scale
;
1328 nbl
->table_vdw
.scale_exp
= nbl
->table_elec_vdw
.scale_exp
;
1329 nbl
->table_vdw
.formatsize
= nbl
->table_elec_vdw
.formatsize
;
1330 nbl
->table_vdw
.ninteractions
= 2;
1331 nbl
->table_vdw
.stride
= nbl
->table_vdw
.formatsize
* nbl
->table_vdw
.ninteractions
;
1332 snew_aligned(nbl
->table_vdw
.data
, nbl
->table_vdw
.stride
*(nbl
->table_vdw
.n
+1), 32);
1334 for (i
= 0; i
<= nbl
->table_elec_vdw
.n
; i
++)
1336 for (j
= 0; j
< 4; j
++)
1338 nbl
->table_elec
.data
[4*i
+j
] = nbl
->table_elec_vdw
.data
[12*i
+j
];
1340 for (j
= 0; j
< 8; j
++)
1342 nbl
->table_vdw
.data
[8*i
+j
] = nbl
->table_elec_vdw
.data
[12*i
+4+j
];
1347 static void count_tables(int ftype1
, int ftype2
, const gmx_mtop_t
*mtop
,
1348 int *ncount
, int **count
)
1350 const gmx_moltype_t
*molt
;
1352 int mt
, ftype
, stride
, i
, j
, tabnr
;
1354 for (mt
= 0; mt
< mtop
->nmoltype
; mt
++)
1356 molt
= &mtop
->moltype
[mt
];
1357 for (ftype
= 0; ftype
< F_NRE
; ftype
++)
1359 if (ftype
== ftype1
|| ftype
== ftype2
)
1361 il
= &molt
->ilist
[ftype
];
1362 stride
= 1 + NRAL(ftype
);
1363 for (i
= 0; i
< il
->nr
; i
+= stride
)
1365 tabnr
= mtop
->ffparams
.iparams
[il
->iatoms
[i
]].tab
.table
;
1368 gmx_fatal(FARGS
, "A bonded table number is smaller than 0: %d\n", tabnr
);
1370 if (tabnr
>= *ncount
)
1372 srenew(*count
, tabnr
+1);
1373 for (j
= *ncount
; j
< tabnr
+1; j
++)
1386 static bondedtable_t
*make_bonded_tables(FILE *fplog
,
1387 int ftype1
, int ftype2
,
1388 const gmx_mtop_t
*mtop
,
1389 const char *basefn
, const char *tabext
)
1391 int i
, ncount
, *count
;
1399 count_tables(ftype1
, ftype2
, mtop
, &ncount
, &count
);
1404 for (i
= 0; i
< ncount
; i
++)
1408 sprintf(tabfn
, "%s", basefn
);
1409 sprintf(tabfn
+ strlen(basefn
) - strlen(ftp2ext(efXVG
)) - 1, "_%s%d.%s",
1410 tabext
, i
, ftp2ext(efXVG
));
1411 tab
[i
] = make_bonded_table(fplog
, tabfn
, NRAL(ftype1
)-2);
1420 void forcerec_set_ranges(t_forcerec
*fr
,
1421 int ncg_home
, int ncg_force
,
1423 int natoms_force_constr
, int natoms_f_novirsum
)
1428 /* fr->ncg_force is unused in the standard code,
1429 * but it can be useful for modified code dealing with charge groups.
1431 fr
->ncg_force
= ncg_force
;
1432 fr
->natoms_force
= natoms_force
;
1433 fr
->natoms_force_constr
= natoms_force_constr
;
1435 if (fr
->natoms_force_constr
> fr
->nalloc_force
)
1437 fr
->nalloc_force
= over_alloc_dd(fr
->natoms_force_constr
);
1441 srenew(fr
->f_twin
, fr
->nalloc_force
);
1445 if (fr
->bF_NoVirSum
)
1447 fr
->f_novirsum_n
= natoms_f_novirsum
;
1448 if (fr
->f_novirsum_n
> fr
->f_novirsum_nalloc
)
1450 fr
->f_novirsum_nalloc
= over_alloc_dd(fr
->f_novirsum_n
);
1451 srenew(fr
->f_novirsum_alloc
, fr
->f_novirsum_nalloc
);
1456 fr
->f_novirsum_n
= 0;
/* Map a cut-off value of 0 ("no cut-off") onto the large sentinel
 * GMX_CUTOFF_INF so that downstream range comparisons work normally.
 * NOTE(review): the guard around the assignment (presumably testing
 * cutoff == 0) and the return statement are missing from this
 * extraction - confirm against the complete source. */
1460 static real
cutoff_inf(real cutoff
)
1464 cutoff
= GMX_CUTOFF_INF
;
1470 static void make_adress_tf_tables(FILE *fp
, const output_env_t oenv
,
1471 t_forcerec
*fr
, const t_inputrec
*ir
,
1472 const char *tabfn
, const gmx_mtop_t
*mtop
,
1480 gmx_fatal(FARGS
, "No thermoforce table file given. Use -tabletf to specify a file\n");
1484 snew(fr
->atf_tabs
, ir
->adress
->n_tf_grps
);
1486 sprintf(buf
, "%s", tabfn
);
1487 for (i
= 0; i
< ir
->adress
->n_tf_grps
; i
++)
1489 j
= ir
->adress
->tf_table_index
[i
]; /* get energy group index */
1490 sprintf(buf
+ strlen(tabfn
) - strlen(ftp2ext(efXVG
)) - 1, "tf_%s.%s",
1491 *(mtop
->groups
.grpname
[mtop
->groups
.grps
[egcENER
].nm_ind
[j
]]), ftp2ext(efXVG
));
1494 fprintf(fp
, "loading tf table for energygrp index %d from %s\n", ir
->adress
->tf_table_index
[i
], buf
);
1496 fr
->atf_tabs
[i
] = make_atf_table(fp
, oenv
, fr
, buf
, box
);
1501 gmx_bool
can_use_allvsall(const t_inputrec
*ir
, gmx_bool bPrintNote
, t_commrec
*cr
, FILE *fp
)
1508 ir
->rcoulomb
== 0 &&
1510 ir
->ePBC
== epbcNONE
&&
1511 ir
->vdwtype
== evdwCUT
&&
1512 ir
->coulombtype
== eelCUT
&&
1513 ir
->efep
== efepNO
&&
1514 (ir
->implicit_solvent
== eisNO
||
1515 (ir
->implicit_solvent
== eisGBSA
&& (ir
->gb_algorithm
== egbSTILL
||
1516 ir
->gb_algorithm
== egbHCT
||
1517 ir
->gb_algorithm
== egbOBC
))) &&
1518 getenv("GMX_NO_ALLVSALL") == NULL
1521 if (bAllvsAll
&& ir
->opts
.ngener
> 1)
1523 const char *note
= "NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
1529 fprintf(stderr
, "\n%s\n", note
);
1533 fprintf(fp
, "\n%s\n", note
);
1539 if (bAllvsAll
&& fp
&& MASTER(cr
))
1541 fprintf(fp
, "\nUsing SIMD all-vs-all kernels.\n\n");
/* Check whether the requested non-bonded acceleration (GPU when bGPU,
 * otherwise SIMD CPU kernels) supports the interactions selected in *ir.
 * The one unsupported combination visible here is LJ-PME with the
 * Lorentz-Berthelot combination rule on SIMD kernels: a warning is
 * printed announcing the fall-back to plain-C kernels.
 * NOTE(review): the gmx_bool bGPU parameter line and the function's
 * return statements are missing from this extraction. */
1548 gmx_bool
nbnxn_acceleration_supported(FILE *fplog
,
1549 const t_commrec
*cr
,
1550 const t_inputrec
*ir
,
1553 if (!bGPU
&& (ir
->vdwtype
== evdwPME
&& ir
->ljpme_combination_rule
== eljpmeLB
))
1555 md_print_warn(cr
, fplog
, "LJ-PME with Lorentz-Berthelot is not supported with %s, falling back to %s\n",
1556 bGPU
? "GPUs" : "SIMD kernels",
1557 bGPU
? "CPU only" : "plain-C kernels");
1565 static void pick_nbnxn_kernel_cpu(const t_inputrec gmx_unused
*ir
,
1569 *kernel_type
= nbnxnk4x4_PlainC
;
1570 *ewald_excl
= ewaldexclTable
;
1572 #ifdef GMX_NBNXN_SIMD
1574 #ifdef GMX_NBNXN_SIMD_4XN
1575 *kernel_type
= nbnxnk4xN_SIMD_4xN
;
1577 #ifdef GMX_NBNXN_SIMD_2XNN
1578 *kernel_type
= nbnxnk4xN_SIMD_2xNN
;
1581 #if defined GMX_NBNXN_SIMD_2XNN && defined GMX_NBNXN_SIMD_4XN
1582 /* We need to choose if we want 2x(N+N) or 4xN kernels.
1583 * Currently this is based on the SIMD acceleration choice,
1584 * but it might be better to decide this at runtime based on CPU.
1586 * 4xN calculates more (zero) interactions, but has less pair-search
1587 * work and much better kernel instruction scheduling.
1589 * Up till now we have only seen that on Intel Sandy/Ivy Bridge,
1590 * which doesn't have FMA, both the analytical and tabulated Ewald
1591 * kernels have similar pair rates for 4x8 and 2x(4+4), so we choose
1592 * 2x(4+4) because it results in significantly fewer pairs.
1593 * For RF, the raw pair rate of the 4x8 kernel is higher than 2x(4+4),
1594 * 10% with HT, 50% without HT. As we currently don't detect the actual
1595 * use of HT, use 4x8 to avoid a potential performance hit.
1596 * On Intel Haswell 4x8 is always faster.
1598 *kernel_type
= nbnxnk4xN_SIMD_4xN
;
1600 #ifndef GMX_SIMD_HAVE_FMA
1601 if (EEL_PME_EWALD(ir
->coulombtype
) ||
1602 EVDW_PME(ir
->vdwtype
))
1604 /* We have Ewald kernels without FMA (Intel Sandy/Ivy Bridge).
1605 * There are enough instructions to make 2x(4+4) efficient.
1607 *kernel_type
= nbnxnk4xN_SIMD_2xNN
;
1610 #endif /* GMX_NBNXN_SIMD_2XNN && GMX_NBNXN_SIMD_4XN */
1613 if (getenv("GMX_NBNXN_SIMD_4XN") != NULL
)
1615 #ifdef GMX_NBNXN_SIMD_4XN
1616 *kernel_type
= nbnxnk4xN_SIMD_4xN
;
1618 gmx_fatal(FARGS
, "SIMD 4xN kernels requested, but GROMACS has been compiled without support for these kernels");
1621 if (getenv("GMX_NBNXN_SIMD_2XNN") != NULL
)
1623 #ifdef GMX_NBNXN_SIMD_2XNN
1624 *kernel_type
= nbnxnk4xN_SIMD_2xNN
;
1626 gmx_fatal(FARGS
, "SIMD 2x(N+N) kernels requested, but GROMACS has been compiled without support for these kernels");
1630 /* Analytical Ewald exclusion correction is only an option in
1632 * Since table lookup's don't parallelize with SIMD, analytical
1633 * will probably always be faster for a SIMD width of 8 or more.
1634 * With FMA analytical is sometimes faster for a width if 4 as well.
1635 * On BlueGene/Q, this is faster regardless of precision.
1636 * In single precision, this is faster on Bulldozer.
1638 #if GMX_SIMD_REAL_WIDTH >= 8 || \
1639 (GMX_SIMD_REAL_WIDTH >= 4 && defined GMX_SIMD_HAVE_FMA && !defined GMX_DOUBLE) || \
1640 defined GMX_SIMD_IBM_QPX
1641 *ewald_excl
= ewaldexclAnalytical
;
1643 if (getenv("GMX_NBNXN_EWALD_TABLE") != NULL
)
1645 *ewald_excl
= ewaldexclTable
;
1647 if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != NULL
)
1649 *ewald_excl
= ewaldexclAnalytical
;
1653 #endif /* GMX_NBNXN_SIMD */
1657 const char *lookup_nbnxn_kernel_name(int kernel_type
)
1659 const char *returnvalue
= NULL
;
1660 switch (kernel_type
)
1663 returnvalue
= "not set";
1665 case nbnxnk4x4_PlainC
:
1666 returnvalue
= "plain C";
1668 case nbnxnk4xN_SIMD_4xN
:
1669 case nbnxnk4xN_SIMD_2xNN
:
1670 #ifdef GMX_NBNXN_SIMD
1671 #if defined GMX_SIMD_X86_SSE2
1672 returnvalue
= "SSE2";
1673 #elif defined GMX_SIMD_X86_SSE4_1
1674 returnvalue
= "SSE4.1";
1675 #elif defined GMX_SIMD_X86_AVX_128_FMA
1676 returnvalue
= "AVX_128_FMA";
1677 #elif defined GMX_SIMD_X86_AVX_256
1678 returnvalue
= "AVX_256";
1679 #elif defined GMX_SIMD_X86_AVX2_256
1680 returnvalue
= "AVX2_256";
1682 returnvalue
= "SIMD";
1684 #else /* GMX_NBNXN_SIMD */
1685 returnvalue
= "not available";
1686 #endif /* GMX_NBNXN_SIMD */
1688 case nbnxnk8x8x8_GPU
: returnvalue
= "GPU"; break;
1689 case nbnxnk8x8x8_PlainC
: returnvalue
= "plain C"; break;
1693 gmx_fatal(FARGS
, "Illegal kernel type selected");
1700 static void pick_nbnxn_kernel(FILE *fp
,
1701 const t_commrec
*cr
,
1702 gmx_bool use_simd_kernels
,
1704 gmx_bool bEmulateGPU
,
1705 const t_inputrec
*ir
,
1708 gmx_bool bDoNonbonded
)
1710 assert(kernel_type
);
1712 *kernel_type
= nbnxnkNotSet
;
1713 *ewald_excl
= ewaldexclTable
;
1717 *kernel_type
= nbnxnk8x8x8_PlainC
;
1721 md_print_warn(cr
, fp
, "Emulating a GPU run on the CPU (slow)");
1726 *kernel_type
= nbnxnk8x8x8_GPU
;
1729 if (*kernel_type
== nbnxnkNotSet
)
1731 /* LJ PME with LB combination rule does 7 mesh operations.
1732 * This so slow that we don't compile SIMD non-bonded kernels for that.
1734 if (use_simd_kernels
&&
1735 nbnxn_acceleration_supported(fp
, cr
, ir
, FALSE
))
1737 pick_nbnxn_kernel_cpu(ir
, kernel_type
, ewald_excl
);
1741 *kernel_type
= nbnxnk4x4_PlainC
;
1745 if (bDoNonbonded
&& fp
!= NULL
)
1747 fprintf(fp
, "\nUsing %s %dx%d non-bonded kernels\n\n",
1748 lookup_nbnxn_kernel_name(*kernel_type
),
1749 nbnxn_kernel_to_cluster_i_size(*kernel_type
),
1750 nbnxn_kernel_to_cluster_j_size(*kernel_type
));
1752 if (nbnxnk4x4_PlainC
== *kernel_type
||
1753 nbnxnk8x8x8_PlainC
== *kernel_type
)
1755 md_print_warn(cr
, fp
,
1756 "WARNING: Using the slow %s kernels. This should\n"
1757 "not happen during routine usage on supported platforms.\n\n",
1758 lookup_nbnxn_kernel_name(*kernel_type
));
1763 static void pick_nbnxn_resources(FILE *fp
,
1764 const t_commrec
*cr
,
1765 const gmx_hw_info_t
*hwinfo
,
1766 gmx_bool bDoNonbonded
,
1768 gmx_bool
*bEmulateGPU
,
1769 const gmx_gpu_opt_t
*gpu_opt
)
1771 gmx_bool bEmulateGPUEnvVarSet
;
1772 char gpu_err_str
[STRLEN
];
1776 bEmulateGPUEnvVarSet
= (getenv("GMX_EMULATE_GPU") != NULL
);
1778 /* Run GPU emulation mode if GMX_EMULATE_GPU is defined. Because
1779 * GPUs (currently) only handle non-bonded calculations, we will
1780 * automatically switch to emulation if non-bonded calculations are
1781 * turned off via GMX_NO_NONBONDED - this is the simple and elegant
1782 * way to turn off GPU initialization, data movement, and cleanup.
1784 * GPU emulation can be useful to assess the performance one can expect by
1785 * adding GPU(s) to the machine. The conditional below allows this even
1786 * if mdrun is compiled without GPU acceleration support.
1787 * Note that you should freezing the system as otherwise it will explode.
1789 *bEmulateGPU
= (bEmulateGPUEnvVarSet
||
1790 (!bDoNonbonded
&& gpu_opt
->n_dev_use
> 0));
1792 /* Enable GPU mode when GPUs are available or no GPU emulation is requested.
1794 if (gpu_opt
->n_dev_use
> 0 && !(*bEmulateGPU
))
1796 /* Each PP node will use the intra-node id-th device from the
1797 * list of detected/selected GPUs. */
1798 if (!init_gpu(fp
, cr
->rank_pp_intranode
, gpu_err_str
,
1799 &hwinfo
->gpu_info
, gpu_opt
))
1801 /* At this point the init should never fail as we made sure that
1802 * we have all the GPUs we need. If it still does, we'll bail. */
1803 /* TODO the decorating of gpu_err_str is nicer if it
1804 happens inside init_gpu. Out here, the decorating with
1805 the MPI rank makes sense. */
1806 gmx_fatal(FARGS
, "On rank %d failed to initialize GPU #%d: %s",
1808 get_gpu_device_id(&hwinfo
->gpu_info
, gpu_opt
,
1809 cr
->rank_pp_intranode
),
1813 /* Here we actually turn on hardware GPU acceleration */
1818 gmx_bool
uses_simple_tables(int cutoff_scheme
,
1819 nonbonded_verlet_t
*nbv
,
1822 gmx_bool bUsesSimpleTables
= TRUE
;
1825 switch (cutoff_scheme
)
1828 bUsesSimpleTables
= TRUE
;
1831 assert(NULL
!= nbv
&& NULL
!= nbv
->grp
);
1832 grp_index
= (group
< 0) ? 0 : (nbv
->ngrp
- 1);
1833 bUsesSimpleTables
= nbnxn_kernel_pairlist_simple(nbv
->grp
[grp_index
].kernel_type
);
1836 gmx_incons("unimplemented");
1838 return bUsesSimpleTables
;
1841 static void init_ewald_f_table(interaction_const_t
*ic
,
1846 /* Get the Ewald table spacing based on Coulomb and/or LJ
1847 * Ewald coefficients and rtol.
1849 ic
->tabq_scale
= ewald_spline3_table_scale(ic
);
1851 if (ic
->cutoff_scheme
== ecutsVERLET
)
1853 maxr
= ic
->rcoulomb
;
1857 maxr
= std::max(ic
->rcoulomb
, rtab
);
1859 ic
->tabq_size
= static_cast<int>(maxr
*ic
->tabq_scale
) + 2;
1861 sfree_aligned(ic
->tabq_coul_FDV0
);
1862 sfree_aligned(ic
->tabq_coul_F
);
1863 sfree_aligned(ic
->tabq_coul_V
);
1865 sfree_aligned(ic
->tabq_vdw_FDV0
);
1866 sfree_aligned(ic
->tabq_vdw_F
);
1867 sfree_aligned(ic
->tabq_vdw_V
);
1869 if (ic
->eeltype
== eelEWALD
|| EEL_PME(ic
->eeltype
))
1871 /* Create the original table data in FDV0 */
1872 snew_aligned(ic
->tabq_coul_FDV0
, ic
->tabq_size
*4, 32);
1873 snew_aligned(ic
->tabq_coul_F
, ic
->tabq_size
, 32);
1874 snew_aligned(ic
->tabq_coul_V
, ic
->tabq_size
, 32);
1875 table_spline3_fill_ewald_lr(ic
->tabq_coul_F
, ic
->tabq_coul_V
, ic
->tabq_coul_FDV0
,
1876 ic
->tabq_size
, 1/ic
->tabq_scale
, ic
->ewaldcoeff_q
, v_q_ewald_lr
);
1879 if (EVDW_PME(ic
->vdwtype
))
1881 snew_aligned(ic
->tabq_vdw_FDV0
, ic
->tabq_size
*4, 32);
1882 snew_aligned(ic
->tabq_vdw_F
, ic
->tabq_size
, 32);
1883 snew_aligned(ic
->tabq_vdw_V
, ic
->tabq_size
, 32);
1884 table_spline3_fill_ewald_lr(ic
->tabq_vdw_F
, ic
->tabq_vdw_V
, ic
->tabq_vdw_FDV0
,
1885 ic
->tabq_size
, 1/ic
->tabq_scale
, ic
->ewaldcoeff_lj
, v_lj_ewald_lr
);
/* (Re)build the Ewald correction tables stored in *ic whenever plain
 * Ewald, PME electrostatics or LJ-PME is in use, then log the resulting
 * table spacing (1/tabq_scale) and size to fp.
 * NOTE(review): the `real rtab` parameter line and the `fp != NULL`
 * guard around the fprintf call are missing from this extraction. */
1889 void init_interaction_const_tables(FILE *fp
,
1890 interaction_const_t
*ic
,
1893 if (ic
->eeltype
== eelEWALD
|| EEL_PME(ic
->eeltype
) || EVDW_PME(ic
->vdwtype
))
1895 init_ewald_f_table(ic
, rtab
);
1899 fprintf(fp
, "Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
1900 1/ic
->tabq_scale
, ic
->tabq_size
);
/* Reset the force-switch shift coefficients in *sc before they are
 * (re)computed for the selected VdW modifier.
 * NOTE(review): the function body is not visible in this extraction. */
1905 static void clear_force_switch_constants(shift_consts_t
*sc
)
1912 static void force_switch_constants(real p
,
1916 /* Here we determine the coefficient for shifting the force to zero
1917 * between distance rsw and the cut-off rc.
1918 * For a potential of r^-p, we have force p*r^-(p+1).
1919 * But to save flops we absorb p in the coefficient.
1921 * force/p = r^-(p+1) + c2*r^2 + c3*r^3
1922 * potential = r^-p + c2/3*r^3 + c3/4*r^4 + cpot
1924 sc
->c2
= ((p
+ 1)*rsw
- (p
+ 4)*rc
)/(pow(rc
, p
+ 2)*pow(rc
- rsw
, 2));
1925 sc
->c3
= -((p
+ 1)*rsw
- (p
+ 3)*rc
)/(pow(rc
, p
+ 2)*pow(rc
- rsw
, 3));
1926 sc
->cpot
= -pow(rc
, -p
) + p
*sc
->c2
/3*pow(rc
- rsw
, 3) + p
*sc
->c3
/4*pow(rc
- rsw
, 4);
1929 static void potential_switch_constants(real rsw
, real rc
,
1930 switch_consts_t
*sc
)
1932 /* The switch function is 1 at rsw and 0 at rc.
1933 * The derivative and second derivate are zero at both ends.
1934 * rsw = max(r - r_switch, 0)
1935 * sw = 1 + c3*rsw^3 + c4*rsw^4 + c5*rsw^5
1936 * dsw = 3*c3*rsw^2 + 4*c4*rsw^3 + 5*c5*rsw^4
1937 * force = force*dsw - potential*sw
1940 sc
->c3
= -10*pow(rc
- rsw
, -3);
1941 sc
->c4
= 15*pow(rc
- rsw
, -4);
1942 sc
->c5
= -6*pow(rc
- rsw
, -5);
1945 /*! \brief Construct interaction constants
1947 * This data is used (particularly) by search and force code for
1948 * short-range interactions. Many of these are constant for the whole
1949 * simulation; some are constant only after PME tuning completes.
1952 init_interaction_const(FILE *fp
,
1953 interaction_const_t
**interaction_const
,
1954 const t_forcerec
*fr
)
1956 interaction_const_t
*ic
;
1957 const real minusSix
= -6.0;
1958 const real minusTwelve
= -12.0;
1962 ic
->cutoff_scheme
= fr
->cutoff_scheme
;
1964 /* Just allocate something so we can free it */
1965 snew_aligned(ic
->tabq_coul_FDV0
, 16, 32);
1966 snew_aligned(ic
->tabq_coul_F
, 16, 32);
1967 snew_aligned(ic
->tabq_coul_V
, 16, 32);
1969 ic
->rlist
= fr
->rlist
;
1970 ic
->rlistlong
= fr
->rlistlong
;
1973 ic
->vdwtype
= fr
->vdwtype
;
1974 ic
->vdw_modifier
= fr
->vdw_modifier
;
1975 ic
->rvdw
= fr
->rvdw
;
1976 ic
->rvdw_switch
= fr
->rvdw_switch
;
1977 ic
->ewaldcoeff_lj
= fr
->ewaldcoeff_lj
;
1978 ic
->ljpme_comb_rule
= fr
->ljpme_combination_rule
;
1979 ic
->sh_lj_ewald
= 0;
1980 clear_force_switch_constants(&ic
->dispersion_shift
);
1981 clear_force_switch_constants(&ic
->repulsion_shift
);
1983 switch (ic
->vdw_modifier
)
1985 case eintmodPOTSHIFT
:
1986 /* Only shift the potential, don't touch the force */
1987 ic
->dispersion_shift
.cpot
= -pow(ic
->rvdw
, minusSix
);
1988 ic
->repulsion_shift
.cpot
= -pow(ic
->rvdw
, minusTwelve
);
1989 if (EVDW_PME(ic
->vdwtype
))
1993 crc2
= sqr(ic
->ewaldcoeff_lj
*ic
->rvdw
);
1994 ic
->sh_lj_ewald
= (exp(-crc2
)*(1 + crc2
+ 0.5*crc2
*crc2
) - 1)*pow(ic
->rvdw
, minusSix
);
1997 case eintmodFORCESWITCH
:
1998 /* Switch the force, switch and shift the potential */
1999 force_switch_constants(6.0, ic
->rvdw_switch
, ic
->rvdw
,
2000 &ic
->dispersion_shift
);
2001 force_switch_constants(12.0, ic
->rvdw_switch
, ic
->rvdw
,
2002 &ic
->repulsion_shift
);
2004 case eintmodPOTSWITCH
:
2005 /* Switch the potential and force */
2006 potential_switch_constants(ic
->rvdw_switch
, ic
->rvdw
,
2010 case eintmodEXACTCUTOFF
:
2011 /* Nothing to do here */
2014 gmx_incons("unimplemented potential modifier");
2017 ic
->sh_invrc6
= -ic
->dispersion_shift
.cpot
;
2019 /* Electrostatics */
2020 ic
->eeltype
= fr
->eeltype
;
2021 ic
->coulomb_modifier
= fr
->coulomb_modifier
;
2022 ic
->rcoulomb
= fr
->rcoulomb
;
2023 ic
->epsilon_r
= fr
->epsilon_r
;
2024 ic
->epsfac
= fr
->epsfac
;
2025 ic
->ewaldcoeff_q
= fr
->ewaldcoeff_q
;
2027 if (fr
->coulomb_modifier
== eintmodPOTSHIFT
)
2029 ic
->sh_ewald
= gmx_erfc(ic
->ewaldcoeff_q
*ic
->rcoulomb
);
2036 /* Reaction-field */
2037 if (EEL_RF(ic
->eeltype
))
2039 ic
->epsilon_rf
= fr
->epsilon_rf
;
2040 ic
->k_rf
= fr
->k_rf
;
2041 ic
->c_rf
= fr
->c_rf
;
2045 /* For plain cut-off we might use the reaction-field kernels */
2046 ic
->epsilon_rf
= ic
->epsilon_r
;
2048 if (fr
->coulomb_modifier
== eintmodPOTSHIFT
)
2050 ic
->c_rf
= 1/ic
->rcoulomb
;
2060 real dispersion_shift
;
2062 dispersion_shift
= ic
->dispersion_shift
.cpot
;
2063 if (EVDW_PME(ic
->vdwtype
))
2065 dispersion_shift
-= ic
->sh_lj_ewald
;
2067 fprintf(fp
, "Potential shift: LJ r^-12: %.3e r^-6: %.3e",
2068 ic
->repulsion_shift
.cpot
, dispersion_shift
);
2070 if (ic
->eeltype
== eelCUT
)
2072 fprintf(fp
, ", Coulomb %.e", -ic
->c_rf
);
2074 else if (EEL_PME(ic
->eeltype
))
2076 fprintf(fp
, ", Ewald %.3e", -ic
->sh_ewald
);
2081 *interaction_const
= ic
;
2084 static void init_nb_verlet(FILE *fp
,
2085 nonbonded_verlet_t
**nb_verlet
,
2086 gmx_bool bFEP_NonBonded
,
2087 const t_inputrec
*ir
,
2088 const t_forcerec
*fr
,
2089 const t_commrec
*cr
,
2090 const char *nbpu_opt
)
2092 nonbonded_verlet_t
*nbv
;
2095 gmx_bool bEmulateGPU
, bHybridGPURun
= FALSE
;
2097 nbnxn_alloc_t
*nb_alloc
;
2098 nbnxn_free_t
*nb_free
;
2102 pick_nbnxn_resources(fp
, cr
, fr
->hwinfo
,
2109 nbv
->min_ci_balanced
= 0;
2111 nbv
->ngrp
= (DOMAINDECOMP(cr
) ? 2 : 1);
2112 for (i
= 0; i
< nbv
->ngrp
; i
++)
2114 nbv
->grp
[i
].nbl_lists
.nnbl
= 0;
2115 nbv
->grp
[i
].nbat
= NULL
;
2116 nbv
->grp
[i
].kernel_type
= nbnxnkNotSet
;
2118 if (i
== 0) /* local */
2120 pick_nbnxn_kernel(fp
, cr
, fr
->use_simd_kernels
,
2121 nbv
->bUseGPU
, bEmulateGPU
, ir
,
2122 &nbv
->grp
[i
].kernel_type
,
2123 &nbv
->grp
[i
].ewald_excl
,
2126 else /* non-local */
2128 if (nbpu_opt
!= NULL
&& strcmp(nbpu_opt
, "gpu_cpu") == 0)
2130 /* Use GPU for local, select a CPU kernel for non-local */
2131 pick_nbnxn_kernel(fp
, cr
, fr
->use_simd_kernels
,
2133 &nbv
->grp
[i
].kernel_type
,
2134 &nbv
->grp
[i
].ewald_excl
,
2137 bHybridGPURun
= TRUE
;
2141 /* Use the same kernel for local and non-local interactions */
2142 nbv
->grp
[i
].kernel_type
= nbv
->grp
[0].kernel_type
;
2143 nbv
->grp
[i
].ewald_excl
= nbv
->grp
[0].ewald_excl
;
2148 nbnxn_init_search(&nbv
->nbs
,
2149 DOMAINDECOMP(cr
) ? &cr
->dd
->nc
: NULL
,
2150 DOMAINDECOMP(cr
) ? domdec_zones(cr
->dd
) : NULL
,
2152 gmx_omp_nthreads_get(emntPairsearch
));
2154 for (i
= 0; i
< nbv
->ngrp
; i
++)
2156 gpu_set_host_malloc_and_free(nbv
->grp
[0].kernel_type
== nbnxnk8x8x8_GPU
,
2157 &nb_alloc
, &nb_free
);
2159 nbnxn_init_pairlist_set(&nbv
->grp
[i
].nbl_lists
,
2160 nbnxn_kernel_pairlist_simple(nbv
->grp
[i
].kernel_type
),
2161 /* 8x8x8 "non-simple" lists are ATM always combined */
2162 !nbnxn_kernel_pairlist_simple(nbv
->grp
[i
].kernel_type
),
2166 nbv
->grp
[0].kernel_type
!= nbv
->grp
[i
].kernel_type
)
2168 gmx_bool bSimpleList
;
2169 int enbnxninitcombrule
;
2171 bSimpleList
= nbnxn_kernel_pairlist_simple(nbv
->grp
[i
].kernel_type
);
2173 if (bSimpleList
&& (fr
->vdwtype
== evdwCUT
&& (fr
->vdw_modifier
== eintmodNONE
|| fr
->vdw_modifier
== eintmodPOTSHIFT
)))
2175 /* Plain LJ cut-off: we can optimize with combination rules */
2176 enbnxninitcombrule
= enbnxninitcombruleDETECT
;
2178 else if (fr
->vdwtype
== evdwPME
)
2180 /* LJ-PME: we need to use a combination rule for the grid */
2181 if (fr
->ljpme_combination_rule
== eljpmeGEOM
)
2183 enbnxninitcombrule
= enbnxninitcombruleGEOM
;
2187 enbnxninitcombrule
= enbnxninitcombruleLB
;
2192 /* We use a full combination matrix: no rule required */
2193 enbnxninitcombrule
= enbnxninitcombruleNONE
;
2197 snew(nbv
->grp
[i
].nbat
, 1);
2198 nbnxn_atomdata_init(fp
,
2200 nbv
->grp
[i
].kernel_type
,
2202 fr
->ntype
, fr
->nbfp
,
2204 bSimpleList
? gmx_omp_nthreads_get(emntNonbonded
) : 1,
2209 nbv
->grp
[i
].nbat
= nbv
->grp
[0].nbat
;
2215 /* init the NxN GPU data; the last argument tells whether we'll have
2216 * both local and non-local NB calculation on GPU */
2217 nbnxn_gpu_init(fp
, &nbv
->gpu_nbv
,
2218 &fr
->hwinfo
->gpu_info
,
2222 cr
->rank_pp_intranode
,
2224 (nbv
->ngrp
> 1) && !bHybridGPURun
);
2226 /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
2227 * also sharing texture references. To keep the code simple, we don't
2228 * treat texture references as shared resources, but this means that
2229 * the coulomb_tab and nbfp texture refs will get updated by multiple threads.
2230 * Hence, to ensure that the non-bonded kernels don't start before all
2231 * texture binding operations are finished, we need to wait for all ranks
2232 * to arrive here before continuing.
2234 * Note that we could omit this barrier if GPUs are not shared (or
2235 * texture objects are used), but as this is initialization code, there
2236 * is no point in complicating things.
2238 #ifdef GMX_THREAD_MPI
2243 #endif /* GMX_THREAD_MPI */
2245 if ((env
= getenv("GMX_NB_MIN_CI")) != NULL
)
2249 nbv
->min_ci_balanced
= strtol(env
, &end
, 10);
2250 if (!end
|| (*end
!= 0) || nbv
->min_ci_balanced
<= 0)
2252 gmx_fatal(FARGS
, "Invalid value passed in GMX_NB_MIN_CI=%s, positive integer required", env
);
2257 fprintf(debug
, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
2258 nbv
->min_ci_balanced
);
2263 nbv
->min_ci_balanced
= nbnxn_gpu_min_ci_balanced(nbv
->gpu_nbv
);
2266 fprintf(debug
, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
2267 nbv
->min_ci_balanced
);
2276 gmx_bool
usingGpu(nonbonded_verlet_t
*nbv
)
2278 return nbv
!= NULL
&& nbv
->bUseGPU
;
2281 void init_forcerec(FILE *fp
,
2282 const output_env_t oenv
,
2285 const t_inputrec
*ir
,
2286 const gmx_mtop_t
*mtop
,
2287 const t_commrec
*cr
,
2293 const char *nbpu_opt
,
2294 gmx_bool bNoSolvOpt
,
2297 int i
, m
, negp_pp
, negptable
, egi
, egj
;
2302 gmx_bool bGenericKernelOnly
;
2303 gmx_bool bMakeTables
, bMakeSeparate14Table
, bSomeNormalNbListsAreInUse
;
2304 gmx_bool bFEP_NonBonded
;
2305 int *nm_ind
, egp_flags
;
2307 if (fr
->hwinfo
== NULL
)
2309 /* Detect hardware, gather information.
2310 * In mdrun, hwinfo has already been set before calling init_forcerec.
2311 * Here we ignore GPUs, as tools will not use them anyhow.
2313 fr
->hwinfo
= gmx_detect_hardware(fp
, cr
, FALSE
);
2316 /* By default we turn SIMD kernels on, but it might be turned off further down... */
2317 fr
->use_simd_kernels
= TRUE
;
2319 fr
->bDomDec
= DOMAINDECOMP(cr
);
2321 if (check_box(ir
->ePBC
, box
))
2323 gmx_fatal(FARGS
, check_box(ir
->ePBC
, box
));
2326 /* Test particle insertion ? */
2329 /* Set to the size of the molecule to be inserted (the last one) */
2330 /* Because of old style topologies, we have to use the last cg
2331 * instead of the last molecule type.
2333 cgs
= &mtop
->moltype
[mtop
->molblock
[mtop
->nmolblock
-1].type
].cgs
;
2334 fr
->n_tpi
= cgs
->index
[cgs
->nr
] - cgs
->index
[cgs
->nr
-1];
2335 if (fr
->n_tpi
!= mtop
->mols
.index
[mtop
->mols
.nr
] - mtop
->mols
.index
[mtop
->mols
.nr
-1])
2337 gmx_fatal(FARGS
, "The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
2345 /* Copy AdResS parameters */
2348 fr
->adress_type
= ir
->adress
->type
;
2349 fr
->adress_const_wf
= ir
->adress
->const_wf
;
2350 fr
->adress_ex_width
= ir
->adress
->ex_width
;
2351 fr
->adress_hy_width
= ir
->adress
->hy_width
;
2352 fr
->adress_icor
= ir
->adress
->icor
;
2353 fr
->adress_site
= ir
->adress
->site
;
2354 fr
->adress_ex_forcecap
= ir
->adress
->ex_forcecap
;
2355 fr
->adress_do_hybridpairs
= ir
->adress
->do_hybridpairs
;
2358 snew(fr
->adress_group_explicit
, ir
->adress
->n_energy_grps
);
2359 for (i
= 0; i
< ir
->adress
->n_energy_grps
; i
++)
2361 fr
->adress_group_explicit
[i
] = ir
->adress
->group_explicit
[i
];
2364 fr
->n_adress_tf_grps
= ir
->adress
->n_tf_grps
;
2365 snew(fr
->adress_tf_table_index
, fr
->n_adress_tf_grps
);
2366 for (i
= 0; i
< fr
->n_adress_tf_grps
; i
++)
2368 fr
->adress_tf_table_index
[i
] = ir
->adress
->tf_table_index
[i
];
2370 copy_rvec(ir
->adress
->refs
, fr
->adress_refs
);
2374 fr
->adress_type
= eAdressOff
;
2375 fr
->adress_do_hybridpairs
= FALSE
;
2378 /* Copy the user determined parameters */
2379 fr
->userint1
= ir
->userint1
;
2380 fr
->userint2
= ir
->userint2
;
2381 fr
->userint3
= ir
->userint3
;
2382 fr
->userint4
= ir
->userint4
;
2383 fr
->userreal1
= ir
->userreal1
;
2384 fr
->userreal2
= ir
->userreal2
;
2385 fr
->userreal3
= ir
->userreal3
;
2386 fr
->userreal4
= ir
->userreal4
;
2389 fr
->fc_stepsize
= ir
->fc_stepsize
;
2392 fr
->efep
= ir
->efep
;
2393 fr
->sc_alphavdw
= ir
->fepvals
->sc_alpha
;
2394 if (ir
->fepvals
->bScCoul
)
2396 fr
->sc_alphacoul
= ir
->fepvals
->sc_alpha
;
2397 fr
->sc_sigma6_min
= pow(ir
->fepvals
->sc_sigma_min
, 6);
2401 fr
->sc_alphacoul
= 0;
2402 fr
->sc_sigma6_min
= 0; /* only needed when bScCoul is on */
2404 fr
->sc_power
= ir
->fepvals
->sc_power
;
2405 fr
->sc_r_power
= ir
->fepvals
->sc_r_power
;
2406 fr
->sc_sigma6_def
= pow(ir
->fepvals
->sc_sigma
, 6);
2408 env
= getenv("GMX_SCSIGMA_MIN");
2412 sscanf(env
, "%20lf", &dbl
);
2413 fr
->sc_sigma6_min
= pow(dbl
, 6);
2416 fprintf(fp
, "Setting the minimum soft core sigma to %g nm\n", dbl
);
2420 fr
->bNonbonded
= TRUE
;
2421 if (getenv("GMX_NO_NONBONDED") != NULL
)
2423 /* turn off non-bonded calculations */
2424 fr
->bNonbonded
= FALSE
;
2425 md_print_warn(cr
, fp
,
2426 "Found environment variable GMX_NO_NONBONDED.\n"
2427 "Disabling nonbonded calculations.\n");
2430 bGenericKernelOnly
= FALSE
;
2432 /* We now check in the NS code whether a particular combination of interactions
2433 * can be used with water optimization, and disable it if that is not the case.
2436 if (getenv("GMX_NB_GENERIC") != NULL
)
2441 "Found environment variable GMX_NB_GENERIC.\n"
2442 "Disabling all interaction-specific nonbonded kernels, will only\n"
2443 "use the slow generic ones in src/gmxlib/nonbonded/nb_generic.c\n\n");
2445 bGenericKernelOnly
= TRUE
;
2448 if (bGenericKernelOnly
== TRUE
)
2453 if ( (getenv("GMX_DISABLE_SIMD_KERNELS") != NULL
) || (getenv("GMX_NOOPTIMIZEDKERNELS") != NULL
) )
2455 fr
->use_simd_kernels
= FALSE
;
2459 "\nFound environment variable GMX_DISABLE_SIMD_KERNELS.\n"
2460 "Disabling the usage of any SIMD-specific non-bonded & bonded kernel routines\n"
2461 "(e.g. SSE2/SSE4.1/AVX).\n\n");
2465 fr
->bBHAM
= (mtop
->ffparams
.functype
[0] == F_BHAM
);
2467 /* Check if we can/should do all-vs-all kernels */
2468 fr
->bAllvsAll
= can_use_allvsall(ir
, FALSE
, NULL
, NULL
);
2469 fr
->AllvsAll_work
= NULL
;
2470 fr
->AllvsAll_workgb
= NULL
;
2472 /* All-vs-all kernels have not been implemented in 4.6 and later.
2473 * See Redmine #1249. */
2476 fr
->bAllvsAll
= FALSE
;
2480 "\nYour simulation settings would have triggered the efficient all-vs-all\n"
2481 "kernels in GROMACS 4.5, but these have not been implemented in GROMACS\n"
2482 "4.6 and 5.x. If performance is important, please use GROMACS 4.5.7\n"
2483 "or try cutoff-scheme = Verlet.\n\n");
2487 /* Neighbour searching stuff */
2488 fr
->cutoff_scheme
= ir
->cutoff_scheme
;
2489 fr
->bGrid
= (ir
->ns_type
== ensGRID
);
2490 fr
->ePBC
= ir
->ePBC
;
2492 if (fr
->cutoff_scheme
== ecutsGROUP
)
2494 const char *note
= "NOTE: This file uses the deprecated 'group' cutoff_scheme. This will be\n"
2495 "removed in a future release when 'verlet' supports all interaction forms.\n";
2499 fprintf(stderr
, "\n%s\n", note
);
2503 fprintf(fp
, "\n%s\n", note
);
2507 /* Determine if we will do PBC for distances in bonded interactions */
2508 if (fr
->ePBC
== epbcNONE
)
2510 fr
->bMolPBC
= FALSE
;
2514 if (!DOMAINDECOMP(cr
))
2518 bSHAKE
= (ir
->eConstrAlg
== econtSHAKE
&&
2519 (gmx_mtop_ftype_count(mtop
, F_CONSTR
) > 0 ||
2520 gmx_mtop_ftype_count(mtop
, F_CONSTRNC
) > 0));
2522 /* The group cut-off scheme and SHAKE assume charge groups
2523 * are whole, but not using molpbc is faster in most cases.
2524 * With intermolecular interactions we need PBC for calculating
2525 * distances between atoms in different molecules.
2527 if ((fr
->cutoff_scheme
== ecutsGROUP
|| bSHAKE
) &&
2528 !mtop
->bIntermolecularInteractions
)
2530 fr
->bMolPBC
= ir
->bPeriodicMols
;
2532 if (bSHAKE
&& fr
->bMolPBC
)
2534 gmx_fatal(FARGS
, "SHAKE is not supported with periodic molecules");
2541 if (getenv("GMX_USE_GRAPH") != NULL
)
2543 fr
->bMolPBC
= FALSE
;
2546 md_print_warn(cr
, fp
, "GMX_USE_GRAPH is set, using the graph for bonded interactions\n");
2549 if (mtop
->bIntermolecularInteractions
)
2551 md_print_warn(cr
, fp
, "WARNING: Molecules linked by intermolecular interactions have to reside in the same periodic image, otherwise artifacts will occur!\n");
2555 if (bSHAKE
&& fr
->bMolPBC
)
2557 gmx_fatal(FARGS
, "SHAKE is not properly supported with intermolecular interactions. For short simulations where linked molecules remain in the same periodic image, the environment variable GMX_USE_GRAPH can be used to override this check.\n");
2563 fr
->bMolPBC
= dd_bonded_molpbc(cr
->dd
, fr
->ePBC
);
2566 fr
->bGB
= (ir
->implicit_solvent
== eisGBSA
);
2568 fr
->rc_scaling
= ir
->refcoord_scaling
;
2569 copy_rvec(ir
->posres_com
, fr
->posres_com
);
2570 copy_rvec(ir
->posres_comB
, fr
->posres_comB
);
2571 fr
->rlist
= cutoff_inf(ir
->rlist
);
2572 fr
->rlistlong
= cutoff_inf(ir
->rlistlong
);
2573 fr
->eeltype
= ir
->coulombtype
;
2574 fr
->vdwtype
= ir
->vdwtype
;
2575 fr
->ljpme_combination_rule
= ir
->ljpme_combination_rule
;
2577 fr
->coulomb_modifier
= ir
->coulomb_modifier
;
2578 fr
->vdw_modifier
= ir
->vdw_modifier
;
2580 /* Electrostatics: Translate from interaction-setting-in-mdp-file to kernel interaction format */
2581 switch (fr
->eeltype
)
2584 fr
->nbkernel_elec_interaction
= (fr
->bGB
) ? GMX_NBKERNEL_ELEC_GENERALIZEDBORN
: GMX_NBKERNEL_ELEC_COULOMB
;
2590 fr
->nbkernel_elec_interaction
= GMX_NBKERNEL_ELEC_REACTIONFIELD
;
2594 fr
->nbkernel_elec_interaction
= GMX_NBKERNEL_ELEC_REACTIONFIELD
;
2595 fr
->coulomb_modifier
= eintmodEXACTCUTOFF
;
2604 case eelPMEUSERSWITCH
:
2605 fr
->nbkernel_elec_interaction
= GMX_NBKERNEL_ELEC_CUBICSPLINETABLE
;
2610 fr
->nbkernel_elec_interaction
= GMX_NBKERNEL_ELEC_EWALD
;
2614 gmx_fatal(FARGS
, "Unsupported electrostatic interaction: %s", eel_names
[fr
->eeltype
]);
2618 /* Vdw: Translate from mdp settings to kernel format */
2619 switch (fr
->vdwtype
)
2624 fr
->nbkernel_vdw_interaction
= GMX_NBKERNEL_VDW_BUCKINGHAM
;
2628 fr
->nbkernel_vdw_interaction
= GMX_NBKERNEL_VDW_LENNARDJONES
;
2632 fr
->nbkernel_vdw_interaction
= GMX_NBKERNEL_VDW_LJEWALD
;
2638 case evdwENCADSHIFT
:
2639 fr
->nbkernel_vdw_interaction
= GMX_NBKERNEL_VDW_CUBICSPLINETABLE
;
2643 gmx_fatal(FARGS
, "Unsupported vdw interaction: %s", evdw_names
[fr
->vdwtype
]);
2647 /* These start out identical to ir, but might be altered if we e.g. tabulate the interaction in the kernel */
2648 fr
->nbkernel_elec_modifier
= fr
->coulomb_modifier
;
2649 fr
->nbkernel_vdw_modifier
= fr
->vdw_modifier
;
2651 fr
->rvdw
= cutoff_inf(ir
->rvdw
);
2652 fr
->rvdw_switch
= ir
->rvdw_switch
;
2653 fr
->rcoulomb
= cutoff_inf(ir
->rcoulomb
);
2654 fr
->rcoulomb_switch
= ir
->rcoulomb_switch
;
2656 fr
->bTwinRange
= fr
->rlistlong
> fr
->rlist
;
2657 fr
->bEwald
= (EEL_PME(fr
->eeltype
) || fr
->eeltype
== eelEWALD
);
2659 fr
->reppow
= mtop
->ffparams
.reppow
;
2661 if (ir
->cutoff_scheme
== ecutsGROUP
)
2663 fr
->bvdwtab
= ((fr
->vdwtype
!= evdwCUT
|| !gmx_within_tol(fr
->reppow
, 12.0, 10*GMX_DOUBLE_EPS
))
2664 && !EVDW_PME(fr
->vdwtype
));
2665 /* We have special kernels for standard Ewald and PME, but the pme-switch ones are tabulated above */
2666 fr
->bcoultab
= !(fr
->eeltype
== eelCUT
||
2667 fr
->eeltype
== eelEWALD
||
2668 fr
->eeltype
== eelPME
||
2669 fr
->eeltype
== eelRF
||
2670 fr
->eeltype
== eelRF_ZERO
);
2672 /* If the user absolutely wants different switch/shift settings for coul/vdw, it is likely
2673 * going to be faster to tabulate the interaction than calling the generic kernel.
2674 * However, if generic kernels have been requested we keep things analytically.
2676 if (fr
->nbkernel_elec_modifier
== eintmodPOTSWITCH
&&
2677 fr
->nbkernel_vdw_modifier
== eintmodPOTSWITCH
&&
2678 bGenericKernelOnly
== FALSE
)
2680 if ((fr
->rcoulomb_switch
!= fr
->rvdw_switch
) || (fr
->rcoulomb
!= fr
->rvdw
))
2682 fr
->bcoultab
= TRUE
;
2683 /* Once we tabulate electrostatics, we can use the switch function for LJ,
2684 * which would otherwise need two tables.
2688 else if ((fr
->nbkernel_elec_modifier
== eintmodPOTSHIFT
&& fr
->nbkernel_vdw_modifier
== eintmodPOTSHIFT
) ||
2689 ((fr
->nbkernel_elec_interaction
== GMX_NBKERNEL_ELEC_REACTIONFIELD
&&
2690 fr
->nbkernel_elec_modifier
== eintmodEXACTCUTOFF
&&
2691 (fr
->nbkernel_vdw_modifier
== eintmodPOTSWITCH
|| fr
->nbkernel_vdw_modifier
== eintmodPOTSHIFT
))))
2693 if ((fr
->rcoulomb
!= fr
->rvdw
) && (bGenericKernelOnly
== FALSE
))
2695 fr
->bcoultab
= TRUE
;
2699 if (fr
->nbkernel_elec_modifier
== eintmodFORCESWITCH
)
2701 fr
->bcoultab
= TRUE
;
2703 if (fr
->nbkernel_vdw_modifier
== eintmodFORCESWITCH
)
2708 if (getenv("GMX_REQUIRE_TABLES"))
2711 fr
->bcoultab
= TRUE
;
2716 fprintf(fp
, "Table routines are used for coulomb: %s\n", bool_names
[fr
->bcoultab
]);
2717 fprintf(fp
, "Table routines are used for vdw: %s\n", bool_names
[fr
->bvdwtab
]);
2720 if (fr
->bvdwtab
== TRUE
)
2722 fr
->nbkernel_vdw_interaction
= GMX_NBKERNEL_VDW_CUBICSPLINETABLE
;
2723 fr
->nbkernel_vdw_modifier
= eintmodNONE
;
2725 if (fr
->bcoultab
== TRUE
)
2727 fr
->nbkernel_elec_interaction
= GMX_NBKERNEL_ELEC_CUBICSPLINETABLE
;
2728 fr
->nbkernel_elec_modifier
= eintmodNONE
;
2732 if (ir
->cutoff_scheme
== ecutsVERLET
)
2734 if (!gmx_within_tol(fr
->reppow
, 12.0, 10*GMX_DOUBLE_EPS
))
2736 gmx_fatal(FARGS
, "Cut-off scheme %S only supports LJ repulsion power 12", ecutscheme_names
[ir
->cutoff_scheme
]);
2738 fr
->bvdwtab
= FALSE
;
2739 fr
->bcoultab
= FALSE
;
2742 /* Tables are used for direct ewald sum */
2745 if (EEL_PME(ir
->coulombtype
))
2749 fprintf(fp
, "Will do PME sum in reciprocal space for electrostatic interactions.\n");
2751 if (ir
->coulombtype
== eelP3M_AD
)
2753 please_cite(fp
, "Hockney1988");
2754 please_cite(fp
, "Ballenegger2012");
2758 please_cite(fp
, "Essmann95a");
2761 if (ir
->ewald_geometry
== eewg3DC
)
2765 fprintf(fp
, "Using the Ewald3DC correction for systems with a slab geometry.\n");
2767 please_cite(fp
, "In-Chul99a");
2770 fr
->ewaldcoeff_q
= calc_ewaldcoeff_q(ir
->rcoulomb
, ir
->ewald_rtol
);
2771 init_ewald_tab(&(fr
->ewald_table
), ir
, fp
);
2774 fprintf(fp
, "Using a Gaussian width (1/beta) of %g nm for Ewald\n",
2775 1/fr
->ewaldcoeff_q
);
2779 if (EVDW_PME(ir
->vdwtype
))
2783 fprintf(fp
, "Will do PME sum in reciprocal space for LJ dispersion interactions.\n");
2785 please_cite(fp
, "Essmann95a");
2786 fr
->ewaldcoeff_lj
= calc_ewaldcoeff_lj(ir
->rvdw
, ir
->ewald_rtol_lj
);
2789 fprintf(fp
, "Using a Gaussian width (1/beta) of %g nm for LJ Ewald\n",
2790 1/fr
->ewaldcoeff_lj
);
2794 /* Electrostatics */
2795 fr
->epsilon_r
= ir
->epsilon_r
;
2796 fr
->epsilon_rf
= ir
->epsilon_rf
;
2797 fr
->fudgeQQ
= mtop
->ffparams
.fudgeQQ
;
2799 /* Parameters for generalized RF */
2803 if (fr
->eeltype
== eelGRF
)
2805 init_generalized_rf(fp
, mtop
, ir
, fr
);
2808 fr
->bF_NoVirSum
= (EEL_FULL(fr
->eeltype
) || EVDW_PME(fr
->vdwtype
) ||
2809 gmx_mtop_ftype_count(mtop
, F_POSRES
) > 0 ||
2810 gmx_mtop_ftype_count(mtop
, F_FBPOSRES
) > 0 ||
2811 IR_ELEC_FIELD(*ir
) ||
2812 (fr
->adress_icor
!= eAdressICOff
)
2815 if (fr
->cutoff_scheme
== ecutsGROUP
&&
2816 ncg_mtop(mtop
) > fr
->cg_nalloc
&& !DOMAINDECOMP(cr
))
2818 /* Count the total number of charge groups */
2819 fr
->cg_nalloc
= ncg_mtop(mtop
);
2820 srenew(fr
->cg_cm
, fr
->cg_nalloc
);
2822 if (fr
->shift_vec
== NULL
)
2824 snew(fr
->shift_vec
, SHIFTS
);
2827 if (fr
->fshift
== NULL
)
2829 snew(fr
->fshift
, SHIFTS
);
2832 if (fr
->nbfp
== NULL
)
2834 fr
->ntype
= mtop
->ffparams
.atnr
;
2835 fr
->nbfp
= mk_nbfp(&mtop
->ffparams
, fr
->bBHAM
);
2836 if (EVDW_PME(fr
->vdwtype
))
2838 fr
->ljpme_c6grid
= make_ljpme_c6grid(&mtop
->ffparams
, fr
);
2842 /* Copy the energy group exclusions */
2843 fr
->egp_flags
= ir
->opts
.egp_flags
;
2845 /* Van der Waals stuff */
2846 if ((fr
->vdwtype
!= evdwCUT
) && (fr
->vdwtype
!= evdwUSER
) && !fr
->bBHAM
)
2848 if (fr
->rvdw_switch
>= fr
->rvdw
)
2850 gmx_fatal(FARGS
, "rvdw_switch (%f) must be < rvdw (%f)",
2851 fr
->rvdw_switch
, fr
->rvdw
);
2855 fprintf(fp
, "Using %s Lennard-Jones, switch between %g and %g nm\n",
2856 (fr
->eeltype
== eelSWITCH
) ? "switched" : "shifted",
2857 fr
->rvdw_switch
, fr
->rvdw
);
2861 if (fr
->bBHAM
&& EVDW_PME(fr
->vdwtype
))
2863 gmx_fatal(FARGS
, "LJ PME not supported with Buckingham");
2866 if (fr
->bBHAM
&& (fr
->vdwtype
== evdwSHIFT
|| fr
->vdwtype
== evdwSWITCH
))
2868 gmx_fatal(FARGS
, "Switch/shift interaction not supported with Buckingham");
2871 if (fr
->bBHAM
&& fr
->cutoff_scheme
== ecutsVERLET
)
2873 gmx_fatal(FARGS
, "Verlet cutoff-scheme is not supported with Buckingham");
2878 fprintf(fp
, "Cut-off's: NS: %g Coulomb: %g %s: %g\n",
2879 fr
->rlist
, fr
->rcoulomb
, fr
->bBHAM
? "BHAM" : "LJ", fr
->rvdw
);
2882 fr
->eDispCorr
= ir
->eDispCorr
;
2883 if (ir
->eDispCorr
!= edispcNO
)
2885 set_avcsixtwelve(fp
, fr
, mtop
);
2890 set_bham_b_max(fp
, fr
, mtop
);
2893 fr
->gb_epsilon_solvent
= ir
->gb_epsilon_solvent
;
2895 /* Copy the GBSA data (radius, volume and surftens for each
2896 * atomtype) from the topology atomtype section to forcerec.
2898 snew(fr
->atype_radius
, fr
->ntype
);
2899 snew(fr
->atype_vol
, fr
->ntype
);
2900 snew(fr
->atype_surftens
, fr
->ntype
);
2901 snew(fr
->atype_gb_radius
, fr
->ntype
);
2902 snew(fr
->atype_S_hct
, fr
->ntype
);
2904 if (mtop
->atomtypes
.nr
> 0)
2906 for (i
= 0; i
< fr
->ntype
; i
++)
2908 fr
->atype_radius
[i
] = mtop
->atomtypes
.radius
[i
];
2910 for (i
= 0; i
< fr
->ntype
; i
++)
2912 fr
->atype_vol
[i
] = mtop
->atomtypes
.vol
[i
];
2914 for (i
= 0; i
< fr
->ntype
; i
++)
2916 fr
->atype_surftens
[i
] = mtop
->atomtypes
.surftens
[i
];
2918 for (i
= 0; i
< fr
->ntype
; i
++)
2920 fr
->atype_gb_radius
[i
] = mtop
->atomtypes
.gb_radius
[i
];
2922 for (i
= 0; i
< fr
->ntype
; i
++)
2924 fr
->atype_S_hct
[i
] = mtop
->atomtypes
.S_hct
[i
];
2928 /* Generate the GB table if needed */
2932 fr
->gbtabscale
= 2000;
2934 fr
->gbtabscale
= 500;
2938 fr
->gbtab
= make_gb_table(oenv
, fr
);
2940 init_gb(&fr
->born
, fr
, ir
, mtop
, ir
->gb_algorithm
);
2942 /* Copy local gb data (for dd, this is done in dd_partition_system) */
2943 if (!DOMAINDECOMP(cr
))
2945 make_local_gb(cr
, fr
->born
, ir
->gb_algorithm
);
2949 /* Set the charge scaling */
2950 if (fr
->epsilon_r
!= 0)
2952 fr
->epsfac
= ONE_4PI_EPS0
/fr
->epsilon_r
;
2956 /* eps = 0 is infinite dieletric: no coulomb interactions */
2960 /* Reaction field constants */
2961 if (EEL_RF(fr
->eeltype
))
2963 calc_rffac(fp
, fr
->eeltype
, fr
->epsilon_r
, fr
->epsilon_rf
,
2964 fr
->rcoulomb
, fr
->temp
, fr
->zsquare
, box
,
2965 &fr
->kappa
, &fr
->k_rf
, &fr
->c_rf
);
2968 /*This now calculates sum for q and c6*/
2969 set_chargesum(fp
, fr
, mtop
);
2971 /* if we are using LR electrostatics, and they are tabulated,
2972 * the tables will contain modified coulomb interactions.
2973 * Since we want to use the non-shifted ones for 1-4
2974 * coulombic interactions, we must have an extra set of tables.
2977 /* Construct tables.
2978 * A little unnecessary to make both vdw and coul tables sometimes,
2979 * but what the heck... */
2981 bMakeTables
= fr
->bcoultab
|| fr
->bvdwtab
|| fr
->bEwald
||
2982 (ir
->eDispCorr
!= edispcNO
&& ir_vdw_switched(ir
));
2984 bMakeSeparate14Table
= ((!bMakeTables
|| fr
->eeltype
!= eelCUT
|| fr
->vdwtype
!= evdwCUT
||
2985 fr
->coulomb_modifier
!= eintmodNONE
||
2986 fr
->vdw_modifier
!= eintmodNONE
||
2987 fr
->bBHAM
|| fr
->bEwald
) &&
2988 (gmx_mtop_ftype_count(mtop
, F_LJ14
) > 0 ||
2989 gmx_mtop_ftype_count(mtop
, F_LJC14_Q
) > 0 ||
2990 gmx_mtop_ftype_count(mtop
, F_LJC_PAIRS_NB
) > 0));
2992 negp_pp
= ir
->opts
.ngener
- ir
->nwall
;
2996 bSomeNormalNbListsAreInUse
= TRUE
;
3001 bSomeNormalNbListsAreInUse
= (ir
->eDispCorr
!= edispcNO
);
3002 for (egi
= 0; egi
< negp_pp
; egi
++)
3004 for (egj
= egi
; egj
< negp_pp
; egj
++)
3006 egp_flags
= ir
->opts
.egp_flags
[GID(egi
, egj
, ir
->opts
.ngener
)];
3007 if (!(egp_flags
& EGP_EXCL
))
3009 if (egp_flags
& EGP_TABLE
)
3015 bSomeNormalNbListsAreInUse
= TRUE
;
3020 if (bSomeNormalNbListsAreInUse
)
3022 fr
->nnblists
= negptable
+ 1;
3026 fr
->nnblists
= negptable
;
3028 if (fr
->nnblists
> 1)
3030 snew(fr
->gid2nblists
, ir
->opts
.ngener
*ir
->opts
.ngener
);
3039 snew(fr
->nblists
, fr
->nnblists
);
3041 /* This code automatically gives table length tabext without cut-off's,
3042 * in that case grompp should already have checked that we do not need
3043 * normal tables and we only generate tables for 1-4 interactions.
3045 rtab
= ir
->rlistlong
+ ir
->tabext
;
3049 /* make tables for ordinary interactions */
3050 if (bSomeNormalNbListsAreInUse
)
3052 make_nbf_tables(fp
, oenv
, fr
, rtab
, cr
, tabfn
, NULL
, NULL
, &fr
->nblists
[0]);
3055 make_nbf_tables(fp
, oenv
, fr
, rtab
, cr
, tabfn
, NULL
, NULL
, &fr
->nblists
[fr
->nnblists
/2]);
3057 if (!bMakeSeparate14Table
)
3059 fr
->tab14
= fr
->nblists
[0].table_elec_vdw
;
3069 /* Read the special tables for certain energy group pairs */
3070 nm_ind
= mtop
->groups
.grps
[egcENER
].nm_ind
;
3071 for (egi
= 0; egi
< negp_pp
; egi
++)
3073 for (egj
= egi
; egj
< negp_pp
; egj
++)
3075 egp_flags
= ir
->opts
.egp_flags
[GID(egi
, egj
, ir
->opts
.ngener
)];
3076 if ((egp_flags
& EGP_TABLE
) && !(egp_flags
& EGP_EXCL
))
3078 if (fr
->nnblists
> 1)
3080 fr
->gid2nblists
[GID(egi
, egj
, ir
->opts
.ngener
)] = m
;
3082 /* Read the table file with the two energy groups names appended */
3083 make_nbf_tables(fp
, oenv
, fr
, rtab
, cr
, tabfn
,
3084 *mtop
->groups
.grpname
[nm_ind
[egi
]],
3085 *mtop
->groups
.grpname
[nm_ind
[egj
]],
3089 make_nbf_tables(fp
, oenv
, fr
, rtab
, cr
, tabfn
,
3090 *mtop
->groups
.grpname
[nm_ind
[egi
]],
3091 *mtop
->groups
.grpname
[nm_ind
[egj
]],
3092 &fr
->nblists
[fr
->nnblists
/2+m
]);
3096 else if (fr
->nnblists
> 1)
3098 fr
->gid2nblists
[GID(egi
, egj
, ir
->opts
.ngener
)] = 0;
3104 else if ((fr
->eDispCorr
!= edispcNO
) &&
3105 ((fr
->vdw_modifier
== eintmodPOTSWITCH
) ||
3106 (fr
->vdw_modifier
== eintmodFORCESWITCH
) ||
3107 (fr
->vdw_modifier
== eintmodPOTSHIFT
)))
3109 /* Tables might not be used for the potential modifier interactions per se, but
3110 * we still need them to evaluate switch/shift dispersion corrections in this case.
3112 make_nbf_tables(fp
, oenv
, fr
, rtab
, cr
, tabfn
, NULL
, NULL
, &fr
->nblists
[0]);
3115 if (bMakeSeparate14Table
)
3117 /* generate extra tables with plain Coulomb for 1-4 interactions only */
3118 fr
->tab14
= make_tables(fp
, oenv
, fr
, MASTER(cr
), tabpfn
, rtab
,
3119 GMX_MAKETABLES_14ONLY
);
3122 /* Read AdResS Thermo Force table if needed */
3123 if (fr
->adress_icor
== eAdressICThermoForce
)
3125 /* old todo replace */
3127 if (ir
->adress
->n_tf_grps
> 0)
3129 make_adress_tf_tables(fp
, oenv
, fr
, ir
, tabfn
, mtop
, box
);
3134 /* load the default table */
3135 snew(fr
->atf_tabs
, 1);
3136 fr
->atf_tabs
[DEFAULT_TF_TABLE
] = make_atf_table(fp
, oenv
, fr
, tabafn
, box
);
3141 fr
->nwall
= ir
->nwall
;
3142 if (ir
->nwall
&& ir
->wall_type
== ewtTABLE
)
3144 make_wall_tables(fp
, oenv
, ir
, tabfn
, &mtop
->groups
, fr
);
3149 fcd
->bondtab
= make_bonded_tables(fp
,
3150 F_TABBONDS
, F_TABBONDSNC
,
3152 fcd
->angletab
= make_bonded_tables(fp
,
3155 fcd
->dihtab
= make_bonded_tables(fp
,
3163 fprintf(debug
, "No fcdata or table file name passed, can not read table, can not do bonded interactions\n");
3167 /* QM/MM initialization if requested
3171 fprintf(stderr
, "QM/MM calculation requested.\n");
3174 fr
->bQMMM
= ir
->bQMMM
;
3175 fr
->qr
= mk_QMMMrec();
3177 /* Set all the static charge group info */
3178 fr
->cginfo_mb
= init_cginfo_mb(fp
, mtop
, fr
, bNoSolvOpt
,
3180 &fr
->bExcl_IntraCGAll_InterCGNone
);
3181 if (DOMAINDECOMP(cr
))
3187 fr
->cginfo
= cginfo_expand(mtop
->nmolblock
, fr
->cginfo_mb
);
3190 if (!DOMAINDECOMP(cr
))
3192 forcerec_set_ranges(fr
, ncg_mtop(mtop
), ncg_mtop(mtop
),
3193 mtop
->natoms
, mtop
->natoms
, mtop
->natoms
);
3196 fr
->print_force
= print_force
;
3199 /* coarse load balancing vars */
3204 /* Initialize neighbor search */
3205 init_ns(fp
, cr
, &fr
->ns
, fr
, mtop
);
3207 if (cr
->duty
& DUTY_PP
)
3209 gmx_nonbonded_setup(fr
, bGenericKernelOnly
);
3213 gmx_setup_adress_kernels(fp,bGenericKernelOnly);
3218 /* Initialize the thread working data for bonded interactions */
3219 init_bonded_threading(fp
, mtop
->groups
.grps
[egcENER
].nr
,
3220 &fr
->bonded_threading
);
3222 fr
->nthread_ewc
= gmx_omp_nthreads_get(emntBonded
);
3223 snew(fr
->ewc_t
, fr
->nthread_ewc
);
3224 snew(fr
->excl_load
, fr
->nthread_ewc
+ 1);
3226 /* fr->ic is used both by verlet and group kernels (to some extent) now */
3227 init_interaction_const(fp
, &fr
->ic
, fr
);
3228 init_interaction_const_tables(fp
, fr
->ic
, rtab
);
3230 if (fr
->cutoff_scheme
== ecutsVERLET
)
3232 if (ir
->rcoulomb
!= ir
->rvdw
)
3234 gmx_fatal(FARGS
, "With Verlet lists rcoulomb and rvdw should be identical");
3237 init_nb_verlet(fp
, &fr
->nbv
, bFEP_NonBonded
, ir
, fr
, cr
, nbpu_opt
);
3240 if (ir
->eDispCorr
!= edispcNO
)
3242 calc_enervirdiff(fp
, ir
->eDispCorr
, fr
);
/* Print "name: value" for a real-valued field; the name is obtained by
 * stringizing the macro argument.  All arguments are fully parenthesized
 * (the original left both fp and r bare, unlike its sibling macros),
 * so expressions such as pr_real(fp, a + b) expand safely. */
#define pr_real(fp, r) fprintf((fp), "%s: %e\n", #r, (r))
/* Print "name: value" for an integer field; the name is obtained by
 * stringizing the macro argument.  The value argument is parenthesized
 * so comma-free compound expressions expand safely. */
#define pr_int(fp, i) fprintf((fp), "%s: %d\n", #i, (i))
/* Print "name: TRUE/FALSE" for a gmx_bool field via the global bool_names
 * lookup table; the name is obtained by stringizing the macro argument.
 * The index argument is parenthesized for macro-expansion safety. */
#define pr_bool(fp, b) fprintf((fp), "%s: %s\n", #b, bool_names[(b)])
/* Diagnostic dump of selected t_forcerec fields to fp, using the
 * pr_real/pr_int/pr_bool macros defined above.
 * NOTE(review): fr->rcoulomb is printed twice (original lines 3255 and
 * 3266); the second call was presumably meant to print a different field
 * — confirm against revision history before changing.
 * NOTE(review): several interior lines (braces, the declaration of i) are
 * elided in this extraction; the code below is kept byte-identical. */
3250 void pr_forcerec(FILE *fp
, t_forcerec
*fr
)
/* Basic cut-off radii and the 1-4 electrostatics fudge factor. */
3254 pr_real(fp
, fr
->rlist
);
3255 pr_real(fp
, fr
->rcoulomb
);
3256 pr_real(fp
, fr
->fudgeQQ
);
/* Neighbor-searching flags. */
3257 pr_bool(fp
, fr
->bGrid
);
3258 pr_bool(fp
, fr
->bTwinRange
);
3259 /*pr_int(fp,fr->cg0);
3260 pr_int(fp,fr->hcg);*/
/* Table size of each non-bonded list's combined elec+vdw table. */
3261 for (i
= 0; i
< fr
->nnblists
; i
++)
3263 pr_int(fp
, fr
->nblists
[i
].table_elec_vdw
.n
);
3265 pr_real(fp
, fr
->rcoulomb_switch
);
3266 pr_real(fp
, fr
->rcoulomb
);
/* Distribute the exclusion (Ewald exclusion-correction) work over the
 * fr->nthread_ewc threads: fills fr->excl_load[0..nthread_ewc] with
 * boundary indices into top->excls such that each thread handles roughly
 * an equal share of the total exclusion count.
 * NOTE(review): the counting statements inside the loops and the
 * initializations of n/ntot are elided in this extraction (original lines
 * 3284-3291, 3293-3294, 3301-3308 missing); the visible structure is a
 * count pass followed by a boundary-assignment pass — confirm details
 * against the full file.  Code below is kept byte-identical. */
3271 void forcerec_set_excl_load(t_forcerec
*fr
,
3272 const gmx_localtop_t
*top
)
3275 int t
, i
, j
, ntot
, n
, ntarget
;
/* ind[i]..ind[i+1] delimits the exclusion entries of charge group i. */
3277 ind
= top
->excls
.index
;
/* First pass: walk all exclusions (presumably to accumulate ntot). */
3281 for (i
= 0; i
< top
->excls
.nr
; i
++)
3283 for (j
= ind
[i
]; j
< ind
[i
+1]; j
++)
/* Thread 0 always starts at index 0. */
3292 fr
->excl_load
[0] = 0;
/* Second pass: advance i until thread t has reached its proportional
 * target share of the total work, then record the boundary. */
3295 for (t
= 1; t
<= fr
->nthread_ewc
; t
++)
3297 ntarget
= (ntot
*t
)/fr
->nthread_ewc
;
3298 while (i
< top
->excls
.nr
&& n
< ntarget
)
3300 for (j
= ind
[i
]; j
< ind
[i
+1]; j
++)
3309 fr
->excl_load
[t
] = i
;
3313 /* Frees GPU memory and destroys the GPU context.
3315 * Note that this function needs to be called even if GPUs are not used
3316 * in this run because the PME ranks have no knowledge of whether GPUs
3317 * are used or not, but all ranks need to enter the barrier below.
3319 void free_gpu_resources(const t_forcerec
*fr
,
3320 const t_commrec
*cr
,
3321 const gmx_gpu_info_t
*gpu_info
,
3322 const gmx_gpu_opt_t
*gpu_opt
)
3324 gmx_bool bIsPPrankUsingGPU
;
3325 char gpu_err_str
[STRLEN
];
3327 bIsPPrankUsingGPU
= (cr
->duty
& DUTY_PP
) && fr
&& fr
->nbv
&& fr
->nbv
->bUseGPU
;
3329 if (bIsPPrankUsingGPU
)
3331 /* free nbnxn data in GPU memory */
3332 nbnxn_gpu_free(fr
->nbv
->gpu_nbv
);
3334 /* With tMPI we need to wait for all ranks to finish deallocation before
3335 * destroying the context in free_gpu() as some ranks may be sharing
3337 * Note: as only PP ranks need to free GPU resources, so it is safe to
3338 * not call the barrier on PME ranks.
3340 #ifdef GMX_THREAD_MPI
3345 #endif /* GMX_THREAD_MPI */
3347 /* uninitialize GPU (by destroying the context) */
3348 if (!free_cuda_gpu(cr
->rank_pp_intranode
, gpu_err_str
, gpu_info
, gpu_opt
))
3350 gmx_warning("On rank %d failed to free GPU #%d: %s",
3351 cr
->nodeid
, get_current_cuda_gpu_device_id(), gpu_err_str
);