/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed-forces/bonded.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/genborn.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_grid.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/pleasecite.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/sysinfo.h"

#include "nbnxn_gpu.h"
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_int64_t               step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    double dt, elapsed_seconds, time_per_step;
    char   buf[48];

    fprintf(out, "step %s", gmx_step_str(step, buf));
    fflush(out);

    if ((step >= ir->nstlist))
    {
        double seconds_since_epoch = gmx_gettime();

        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step)*time_per_step;

        if (ir->nsteps >= 0)
        {
            if (dt >= 300)
            {
                finish = (time_t) (seconds_since_epoch + dt);
                gmx_ctime_r(&finish, timebuf, STRLEN);
                sprintf(buf, "%s", timebuf);
                buf[strlen(buf)-1] = '\0';
                fprintf(out, ", will finish %s", buf);
            }
            else
            {
                fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
            }
        }
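        /* Clarifying note (added): nsteps < 0 denotes an open-ended run, so
         * instead of a finish time we report throughput below. delta_t is in
         * ps, hence delta_t/1000 is ns per step, and 24*60*60 converts the
         * per-second rate to ns/day. */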
        else
        {
            fprintf(out, " performance: %.1f ns/day    ",
                    ir->delta_t/1000*24*60*60/time_per_step);
        }
    }
}
void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         double the_time)
{
    char   time_string[STRLEN];

    if (!fplog)
    {
        return;
    }

    int    i;
    char   timebuf[STRLEN];
    time_t temp_time = (time_t) the_time;

    gmx_ctime_r(&temp_time, timebuf, STRLEN);
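    /* Clarifying note (added): ctime-style strings end in a newline; the
     * copy loop below keeps only printable characters (>= ' '), which
     * strips that trailing newline. */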
    for (i = 0; timebuf[i] >= ' '; i++)
    {
        time_string[i] = timebuf[i];
    }
    time_string[i] = '\0';

    fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
}
void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
                 const char *name)
{
    char buf[STRLEN];

    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));
}
static void sum_forces(rvec f[], const PaddedRVecVector *forceToAdd)
{
    /* TODO: remove this - 1 when padding is properly implemented */
    int         end  = forceToAdd->size() - 1;
    const rvec *fAdd = as_rvec_array(forceToAdd->data());

    // cppcheck-suppress unreadVariable
    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], fAdd[i]);
    }
}
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int i;

    /* The short-range virial from surrounding boxes */
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
static void pull_potential_wrapper(t_commrec      *cr,
                                   t_inputrec     *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms      *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real           *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}
static void pme_receive_force_ener(t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec     *fr)
{
    real  e_q, e_lj, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, as_rvec_array(fr->f_novirsum->data()), fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);

    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real forceTolerance,
                               const rvec *x, const rvec *f)
{
    real           force2Tolerance = gmx::square(forceTolerance);
    std::uintmax_t numNonFinite    = 0;
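    /* Clarifying note (added): norm2() returns the squared vector norm, so
     * comparing against the squared tolerance avoids a sqrt per atom; the
     * sqrt is only taken for atoms that are actually printed. */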
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" GMX_PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" GMX_PRId64 " detected non-finite forces on %ju atoms", step, numNonFinite);
    }
}
static void post_process_forces(t_commrec      *cr,
                                gmx_int64_t     step,
                                t_nrnb         *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms      *mdatoms,
                                t_graph        *graph,
                                t_forcerec     *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, as_rvec_array(fr->f_novirsum->data()), nullptr,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            sum_forces(f, fr->f_novirsum);

            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (EVDW_PME(fr->vdwtype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_lj_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec           *fr,
                         interaction_const_t  *ic,
                         gmx_enerdata_t       *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb               *nrnb,
                         gmx_wallcycle_t       wcycle)
{
    int                       enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    nonbonded_verlet_group_t *nbvg;
    gmx_bool                  bUsingGpuKernels;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);

    if (!bUsingGpuKernels)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             nbvg->nbat, ic,
                             fr->shift_vec,
                             flags,
                             clearF,
                             fr->fshift[0],
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  nbvg->nbat, ic,
                                  nbvg->ewald_excl,
                                  fr->shift_vec,
                                  flags,
                                  clearF,
                                  fr->fshift[0],
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   nbvg->nbat, ic,
                                   nbvg->ewald_excl,
                                   fr->shift_vec,
                                   flags,
                                   clearF,
                                   fr->fshift[0],
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_GPU:
            nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat, ic,
                                 fr->shift_vec,
                                 flags,
                                 clearF,
                                 nbvg->nbat->out[0].f,
                                 fr->fshift[0],
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
    }
    if (!bUsingGpuKernels)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);
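    /* Clarifying note (added): natpair_ljq, natpair_lj and natpair_q count
     * the pairs needing LJ+Coulomb, LJ-only and Coulomb-only work,
     * respectively, so each flop category above is incremented with the
     * matching pair count. */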
    if (ic->vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
}
static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             t_forcerec           *fr,
                             rvec                  x[],
                             rvec                  f[],
                             t_mdatoms            *mdatoms,
                             t_lambda             *fepvals,
                             real                 *lambda,
                             gmx_enerdata_t       *enerd,
                             int                   flags,
                             t_nrnb               *nrnb,
                             gmx_wallcycle_t       wcycle)
{
    int              donb_flags;
    nb_kernel_data_t kernel_data;
    real             lam_i[efptNR];
    real             dvdl_nb[efptNR];
    int              th;
    int              i, j;

    donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (flags & GMX_FORCE_VIRIAL)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (flags & GMX_FORCE_ENERGY)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i] = 0;
    }

    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
    {
        try
        {
            gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                      x, f, fr, mdatoms, &kernel_data, nrnb);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energies contributions.
     */
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
    {
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
        {
            for (j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
            {
                try
                {
                    gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                              x, f, fr, mdatoms, &kernel_data, nrnb);
                }
                GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}
gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
{
    return nbv != nullptr && nbv->bUseGPU;
}
static gmx_inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}
/*! \brief This routine checks if the potential energy is finite.
 *
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] enerd The energy data; the non-bonded group energies need to be added in here before calling this routine
 */
static void checkPotentialEnergyValidity(const gmx_enerdata_t *enerd)
{
    if (!std::isfinite(enerd->term[F_EPOT]))
    {
        gmx_fatal(FARGS, "The total potential energy is %g, which is not finite. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A non-finite potential energy can be caused by overlapping interactions in bonded interactions or very large or NaN coordinate values. Usually this is caused by a badly or non-equilibrated initial configuration or incorrect interactions or parameters in the topology.",
                  enerd->term[F_EPOT],
                  enerd->term[F_LJ],
                  enerd->term[F_COUL_SR]);
    }
}
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         PaddedRVecVector *force,
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    int                 cg1, i, j;
    double              mu[2*DIM];
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool            bDoForces, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    rvec                vzero, box_diag;
    float               cycles_pme, cycles_force, cycles_wait_gpu;
    /* TODO To avoid loss of precision, float can't be used for a
     * cycle count. Build an object that can do this right and perhaps
     * also be used by gmx_wallcycle_t */
    gmx_cycles_t        cycleCountBeforeLocalWorkCompletes = 0;
    nonbonded_verlet_t *nbv;

    nbv             = fr->nbv;
    cycles_force    = 0;
    cycles_wait_gpu = 0;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    if (fr->n_tpi > 0)
    {
        cg1--;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
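    /* Clarifying note (added): "emulation" here means the GPU-layout
     * (8x8x8) pair lists are processed by the plain-C reference kernel on
     * the CPU, so the control flow mirrors the real GPU path. */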
    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

#if GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        gmx_bool bBS;
        matrix   boxs;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        clear_rvec(vzero);
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, vzero, box_diag,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              0, nullptr,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       fr->cginfo, x,
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }

        wallcycle_stop(wcycle, ewcNS);
    }

    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            &top->excls,
                            ic->rlist,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            eintLocal,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                    nbv->grp[eintLocal].nbl_lists.nbl[0],
                                    eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                &top->excls,
                                ic->rlist,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                eintNonlocal,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
                                        nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                        eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
                                     flags, eatNonlocal);
        }
        nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
                                 flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    if (bStateChanged && inputrecNeedMutot(inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }

        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                    lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = force;
            }
        }

        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs_omp(fr->f_novirsum->size() - 1,
                                as_rvec_array(fr->f_novirsum->data()));
            }
        }
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, because in that
     * function, the listed forces are calculated before PME, which
     * does communication. With this order, non-bonded and listed
     * force calculation imbalance can be balanced out by the domain
     * decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }
    }

    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            /* This is not in a subcounter because it takes a
               negligible and constant-sized amount of time */
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                float cycles_tmp;

                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                       flags, eatNonlocal,
                                       enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                       fr->fshift);
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }

    if (bDoForces && DOMAINDECOMP(cr))
    {
        if (bUseGPU)
        {
            /* We are done with the CPU compute, but the GPU local non-bonded
             * kernel can still be running while we communicate the forces.
             * We start a counter here, so we can, hopefully, time the rest
             * of the GPU kernel execution and data transfer.
             */
            cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();
        }

        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        wallcycle_stop(wcycle, ewcMOVEF);
    }

    if (bUseOrEmulGPU)
    {
        /* wait for local forces (or calculate in emulation mode) */
        if (bUseGPU)
        {
            float       cycles_tmp, cycles_wait_est;
            /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
             * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
             * but even with a step of 0.1 ms the difference is less than 1%
             * of the step time.
             */
            const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */

            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
                                   flags, eatLocal,
                                   enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                   fr->fshift);
            cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            if (bDoForces && DOMAINDECOMP(cr))
            {
                cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;

                if (cycles_tmp < gpuWaitApiOverheadMargin)
                {
                    /* We measured few cycles, it could be that the kernel
                     * and transfer finished earlier and there was no actual
                     * wait time, only API call overhead.
                     * Then the actual time could be anywhere between 0 and
                     * cycles_wait_est. As a compromise, we use half the time.
                     */
                    cycles_wait_est *= 0.5f;
                }
            }
            else
            {
                /* No force communication so we actually timed the wait */
                cycles_wait_est = cycles_tmp;
            }
            /* Even though this is after dd_move_f, the actual task we are
             * waiting for runs asynchronously with dd_move_f and we usually
             * have nothing to balance it with, so we can and should add
             * the time to the force time for load balancing.
             */
            cycles_force    += cycles_wait_est;
            cycles_wait_gpu += cycles_wait_est;

            /* now clear the GPU outputs while we finish the step on the CPU */
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                       nbv->grp[eintLocal].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
            if (bUseGPU)
            {
                dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
            }
        }
    }

    if (bDoForces)
    {
        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
        {
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        /* Since the COM pulling is always done mass-weighted, no forces are
         * applied to vsites and this call can be done after vsite spreading.
         */
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(enerd);
        }
    }
}
void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        PaddedRVecVector *force,
                        tensor vir_force,
                        t_mdatoms *mdatoms,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
                        int flags)
{
    int      cg0, cg1, i, j;
    double   mu[2*DIM];
    gmx_bool bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool bDoForces;
    float    cycles_pme, cycles_force;

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    if (fr->n_tpi > 0)
    {
        cg1--;
    }

    bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
    bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
    bFillGrid      = (bNS && bStateChanged);
    bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces      = (flags & GMX_FORCE_FORCES);

    update_forcerec(fr, box);

    if (inputrecNeedMutot(inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                     &(top->cgs), x, fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);
            inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }
    else if (bCalcCGCM)
    {
        calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
        inc_nrnb(nrnb, eNR_CGCM, homenr);
    }

    if (bCalcCGCM && gmx_debug_at)
    {
        pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
    }

#if GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        gmx_bool bBS;
        matrix   boxs;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* Communicate coordinates and sum dipole if necessary */
    if (DOMAINDECOMP(cr))
    {
        wallcycle_start(wcycle, ewcMOVEX);
        dd_move_x(cr->dd, box, x);
        wallcycle_stop(wcycle, ewcMOVEX);
    }

    if (inputrecNeedMutot(inputrec))
    {
        if (bStateChanged)
        {
            if (PAR(cr))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            for (i = 0; i < 2; i++)
            {
                for (j = 0; j < DIM; j++)
                {
                    fr->mu_tot[i][j] = mu[i*DIM + j];
                }
            }
        }
        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }

    /* Reset energies */
    reset_enerdata(enerd);
    clear_rvecs(SHIFTS, fr->fshift);

    if (bNS)
    {
        wallcycle_start(wcycle, ewcNS);

        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        /* Do the actual neighbour searching */
        ns(fplog, fr, box,
           groups, top, mdatoms,
           cr, nrnb, bFillGrid);

        wallcycle_stop(wcycle, ewcNS);
    }

    if (inputrec->implicit_solvent && bNS)
    {
        make_gb_nblist(cr, inputrec->gb_algorithm,
                       x, box, fr, &top->idef, graph, fr->born);
    }

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Temporary solution until all routines take PaddedRVecVector */
    rvec *f = as_rvec_array(force->data());

    /* Start the force cycle counter.
     * This counter is stopped after do_force_lowlevel.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->forceBufferNoVirialSummation;
                /* TODO: remove this - 1 when padding is properly implemented */
                clear_rvecs(fr->f_novirsum->size() - 1,
                            as_rvec_array(fr->f_novirsum->data()));
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = force;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);

        clear_rvec(fr->vir_diag_posres);
    }
    if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
    {
        clear_pull_forces(inputrec->pull_work);
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,
                      flags,
                      &cycles_pme);

    cycles_force = wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
        }
    }

    if (bDoForces)
    {
        /* Compute forces due to electric field */
        if (fr->efield != nullptr)
        {
            fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
        }

        /* Communicate the forces */
        if (DOMAINDECOMP(cr))
        {
            wallcycle_start(wcycle, ewcMOVEF);
            dd_move_f(cr->dd, f, fr->fshift);
            /* Do we need to communicate the separate force array
             * for terms that do not contribute to the single sum virial?
             * Position restraints and electric fields do not introduce
             * inter-cg forces, only full electrostatics methods do.
             * When we do not calculate the virial, fr->f_novirsum = f,
             * so we have already communicated these forces.
             */
            if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
                (flags & GMX_FORCE_VIRIAL))
            {
                dd_move_f(cr->dd, as_rvec_array(fr->f_novirsum->data()), nullptr);
            }
            wallcycle_stop(wcycle, ewcMOVEF);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t,
                               wcycle);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    if (flags & GMX_FORCE_ENERGY)
    {
        /* Sum the potential energy terms from group contributions */
        sum_epot(&(enerd->grpp), enerd->term);

        if (!EI_TPI(inputrec->eI))
        {
            checkPotentialEnergyValidity(enerd);
        }
    }
}
void do_force(FILE *fplog, t_commrec *cr,
              t_inputrec *inputrec,
              gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_localtop_t *top,
              gmx_groups_t *groups,
              matrix box, PaddedRVecVector *coordinates, history_t *hist,
              PaddedRVecVector *force,
              tensor vir_force,
              t_mdatoms *mdatoms,
              gmx_enerdata_t *enerd, t_fcdata *fcd,
              gmx::ArrayRef<real> lambda, t_graph *graph,
              t_forcerec *fr,
              gmx_vsite_t *vsite, rvec mu_tot,
              double t, gmx_edsam_t ed,
              gmx_bool bBornRadii,
              int flags)
{
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }

    GMX_ASSERT(coordinates->size() >= static_cast<unsigned int>(fr->natoms_force + 1) ||
               fr->natoms_force == 0, "We might need 1 element extra for SIMD");
    GMX_ASSERT(force->size() >= static_cast<unsigned int>(fr->natoms_force + 1) ||
               fr->natoms_force == 0, "We might need 1 element extra for SIMD");

    rvec *x = as_rvec_array(coordinates->data());

    switch (inputrec->cutoff_scheme)
    {
        case ecutsVERLET:
            do_force_cutsVERLET(fplog, cr, inputrec,
                                step, nrnb, wcycle,
                                top,
                                groups,
                                box, x, hist,
                                force, vir_force,
                                mdatoms,
                                enerd, fcd,
                                lambda.data(), graph,
                                fr, fr->ic,
                                vsite, mu_tot,
                                t, ed,
                                bBornRadii,
                                flags);
            break;
        case ecutsGROUP:
            do_force_cutsGROUP(fplog, cr, inputrec,
                               step, nrnb, wcycle,
                               top,
                               groups,
                               box, x, hist,
                               force, vir_force,
                               mdatoms,
                               enerd, fcd,
                               lambda.data(), graph,
                               fr, vsite, mu_tot,
                               t, ed,
                               bBornRadii,
                               flags);
            break;
        default:
            gmx_incons("Invalid cut-off scheme passed!");
    }
}
void do_constrain_first(FILE *fplog, gmx_constr_t constr,
                        t_inputrec *ir, t_mdatoms *md,
                        t_state *state, t_commrec *cr, t_nrnb *nrnb,
                        t_forcerec *fr, gmx_localtop_t *top)
{
    int             i, m, start, end;
    gmx_int64_t     step;
    real            dt = ir->delta_t;
    real            dvdl_dum;
    rvec           *savex;

    /* We need to allocate one element extra, since we might use
     * (unaligned) 4-wide SIMD loads to access rvec entries.
     */
    snew(savex, state->natoms + 1);

    start = 0;
    end   = md->homenr;

    if (debug)
    {
        fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
                start, md->homenr, end);
    }
    /* Do a first constrain to reset particles... */
    step = ir->init_step;
    if (fplog)
    {
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));
    }
    dvdl_dum = 0;

    /* constrain the current position */
    constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
              ir, cr, step, 0, 1.0, md,
              as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), nullptr,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              nullptr, nullptr, nrnb, econqCoord);
    if (EI_VV(ir->eI))
    {
        /* constrain the initial velocity, and save it */
        /* also may be useful if we need the ekin from the halfstep for velocity verlet */
        constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, 0, 1.0, md,
                  as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  nullptr, nullptr, nrnb, econqVeloc);
    }
    /* constrain the initial velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
    {
        for (i = start; (i < end); i++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* Reverse the velocity */
                state->v[i][m] = -state->v[i][m];
                /* Store the position at t-dt in buf */
                savex[i][m] = state->x[i][m] + dt*state->v[i][m];
            }
        }
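        /* Clarifying note (added): v was negated in the loop above, so savex
         * now holds x(0) - dt*v(0), the free-flight positions at t = -dt;
         * constraining them against x(0) below yields velocities consistent
         * with the constraints. */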
        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
        if (fplog)
        {
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));
        }
        dvdl_dum = 0;
        constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
                  ir, cr, step, -1, 1.0, md,
                  as_rvec_array(state->x.data()), savex, nullptr,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  as_rvec_array(state->v.data()), nullptr, nrnb, econqCoord);

        for (i = start; i < end; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                /* Re-reverse the velocities */
                state->v[i][m] = -state->v[i][m];
            }
        }
    }
    sfree(savex);
}
static void
integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
                double *enerout, double *virout)
{
    double enersum, virsum;
    double invscale, invscale2, invscale3;
    double r, ea, eb, ec, pa, pb, pc, pd;
    double y0, f, g, h;
    int    ri, offset;
    double tabfactor;

    invscale  = 1.0/scale;
    invscale2 = invscale*invscale;
    invscale3 = invscale*invscale2;

    /* Following summation derived from cubic spline definition,
     * Numerical Recipes in C, second edition, p. 113-116.  Exact for
     * the cubic spline.  We first calculate the negative of the
     * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
     * add the more standard, abrupt cutoff correction to that result,
     * yielding the long-range correction for a switched function.  We
     * perform both the pressure and energy loops at the same time for
     * simplicity, as the computational cost is low. */
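    /* Derivation sketch (added, not in the original source): on table
     * interval ri the spline is V(t) = y0 + f*t + g*t^2 + h*t^3 with
     * t in [0,1] and r(t) = (ri + t)/scale. Expanding
     * r^2 = ea*t^2 + eb*t + ec and r^3 = pa*t^3 + pb*t^2 + pc*t + pd and
     * integrating each t^n term over [0,1] produces exactly the
     * 1/3, 1/2, ... coefficient patterns accumulated into enersum
     * (integral of r^2 V) and virsum (integral of r^3 dV/dr) below. */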
    if (offstart == 0)
    {
        /* Since the dispersion table has been scaled down a factor
         * 6.0 and the repulsion a factor 12.0 to compensate for the
         * c6/c12 parameters inside nbfp[] being scaled up (to save
         * flops in kernels), we need to correct for this.
         */
        tabfactor = 6.0;
    }
    else
    {
        tabfactor = 12.0;
    }

    enersum = 0.0;
    virsum  = 0.0;
    for (ri = rstart; ri < rend; ++ri)
    {
        r  = ri*invscale;
        ea = invscale3;
        eb = 2.0*invscale2*r;
        ec = invscale*r*r;

        pa = invscale3;
        pb = 3.0*invscale2*r;
        pc = 3.0*invscale*r*r;
        pd = r*r*r;

        /* this "8" is from the packing in the vdwtab array - perhaps
           should be defined? */
        offset = 8*ri + offstart;
        y0     = vdwtab[offset];
        f      = vdwtab[offset+1];
        g      = vdwtab[offset+2];
        h      = vdwtab[offset+3];

        enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
        virsum  +=  f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
    }
    *enerout = 4.0*M_PI*enersum*tabfactor;
    *virout  = 4.0*M_PI*virsum*tabfactor;
}
2070 void calc_enervirdiff(FILE *fplog
, int eDispCorr
, t_forcerec
*fr
)
2072 double eners
[2], virs
[2], enersum
, virsum
;
2073 double r0
, rc3
, rc9
;
2075 real scale
, *vdwtab
;
2077 fr
->enershiftsix
= 0;
2078 fr
->enershifttwelve
= 0;
2079 fr
->enerdiffsix
= 0;
2080 fr
->enerdifftwelve
= 0;
2082 fr
->virdifftwelve
= 0;
2084 if (eDispCorr
!= edispcNO
)
2086 for (i
= 0; i
< 2; i
++)
2091 if ((fr
->vdw_modifier
== eintmodPOTSHIFT
) ||
2092 (fr
->vdw_modifier
== eintmodPOTSWITCH
) ||
2093 (fr
->vdw_modifier
== eintmodFORCESWITCH
) ||
2094 (fr
->vdwtype
== evdwSHIFT
) ||
2095 (fr
->vdwtype
== evdwSWITCH
))
2097 if (((fr
->vdw_modifier
== eintmodPOTSWITCH
) ||
2098 (fr
->vdw_modifier
== eintmodFORCESWITCH
) ||
2099 (fr
->vdwtype
== evdwSWITCH
)) && fr
->rvdw_switch
== 0)
2102 "With dispersion correction rvdw-switch can not be zero "
2103 "for vdw-type = %s", evdw_names
[fr
->vdwtype
]);
2106 /* TODO This code depends on the logic in tables.c that
2107 constructs the table layout, which should be made
2108 explicit in future cleanup. */
2109 GMX_ASSERT(fr
->dispersionCorrectionTable
->interaction
== GMX_TABLE_INTERACTION_VDWREP_VDWDISP
,
2110 "Dispersion-correction code needs a table with both repulsion and dispersion terms");
2111 scale
= fr
->dispersionCorrectionTable
->scale
;
2112 vdwtab
= fr
->dispersionCorrectionTable
->data
;
2114 /* Round the cut-offs to exact table values for precision */
2115 ri0
= static_cast<int>(floor(fr
->rvdw_switch
*scale
));
2116 ri1
= static_cast<int>(ceil(fr
->rvdw
*scale
));
2118 /* The code below has some support for handling force-switching, i.e.
2119 * when the force (instead of potential) is switched over a limited
2120 * region. This leads to a constant shift in the potential inside the
2121 * switching region, which we can handle by adding a constant energy
2122 * term in the force-switch case just like when we do potential-shift.
2124 * For now this is not enabled, but to keep the functionality in the
2125 * code we check separately for switch and shift. When we do force-switch
2126 * the shifting point is rvdw_switch, while it is the cutoff when we
2127 * have a classical potential-shift.
2129 * For a pure potential-shift the potential has a constant shift
2130 * all the way out to the cutoff, and that is it. For other forms
2131 * we need to calculate the constant shift up to the point where we
2132 * start modifying the potential.
2134 ri0
= (fr
->vdw_modifier
== eintmodPOTSHIFT
) ? ri1
: ri0
;
2140 if ((fr
->vdw_modifier
== eintmodFORCESWITCH
) ||
2141 (fr
->vdwtype
== evdwSHIFT
))
2143 /* Determine the constant energy shift below rvdw_switch.
2144 * Table has a scale factor since we have scaled it down to compensate
2145 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
2147 fr
->enershiftsix
= (real
)(-1.0/(rc3
*rc3
)) - 6.0*vdwtab
[8*ri0
];
2148 fr
->enershifttwelve
= (real
)( 1.0/(rc9
*rc3
)) - 12.0*vdwtab
[8*ri0
+ 4];
2150 else if (fr
->vdw_modifier
== eintmodPOTSHIFT
)
2152 fr
->enershiftsix
= (real
)(-1.0/(rc3
*rc3
));
2153 fr
->enershifttwelve
= (real
)( 1.0/(rc9
*rc3
));
2156 /* Add the constant part from 0 to rvdw_switch.
2157 * This integration from 0 to rvdw_switch overcounts the number
2158 * of interactions by 1, as it also counts the self interaction.
2159 * We will correct for this later.
2161 eners
[0] += 4.0*M_PI
*fr
->enershiftsix
*rc3
/3.0;
2162 eners
[1] += 4.0*M_PI
*fr
->enershifttwelve
*rc3
/3.0;
            /* Calculate the contribution in the range [r0,r1] where we
             * modify the potential. For a pure potential-shift modifier we will
             * have ri0==ri1, and there will not be any contribution here.
             */
            for (i = 0; i < 2; i++)
            {
                enersum = 0;
                virsum  = 0;
                integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
                eners[i] -= enersum;
                virs[i]  -= virsum;
            }
            /* Alright: Above we compensated by REMOVING the parts outside r0
             * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
             *
             * Regardless of whether r0 is the point where we start switching,
             * or the cutoff where we calculated the constant shift, we include
             * all the parts we are missing out to infinity from r0 by
             * calculating the analytical dispersion correction.
             */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
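
            /* For reference, the four analytical tails follow from
             * E = int_r0^inf 4 pi r^2 V(r) dr and
             * Xi = int_r0^inf 4 pi r^2 r dV/dr dr:
             * V(r) = -1/r^6  gives E = -4 pi/(3 r0^3), Xi =  8 pi/r0^3;
             * V(r) =  1/r^12 gives E =  4 pi/(9 r0^9), Xi = -16 pi/(3 r0^9).
             */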
        }
        else if (fr->vdwtype == evdwCUT ||
                 EVDW_PME(fr->vdwtype) ||
                 fr->vdwtype == evdwUSER)
        {
            if (fr->vdwtype == evdwUSER && fplog)
            {
                fprintf(fplog,
                        "WARNING: using dispersion correction with user tables\n");
            }
            /* Note that with LJ-PME, the dispersion correction is multiplied
             * by the difference between the actual C6 and the value of C6
             * that would produce the combination rule.
             * This means the normal energy and virial difference formulas
             * can be used here.
             */

            rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
            rc9 = rc3*rc3*rc3;

            /* Contribution beyond the cut-off */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                /* Contribution within the cut-off */
                eners[0] += -4.0*M_PI/(3.0*rc3);
                eners[1] +=  4.0*M_PI/(3.0*rc9);
            }
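
            /* For reference: potential-shift subtracts V(rc) from every pair
             * inside the cut-off, so the correction adds it back once per
             * pair: int_0^rc 4 pi r^2 (-1/rc^6) dr = -4 pi/(3 rc^3) and
             * int_0^rc 4 pi r^2 (1/rc^12) dr = 4 pi/(3 rc^9), matching the
             * two terms above.
             */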
            /* Contribution beyond the cut-off */
            virs[0] +=  8.0*M_PI/rc3;
            virs[1] += -16.0*M_PI/(3.0*rc9);
        }
        else
        {
            gmx_fatal(FARGS,
                      "Dispersion correction is not implemented for vdw-type = %s",
                      evdw_names[fr->vdwtype]);
        }
        /* When we deprecate the group kernels the code below can go too */
        if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
        {
            /* Calculate self-interaction coefficient (assuming that
             * the reciprocal-space contribution is constant in the
             * region that contributes to the self-interaction).
             */
            fr->enershiftsix = gmx::power6(fr->ewaldcoeff_lj) / 6.0;

            eners[0] += -gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj)/3.0;
            virs[0]  +=  gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj);
        }
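
        /* For reference, assuming the standard LJ-PME splitting
         * 1/r^6 = g(beta r)/r^6 + (1 - g(beta r))/r^6 with
         * g(x) = (1 + x^2 + x^4/2) exp(-x^2): the reciprocal-space part
         * (1 - g(beta r))/r^6 tends to beta^6/6 as r -> 0, which is the
         * constant used for enershiftsix above, and the (sqrt(pi)*beta)^3
         * terms are the corresponding integrals of that part over all space.
         */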
        fr->enerdiffsix    = eners[0];
        fr->enerdifftwelve = eners[1];
        /* The 0.5 is due to the Gromacs definition of the virial */
        fr->virdiffsix     = 0.5*virs[0];
        fr->virdifftwelve  = 0.5*virs[1];
    }
}
void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
                   matrix box, real lambda, tensor pres, tensor virial,
                   real *prescorr, real *enercorr, real *dvdlcorr)
{
    gmx_bool bCorrAll, bCorrPres;
    real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
    int      m;

    *prescorr = 0;
    *enercorr = 0;
    *dvdlcorr = 0;
    if (ir->eDispCorr != edispcNO)
    {
        bCorrAll  = (ir->eDispCorr == edispcAllEner ||
                     ir->eDispCorr == edispcAllEnerPres);
        bCorrPres = (ir->eDispCorr == edispcEnerPres ||
                     ir->eDispCorr == edispcAllEnerPres);

        invvol = 1/det(box);
        if (fr->n_tpi)
        {
            /* Only correct for the interactions with the inserted molecule */
            dens   = (fr->numAtomsForDispersionCorrection - fr->n_tpi)*invvol;
            ninter = fr->n_tpi;
        }
        else
        {
            dens   = fr->numAtomsForDispersionCorrection*invvol;
            ninter = 0.5*fr->numAtomsForDispersionCorrection;
        }
        if (ir->efep == efepNO)
        {
            avcsix    = fr->avcsix[0];
            avctwelve = fr->avctwelve[0];
        }
        else
        {
            avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
            avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
        }
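
        /* For reference: avcsix and avctwelve are linear in lambda, so
         * d(avcsix*enerdiff)/dlambda = (avcsix[1] - avcsix[0])*enerdiff,
         * which is exactly what is accumulated into dvdlambda below.
         */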
        enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
        *enercorr += avcsix*enerdiff;
        dvdlambda  = 0.0;
        if (ir->efep != efepNO)
        {
            dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
        }
        if (bCorrAll)
        {
            enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
            *enercorr += avctwelve*enerdiff;
            if (fr->efep != efepNO)
            {
                dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
            }
        }
        if (bCorrPres)
        {
            svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
            if (ir->eDispCorr == edispcAllEnerPres)
            {
                svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
            }
            /* The factor 2 is because of the Gromacs virial definition */
            spres = -2.0*invvol*svir*PRESFAC;

            for (m = 0; m < DIM; m++)
            {
                virial[m][m] += svir;
                pres[m][m]   += spres;
            }
            *prescorr += spres;
        }
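
        /* For reference: with the Gromacs pressure convention
         * P_ab = (2/V)(Ekin_ab - Xi_ab), a scalar virial correction svir on
         * each diagonal element changes each diagonal pressure element by
         * -2*svir/V, converted from kJ mol^-1 nm^-3 to bar by PRESFAC.
         */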
        /* Can't currently control when it prints, for now, just print when debugging */
        if (debug)
        {
            if (bCorrAll)
            {
                fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
                        avcsix, avctwelve);
            }
            if (bCorrPres)
            {
                fprintf(debug,
                        "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
                        *enercorr, spres, svir);
            }
            else
            {
                fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
            }
        }

        if (fr->efep != efepNO)
        {
            *dvdlcorr += dvdlambda;
        }
    }
}
void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
                  t_graph *graph, rvec x[])
{
    if (fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }
    calc_shifts(box, fr->shift_vec);
    if (graph)
    {
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 1", graph);
        }
        shift_self(graph, box, x);
        /* By doing an extra mk_mshift the molecules that are broken
         * because they were e.g. imported from another software
         * will be made whole again. Such are the healing powers
         * of GROMACS.
         */
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 2", graph);
        }
    }
    if (fplog)
    {
        fprintf(fplog, "Done rmpbc\n");
    }
}
static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                            const gmx_mtop_t *mtop, rvec x[],
                            gmx_bool bFirst)
{
    t_graph        *graph;
    int             mb, as, mol;
    gmx_molblock_t *molb;

    if (bFirst && fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }

    snew(graph, 1);
    as = 0;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molb = &mtop->molblock[mb];
        if (molb->natoms_mol == 1 ||
            (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
        {
            /* Just one atom or charge group in the molecule, no PBC required */
            as += molb->nmol*molb->natoms_mol;
        }
        else
        {
            /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
            mk_graph_ilist(nullptr, mtop->moltype[molb->type].ilist,
                           0, molb->natoms_mol, FALSE, FALSE, graph);

            for (mol = 0; mol < molb->nmol; mol++)
            {
                mk_mshift(fplog, graph, ePBC, box, x+as);

                shift_self(graph, box, x+as);
                /* The molecule is whole now.
                 * We don't need the second mk_mshift call as in do_pbc_first,
                 * since we no longer need this graph.
                 */
                as += molb->natoms_mol;
            }
            done_graph(graph);
        }
    }
    sfree(graph);
}
void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
                       const gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
}
void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                 gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
}
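
/* For reference: the final argument to low_do_pbc_mtop is bFirst. With TRUE
 * every multi-atom molecule is made whole; with FALSE molecules consisting of
 * a single charge group are skipped, since whole charge groups are kept
 * intact under PBC during a run. A typical (hypothetical) call after reading
 * a configuration that may contain broken molecules would be:
 *
 *   do_pbc_first_mtop(fplog, ir->ePBC, box, mtop, x);
 */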
void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
{
    int t, nth;
    nth = gmx_omp_nthreads_get(emntDefault);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (t = 0; t < nth; t++)
    {
        try
        {
            int offset, len;

            offset = (natoms*t)/nth;
            len    = (natoms*(t + 1))/nth - offset;
            put_atoms_in_box(ePBC, box, len, x + offset);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
}
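
/* For reference: thread t handles the half-open atom range
 * [natoms*t/nth, natoms*(t+1)/nth). With integer division these ranges tile
 * [0, natoms) exactly once, with no overlap, for any nth >= 1.
 */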
// TODO This can be cleaned up a lot, and move back to runner.cpp
void finish_run(FILE *fplog, const gmx::MDLogger &mdlog, t_commrec *cr,
                t_inputrec *inputrec,
                t_nrnb nrnb[], gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                nonbonded_verlet_t *nbv,
                gmx_bool bWriteStat)
{
    t_nrnb *nrnb_tot = nullptr;
    double  delta_t  = 0;
    double  nbfs     = 0, mflop = 0;
    double  elapsed_time,
            elapsed_time_over_all_ranks,
            elapsed_time_over_all_threads,
            elapsed_time_over_all_threads_over_all_ranks;

    if (!walltime_accounting_get_valid_finish(walltime_accounting))
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText("Simulation ended prematurely, no performance report will be written.");
        return;
    }
    if (cr->nnodes > 1)
    {
        snew(nrnb_tot, 1);
#if GMX_MPI
        MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
#endif
    }
    else
    {
        nrnb_tot = nrnb;
    }
    elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
    elapsed_time_over_all_ranks                  = elapsed_time;
    elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
    elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
#if GMX_MPI
    if (cr->nnodes > 1)
    {
        /* reduce elapsed_time over all MPI ranks in the current simulation */
        MPI_Allreduce(&elapsed_time,
                      &elapsed_time_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
        elapsed_time_over_all_ranks /= cr->nnodes;
        /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
         * current simulation. */
        MPI_Allreduce(&elapsed_time_over_all_threads,
                      &elapsed_time_over_all_threads_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
    }
#endif
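
    /* For reference: after these reductions elapsed_time_over_all_ranks is
     * the mean wall time per rank (the sum is divided by nnodes), while
     * elapsed_time_over_all_threads_over_all_ranks remains a plain sum,
     * i.e. the total thread time across the whole simulation.
     */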
    if (fplog)
    {
        print_flop(fplog, nrnb_tot, &nbfs, &mflop);
    }
    if (cr->nnodes > 1)
    {
        sfree(nrnb_tot);
    }

    if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
    {
        print_dd_statistics(cr, inputrec, fplog);
    }
    /* TODO Move the responsibility for any scaling by thread counts
     * to the code that handled the thread region, so that there's a
     * mechanism to keep cycle counting working during the transition
     * to task parallelism. */
    int nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
    int nthreads_pme = gmx_omp_nthreads_get(emntPME);
    wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
    auto cycle_sum(wallcycle_sum(cr, wcycle));
    if (SIMMASTER(cr))
    {
        struct gmx_wallclock_gpu_t *gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : nullptr;

        wallcycle_print(fplog, mdlog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
                        elapsed_time_over_all_ranks,
                        wcycle, cycle_sum, gputimes);

        if (EI_DYNAMICS(inputrec->eI))
        {
            delta_t = inputrec->delta_t;
        }

        if (fplog)
        {
            print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
        if (bWriteStat)
        {
            print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
    }
}
extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, gmx::ArrayRef<real> lambda, double *lam0)
{
    /* this function works, but could probably use a logic rewrite to keep all the different
       types of efep straight. */

    if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
    {
        return;
    }

    t_lambda *fep = ir->fepvals;
    *fep_state    = fep->init_fep_state; /* this might overwrite the checkpoint
                                            if checkpoint is set -- a kludge is in for now
                                            to prevent this. */
    for (int i = 0; i < efptNR; i++)
    {
        /* overwrite lambda state with init_lambda for now for backwards compatibility */
        if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
        {
            lambda[i] = fep->init_lambda;
            if (lam0)
            {
                lam0[i] = lambda[i];
            }
        }
        else
        {
            lambda[i] = fep->all_lambda[i][*fep_state];
            if (lam0)
            {
                lam0[i] = lambda[i];
            }
        }
    }
    if (ir->bSimTemp)
    {
        /* need to rescale control temperatures to match current state */
        for (int i = 0; i < ir->opts.ngtc; i++)
        {
            if (ir->opts.ref_t[i] > 0)
            {
                ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
            }
        }
    }
    /* Send to the log the information on the current lambdas */
    if (fplog != nullptr)
    {
        fprintf(fplog, "Initial vector of lambda components:[ ");
        for (const auto &l : lambda)
        {
            fprintf(fplog, "%10.4f ", l);
        }
        fprintf(fplog, "]\n");
    }
}
void init_md(FILE *fplog,
             t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
             double *t, double *t0,
             gmx::ArrayRef<real> lambda, int *fep_state, double *lam0,
             t_nrnb *nrnb, gmx_mtop_t *mtop,
             gmx_update_t **upd,
             int nfile, const t_filenm fnm[],
             gmx_mdoutf_t *outf, t_mdebin **mdebin,
             tensor force_vir, tensor shake_vir, rvec mu_tot,
             gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
             gmx_wallcycle_t wcycle)
{
    int i;

    /* Initial values */
    *t = *t0 = ir->init_t;

    *bSimAnn = FALSE;
    for (i = 0; i < ir->opts.ngtc; i++)
    {
        /* set bSimAnn if any group is being annealed */
        if (ir->opts.annealing[i] != eannNO)
        {
            *bSimAnn = TRUE;
        }
    }

    /* Initialize lambda variables */
    initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
    // TODO upd is never NULL in practice, but the analysers don't know that
    if (upd)
    {
        *upd = init_update(ir);
    }
    if (*bSimAnn)
    {
        update_annealing_target_temp(ir, ir->init_t, upd ? *upd : nullptr);
    }

    if (vcm != nullptr)
    {
        *vcm = init_vcm(fplog, &mtop->groups, ir);
    }
    if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
    {
        if (ir->etc == etcBERENDSEN)
        {
            please_cite(fplog, "Berendsen84a");
        }
        if (ir->etc == etcVRESCALE)
        {
            please_cite(fplog, "Bussi2007a");
        }
        if (ir->eI == eiSD1)
        {
            please_cite(fplog, "Goga2012");
        }
    }
    init_nrnb(nrnb);
= init_mdoutf(fplog
, nfile
, fnm
, Flags
, cr
, ir
, mtop
, oenv
, wcycle
);
2708 *mdebin
= init_mdebin((Flags
& MD_APPENDFILES
) ? nullptr : mdoutf_get_fp_ene(*outf
),
2709 mtop
, ir
, mdoutf_get_fp_dhdl(*outf
));
2712 /* Initiate variables */
2713 clear_mat(force_vir
);
2714 clear_mat(shake_vir
);