[gromacs.git] / src / gromacs / mdlib / sim_util.cpp
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 #include "gmxpre.h"
39 #include "sim_util.h"
41 #include "config.h"
43 #include <assert.h>
44 #include <math.h>
45 #include <stdio.h>
46 #include <string.h>
48 #include <array>
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/essentialdynamics/edsam.h"
52 #include "gromacs/ewald/pme.h"
53 #include "gromacs/fileio/copyrite.h"
54 #include "gromacs/gmxlib/chargegroup.h"
55 #include "gromacs/gmxlib/disre.h"
56 #include "gromacs/gmxlib/gmx_omp_nthreads.h"
57 #include "gromacs/gmxlib/orires.h"
58 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
59 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
60 #include "gromacs/imd/imd.h"
61 #include "gromacs/legacyheaders/genborn.h"
62 #include "gromacs/legacyheaders/names.h"
63 #include "gromacs/legacyheaders/network.h"
64 #include "gromacs/legacyheaders/nonbonded.h"
65 #include "gromacs/legacyheaders/nrnb.h"
66 #include "gromacs/legacyheaders/txtdump.h"
67 #include "gromacs/legacyheaders/types/commrec.h"
68 #include "gromacs/listed-forces/bonded.h"
69 #include "gromacs/math/units.h"
70 #include "gromacs/math/vec.h"
71 #include "gromacs/mdlib/calcmu.h"
72 #include "gromacs/mdlib/constr.h"
73 #include "gromacs/mdlib/force.h"
74 #include "gromacs/mdlib/forcerec.h"
75 #include "gromacs/mdlib/mdrun.h"
76 #include "gromacs/mdlib/nb_verlet.h"
77 #include "gromacs/mdlib/nbnxn_atomdata.h"
78 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
79 #include "gromacs/mdlib/nbnxn_grid.h"
80 #include "gromacs/mdlib/nbnxn_search.h"
81 #include "gromacs/mdlib/qmmm.h"
82 #include "gromacs/mdlib/update.h"
83 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
84 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
85 #include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
86 #include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
87 #include "gromacs/pbcutil/ishift.h"
88 #include "gromacs/pbcutil/mshift.h"
89 #include "gromacs/pbcutil/pbc.h"
90 #include "gromacs/pulling/pull.h"
91 #include "gromacs/pulling/pull_rotation.h"
92 #include "gromacs/timing/gpu_timing.h"
93 #include "gromacs/timing/wallcycle.h"
94 #include "gromacs/timing/walltime_accounting.h"
95 #include "gromacs/utility/cstringutil.h"
96 #include "gromacs/utility/exceptions.h"
97 #include "gromacs/utility/fatalerror.h"
98 #include "gromacs/utility/gmxassert.h"
99 #include "gromacs/utility/gmxmpi.h"
100 #include "gromacs/utility/smalloc.h"
101 #include "gromacs/utility/sysinfo.h"
103 #include "adress.h"
104 #include "nbnxn_gpu.h"
106 void print_time(FILE *out,
107 gmx_walltime_accounting_t walltime_accounting,
108 gmx_int64_t step,
109 t_inputrec *ir,
110 t_commrec gmx_unused *cr)
112 time_t finish;
113 char timebuf[STRLEN];
114 double dt, elapsed_seconds, time_per_step;
115 char buf[48];
117 #ifndef GMX_THREAD_MPI
118 if (!PAR(cr))
119 #endif
121 fprintf(out, "\r");
123 fprintf(out, "step %s", gmx_step_str(step, buf));
124 if ((step >= ir->nstlist))
126 double seconds_since_epoch = gmx_gettime();
127 elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
128 time_per_step = elapsed_seconds/(step - ir->init_step + 1);
129 dt = (ir->nsteps + ir->init_step - step) * time_per_step;
131 if (ir->nsteps >= 0)
133 if (dt >= 300)
135 finish = (time_t) (seconds_since_epoch + dt);
136 gmx_ctime_r(&finish, timebuf, STRLEN);
137 sprintf(buf, "%s", timebuf);
138 buf[strlen(buf)-1] = '\0';
139 fprintf(out, ", will finish %s", buf);
141 else
143 fprintf(out, ", remaining wall clock time: %5d s ", (int)dt);
146 else
148 fprintf(out, " performance: %.1f ns/day ",
149 ir->delta_t/1000*24*60*60/time_per_step);
152 #ifndef GMX_THREAD_MPI
153 if (PAR(cr))
155 fprintf(out, "\n");
157 #endif
159 fflush(out);
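/*
 * Worked example of the estimates printed above, assuming ir->delta_t is
 * given in ps (the MD time unit): delta_t/1000 is ns per step and
 * 24*60*60/time_per_step is steps per day, so their product is ns/day.
 * With delta_t = 0.002 ps and time_per_step = 0.01 s this gives
 * 0.002/1000 * 86400/0.01 = 17.28 ns/day. The remaining wall clock time
 * is estimated as (nsteps + init_step - step) * time_per_step seconds.
 */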
162 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
163 double the_time)
165 char time_string[STRLEN];
167 if (!fplog)
169 return;
173 int i;
174 char timebuf[STRLEN];
175 time_t temp_time = (time_t) the_time;
177 gmx_ctime_r(&temp_time, timebuf, STRLEN);
178 for (i = 0; timebuf[i] >= ' '; i++)
180 time_string[i] = timebuf[i];
182 time_string[i] = '\0';
185 fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
188 void print_start(FILE *fplog, t_commrec *cr,
189 gmx_walltime_accounting_t walltime_accounting,
190 const char *name)
192 char buf[STRLEN];
194 sprintf(buf, "Started %s", name);
195 print_date_and_time(fplog, cr->nodeid, buf,
196 walltime_accounting_get_start_time_stamp(walltime_accounting));
199 static void sum_forces(int start, int end, rvec f[], rvec flr[])
201 int i;
203 if (gmx_debug_at)
205 pr_rvecs(debug, 0, "fsr", f+start, end-start);
206 pr_rvecs(debug, 0, "flr", flr+start, end-start);
208 for (i = start; (i < end); i++)
210 rvec_inc(f[i], flr[i]);
215 * calc_f_el calculates forces due to an electric field.
217 * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
219 * Et[] contains the parameters for the time dependent
220 * part of the field.
221 * Ex[] contains the parameters for
222 * the spatial dependent part of the field.
223 * The function should return the energy due to the electric field
224 * (if any) but for now returns 0.
226 * WARNING:
227 * There can be problems with the virial.
228 * Since the field is not self-consistent this is unavoidable.
229 * For neutral molecules the virial is correct within this approximation.
230 * For neutral systems with many charged molecules the error is small.
231 * But for systems with a net charge or a few charged molecules
232 * the error can be significant when the field is high.
233 * Solution: implement a self-consistent electric field into PME.
235 static void calc_f_el(FILE *fp, int start, int homenr,
236 real charge[], rvec f[],
237 t_cosines Ex[], t_cosines Et[], double t)
239 rvec Ext;
240 real t0;
241 int i, m;
243 for (m = 0; (m < DIM); m++)
245 if (Et[m].n > 0)
247 if (Et[m].n == 3)
249 t0 = Et[m].a[1];
250 Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
252 else
254 Ext[m] = cos(Et[m].a[0]*t);
257 else
259 Ext[m] = 1.0;
261 if (Ex[m].n > 0)
263 /* Convert the field strength from V/nm to MD-units */
264 Ext[m] *= Ex[m].a[0]*FIELDFAC;
265 for (i = start; (i < start+homenr); i++)
267 f[i][m] += charge[i]*Ext[m];
270 else
272 Ext[m] = 0;
275 if (fp != NULL)
277 fprintf(fp, "%10g %10g %10g %10g #FIELD\n", t,
278 Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
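/*
 * Restating what the loop above applies (a sketch): with a three-parameter
 * time profile Et[m].a = {omega, t0, sigma}, each field component adds
 *   f[i][m] += q_i * E0_m * cos(omega*(t - t0)) * exp(-(t - t0)^2/(2*sigma^2))
 * where E0_m = Ex[m].a[0] is given in V/nm and FIELDFAC converts it to MD
 * units; otherwise the time factor reduces to cos(omega*t), and without a
 * time profile it is simply 1.
 */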
282 static void calc_virial(int start, int homenr, rvec x[], rvec f[],
283 tensor vir_part, t_graph *graph, matrix box,
284 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
286 int i;
288 /* The short-range virial from surrounding boxes */
289 clear_mat(vir_part);
290 calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
291 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
293 /* Calculate partial virial, for local atoms only, based on short range.
294 * Total virial is computed in global_stat, called from do_md
296 f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
297 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
299 /* Add position restraint contribution */
300 for (i = 0; i < DIM; i++)
302 vir_part[i][i] += fr->vir_diag_posres[i];
305 /* Add wall contribution */
306 for (i = 0; i < DIM; i++)
308 vir_part[i][ZZ] += fr->vir_wall_z[i];
311 if (debug)
313 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
317 static void pull_potential_wrapper(t_commrec *cr,
318 t_inputrec *ir,
319 matrix box, rvec x[],
320 rvec f[],
321 tensor vir_force,
322 t_mdatoms *mdatoms,
323 gmx_enerdata_t *enerd,
324 real *lambda,
325 double t,
326 gmx_wallcycle_t wcycle)
328 t_pbc pbc;
329 real dvdl;
331 /* Calculate the center of mass forces, this requires communication,
332 * which is why pull_potential is called close to other communication.
333 * The virial contribution is calculated directly,
334 * which is why we call pull_potential after calc_virial.
336 wallcycle_start(wcycle, ewcPULLPOT);
337 set_pbc(&pbc, ir->ePBC, box);
338 dvdl = 0;
339 enerd->term[F_COM_PULL] +=
340 pull_potential(ir->pull_work, mdatoms, &pbc,
341 cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
342 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
343 wallcycle_stop(wcycle, ewcPULLPOT);
346 static void pme_receive_force_ener(t_commrec *cr,
347 gmx_wallcycle_t wcycle,
348 gmx_enerdata_t *enerd,
349 t_forcerec *fr)
351 real e_q, e_lj, dvdl_q, dvdl_lj;
352 float cycles_ppdpme, cycles_seppme;
354 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
355 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
357 /* In case of node-splitting, the PP nodes receive the long-range
358 * forces, virial and energy from the PME nodes here.
360 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
361 dvdl_q = 0;
362 dvdl_lj = 0;
363 gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e_q,
364 fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
365 &cycles_seppme);
366 enerd->term[F_COUL_RECIP] += e_q;
367 enerd->term[F_LJ_RECIP] += e_lj;
368 enerd->dvdl_lin[efptCOUL] += dvdl_q;
369 enerd->dvdl_lin[efptVDW] += dvdl_lj;
371 if (wcycle)
373 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
375 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
378 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
379 gmx_int64_t step, real pforce, rvec *x, rvec *f)
381 int i;
382 real pf2, fn2;
383 char buf[STEPSTRSIZE];
385 pf2 = sqr(pforce);
386 for (i = 0; i < md->homenr; i++)
388 fn2 = norm2(f[i]);
389 /* We also catch NAN, if the compiler does not optimize this away. */
390 if (fn2 >= pf2 || fn2 != fn2)
392 fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
393 gmx_step_str(step, buf),
394 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
399 static void post_process_forces(t_commrec *cr,
400 gmx_int64_t step,
401 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
402 gmx_localtop_t *top,
403 matrix box, rvec x[],
404 rvec f[],
405 tensor vir_force,
406 t_mdatoms *mdatoms,
407 t_graph *graph,
408 t_forcerec *fr, gmx_vsite_t *vsite,
409 int flags)
411 if (fr->bF_NoVirSum)
413 if (vsite)
415 /* Spread the mesh force on virtual sites to the other particles...
416  * This is parallelized. MPI communication is performed
417 * if the constructing atoms aren't local.
419 wallcycle_start(wcycle, ewcVSITESPREAD);
420 spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
421 (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
422 nrnb,
423 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
424 wallcycle_stop(wcycle, ewcVSITESPREAD);
426 if (flags & GMX_FORCE_VIRIAL)
428 /* Now add the forces, this is local */
429 if (fr->bDomDec)
431 sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
433 else
435 sum_forces(0, mdatoms->homenr,
436 f, fr->f_novirsum);
438 if (EEL_FULL(fr->eeltype))
440 /* Add the mesh contribution to the virial */
441 m_add(vir_force, fr->vir_el_recip, vir_force);
443 if (EVDW_PME(fr->vdwtype))
445 /* Add the mesh contribution to the virial */
446 m_add(vir_force, fr->vir_lj_recip, vir_force);
448 if (debug)
450 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
455 if (fr->print_force >= 0)
457 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
461 static void do_nb_verlet(t_forcerec *fr,
462 interaction_const_t *ic,
463 gmx_enerdata_t *enerd,
464 int flags, int ilocality,
465 int clearF,
466 t_nrnb *nrnb,
467 gmx_wallcycle_t wcycle)
469 int enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
470 nonbonded_verlet_group_t *nbvg;
471 gmx_bool bUsingGpuKernels;
473 if (!(flags & GMX_FORCE_NONBONDED))
475 /* skip non-bonded calculation */
476 return;
479 nbvg = &fr->nbv->grp[ilocality];
481 /* GPU kernel launch overhead is already timed separately */
482 if (fr->cutoff_scheme != ecutsVERLET)
484 gmx_incons("Invalid cut-off scheme passed!");
487 bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);
489 if (!bUsingGpuKernels)
491 wallcycle_sub_start(wcycle, ewcsNONBONDED);
493 switch (nbvg->kernel_type)
495 case nbnxnk4x4_PlainC:
496 nbnxn_kernel_ref(&nbvg->nbl_lists,
497 nbvg->nbat, ic,
498 fr->shift_vec,
499 flags,
500 clearF,
501 fr->fshift[0],
502 enerd->grpp.ener[egCOULSR],
503 fr->bBHAM ?
504 enerd->grpp.ener[egBHAMSR] :
505 enerd->grpp.ener[egLJSR]);
506 break;
508 case nbnxnk4xN_SIMD_4xN:
509 nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
510 nbvg->nbat, ic,
511 nbvg->ewald_excl,
512 fr->shift_vec,
513 flags,
514 clearF,
515 fr->fshift[0],
516 enerd->grpp.ener[egCOULSR],
517 fr->bBHAM ?
518 enerd->grpp.ener[egBHAMSR] :
519 enerd->grpp.ener[egLJSR]);
520 break;
521 case nbnxnk4xN_SIMD_2xNN:
522 nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
523 nbvg->nbat, ic,
524 nbvg->ewald_excl,
525 fr->shift_vec,
526 flags,
527 clearF,
528 fr->fshift[0],
529 enerd->grpp.ener[egCOULSR],
530 fr->bBHAM ?
531 enerd->grpp.ener[egBHAMSR] :
532 enerd->grpp.ener[egLJSR]);
533 break;
535 case nbnxnk8x8x8_GPU:
536 nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
537 break;
539 case nbnxnk8x8x8_PlainC:
540 nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
541 nbvg->nbat, ic,
542 fr->shift_vec,
543 flags,
544 clearF,
545 nbvg->nbat->out[0].f,
546 fr->fshift[0],
547 enerd->grpp.ener[egCOULSR],
548 fr->bBHAM ?
549 enerd->grpp.ener[egBHAMSR] :
550 enerd->grpp.ener[egLJSR]);
551 break;
553 default:
554 gmx_incons("Invalid nonbonded kernel type passed!");
557 if (!bUsingGpuKernels)
559 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
562 if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
564 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
566 else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
567 (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
569 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
571 else
573 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
575 enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
576 if (flags & GMX_FORCE_ENERGY)
578 /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
579 enr_nbnxn_kernel_ljc += 1;
580 enr_nbnxn_kernel_lj += 1;
583 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
584 nbvg->nbl_lists.natpair_ljq);
585 inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
586 nbvg->nbl_lists.natpair_lj);
587 /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
588 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
589 nbvg->nbl_lists.natpair_q);
591 if (ic->vdw_modifier == eintmodFORCESWITCH)
593 /* We add up the switch cost separately */
594 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
595 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
597 if (ic->vdw_modifier == eintmodPOTSWITCH)
599 /* We add up the switch cost separately */
600 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
601 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
603 if (ic->vdwtype == evdwPME)
605 /* We add up the LJ Ewald cost separately */
606 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
607 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
611 static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
612 t_forcerec *fr,
613 rvec x[],
614 rvec f[],
615 t_mdatoms *mdatoms,
616 t_lambda *fepvals,
617 real *lambda,
618 gmx_enerdata_t *enerd,
619 int flags,
620 t_nrnb *nrnb,
621 gmx_wallcycle_t wcycle)
623 int donb_flags;
624 nb_kernel_data_t kernel_data;
625 real lam_i[efptNR];
626 real dvdl_nb[efptNR];
627 int th;
628 int i, j;
630 donb_flags = 0;
631 /* Add short-range interactions */
632 donb_flags |= GMX_NONBONDED_DO_SR;
634 /* Currently all group scheme kernels always calculate (shift-)forces */
635 if (flags & GMX_FORCE_FORCES)
637 donb_flags |= GMX_NONBONDED_DO_FORCE;
639 if (flags & GMX_FORCE_VIRIAL)
641 donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
643 if (flags & GMX_FORCE_ENERGY)
645 donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
647 if (flags & GMX_FORCE_DO_LR)
649 donb_flags |= GMX_NONBONDED_DO_LR;
652 kernel_data.flags = donb_flags;
653 kernel_data.lambda = lambda;
654 kernel_data.dvdl = dvdl_nb;
656 kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
657 kernel_data.energygrp_vdw = enerd->grpp.ener[egLJSR];
659 /* reset free energy components */
660 for (i = 0; i < efptNR; i++)
662 dvdl_nb[i] = 0;
665 assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);
667 wallcycle_sub_start(wcycle, ewcsNONBONDED);
668 #pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
669 for (th = 0; th < nbl_lists->nnbl; th++)
673 gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
674 x, f, fr, mdatoms, &kernel_data, nrnb);
676 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
679 if (fepvals->sc_alpha != 0)
681 enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
682 enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
684 else
686 enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
687 enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
690 /* If we do foreign lambda and we have soft-core interactions
691  * we have to recalculate the (non-linear) energy contributions.
693 if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
695 kernel_data.flags = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
696 kernel_data.lambda = lam_i;
697 kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
698 kernel_data.energygrp_vdw = enerd->foreign_grpp.ener[egLJSR];
699 /* Note that we add to kernel_data.dvdl, but ignore the result */
701 for (i = 0; i < enerd->n_lambda; i++)
703 for (j = 0; j < efptNR; j++)
705 lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
707 reset_foreign_enerdata(enerd);
708 #pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
709 for (th = 0; th < nbl_lists->nnbl; th++)
713 gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
714 x, f, fr, mdatoms, &kernel_data, nrnb);
716 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
719 sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
720 enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
724 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
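/*
 * Summarizing the foreign-lambda pass above: for each foreign state i the
 * lambda vector used is lam_i[j] = lambda[j] for i == 0 and
 * fepvals->all_lambda[j][i-1] otherwise; the kernels are re-run with the
 * force and shift-force flags cleared, and only the resulting foreign
 * potential is accumulated into enerd->enerpart_lambda[i]. This extra pass
 * is needed only with soft-core (sc_alpha != 0), where the energy is a
 * non-linear function of lambda.
 */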
727 gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
729 return nbv != NULL && nbv->bUseGPU;
732 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
733 t_inputrec *inputrec,
734 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
735 gmx_localtop_t *top,
736 gmx_groups_t gmx_unused *groups,
737 matrix box, rvec x[], history_t *hist,
738 rvec f[],
739 tensor vir_force,
740 t_mdatoms *mdatoms,
741 gmx_enerdata_t *enerd, t_fcdata *fcd,
742 real *lambda, t_graph *graph,
743 t_forcerec *fr, interaction_const_t *ic,
744 gmx_vsite_t *vsite, rvec mu_tot,
745 double t, FILE *field, gmx_edsam_t ed,
746 gmx_bool bBornRadii,
747 int flags)
749 int cg1, i, j;
750 int start, homenr;
751 double mu[2*DIM];
752 gmx_bool bStateChanged, bNS, bFillGrid, bCalcCGCM;
753 gmx_bool bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
754 gmx_bool bDiffKernels = FALSE;
755 rvec vzero, box_diag;
756 float cycles_pme, cycles_force, cycles_wait_gpu;
757 nonbonded_verlet_t *nbv;
759 cycles_force = 0;
760 cycles_wait_gpu = 0;
761 nbv = fr->nbv;
763 start = 0;
764 homenr = mdatoms->homenr;
766 clear_mat(vir_force);
768 if (DOMAINDECOMP(cr))
770 cg1 = cr->dd->ncg_tot;
772 else
774 cg1 = top->cgs.nr;
776 if (fr->n_tpi > 0)
778 cg1--;
781 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
782 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
783 bFillGrid = (bNS && bStateChanged);
784 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
785 bDoLongRange = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
786 bDoForces = (flags & GMX_FORCE_FORCES);
787 bSepLRF = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
788 bUseGPU = fr->nbv->bUseGPU;
789 bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
791 if (bStateChanged)
793 update_forcerec(fr, box);
795 if (NEED_MUTOT(*inputrec))
797 /* Calculate total (local) dipole moment in a temporary common array.
798 * This makes it possible to sum them over nodes faster.
800 calc_mu(start, homenr,
801 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
802 mu, mu+DIM);
806 if (fr->ePBC != epbcNONE)
808 /* Compute shift vectors every step,
809 * because of pressure coupling or box deformation!
811 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
813 calc_shifts(box, fr->shift_vec);
816 if (bCalcCGCM)
818 put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
819 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
821 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
823 unshift_self(graph, box, x);
827 nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
828 fr->shift_vec, nbv->grp[0].nbat);
830 #ifdef GMX_MPI
831 if (!(cr->duty & DUTY_PME))
833 gmx_bool bBS;
834 matrix boxs;
836 /* Send particle coordinates to the pme nodes.
837 * Since this is only implemented for domain decomposition
838 * and domain decomposition does not use the graph,
839 * we do not need to worry about shifting.
842 int pme_flags = 0;
844 wallcycle_start(wcycle, ewcPP_PMESENDX);
846 bBS = (inputrec->nwall == 2);
847 if (bBS)
849 copy_mat(box, boxs);
850 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
853 if (EEL_PME(fr->eeltype))
855 pme_flags |= GMX_PME_DO_COULOMB;
858 if (EVDW_PME(fr->vdwtype))
860 pme_flags |= GMX_PME_DO_LJ;
863 gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
864 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
865 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
866 pme_flags, step);
868 wallcycle_stop(wcycle, ewcPP_PMESENDX);
870 #endif /* GMX_MPI */
872 /* do gridding for pair search */
873 if (bNS)
875 if (graph && bStateChanged)
877 /* Calculate intramolecular shift vectors to make molecules whole */
878 mk_mshift(fplog, graph, fr->ePBC, box, x);
881 clear_rvec(vzero);
882 box_diag[XX] = box[XX][XX];
883 box_diag[YY] = box[YY][YY];
884 box_diag[ZZ] = box[ZZ][ZZ];
886 wallcycle_start(wcycle, ewcNS);
887 if (!fr->bDomDec)
889 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
890 nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
891 0, vzero, box_diag,
892 0, mdatoms->homenr, -1, fr->cginfo, x,
893 0, NULL,
894 nbv->grp[eintLocal].kernel_type,
895 nbv->grp[eintLocal].nbat);
896 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
898 else
900 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
901 nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
902 fr->cginfo, x,
903 nbv->grp[eintNonlocal].kernel_type,
904 nbv->grp[eintNonlocal].nbat);
905 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
908 if (nbv->ngrp == 1 ||
909 nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
911 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
912 nbv->nbs, mdatoms, fr->cginfo);
914 else
916 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
917 nbv->nbs, mdatoms, fr->cginfo);
918 nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
919 nbv->nbs, mdatoms, fr->cginfo);
921 wallcycle_stop(wcycle, ewcNS);
924 /* initialize the GPU atom data and copy shift vector */
925 if (bUseGPU)
927 if (bNS)
929 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
930 nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
931 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
934 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
935 nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
936 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
939 /* do local pair search */
940 if (bNS)
942 wallcycle_start_nocount(wcycle, ewcNS);
943 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
944 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
945 &top->excls,
946 ic->rlist,
947 nbv->min_ci_balanced,
948 &nbv->grp[eintLocal].nbl_lists,
949 eintLocal,
950 nbv->grp[eintLocal].kernel_type,
951 nrnb);
952 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
954 if (bUseGPU)
956 /* initialize local pair-list on the GPU */
957 nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
958 nbv->grp[eintLocal].nbl_lists.nbl[0],
959 eintLocal);
961 wallcycle_stop(wcycle, ewcNS);
963 else
965 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
966 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
967 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
968 nbv->grp[eintLocal].nbat);
969 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
970 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
973 if (bUseGPU)
975 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
976 /* launch local nonbonded F on GPU */
977 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
978 nrnb, wcycle);
979 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
982 /* Communicate coordinates and sum dipole if necessary +
983 do non-local pair search */
984 if (DOMAINDECOMP(cr))
986 bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
987 nbv->grp[eintLocal].kernel_type);
989 if (bDiffKernels)
991 /* With GPU+CPU non-bonded calculations we need to copy
992 * the local coordinates to the non-local nbat struct
993 * (in CPU format) as the non-local kernel call also
994 * calculates the local - non-local interactions.
996 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
997 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
998 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
999 nbv->grp[eintNonlocal].nbat);
1000 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1001 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1004 if (bNS)
1006 wallcycle_start_nocount(wcycle, ewcNS);
1007 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1009 if (bDiffKernels)
1011 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
1014 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
1015 &top->excls,
1016 ic->rlist,
1017 nbv->min_ci_balanced,
1018 &nbv->grp[eintNonlocal].nbl_lists,
1019 eintNonlocal,
1020 nbv->grp[eintNonlocal].kernel_type,
1021 nrnb);
1023 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1025 if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
1027 /* initialize non-local pair-list on the GPU */
1028 nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
1029 nbv->grp[eintNonlocal].nbl_lists.nbl[0],
1030 eintNonlocal);
1032 wallcycle_stop(wcycle, ewcNS);
1034 else
1036 wallcycle_start(wcycle, ewcMOVEX);
1037 dd_move_x(cr->dd, box, x);
1039 /* When we don't need the total dipole we sum it in global_stat */
1040 if (bStateChanged && NEED_MUTOT(*inputrec))
1042 gmx_sumd(2*DIM, mu, cr);
1044 wallcycle_stop(wcycle, ewcMOVEX);
1046 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1047 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1048 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
1049 nbv->grp[eintNonlocal].nbat);
1050 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1051 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1054 if (bUseGPU && !bDiffKernels)
1056 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
1057 /* launch non-local nonbonded F on GPU */
1058 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
1059 nrnb, wcycle);
1060 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1064 if (bUseGPU)
1066 /* launch D2H copy-back F */
1067 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1068 if (DOMAINDECOMP(cr) && !bDiffKernels)
1070 nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
1071 flags, eatNonlocal);
1073 nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
1074 flags, eatLocal);
1075 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1078 if (bStateChanged && NEED_MUTOT(*inputrec))
1080 if (PAR(cr))
1082 gmx_sumd(2*DIM, mu, cr);
1085 for (i = 0; i < 2; i++)
1087 for (j = 0; j < DIM; j++)
1089 fr->mu_tot[i][j] = mu[i*DIM + j];
1093 if (fr->efep == efepNO)
1095 copy_rvec(fr->mu_tot[0], mu_tot);
1097 else
1099 for (j = 0; j < DIM; j++)
1101 mu_tot[j] =
1102 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1103 lambda[efptCOUL]*fr->mu_tot[1][j];
1107 /* Reset energies */
1108 reset_enerdata(fr, bNS, enerd, MASTER(cr));
1109 clear_rvecs(SHIFTS, fr->fshift);
1111 if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1113 wallcycle_start(wcycle, ewcPPDURINGPME);
1114 dd_force_flop_start(cr->dd, nrnb);
1117 if (inputrec->bRot)
1119 /* Enforced rotation has its own cycle counter that starts after the collective
1120 * coordinates have been communicated. It is added to ddCyclF to allow
1121 * for proper load-balancing */
1122 wallcycle_start(wcycle, ewcROT);
1123 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1124 wallcycle_stop(wcycle, ewcROT);
1127 /* Start the force cycle counter.
1128 * This counter is stopped after do_force_lowlevel.
1129 * No parallel communication should occur while this counter is running,
1130 * since that will interfere with the dynamic load balancing.
1132 wallcycle_start(wcycle, ewcFORCE);
1133 if (bDoForces)
1135 /* Reset forces for which the virial is calculated separately:
1136 * PME/Ewald forces if necessary */
1137 if (fr->bF_NoVirSum)
1139 if (flags & GMX_FORCE_VIRIAL)
1141 fr->f_novirsum = fr->f_novirsum_alloc;
1142 if (fr->bDomDec)
1144 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1146 else
1148 clear_rvecs(homenr, fr->f_novirsum+start);
1151 else
1153 /* We are not calculating the pressure so we do not need
1154 * a separate array for forces that do not contribute
1155 * to the pressure.
1157 fr->f_novirsum = f;
1161 /* Clear the short- and long-range forces */
1162 clear_rvecs(fr->natoms_force_constr, f);
1163 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1165 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1168 clear_rvec(fr->vir_diag_posres);
1171 if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
1173 clear_pull_forces(inputrec->pull_work);
1176 /* We calculate the non-bonded forces, when done on the CPU, here.
1177 * We do this before calling do_force_lowlevel, because in that
1178 * function, the listed forces are calculated before PME, which
1179 * does communication. With this order, non-bonded and listed
1180 * force calculation imbalance can be balanced out by the domain
1181 * decomposition load balancing.
1184 if (!bUseOrEmulGPU)
1186 /* Maybe we should move this into do_force_lowlevel */
1187 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1188 nrnb, wcycle);
1191 if (fr->efep != efepNO)
1193 /* Calculate the local and non-local free energy interactions here.
1194 * Happens here on the CPU both with and without GPU.
1196 if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
1198 do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
1199 fr, x, f, mdatoms,
1200 inputrec->fepvals, lambda,
1201 enerd, flags, nrnb, wcycle);
1204 if (DOMAINDECOMP(cr) &&
1205 fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
1207 do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
1208 fr, x, f, mdatoms,
1209 inputrec->fepvals, lambda,
1210 enerd, flags, nrnb, wcycle);
1214 if (!bUseOrEmulGPU || bDiffKernels)
1216 int aloc;
1218 if (DOMAINDECOMP(cr))
1220 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1221 bDiffKernels ? enbvClearFYes : enbvClearFNo,
1222 nrnb, wcycle);
1225 if (!bUseOrEmulGPU)
1227 aloc = eintLocal;
1229 else
1231 aloc = eintNonlocal;
1234 /* Add all the non-bonded force to the normal force array.
1235 * This can be split into a local and a non-local part when overlapping
1236 * communication with calculation with domain decomposition.
1238 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1239 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1240 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1241 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1242 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1243 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1244 wallcycle_start_nocount(wcycle, ewcFORCE);
1246 /* if there are multiple fshift output buffers reduce them */
1247 if ((flags & GMX_FORCE_VIRIAL) &&
1248 nbv->grp[aloc].nbl_lists.nnbl > 1)
1250 /* This is not in a subcounter because it takes a
1251 negligible and constant-sized amount of time */
1252 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1253 fr->fshift);
1257 /* update QMMMrec, if necessary */
1258 if (fr->bQMMM)
1260 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1263 /* Compute the bonded and non-bonded energies and optionally forces */
1264 do_force_lowlevel(fr, inputrec, &(top->idef),
1265 cr, nrnb, wcycle, mdatoms,
1266 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1267 bBornRadii, box,
1268 inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1269 flags, &cycles_pme);
1271 if (bSepLRF)
1273 if (do_per_step(step, inputrec->nstcalclr))
1275 /* Add the long range forces to the short range forces */
1276 for (i = 0; i < fr->natoms_force_constr; i++)
1278 rvec_add(fr->f_twin[i], f[i], f[i]);
1283 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1285 if (ed)
1287 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1290 if (bUseOrEmulGPU && !bDiffKernels)
1292 /* wait for non-local forces (or calculate in emulation mode) */
1293 if (DOMAINDECOMP(cr))
1295 if (bUseGPU)
1297 float cycles_tmp;
1299 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1300 nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1301 flags, eatNonlocal,
1302 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1303 fr->fshift);
1304 cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1305 cycles_wait_gpu += cycles_tmp;
1306 cycles_force += cycles_tmp;
1308 else
1310 wallcycle_start_nocount(wcycle, ewcFORCE);
1311 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1312 nrnb, wcycle);
1313 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1315 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1316 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1317 /* skip the reduction if there was no non-local work to do */
1318 if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1320 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1321 nbv->grp[eintNonlocal].nbat, f);
1323 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1324 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1328 if (bDoForces && DOMAINDECOMP(cr))
1330 if (bUseGPU)
1332 /* We are done with the CPU compute, but the GPU local non-bonded
1333 * kernel can still be running while we communicate the forces.
1334 * We start a counter here, so we can, hopefully, time the rest
1335 * of the GPU kernel execution and data transfer.
1337 // TODO This counter has very little to do with the rest
1338 // of wcycle, so should be a separate object
1339 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L_EST);
1342 /* Communicate the forces */
1343 wallcycle_start(wcycle, ewcMOVEF);
1344 dd_move_f(cr->dd, f, fr->fshift);
1345 if (bSepLRF)
1347 /* We should not update the shift forces here,
1348 * since f_twin is already included in f.
1350 dd_move_f(cr->dd, fr->f_twin, NULL);
1352 wallcycle_stop(wcycle, ewcMOVEF);
1355 if (bUseOrEmulGPU)
1357 /* wait for local forces (or calculate in emulation mode) */
1358 if (bUseGPU)
1360 #if defined(GMX_GPU) && !defined(GMX_USE_OPENCL)
1361 float cycles_tmp, cycles_wait_est;
1362 const float cuda_api_overhead_margin = 50000.0f; /* cycles */
1364 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1365 nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1366 flags, eatLocal,
1367 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1368 fr->fshift);
1369 cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1371 if (bDoForces && DOMAINDECOMP(cr))
1373 cycles_wait_est = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L_EST);
1375 if (cycles_tmp < cuda_api_overhead_margin)
1377                 /* We measured only a few cycles, so it could be that the kernel
1378 * and transfer finished earlier and there was no actual
1379 * wait time, only API call overhead.
1380 * Then the actual time could be anywhere between 0 and
1381 * cycles_wait_est. As a compromise, we use half the time.
1383 cycles_wait_est *= 0.5f;
1386 else
1388 /* No force communication so we actually timed the wait */
1389 cycles_wait_est = cycles_tmp;
1391 /* Even though this is after dd_move_f, the actual task we are
1392 * waiting for runs asynchronously with dd_move_f and we usually
1393 * have nothing to balance it with, so we can and should add
1394 * the time to the force time for load balancing.
1396 cycles_force += cycles_wait_est;
1397 cycles_wait_gpu += cycles_wait_est;
1399 #elif defined(GMX_GPU) && defined(GMX_USE_OPENCL)
1401 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1402 nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1403 flags, eatLocal,
1404 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1405 fr->fshift);
1406 cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1407 #endif
1409 /* now clear the GPU outputs while we finish the step on the CPU */
1410 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1411 nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
1412 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1414 else
1416 wallcycle_start_nocount(wcycle, ewcFORCE);
1417 do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1418 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1419 nrnb, wcycle);
1420 wallcycle_stop(wcycle, ewcFORCE);
1422 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1423 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1424 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1425 nbv->grp[eintLocal].nbat, f);
1426 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1427 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1430 if (DOMAINDECOMP(cr))
1432 dd_force_flop_stop(cr->dd, nrnb);
1433 if (wcycle)
1435 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1436 if (bUseGPU)
1438 dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
1443 if (bDoForces)
1445 if (IR_ELEC_FIELD(*inputrec))
1447 /* Compute forces due to electric field */
1448 calc_f_el(MASTER(cr) ? field : NULL,
1449 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1450 inputrec->ex, inputrec->et, t);
1453 /* If we have NoVirSum forces, but we do not calculate the virial,
1454 * we sum fr->f_novirsum=f later.
1456 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1458 wallcycle_start(wcycle, ewcVSITESPREAD);
1459 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1460 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1461 wallcycle_stop(wcycle, ewcVSITESPREAD);
1463 if (bSepLRF)
1465 wallcycle_start(wcycle, ewcVSITESPREAD);
1466 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1467 nrnb,
1468 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1469 wallcycle_stop(wcycle, ewcVSITESPREAD);
1473 if (flags & GMX_FORCE_VIRIAL)
1475 /* Calculation of the virial must be done after vsites! */
1476 calc_virial(0, mdatoms->homenr, x, f,
1477 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1481 if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
1483 /* Since the COM pulling is always done mass-weighted, no forces are
1484 * applied to vsites and this call can be done after vsite spreading.
1486 pull_potential_wrapper(cr, inputrec, box, x,
1487 f, vir_force, mdatoms, enerd, lambda, t,
1488 wcycle);
1491 /* Add the forces from enforced rotation potentials (if any) */
1492 if (inputrec->bRot)
1494 wallcycle_start(wcycle, ewcROTadd);
1495 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1496 wallcycle_stop(wcycle, ewcROTadd);
1499 /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1500 IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1502 if (PAR(cr) && !(cr->duty & DUTY_PME))
1504 /* In case of node-splitting, the PP nodes receive the long-range
1505 * forces, virial and energy from the PME nodes here.
1507 pme_receive_force_ener(cr, wcycle, enerd, fr);
1510 if (bDoForces)
1512 post_process_forces(cr, step, nrnb, wcycle,
1513 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1514 flags);
1517 /* Sum the potential energy terms from group contributions */
1518 sum_epot(&(enerd->grpp), enerd->term);
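/*
 * A rough summary of the ordering implemented above: send coordinates to
 * any separate PME ranks; on search steps, grid the atoms and build the
 * local and non-local pair lists; launch the local and non-local GPU
 * non-bonded kernels and the device-to-host force copy-back; do the CPU
 * non-bonded, listed-force and PME work in do_force_lowlevel(); wait for
 * and reduce the GPU forces; communicate forces under domain
 * decomposition; and finally apply the electric-field, vsite, virial,
 * pull, rotation and IMD contributions and receive the PME mesh
 * forces/energies from the PME ranks.
 */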
1521 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1522 t_inputrec *inputrec,
1523 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1524 gmx_localtop_t *top,
1525 gmx_groups_t *groups,
1526 matrix box, rvec x[], history_t *hist,
1527 rvec f[],
1528 tensor vir_force,
1529 t_mdatoms *mdatoms,
1530 gmx_enerdata_t *enerd, t_fcdata *fcd,
1531 real *lambda, t_graph *graph,
1532 t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1533 double t, FILE *field, gmx_edsam_t ed,
1534 gmx_bool bBornRadii,
1535 int flags)
1537 int cg0, cg1, i, j;
1538 int start, homenr;
1539 double mu[2*DIM];
1540 gmx_bool bStateChanged, bNS, bFillGrid, bCalcCGCM;
1541 gmx_bool bDoLongRangeNS, bDoForces, bSepLRF;
1542 gmx_bool bDoAdressWF;
1543 t_pbc pbc;
1544 float cycles_pme, cycles_force;
1546 start = 0;
1547 homenr = mdatoms->homenr;
1549 clear_mat(vir_force);
1551 cg0 = 0;
1552 if (DOMAINDECOMP(cr))
1554 cg1 = cr->dd->ncg_tot;
1556 else
1558 cg1 = top->cgs.nr;
1560 if (fr->n_tpi > 0)
1562 cg1--;
1565 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
1566 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1567 /* Should we update the long-range neighborlists at this step? */
1568 bDoLongRangeNS = fr->bTwinRange && bNS;
1569     /* Should we perform the long-range nonbonded evaluation inside the neighbor searching? */
1570 bFillGrid = (bNS && bStateChanged);
1571 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
1572 bDoForces = (flags & GMX_FORCE_FORCES);
1573 bSepLRF = ((inputrec->nstcalclr > 1) && bDoForces &&
1574 (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
1576 /* should probably move this to the forcerec since it doesn't change */
1577 bDoAdressWF = ((fr->adress_type != eAdressOff));
1579 if (bStateChanged)
1581 update_forcerec(fr, box);
1583 if (NEED_MUTOT(*inputrec))
1585 /* Calculate total (local) dipole moment in a temporary common array.
1586 * This makes it possible to sum them over nodes faster.
1588 calc_mu(start, homenr,
1589 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1590 mu, mu+DIM);
1594 if (fr->ePBC != epbcNONE)
1596 /* Compute shift vectors every step,
1597 * because of pressure coupling or box deformation!
1599 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1601 calc_shifts(box, fr->shift_vec);
1604 if (bCalcCGCM)
1606 put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1607 &(top->cgs), x, fr->cg_cm);
1608 inc_nrnb(nrnb, eNR_CGCM, homenr);
1609 inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1611 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1613 unshift_self(graph, box, x);
1616 else if (bCalcCGCM)
1618 calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1619 inc_nrnb(nrnb, eNR_CGCM, homenr);
1622 if (bCalcCGCM && gmx_debug_at)
1624 pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1627 #ifdef GMX_MPI
1628 if (!(cr->duty & DUTY_PME))
1630 gmx_bool bBS;
1631 matrix boxs;
1633 /* Send particle coordinates to the pme nodes.
1634 * Since this is only implemented for domain decomposition
1635 * and domain decomposition does not use the graph,
1636 * we do not need to worry about shifting.
1639 int pme_flags = 0;
1641 wallcycle_start(wcycle, ewcPP_PMESENDX);
1643 bBS = (inputrec->nwall == 2);
1644 if (bBS)
1646 copy_mat(box, boxs);
1647 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1650 if (EEL_PME(fr->eeltype))
1652 pme_flags |= GMX_PME_DO_COULOMB;
1655 if (EVDW_PME(fr->vdwtype))
1657 pme_flags |= GMX_PME_DO_LJ;
1660 gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
1661 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
1662 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
1663 pme_flags, step);
1665 wallcycle_stop(wcycle, ewcPP_PMESENDX);
1667 #endif /* GMX_MPI */
1669 /* Communicate coordinates and sum dipole if necessary */
1670 if (DOMAINDECOMP(cr))
1672 wallcycle_start(wcycle, ewcMOVEX);
1673 dd_move_x(cr->dd, box, x);
1674 wallcycle_stop(wcycle, ewcMOVEX);
1677 /* update adress weight beforehand */
1678 if (bStateChanged && bDoAdressWF)
1680 /* need pbc for adress weight calculation with pbc_dx */
1681 set_pbc(&pbc, inputrec->ePBC, box);
1682 if (fr->adress_site == eAdressSITEcog)
1684 update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1685 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1687 else if (fr->adress_site == eAdressSITEcom)
1689 update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1690 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1692 else if (fr->adress_site == eAdressSITEatomatom)
1694 update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1695 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1697 else
1699 update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1700 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1704 if (NEED_MUTOT(*inputrec))
1707 if (bStateChanged)
1709 if (PAR(cr))
1711 gmx_sumd(2*DIM, mu, cr);
1713 for (i = 0; i < 2; i++)
1715 for (j = 0; j < DIM; j++)
1717 fr->mu_tot[i][j] = mu[i*DIM + j];
1721 if (fr->efep == efepNO)
1723 copy_rvec(fr->mu_tot[0], mu_tot);
1725 else
1727 for (j = 0; j < DIM; j++)
1729 mu_tot[j] =
1730 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1735 /* Reset energies */
1736 reset_enerdata(fr, bNS, enerd, MASTER(cr));
1737 clear_rvecs(SHIFTS, fr->fshift);
1739 if (bNS)
1741 wallcycle_start(wcycle, ewcNS);
1743 if (graph && bStateChanged)
1745 /* Calculate intramolecular shift vectors to make molecules whole */
1746 mk_mshift(fplog, graph, fr->ePBC, box, x);
1749 /* Do the actual neighbour searching */
1750 ns(fplog, fr, box,
1751 groups, top, mdatoms,
1752 cr, nrnb, bFillGrid,
1753 bDoLongRangeNS);
1755 wallcycle_stop(wcycle, ewcNS);
1758 if (inputrec->implicit_solvent && bNS)
1760 make_gb_nblist(cr, inputrec->gb_algorithm,
1761 x, box, fr, &top->idef, graph, fr->born);
1764 if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1766 wallcycle_start(wcycle, ewcPPDURINGPME);
1767 dd_force_flop_start(cr->dd, nrnb);
1770 if (inputrec->bRot)
1772 /* Enforced rotation has its own cycle counter that starts after the collective
1773 * coordinates have been communicated. It is added to ddCyclF to allow
1774 * for proper load-balancing */
1775 wallcycle_start(wcycle, ewcROT);
1776 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1777 wallcycle_stop(wcycle, ewcROT);
1780 /* Start the force cycle counter.
1781 * This counter is stopped after do_force_lowlevel.
1782 * No parallel communication should occur while this counter is running,
1783 * since that will interfere with the dynamic load balancing.
1785 wallcycle_start(wcycle, ewcFORCE);
1787 if (bDoForces)
1789 /* Reset forces for which the virial is calculated separately:
1790 * PME/Ewald forces if necessary */
1791 if (fr->bF_NoVirSum)
1793 if (flags & GMX_FORCE_VIRIAL)
1795 fr->f_novirsum = fr->f_novirsum_alloc;
1796 if (fr->bDomDec)
1798 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1800 else
1802 clear_rvecs(homenr, fr->f_novirsum+start);
1805 else
1807 /* We are not calculating the pressure so we do not need
1808 * a separate array for forces that do not contribute
1809 * to the pressure.
1811 fr->f_novirsum = f;
1815 /* Clear the short- and long-range forces */
1816 clear_rvecs(fr->natoms_force_constr, f);
1817 if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1819 clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1822 clear_rvec(fr->vir_diag_posres);
1824 if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
1826 clear_pull_forces(inputrec->pull_work);
1829 /* update QMMMrec, if necessary */
1830 if (fr->bQMMM)
1832 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1835 /* Compute the bonded and non-bonded energies and optionally forces */
1836 do_force_lowlevel(fr, inputrec, &(top->idef),
1837 cr, nrnb, wcycle, mdatoms,
1838 x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1839 bBornRadii, box,
1840 inputrec->fepvals, lambda,
1841 graph, &(top->excls), fr->mu_tot,
1842 flags,
1843 &cycles_pme);
1845 if (bSepLRF)
1847 if (do_per_step(step, inputrec->nstcalclr))
1849 /* Add the long range forces to the short range forces */
1850 for (i = 0; i < fr->natoms_force_constr; i++)
1852 rvec_add(fr->f_twin[i], f[i], f[i]);
1857 cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1859 if (ed)
1861 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1864 if (DOMAINDECOMP(cr))
1866 dd_force_flop_stop(cr->dd, nrnb);
1867 if (wcycle)
1869 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1873 if (bDoForces)
1875 if (IR_ELEC_FIELD(*inputrec))
1877 /* Compute forces due to electric field */
1878 calc_f_el(MASTER(cr) ? field : NULL,
1879 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1880 inputrec->ex, inputrec->et, t);
1883 if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1885 /* Compute thermodynamic force in hybrid AdResS region */
1886 adress_thermo_force(start, homenr, x, fr->f_novirsum, fr, mdatoms,
1887 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1890 /* Communicate the forces */
1891 if (DOMAINDECOMP(cr))
1893 wallcycle_start(wcycle, ewcMOVEF);
1894 dd_move_f(cr->dd, f, fr->fshift);
1895 /* Do we need to communicate the separate force array
1896 * for terms that do not contribute to the single sum virial?
1897 * Position restraints and electric fields do not introduce
1898 * inter-cg forces, only full electrostatics methods do.
1899 * When we do not calculate the virial, fr->f_novirsum = f,
1900 * so we have already communicated these forces.
1902 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1903 (flags & GMX_FORCE_VIRIAL))
1905 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1907 if (bSepLRF)
1909 /* We should not update the shift forces here,
1910 * since f_twin is already included in f.
1912 dd_move_f(cr->dd, fr->f_twin, NULL);
1914 wallcycle_stop(wcycle, ewcMOVEF);
1917 /* If we have NoVirSum forces, but we do not calculate the virial,
1918 * we sum fr->f_novirsum=f later.
1920 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1922 wallcycle_start(wcycle, ewcVSITESPREAD);
1923 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1924 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1925 wallcycle_stop(wcycle, ewcVSITESPREAD);
1927 if (bSepLRF)
1929 wallcycle_start(wcycle, ewcVSITESPREAD);
1930 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1931 nrnb,
1932 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1933 wallcycle_stop(wcycle, ewcVSITESPREAD);
1937 if (flags & GMX_FORCE_VIRIAL)
1939 /* Calculation of the virial must be done after vsites! */
1940 calc_virial(0, mdatoms->homenr, x, f,
1941 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1945 if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
1947 pull_potential_wrapper(cr, inputrec, box, x,
1948 f, vir_force, mdatoms, enerd, lambda, t,
1949 wcycle);
1952 /* Add the forces from enforced rotation potentials (if any) */
1953 if (inputrec->bRot)
1955 wallcycle_start(wcycle, ewcROTadd);
1956 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1957 wallcycle_stop(wcycle, ewcROTadd);
1960 /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1961 IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1963 if (PAR(cr) && !(cr->duty & DUTY_PME))
1965 /* In case of node-splitting, the PP nodes receive the long-range
1966 * forces, virial and energy from the PME nodes here.
1968 pme_receive_force_ener(cr, wcycle, enerd, fr);
1971 if (bDoForces)
1973 post_process_forces(cr, step, nrnb, wcycle,
1974 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1975 flags);
1978 /* Sum the potential energy terms from group contributions */
1979 sum_epot(&(enerd->grpp), enerd->term);
1982 void do_force(FILE *fplog, t_commrec *cr,
1983 t_inputrec *inputrec,
1984 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1985 gmx_localtop_t *top,
1986 gmx_groups_t *groups,
1987 matrix box, rvec x[], history_t *hist,
1988 rvec f[],
1989 tensor vir_force,
1990 t_mdatoms *mdatoms,
1991 gmx_enerdata_t *enerd, t_fcdata *fcd,
1992 real *lambda, t_graph *graph,
1993 t_forcerec *fr,
1994 gmx_vsite_t *vsite, rvec mu_tot,
1995 double t, FILE *field, gmx_edsam_t ed,
1996 gmx_bool bBornRadii,
1997 int flags)
1999 /* modify force flag if not doing nonbonded */
2000 if (!fr->bNonbonded)
2002 flags &= ~GMX_FORCE_NONBONDED;
2005 switch (inputrec->cutoff_scheme)
2007 case ecutsVERLET:
2008 do_force_cutsVERLET(fplog, cr, inputrec,
2009 step, nrnb, wcycle,
2010 top,
2011 groups,
2012 box, x, hist,
2013 f, vir_force,
2014 mdatoms,
2015 enerd, fcd,
2016 lambda, graph,
2017 fr, fr->ic,
2018 vsite, mu_tot,
2019 t, field, ed,
2020 bBornRadii,
2021 flags);
2022 break;
2023 case ecutsGROUP:
2024 do_force_cutsGROUP(fplog, cr, inputrec,
2025 step, nrnb, wcycle,
2026 top,
2027 groups,
2028 box, x, hist,
2029 f, vir_force,
2030 mdatoms,
2031 enerd, fcd,
2032 lambda, graph,
2033 fr, vsite, mu_tot,
2034 t, field, ed,
2035 bBornRadii,
2036 flags);
2037 break;
2038 default:
2039 gmx_incons("Invalid cut-off scheme passed!");
2044 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
2045 t_inputrec *ir, t_mdatoms *md,
2046 t_state *state, t_commrec *cr, t_nrnb *nrnb,
2047 t_forcerec *fr, gmx_localtop_t *top)
2049 int i, m, start, end;
2050 gmx_int64_t step;
2051 real dt = ir->delta_t;
2052 real dvdl_dum;
2053 rvec *savex;
2055 snew(savex, state->natoms);
2057 start = 0;
2058 end = md->homenr;
2060 if (debug)
2062 fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
2063 start, md->homenr, end);
2065 /* Do a first constrain to reset particles... */
2066 step = ir->init_step;
2067 if (fplog)
2069 char buf[STEPSTRSIZE];
2070 fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
2071 gmx_step_str(step, buf));
2073 dvdl_dum = 0;
2075 /* constrain the current position */
2076 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2077 ir, cr, step, 0, 1.0, md,
2078 state->x, state->x, NULL,
2079 fr->bMolPBC, state->box,
2080 state->lambda[efptBONDED], &dvdl_dum,
2081 NULL, NULL, nrnb, econqCoord);
2082 if (EI_VV(ir->eI))
2084         /* constrain the initial velocity, and save it */
2085 /* also may be useful if we need the ekin from the halfstep for velocity verlet */
2086 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2087 ir, cr, step, 0, 1.0, md,
2088 state->x, state->v, state->v,
2089 fr->bMolPBC, state->box,
2090 state->lambda[efptBONDED], &dvdl_dum,
2091 NULL, NULL, nrnb, econqVeloc);
2093     /* constrain the initial velocities at t-dt/2 */
2094 if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
2096 for (i = start; (i < end); i++)
2098 for (m = 0; (m < DIM); m++)
2100 /* Reverse the velocity */
2101 state->v[i][m] = -state->v[i][m];
2102 /* Store the position at t-dt in buf */
2103 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
2106 /* Shake the positions at t=-dt with the positions at t=0
2107 * as reference coordinates.
2109 if (fplog)
2111 char buf[STEPSTRSIZE];
2112 fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2113 gmx_step_str(step, buf));
2115 dvdl_dum = 0;
2116 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2117 ir, cr, step, -1, 1.0, md,
2118 state->x, savex, NULL,
2119 fr->bMolPBC, state->box,
2120 state->lambda[efptBONDED], &dvdl_dum,
2121 state->v, NULL, nrnb, econqCoord);
2123 for (i = start; i < end; i++)
2125 for (m = 0; m < DIM; m++)
2127 /* Re-reverse the velocities */
2128 state->v[i][m] = -state->v[i][m];
2132 sfree(savex);
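/*
 * The backward step above can be read as follows: after negating v,
 * savex[i] = x[i] + dt*v[i] equals x(0) - dt*v(0), an unconstrained
 * position one step back in time. Constraining savex with x(0) as the
 * reference (passing state->v so the correction is applied to the
 * velocities) removes velocity components that violate the constraints,
 * giving velocities consistent with the t-dt/2 half step, after which the
 * sign of v is restored.
 */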
2136 static void
2137 integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
2138 double *enerout, double *virout)
2140 double enersum, virsum;
2141 double invscale, invscale2, invscale3;
2142 double r, ea, eb, ec, pa, pb, pc, pd;
2143 double y0, f, g, h;
2144 int ri, offset;
2145 double tabfactor;
2147 invscale = 1.0/scale;
2148 invscale2 = invscale*invscale;
2149 invscale3 = invscale*invscale2;
2151 /* Following summation derived from cubic spline definition,
2152 * Numerical Recipes in C, second edition, p. 113-116. Exact for
2153 * the cubic spline. We first calculate the negative of the
2154 * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
2155 * add the more standard, abrupt cutoff correction to that result,
2156 * yielding the long-range correction for a switched function. We
2157 * perform both the pressure and energy loops at the same time for
2158 * simplicity, as the computational cost is low. */
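/* In more detail, as can be read off the loop below: within table interval
 * ri the potential is the cubic y0 + f*t + g*t^2 + h*t^3 with t in [0,1],
 * and the radius runs as rt = (ri + t)/scale, so that
 * rt^2 drt = (ec + eb*t + ea*t^2) dt and rt^3 = pd + pc*t + pb*t^2 + pa*t^3.
 * The per-interval integrals of r^2 V(r) dr (energy) and r^3 dV/dr dr
 * (virial) therefore reduce to the polynomial sums accumulated in enersum
 * and virsum; the 4*pi shell factor and the 6/12 table-scaling factor are
 * applied at the end. The stride of 8 in vdwtab packs the four spline
 * coefficients of the entry selected by offstart (0: dispersion, 4: repulsion). */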
2160 if (offstart == 0)
2162 /* Since the dispersion table has been scaled down by a factor
2163 * of 6.0 and the repulsion by a factor of 12.0 to compensate for
2164 * the c6/c12 parameters inside nbfp[] being scaled up (to save
2165 * flops in kernels), we need to correct for this. */
2167 tabfactor = 6.0;
2169 else
2171 tabfactor = 12.0;
2174 enersum = 0.0;
2175 virsum = 0.0;
2176 for (ri = rstart; ri < rend; ++ri)
2178 r = ri*invscale;
2179 ea = invscale3;
2180 eb = 2.0*invscale2*r;
2181 ec = invscale*r*r;
2183 pa = invscale3;
2184 pb = 3.0*invscale2*r;
2185 pc = 3.0*invscale*r*r;
2186 pd = r*r*r;
2188 /* this "8" is from the packing of the vdwtab array - it should
2189 perhaps be a named constant */
2191 offset = 8*ri + offstart;
2192 y0 = vdwtab[offset];
2193 f = vdwtab[offset+1];
2194 g = vdwtab[offset+2];
2195 h = vdwtab[offset+3];
2197 enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2198 virsum += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2200 *enerout = 4.0*M_PI*enersum*tabfactor;
2201 *virout = 4.0*M_PI*virsum*tabfactor;
2204 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2206 double eners[2], virs[2], enersum, virsum;
2207 double r0, rc3, rc9;
2208 int ri0, ri1, i;
2209 real scale, *vdwtab;
2211 fr->enershiftsix = 0;
2212 fr->enershifttwelve = 0;
2213 fr->enerdiffsix = 0;
2214 fr->enerdifftwelve = 0;
2215 fr->virdiffsix = 0;
2216 fr->virdifftwelve = 0;
2218 if (eDispCorr != edispcNO)
2220 for (i = 0; i < 2; i++)
2222 eners[i] = 0;
2223 virs[i] = 0;
2225 if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
2226 (fr->vdw_modifier == eintmodPOTSWITCH) ||
2227 (fr->vdw_modifier == eintmodFORCESWITCH) ||
2228 (fr->vdwtype == evdwSHIFT) ||
2229 (fr->vdwtype == evdwSWITCH))
2231 if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
2232 (fr->vdw_modifier == eintmodFORCESWITCH) ||
2233 (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
2235 gmx_fatal(FARGS,
2236 "With dispersion correction rvdw-switch can not be zero "
2237 "for vdw-type = %s", evdw_names[fr->vdwtype]);
2240 /* TODO This code depends on the logic in tables.c that
2241 constructs the table layout, which should be made
2242 explicit in future cleanup. */
2243 GMX_ASSERT(fr->nblists[0].table_vdw.interaction == GMX_TABLE_INTERACTION_VDWREP_VDWDISP,
2244 "Dispersion-correction code needs a table with both repulsion and dispersion terms");
2245 scale = fr->nblists[0].table_vdw.scale;
2246 vdwtab = fr->nblists[0].table_vdw.data;
2248 /* Round the cut-offs to exact table values for precision */
2249 ri0 = static_cast<int>(floor(fr->rvdw_switch*scale));
2250 ri1 = static_cast<int>(ceil(fr->rvdw*scale));
2252 /* The code below has some support for handling force-switching, i.e.
2253 * when the force (instead of potential) is switched over a limited
2254 * region. This leads to a constant shift in the potential inside the
2255 * switching region, which we can handle by adding a constant energy
2256 * term in the force-switch case just like when we do potential-shift.
2258 * For now this is not enabled, but to keep the functionality in the
2259 * code we check separately for switch and shift. When we do force-switch
2260 * the shifting point is rvdw_switch, while it is the cutoff when we
2261 * have a classical potential-shift.
2263 * For a pure potential-shift the potential has a constant shift
2264 * all the way out to the cutoff, and that is it. For other forms
2265 * we need to calculate the constant shift up to the point where we
2266 * start modifying the potential. */
2268 ri0 = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;
2270 r0 = ri0/scale;
2271 rc3 = r0*r0*r0;
2272 rc9 = rc3*rc3*rc3;
2274 if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
2275 (fr->vdwtype == evdwSHIFT))
2277 /* Determine the constant energy shift below rvdw_switch.
2278 * The table has a scale factor since we scaled it down to compensate
2279 * for scaling up c6/c12 with the derivative factors, to save flops in analytical kernels. */
2281 fr->enershiftsix = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2282 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2284 else if (fr->vdw_modifier == eintmodPOTSHIFT)
2286 fr->enershiftsix = (real)(-1.0/(rc3*rc3));
2287 fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
2290 /* Add the constant part from 0 to rvdw_switch.
2291 * This integration from 0 to rvdw_switch overcounts the number
2292 * of interactions by 1, as it also counts the self interaction.
2293 * We will correct for this later. */
2295 eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2296 eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2298 /* Calculate the contribution in the range [r0,r1] where we
2299 * modify the potential. For a pure potential-shift modifier we will
2300 * have ri0==ri1, and there will not be any contribution here. */
2302 for (i = 0; i < 2; i++)
2304 enersum = 0;
2305 virsum = 0;
2306 integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
2307 eners[i] -= enersum;
2308 virs[i] -= virsum;
2311 /* Alright: Above we compensated by REMOVING the parts outside r0
2312 * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
2314 * Regardless of whether r0 is the point where we start switching,
2315 * or the cutoff where we calculated the constant shift, we include
2316 * all the parts we are missing out to infinity from r0 by
2317 * calculating the analytical dispersion correction. */
2319 eners[0] += -4.0*M_PI/(3.0*rc3);
2320 eners[1] += 4.0*M_PI/(9.0*rc9);
2321 virs[0] += 8.0*M_PI/rc3;
2322 virs[1] += -16.0*M_PI/(3.0*rc9);
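/* These are the standard analytical tail contributions per unit c6/c12:
 * the energy terms are Int_{r0}^{inf} 4*pi*r^2 V(r) dr with V = -1/r^6
 * and +1/r^12, giving -4*pi/(3*r0^3) and +4*pi/(9*r0^9), and the virial
 * terms are Int_{r0}^{inf} 4*pi*r^2 r*dV/dr dr, giving 8*pi/r0^3 and
 * -16*pi/(3*r0^9). The density, pair count and the 0.5 virial factor are
 * applied later. */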
2324 else if (fr->vdwtype == evdwCUT ||
2325 EVDW_PME(fr->vdwtype) ||
2326 fr->vdwtype == evdwUSER)
2328 if (fr->vdwtype == evdwUSER && fplog)
2330 fprintf(fplog,
2331 "WARNING: using dispersion correction with user tables\n");
2334 /* Note that with LJ-PME, the dispersion correction is multiplied
2335 * by the difference between the actual C6 and the C6 that the
2336 * combination rule would produce.
2337 * This means the normal energy and virial difference formulas
2338 * can be used here. */
2341 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
2342 rc9 = rc3*rc3*rc3;
2343 /* Contribution beyond the cut-off */
2344 eners[0] += -4.0*M_PI/(3.0*rc3);
2345 eners[1] += 4.0*M_PI/(9.0*rc9);
2346 if (fr->vdw_modifier == eintmodPOTSHIFT)
2348 /* Contribution within the cut-off */
2349 eners[0] += -4.0*M_PI/(3.0*rc3);
2350 eners[1] += 4.0*M_PI/(3.0*rc9);
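/* With a potential-shift modifier the simulation potential inside the
 * cut-off was V(r) - V(rc), so the correction must also add back V(rc)
 * for every pair within rc; in the continuum limit that is
 * Int_{0}^{rc} 4*pi*r^2 V(rc) dr = -4*pi/(3*rc^3) per unit c6 and
 * +4*pi/(3*rc^9) per unit c12, as added above. */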
2352 /* Contribution beyond the cut-off */
2353 virs[0] += 8.0*M_PI/rc3;
2354 virs[1] += -16.0*M_PI/(3.0*rc9);
2356 else
2358 gmx_fatal(FARGS,
2359 "Dispersion correction is not implemented for vdw-type = %s",
2360 evdw_names[fr->vdwtype]);
2363 /* When we deprecate the group kernels the code below can go too */
2364 if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
2366 /* Calculate self-interaction coefficient (assuming that
2367 * the reciprocal-space contribution is constant in the
2368 * region that contributes to the self-interaction). */
2370 fr->enershiftsix = pow(fr->ewaldcoeff_lj, 6) / 6.0;
2372 eners[0] += -pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3)/3.0;
2373 virs[0] += pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3);
2376 fr->enerdiffsix = eners[0];
2377 fr->enerdifftwelve = eners[1];
2378 /* The 0.5 is due to the Gromacs definition of the virial */
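/* (the GROMACS virial carries a factor -1/2, Xi = -1/2 sum_i r_i x F_i,
 *  which is where the 0.5 below comes from) */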
2379 fr->virdiffsix = 0.5*virs[0];
2380 fr->virdifftwelve = 0.5*virs[1];
2384 void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
2385 int natoms,
2386 matrix box, real lambda, tensor pres, tensor virial,
2387 real *prescorr, real *enercorr, real *dvdlcorr)
2389 gmx_bool bCorrAll, bCorrPres;
2390 real dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2391 int m;
2393 *prescorr = 0;
2394 *enercorr = 0;
2395 *dvdlcorr = 0;
2397 clear_mat(virial);
2398 clear_mat(pres);
2400 if (ir->eDispCorr != edispcNO)
2402 bCorrAll = (ir->eDispCorr == edispcAllEner ||
2403 ir->eDispCorr == edispcAllEnerPres);
2404 bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2405 ir->eDispCorr == edispcAllEnerPres);
2407 invvol = 1/det(box);
2408 if (fr->n_tpi)
2410 /* Only correct for the interactions with the inserted molecule */
2411 dens = (natoms - fr->n_tpi)*invvol;
2412 ninter = fr->n_tpi;
2414 else
2416 dens = natoms*invvol;
2417 ninter = 0.5*natoms;
2420 if (ir->efep == efepNO)
2422 avcsix = fr->avcsix[0];
2423 avctwelve = fr->avctwelve[0];
2425 else
2427 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
2428 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2431 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2432 *enercorr += avcsix*enerdiff;
2433 dvdlambda = 0.0;
2434 if (ir->efep != efepNO)
2436 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2438 if (bCorrAll)
2440 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2441 *enercorr += avctwelve*enerdiff;
2442 if (fr->efep != efepNO)
2444 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2448 if (bCorrPres)
2450 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2451 if (ir->eDispCorr == edispcAllEnerPres)
2453 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2455 /* The factor 2 is because of the Gromacs virial definition */
2456 spres = -2.0*invvol*svir*PRESFAC;
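/* With the GROMACS convention P = (2/V)(Ekin - Xi), adding svir to a
 * diagonal virial element shifts the corresponding pressure element by
 * -2*svir/V, converted to bar with PRESFAC. */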
2458 for (m = 0; m < DIM; m++)
2460 virial[m][m] += svir;
2461 pres[m][m] += spres;
2463 *prescorr += spres;
2466 /* We can't currently control when this prints, so for now just print it when debugging */
2467 if (debug)
2469 if (bCorrAll)
2471 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2472 avcsix, avctwelve);
2474 if (bCorrPres)
2476 fprintf(debug,
2477 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2478 *enercorr, spres, svir);
2480 else
2482 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2486 if (fr->efep != efepNO)
2488 *dvdlcorr += dvdlambda;
2493 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2494 t_graph *graph, rvec x[])
2496 if (fplog)
2498 fprintf(fplog, "Removing pbc first time\n");
2500 calc_shifts(box, fr->shift_vec);
2501 if (graph)
2503 mk_mshift(fplog, graph, fr->ePBC, box, x);
2504 if (gmx_debug_at)
2506 p_graph(debug, "do_pbc_first 1", graph);
2508 shift_self(graph, box, x);
2509 /* By doing an extra mk_mshift, molecules that are broken
2510 * because they were e.g. imported from other software
2511 * will be made whole again. Such are the healing powers
2512 * of GROMACS. */
2514 mk_mshift(fplog, graph, fr->ePBC, box, x);
2515 if (gmx_debug_at)
2517 p_graph(debug, "do_pbc_first 2", graph);
2520 if (fplog)
2522 fprintf(fplog, "Done rmpbc\n");
2526 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2527 const gmx_mtop_t *mtop, rvec x[],
2528 gmx_bool bFirst)
2530 t_graph *graph;
2531 int mb, as, mol;
2532 gmx_molblock_t *molb;
2534 if (bFirst && fplog)
2536 fprintf(fplog, "Removing pbc first time\n");
2539 snew(graph, 1);
2540 as = 0;
2541 for (mb = 0; mb < mtop->nmolblock; mb++)
2543 molb = &mtop->molblock[mb];
2544 if (molb->natoms_mol == 1 ||
2545 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2547 /* Just one atom or charge group in the molecule, no PBC required */
2548 as += molb->nmol*molb->natoms_mol;
2550 else
2552 /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2553 mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2554 0, molb->natoms_mol, FALSE, FALSE, graph);
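/* One graph per molecule type is enough: it is reused for all nmol copies
 * below, with the atom offset 'as' stepping through the global coordinate
 * array one molecule at a time. */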
2556 for (mol = 0; mol < molb->nmol; mol++)
2558 mk_mshift(fplog, graph, ePBC, box, x+as);
2560 shift_self(graph, box, x+as);
2561 /* The molecule is whole now.
2562 * We don't need the second mk_mshift call as in do_pbc_first,
2563 * since we no longer need this graph. */
2566 as += molb->natoms_mol;
2568 done_graph(graph);
2571 sfree(graph);
2574 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2575 const gmx_mtop_t *mtop, rvec x[])
2577 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2580 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2581 gmx_mtop_t *mtop, rvec x[])
2583 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2586 // TODO This can be cleaned up a lot and moved back to runner.cpp
2587 void finish_run(FILE *fplog, t_commrec *cr,
2588 t_inputrec *inputrec,
2589 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2590 gmx_walltime_accounting_t walltime_accounting,
2591 nonbonded_verlet_t *nbv,
2592 gmx_bool bWriteStat)
2594 t_nrnb *nrnb_tot = NULL;
2595 double delta_t = 0;
2596 double nbfs = 0, mflop = 0;
2597 double elapsed_time,
2598 elapsed_time_over_all_ranks,
2599 elapsed_time_over_all_threads,
2600 elapsed_time_over_all_threads_over_all_ranks;
2602 if (cr->nnodes > 1)
2604 snew(nrnb_tot, 1);
2605 #ifdef GMX_MPI
2606 MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2607 cr->mpi_comm_mysim);
2608 #endif
2610 else
2612 nrnb_tot = nrnb;
2615 elapsed_time = walltime_accounting_get_elapsed_time(walltime_accounting);
2616 elapsed_time_over_all_ranks = elapsed_time;
2617 elapsed_time_over_all_threads = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2618 elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2619 #ifdef GMX_MPI
2620 if (cr->nnodes > 1)
2622 /* reduce elapsed_time over all MPI ranks in the current simulation */
2623 MPI_Allreduce(&elapsed_time,
2624 &elapsed_time_over_all_ranks,
2625 1, MPI_DOUBLE, MPI_SUM,
2626 cr->mpi_comm_mysim);
2627 elapsed_time_over_all_ranks /= cr->nnodes;
2628 /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2629 * current simulation. */
2630 MPI_Allreduce(&elapsed_time_over_all_threads,
2631 &elapsed_time_over_all_threads_over_all_ranks,
2632 1, MPI_DOUBLE, MPI_SUM,
2633 cr->mpi_comm_mysim);
2635 #endif
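/* After the reductions, elapsed_time_over_all_ranks is the average wall
 * time per rank, while the over-all-threads variant is the total summed
 * over every thread on every rank. */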
2637 if (SIMMASTER(cr))
2639 print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2641 if (cr->nnodes > 1)
2643 sfree(nrnb_tot);
2646 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2648 print_dd_statistics(cr, inputrec, fplog);
2651 /* TODO Move the responsibility for any scaling by thread counts
2652 * to the code that handled the thread region, so that there's a
2653 * mechanism to keep cycle counting working during the transition
2654 * to task parallelism. */
2655 int nthreads_pp = gmx_omp_nthreads_get(emntNonbonded);
2656 int nthreads_pme = gmx_omp_nthreads_get(emntPME);
2657 wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
2658 auto cycle_sum(wallcycle_sum(cr, wcycle));
2660 if (SIMMASTER(cr))
2662 struct gmx_wallclock_gpu_t* gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : NULL;
2664 wallcycle_print(fplog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
2665 elapsed_time_over_all_ranks,
2666 wcycle, cycle_sum, gputimes);
2668 if (EI_DYNAMICS(inputrec->eI))
2670 delta_t = inputrec->delta_t;
2673 if (fplog)
2675 print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2676 elapsed_time_over_all_ranks,
2677 walltime_accounting_get_nsteps_done(walltime_accounting),
2678 delta_t, nbfs, mflop);
2680 if (bWriteStat)
2682 print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2683 elapsed_time_over_all_ranks,
2684 walltime_accounting_get_nsteps_done(walltime_accounting),
2685 delta_t, nbfs, mflop);
2690 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2692 /* this function works, but could probably use a logic rewrite to keep all the different
2693 types of efep straight. */
2695 int i;
2696 t_lambda *fep = ir->fepvals;
2698 if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2700 for (i = 0; i < efptNR; i++)
2702 lambda[i] = 0.0;
2703 if (lam0)
2705 lam0[i] = 0.0;
2708 return;
2710 else
2712 *fep_state = fep->init_fep_state; /* this might overwrite the value read
2713 from a checkpoint if one is present -- a kludge
2714 is in place for now to prevent this. */
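/* Note on precedence in the loop below: a non-negative legacy init_lambda
 * overrides the per-component all_lambda entry for the chosen fep_state;
 * lam0, when present, simply records the same starting values for the caller. */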
2715 for (i = 0; i < efptNR; i++)
2717 /* overwrite lambda state with init_lambda for now for backwards compatibility */
2718 if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2720 lambda[i] = fep->init_lambda;
2721 if (lam0)
2723 lam0[i] = lambda[i];
2726 else
2728 lambda[i] = fep->all_lambda[i][*fep_state];
2729 if (lam0)
2731 lam0[i] = lambda[i];
2735 if (ir->bSimTemp)
2737 /* need to rescale control temperatures to match current state */
2738 for (i = 0; i < ir->opts.ngtc; i++)
2740 if (ir->opts.ref_t[i] > 0)
2742 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2748 /* Report the initial lambda components to the log */
2749 if (fplog != NULL)
2751 fprintf(fplog, "Initial vector of lambda components:[ ");
2752 for (i = 0; i < efptNR; i++)
2754 fprintf(fplog, "%10.4f ", lambda[i]);
2756 fprintf(fplog, "]\n");
2758 return;
2762 void init_md(FILE *fplog,
2763 t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
2764 double *t, double *t0,
2765 real *lambda, int *fep_state, double *lam0,
2766 t_nrnb *nrnb, gmx_mtop_t *mtop,
2767 gmx_update_t *upd,
2768 int nfile, const t_filenm fnm[],
2769 gmx_mdoutf_t *outf, t_mdebin **mdebin,
2770 tensor force_vir, tensor shake_vir, rvec mu_tot,
2771 gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
2772 gmx_wallcycle_t wcycle)
2774 int i;
2776 /* Initial values */
2777 *t = *t0 = ir->init_t;
2779 *bSimAnn = FALSE;
2780 for (i = 0; i < ir->opts.ngtc; i++)
2782 /* set bSimAnn if any group is being annealed */
2783 if (ir->opts.annealing[i] != eannNO)
2785 *bSimAnn = TRUE;
2788 if (*bSimAnn)
2790 update_annealing_target_temp(&(ir->opts), ir->init_t);
2793 /* Initialize lambda variables */
2794 initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2796 if (upd)
2798 *upd = init_update(ir);
2802 if (vcm != NULL)
2804 *vcm = init_vcm(fplog, &mtop->groups, ir);
2807 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2809 if (ir->etc == etcBERENDSEN)
2811 please_cite(fplog, "Berendsen84a");
2813 if (ir->etc == etcVRESCALE)
2815 please_cite(fplog, "Bussi2007a");
2817 if (ir->eI == eiSD1)
2819 please_cite(fplog, "Goga2012");
2822 if ((ir->et[XX].n > 0) || (ir->et[YY].n > 0) || (ir->et[ZZ].n > 0))
2824 please_cite(fplog, "Caleman2008a");
2826 init_nrnb(nrnb);
2828 if (nfile != -1)
2830 *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);
2832 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : mdoutf_get_fp_ene(*outf),
2833 mtop, ir, mdoutf_get_fp_dhdl(*outf));
2836 if (ir->bAdress)
2838 please_cite(fplog, "Fritsch12");
2839 please_cite(fplog, "Junghans10");
2842 /* Initialize variables */
2842 clear_mat(force_vir);
2843 clear_mat(shake_vir);
2844 clear_rvec(mu_tot);
2846 debug_gmx();