Move types/ishift.h to pbcutil/
[gromacs.git] / src / gromacs / mdlib / sim_util.c
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include "typedefs.h"
#include "gromacs/utility/cstringutil.h"
#include "names.h"
#include "txtdump.h"
#include "gromacs/pbcutil/pbc.h"
#include "chargegroup.h"
#include "gromacs/math/vec.h"
#include "nrnb.h"
#include "mdrun.h"
#include "sim_util.h"
#include "update.h"
#include "gromacs/math/units.h"
#include "mdatoms.h"
#include "force.h"
#include "bondf.h"
#include "pme.h"
#include "disre.h"
#include "orires.h"
#include "network.h"
#include "calcmu.h"
#include "constr.h"
#include "copyrite.h"
#include "domdec.h"
#include "genborn.h"
#include "nbnxn_atomdata.h"
#include "nbnxn_search.h"
#include "nbnxn_kernels/nbnxn_kernel_ref.h"
#include "nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "nonbonded.h"
#include "../gmxlib/nonbonded/nb_kernel.h"
#include "../gmxlib/nonbonded/nb_free_energy.h"

#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/imd/imd.h"
#include "adress.h"
#include "qmmm.h"

#include "gmx_omp_nthreads.h"

#include "nbnxn_cuda_data_mgmt.h"
#include "nbnxn_cuda/nbnxn_cuda.h"
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_int64_t               step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    double dt, elapsed_seconds, time_per_step;
    char   buf[48];

#ifndef GMX_THREAD_MPI
    if (!PAR(cr))
#endif
    {
        fprintf(out, "\r");
    }
    fprintf(out, "step %s", gmx_step_str(step, buf));
    if ((step >= ir->nstlist))
    {
        double seconds_since_epoch = gmx_gettime();
        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step) * time_per_step;

        if (ir->nsteps >= 0)
        {
            if (dt >= 300)
            {
                finish = (time_t) (seconds_since_epoch + dt);
                gmx_ctime_r(&finish, timebuf, STRLEN);
                sprintf(buf, "%s", timebuf);
                buf[strlen(buf)-1] = '\0';
                fprintf(out, ", will finish %s", buf);
            }
            else
            {
                fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
            }
        }
        else
        {
            fprintf(out, " performance: %.1f ns/day    ",
                    ir->delta_t/1000*24*60*60/time_per_step);
        }
    }
#ifndef GMX_THREAD_MPI
    if (PAR(cr))
    {
        fprintf(out, "\n");
    }
#endif

    fflush(out);
}
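
/* Note on the units in the performance estimate above (a reading of the
 * code, not separate documentation): ir->delta_t is the time step in ps,
 * so delta_t/1000 is ns per MD step; multiplying by 24*60*60 (seconds per
 * day) and dividing by the measured wall-clock seconds per step gives
 * ns/day. A minimal stand-alone sketch of the same arithmetic:
 *
 *   static double ns_per_day(double delta_t_ps, double seconds_per_step)
 *   {
 *       return delta_t_ps/1000.0*24*60*60/seconds_per_step;
 *   }
 */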
void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         double the_time)
{
    char time_string[STRLEN];

    if (!fplog)
    {
        return;
    }

    {
        int    i;
        char   timebuf[STRLEN];
        time_t temp_time = (time_t) the_time;

        gmx_ctime_r(&temp_time, timebuf, STRLEN);
        for (i = 0; timebuf[i] >= ' '; i++)
        {
            time_string[i] = timebuf[i];
        }
        time_string[i] = '\0';
    }

    fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
}
void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
                 const char *name)
{
    char buf[STRLEN];

    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));
}
static void sum_forces(int start, int end, rvec f[], rvec flr[])
{
    int i;

    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "fsr", f+start, end-start);
        pr_rvecs(debug, 0, "flr", flr+start, end-start);
    }
    for (i = start; (i < end); i++)
    {
        rvec_inc(f[i], flr[i]);
    }
}
/*
 * calc_f_el calculates forces due to an electric field.
 *
 * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
 *
 * Et[] contains the parameters for the time dependent
 * part of the field (not yet used).
 * Ex[] contains the parameters for
 * the spatial dependent part of the field. You can have cool periodic
 * fields in principle, but only a constant field is supported
 * now.
 * The function should return the energy due to the electric field
 * (if any) but for now returns 0.
 *
 * WARNING:
 * There can be problems with the virial.
 * Since the field is not self-consistent this is unavoidable.
 * For neutral molecules the virial is correct within this approximation.
 * For neutral systems with many charged molecules the error is small.
 * But for systems with a net charge or a few charged molecules
 * the error can be significant when the field is high.
 * Solution: implement a self-consistent electric field into PME.
 */
static void calc_f_el(FILE *fp, int start, int homenr,
                      real charge[], rvec f[],
                      t_cosines Ex[], t_cosines Et[], double t)
{
    rvec Ext;
    real t0;
    int  i, m;

    for (m = 0; (m < DIM); m++)
    {
        if (Et[m].n > 0)
        {
            if (Et[m].n == 3)
            {
                t0     = Et[m].a[1];
                Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
            }
            else
            {
                Ext[m] = cos(Et[m].a[0]*t);
            }
        }
        else
        {
            Ext[m] = 1.0;
        }
        if (Ex[m].n > 0)
        {
            /* Convert the field strength from V/nm to MD-units */
            Ext[m] *= Ex[m].a[0]*FIELDFAC;
            for (i = start; (i < start+homenr); i++)
            {
                f[i][m] += charge[i]*Ext[m];
            }
        }
        else
        {
            Ext[m] = 0;
        }
    }
    if (fp != NULL)
    {
        fprintf(fp, "%10g  %10g  %10g  %10g #FIELD\n", t,
                Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
    }
}
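
/* For reference, the time-dependent factor computed above is a cosine
 * carrier under a Gaussian envelope (this is a reading of the code, not
 * separately documented behavior):
 *
 *   E_m(t) = E0_m * cos(omega*(t - t0)) * exp(-(t - t0)^2/(2*sigma^2))
 *
 * with Et[m].a[0] = omega, Et[m].a[1] = t0 and Et[m].a[2] = sigma when
 * three parameters are given, falling back to a plain cosine otherwise;
 * E0_m enters through Ex[m].a[0], converted from V/nm by FIELDFAC.
 */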
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int    i, j;
    tensor virtest;

    /* The short-range virial from surrounding boxes */
    clear_mat(vir_part);
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
static void posres_wrapper(FILE *fplog,
                           int flags,
                           gmx_bool bSepDVDL,
                           t_inputrec *ir,
                           t_nrnb *nrnb,
                           gmx_localtop_t *top,
                           matrix box, rvec x[],
                           gmx_enerdata_t *enerd,
                           real *lambda,
                           t_forcerec *fr)
{
    t_pbc pbc;
    real  v, dvdl;
    int   i;

    /* Position restraints always require full pbc */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    v    = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                  top->idef.iparams_posres,
                  (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
                  ir->ePBC == epbcNONE ? NULL : &pbc,
                  lambda[efptRESTRAINT], &dvdl,
                  fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, interaction_function[F_POSRES].longname, v, dvdl);
    }
    enerd->term[F_POSRES] += v;
    /* If just the force constant k changes, the FEP term is linear,
     * but if the reference positions change, it is not.
     */
    enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
    inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);

    if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            real dvdl_dum, lambda_dum;

            lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
            v          = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                                top->idef.iparams_posres,
                                (const rvec*)x, NULL, NULL,
                                ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
                                fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
            enerd->enerpart_lambda[i] += v;
        }
    }
}
static void fbposres_wrapper(t_inputrec *ir,
                             t_nrnb *nrnb,
                             gmx_localtop_t *top,
                             matrix box, rvec x[],
                             gmx_enerdata_t *enerd,
                             t_forcerec *fr)
{
    t_pbc pbc;
    real  v;

    /* Flat-bottomed position restraints always require full pbc */
    set_pbc(&pbc, ir->ePBC, box);
    v = fbposres(top->idef.il[F_FBPOSRES].nr, top->idef.il[F_FBPOSRES].iatoms,
                 top->idef.iparams_fbposres,
                 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
                 ir->ePBC == epbcNONE ? NULL : &pbc,
                 fr->rc_scaling, fr->ePBC, fr->posres_com);
    enerd->term[F_FBPOSRES] += v;
    inc_nrnb(nrnb, eNR_FBPOSRES, top->idef.il[F_FBPOSRES].nr/2);
}
static void pull_potential_wrapper(FILE *fplog,
                                   gmx_bool bSepDVDL,
                                   t_commrec *cr,
                                   t_inputrec *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real *lambda,
                                   double t)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, "Com pull", enerd->term[F_COM_PULL], dvdl);
    }
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
}
static void pme_receive_force_ener(FILE *fplog,
                                   gmx_bool bSepDVDL,
                                   t_commrec *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec *fr)
{
    real  e_q, e_lj, v, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, "Electrostatic PME mesh", e_q, dvdl_q);
        gmx_print_sepdvdl(fplog, "Lennard-Jones PME mesh", e_lj, dvdl_lj);
    }
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real pforce, rvec *x, rvec *f)
{
    int  i;
    real pf2, fn2;
    char buf[STEPSTRSIZE];

    pf2 = sqr(pforce);
    for (i = 0; i < md->homenr; i++)
    {
        fn2 = norm2(f[i]);
        /* We also catch NAN, if the compiler does not optimize this away. */
        if (fn2 >= pf2 || fn2 != fn2)
        {
            fprintf(fp, "step %s  atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    gmx_step_str(step, buf),
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
        }
    }
}
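
/* The "fn2 != fn2" test above relies on IEEE-754 NaN comparing unequal to
 * itself, which is what the in-loop comment alludes to: aggressive
 * floating-point optimization (e.g. finite-math-only modes) may fold the
 * comparison to false. A stand-alone sketch of the same check, under the
 * same IEEE-754 assumption:
 *
 *   static int force_is_suspect(real fn2, real pf2)
 *   {
 *       return fn2 >= pf2 || fn2 != fn2;   // second clause is true only for NaN
 *   }
 */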
static void post_process_forces(t_commrec *cr,
                                gmx_int64_t step,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms *mdatoms,
                                t_graph *graph,
                                t_forcerec *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            if (fr->bDomDec)
            {
                sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
            }
            else
            {
                sum_forces(0, mdatoms->homenr,
                           f, fr->f_novirsum);
            }
            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (EVDW_PME(fr->vdwtype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_lj_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    int                       nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    char                     *env;
    nonbonded_verlet_group_t *nbvg;
    gmx_bool                  bCUDA;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);

    /* CUDA kernel launch overhead is already timed separately */
    if (!bCUDA)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             nbvg->nbat, ic,
                             fr->shift_vec,
                             flags,
                             clearF,
                             fr->fshift[0],
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  nbvg->nbat, ic,
                                  nbvg->ewald_excl,
                                  fr->shift_vec,
                                  flags,
                                  clearF,
                                  fr->fshift[0],
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;
        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   nbvg->nbat, ic,
                                   nbvg->ewald_excl,
                                   fr->shift_vec,
                                   flags,
                                   clearF,
                                   fr->fshift[0],
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_CUDA:
            nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat, ic,
                                 fr->shift_vec,
                                 flags,
                                 clearF,
                                 nbvg->nbat->out[0].f,
                                 fr->fshift[0],
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
    }
    if (!bCUDA)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);

    if (ic->vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
}
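
/* Sketch of the flop-counter arithmetic above, as assumed by this code's
 * own comments: in the eNR_NBNXN_* enum the force+energy variant of each
 * kernel counter sits directly after its force-only variant (hence the
 * "+ 1" when GMX_FORCE_ENERGY is set), and the Coulomb-only counters sit
 * at a constant offset eNR_NBNXN_RF - eNR_NBNXN_LJ_RF from the LJ+Coulomb
 * ones, so a single addition maps an LJ+Coulomb counter to its
 * Coulomb-only twin.
 */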
static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             t_forcerec           *fr,
                             rvec                  x[],
                             rvec                  f[],
                             t_mdatoms            *mdatoms,
                             t_lambda             *fepvals,
                             real                 *lambda,
                             gmx_enerdata_t       *enerd,
                             int                   flags,
                             t_nrnb               *nrnb,
                             gmx_wallcycle_t       wcycle)
{
    int              donb_flags;
    nb_kernel_data_t kernel_data;
    real             lam_i[efptNR];
    real             dvdl_nb[efptNR];
    int              th;
    int              i, j;

    donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (flags & GMX_FORCE_VIRIAL)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (flags & GMX_FORCE_ENERGY)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }
    if (flags & GMX_FORCE_DO_LR)
    {
        donb_flags |= GMX_NONBONDED_DO_LR;
    }

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i] = 0;
    }

    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
    {
        gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                  x, f, fr, mdatoms, &kernel_data, nrnb);
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energy contributions.
     */
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
    {
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
        {
            for (j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
            {
                gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                          x, f, fr, mdatoms, &kernel_data, nrnb);
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}
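
/* Sketch of the foreign-lambda logic above: with soft-core interactions
 * (sc_alpha != 0) the potential is a non-linear function of lambda, so the
 * energy at a foreign lambda_i cannot be reconstructed from V(lambda) and
 * dV/dlambda alone; schematically,
 *
 *   linear coupling:    E_i = V(lambda) + (lambda_i - lambda)*dV/dlambda
 *   soft-core coupling: E_i = V(x; lambda_i)  (full re-evaluation, as done
 *                                              in the loop above)
 *
 * which is why the free-energy kernel is re-run once per foreign lambda
 * point.
 */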
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         rvec f[],
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, FILE *field, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    int                 cg0, cg1, i, j;
    int                 start, homenr;
    int                 nb_kernel_type;
    double              mu[2*DIM];
    gmx_bool            bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
    gmx_bool            bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    matrix              boxs;
    rvec                vzero, box_diag;
    real                e, v, dvdl;
    float               cycles_pme, cycles_force, cycles_wait_gpu;
    nonbonded_verlet_t *nbv;

    cycles_force    = 0;
    cycles_wait_gpu = 0;
    nbv             = fr->nbv;
    nb_kernel_type  = fr->nbv->grp[0].kernel_type;

    start  = 0;
    homenr = mdatoms->homenr;

    bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    if (fr->n_tpi > 0)
    {
        cg1--;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoLongRange  = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bSepLRF       = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    if (bStateChanged)
    {
        update_forcerec(fr, box);

        if (NEED_MUTOT(*inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

#ifdef GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */

        int pme_flags = 0;

        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        if (EEL_PME(fr->eeltype))
        {
            pme_flags |= GMX_PME_DO_COULOMB;
        }

        if (EVDW_PME(fr->vdwtype))
        {
            pme_flags |= GMX_PME_DO_LJ;
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 pme_flags, step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        clear_rvec(vzero);
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!fr->bDomDec)
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, vzero, box_diag,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              0, NULL,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       fr->cginfo, x,
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        wallcycle_stop(wcycle, ewcNS);
    }

    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            &top->excls,
                            ic->rlist,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            eintLocal,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                     nbv->grp[eintLocal].nbl_lists.nbl[0],
                                     eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                &top->excls,
                                ic->rlist,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                eintNonlocal,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                         nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                         eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);

            /* When we don't need the total dipole we sum it in global_stat */
            if (bStateChanged && NEED_MUTOT(*inputrec))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
                                      flags, eatNonlocal);
        }
        nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
                                  flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    if (bStateChanged && NEED_MUTOT(*inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }
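
    /* With free energy perturbation the total dipole above is interpolated
     * linearly between the A and B charge states,
     * mu = (1 - lambda_coul)*mu_A + lambda_coul*mu_B, matching the linear
     * interpolation of the perturbed charges themselves. */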

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * This counter is stopped in do_forcelow_level.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->f_novirsum_alloc;
                if (fr->bDomDec)
                {
                    clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
                }
                else
                {
                    clear_rvecs(homenr, fr->f_novirsum+start);
                }
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = f;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);
        if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
        {
            clear_rvecs(fr->natoms_force_constr, fr->f_twin);
        }

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->ePull == epullCONSTRAINT)
    {
        clear_pull_forces(inputrec->pull);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, since there the bonded
     * forces are calculated before PME, which does communication.
     * With this order, non-bonded and bonded force calculation imbalance
     * can be balanced out by the domain decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }
    }

    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
    {
        posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
                       enerd, lambda, fr);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
    {
        fbposres_wrapper(inputrec, nrnb, top, box, x, enerd, fr);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      &(top->atomtypes), bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    if (bSepLRF)
    {
        if (do_per_step(step, inputrec->nstcalclr))
        {
            /* Add the long range forces to the short range forces */
            for (i = 0; i < fr->natoms_force_constr; i++)
            {
                rvec_add(fr->f_twin[i], f[i], f[i]);
            }
        }
    }

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                float cycles_tmp;

                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                    nbv->grp[eintNonlocal].nbat,
                                    flags, eatNonlocal,
                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                    fr->fshift);
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }

    if (bDoForces && DOMAINDECOMP(cr))
    {
        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        /* Do we need to communicate the separate force array
         * for terms that do not contribute to the single sum virial?
         * Position restraints and electric fields do not introduce
         * inter-cg forces, only full electrostatics methods do.
         * When we do not calculate the virial, fr->f_novirsum = f,
         * so we have already communicated these forces.
         */
        if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
            (flags & GMX_FORCE_VIRIAL))
        {
            dd_move_f(cr->dd, fr->f_novirsum, NULL);
        }
        if (bSepLRF)
        {
            /* We should not update the shift forces here,
             * since f_twin is already included in f.
             */
            dd_move_f(cr->dd, fr->f_twin, NULL);
        }
        wallcycle_stop(wcycle, ewcMOVEF);
    }

    if (bUseOrEmulGPU)
    {
        /* wait for local forces (or calculate in emulation mode) */
        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                nbv->grp[eintLocal].nbat,
                                flags, eatLocal,
                                enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                fr->fshift);
            cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            /* now clear the GPU outputs while we finish the step on the CPU */

            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
        {
            /* skip the reduction if there was no local work to do */
            nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                           nbv->grp[eintLocal].nbat, f);
        }
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
            if (bUseGPU)
            {
                dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
            }
        }
    }

    if (bDoForces)
    {
        if (IR_ELEC_FIELD(*inputrec))
        {
            /* Compute forces due to electric field */
            calc_f_el(MASTER(cr) ? field : NULL,
                      start, homenr, mdatoms->chargeA, fr->f_novirsum,
                      inputrec->ex, inputrec->et, t);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);

            if (bSepLRF)
            {
                wallcycle_start(wcycle, ewcVSITESPREAD);
                spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
                               nrnb,
                               &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
                wallcycle_stop(wcycle, ewcVSITESPREAD);
            }
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
    {
        pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    /* Sum the potential energy terms from group contributions */
    sum_epot(&(enerd->grpp), enerd->term);
}

void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        rvec f[],
                        tensor vir_force,
                        t_mdatoms *mdatoms,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, FILE *field, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
                        int flags)
{
    int      cg0, cg1, i, j;
    int      start, homenr;
    double   mu[2*DIM];
    gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
    gmx_bool bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
    gmx_bool bDoAdressWF;
    matrix   boxs;
    rvec     vzero, box_diag;
    real     e, v, dvdlambda[efptNR];
    t_pbc    pbc;
    float    cycles_pme, cycles_force;

    start  = 0;
    homenr = mdatoms->homenr;

    bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    if (fr->n_tpi > 0)
    {
        cg1--;
    }

    bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
    bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we update the long-range neighborlists at this step? */
    bDoLongRangeNS = fr->bTwinRange && bNS;
    /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
    bFillGrid      = (bNS && bStateChanged);
    bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces      = (flags & GMX_FORCE_FORCES);
    bDoPotential   = (flags & GMX_FORCE_ENERGY);
    bSepLRF        = ((inputrec->nstcalclr > 1) && bDoForces &&
                      (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));

    /* should probably move this to the forcerec since it doesn't change */
    bDoAdressWF = ((fr->adress_type != eAdressOff));

    if (bStateChanged)
    {
        update_forcerec(fr, box);

        if (NEED_MUTOT(*inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                     &(top->cgs), x, fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);
            inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }
    else if (bCalcCGCM)
    {
        calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
        inc_nrnb(nrnb, eNR_CGCM, homenr);
    }

    if (bCalcCGCM && gmx_debug_at)
    {
        pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
    }

#ifdef GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */

        int pme_flags = 0;

        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        if (EEL_PME(fr->eeltype))
        {
            pme_flags |= GMX_PME_DO_COULOMB;
        }

        if (EVDW_PME(fr->vdwtype))
        {
            pme_flags |= GMX_PME_DO_LJ;
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 pme_flags, step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* Communicate coordinates and sum dipole if necessary */
    if (DOMAINDECOMP(cr))
    {
        wallcycle_start(wcycle, ewcMOVEX);
        dd_move_x(cr->dd, box, x);
        wallcycle_stop(wcycle, ewcMOVEX);
    }

    /* update adress weight beforehand */
    if (bStateChanged && bDoAdressWF)
    {
        /* need pbc for adress weight calculation with pbc_dx */
        set_pbc(&pbc, inputrec->ePBC, box);
        if (fr->adress_site == eAdressSITEcog)
        {
            update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
                                      inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }
        else if (fr->adress_site == eAdressSITEcom)
        {
            update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
                                      inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }
        else if (fr->adress_site == eAdressSITEatomatom)
        {
            update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
                                                inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }
        else
        {
            update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
                                       inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }
    }

    if (NEED_MUTOT(*inputrec))
    {
        if (bStateChanged)
        {
            if (PAR(cr))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            for (i = 0; i < 2; i++)
            {
                for (j = 0; j < DIM; j++)
                {
                    fr->mu_tot[i][j] = mu[i*DIM + j];
                }
            }
        }
        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    if (bNS)
    {
        wallcycle_start(wcycle, ewcNS);

        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        /* Do the actual neighbour searching */
        ns(fplog, fr, box,
           groups, top, mdatoms,
           cr, nrnb, bFillGrid,
           bDoLongRangeNS);

        wallcycle_stop(wcycle, ewcNS);
    }

    if (inputrec->implicit_solvent && bNS)
    {
        make_gb_nblist(cr, inputrec->gb_algorithm,
                       x, box, fr, &top->idef, graph, fr->born);
    }

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * This counter is stopped in do_forcelow_level.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->f_novirsum_alloc;
                if (fr->bDomDec)
                {
                    clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
                }
                else
                {
                    clear_rvecs(homenr, fr->f_novirsum+start);
                }
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = f;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);
        if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
        {
            clear_rvecs(fr->natoms_force_constr, fr->f_twin);
        }

        clear_rvec(fr->vir_diag_posres);
    }
    if (inputrec->ePull == epullCONSTRAINT)
    {
        clear_pull_forces(inputrec->pull);
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
    {
        posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
                       enerd, lambda, fr);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
    {
        fbposres_wrapper(inputrec, nrnb, top, box, x, enerd, fr);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      &(top->atomtypes), bBornRadii, box,
                      inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,
                      flags,
                      &cycles_pme);

    if (bSepLRF)
    {
        if (do_per_step(step, inputrec->nstcalclr))
        {
            /* Add the long range forces to the short range forces */
            for (i = 0; i < fr->natoms_force_constr; i++)
            {
                rvec_add(fr->f_twin[i], f[i], f[i]);
            }
        }
    }

    cycles_force = wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
        }
    }

    if (bDoForces)
    {
        if (IR_ELEC_FIELD(*inputrec))
        {
            /* Compute forces due to electric field */
            calc_f_el(MASTER(cr) ? field : NULL,
                      start, homenr, mdatoms->chargeA, fr->f_novirsum,
                      inputrec->ex, inputrec->et, t);
        }

        if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
        {
            /* Compute thermodynamic force in hybrid AdResS region */
            adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
                                inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }

        /* Communicate the forces */
        if (DOMAINDECOMP(cr))
        {
            wallcycle_start(wcycle, ewcMOVEF);
            dd_move_f(cr->dd, f, fr->fshift);
            /* Do we need to communicate the separate force array
             * for terms that do not contribute to the single sum virial?
             * Position restraints and electric fields do not introduce
             * inter-cg forces, only full electrostatics methods do.
             * When we do not calculate the virial, fr->f_novirsum = f,
             * so we have already communicated these forces.
             */
            if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
                (flags & GMX_FORCE_VIRIAL))
            {
                dd_move_f(cr->dd, fr->f_novirsum, NULL);
            }
            if (bSepLRF)
            {
                /* We should not update the shift forces here,
                 * since f_twin is already included in f.
                 */
                dd_move_f(cr->dd, fr->f_twin, NULL);
            }
            wallcycle_stop(wcycle, ewcMOVEF);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);

            if (bSepLRF)
            {
                wallcycle_start(wcycle, ewcVSITESPREAD);
                spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
                               nrnb,
                               &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
                wallcycle_stop(wcycle, ewcVSITESPREAD);
            }
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(0, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
    {
        pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
    IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    /* Sum the potential energy terms from group contributions */
    sum_epot(&(enerd->grpp), enerd->term);
}

void do_force(FILE *fplog, t_commrec *cr,
              t_inputrec *inputrec,
              gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_localtop_t *top,
              gmx_groups_t *groups,
              matrix box, rvec x[], history_t *hist,
              rvec f[],
              tensor vir_force,
              t_mdatoms *mdatoms,
              gmx_enerdata_t *enerd, t_fcdata *fcd,
              real *lambda, t_graph *graph,
              t_forcerec *fr,
              gmx_vsite_t *vsite, rvec mu_tot,
              double t, FILE *field, gmx_edsam_t ed,
              gmx_bool bBornRadii,
              int flags)
{
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }

    switch (inputrec->cutoff_scheme)
    {
        case ecutsVERLET:
            do_force_cutsVERLET(fplog, cr, inputrec,
                                step, nrnb, wcycle,
                                top,
                                groups,
                                box, x, hist,
                                f, vir_force,
                                mdatoms,
                                enerd, fcd,
                                lambda, graph,
                                fr, fr->ic,
                                vsite, mu_tot,
                                t, field, ed,
                                bBornRadii,
                                flags);
            break;
        case ecutsGROUP:
            do_force_cutsGROUP(fplog, cr, inputrec,
                               step, nrnb, wcycle,
                               top,
                               groups,
                               box, x, hist,
                               f, vir_force,
                               mdatoms,
                               enerd, fcd,
                               lambda, graph,
                               fr, vsite, mu_tot,
                               t, field, ed,
                               bBornRadii,
                               flags);
            break;
        default:
            gmx_incons("Invalid cut-off scheme passed!");
    }
}
void do_constrain_first(FILE *fplog, gmx_constr_t constr,
                        t_inputrec *ir, t_mdatoms *md,
                        t_state *state, t_commrec *cr, t_nrnb *nrnb,
                        t_forcerec *fr, gmx_localtop_t *top)
{
    int         i, m, start, end;
    gmx_int64_t step;
    real        dt = ir->delta_t;
    real        dvdl_dum;
    rvec       *savex;

    snew(savex, state->natoms);

    start = 0;
    end   = md->homenr;

    if (debug)
    {
        fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
                start, md->homenr, end);
    }
    /* Do a first constrain to reset particles... */
    step = ir->init_step;
    if (fplog)
    {
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));
    }
    dvdl_dum = 0;

    /* constrain the current position */
    constrain(NULL, TRUE, FALSE, constr, &(top->idef),
              ir, NULL, cr, step, 0, md,
              state->x, state->x, NULL,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              NULL, NULL, nrnb, econqCoord,
              ir->epc == epcMTTK, state->veta, state->veta);
    if (EI_VV(ir->eI))
    {
        /* constrain the initial velocity, and save it */
        /* also may be useful if we need the ekin from the halfstep for velocity verlet */
        /* might not yet treat veta correctly */
        constrain(NULL, TRUE, FALSE, constr, &(top->idef),
                  ir, NULL, cr, step, 0, md,
                  state->x, state->v, state->v,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  NULL, NULL, nrnb, econqVeloc,
                  ir->epc == epcMTTK, state->veta, state->veta);
    }
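    /* Note: the loop below flips the sign of v before building
     * savex = x + dt*v, so savex holds the positions integrated
     * backwards in time to t0-dt. Constraining savex against the
     * (already constrained) positions at t0, with state->v passed to
     * constrain(), also applies the constraint correction to the
     * velocities, removing their components along the constraints.
     */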
    /* constrain the initial velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
    {
        for (i = start; (i < end); i++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* Reverse the velocity */
                state->v[i][m] = -state->v[i][m];
                /* Store the position at t-dt in buf */
                savex[i][m] = state->x[i][m] + dt*state->v[i][m];
            }
        }
        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
        if (fplog)
        {
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));
        }
        dvdl_dum = 0;
        constrain(NULL, TRUE, FALSE, constr, &(top->idef),
                  ir, NULL, cr, step, -1, md,
                  state->x, savex, NULL,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  state->v, NULL, nrnb, econqCoord,
                  ir->epc == epcMTTK, state->veta, state->veta);

        for (i = start; i < end; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                /* Re-reverse the velocities */
                state->v[i][m] = -state->v[i][m];
            }
        }
    }
    sfree(savex);
}
static void
integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
                double *enerout, double *virout)
{
    double enersum, virsum;
    double invscale, invscale2, invscale3;
    double r, ea, eb, ec, pa, pb, pc, pd;
    double y0, f, g, h;
    int    ri, offset, tabfactor;

    invscale  = 1.0/scale;
    invscale2 = invscale*invscale;
    invscale3 = invscale*invscale2;

    /* Following summation derived from cubic spline definition,
     * Numerical Recipes in C, second edition, p. 113-116. Exact for
     * the cubic spline. We first calculate the negative of the
     * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
     * add the more standard, abrupt cutoff correction to that result,
     * yielding the long-range correction for a switched function. We
     * perform both the pressure and energy loops at the same time for
     * simplicity, as the computational cost is low. */

    if (offstart == 0)
    {
        /* Since the dispersion table has been scaled down a factor
         * 6.0 and the repulsion a factor 12.0 to compensate for the
         * c6/c12 parameters inside nbfp[] being scaled up (to save
         * flops in kernels), we need to correct for this.
         */
        tabfactor = 6;
    }
    else
    {
        tabfactor = 12;
    }

    enersum = 0.0;
    virsum  = 0.0;
    for (ri = rstart; ri < rend; ++ri)
    {
        r  = ri*invscale;
        ea = invscale3;
        eb = 2.0*invscale2*r;
        ec = invscale*r*r;

        pa = invscale3;
        pb = 3.0*invscale2*r;
        pc = 3.0*invscale*r*r;
        pd = r*r*r;
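        /* The bracketed factors in the sums below are moments of the
         * volume element over one table bin: with r = (ri + t)*invscale
         * and t in [0,1], r^2 dr expands to (ec + eb*t + ea*t^2)*dt,
         * while for the virial (where the spline derivative is taken
         * with respect to t, cancelling one factor of invscale)
         * r^3 dr expands to (pd + pc*t + pb*t^2 + pa*t^3)*dt.
         * Integrating the spline y0 + f*t + g*t^2 + h*t^3 (and its
         * derivative f + 2g*t + 3h*t^2) against these polynomials term
         * by term produces the 1/2, 1/3, ... coefficients.
         */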
        /* this "8" is from the packing in the vdwtab array - perhaps
           should be defined? */
        offset = 8*ri + offstart;
        y0     = vdwtab[offset];
        f      = vdwtab[offset+1];
        g      = vdwtab[offset+2];
        h      = vdwtab[offset+3];

        enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
        virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
    }
    *enerout = 4.0*M_PI*enersum*tabfactor;
    *virout  = 4.0*M_PI*virsum*tabfactor;
}
void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
{
    double eners[2], virs[2], enersum, virsum, y0, f, g, h;
    double r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
    double invscale, invscale2, invscale3;
    int    ri0, ri1, ri, i, offstart, offset;
    real   scale, *vdwtab, tabfactor, tmp;

    fr->enershiftsix    = 0;
    fr->enershifttwelve = 0;
    fr->enerdiffsix     = 0;
    fr->enerdifftwelve  = 0;
    fr->virdiffsix      = 0;
    fr->virdifftwelve   = 0;

    if (eDispCorr != edispcNO)
    {
        for (i = 0; i < 2; i++)
        {
            eners[i] = 0;
            virs[i]  = 0;
        }
        if (fr->vdwtype == evdwSWITCH || fr->vdwtype == evdwSHIFT ||
            fr->vdw_modifier == eintmodPOTSWITCH ||
            fr->vdw_modifier == eintmodFORCESWITCH)
        {
            if (fr->rvdw_switch == 0)
            {
                gmx_fatal(FARGS,
                          "With dispersion correction rvdw-switch can not be zero "
                          "for vdw-type = %s", evdw_names[fr->vdwtype]);
            }

            scale  = fr->nblists[0].table_elec_vdw.scale;
            vdwtab = fr->nblists[0].table_vdw.data;

            /* Round the cut-offs to exact table values for precision */
            ri0 = floor(fr->rvdw_switch*scale);
            ri1 = ceil(fr->rvdw*scale);
            r0  = ri0/scale;
            r1  = ri1/scale;
            rc3 = r0*r0*r0;
            rc9 = rc3*rc3*rc3;

            if (fr->vdwtype == evdwSHIFT ||
                fr->vdw_modifier == eintmodFORCESWITCH)
            {
                /* Determine the constant energy shift below rvdw_switch.
                 * Table has a scale factor since we have scaled it down to compensate
                 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
                 */
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
            }
            /* Add the constant part from 0 to rvdw_switch.
             * This integration from 0 to rvdw_switch overcounts the number
             * of interactions by 1, as it also counts the self interaction.
             * We will correct for this later.
             */
            eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
            eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
            for (i = 0; i < 2; i++)
            {
                enersum = 0;
                virsum  = 0;
                integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
                eners[i] -= enersum;
                virs[i]  -= virsum;
            }
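            /* For the plain 1/r^6 and 1/r^12 forms beyond rvdw_switch
             * the tail integrals are analytic:
             *   -int_r0^inf 4 pi r^2 r^-6   dr = -4 pi/(3 r0^3)
             *    int_r0^inf 4 pi r^2 r^-12  dr =  4 pi/(9 r0^9)
             * and for the virial, using r^3 dV/dr:
             *    int_r0^inf 4 pi r^3 (6 r^-7)    dr =   8 pi/r0^3
             *    int_r0^inf 4 pi r^3 (-12 r^-13) dr = -16 pi/(3 r0^9)
             */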
            /* now add the correction for rvdw_switch to infinity */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
        }
        else if (fr->vdwtype == evdwCUT ||
                 EVDW_PME(fr->vdwtype) ||
                 fr->vdwtype == evdwUSER)
        {
            if (fr->vdwtype == evdwUSER && fplog)
            {
                fprintf(fplog,
                        "WARNING: using dispersion correction with user tables\n");
            }

            /* Note that with LJ-PME, the dispersion correction is multiplied
             * by the difference between the actual C6 and the value of C6
             * that would produce the combination rule.
             * This means the normal energy and virial difference formulas
             * can be used here.
             */

            rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
            rc9 = rc3*rc3*rc3;
            /* Contribution beyond the cut-off */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            if (fr->vdw_modifier == eintmodPOTSHIFT)
            {
                /* Contribution within the cut-off */
                eners[0] += -4.0*M_PI/(3.0*rc3);
                eners[1] +=  4.0*M_PI/(3.0*rc9);
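                /* These two terms remove the constant potential shifts
                 * (+1/rc^6 for dispersion, -1/rc^12 for repulsion) that
                 * the potential-shift modifier adds inside the cut-off
                 * sphere: int_0^rc 4 pi r^2 rc^-n dr = 4 pi rc^3/(3 rc^n).
                 */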
            }
            /* Contribution beyond the cut-off */
            virs[0] +=  8.0*M_PI/rc3;
            virs[1] += -16.0*M_PI/(3.0*rc9);
        }
        else
        {
            gmx_fatal(FARGS,
                      "Dispersion correction is not implemented for vdw-type = %s",
                      evdw_names[fr->vdwtype]);
        }
        /* TODO: remove this code once we have group LJ-PME kernels
         * that calculate the exact, full LJ param C6/r^6 within the cut-off,
         * as the current nbnxn kernels do.
         */
        if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
        {
            /* Calculate self-interaction coefficient (assuming that
             * the reciprocal-space contribution is constant in the
             * region that contributes to the self-interaction).
             */
            fr->enershiftsix = pow(fr->ewaldcoeff_lj, 6) / 6.0;

            eners[0] += -pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3)/3.0;
            virs[0]  +=  pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3);
        }

        fr->enerdiffsix    = eners[0];
        fr->enerdifftwelve = eners[1];
        /* The 0.5 is due to the Gromacs definition of the virial */
        fr->virdiffsix     = 0.5*virs[0];
        fr->virdifftwelve  = 0.5*virs[1];
    }
}
void calc_dispcorr(FILE *fplog, t_inputrec *ir, t_forcerec *fr,
                   gmx_int64_t step, int natoms,
                   matrix box, real lambda, tensor pres, tensor virial,
                   real *prescorr, real *enercorr, real *dvdlcorr)
{
    gmx_bool bCorrAll, bCorrPres;
    real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
    int      m;

    *prescorr = 0;
    *enercorr = 0;
    *dvdlcorr = 0;

    clear_mat(virial);
    clear_mat(pres);

    if (ir->eDispCorr != edispcNO)
    {
        bCorrAll  = (ir->eDispCorr == edispcAllEner ||
                     ir->eDispCorr == edispcAllEnerPres);
        bCorrPres = (ir->eDispCorr == edispcEnerPres ||
                     ir->eDispCorr == edispcAllEnerPres);

        invvol = 1/det(box);
        if (fr->n_tpi)
        {
            /* Only correct for the interactions with the inserted molecule */
            dens   = (natoms - fr->n_tpi)*invvol;
            ninter = fr->n_tpi;
        }
        else
        {
            dens   = natoms*invvol;
            ninter = 0.5*natoms;
        }
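        /* ninter*dens ~ N*(N/2)/V: each of the ~N^2/2 pairs contributes
         * once, so the correction per unit of <C6> (or <C12>) is this
         * pair density times the per-pair energy/virial integrals
         * computed in calc_enervirdiff.
         */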
        if (ir->efep == efepNO)
        {
            avcsix    = fr->avcsix[0];
            avctwelve = fr->avctwelve[0];
        }
        else
        {
            avcsix    = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
            avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
        }
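        /* Since <C6> and <C12> are linear in lambda, dV/dlambda of the
         * correction is simply (avc[1] - avc[0]) times the corresponding
         * energy difference term; it is accumulated into dvdlambda below.
         */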
        enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
        *enercorr += avcsix*enerdiff;
        dvdlambda  = 0.0;
        if (ir->efep != efepNO)
        {
            dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
        }
        if (bCorrAll)
        {
            enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
            *enercorr += avctwelve*enerdiff;
            if (fr->efep != efepNO)
            {
                dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
            }
        }

        if (bCorrPres)
        {
            svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
            if (ir->eDispCorr == edispcAllEnerPres)
            {
                svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
            }
            /* The factor 2 is because of the Gromacs virial definition */
            spres = -2.0*invvol*svir*PRESFAC;
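            /* PRESFAC converts the result from kJ mol^-1 nm^-3 to bar.
             * The correction is isotropic: svir and spres are added
             * uniformly to the diagonal elements below.
             */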
            for (m = 0; m < DIM; m++)
            {
                virial[m][m] += svir;
                pres[m][m]   += spres;
            }
            *prescorr += spres;
        }

        /* Can't currently control when it prints, for now, just print when debugging */
        if (debug)
        {
            if (bCorrAll)
            {
                fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
                        avcsix, avctwelve);
            }
            if (bCorrPres)
            {
                fprintf(debug,
                        "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
                        *enercorr, spres, svir);
            }
            else
            {
                fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
            }
        }

        if (fr->bSepDVDL && do_per_step(step, ir->nstlog))
        {
            gmx_print_sepdvdl(fplog, "Dispersion correction", *enercorr, dvdlambda);
        }
        if (fr->efep != efepNO)
        {
            *dvdlcorr += dvdlambda;
        }
    }
}
void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
                  t_graph *graph, rvec x[])
{
    if (fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }
    calc_shifts(box, fr->shift_vec);
    if (graph)
    {
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 1", graph);
        }
        shift_self(graph, box, x);
        /* By doing an extra mk_mshift the molecules that are broken
         * because they were e.g. imported from another software
         * will be made whole again. Such are the healing powers
         * of GROMACS.
         */
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 2", graph);
        }
    }
    if (fplog)
    {
        fprintf(fplog, "Done rmpbc\n");
    }
}
static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                            gmx_mtop_t *mtop, rvec x[],
                            gmx_bool bFirst)
{
    t_graph        *graph;
    int             mb, as, mol;
    gmx_molblock_t *molb;

    if (bFirst && fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }

    snew(graph, 1);
    as = 0;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molb = &mtop->molblock[mb];
        if (molb->natoms_mol == 1 ||
            (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
        {
            /* Just one atom or charge group in the molecule, no PBC required */
            as += molb->nmol*molb->natoms_mol;
        }
        else
        {
            /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
            mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
                           0, molb->natoms_mol, FALSE, FALSE, graph);

            for (mol = 0; mol < molb->nmol; mol++)
            {
                mk_mshift(fplog, graph, ePBC, box, x+as);

                shift_self(graph, box, x+as);
                /* The molecule is whole now.
                 * We don't need the second mk_mshift call as in do_pbc_first,
                 * since we no longer need this graph.
                 */

                as += molb->natoms_mol;
            }
            done_graph(graph);
        }
    }
    sfree(graph);
}

void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
                       gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
}

void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                 gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
}
void finish_run(FILE *fplog, t_commrec *cr,
                t_inputrec *inputrec,
                t_nrnb nrnb[], gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                wallclock_gpu_t *gputimes,
                gmx_bool bWriteStat)
{
    int     i, j;
    t_nrnb *nrnb_tot = NULL;
    real    delta_t;
    double  nbfs, mflop;
    double  elapsed_time,
            elapsed_time_over_all_ranks,
            elapsed_time_over_all_threads,
            elapsed_time_over_all_threads_over_all_ranks;

    wallcycle_sum(cr, wcycle);

    if (cr->nnodes > 1)
    {
        snew(nrnb_tot, 1);
#ifdef GMX_MPI
        MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
#endif
    }
    else
    {
        nrnb_tot = nrnb;
    }

    elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
    elapsed_time_over_all_ranks                  = elapsed_time;
    elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
    elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
#ifdef GMX_MPI
    if (cr->nnodes > 1)
    {
        /* reduce elapsed_time over all MPI ranks in the current simulation */
        MPI_Allreduce(&elapsed_time,
                      &elapsed_time_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
        elapsed_time_over_all_ranks /= cr->nnodes;
        /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
         * current simulation. */
        MPI_Allreduce(&elapsed_time_over_all_threads,
                      &elapsed_time_over_all_threads_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
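        /* After these reductions elapsed_time_over_all_ranks holds the
         * average wall-clock time per rank (sum divided by nnodes),
         * while the thread time is deliberately left as a plain sum:
         * print_perf uses it below as the total core time.
         */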
    }
#endif

    if (SIMMASTER(cr))
    {
        print_flop(fplog, nrnb_tot, &nbfs, &mflop);
    }
    if (cr->nnodes > 1)
    {
        sfree(nrnb_tot);
    }

    if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
    {
        print_dd_statistics(cr, inputrec, fplog);
    }

    if (SIMMASTER(cr))
    {
        wallcycle_print(fplog, cr->nnodes, cr->npmenodes,
                        elapsed_time_over_all_ranks,
                        wcycle, gputimes);

        if (EI_DYNAMICS(inputrec->eI))
        {
            delta_t = inputrec->delta_t;
        }
        else
        {
            delta_t = 0;
        }

        if (fplog)
        {
            print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
        if (bWriteStat)
        {
            print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
    }
}
extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
{
    /* this function works, but could probably use a logic rewrite to keep all the different
       types of efep straight. */

    int       i;
    t_lambda *fep = ir->fepvals;

    if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
    {
        for (i = 0; i < efptNR; i++)
        {
            lambda[i] = 0.0;
            if (lam0)
            {
                lam0[i] = 0.0;
            }
        }
        return;
    }
    else
    {
        *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
                                             if checkpoint is set -- a kludge is in for now
                                             to prevent this.*/
        for (i = 0; i < efptNR; i++)
        {
            /* overwrite lambda state with init_lambda for now for backwards compatibility */
            if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
            {
                lambda[i] = fep->init_lambda;
                if (lam0)
                {
                    lam0[i] = lambda[i];
                }
            }
            else
            {
                lambda[i] = fep->all_lambda[i][*fep_state];
                if (lam0)
                {
                    lam0[i] = lambda[i];
                }
            }
        }
        if (ir->bSimTemp)
        {
            /* need to rescale control temperatures to match current state */
            for (i = 0; i < ir->opts.ngtc; i++)
            {
                if (ir->opts.ref_t[i] > 0)
                {
                    ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
                }
            }
        }
    }

    /* Send to the log the information on the current lambdas */
    if (fplog != NULL)
    {
        fprintf(fplog, "Initial vector of lambda components:[ ");
        for (i = 0; i < efptNR; i++)
        {
            fprintf(fplog, "%10.4f ", lambda[i]);
        }
        fprintf(fplog, "]\n");
    }
    return;
}
void init_md(FILE *fplog,
             t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
             double *t, double *t0,
             real *lambda, int *fep_state, double *lam0,
             t_nrnb *nrnb, gmx_mtop_t *mtop,
             gmx_update_t *upd,
             int nfile, const t_filenm fnm[],
             gmx_mdoutf_t *outf, t_mdebin **mdebin,
             tensor force_vir, tensor shake_vir, rvec mu_tot,
             gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags)
{
    int  i, j, n;
    real tmpt, mod;

    /* Initial values */
    *t = *t0 = ir->init_t;

    *bSimAnn = FALSE;
    for (i = 0; i < ir->opts.ngtc; i++)
    {
        /* set bSimAnn if any group is being annealed */
        if (ir->opts.annealing[i] != eannNO)
        {
            *bSimAnn = TRUE;
        }
    }
    if (*bSimAnn)
    {
        update_annealing_target_temp(&(ir->opts), ir->init_t);
    }

    /* Initialize lambda variables */
    initialize_lambdas(fplog, ir, fep_state, lambda, lam0);

    if (upd)
    {
        *upd = init_update(ir);
    }

    if (vcm != NULL)
    {
        *vcm = init_vcm(fplog, &mtop->groups, ir);
    }

    if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
    {
        if (ir->etc == etcBERENDSEN)
        {
            please_cite(fplog, "Berendsen84a");
        }
        if (ir->etc == etcVRESCALE)
        {
            please_cite(fplog, "Bussi2007a");
        }
    }

    init_nrnb(nrnb);

    if (nfile != -1)
    {
        *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv);

        *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : mdoutf_get_fp_ene(*outf),
                              mtop, ir, mdoutf_get_fp_dhdl(*outf));
    }

    if (ir->bAdress)
    {
        please_cite(fplog, "Fritsch12");
        please_cite(fplog, "Junghans10");
    }
    /* Initiate variables */
    clear_mat(force_vir);
    clear_mat(shake_vir);
    clear_rvec(mu_tot);

    debug_gmx();
}