Add one element to state rvec arrays
[gromacs.git] / src / gromacs / mdlib / sim_util.cpp
blob 5b1305bac459a63fb8fe11a608d09637797f4c96
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 #include "gmxpre.h"
39 #include "sim_util.h"
41 #include "config.h"
43 #include <assert.h>
44 #include <math.h>
45 #include <stdio.h>
46 #include <string.h>
48 #include <array>
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/domdec/domdec_struct.h"
52 #include "gromacs/essentialdynamics/edsam.h"
53 #include "gromacs/ewald/pme.h"
54 #include "gromacs/gmxlib/chargegroup.h"
55 #include "gromacs/gmxlib/network.h"
56 #include "gromacs/gmxlib/nrnb.h"
57 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
58 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
59 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
60 #include "gromacs/imd/imd.h"
61 #include "gromacs/listed-forces/bonded.h"
62 #include "gromacs/listed-forces/disre.h"
63 #include "gromacs/listed-forces/orires.h"
64 #include "gromacs/math/functions.h"
65 #include "gromacs/math/units.h"
66 #include "gromacs/math/vec.h"
67 #include "gromacs/math/vecdump.h"
68 #include "gromacs/mdlib/calcmu.h"
69 #include "gromacs/mdlib/constr.h"
70 #include "gromacs/mdlib/force.h"
71 #include "gromacs/mdlib/forcerec.h"
72 #include "gromacs/mdlib/genborn.h"
73 #include "gromacs/mdlib/gmx_omp_nthreads.h"
74 #include "gromacs/mdlib/mdrun.h"
75 #include "gromacs/mdlib/nb_verlet.h"
76 #include "gromacs/mdlib/nbnxn_atomdata.h"
77 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
78 #include "gromacs/mdlib/nbnxn_grid.h"
79 #include "gromacs/mdlib/nbnxn_search.h"
80 #include "gromacs/mdlib/qmmm.h"
81 #include "gromacs/mdlib/update.h"
82 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
83 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
84 #include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
85 #include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
86 #include "gromacs/mdtypes/commrec.h"
87 #include "gromacs/mdtypes/inputrec.h"
88 #include "gromacs/mdtypes/md_enums.h"
89 #include "gromacs/pbcutil/ishift.h"
90 #include "gromacs/pbcutil/mshift.h"
91 #include "gromacs/pbcutil/pbc.h"
92 #include "gromacs/pulling/pull.h"
93 #include "gromacs/pulling/pull_rotation.h"
94 #include "gromacs/timing/cyclecounter.h"
95 #include "gromacs/timing/gpu_timing.h"
96 #include "gromacs/timing/wallcycle.h"
97 #include "gromacs/timing/wallcyclereporting.h"
98 #include "gromacs/timing/walltime_accounting.h"
99 #include "gromacs/utility/basedefinitions.h"
100 #include "gromacs/utility/cstringutil.h"
101 #include "gromacs/utility/exceptions.h"
102 #include "gromacs/utility/fatalerror.h"
103 #include "gromacs/utility/gmxassert.h"
104 #include "gromacs/utility/gmxmpi.h"
105 #include "gromacs/utility/pleasecite.h"
106 #include "gromacs/utility/smalloc.h"
107 #include "gromacs/utility/sysinfo.h"
109 #include "nbnxn_gpu.h"
111 static const bool useCuda = GMX_GPU == GMX_GPU_CUDA;
112 static const bool useOpenCL = GMX_GPU == GMX_GPU_OPENCL;
114 void print_time(FILE *out,
115 gmx_walltime_accounting_t walltime_accounting,
116 gmx_int64_t step,
117 t_inputrec *ir,
118 t_commrec gmx_unused *cr)
120 time_t finish;
121 char timebuf[STRLEN];
122 double dt, elapsed_seconds, time_per_step;
123 char buf[48];
125 #ifndef GMX_THREAD_MPI
126 if (!PAR(cr))
127 #endif
129 fprintf(out, "\r");
131 fprintf(out, "step %s", gmx_step_str(step, buf));
132 if ((step >= ir->nstlist))
134 double seconds_since_epoch = gmx_gettime();
135 elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
136 time_per_step = elapsed_seconds/(step - ir->init_step + 1);
137 dt = (ir->nsteps + ir->init_step - step) * time_per_step;
139 if (ir->nsteps >= 0)
141 if (dt >= 300)
143 finish = (time_t) (seconds_since_epoch + dt);
144 gmx_ctime_r(&finish, timebuf, STRLEN);
145 sprintf(buf, "%s", timebuf);
146 buf[strlen(buf)-1] = '\0';
147 fprintf(out, ", will finish %s", buf);
149 else
151 fprintf(out, ", remaining wall clock time: %5d s ", (int)dt);
154 else
156 fprintf(out, " performance: %.1f ns/day ",
157 ir->delta_t/1000*24*60*60/time_per_step);
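/* Editorial note (inferred from the expression above, not part of the
 * original source): delta_t is the MD time step in ps, so delta_t/1000 is
 * ns per step; dividing by the measured wall-clock seconds per step and
 * multiplying by 24*60*60 s/day yields the estimated throughput in ns/day.
 */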
160 #ifndef GMX_THREAD_MPI
161 if (PAR(cr))
163 fprintf(out, "\n");
165 #endif
167 fflush(out);
170 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
171 double the_time)
173 char time_string[STRLEN];
175 if (!fplog)
177 return;
181 int i;
182 char timebuf[STRLEN];
183 time_t temp_time = (time_t) the_time;
185 gmx_ctime_r(&temp_time, timebuf, STRLEN);
186 for (i = 0; timebuf[i] >= ' '; i++)
188 time_string[i] = timebuf[i];
190 time_string[i] = '\0';
193 fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
196 void print_start(FILE *fplog, t_commrec *cr,
197 gmx_walltime_accounting_t walltime_accounting,
198 const char *name)
200 char buf[STRLEN];
202 sprintf(buf, "Started %s", name);
203 print_date_and_time(fplog, cr->nodeid, buf,
204 walltime_accounting_get_start_time_stamp(walltime_accounting));
207 static void sum_forces(int start, int end, rvec f[], rvec flr[])
209 int i;
211 if (gmx_debug_at)
213 pr_rvecs(debug, 0, "fsr", f+start, end-start);
214 pr_rvecs(debug, 0, "flr", flr+start, end-start);
216 for (i = start; (i < end); i++)
218 rvec_inc(f[i], flr[i]);
223 * calc_f_el calculates forces due to an electric field.
225 * force in kJ mol^-1 nm^-1 = charge in e * field in kJ mol^-1 nm^-1 e^-1
227 * Et[] contains the parameters for the time-dependent
228 * part of the field.
229 * Ex[] contains the parameters for
230 * the spatially dependent part of the field.
231 * The function should return the energy due to the electric field
232 * (if any) but for now returns 0.
234 * WARNING:
235 * There can be problems with the virial.
236 * Since the field is not self-consistent this is unavoidable.
237 * For neutral molecules the virial is correct within this approximation.
238 * For neutral systems with many charged molecules the error is small.
239 * But for systems with a net charge or a few charged molecules
240 * the error can be significant when the field is high.
241 * Solution: implement a self-consistent electric field into PME.
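/* Editorial sketch, inferred from the code below rather than stated in the
 * original comment: when three time parameters are given, the applied field
 * along dimension m is a Gaussian-enveloped cosine pulse,
 *   E_m(t) = cos(Et[m].a[0]*(t - Et[m].a[1]))
 *            * exp(-(t - Et[m].a[1])^2 / (2*Et[m].a[2]^2)),
 * otherwise only the cosine factor cos(Et[m].a[0]*t) is used; the result is
 * then scaled by the spatial amplitude Ex[m].a[0], converted from V/nm to
 * MD units with FIELDFAC.
 */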
243 static void calc_f_el(FILE *fp, int start, int homenr,
244 real charge[], rvec f[],
245 t_cosines Ex[], t_cosines Et[], double t)
247 rvec Ext;
248 real t0;
249 int i, m;
251 for (m = 0; (m < DIM); m++)
253 if (Et[m].n > 0)
255 if (Et[m].n == 3)
257 t0 = Et[m].a[1];
258 Ext[m] = std::cos(Et[m].a[0]*(t-t0))*std::exp(-gmx::square(t-t0)/(2.0*gmx::square(Et[m].a[2])));
260 else
262 Ext[m] = std::cos(Et[m].a[0]*t);
265 else
267 Ext[m] = 1.0;
269 if (Ex[m].n > 0)
271 /* Convert the field strength from V/nm to MD-units */
272 Ext[m] *= Ex[m].a[0]*FIELDFAC;
273 for (i = start; (i < start+homenr); i++)
275 f[i][m] += charge[i]*Ext[m];
278 else
280 Ext[m] = 0;
283 if (fp != NULL)
285 fprintf(fp, "%10g %10g %10g %10g #FIELD\n", t,
286 Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
290 static void calc_virial(int start, int homenr, rvec x[], rvec f[],
291 tensor vir_part, t_graph *graph, matrix box,
292 t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
294 int i;
296 /* The short-range virial from surrounding boxes */
297 clear_mat(vir_part);
298 calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
299 inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
301 /* Calculate partial virial, for local atoms only, based on short range.
302 * Total virial is computed in global_stat, called from do_md
304 f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
305 inc_nrnb(nrnb, eNR_VIRIAL, homenr);
307 /* Add position restraint contribution */
308 for (i = 0; i < DIM; i++)
310 vir_part[i][i] += fr->vir_diag_posres[i];
313 /* Add wall contribution */
314 for (i = 0; i < DIM; i++)
316 vir_part[i][ZZ] += fr->vir_wall_z[i];
319 if (debug)
321 pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
325 static void pull_potential_wrapper(t_commrec *cr,
326 t_inputrec *ir,
327 matrix box, rvec x[],
328 rvec f[],
329 tensor vir_force,
330 t_mdatoms *mdatoms,
331 gmx_enerdata_t *enerd,
332 real *lambda,
333 double t,
334 gmx_wallcycle_t wcycle)
336 t_pbc pbc;
337 real dvdl;
339 /* Calculate the center of mass forces, this requires communication,
340 * which is why pull_potential is called close to other communication.
341 * The virial contribution is calculated directly,
342 * which is why we call pull_potential after calc_virial.
344 wallcycle_start(wcycle, ewcPULLPOT);
345 set_pbc(&pbc, ir->ePBC, box);
346 dvdl = 0;
347 enerd->term[F_COM_PULL] +=
348 pull_potential(ir->pull_work, mdatoms, &pbc,
349 cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
350 enerd->dvdl_lin[efptRESTRAINT] += dvdl;
351 wallcycle_stop(wcycle, ewcPULLPOT);
354 static void pme_receive_force_ener(t_commrec *cr,
355 gmx_wallcycle_t wcycle,
356 gmx_enerdata_t *enerd,
357 t_forcerec *fr)
359 real e_q, e_lj, dvdl_q, dvdl_lj;
360 float cycles_ppdpme, cycles_seppme;
362 cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
363 dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
365 /* In case of node-splitting, the PP nodes receive the long-range
366 * forces, virial and energy from the PME nodes here.
368 wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
369 dvdl_q = 0;
370 dvdl_lj = 0;
371 gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e_q,
372 fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
373 &cycles_seppme);
374 enerd->term[F_COUL_RECIP] += e_q;
375 enerd->term[F_LJ_RECIP] += e_lj;
376 enerd->dvdl_lin[efptCOUL] += dvdl_q;
377 enerd->dvdl_lin[efptVDW] += dvdl_lj;
379 if (wcycle)
381 dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
383 wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
386 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
387 gmx_int64_t step, real pforce, rvec *x, rvec *f)
389 int i;
390 real pf2, fn2;
391 char buf[STEPSTRSIZE];
393 pf2 = gmx::square(pforce);
394 for (i = 0; i < md->homenr; i++)
396 fn2 = norm2(f[i]);
397 /* We also catch NaN (fn2 != fn2 holds only for NaN), if the compiler does not optimize this check away. */
398 if (fn2 >= pf2 || fn2 != fn2)
400 fprintf(fp, "step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
401 gmx_step_str(step, buf),
402 ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(fn2));
407 static void post_process_forces(t_commrec *cr,
408 gmx_int64_t step,
409 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
410 gmx_localtop_t *top,
411 matrix box, rvec x[],
412 rvec f[],
413 tensor vir_force,
414 t_mdatoms *mdatoms,
415 t_graph *graph,
416 t_forcerec *fr, gmx_vsite_t *vsite,
417 int flags)
419 if (fr->bF_NoVirSum)
421 if (vsite)
423 /* Spread the mesh force on virtual sites to the other particles...
424 * This is parallelized. MPI communication is performed
425 * if the constructing atoms aren't local.
427 wallcycle_start(wcycle, ewcVSITESPREAD);
428 spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
429 (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
430 nrnb,
431 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
432 wallcycle_stop(wcycle, ewcVSITESPREAD);
434 if (flags & GMX_FORCE_VIRIAL)
436 /* Now add the forces, this is local */
437 if (fr->bDomDec)
439 sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
441 else
443 sum_forces(0, mdatoms->homenr,
444 f, fr->f_novirsum);
446 if (EEL_FULL(fr->eeltype))
448 /* Add the mesh contribution to the virial */
449 m_add(vir_force, fr->vir_el_recip, vir_force);
451 if (EVDW_PME(fr->vdwtype))
453 /* Add the mesh contribution to the virial */
454 m_add(vir_force, fr->vir_lj_recip, vir_force);
456 if (debug)
458 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
463 if (fr->print_force >= 0)
465 print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
469 static void do_nb_verlet(t_forcerec *fr,
470 interaction_const_t *ic,
471 gmx_enerdata_t *enerd,
472 int flags, int ilocality,
473 int clearF,
474 t_nrnb *nrnb,
475 gmx_wallcycle_t wcycle)
477 int enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
478 nonbonded_verlet_group_t *nbvg;
479 gmx_bool bUsingGpuKernels;
481 if (!(flags & GMX_FORCE_NONBONDED))
483 /* skip non-bonded calculation */
484 return;
487 nbvg = &fr->nbv->grp[ilocality];
489 /* GPU kernel launch overhead is already timed separately */
490 if (fr->cutoff_scheme != ecutsVERLET)
492 gmx_incons("Invalid cut-off scheme passed!");
495 bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);
497 if (!bUsingGpuKernels)
499 wallcycle_sub_start(wcycle, ewcsNONBONDED);
501 switch (nbvg->kernel_type)
503 case nbnxnk4x4_PlainC:
504 nbnxn_kernel_ref(&nbvg->nbl_lists,
505 nbvg->nbat, ic,
506 fr->shift_vec,
507 flags,
508 clearF,
509 fr->fshift[0],
510 enerd->grpp.ener[egCOULSR],
511 fr->bBHAM ?
512 enerd->grpp.ener[egBHAMSR] :
513 enerd->grpp.ener[egLJSR]);
514 break;
516 case nbnxnk4xN_SIMD_4xN:
517 nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
518 nbvg->nbat, ic,
519 nbvg->ewald_excl,
520 fr->shift_vec,
521 flags,
522 clearF,
523 fr->fshift[0],
524 enerd->grpp.ener[egCOULSR],
525 fr->bBHAM ?
526 enerd->grpp.ener[egBHAMSR] :
527 enerd->grpp.ener[egLJSR]);
528 break;
529 case nbnxnk4xN_SIMD_2xNN:
530 nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
531 nbvg->nbat, ic,
532 nbvg->ewald_excl,
533 fr->shift_vec,
534 flags,
535 clearF,
536 fr->fshift[0],
537 enerd->grpp.ener[egCOULSR],
538 fr->bBHAM ?
539 enerd->grpp.ener[egBHAMSR] :
540 enerd->grpp.ener[egLJSR]);
541 break;
543 case nbnxnk8x8x8_GPU:
544 nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
545 break;
547 case nbnxnk8x8x8_PlainC:
548 nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
549 nbvg->nbat, ic,
550 fr->shift_vec,
551 flags,
552 clearF,
553 nbvg->nbat->out[0].f,
554 fr->fshift[0],
555 enerd->grpp.ener[egCOULSR],
556 fr->bBHAM ?
557 enerd->grpp.ener[egBHAMSR] :
558 enerd->grpp.ener[egLJSR]);
559 break;
561 default:
562 gmx_incons("Invalid nonbonded kernel type passed!");
565 if (!bUsingGpuKernels)
567 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
570 if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
572 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
574 else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
575 (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
577 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
579 else
581 enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
583 enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
584 if (flags & GMX_FORCE_ENERGY)
586 /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
587 enr_nbnxn_kernel_ljc += 1;
588 enr_nbnxn_kernel_lj += 1;
591 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
592 nbvg->nbl_lists.natpair_ljq);
593 inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
594 nbvg->nbl_lists.natpair_lj);
595 /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
596 inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
597 nbvg->nbl_lists.natpair_q);
599 if (ic->vdw_modifier == eintmodFORCESWITCH)
601 /* We add up the switch cost separately */
602 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
603 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
605 if (ic->vdw_modifier == eintmodPOTSWITCH)
607 /* We add up the switch cost separately */
608 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
609 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
611 if (ic->vdwtype == evdwPME)
613 /* We add up the LJ Ewald cost separately */
614 inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
615 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
619 static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
620 t_forcerec *fr,
621 rvec x[],
622 rvec f[],
623 t_mdatoms *mdatoms,
624 t_lambda *fepvals,
625 real *lambda,
626 gmx_enerdata_t *enerd,
627 int flags,
628 t_nrnb *nrnb,
629 gmx_wallcycle_t wcycle)
631 int donb_flags;
632 nb_kernel_data_t kernel_data;
633 real lam_i[efptNR];
634 real dvdl_nb[efptNR];
635 int th;
636 int i, j;
638 donb_flags = 0;
639 /* Add short-range interactions */
640 donb_flags |= GMX_NONBONDED_DO_SR;
642 /* Currently all group scheme kernels always calculate (shift-)forces */
643 if (flags & GMX_FORCE_FORCES)
645 donb_flags |= GMX_NONBONDED_DO_FORCE;
647 if (flags & GMX_FORCE_VIRIAL)
649 donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
651 if (flags & GMX_FORCE_ENERGY)
653 donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
656 kernel_data.flags = donb_flags;
657 kernel_data.lambda = lambda;
658 kernel_data.dvdl = dvdl_nb;
660 kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
661 kernel_data.energygrp_vdw = enerd->grpp.ener[egLJSR];
663 /* reset free energy components */
664 for (i = 0; i < efptNR; i++)
666 dvdl_nb[i] = 0;
669 assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);
671 wallcycle_sub_start(wcycle, ewcsNONBONDED);
672 #pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
673 for (th = 0; th < nbl_lists->nnbl; th++)
677 gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
678 x, f, fr, mdatoms, &kernel_data, nrnb);
680 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
683 if (fepvals->sc_alpha != 0)
685 enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
686 enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
688 else
690 enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
691 enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
694 /* If we do foreign lambda and we have soft-core interactions
695 * we have to recalculate the (non-linear) energy contributions.
697 if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
699 kernel_data.flags = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
700 kernel_data.lambda = lam_i;
701 kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
702 kernel_data.energygrp_vdw = enerd->foreign_grpp.ener[egLJSR];
703 /* Note that we add to kernel_data.dvdl, but ignore the result */
705 for (i = 0; i < enerd->n_lambda; i++)
707 for (j = 0; j < efptNR; j++)
709 lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
711 reset_foreign_enerdata(enerd);
712 #pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
713 for (th = 0; th < nbl_lists->nnbl; th++)
717 gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
718 x, f, fr, mdatoms, &kernel_data, nrnb);
720 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
723 sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
724 enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
728 wallcycle_sub_stop(wcycle, ewcsNONBONDED);
731 gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
733 return nbv != NULL && nbv->bUseGPU;
736 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
737 t_inputrec *inputrec,
738 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
739 gmx_localtop_t *top,
740 gmx_groups_t gmx_unused *groups,
741 matrix box, rvec x[], history_t *hist,
742 rvec f[],
743 tensor vir_force,
744 t_mdatoms *mdatoms,
745 gmx_enerdata_t *enerd, t_fcdata *fcd,
746 real *lambda, t_graph *graph,
747 t_forcerec *fr, interaction_const_t *ic,
748 gmx_vsite_t *vsite, rvec mu_tot,
749 double t, FILE *field, gmx_edsam_t ed,
750 gmx_bool bBornRadii,
751 int flags)
753 int cg1, i, j;
754 int start, homenr;
755 double mu[2*DIM];
756 gmx_bool bStateChanged, bNS, bFillGrid, bCalcCGCM;
757 gmx_bool bDoForces, bUseGPU, bUseOrEmulGPU;
758 gmx_bool bDiffKernels = FALSE;
759 rvec vzero, box_diag;
760 float cycles_pme, cycles_force, cycles_wait_gpu;
761 /* TODO To avoid loss of precision, float can't be used for a
762 * cycle count. Build an object that can do this right and perhaps
763 * also be used by gmx_wallcycle_t */
764 gmx_cycles_t cycleCountBeforeLocalWorkCompletes = 0;
765 nonbonded_verlet_t *nbv;
767 cycles_force = 0;
768 cycles_wait_gpu = 0;
769 nbv = fr->nbv;
771 start = 0;
772 homenr = mdatoms->homenr;
774 clear_mat(vir_force);
776 if (DOMAINDECOMP(cr))
778 cg1 = cr->dd->ncg_tot;
780 else
782 cg1 = top->cgs.nr;
784 if (fr->n_tpi > 0)
786 cg1--;
789 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
790 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
791 bFillGrid = (bNS && bStateChanged);
792 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
793 bDoForces = (flags & GMX_FORCE_FORCES);
794 bUseGPU = fr->nbv->bUseGPU;
795 bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
797 if (bStateChanged)
799 update_forcerec(fr, box);
801 if (inputrecNeedMutot(inputrec))
803 /* Calculate total (local) dipole moment in a temporary common array.
804 * This makes it possible to sum them over nodes faster.
806 calc_mu(start, homenr,
807 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
808 mu, mu+DIM);
812 if (fr->ePBC != epbcNONE)
814 /* Compute shift vectors every step,
815 * because of pressure coupling or box deformation!
817 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
819 calc_shifts(box, fr->shift_vec);
822 if (bCalcCGCM)
824 put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
825 inc_nrnb(nrnb, eNR_SHIFTX, homenr);
827 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
829 unshift_self(graph, box, x);
833 nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
834 fr->shift_vec, nbv->grp[0].nbat);
836 #ifdef GMX_MPI
837 if (!(cr->duty & DUTY_PME))
839 gmx_bool bBS;
840 matrix boxs;
842 /* Send particle coordinates to the pme nodes.
843 * Since this is only implemented for domain decomposition
844 * and domain decomposition does not use the graph,
845 * we do not need to worry about shifting.
848 int pme_flags = 0;
850 wallcycle_start(wcycle, ewcPP_PMESENDX);
852 bBS = (inputrec->nwall == 2);
853 if (bBS)
855 copy_mat(box, boxs);
856 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
859 if (EEL_PME(fr->eeltype))
861 pme_flags |= GMX_PME_DO_COULOMB;
864 if (EVDW_PME(fr->vdwtype))
866 pme_flags |= GMX_PME_DO_LJ;
869 gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
870 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
871 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
872 pme_flags, step);
874 wallcycle_stop(wcycle, ewcPP_PMESENDX);
876 #endif /* GMX_MPI */
878 /* do gridding for pair search */
879 if (bNS)
881 if (graph && bStateChanged)
883 /* Calculate intramolecular shift vectors to make molecules whole */
884 mk_mshift(fplog, graph, fr->ePBC, box, x);
887 clear_rvec(vzero);
888 box_diag[XX] = box[XX][XX];
889 box_diag[YY] = box[YY][YY];
890 box_diag[ZZ] = box[ZZ][ZZ];
892 wallcycle_start(wcycle, ewcNS);
893 if (!fr->bDomDec)
895 wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
896 nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
897 0, vzero, box_diag,
898 0, mdatoms->homenr, -1, fr->cginfo, x,
899 0, NULL,
900 nbv->grp[eintLocal].kernel_type,
901 nbv->grp[eintLocal].nbat);
902 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
904 else
906 wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
907 nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
908 fr->cginfo, x,
909 nbv->grp[eintNonlocal].kernel_type,
910 nbv->grp[eintNonlocal].nbat);
911 wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
914 if (nbv->ngrp == 1 ||
915 nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
917 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
918 nbv->nbs, mdatoms, fr->cginfo);
920 else
922 nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
923 nbv->nbs, mdatoms, fr->cginfo);
924 nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
925 nbv->nbs, mdatoms, fr->cginfo);
927 wallcycle_stop(wcycle, ewcNS);
930 /* initialize the GPU atom data and copy shift vector */
931 if (bUseGPU)
933 if (bNS)
935 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
936 nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
937 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
940 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
941 nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
942 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
945 /* do local pair search */
946 if (bNS)
948 wallcycle_start_nocount(wcycle, ewcNS);
949 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
950 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
951 &top->excls,
952 ic->rlist,
953 nbv->min_ci_balanced,
954 &nbv->grp[eintLocal].nbl_lists,
955 eintLocal,
956 nbv->grp[eintLocal].kernel_type,
957 nrnb);
958 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
960 if (bUseGPU)
962 /* initialize local pair-list on the GPU */
963 nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
964 nbv->grp[eintLocal].nbl_lists.nbl[0],
965 eintLocal);
967 wallcycle_stop(wcycle, ewcNS);
969 else
971 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
972 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
973 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
974 nbv->grp[eintLocal].nbat);
975 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
976 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
979 if (bUseGPU)
981 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
982 /* launch local nonbonded F on GPU */
983 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
984 nrnb, wcycle);
985 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
988 /* Communicate coordinates and sum dipole if necessary +
989 do non-local pair search */
990 if (DOMAINDECOMP(cr))
992 bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
993 nbv->grp[eintLocal].kernel_type);
995 if (bDiffKernels)
997 /* With GPU+CPU non-bonded calculations we need to copy
998 * the local coordinates to the non-local nbat struct
999 * (in CPU format) as the non-local kernel call also
1000 * calculates the local - non-local interactions.
1002 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1003 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1004 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
1005 nbv->grp[eintNonlocal].nbat);
1006 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1007 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1010 if (bNS)
1012 wallcycle_start_nocount(wcycle, ewcNS);
1013 wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1015 if (bDiffKernels)
1017 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
1020 nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
1021 &top->excls,
1022 ic->rlist,
1023 nbv->min_ci_balanced,
1024 &nbv->grp[eintNonlocal].nbl_lists,
1025 eintNonlocal,
1026 nbv->grp[eintNonlocal].kernel_type,
1027 nrnb);
1029 wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
1031 if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
1033 /* initialize non-local pair-list on the GPU */
1034 nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
1035 nbv->grp[eintNonlocal].nbl_lists.nbl[0],
1036 eintNonlocal);
1038 wallcycle_stop(wcycle, ewcNS);
1040 else
1042 wallcycle_start(wcycle, ewcMOVEX);
1043 dd_move_x(cr->dd, box, x);
1045 /* When we don't need the total dipole we sum it in global_stat */
1046 if (bStateChanged && inputrecNeedMutot(inputrec))
1048 gmx_sumd(2*DIM, mu, cr);
1050 wallcycle_stop(wcycle, ewcMOVEX);
1052 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1053 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1054 nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
1055 nbv->grp[eintNonlocal].nbat);
1056 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1057 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1060 if (bUseGPU && !bDiffKernels)
1062 wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
1063 /* launch non-local nonbonded F on GPU */
1064 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
1065 nrnb, wcycle);
1066 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1070 if (bUseGPU)
1072 /* launch D2H copy-back F */
1073 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1074 if (DOMAINDECOMP(cr) && !bDiffKernels)
1076 nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
1077 flags, eatNonlocal);
1079 nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
1080 flags, eatLocal);
1081 cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1084 if (bStateChanged && inputrecNeedMutot(inputrec))
1086 if (PAR(cr))
1088 gmx_sumd(2*DIM, mu, cr);
1091 for (i = 0; i < 2; i++)
1093 for (j = 0; j < DIM; j++)
1095 fr->mu_tot[i][j] = mu[i*DIM + j];
1099 if (fr->efep == efepNO)
1101 copy_rvec(fr->mu_tot[0], mu_tot);
1103 else
1105 for (j = 0; j < DIM; j++)
1107 mu_tot[j] =
1108 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1109 lambda[efptCOUL]*fr->mu_tot[1][j];
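/* Editorial note: with free-energy perturbation the reported total dipole is
 * a linear interpolation between the A-state (mu_tot[0]) and B-state
 * (mu_tot[1]) dipoles at the current Coulomb coupling lambda[efptCOUL]. */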
1113 /* Reset energies */
1114 reset_enerdata(enerd);
1115 clear_rvecs(SHIFTS, fr->fshift);
1117 if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1119 wallcycle_start(wcycle, ewcPPDURINGPME);
1120 dd_force_flop_start(cr->dd, nrnb);
1123 if (inputrec->bRot)
1125 /* Enforced rotation has its own cycle counter that starts after the collective
1126 * coordinates have been communicated. It is added to ddCyclF to allow
1127 * for proper load-balancing */
1128 wallcycle_start(wcycle, ewcROT);
1129 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1130 wallcycle_stop(wcycle, ewcROT);
1133 /* Start the force cycle counter.
1134 * This counter is stopped after do_force_lowlevel.
1135 * No parallel communication should occur while this counter is running,
1136 * since that will interfere with the dynamic load balancing.
1138 wallcycle_start(wcycle, ewcFORCE);
1139 if (bDoForces)
1141 /* Reset forces for which the virial is calculated separately:
1142 * PME/Ewald forces if necessary */
1143 if (fr->bF_NoVirSum)
1145 if (flags & GMX_FORCE_VIRIAL)
1147 fr->f_novirsum = fr->f_novirsum_alloc;
1148 if (fr->bDomDec)
1150 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1152 else
1154 clear_rvecs(homenr, fr->f_novirsum+start);
1157 else
1159 /* We are not calculating the pressure so we do not need
1160 * a separate array for forces that do not contribute
1161 * to the pressure.
1163 fr->f_novirsum = f;
1167 /* Clear the short- and long-range forces */
1168 clear_rvecs(fr->natoms_force_constr, f);
1170 clear_rvec(fr->vir_diag_posres);
1173 if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
1175 clear_pull_forces(inputrec->pull_work);
1178 /* We calculate the non-bonded forces, when done on the CPU, here.
1179 * We do this before calling do_force_lowlevel, because in that
1180 * function, the listed forces are calculated before PME, which
1181 * does communication. With this order, non-bonded and listed
1182 * force calculation imbalance can be balanced out by the domain
1183 * decomposition load balancing.
1186 if (!bUseOrEmulGPU)
1188 /* Maybe we should move this into do_force_lowlevel */
1189 do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1190 nrnb, wcycle);
1193 if (fr->efep != efepNO)
1195 /* Calculate the local and non-local free energy interactions here.
1196 * Happens here on the CPU both with and without GPU.
1198 if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
1200 do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
1201 fr, x, f, mdatoms,
1202 inputrec->fepvals, lambda,
1203 enerd, flags, nrnb, wcycle);
1206 if (DOMAINDECOMP(cr) &&
1207 fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
1209 do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
1210 fr, x, f, mdatoms,
1211 inputrec->fepvals, lambda,
1212 enerd, flags, nrnb, wcycle);
1216 if (!bUseOrEmulGPU || bDiffKernels)
1218 int aloc;
1220 if (DOMAINDECOMP(cr))
1222 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1223 bDiffKernels ? enbvClearFYes : enbvClearFNo,
1224 nrnb, wcycle);
1227 if (!bUseOrEmulGPU)
1229 aloc = eintLocal;
1231 else
1233 aloc = eintNonlocal;
1236 /* Add all the non-bonded force to the normal force array.
1237 * This can be split into a local and a non-local part when overlapping
1238 * communication with calculation with domain decomposition.
1240 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1241 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1242 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1243 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1244 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1245 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1246 wallcycle_start_nocount(wcycle, ewcFORCE);
1248 /* if there are multiple fshift output buffers reduce them */
1249 if ((flags & GMX_FORCE_VIRIAL) &&
1250 nbv->grp[aloc].nbl_lists.nnbl > 1)
1252 /* This is not in a subcounter because it takes a
1253 negligible and constant-sized amount of time */
1254 nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1255 fr->fshift);
1259 /* update QMMMrec, if necessary */
1260 if (fr->bQMMM)
1262 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1265 /* Compute the bonded and non-bonded energies and optionally forces */
1266 do_force_lowlevel(fr, inputrec, &(top->idef),
1267 cr, nrnb, wcycle, mdatoms,
1268 x, hist, f, enerd, fcd, top, fr->born,
1269 bBornRadii, box,
1270 inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1271 flags, &cycles_pme);
1273 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1275 if (ed)
1277 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1280 if (bUseOrEmulGPU && !bDiffKernels)
1282 /* wait for non-local forces (or calculate in emulation mode) */
1283 if (DOMAINDECOMP(cr))
1285 if (bUseGPU)
1287 float cycles_tmp;
1289 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1290 nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1291 flags, eatNonlocal,
1292 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1293 fr->fshift);
1294 cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1295 cycles_wait_gpu += cycles_tmp;
1296 cycles_force += cycles_tmp;
1298 else
1300 wallcycle_start_nocount(wcycle, ewcFORCE);
1301 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1302 nrnb, wcycle);
1303 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1305 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1306 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1307 /* skip the reduction if there was no non-local work to do */
1308 if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1310 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1311 nbv->grp[eintNonlocal].nbat, f);
1313 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1314 cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1318 if (bDoForces && DOMAINDECOMP(cr))
1320 if (bUseGPU && useCuda)
1322 /* We are done with the CPU compute, but the GPU local non-bonded
1323 * kernel can still be running while we communicate the forces.
1324 * We start a counter here, so we can, hopefully, time the rest
1325 * of the GPU kernel execution and data transfer.
1327 cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();
1330 /* Communicate the forces */
1331 wallcycle_start(wcycle, ewcMOVEF);
1332 dd_move_f(cr->dd, f, fr->fshift);
1333 wallcycle_stop(wcycle, ewcMOVEF);
1336 if (bUseOrEmulGPU)
1338 /* wait for local forces (or calculate in emulation mode) */
1339 if (bUseGPU && useCuda)
1341 float cycles_tmp, cycles_wait_est;
1342 const float cuda_api_overhead_margin = 50000.0f; /* cycles */
1344 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1345 nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1346 flags, eatLocal,
1347 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1348 fr->fshift);
1349 cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1351 if (bDoForces && DOMAINDECOMP(cr))
1353 cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;
1355 if (cycles_tmp < cuda_api_overhead_margin)
357 /* We measured only a few cycles; it could be that the kernel
358 * and transfer finished earlier and there was no actual
359 * wait time, only API call overhead.
1360 * Then the actual time could be anywhere between 0 and
1361 * cycles_wait_est. As a compromise, we use half the time.
1363 cycles_wait_est *= 0.5f;
1366 else
1368 /* No force communication so we actually timed the wait */
1369 cycles_wait_est = cycles_tmp;
1371 /* Even though this is after dd_move_f, the actual task we are
1372 * waiting for runs asynchronously with dd_move_f and we usually
1373 * have nothing to balance it with, so we can and should add
1374 * the time to the force time for load balancing.
1376 cycles_force += cycles_wait_est;
1377 cycles_wait_gpu += cycles_wait_est;
1379 else if (bUseGPU && useOpenCL)
1382 wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1383 nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1384 flags, eatLocal,
1385 enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1386 fr->fshift);
1387 cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1389 if (bUseGPU)
1391 /* now clear the GPU outputs while we finish the step on the CPU */
1392 wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1393 nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
1394 wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1396 else
1398 wallcycle_start_nocount(wcycle, ewcFORCE);
1399 do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1400 DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1401 nrnb, wcycle);
1402 wallcycle_stop(wcycle, ewcFORCE);
1404 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1405 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1406 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1407 nbv->grp[eintLocal].nbat, f);
1408 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1409 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1412 if (DOMAINDECOMP(cr))
1414 dd_force_flop_stop(cr->dd, nrnb);
1415 if (wcycle)
1417 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1418 if (bUseGPU)
1420 dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
1425 if (bDoForces)
1427 if (inputrecElecField(inputrec))
1429 /* Compute forces due to electric field */
1430 calc_f_el(MASTER(cr) ? field : NULL,
1431 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1432 inputrec->ex, inputrec->et, t);
1435 /* If we have NoVirSum forces, but we do not calculate the virial,
1436 * we sum fr->f_novirsum=f later.
1438 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1440 wallcycle_start(wcycle, ewcVSITESPREAD);
1441 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1442 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1443 wallcycle_stop(wcycle, ewcVSITESPREAD);
1446 if (flags & GMX_FORCE_VIRIAL)
1448 /* Calculation of the virial must be done after vsites! */
1449 calc_virial(0, mdatoms->homenr, x, f,
1450 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1454 if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
1456 /* Since the COM pulling is always done mass-weighted, no forces are
1457 * applied to vsites and this call can be done after vsite spreading.
1459 pull_potential_wrapper(cr, inputrec, box, x,
1460 f, vir_force, mdatoms, enerd, lambda, t,
1461 wcycle);
1464 /* Add the forces from enforced rotation potentials (if any) */
1465 if (inputrec->bRot)
1467 wallcycle_start(wcycle, ewcROTadd);
1468 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1469 wallcycle_stop(wcycle, ewcROTadd);
1472 /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1473 IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1475 if (PAR(cr) && !(cr->duty & DUTY_PME))
1477 /* In case of node-splitting, the PP nodes receive the long-range
1478 * forces, virial and energy from the PME nodes here.
1480 pme_receive_force_ener(cr, wcycle, enerd, fr);
1483 if (bDoForces)
1485 post_process_forces(cr, step, nrnb, wcycle,
1486 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1487 flags);
1490 /* Sum the potential energy terms from group contributions */
1491 sum_epot(&(enerd->grpp), enerd->term);
1494 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1495 t_inputrec *inputrec,
1496 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1497 gmx_localtop_t *top,
1498 gmx_groups_t *groups,
1499 matrix box, rvec x[], history_t *hist,
1500 rvec f[],
1501 tensor vir_force,
1502 t_mdatoms *mdatoms,
1503 gmx_enerdata_t *enerd, t_fcdata *fcd,
1504 real *lambda, t_graph *graph,
1505 t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1506 double t, FILE *field, gmx_edsam_t ed,
1507 gmx_bool bBornRadii,
1508 int flags)
1510 int cg0, cg1, i, j;
1511 int start, homenr;
1512 double mu[2*DIM];
1513 gmx_bool bStateChanged, bNS, bFillGrid, bCalcCGCM;
1514 gmx_bool bDoForces;
1515 float cycles_pme, cycles_force;
1517 start = 0;
1518 homenr = mdatoms->homenr;
1520 clear_mat(vir_force);
1522 cg0 = 0;
1523 if (DOMAINDECOMP(cr))
1525 cg1 = cr->dd->ncg_tot;
1527 else
1529 cg1 = top->cgs.nr;
1531 if (fr->n_tpi > 0)
1533 cg1--;
1536 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
1537 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1538 /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
1539 bFillGrid = (bNS && bStateChanged);
1540 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
1541 bDoForces = (flags & GMX_FORCE_FORCES);
1543 if (bStateChanged)
1545 update_forcerec(fr, box);
1547 if (inputrecNeedMutot(inputrec))
1549 /* Calculate total (local) dipole moment in a temporary common array.
1550 * This makes it possible to sum them over nodes faster.
1552 calc_mu(start, homenr,
1553 x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1554 mu, mu+DIM);
1558 if (fr->ePBC != epbcNONE)
1560 /* Compute shift vectors every step,
1561 * because of pressure coupling or box deformation!
1563 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1565 calc_shifts(box, fr->shift_vec);
1568 if (bCalcCGCM)
1570 put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1571 &(top->cgs), x, fr->cg_cm);
1572 inc_nrnb(nrnb, eNR_CGCM, homenr);
1573 inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1575 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1577 unshift_self(graph, box, x);
1580 else if (bCalcCGCM)
1582 calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1583 inc_nrnb(nrnb, eNR_CGCM, homenr);
1586 if (bCalcCGCM && gmx_debug_at)
1588 pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1591 #ifdef GMX_MPI
1592 if (!(cr->duty & DUTY_PME))
1594 gmx_bool bBS;
1595 matrix boxs;
1597 /* Send particle coordinates to the pme nodes.
1598 * Since this is only implemented for domain decomposition
1599 * and domain decomposition does not use the graph,
1600 * we do not need to worry about shifting.
1603 int pme_flags = 0;
1605 wallcycle_start(wcycle, ewcPP_PMESENDX);
1607 bBS = (inputrec->nwall == 2);
1608 if (bBS)
1610 copy_mat(box, boxs);
1611 svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1614 if (EEL_PME(fr->eeltype))
1616 pme_flags |= GMX_PME_DO_COULOMB;
1619 if (EVDW_PME(fr->vdwtype))
1621 pme_flags |= GMX_PME_DO_LJ;
1624 gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
1625 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
1626 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
1627 pme_flags, step);
1629 wallcycle_stop(wcycle, ewcPP_PMESENDX);
1631 #endif /* GMX_MPI */
1633 /* Communicate coordinates and sum dipole if necessary */
1634 if (DOMAINDECOMP(cr))
1636 wallcycle_start(wcycle, ewcMOVEX);
1637 dd_move_x(cr->dd, box, x);
1638 wallcycle_stop(wcycle, ewcMOVEX);
1641 if (inputrecNeedMutot(inputrec))
1643 if (bStateChanged)
1645 if (PAR(cr))
1647 gmx_sumd(2*DIM, mu, cr);
1649 for (i = 0; i < 2; i++)
1651 for (j = 0; j < DIM; j++)
1653 fr->mu_tot[i][j] = mu[i*DIM + j];
1657 if (fr->efep == efepNO)
1659 copy_rvec(fr->mu_tot[0], mu_tot);
1661 else
1663 for (j = 0; j < DIM; j++)
1665 mu_tot[j] =
1666 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1671 /* Reset energies */
1672 reset_enerdata(enerd);
1673 clear_rvecs(SHIFTS, fr->fshift);
1675 if (bNS)
1677 wallcycle_start(wcycle, ewcNS);
1679 if (graph && bStateChanged)
1681 /* Calculate intramolecular shift vectors to make molecules whole */
1682 mk_mshift(fplog, graph, fr->ePBC, box, x);
1685 /* Do the actual neighbour searching */
1686 ns(fplog, fr, box,
1687 groups, top, mdatoms,
1688 cr, nrnb, bFillGrid);
1690 wallcycle_stop(wcycle, ewcNS);
1693 if (inputrec->implicit_solvent && bNS)
1695 make_gb_nblist(cr, inputrec->gb_algorithm,
1696 x, box, fr, &top->idef, graph, fr->born);
1699 if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1701 wallcycle_start(wcycle, ewcPPDURINGPME);
1702 dd_force_flop_start(cr->dd, nrnb);
1705 if (inputrec->bRot)
1707 /* Enforced rotation has its own cycle counter that starts after the collective
1708 * coordinates have been communicated. It is added to ddCyclF to allow
1709 * for proper load-balancing */
1710 wallcycle_start(wcycle, ewcROT);
1711 do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1712 wallcycle_stop(wcycle, ewcROT);
1715 /* Start the force cycle counter.
1716 * This counter is stopped after do_force_lowlevel.
1717 * No parallel communication should occur while this counter is running,
1718 * since that will interfere with the dynamic load balancing.
1720 wallcycle_start(wcycle, ewcFORCE);
1722 if (bDoForces)
1724 /* Reset forces for which the virial is calculated separately:
1725 * PME/Ewald forces if necessary */
1726 if (fr->bF_NoVirSum)
1728 if (flags & GMX_FORCE_VIRIAL)
1730 fr->f_novirsum = fr->f_novirsum_alloc;
1731 if (fr->bDomDec)
1733 clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1735 else
1737 clear_rvecs(homenr, fr->f_novirsum+start);
1740 else
1742 /* We are not calculating the pressure so we do not need
1743 * a separate array for forces that do not contribute
1744 * to the pressure.
1746 fr->f_novirsum = f;
1750 /* Clear the short- and long-range forces */
1751 clear_rvecs(fr->natoms_force_constr, f);
1753 clear_rvec(fr->vir_diag_posres);
1755 if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
1757 clear_pull_forces(inputrec->pull_work);
1760 /* update QMMMrec, if necessary */
1761 if (fr->bQMMM)
1763 update_QMMMrec(cr, fr, x, mdatoms, box, top);
1766 /* Compute the bonded and non-bonded energies and optionally forces */
1767 do_force_lowlevel(fr, inputrec, &(top->idef),
1768 cr, nrnb, wcycle, mdatoms,
1769 x, hist, f, enerd, fcd, top, fr->born,
1770 bBornRadii, box,
1771 inputrec->fepvals, lambda,
1772 graph, &(top->excls), fr->mu_tot,
1773 flags,
1774 &cycles_pme);
1776 cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1778 if (ed)
1780 do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1783 if (DOMAINDECOMP(cr))
1785 dd_force_flop_stop(cr->dd, nrnb);
1786 if (wcycle)
1788 dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1792 if (bDoForces)
1794 if (inputrecElecField(inputrec))
1796 /* Compute forces due to electric field */
1797 calc_f_el(MASTER(cr) ? field : NULL,
1798 start, homenr, mdatoms->chargeA, fr->f_novirsum,
1799 inputrec->ex, inputrec->et, t);
1802 /* Communicate the forces */
1803 if (DOMAINDECOMP(cr))
1805 wallcycle_start(wcycle, ewcMOVEF);
1806 dd_move_f(cr->dd, f, fr->fshift);
1807 /* Do we need to communicate the separate force array
1808 * for terms that do not contribute to the single sum virial?
1809 * Position restraints and electric fields do not introduce
1810 * inter-cg forces, only full electrostatics methods do.
1811 * When we do not calculate the virial, fr->f_novirsum = f,
1812 * so we have already communicated these forces.
1814 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1815 (flags & GMX_FORCE_VIRIAL))
1817 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1819 wallcycle_stop(wcycle, ewcMOVEF);
1822 /* If we have NoVirSum forces, but we do not calculate the virial,
1823 * we sum fr->f_novirsum=f later.
1825 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1827 wallcycle_start(wcycle, ewcVSITESPREAD);
1828 spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1829 &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1830 wallcycle_stop(wcycle, ewcVSITESPREAD);
1833 if (flags & GMX_FORCE_VIRIAL)
1835 /* Calculation of the virial must be done after vsites! */
1836 calc_virial(0, mdatoms->homenr, x, f,
1837 vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1841 if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
1843 pull_potential_wrapper(cr, inputrec, box, x,
1844 f, vir_force, mdatoms, enerd, lambda, t,
1845 wcycle);
1848 /* Add the forces from enforced rotation potentials (if any) */
1849 if (inputrec->bRot)
1851 wallcycle_start(wcycle, ewcROTadd);
1852 enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1853 wallcycle_stop(wcycle, ewcROTadd);
1856 /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1857 IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1859 if (PAR(cr) && !(cr->duty & DUTY_PME))
1861 /* In case of node-splitting, the PP nodes receive the long-range
1862 * forces, virial and energy from the PME nodes here.
1864 pme_receive_force_ener(cr, wcycle, enerd, fr);
1867 if (bDoForces)
1869 post_process_forces(cr, step, nrnb, wcycle,
1870 top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1871 flags);
1874 /* Sum the potential energy terms from group contributions */
1875 sum_epot(&(enerd->grpp), enerd->term);
1878 void do_force(FILE *fplog, t_commrec *cr,
1879 t_inputrec *inputrec,
1880 gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1881 gmx_localtop_t *top,
1882 gmx_groups_t *groups,
1883 matrix box, rvec x[], history_t *hist,
1884 rvec f[],
1885 tensor vir_force,
1886 t_mdatoms *mdatoms,
1887 gmx_enerdata_t *enerd, t_fcdata *fcd,
1888 real *lambda, t_graph *graph,
1889 t_forcerec *fr,
1890 gmx_vsite_t *vsite, rvec mu_tot,
1891 double t, FILE *field, gmx_edsam_t ed,
1892 gmx_bool bBornRadii,
1893 int flags)
1895 /* modify force flag if not doing nonbonded */
1896 if (!fr->bNonbonded)
1898 flags &= ~GMX_FORCE_NONBONDED;
1901 switch (inputrec->cutoff_scheme)
1903 case ecutsVERLET:
1904 do_force_cutsVERLET(fplog, cr, inputrec,
1905 step, nrnb, wcycle,
1906 top,
1907 groups,
1908 box, x, hist,
1909 f, vir_force,
1910 mdatoms,
1911 enerd, fcd,
1912 lambda, graph,
1913 fr, fr->ic,
1914 vsite, mu_tot,
1915 t, field, ed,
1916 bBornRadii,
1917 flags);
1918 break;
1919 case ecutsGROUP:
1920 do_force_cutsGROUP(fplog, cr, inputrec,
1921 step, nrnb, wcycle,
1922 top,
1923 groups,
1924 box, x, hist,
1925 f, vir_force,
1926 mdatoms,
1927 enerd, fcd,
1928 lambda, graph,
1929 fr, vsite, mu_tot,
1930 t, field, ed,
1931 bBornRadii,
1932 flags);
1933 break;
1934 default:
1935 gmx_incons("Invalid cut-off scheme passed!");
1940 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
1941 t_inputrec *ir, t_mdatoms *md,
1942 t_state *state, t_commrec *cr, t_nrnb *nrnb,
1943 t_forcerec *fr, gmx_localtop_t *top)
1945 int i, m, start, end;
1946 gmx_int64_t step;
1947 real dt = ir->delta_t;
1948 real dvdl_dum;
1949 rvec *savex;
1951 /* We need to allocate one extra element, since we might use
1952 * (unaligned) 4-wide SIMD loads to access rvec entries.
1954 snew(savex, state->natoms + 1);
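/* Editorial illustration (an assumption, not part of this change): an rvec
 * holds three reals, so an unaligned 4-wide SIMD load that starts at the
 * last rvec of an natoms-sized array reads one real past the end; the extra
 * element allocated above keeps such a read inside allocated memory. */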
1956 start = 0;
1957 end = md->homenr;
1959 if (debug)
1961 fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
1962 start, md->homenr, end);
1964 /* Do a first constrain to reset particles... */
1965 step = ir->init_step;
1966 if (fplog)
1968 char buf[STEPSTRSIZE];
1969 fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
1970 gmx_step_str(step, buf));
1972 dvdl_dum = 0;
1974 /* constrain the current position */
1975 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
1976 ir, cr, step, 0, 1.0, md,
1977 state->x, state->x, NULL,
1978 fr->bMolPBC, state->box,
1979 state->lambda[efptBONDED], &dvdl_dum,
1980 NULL, NULL, nrnb, econqCoord);
1981 if (EI_VV(ir->eI))
1984 /* constrain the initial velocity, and save it */
1985 /* this may also be useful if we need the ekin from the half step for velocity Verlet */
1985 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
1986 ir, cr, step, 0, 1.0, md,
1987 state->x, state->v, state->v,
1988 fr->bMolPBC, state->box,
1989 state->lambda[efptBONDED], &dvdl_dum,
1990 NULL, NULL, nrnb, econqVeloc);
1992 /* constrain the initial velocities at t-dt/2 */
1993 if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
1995 for (i = start; (i < end); i++)
1997 for (m = 0; (m < DIM); m++)
1999 /* Reverse the velocity */
2000 state->v[i][m] = -state->v[i][m];
2001 /* Store the position at t-dt in savex */
2002 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
2005 /* Shake the positions at t=-dt with the positions at t=0
2006 * as reference coordinates.
2008 if (fplog)
2010 char buf[STEPSTRSIZE];
2011 fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2012 gmx_step_str(step, buf));
2014 dvdl_dum = 0;
2015 constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2016 ir, cr, step, -1, 1.0, md,
2017 state->x, savex, NULL,
2018 fr->bMolPBC, state->box,
2019 state->lambda[efptBONDED], &dvdl_dum,
2020 state->v, NULL, nrnb, econqCoord);
2022 for (i = start; i < end; i++)
2024 for (m = 0; m < DIM; m++)
2026 /* Re-reverse the velocities */
2027 state->v[i][m] = -state->v[i][m];
2031 sfree(savex);
2035 static void
2036 integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
2037 double *enerout, double *virout)
2039 double enersum, virsum;
2040 double invscale, invscale2, invscale3;
2041 double r, ea, eb, ec, pa, pb, pc, pd;
2042 double y0, f, g, h;
2043 int ri, offset;
2044 double tabfactor;
2046 invscale = 1.0/scale;
2047 invscale2 = invscale*invscale;
2048 invscale3 = invscale*invscale2;
2050 /* Following summation derived from cubic spline definition,
2051 * Numerical Recipes in C, second edition, p. 113-116. Exact for
2052 * the cubic spline. We first calculate the negative of the
2053 * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
2054 * add the more standard, abrupt cutoff correction to that result,
2055 * yielding the long-range correction for a switched function. We
2056 * perform both the pressure and energy loops at the same time for
2057 * simplicity, as the computational cost is low. */
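/* Editorial sketch of what the loop below evaluates: with the tabulated
 * cubic spline V(u) = y0 + F*u + G*u^2 + H*u^3 on each interval, the loop
 * analytically accumulates
 *   enersum = int r^2 V(r) dr   and   virsum = int r^3 dV/dr dr
 * over [rstart/scale, rend/scale]; the results are multiplied below by
 * 4*pi and by tabfactor to undo the 6/12 scaling of the stored tables. */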
2059 if (offstart == 0)
2061 /* Since the dispersion table has been scaled down by a factor of
2062 * 6.0 and the repulsion by a factor of 12.0 to compensate for the
2063 * c6/c12 parameters inside nbfp[] being scaled up (to save
2064 * flops in kernels), we need to correct for this.
2066 tabfactor = 6.0;
2068 else
2070 tabfactor = 12.0;
2073 enersum = 0.0;
2074 virsum = 0.0;
2075 for (ri = rstart; ri < rend; ++ri)
2077 r = ri*invscale;
2078 ea = invscale3;
2079 eb = 2.0*invscale2*r;
2080 ec = invscale*r*r;
2082 pa = invscale3;
2083 pb = 3.0*invscale2*r;
2084 pc = 3.0*invscale*r*r;
2085 pd = r*r*r;
2087 /* this "8" is from the packing in the vdwtab array - perhaps
2088 should be defined? */
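/* Editorial note on the layout assumed here: each table point stores the
 * four spline coefficients (y0, f, g, h) for dispersion at offsets 0..3 and
 * for repulsion at offsets 4..7, hence the stride of 8; the offstart
 * argument (0 or 4) selects which of the two interactions is integrated. */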
2090 offset = 8*ri + offstart;
2091 y0 = vdwtab[offset];
2092 f = vdwtab[offset+1];
2093 g = vdwtab[offset+2];
2094 h = vdwtab[offset+3];
2096 enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2097 virsum += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2099 *enerout = 4.0*M_PI*enersum*tabfactor;
2100 *virout = 4.0*M_PI*virsum*tabfactor;
2103 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2105 double eners[2], virs[2], enersum, virsum;
2106 double r0, rc3, rc9;
2107 int ri0, ri1, i;
2108 real scale, *vdwtab;
2110 fr->enershiftsix = 0;
2111 fr->enershifttwelve = 0;
2112 fr->enerdiffsix = 0;
2113 fr->enerdifftwelve = 0;
2114 fr->virdiffsix = 0;
2115 fr->virdifftwelve = 0;
2117 if (eDispCorr != edispcNO)
2119 for (i = 0; i < 2; i++)
2121 eners[i] = 0;
2122 virs[i] = 0;
2124 if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
2125 (fr->vdw_modifier == eintmodPOTSWITCH) ||
2126 (fr->vdw_modifier == eintmodFORCESWITCH) ||
2127 (fr->vdwtype == evdwSHIFT) ||
2128 (fr->vdwtype == evdwSWITCH))
2130 if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
2131 (fr->vdw_modifier == eintmodFORCESWITCH) ||
2132 (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
2134 gmx_fatal(FARGS,
2135 "With dispersion correction rvdw-switch can not be zero "
2136 "for vdw-type = %s", evdw_names[fr->vdwtype]);
2139 /* TODO This code depends on the logic in tables.c that
2140 constructs the table layout, which should be made
2141 explicit in future cleanup. */
2142 GMX_ASSERT(fr->nblists[0].table_vdw->interaction == GMX_TABLE_INTERACTION_VDWREP_VDWDISP,
2143 "Dispersion-correction code needs a table with both repulsion and dispersion terms");
2144 scale = fr->nblists[0].table_vdw->scale;
2145 vdwtab = fr->nblists[0].table_vdw->data;
2147 /* Round the cut-offs to exact table values for precision */
2148 ri0 = static_cast<int>(floor(fr->rvdw_switch*scale));
2149 ri1 = static_cast<int>(ceil(fr->rvdw*scale));
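/* Worked example (editor's addition, with hypothetical numbers): for a table
 * scale of 500 points/nm, rvdw_switch = 0.9 nm and rvdw = 1.0 nm give
 * ri0 = floor(450.0) = 450 and ri1 = ceil(500.0) = 500, so the correction
 * integrals below run over exact table points rather than interpolated radii.
 */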
2151 /* The code below has some support for handling force-switching, i.e.
2152 * when the force (instead of potential) is switched over a limited
2153 * region. This leads to a constant shift in the potential inside the
2154 * switching region, which we can handle by adding a constant energy
2155 * term in the force-switch case just like when we do potential-shift.
2157 * For now this is not enabled, but to keep the functionality in the
2158 * code we check separately for switch and shift. When we do force-switch
2159 * the shifting point is rvdw_switch, while it is the cutoff when we
2160 * have a classical potential-shift.
2162 * For a pure potential-shift the potential has a constant shift
2163 * all the way out to the cutoff, and that is it. For other forms
2164 * we need to calculate the constant shift up to the point where we
2165 * start modifying the potential.
2167 ri0 = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;
2169 r0 = ri0/scale;
2170 rc3 = r0*r0*r0;
2171 rc9 = rc3*rc3*rc3;
2173 if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
2174 (fr->vdwtype == evdwSHIFT))
2176 /* Determine the constant energy shift below rvdw_switch.
2177 * The table carries a scale factor, since it was scaled down to compensate
2178 * for the c6/c12 parameters being scaled up by the derivative factors, which saves flops in the analytical kernels.
2180 fr->enershiftsix = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2181 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2183 else if (fr->vdw_modifier == eintmodPOTSHIFT)
2185 fr->enershiftsix = (real)(-1.0/(rc3*rc3));
2186 fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
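/* Editor's note (illustrative): for a pure potential-shift the constants
 * above reduce to enershiftsix = -1/r0^6 and enershifttwelve = +1/r0^12,
 * i.e. the ideal per-coefficient potentials evaluated at the shift point.
 * In the force-switch/shift branch the (rescaled) table value at r0 enters
 * as well; the factors 6 and 12 undo the scaling of the dispersion and
 * repulsion table entries described in integrate_table().
 */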
2189 /* Add the constant part from 0 to rvdw_switch.
2190 * This integration from 0 to rvdw_switch overcounts the number
2191 * of interactions by 1, as it also counts the self interaction.
2192 * We will correct for this later.
2194 eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2195 eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2197 /* Calculate the contribution in the range [r0,r1] where we
2198 * modify the potential. For a pure potential-shift modifier we will
2199 * have ri0==ri1, and there will not be any contribution here.
2201 for (i = 0; i < 2; i++)
2203 enersum = 0;
2204 virsum = 0;
2205 integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
2206 eners[i] -= enersum;
2207 virs[i] -= virsum;
2210 /* Alright: Above we compensated by REMOVING the parts outside r0
2211 * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
2213 * Regardless of whether r0 is the point where we start switching,
2214 * or the cutoff where we calculated the constant shift, we include
2215 * all the parts we are missing out to infinity from r0 by
2216 * calculating the analytical dispersion correction.
2218 eners[0] += -4.0*M_PI/(3.0*rc3);
2219 eners[1] += 4.0*M_PI/(9.0*rc9);
2220 virs[0] += 8.0*M_PI/rc3;
2221 virs[1] += -16.0*M_PI/(3.0*rc9);
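/* Worked integrals (editor's addition): the analytical tail from r0 to
 * infinity of the ideal terms, weighted by 4*pi*r^2, gives
 *   dispersion:  integral of 4*pi*r^2 * (-1/r^6)  dr = -4*pi/(3*r0^3)
 *   repulsion:   integral of 4*pi*r^2 * ( 1/r^12) dr =  4*pi/(9*r0^9)
 * which are exactly the eners[] increments above; the virs[] increments are
 * the matching integrals of 4*pi*r^2 * r*dV/dr, i.e. 8*pi/r0^3 and
 * -16*pi/(3*r0^9).
 */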
2223 else if (fr->vdwtype == evdwCUT ||
2224 EVDW_PME(fr->vdwtype) ||
2225 fr->vdwtype == evdwUSER)
2227 if (fr->vdwtype == evdwUSER && fplog)
2229 fprintf(fplog,
2230 "WARNING: using dispersion correction with user tables\n");
2233 /* Note that with LJ-PME, the dispersion correction is multiplied
2234 * by the difference between the actual C6 and the value of C6
2235 * that would produce the combination rule.
2236 * This means the normal energy and virial difference formulas
2237 * can be used here.
2240 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
2241 rc9 = rc3*rc3*rc3;
2242 /* Contribution beyond the cut-off */
2243 eners[0] += -4.0*M_PI/(3.0*rc3);
2244 eners[1] += 4.0*M_PI/(9.0*rc9);
2245 if (fr->vdw_modifier == eintmodPOTSHIFT)
2247 /* Contribution within the cut-off */
2248 eners[0] += -4.0*M_PI/(3.0*rc3);
2249 eners[1] += 4.0*M_PI/(3.0*rc9);
2251 /* Contribution beyond the cut-off */
2252 virs[0] += 8.0*M_PI/rc3;
2253 virs[1] += -16.0*M_PI/(3.0*rc9);
2255 else
2257 gmx_fatal(FARGS,
2258 "Dispersion correction is not implemented for vdw-type = %s",
2259 evdw_names[fr->vdwtype]);
2262 /* When we deprecate the group kernels the code below can go too */
2263 if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
2265 /* Calculate self-interaction coefficient (assuming that
2266 * the reciprocal-space contribution is constant in the
2267 * region that contributes to the self-interaction).
2269 fr->enershiftsix = gmx::power6(fr->ewaldcoeff_lj) / 6.0;
2271 eners[0] += -gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj)/3.0;
2272 virs[0] += gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj);
2275 fr->enerdiffsix = eners[0];
2276 fr->enerdifftwelve = eners[1];
2277 /* The 0.5 is due to the Gromacs definition of the virial */
2278 fr->virdiffsix = 0.5*virs[0];
2279 fr->virdifftwelve = 0.5*virs[1];
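/* Editor's note (hedged): GROMACS defines the virial with a factor -1/2
 * (Xi = -1/2 sum r (x) F, see the reference manual), while the r*dV/dr
 * integrals collected in virs[] carry no such factor, which is presumably
 * why the 0.5 is applied when storing virdiffsix/virdifftwelve above.
 */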
2283 void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
2284 int natoms,
2285 matrix box, real lambda, tensor pres, tensor virial,
2286 real *prescorr, real *enercorr, real *dvdlcorr)
2288 gmx_bool bCorrAll, bCorrPres;
2289 real dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2290 int m;
2292 *prescorr = 0;
2293 *enercorr = 0;
2294 *dvdlcorr = 0;
2296 clear_mat(virial);
2297 clear_mat(pres);
2299 if (ir->eDispCorr != edispcNO)
2301 bCorrAll = (ir->eDispCorr == edispcAllEner ||
2302 ir->eDispCorr == edispcAllEnerPres);
2303 bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2304 ir->eDispCorr == edispcAllEnerPres);
2306 invvol = 1/det(box);
2307 if (fr->n_tpi)
2309 /* Only correct for the interactions with the inserted molecule */
2310 dens = (natoms - fr->n_tpi)*invvol;
2311 ninter = fr->n_tpi;
2313 else
2315 dens = natoms*invvol;
2316 ninter = 0.5*natoms;
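/* Editor's note (illustrative): the factor 0.5*natoms reflects that each of
 * the ~N*(N-1)/2 pair interactions is counted once, i.e. roughly N/2
 * interactions per atom. In test-particle insertion only the n_tpi inserted
 * atoms interact with the rest, and they are likewise excluded from the
 * density used for the homogeneous background.
 */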
2319 if (ir->efep == efepNO)
2321 avcsix = fr->avcsix[0];
2322 avctwelve = fr->avctwelve[0];
2324 else
2326 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
2327 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2330 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2331 *enercorr += avcsix*enerdiff;
2332 dvdlambda = 0.0;
2333 if (ir->efep != efepNO)
2335 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2337 if (bCorrAll)
2339 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2340 *enercorr += avctwelve*enerdiff;
2341 if (fr->efep != efepNO)
2343 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
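/* Editor's note: since <C6> and <C12> are interpolated linearly in lambda
 * above, their lambda-derivatives are simply avcsix[1]-avcsix[0] and
 * avctwelve[1]-avctwelve[0], which is what the dvdlambda increments use.
 */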
2347 if (bCorrPres)
2349 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2350 if (ir->eDispCorr == edispcAllEnerPres)
2352 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2354 /* The factor 2 is because of the Gromacs virial definition */
2355 spres = -2.0*invvol*svir*PRESFAC;
2357 for (m = 0; m < DIM; m++)
2359 virial[m][m] += svir;
2360 pres[m][m] += spres;
2362 *prescorr += spres;
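/* Editor's note (hedged): with the GROMACS pressure definition
 * P = (2/V)*(Ekin - Xi), adding svir to each diagonal virial element changes
 * the corresponding pressure element by -2*svir/V; PRESFAC converts the
 * result to bar, giving the spres applied above.
 */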
2365 /* Can't currently control when it prints; for now, just print when debugging */
2366 if (debug)
2368 if (bCorrAll)
2370 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2371 avcsix, avctwelve);
2373 if (bCorrPres)
2375 fprintf(debug,
2376 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2377 *enercorr, spres, svir);
2379 else
2381 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2385 if (fr->efep != efepNO)
2387 *dvdlcorr += dvdlambda;
2392 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2393 t_graph *graph, rvec x[])
2395 if (fplog)
2397 fprintf(fplog, "Removing pbc first time\n");
2399 calc_shifts(box, fr->shift_vec);
2400 if (graph)
2402 mk_mshift(fplog, graph, fr->ePBC, box, x);
2403 if (gmx_debug_at)
2405 p_graph(debug, "do_pbc_first 1", graph);
2407 shift_self(graph, box, x);
2408 /* By doing an extra mk_mshift the molecules that are broken
2409 * because they were, e.g., imported from another software package
2410 * will be made whole again. Such are the healing powers
2411 * of GROMACS.
2413 mk_mshift(fplog, graph, fr->ePBC, box, x);
2414 if (gmx_debug_at)
2416 p_graph(debug, "do_pbc_first 2", graph);
2419 if (fplog)
2421 fprintf(fplog, "Done rmpbc\n");
2425 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2426 const gmx_mtop_t *mtop, rvec x[],
2427 gmx_bool bFirst)
2429 t_graph *graph;
2430 int mb, as, mol;
2431 gmx_molblock_t *molb;
2433 if (bFirst && fplog)
2435 fprintf(fplog, "Removing pbc first time\n");
2438 snew(graph, 1);
2439 as = 0;
2440 for (mb = 0; mb < mtop->nmolblock; mb++)
2442 molb = &mtop->molblock[mb];
2443 if (molb->natoms_mol == 1 ||
2444 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2446 /* Just one atom or charge group in the molecule, no PBC required */
2447 as += molb->nmol*molb->natoms_mol;
2449 else
2451 /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2452 mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2453 0, molb->natoms_mol, FALSE, FALSE, graph);
2455 for (mol = 0; mol < molb->nmol; mol++)
2457 mk_mshift(fplog, graph, ePBC, box, x+as);
2459 shift_self(graph, box, x+as);
2460 /* The molecule is whole now.
2461 * We don't need the second mk_mshift call as in do_pbc_first,
2462 * since we no longer need this graph.
2465 as += molb->natoms_mol;
2467 done_graph(graph);
2470 sfree(graph);
2473 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2474 const gmx_mtop_t *mtop, rvec x[])
2476 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2479 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2480 gmx_mtop_t *mtop, rvec x[])
2482 low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2485 void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
2487 int t, nth;
2488 nth = gmx_omp_nthreads_get(emntDefault);
2490 #pragma omp parallel for num_threads(nth) schedule(static)
2491 for (t = 0; t < nth; t++)
2495 int offset, len;
2497 offset = (natoms*t )/nth;
2498 len = (natoms*(t + 1))/nth - offset;
2499 put_atoms_in_box(ePBC, box, len, x + offset);
2501 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
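/* Worked example (editor's addition): the static partition above assigns
 * each thread the index range [natoms*t/nth, natoms*(t+1)/nth); e.g. for
 * natoms = 10 and nth = 4 the chunk lengths are 2, 3, 2 and 3, so all atoms
 * are covered exactly once without overlap.
 */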
2505 // TODO This can be cleaned up a lot, and moved back to runner.cpp
2506 void finish_run(FILE *fplog, t_commrec *cr,
2507 t_inputrec *inputrec,
2508 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2509 gmx_walltime_accounting_t walltime_accounting,
2510 nonbonded_verlet_t *nbv,
2511 gmx_bool bWriteStat)
2513 t_nrnb *nrnb_tot = NULL;
2514 double delta_t = 0;
2515 double nbfs = 0, mflop = 0;
2516 double elapsed_time,
2517 elapsed_time_over_all_ranks,
2518 elapsed_time_over_all_threads,
2519 elapsed_time_over_all_threads_over_all_ranks;
2521 if (cr->nnodes > 1)
2523 snew(nrnb_tot, 1);
2524 #ifdef GMX_MPI
2525 MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2526 cr->mpi_comm_mysim);
2527 #endif
2529 else
2531 nrnb_tot = nrnb;
2534 elapsed_time = walltime_accounting_get_elapsed_time(walltime_accounting);
2535 elapsed_time_over_all_ranks = elapsed_time;
2536 elapsed_time_over_all_threads = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2537 elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2538 #ifdef GMX_MPI
2539 if (cr->nnodes > 1)
2541 /* reduce elapsed_time over all MPI ranks in the current simulation */
2542 MPI_Allreduce(&elapsed_time,
2543 &elapsed_time_over_all_ranks,
2544 1, MPI_DOUBLE, MPI_SUM,
2545 cr->mpi_comm_mysim);
2546 elapsed_time_over_all_ranks /= cr->nnodes;
2547 /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2548 * current simulation. */
2549 MPI_Allreduce(&elapsed_time_over_all_threads,
2550 &elapsed_time_over_all_threads_over_all_ranks,
2551 1, MPI_DOUBLE, MPI_SUM,
2552 cr->mpi_comm_mysim);
2554 #endif
2556 if (SIMMASTER(cr))
2558 print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2560 if (cr->nnodes > 1)
2562 sfree(nrnb_tot);
2565 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2567 print_dd_statistics(cr, inputrec, fplog);
2570 /* TODO Move the responsibility for any scaling by thread counts
2571 * to the code that handled the thread region, so that there's a
2572 * mechanism to keep cycle counting working during the transition
2573 * to task parallelism. */
2574 int nthreads_pp = gmx_omp_nthreads_get(emntNonbonded);
2575 int nthreads_pme = gmx_omp_nthreads_get(emntPME);
2576 wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
2577 auto cycle_sum(wallcycle_sum(cr, wcycle));
2579 if (SIMMASTER(cr))
2581 struct gmx_wallclock_gpu_t* gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : NULL;
2583 wallcycle_print(fplog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
2584 elapsed_time_over_all_ranks,
2585 wcycle, cycle_sum, gputimes);
2587 if (EI_DYNAMICS(inputrec->eI))
2589 delta_t = inputrec->delta_t;
2592 if (fplog)
2594 print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2595 elapsed_time_over_all_ranks,
2596 walltime_accounting_get_nsteps_done(walltime_accounting),
2597 delta_t, nbfs, mflop);
2599 if (bWriteStat)
2601 print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2602 elapsed_time_over_all_ranks,
2603 walltime_accounting_get_nsteps_done(walltime_accounting),
2604 delta_t, nbfs, mflop);
2609 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2611 /* this function works, but could probably use a logic rewrite to keep all the different
2612 types of efep straight. */
2614 int i;
2615 t_lambda *fep = ir->fepvals;
2617 if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2619 for (i = 0; i < efptNR; i++)
2621 lambda[i] = 0.0;
2622 if (lam0)
2624 lam0[i] = 0.0;
2627 return;
2629 else
2631 *fep_state = fep->init_fep_state; /* this might overwrite the value read from
2632 the checkpoint if one is set -- a kludge is in place for now
2633 to prevent this. */
2634 for (i = 0; i < efptNR; i++)
2636 /* overwrite lambda state with init_lambda for now for backwards compatibility */
2637 if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2639 lambda[i] = fep->init_lambda;
2640 if (lam0)
2642 lam0[i] = lambda[i];
2645 else
2647 lambda[i] = fep->all_lambda[i][*fep_state];
2648 if (lam0)
2650 lam0[i] = lambda[i];
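/* Editor's note (illustrative): e.g. with init-lambda = 0.5 every component
 * of lambda[] is set to 0.5, while with init-lambda-state = k (and
 * init-lambda < 0) component i is taken from all_lambda[i][k].
 */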
2654 if (ir->bSimTemp)
2656 /* need to rescale control temperatures to match current state */
2657 for (i = 0; i < ir->opts.ngtc; i++)
2659 if (ir->opts.ref_t[i] > 0)
2661 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2667 /* Send to the log the information on the current lambdas */
2668 if (fplog != NULL)
2670 fprintf(fplog, "Initial vector of lambda components:[ ");
2671 for (i = 0; i < efptNR; i++)
2673 fprintf(fplog, "%10.4f ", lambda[i]);
2675 fprintf(fplog, "]\n");
2677 return;
2681 void init_md(FILE *fplog,
2682 t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
2683 double *t, double *t0,
2684 real *lambda, int *fep_state, double *lam0,
2685 t_nrnb *nrnb, gmx_mtop_t *mtop,
2686 gmx_update_t *upd,
2687 int nfile, const t_filenm fnm[],
2688 gmx_mdoutf_t *outf, t_mdebin **mdebin,
2689 tensor force_vir, tensor shake_vir, rvec mu_tot,
2690 gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
2691 gmx_wallcycle_t wcycle)
2693 int i;
2695 /* Initial values */
2696 *t = *t0 = ir->init_t;
2698 *bSimAnn = FALSE;
2699 for (i = 0; i < ir->opts.ngtc; i++)
2701 /* set bSimAnn if any group is being annealed */
2702 if (ir->opts.annealing[i] != eannNO)
2704 *bSimAnn = TRUE;
2707 if (*bSimAnn)
2709 update_annealing_target_temp(&(ir->opts), ir->init_t);
2712 /* Initialize lambda variables */
2713 initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2715 if (upd)
2717 *upd = init_update(ir);
2721 if (vcm != NULL)
2723 *vcm = init_vcm(fplog, &mtop->groups, ir);
2726 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2728 if (ir->etc == etcBERENDSEN)
2730 please_cite(fplog, "Berendsen84a");
2732 if (ir->etc == etcVRESCALE)
2734 please_cite(fplog, "Bussi2007a");
2736 if (ir->eI == eiSD1)
2738 please_cite(fplog, "Goga2012");
2741 if ((ir->et[XX].n > 0) || (ir->et[YY].n > 0) || (ir->et[ZZ].n > 0))
2743 please_cite(fplog, "Caleman2008a");
2745 init_nrnb(nrnb);
2747 if (nfile != -1)
2749 *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);
2751 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : mdoutf_get_fp_ene(*outf),
2752 mtop, ir, mdoutf_get_fp_dhdl(*outf));
2755 /* Initiate variables */
2756 clear_mat(force_vir);
2757 clear_mat(shake_vir);
2758 clear_rvec(mu_tot);