/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "chargegroup.h"
#include "pull_rotation.h"
#include "gmx_random.h"
#include "nbnxn_atomdata.h"
#include "nbnxn_search.h"
#include "nbnxn_kernels/nbnxn_kernel_ref.h"
#include "nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/gmxmpi.h"
#include "nbnxn_cuda_data_mgmt.h"
#include "nbnxn_cuda/nbnxn_cuda.h"
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_large_int_t           step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    char   buf[48];
    double dt, elapsed_seconds, time_per_step;

#ifndef GMX_THREAD_MPI
    /* ... */
#endif

    fprintf(out, "step %s", gmx_step_str(step, buf));
    if ((step >= ir->nstlist))
    {
        double seconds_since_epoch = gmx_gettime();

        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step) * time_per_step;

        if (ir->nsteps >= 0)
        {
            if (dt >= 300)
            {
                finish = (time_t) (seconds_since_epoch + dt);
                gmx_ctime_r(&finish, timebuf, STRLEN);
                sprintf(buf, "%s", timebuf);
                buf[strlen(buf)-1] = '\0';
                fprintf(out, ", will finish %s", buf);
            }
            else
            {
                fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
            }
        }
        else
        {
            fprintf(out, " performance: %.1f ns/day    ",
                    ir->delta_t/1000*24*60*60/time_per_step);
        }
    }
#ifndef GMX_THREAD_MPI
    /* ... */
#endif
}
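
/* Editorial note (not in the original source): a worked example of the ETA
 * arithmetic in print_time() above, assuming a constant cost per step.
 * With init_step = 0, after 1000 of nsteps = 10000 steps in 50 s of wall time:
 *   time_per_step = 50/(1000 - 0 + 1) ~= 0.05 s
 *   dt            = (10000 + 0 - 1000)*0.05 = 450 s remaining
 * and with delta_t = 0.002 ps the performance estimate is
 *   0.002/1000*24*60*60/0.05 ~= 3.5 ns/day.
 */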
void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         const gmx_walltime_accounting_t walltime_accounting)
{
    int    i;
    char   timebuf[STRLEN];
    char   time_string[STRLEN];
    time_t tmptime;

    if (walltime_accounting != NULL)
    {
        tmptime = (time_t) walltime_accounting_get_start_time_stamp(walltime_accounting);
        gmx_ctime_r(&tmptime, timebuf, STRLEN);
    }
    else
    {
        tmptime = (time_t) gmx_gettime();
        gmx_ctime_r(&tmptime, timebuf, STRLEN);
    }
    for (i = 0; timebuf[i] >= ' '; i++)
    {
        time_string[i] = timebuf[i];
    }
    time_string[i] = '\0';

    fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
}
static void sum_forces(int start, int end, rvec f[], rvec flr[])
{
    int i;

    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "fsr", f+start, end-start);
        pr_rvecs(debug, 0, "flr", flr+start, end-start);
    }
    for (i = start; (i < end); i++)
    {
        rvec_inc(f[i], flr[i]);
    }
}
/* calc_f_el calculates forces due to an electric field.
 *
 * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
 *
 * Et[] contains the parameters for the time dependent
 * part of the field (not yet used).
 * Ex[] contains the parameters for
 * the spatial dependent part of the field. You can have cool periodic
 * fields in principle, but only a constant field is supported now.
 *
 * The function should return the energy due to the electric field
 * (if any) but for now returns 0.
 *
 * There can be problems with the virial.
 * Since the field is not self-consistent this is unavoidable.
 * For neutral molecules the virial is correct within this approximation.
 * For neutral systems with many charged molecules the error is small.
 * But for systems with a net charge or a few charged molecules
 * the error can be significant when the field is high.
 * Solution: implement a self-consistent electric field into PME.
 */
static void calc_f_el(FILE *fp, int start, int homenr,
                      real charge[], rvec f[],
                      t_cosines Ex[], t_cosines Et[], double t)
{
    rvec Ext;
    real t0;
    int  i, m;

    for (m = 0; (m < DIM); m++)
    {
        if (Et[m].n > 0)
        {
            if (Et[m].n == 3)
            {
                t0     = Et[m].a[1];
                Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
            }
            else
            {
                Ext[m] = cos(Et[m].a[0]*t);
            }
        }
        else
        {
            Ext[m] = 1.0;
        }
        if (Ex[m].n > 0)
        {
            /* Convert the field strength from V/nm to MD-units */
            Ext[m] *= Ex[m].a[0]*FIELDFAC;
            for (i = start; (i < start+homenr); i++)
            {
                f[i][m] += charge[i]*Ext[m];
            }
        }
        else
        {
            Ext[m] = 0;
        }
    }
    if (fp != NULL)
    {
        fprintf(fp, "%10g  %10g  %10g  %10g #FIELD\n", t,
                Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
    }
}
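
/* Editorial note (not in the original source): a minimal, self-contained
 * sketch of the unit conversion performed by calc_f_el() above. The field
 * enters in V/nm, the force comes out in kJ mol^-1 nm^-1 per elementary
 * charge; FIELDFAC is the conversion factor (Faraday constant over kilo,
 * roughly 96.485). The numeric value below is an illustrative assumption,
 * not the header definition.
 */
#if 0   /* illustrative only, not part of the build */
#include <stdio.h>

int main(void)
{
    const double fieldfac = 96.485; /* assumed ~FIELDFAC, kJ mol^-1 nm^-1 per (e * V/nm) */
    double       E_vnm    = 1.0;    /* field strength in V/nm */
    double       q        = -1.0;   /* charge in units of e (e.g. a chloride ion) */
    double       f_md     = q*E_vnm*fieldfac;

    printf("F = %g kJ mol^-1 nm^-1\n", f_md); /* about -96.5 for this input */
    return 0;
}
#endif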
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int i;

    /* The short-range virial from surrounding boxes */
    clear_mat(vir_part);
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
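
/* Editorial note (not in the original source): a minimal sketch of the
 * single-sum virial convention that f_calc_vir() above implements,
 * Xi = -1/2 * sum_i x_i (outer product) f_i, here without the shift/PBC
 * bookkeeping that calc_vir() handles. Hypothetical helper, for
 * illustration only.
 */
#if 0   /* illustrative only, not part of the build */
static void example_virial(int n, rvec x[], rvec f[], tensor vir)
{
    int i, d, e;

    clear_mat(vir);
    for (i = 0; i < n; i++)
    {
        for (d = 0; d < DIM; d++)
        {
            for (e = 0; e < DIM; e++)
            {
                /* the GROMACS virial carries a -0.5 prefactor; compare the
                 * "0.5" and "factor 2" comments in calc_enervirdiff() and
                 * calc_dispcorr() below */
                vir[d][e] -= 0.5*x[i][d]*f[i][e];
            }
        }
    }
}
#endif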
static void posres_wrapper(FILE *fplog,
                           int flags,
                           gmx_bool bSepDVDL,
                           t_inputrec *ir,
                           t_nrnb *nrnb,
                           gmx_localtop_t *top,
                           matrix box, rvec x[],
                           gmx_enerdata_t *enerd,
                           real *lambda,
                           t_forcerec *fr)
{
    t_pbc pbc;
    real  v, dvdl;
    int   i;

    /* Position restraints always require full pbc */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    v    = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                  top->idef.iparams_posres,
                  (const rvec *)x, fr->f_novirsum, fr->vir_diag_posres,
                  ir->ePBC == epbcNONE ? NULL : &pbc,
                  lambda[efptRESTRAINT], &dvdl,
                  fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, interaction_function[F_POSRES].longname, v, dvdl);
    }
    enerd->term[F_POSRES] += v;
    /* If just the force constant changes, the FEP term is linear,
     * but if the reference position changes, it is not.
     */
    enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
    inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);

    if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            real dvdl_dum, lambda_dum;

            lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
            v          = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                                top->idef.iparams_posres,
                                (const rvec *)x, NULL, NULL,
                                ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
                                fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
            enerd->enerpart_lambda[i] += v;
        }
    }
}
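
/* Editorial note (not in the original source): why the restraint dH/dlambda
 * is accumulated as non-linear above. For a single restrained atom,
 *   V(lambda) = 1/2 * k(lambda) * |x - x0(lambda)|^2
 * with k(lambda) = (1-lambda)*kA + lambda*kB and x0 interpolated likewise.
 * If only k differs between the A and B states, V is linear in lambda; if x0
 * differs, the |x - x0(lambda)|^2 factor makes V quadratic in lambda, so the
 * energy must be re-evaluated at each foreign lambda, as in the loop above.
 */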
static void pull_potential_wrapper(FILE *fplog,
                                   gmx_bool bSepDVDL,
                                   t_commrec *cr,
                                   t_inputrec *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real *lambda,
                                   double t)
{
    t_pbc pbc;
    real  dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, "Com pull", enerd->term[F_COM_PULL], dvdl);
    }
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
}
static void pme_receive_force_ener(FILE           *fplog,
                                   gmx_bool        bSepDVDL,
                                   t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec     *fr)
{
    real  e_q, e_lj, v, dvdl_q, dvdl_lj;
    float cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, "Electrostatic PME mesh", e_q, dvdl_q);
        gmx_print_sepdvdl(fplog, "Lennard-Jones PME mesh", e_lj, dvdl_lj);
    }
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_large_int_t step, real pforce, rvec *x, rvec *f)
{
    int  i;
    real pf2, fn2;
    char buf[STEPSTRSIZE];

    pf2 = sqr(pforce);
    for (i = md->start; i < md->start+md->homenr; i++)
    {
        fn2 = norm2(f[i]);
        /* We also catch NAN, if the compiler does not optimize this away. */
        if (fn2 >= pf2 || fn2 != fn2)
        {
            fprintf(fp, "step %s  atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    gmx_step_str(step, buf),
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
        }
    }
}
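
/* Editorial note (not in the original source): the "fn2 != fn2" test above
 * relies on IEEE-754 NaN comparing unequal to itself. A tiny standalone
 * demonstration, assuming IEEE arithmetic and no -ffast-math style
 * optimization (which is exactly what the in-loop comment warns about):
 */
#if 0   /* illustrative only, not part of the build */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double fn2 = sqrt(-1.0);        /* NaN */
    printf("%d\n", fn2 != fn2);     /* prints 1: NaN is unequal to itself */
    return 0;
}
#endif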
static void post_process_forces(t_commrec *cr,
                                gmx_large_int_t step,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms *mdatoms,
                                t_graph *graph,
                                t_forcerec *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            if (fr->bDomDec)
            {
                sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
            }
            else
            {
                sum_forces(mdatoms->start, mdatoms->start+mdatoms->homenr,
                           f, fr->f_novirsum);
            }
            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (EVDW_PME(fr->vdwtype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_lj_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}
static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    int                       nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    nonbonded_verlet_group_t *nbvg;
    gmx_bool                  bCUDA;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    /* CUDA kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);

    if (!bCUDA)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             /* ... */
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  /* ... */
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   /* ... */
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_CUDA:
            nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 /* ... */
                                 nbvg->nbat->out[0].f,
                                 /* ... */
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");
    }
    if (!bCUDA)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);
}
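
/* Editorial note (not in the original source): a minimal sketch of the
 * "F+E kernel = F kernel + 1" counter convention exploited above, with a
 * hypothetical enum layout (the real eNR_NBNXN_* values live in the nrnb
 * header):
 */
#if 0   /* illustrative only, not part of the build */
enum {
    exNR_LJ_RF,  exNR_LJ_RF_E,   /* force-only entry, then force+energy entry */
    exNR_LJ_TAB, exNR_LJ_TAB_E
};
/* Given a force-only counter index enr, the force+energy variant is simply
 * enr + 1, which is what do_nb_verlet() adds when GMX_FORCE_ENERGY is set.
 */
#endif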
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         rvec f[],
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, FILE *field, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    int                 cg1, i, j;
    int                 start, homenr;
    int                 nb_kernel_type;
    double              mu[2*DIM];
    gmx_bool            bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
    gmx_bool            bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    matrix              boxs;
    rvec                vzero, box_diag;
    float               cycles_pme, cycles_force, cycles_wait_gpu;
    nonbonded_verlet_t *nbv;

    cycles_force    = 0;
    cycles_wait_gpu = 0;
    nbv             = fr->nbv;
    nb_kernel_type  = fr->nbv->grp[0].kernel_type;

    start  = mdatoms->start;
    homenr = mdatoms->homenr;

    bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));

    clear_mat(vir_force);

    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    /* ... */

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoLongRange  = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bSepLRF       = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    update_forcerec(fr, box);

    if (NEED_MUTOT(*inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

#ifdef GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        int pme_flags = 0;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        if (EEL_PME(fr->eeltype))
        {
            pme_flags |= GMX_PME_DO_COULOMB;
        }

        if (EVDW_PME(fr->vdwtype))
        {
            pme_flags |= GMX_PME_DO_LJ;
            if (fr->ljpme_combination_rule == eljpmeLB)
            {
                pme_flags |= GMX_PME_LJ_LB;
            }
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 pme_flags, step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        clear_rvec(vzero);
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!fr->bDomDec)
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, vzero, box_diag,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              0, NULL,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       fr->cginfo, x,
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }

        wallcycle_stop(wcycle, ewcNS);
    }

    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            &top->excls,
                            ic->rlist,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            eintLocal,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                     nbv->grp[eintLocal].nbl_lists.nbl[0],
                                     eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                &top->excls,
                                ic->rlist,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                eintNonlocal,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                         nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                         eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);

            /* When we don't need the total dipole we sum it in global_stat */
            if (bStateChanged && NEED_MUTOT(*inputrec))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
                                      flags, eatNonlocal);
        }
        nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
                                  flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    if (bStateChanged && NEED_MUTOT(*inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }
        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr))
    {
        if (!(cr->duty & DUTY_PME))
        {
            wallcycle_start(wcycle, ewcPPDURINGPME);
            dd_force_flop_start(cr->dd, nrnb);
        }
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * This counter is stopped in do_forcelow_level.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->f_novirsum_alloc;
                if (fr->bDomDec)
                {
                    clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
                }
                else
                {
                    clear_rvecs(homenr, fr->f_novirsum+start);
                }
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = f;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);
        if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
        {
            clear_rvecs(fr->natoms_force_constr, fr->f_twin);
        }

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->ePull == epullCONSTRAINT)
    {
        clear_pull_forces(inputrec->pull);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, as in there bonded
     * forces are calculated before PME, which does communication.
     * With this order, non-bonded and bonded force calculation imbalance
     * can be balanced out by the domain decomposition load balancing.
     */
    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded force to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
    {
        posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
                       enerd, lambda, fr);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      &(top->atomtypes), bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    if (bSepLRF)
    {
        if (do_per_step(step, inputrec->nstcalclr))
        {
            /* Add the long range forces to the short range forces */
            for (i = 0; i < fr->natoms_force_constr; i++)
            {
                rvec_add(fr->f_twin[i], f[i], f[i]);
            }
        }
    }

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                float cycles_tmp;

                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                    nbv->grp[eintNonlocal].nbat,
                                    flags, eatNonlocal,
                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                    fr->fshift);
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }

    if (bDoForces)
    {
        /* Communicate the forces */
        if (PAR(cr))
        {
            wallcycle_start(wcycle, ewcMOVEF);
            if (DOMAINDECOMP(cr))
            {
                dd_move_f(cr->dd, f, fr->fshift);
                /* Do we need to communicate the separate force array
                 * for terms that do not contribute to the single sum virial?
                 * Position restraints and electric fields do not introduce
                 * inter-cg forces, only full electrostatics methods do.
                 * When we do not calculate the virial, fr->f_novirsum = f,
                 * so we have already communicated these forces.
                 */
                if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
                    (flags & GMX_FORCE_VIRIAL))
                {
                    dd_move_f(cr->dd, fr->f_novirsum, NULL);
                }
                if (bSepLRF)
                {
                    /* We should not update the shift forces here,
                     * since f_twin is already included in f.
                     */
                    dd_move_f(cr->dd, fr->f_twin, NULL);
                }
            }
            wallcycle_stop(wcycle, ewcMOVEF);
        }
    }

    if (bUseOrEmulGPU)
    {
        /* wait for local forces (or calculate in emulation mode) */
        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                nbv->grp[eintLocal].nbat,
                                flags, eatLocal,
                                enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                fr->fshift);
            cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            /* now clear the GPU outputs while we finish the step on the CPU */
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
        {
            /* skip the reduction if there was no non-local work to do */
            nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                           nbv->grp[eintLocal].nbat, f);
        }
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
            if (bUseGPU)
            {
                dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
            }
        }
    }

    if (bDoForces)
    {
        if (IR_ELEC_FIELD(*inputrec))
        {
            /* Compute forces due to electric field */
            calc_f_el(MASTER(cr) ? field : NULL,
                      start, homenr, mdatoms->chargeA, fr->f_novirsum,
                      inputrec->ex, inputrec->et, t);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);

            if (bSepLRF)
            {
                wallcycle_start(wcycle, ewcVSITESPREAD);
                spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
                               nrnb,
                               &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
                wallcycle_stop(wcycle, ewcVSITESPREAD);
            }
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(mdatoms->start, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
    {
        pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    /* Sum the potential energy terms from group contributions */
    sum_epot(&(enerd->grpp), enerd->term);
}
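
/* Editorial summary (not in the original source): the CPU/GPU overlap scheme
 * implemented by do_force_cutsVERLET() above, roughly in order:
 *   1. pair search / x buffer ops, then launch the *local* nonbonded kernel
 *      on the GPU (ewcLAUNCH_GPU_NB);
 *   2. dd_move_x(), launch the *non-local* kernel, then queue the D2H force
 *      copy-backs, so transfers overlap with CPU work;
 *   3. compute bonded and PME terms on the CPU (do_force_lowlevel);
 *   4. wait for the non-local GPU forces, reduce them, dd_move_f();
 *   5. wait for the local GPU forces, reduce, then vsites/virial/pulling.
 */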
void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        rvec f[],
                        tensor vir_force,
                        t_mdatoms *mdatoms,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, FILE *field, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
                        int flags)
{
    int      cg0, cg1, i, j;
    int      start, homenr;
    double   mu[2*DIM];
    gmx_bool bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
    gmx_bool bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
    gmx_bool bDoAdressWF;
    t_pbc    pbc;
    matrix   boxs;
    rvec     vzero, box_diag;
    real     e, v, dvdlambda[efptNR];
    float    cycles_pme, cycles_force;

    start  = mdatoms->start;
    homenr = mdatoms->homenr;

    bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));

    clear_mat(vir_force);

    if (PARTDECOMP(cr))
    {
        pd_cg_range(cr, &cg0, &cg1);
    }
    else
    {
        cg0 = 0;
        if (DOMAINDECOMP(cr))
        {
            cg1 = cr->dd->ncg_tot;
        }
        else
        {
            cg1 = top->cgs.nr;
        }
        if (fr->n_tpi > 0)
        {
            cg1--;
        }
    }

    bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
    bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we update the long-range neighborlists at this step? */
    bDoLongRangeNS = fr->bTwinRange && bNS;
    /* Should we perform the long-range nonbonded evaluation inside the neighborsearching? */
    bFillGrid      = (bNS && bStateChanged);
    bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces      = (flags & GMX_FORCE_FORCES);
    bDoPotential   = (flags & GMX_FORCE_ENERGY);
    bSepLRF        = ((inputrec->nstcalclr > 1) && bDoForces &&
                      (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));

    /* should probably move this to the forcerec since it doesn't change */
    bDoAdressWF    = ((fr->adress_type != eAdressOff));

    update_forcerec(fr, box);

    if (NEED_MUTOT(*inputrec))
    {
        /* Calculate total (local) dipole moment in a temporary common array.
         * This makes it possible to sum them over nodes faster.
         */
        calc_mu(start, homenr,
                x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                mu, mu+DIM);
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
                                     &(top->cgs), x, fr->cg_cm);
            inc_nrnb(nrnb, eNR_CGCM, homenr);
            inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }
    else if (bCalcCGCM)
    {
        calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
        inc_nrnb(nrnb, eNR_CGCM, homenr);
    }

    if (bCalcCGCM)
    {
        if (PAR(cr))
        {
            move_cgcm(fplog, cr, fr->cg_cm);
        }
        if (gmx_debug_at)
        {
            pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
        }
    }

#ifdef GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        int pme_flags = 0;

        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        if (EEL_PME(fr->eeltype))
        {
            pme_flags |= GMX_PME_DO_COULOMB;
        }

        if (EVDW_PME(fr->vdwtype))
        {
            pme_flags |= GMX_PME_DO_LJ;
            if (fr->ljpme_combination_rule == eljpmeLB)
            {
                pme_flags |= GMX_PME_LJ_LB;
            }
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 pme_flags, step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* Communicate coordinates and sum dipole if necessary */
    if (PAR(cr))
    {
        wallcycle_start(wcycle, ewcMOVEX);
        if (DOMAINDECOMP(cr))
        {
            dd_move_x(cr->dd, box, x);
        }
        else
        {
            move_x(cr, x, nrnb);
        }
        wallcycle_stop(wcycle, ewcMOVEX);
    }

    /* update adress weight beforehand */
    if (bStateChanged && bDoAdressWF)
    {
        /* need pbc for adress weight calculation with pbc_dx */
        set_pbc(&pbc, inputrec->ePBC, box);
        if (fr->adress_site == eAdressSITEcog)
        {
            update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
                                      inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }
        else if (fr->adress_site == eAdressSITEcom)
        {
            update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
                                      inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }
        else if (fr->adress_site == eAdressSITEatomatom)
        {
            update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
                                                inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }
        else
        {
            update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
                                       inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }
    }

    if (NEED_MUTOT(*inputrec))
    {
        if (bStateChanged)
        {
            if (PAR(cr))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            for (i = 0; i < 2; i++)
            {
                for (j = 0; j < DIM; j++)
                {
                    fr->mu_tot[i][j] = mu[i*DIM + j];
                }
            }
        }
        if (fr->efep == efepNO)
        {
            copy_rvec(fr->mu_tot[0], mu_tot);
        }
        else
        {
            for (j = 0; j < DIM; j++)
            {
                mu_tot[j] =
                    (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
            }
        }
    }

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    if (bNS)
    {
        wallcycle_start(wcycle, ewcNS);

        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        /* Do the actual neighbour searching */
        ns(fplog, fr, box,
           groups, top, mdatoms,
           cr, nrnb, bFillGrid,
           bDoLongRangeNS);

        wallcycle_stop(wcycle, ewcNS);
    }

    if (inputrec->implicit_solvent && bNS)
    {
        make_gb_nblist(cr, inputrec->gb_algorithm,
                       x, box, fr, &top->idef, graph, fr->born);
    }

    if (DOMAINDECOMP(cr))
    {
        if (!(cr->duty & DUTY_PME))
        {
            wallcycle_start(wcycle, ewcPPDURINGPME);
            dd_force_flop_start(cr->dd, nrnb);
        }
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * This counter is stopped in do_forcelow_level.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);

    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->f_novirsum_alloc;
                if (fr->bDomDec)
                {
                    clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
                }
                else
                {
                    clear_rvecs(homenr, fr->f_novirsum+start);
                }
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = f;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);
        if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
        {
            clear_rvecs(fr->natoms_force_constr, fr->f_twin);
        }

        clear_rvec(fr->vir_diag_posres);
    }
    if (inputrec->ePull == epullCONSTRAINT)
    {
        clear_pull_forces(inputrec->pull);
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
    {
        posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
                       enerd, lambda, fr);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
    {
        /* Flat-bottomed position restraints always require full pbc */
        if (!(bStateChanged && bDoAdressWF))
        {
            set_pbc(&pbc, inputrec->ePBC, box);
        }
        v = fbposres(top->idef.il[F_FBPOSRES].nr, top->idef.il[F_FBPOSRES].iatoms,
                     top->idef.iparams_fbposres,
                     (const rvec *)x, fr->f_novirsum, fr->vir_diag_posres,
                     inputrec->ePBC == epbcNONE ? NULL : &pbc,
                     fr->rc_scaling, fr->ePBC, fr->posres_com);
        enerd->term[F_FBPOSRES] += v;
        inc_nrnb(nrnb, eNR_FBPOSRES, top->idef.il[F_FBPOSRES].nr/2);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      &(top->atomtypes), bBornRadii, box,
                      inputrec->fepvals, lambda,
                      graph, &(top->excls), fr->mu_tot,
                      flags,
                      &cycles_pme);

    if (bSepLRF)
    {
        if (do_per_step(step, inputrec->nstcalclr))
        {
            /* Add the long range forces to the short range forces */
            for (i = 0; i < fr->natoms_force_constr; i++)
            {
                rvec_add(fr->f_twin[i], f[i], f[i]);
            }
        }
    }

    cycles_force = wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
        }
    }

    if (bDoForces)
    {
        if (IR_ELEC_FIELD(*inputrec))
        {
            /* Compute forces due to electric field */
            calc_f_el(MASTER(cr) ? field : NULL,
                      start, homenr, mdatoms->chargeA, fr->f_novirsum,
                      inputrec->ex, inputrec->et, t);
        }

        if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
        {
            /* Compute thermodynamic force in hybrid AdResS region */
            adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
                                inputrec->ePBC == epbcNONE ? NULL : &pbc);
        }

        /* Communicate the forces */
        if (PAR(cr))
        {
            wallcycle_start(wcycle, ewcMOVEF);
            if (DOMAINDECOMP(cr))
            {
                dd_move_f(cr->dd, f, fr->fshift);
                /* Do we need to communicate the separate force array
                 * for terms that do not contribute to the single sum virial?
                 * Position restraints and electric fields do not introduce
                 * inter-cg forces, only full electrostatics methods do.
                 * When we do not calculate the virial, fr->f_novirsum = f,
                 * so we have already communicated these forces.
                 */
                if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
                    (flags & GMX_FORCE_VIRIAL))
                {
                    dd_move_f(cr->dd, fr->f_novirsum, NULL);
                }
                if (bSepLRF)
                {
                    /* We should not update the shift forces here,
                     * since f_twin is already included in f.
                     */
                    dd_move_f(cr->dd, fr->f_twin, NULL);
                }
            }
            else
            {
                pd_move_f(cr, f, nrnb);
                if (bSepLRF)
                {
                    pd_move_f(cr, fr->f_twin, nrnb);
                }
            }
            wallcycle_stop(wcycle, ewcMOVEF);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum=f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);

            if (bSepLRF)
            {
                wallcycle_start(wcycle, ewcVSITESPREAD);
                spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
                               nrnb,
                               &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
                wallcycle_stop(wcycle, ewcVSITESPREAD);
            }
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(mdatoms->start, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
    {
        pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    /* Sum the potential energy terms from group contributions */
    sum_epot(&(enerd->grpp), enerd->term);
}
void do_force(FILE *fplog, t_commrec *cr,
              t_inputrec *inputrec,
              gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
              gmx_localtop_t *top,
              gmx_groups_t *groups,
              matrix box, rvec x[], history_t *hist,
              rvec f[],
              tensor vir_force,
              t_mdatoms *mdatoms,
              gmx_enerdata_t *enerd, t_fcdata *fcd,
              real *lambda, t_graph *graph,
              t_forcerec *fr,
              gmx_vsite_t *vsite, rvec mu_tot,
              double t, FILE *field, gmx_edsam_t ed,
              gmx_bool bBornRadii,
              int flags)
{
    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }

    switch (inputrec->cutoff_scheme)
    {
        case ecutsVERLET:
            do_force_cutsVERLET(fplog, cr, inputrec,
                                step, nrnb, wcycle,
                                top, groups,
                                box, x, hist,
                                f, vir_force,
                                mdatoms,
                                enerd, fcd,
                                lambda, graph,
                                fr, fr->ic,
                                vsite, mu_tot,
                                t, field, ed,
                                bBornRadii,
                                flags);
            break;
        case ecutsGROUP:
            do_force_cutsGROUP(fplog, cr, inputrec,
                               step, nrnb, wcycle,
                               top, groups,
                               box, x, hist,
                               f, vir_force,
                               mdatoms,
                               enerd, fcd,
                               lambda, graph,
                               fr, vsite, mu_tot,
                               t, field, ed,
                               bBornRadii,
                               flags);
            break;
        default:
            gmx_incons("Invalid cut-off scheme passed!");
            break;
    }
}
void do_constrain_first(FILE *fplog, gmx_constr_t constr,
                        t_inputrec *ir, t_mdatoms *md,
                        t_state *state, t_commrec *cr, t_nrnb *nrnb,
                        t_forcerec *fr, gmx_localtop_t *top)
{
    int             i, m, start, end;
    gmx_large_int_t step;
    real            dt = ir->delta_t;
    real            dvdl_dum;
    rvec           *savex;

    snew(savex, state->natoms);

    start = md->start;
    end   = md->homenr + start;

    if (debug)
    {
        fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
                start, md->homenr, end);
    }
    /* Do a first constrain to reset particles... */
    step = ir->init_step;
    if (fplog)
    {
        char buf[STEPSTRSIZE];
        fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
                gmx_step_str(step, buf));
    }
    dvdl_dum = 0;

    /* constrain the current position */
    constrain(NULL, TRUE, FALSE, constr, &(top->idef),
              ir, NULL, cr, step, 0, md,
              state->x, state->x, NULL,
              fr->bMolPBC, state->box,
              state->lambda[efptBONDED], &dvdl_dum,
              NULL, NULL, nrnb, econqCoord,
              ir->epc == epcMTTK, state->veta, state->veta);
    if (EI_VV(ir->eI))
    {
        /* constrain the initial velocity, and save it */
        /* also may be useful if we need the ekin from the halfstep for velocity verlet */
        /* might not yet treat veta correctly */
        constrain(NULL, TRUE, FALSE, constr, &(top->idef),
                  ir, NULL, cr, step, 0, md,
                  state->x, state->v, state->v,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  NULL, NULL, nrnb, econqVeloc,
                  ir->epc == epcMTTK, state->veta, state->veta);
    }
    /* constrain the initial velocities at t-dt/2 */
    if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
    {
        for (i = start; (i < end); i++)
        {
            for (m = 0; (m < DIM); m++)
            {
                /* Reverse the velocity */
                state->v[i][m] = -state->v[i][m];
                /* Store the position at t-dt in buf */
                savex[i][m] = state->x[i][m] + dt*state->v[i][m];
            }
        }
        /* Shake the positions at t=-dt with the positions at t=0
         * as reference coordinates.
         */
        if (fplog)
        {
            char buf[STEPSTRSIZE];
            fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
                    gmx_step_str(step, buf));
        }
        dvdl_dum = 0;
        constrain(NULL, TRUE, FALSE, constr, &(top->idef),
                  ir, NULL, cr, step, -1, md,
                  state->x, savex, NULL,
                  fr->bMolPBC, state->box,
                  state->lambda[efptBONDED], &dvdl_dum,
                  state->v, NULL, nrnb, econqCoord,
                  ir->epc == epcMTTK, state->veta, state->veta);

        for (i = start; i < end; i++)
        {
            for (m = 0; m < DIM; m++)
            {
                /* Re-reverse the velocities */
                state->v[i][m] = -state->v[i][m];
            }
        }
    }
    sfree(savex);
}
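
/* Editorial note (not in the original source): the velocity-reversal trick
 * above in one line of algebra. A leap-frog back-step needs x(-dt) that is
 * consistent with the constraints. Starting from the unconstrained estimate
 * x(-dt) = x(0) - dt*v, the code negates v first, so
 *   savex[i] = x[i] + dt*(-v[i])
 * is exactly that estimate; constraining savex against x(0) (passing
 * state->v so the velocity correction is applied) then yields constrained
 * coordinates at t = -dt and matching half-step velocities, after which v is
 * negated back to its original sign.
 */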
static void
integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
                double *enerout, double *virout)
{
    double enersum, virsum;
    double invscale, invscale2, invscale3;
    double r, ea, eb, ec, pa, pb, pc, pd;
    double y0, f, g, h;
    int    ri, offset, tabfactor;

    invscale  = 1.0/scale;
    invscale2 = invscale*invscale;
    invscale3 = invscale*invscale2;

    /* Following summation derived from cubic spline definition,
     * Numerical Recipes in C, second edition, p. 113-116. Exact for
     * the cubic spline. We first calculate the negative of the
     * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
     * add the more standard, abrupt cutoff correction to that result,
     * yielding the long-range correction for a switched function. We
     * perform both the pressure and energy loops at the same time for
     * simplicity, as the computational cost is low. */

    if (offstart == 0)
    {
        /* Since the dispersion table has been scaled down a factor
         * 6.0 and the repulsion a factor 12.0 to compensate for the
         * c6/c12 parameters inside nbfp[] being scaled up (to save
         * flops in kernels), we need to correct for this.
         */
        tabfactor = 6;
    }
    else
    {
        tabfactor = 12;
    }

    enersum = 0.0;
    virsum  = 0.0;
    for (ri = rstart; ri < rend; ++ri)
    {
        r  = ri*invscale;
        ea = invscale3;
        eb = 2.0*invscale2*r;
        ec = invscale*r*r;

        pa = invscale3;
        pb = 3.0*invscale2*r;
        pc = 3.0*invscale*r*r;
        pd = r*r*r;

        /* this "8" is from the packing in the vdwtab array - perhaps
           should be defined? */

        offset = 8*ri + offstart;
        y0     = vdwtab[offset];
        f      = vdwtab[offset+1];
        g      = vdwtab[offset+2];
        h      = vdwtab[offset+3];

        enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
        virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
    }
    *enerout = 4.0*M_PI*enersum*tabfactor;
    *virout  = 4.0*M_PI*virsum*tabfactor;
}
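
/* Editorial note (not in the original source): where the ea/eb/ec weights
 * above come from. Each table interval i covers r in [r_i, r_i + 1/scale];
 * with s = r_i + t*invscale and the spline y(t) = y0 + f*t + g*t^2 + h*t^3
 * for t in [0,1], the shell integral over one interval is
 *   int s^2 y dr = invscale * int_0^1 (r_i^2 + 2*r_i*t*invscale
 *                                      + t^2*invscale^2) * y(t) dt,
 * so the constant, t and t^2 weights are ec = r^2*invscale,
 * eb = 2*r*invscale^2 and ea = invscale^3, and integrating t^n against them
 * produces the ea/(n+3) + eb/(n+2) + ec/(n+1) pattern seen in enersum.
 */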
void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
{
    double eners[2], virs[2], enersum, virsum, y0, f, g, h;
    double r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
    double invscale, invscale2, invscale3;
    int    ri0, ri1, ri, i, offstart, offset;
    real   scale, *vdwtab, tabfactor, tmp;

    fr->enershiftsix    = 0;
    fr->enershifttwelve = 0;
    fr->enerdiffsix     = 0;
    fr->enerdifftwelve  = 0;
    fr->virdiffsix      = 0;
    fr->virdifftwelve   = 0;

    if (eDispCorr != edispcNO)
    {
        for (i = 0; i < 2; i++)
        {
            eners[i] = 0;
            virs[i]  = 0;
        }
        if ((fr->vdwtype == evdwSWITCH) || (fr->vdwtype == evdwSHIFT))
        {
            if (fr->rvdw_switch == 0)
            {
                gmx_fatal(FARGS,
                          "With dispersion correction rvdw-switch can not be zero "
                          "for vdw-type = %s", evdw_names[fr->vdwtype]);
            }

            scale  = fr->nblists[0].table_elec_vdw.scale;
            vdwtab = fr->nblists[0].table_vdw.data;

            /* Round the cut-offs to exact table values for precision */
            ri0 = floor(fr->rvdw_switch*scale);
            ri1 = ceil(fr->rvdw*scale);
            r0  = ri0/scale;
            r1  = ri1/scale;
            rc3 = r0*r0*r0;
            rc9 = rc3*rc3*rc3;

            if (fr->vdwtype == evdwSHIFT)
            {
                /* Determine the constant energy shift below rvdw_switch.
                 * Table has a scale factor since we have scaled it down to compensate
                 * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
                 */
                fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
                fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
            }
            /* Add the constant part from 0 to rvdw_switch.
             * This integration from 0 to rvdw_switch overcounts the number
             * of interactions by 1, as it also counts the self interaction.
             * We will correct for this later.
             */
            eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
            eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
            for (i = 0; i < 2; i++)
            {
                enersum = 0;
                virsum  = 0;
                integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
                eners[i] -= enersum;
                virs[i]  -= virsum;
            }

            /* now add the correction for rvdw_switch to infinity */
            eners[0] += -4.0*M_PI/(3.0*rc3);
            eners[1] +=  4.0*M_PI/(9.0*rc9);
            virs[0]  +=  8.0*M_PI/rc3;
            virs[1]  += -16.0*M_PI/(3.0*rc9);
        }
->vdwtype
))
2204 if (EVDW_SWITCHED(fr
->vdwtype
) && fr
->rvdw_switch
== 0)
2207 "With dispersion correction rvdw-switch can not be zero "
2208 "for vdw-type = %s", evdw_names
[fr
->vdwtype
]);
2211 scale
= fr
->nblists
[0].table_vdw
.scale
;
2212 vdwtab
= fr
->nblists
[0].table_vdw
.data
;
2214 ri0
= floor(fr
->rvdw_switch
*scale
);
2215 ri1
= ceil(fr
->rvdw
*scale
);
2221 /* Calculate self-interaction coefficient (assuming that
2222 * the reciprocal-space contribution is constant in the
2223 * region that contributes to the self-interaction).
2225 fr
->enershiftsix
= pow(fr
->ewaldcoeff_lj
, 6) / 6.0;
2227 /* Calculate C12 values as without PME. */
2228 if (EVDW_SWITCHED(fr
->vdwtype
))
2232 integrate_table(vdwtab
, scale
, 4, ri0
, ri1
, &enersum
, &virsum
);
2233 eners
[1] -= enersum
;
2236 /* Add analytical corrections, C6 for the whole range, C12
2237 * from rvdw_switch to infinity.
2240 eners
[0] += -pow(sqrt(M_PI
)*fr
->ewaldcoeff_lj
, 3)/3.0;
2241 eners
[1] += 4.0*M_PI
/(9.0*rc9
);
2242 virs
[0] += pow(sqrt(M_PI
)*fr
->ewaldcoeff_lj
, 3);
2243 virs
[1] += -16.0*M_PI
/(3.0*rc9
);
2245 else if ((fr
->vdwtype
== evdwCUT
) || (fr
->vdwtype
== evdwUSER
))
2247 if (fr
->vdwtype
== evdwUSER
&& fplog
)
2250 "WARNING: using dispersion correction with user tables\n");
2252 rc3
= fr
->rvdw
*fr
->rvdw
*fr
->rvdw
;
2254 /* Contribution beyond the cut-off */
2255 eners
[0] += -4.0*M_PI
/(3.0*rc3
);
2256 eners
[1] += 4.0*M_PI
/(9.0*rc9
);
2257 if (fr
->vdw_modifier
== eintmodPOTSHIFT
)
2259 /* Contribution within the cut-off */
2260 eners
[0] += -4.0*M_PI
/(3.0*rc3
);
2261 eners
[1] += 4.0*M_PI
/(3.0*rc9
);
2263 /* Contribution beyond the cut-off */
2264 virs
[0] += 8.0*M_PI
/rc3
;
2265 virs
[1] += -16.0*M_PI
/(3.0*rc9
);
2270 "Dispersion correction is not implemented for vdw-type = %s",
2271 evdw_names
[fr
->vdwtype
]);
2273 fr
->enerdiffsix
= eners
[0];
2274 fr
->enerdifftwelve
= eners
[1];
2275 /* The 0.5 is due to the Gromacs definition of the virial */
2276 fr
->virdiffsix
= 0.5*virs
[0];
2277 fr
->virdifftwelve
= 0.5*virs
[1];
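
/* Editorial note (not in the original source): a worked instance of the plain
 * cut-off tail term used in the evdwCUT branch above. For a homogeneous
 * fluid, the C6 energy correction per unit pair density is
 *   int_rc^inf 4*pi*r^2 * (-1/r^6) dr = -4*pi/(3*rc^3),
 * which is exactly eners[0] += -4.0*M_PI/(3.0*rc3); the r^-12 term gives
 * +4*pi/(9*rc^9) the same way. A tiny numeric check with hypothetical values:
 */
#if 0   /* illustrative only, not part of the build */
#include <math.h>
#include <stdio.h>

int main(void)
{
    double rc  = 1.0;                    /* cut-off in nm */
    double rc3 = rc*rc*rc, rc9 = rc3*rc3*rc3;

    printf("C6 tail  %g\n", -4.0*M_PI/(3.0*rc3)); /* -4.18879... */
    printf("C12 tail %g\n",  4.0*M_PI/(9.0*rc9)); /*  1.39626... */
    return 0;
}
#endif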
void calc_dispcorr(FILE *fplog, t_inputrec *ir, t_forcerec *fr,
                   gmx_large_int_t step, int natoms,
                   matrix box, real lambda, tensor pres, tensor virial,
                   real *prescorr, real *enercorr, real *dvdlcorr)
{
    gmx_bool bCorrAll, bCorrPres;
    real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
    int      m;

    *prescorr = 0;
    *enercorr = 0;
    *dvdlcorr = 0;

    clear_mat(virial);
    clear_mat(pres);

    if (ir->eDispCorr != edispcNO)
    {
        bCorrAll  = (ir->eDispCorr == edispcAllEner ||
                     ir->eDispCorr == edispcAllEnerPres);
        bCorrPres = (ir->eDispCorr == edispcEnerPres ||
                     ir->eDispCorr == edispcAllEnerPres);

        invvol = 1/det(box);
        if (fr->n_tpi)
        {
            /* Only correct for the interactions with the inserted molecule */
            dens   = (natoms - fr->n_tpi)*invvol;
            ninter = fr->n_tpi;
        }
        else
        {
            dens   = natoms*invvol;
            ninter = 0.5*natoms;
        }

        if (ir->efep == efepNO)
        {
            avcsix    = fr->avcsix[0];
            avctwelve = fr->avctwelve[0];
        }
        else
        {
            avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
            avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
        }

        enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
        *enercorr += avcsix*enerdiff;
        dvdlambda  = 0.0;
        if (ir->efep != efepNO)
        {
            dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
        }
        if (bCorrAll)
        {
            enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
            *enercorr += avctwelve*enerdiff;
            if (fr->efep != efepNO)
            {
                dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
            }
        }

        if (bCorrPres)
        {
            svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
            if (ir->eDispCorr == edispcAllEnerPres)
            {
                svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
            }
            /* The factor 2 is because of the Gromacs virial definition */
            spres = -2.0*invvol*svir*PRESFAC;

            for (m = 0; m < DIM; m++)
            {
                virial[m][m] += svir;
                pres[m][m]   += spres;
            }
            *prescorr += spres;
        }

        /* Can't currently control when it prints; for now, just print when debugging */
        if (debug)
        {
            if (bCorrAll)
            {
                fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
                        avcsix, avctwelve);
            }
            if (bCorrPres)
            {
                fprintf(debug,
                        "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
                        *enercorr, spres, svir);
            }
            else
            {
                fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
            }
        }

        if (fr->bSepDVDL && do_per_step(step, ir->nstlog))
        {
            gmx_print_sepdvdl(fplog, "Dispersion correction", *enercorr, dvdlambda);
        }
        if (fr->efep != efepNO)
        {
            *dvdlcorr += dvdlambda;
        }
    }
}
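
/* Illustrative sketch (not part of the original file): with free-energy
 * perturbation enabled, calc_dispcorr above mixes the end-state average
 * C6/C12 coefficients linearly in lambda, so the lambda derivative used
 * for dvdlambda is simply the difference of the end-state values. The
 * helper below is hypothetical and only documents that relation.
 */
static real example_mix_coefficient(real lambda, const real c[2])
{
    /* value:      (1 - lambda)*c[0] + lambda*c[1]
     * derivative: c[1] - c[0], the factor applied to enerdiff above */
    return (1 - lambda)*c[0] + lambda*c[1];
}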
void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
                  t_graph *graph, rvec x[])
{
    if (fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }
    calc_shifts(box, fr->shift_vec);
    if (graph)
    {
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 1", graph);
        }
        shift_self(graph, box, x);
        /* By doing an extra mk_mshift the molecules that are broken
         * because they were e.g. imported from another software
         * will be made whole again. Such are the healing powers
         * of GROMACS.
         */
        mk_mshift(fplog, graph, fr->ePBC, box, x);
        if (gmx_debug_at)
        {
            p_graph(debug, "do_pbc_first 2", graph);
        }
    }
    if (fplog)
    {
        fprintf(fplog, "Done rmpbc\n");
    }
}
static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                            gmx_mtop_t *mtop, rvec x[],
                            gmx_bool bFirst)
{
    t_graph        *graph;
    int             mb, as, mol;
    gmx_molblock_t *molb;

    if (bFirst && fplog)
    {
        fprintf(fplog, "Removing pbc first time\n");
    }

    snew(graph, 1);
    as = 0;
    for (mb = 0; mb < mtop->nmolblock; mb++)
    {
        molb = &mtop->molblock[mb];
        if (molb->natoms_mol == 1 ||
            (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
        {
            /* Just one atom or charge group in the molecule, no PBC required */
            as += molb->nmol*molb->natoms_mol;
        }
        else
        {
            /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
            mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
                           0, molb->natoms_mol, FALSE, FALSE, graph);

            for (mol = 0; mol < molb->nmol; mol++)
            {
                mk_mshift(fplog, graph, ePBC, box, x+as);

                shift_self(graph, box, x+as);
                /* The molecule is whole now.
                 * We don't need the second mk_mshift call as in do_pbc_first,
                 * since we no longer need this graph.
                 */

                as += molb->natoms_mol;
            }
            done_graph(graph);
        }
    }
    sfree(graph);
}

void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
                       gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
}

void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
                 gmx_mtop_t *mtop, rvec x[])
{
    low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
}
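
/* Usage sketch (hypothetical helper, not part of the original file):
 * at setup time even a molecule consisting of a single charge group may
 * arrive broken, e.g. imported from other software, so the "first"
 * variant processes every multi-atom molecule; once the system has been
 * made whole, the plain variant can skip single-charge-group molecules.
 */
static void example_make_system_whole(FILE *fplog, int ePBC, matrix box,
                                      gmx_mtop_t *mtop, rvec x[],
                                      gmx_bool bFirstTime)
{
    if (bFirstTime)
    {
        do_pbc_first_mtop(fplog, ePBC, box, mtop, x); /* thorough pass */
    }
    else
    {
        do_pbc_mtop(fplog, ePBC, box, mtop, x);       /* cheaper pass */
    }
}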
void finish_run(FILE *fplog, t_commrec *cr,
                t_inputrec *inputrec,
                t_nrnb nrnb[], gmx_wallcycle_t wcycle,
                gmx_walltime_accounting_t walltime_accounting,
                wallclock_gpu_t *gputimes,
                gmx_bool bWriteStat)
{
    t_nrnb *nrnb_tot = NULL;
    real    delta_t;
    double  nbfs, mflop;
    double  elapsed_time,
            elapsed_time_over_all_ranks,
            elapsed_time_over_all_threads,
            elapsed_time_over_all_threads_over_all_ranks;
    wallcycle_sum(cr, wcycle);

    if (cr->nnodes > 1)
    {
        snew(nrnb_tot, 1);
#ifdef GMX_MPI
        MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
#endif
    }
    else
    {
        nrnb_tot = nrnb;
    }

    elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
    elapsed_time_over_all_ranks                  = elapsed_time;
    elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
    elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
#ifdef GMX_MPI
    if (cr->nnodes > 1)
    {
        /* reduce elapsed_time over all MPI ranks in the current simulation */
        MPI_Allreduce(&elapsed_time,
                      &elapsed_time_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
        elapsed_time_over_all_ranks /= cr->nnodes;
        /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
         * current simulation. */
        MPI_Allreduce(&elapsed_time_over_all_threads,
                      &elapsed_time_over_all_threads_over_all_ranks,
                      1, MPI_DOUBLE, MPI_SUM,
                      cr->mpi_comm_mysim);
    }
#endif

    if (SIMMASTER(cr))
    {
        print_flop(fplog, nrnb_tot, &nbfs, &mflop);
    }
    if (cr->nnodes > 1)
    {
        sfree(nrnb_tot);
    }

    if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
    {
        print_dd_statistics(cr, inputrec, fplog);
    }

#ifdef GMX_MPI
    if (PARTDECOMP(cr))
    {
        if (MASTER(cr))
        {
            t_nrnb     *nrnb_all;
            int         s;
            MPI_Status  stat;

            snew(nrnb_all, cr->nnodes);
            nrnb_all[0] = *nrnb;
            for (s = 1; s < cr->nnodes; s++)
            {
                MPI_Recv(nrnb_all[s].n, eNRNB, MPI_DOUBLE, s, 0,
                         cr->mpi_comm_mysim, &stat);
            }
            pr_load(fplog, cr, nrnb_all);
            sfree(nrnb_all);
        }
        else
        {
            MPI_Send(nrnb->n, eNRNB, MPI_DOUBLE, MASTERRANK(cr), 0,
                     cr->mpi_comm_mysim);
        }
    }
#endif

    if (SIMMASTER(cr))
    {
        wallcycle_print(fplog, cr->nnodes, cr->npmenodes,
                        elapsed_time_over_all_ranks,
                        wcycle, gputimes);

        if (EI_DYNAMICS(inputrec->eI))
        {
            delta_t = inputrec->delta_t;
        }
        else
        {
            delta_t = 0;
        }

        if (fplog)
        {
            print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
        if (bWriteStat)
        {
            print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
                       elapsed_time_over_all_ranks,
                       walltime_accounting_get_nsteps_done(walltime_accounting),
                       delta_t, nbfs, mflop);
        }
    }
}
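
/* Illustrative sketch (not part of the original file): the elapsed-time
 * handling in finish_run is the common "sum with MPI_Allreduce, then
 * divide by the rank count" idiom for averaging one double over a
 * communicator. The helper below is hypothetical.
 */
#ifdef GMX_MPI
static double example_average_over_ranks(double local, MPI_Comm comm)
{
    double sum    = local;
    int    nranks = 1;

    MPI_Allreduce(&local, &sum, 1, MPI_DOUBLE, MPI_SUM, comm);
    MPI_Comm_size(comm, &nranks);

    return sum/nranks;
}
#endif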
extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
{
    /* this function works, but could probably use a logic rewrite to keep all the different
       types of efep straight. */

    int       i;
    t_lambda *fep = ir->fepvals;

    if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
    {
        for (i = 0; i < efptNR; i++)
        {
            lambda[i] = 0.0;
            if (lam0)
            {
                lam0[i] = 0.0;
            }
        }
        return;
    }
    else
    {
        *fep_state = fep->init_fep_state; /* this might overwrite the checkpoint
                                             if checkpoint is set -- a kludge is in for now
                                             to prevent this.*/
        for (i = 0; i < efptNR; i++)
        {
            /* overwrite lambda state with init_lambda for now for backwards compatibility */
            if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
            {
                lambda[i] = fep->init_lambda;
                if (lam0)
                {
                    lam0[i] = lambda[i];
                }
            }
            else
            {
                lambda[i] = fep->all_lambda[i][*fep_state];
                if (lam0)
                {
                    lam0[i] = lambda[i];
                }
            }
        }
        if (ir->bSimTemp)
        {
            /* need to rescale control temperatures to match current state */
            for (i = 0; i < ir->opts.ngtc; i++)
            {
                if (ir->opts.ref_t[i] > 0)
                {
                    ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
                }
            }
        }
    }

    /* Send to the log the information on the current lambdas */
    if (fplog != NULL)
    {
        fprintf(fplog, "Initial vector of lambda components:[ ");
        for (i = 0; i < efptNR; i++)
        {
            fprintf(fplog, "%10.4f ", lambda[i]);
        }
        fprintf(fplog, "]\n");
    }
    return;
}
void init_md(FILE *fplog,
             t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
             double *t, double *t0,
             real *lambda, int *fep_state, double *lam0,
             t_nrnb *nrnb, gmx_mtop_t *mtop,
             gmx_update_t *upd,
             int nfile, const t_filenm fnm[],
             gmx_mdoutf_t **outf, t_mdebin **mdebin,
             tensor force_vir, tensor shake_vir, rvec mu_tot,
             gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags)
{
    int i;

    /* Initial values */
    *t = *t0 = ir->init_t;

    *bSimAnn = FALSE;
    for (i = 0; i < ir->opts.ngtc; i++)
    {
        /* set bSimAnn if any group is being annealed */
        if (ir->opts.annealing[i] != eannNO)
        {
            *bSimAnn = TRUE;
        }
    }
    if (*bSimAnn)
    {
        update_annealing_target_temp(&(ir->opts), ir->init_t);
    }

    /* Initialize lambda variables */
    initialize_lambdas(fplog, ir, fep_state, lambda, lam0);

    if (upd)
    {
        *upd = init_update(ir);
    }

    if (vcm != NULL)
    {
        *vcm = init_vcm(fplog, &mtop->groups, ir);
    }

    if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
    {
        if (ir->etc == etcBERENDSEN)
        {
            please_cite(fplog, "Berendsen84a");
        }
        if (ir->etc == etcVRESCALE)
        {
            please_cite(fplog, "Bussi2007a");
        }
    }

    init_nrnb(nrnb);

    if (nfile != -1)
    {
        *outf = init_mdoutf(nfile, fnm, Flags, cr, ir, oenv);

        *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
                              mtop, ir, (*outf)->fp_dhdl);
    }

    if (ir->bAdress)
    {
        please_cite(fplog, "Fritsch12");
        please_cite(fplog, "Junghans10");
    }
    /* Initiate variables */
    clear_mat(force_vir);
    clear_mat(shake_vir
);