A freeze group can now be allowed to move rigidly in some dimension by using "freezed...
[gromacs/rigid-bodies.git] / src / mdlib / sim_util.c
blob ffa5b81e431d55a634a1479278304bb40a3d4385
1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
3 *
4 * This source code is part of
5 *
6 * G R O M A C S
7 *
8 * GROningen MAchine for Chemical Simulations
9 *
10 * VERSION 3.2.0
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
33 * And Hey:
34 * GROwing Monsters And Cloning Shrimps
36 #ifdef HAVE_CONFIG_H
37 #include <config.h>
38 #endif
40 #ifdef GMX_CRAY_XT3
41 #include<catamount/dclock.h>
42 #endif
45 #include <stdio.h>
46 #include <time.h>
47 #ifdef HAVE_SYS_TIME_H
48 #include <sys/time.h>
49 #endif
50 #include <math.h>
51 #include "typedefs.h"
52 #include "string2.h"
53 #include "gmxfio.h"
54 #include "smalloc.h"
55 #include "names.h"
56 #include "confio.h"
57 #include "mvdata.h"
58 #include "txtdump.h"
59 #include "pbc.h"
60 #include "chargegroup.h"
61 #include "vec.h"
62 #include "time.h"
63 #include "nrnb.h"
64 #include "mshift.h"
65 #include "mdrun.h"
66 #include "update.h"
67 #include "physics.h"
68 #include "main.h"
69 #include "mdatoms.h"
70 #include "force.h"
71 #include "bondf.h"
72 #include "pme.h"
73 #include "pppm.h"
74 #include "disre.h"
75 #include "orires.h"
76 #include "network.h"
77 #include "calcmu.h"
78 #include "constr.h"
79 #include "xvgr.h"
80 #include "trnio.h"
81 #include "xtcio.h"
82 #include "copyrite.h"
84 #include "mpelogging.h"
85 #include "domdec.h"
86 #include "partdec.h"
87 #include "gmx_wallcycle.h"
88 #include "genborn.h"
90 #ifdef GMX_LIB_MPI
91 #include <mpi.h>
92 #endif
93 #ifdef GMX_THREADS
94 #include "tmpi.h"
95 #endif
97 #include "qmmm.h"
99 #if 0
100 typedef struct gmx_timeprint {
102 } t_gmx_timeprint;
103 #endif
105 /* Portable version of ctime_r implemented in src/gmxlib/string2.c, but we do not want it declared in public installed headers */
106 char *
107 gmx_ctime_r(const time_t *clock,char *buf, int n);
110 double
111 gmx_gettime()
113 #ifdef HAVE_GETTIMEOFDAY
114 struct timeval t;
115 double seconds;
117 gettimeofday(&t,NULL);
119 seconds = (double) t.tv_sec + 1e-6*(double)t.tv_usec;
121 return seconds;
122 #else
123 double seconds;
125 seconds = time(NULL);
127 return seconds;
128 #endif
132 #define difftime(end,start) ((double)(end)-(double)(start))
134 void print_time(FILE *out,gmx_runtime_t *runtime,gmx_large_int_t step,
135 t_inputrec *ir, t_commrec *cr)
137 time_t finish;
138 char timebuf[STRLEN];
139 double dt;
140 char buf[48];
142 #ifndef GMX_THREADS
143 if (!PAR(cr))
144 #endif
146 fprintf(out,"\r");
148 fprintf(out,"step %s",gmx_step_str(step,buf));
149 if ((step >= ir->nstlist))
151 if ((ir->nstlist == 0) || ((step % ir->nstlist) == 0))
/* We have done a full cycle, let's update time_per_step */
154 runtime->last = gmx_gettime();
155 dt = difftime(runtime->last,runtime->real);
156 runtime->time_per_step = dt/(step - ir->init_step + 1);
158 dt = (ir->nsteps + ir->init_step - step)*runtime->time_per_step;
160 if (ir->nsteps >= 0)
162 if (dt >= 300)
164 finish = (time_t) (runtime->last + dt);
165 gmx_ctime_r(&finish,timebuf,STRLEN);
166 sprintf(buf,"%s",timebuf);
167 buf[strlen(buf)-1]='\0';
168 fprintf(out,", will finish %s",buf);
170 else
171 fprintf(out,", remaining runtime: %5d s ",(int)dt);
173 else
175 fprintf(out," performance: %.1f ns/day ",
176 ir->delta_t/1000*24*60*60/runtime->time_per_step);
179 #ifndef GMX_THREADS
180 if (PAR(cr))
182 fprintf(out,"\n");
184 #endif
186 fflush(out);
189 #ifdef NO_CLOCK
190 #define clock() -1
191 #endif
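/* Illustrative sketch (not part of GROMACS): the ETA and performance figures
 * printed by print_time() above follow from simple proportionality.  With
 * time_per_step the measured wall-clock seconds per MD step and delta_t the
 * time step in ps, the remaining runtime and the ns/day rate are
 *   remaining  = (nsteps + init_step - step) * time_per_step
 *   ns_per_day = (delta_t / 1000.0) * 86400.0 / time_per_step
 * The helper below (a hypothetical name, not used elsewhere) restates that
 * arithmetic with plain doubles.
 */
static double example_ns_per_day(double delta_t_ps, double time_per_step_s)
{
    return delta_t_ps/1000.0*86400.0/time_per_step_s;
}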
193 static double set_proctime(gmx_runtime_t *runtime)
195 double diff;
196 #ifdef GMX_CRAY_XT3
197 double prev;
199 prev = runtime->proc;
200 runtime->proc = dclock();
202 diff = runtime->proc - prev;
203 #else
204 clock_t prev;
206 prev = runtime->proc;
207 runtime->proc = clock();
209 diff = (double)(runtime->proc - prev)/(double)CLOCKS_PER_SEC;
210 #endif
211 if (diff < 0)
213 /* The counter has probably looped, ignore this data */
214 diff = 0;
217 return diff;
220 void runtime_start(gmx_runtime_t *runtime)
222 runtime->real = gmx_gettime();
223 runtime->proc = 0;
224 set_proctime(runtime);
225 runtime->realtime = 0;
226 runtime->proctime = 0;
227 runtime->last = 0;
228 runtime->time_per_step = 0;
231 void runtime_end(gmx_runtime_t *runtime)
233 double now;
235 now = gmx_gettime();
237 runtime->proctime += set_proctime(runtime);
238 runtime->realtime = now - runtime->real;
239 runtime->real = now;
242 void runtime_upd_proc(gmx_runtime_t *runtime)
244 runtime->proctime += set_proctime(runtime);
247 void print_date_and_time(FILE *fplog,int nodeid,const char *title,
248 const gmx_runtime_t *runtime)
250 int i;
251 char timebuf[STRLEN];
252 char time_string[STRLEN];
253 time_t tmptime;
255 if (fplog)
257 if (runtime != NULL)
259 tmptime = (time_t) runtime->real;
260 gmx_ctime_r(&tmptime,timebuf,STRLEN);
262 else
264 tmptime = (time_t) gmx_gettime();
265 gmx_ctime_r(&tmptime,timebuf,STRLEN);
267 for(i=0; timebuf[i]>=' '; i++)
269 time_string[i]=timebuf[i];
271 time_string[i]='\0';
273 fprintf(fplog,"%s on node %d %s\n",title,nodeid,time_string);
277 static void sum_forces(int start,int end,rvec f[],rvec flr[])
279 int i;
281 if (gmx_debug_at) {
282 pr_rvecs(debug,0,"fsr",f+start,end-start);
283 pr_rvecs(debug,0,"flr",flr+start,end-start);
285 for(i=start; (i<end); i++)
286 rvec_inc(f[i],flr[i]);
290 * calc_f_el calculates forces due to an electric field.
292 * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
294 * Et[] contains the parameters for the time dependent
295 * part of the field (not yet used).
296 * Ex[] contains the parameters for
297 * the spatial dependent part of the field. You can have cool periodic
298 * fields in principle, but only a constant field is supported
299 * now.
300 * The function should return the energy due to the electric field
301 * (if any) but for now returns 0.
303 * WARNING:
304 * There can be problems with the virial.
305 * Since the field is not self-consistent this is unavoidable.
306 * For neutral molecules the virial is correct within this approximation.
307 * For neutral systems with many charged molecules the error is small.
308 * But for systems with a net charge or a few charged molecules
309 * the error can be significant when the field is high.
* Solution: implement a self-consistent electric field into PME.
312 static void calc_f_el(FILE *fp,int start,int homenr,
313 real charge[],rvec x[],rvec f[],
314 t_cosines Ex[],t_cosines Et[],double t)
316 rvec Ext;
317 real t0;
318 int i,m;
320 for(m=0; (m<DIM); m++)
322 if (Et[m].n > 0)
324 if (Et[m].n == 3)
326 t0 = Et[m].a[1];
327 Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
329 else
331 Ext[m] = cos(Et[m].a[0]*t);
334 else
336 Ext[m] = 1.0;
338 if (Ex[m].n > 0)
340 /* Convert the field strength from V/nm to MD-units */
341 Ext[m] *= Ex[m].a[0]*FIELDFAC;
342 for(i=start; (i<start+homenr); i++)
343 f[i][m] += charge[i]*Ext[m];
345 else
347 Ext[m] = 0;
350 if (fp != NULL)
352 fprintf(fp,"%10g %10g %10g %10g #FIELD\n",t,
353 Ext[XX]/FIELDFAC,Ext[YY]/FIELDFAC,Ext[ZZ]/FIELDFAC);
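/* Illustrative sketch (not part of GROMACS): with the pulse parameters used
 * above, a[0] = omega, a[1] = t0 and a[2] = sigma, the time-dependent field
 * in dimension m is a Gaussian-enveloped cosine,
 *   E_m(t) = E0 * cos(omega*(t - t0)) * exp(-(t - t0)^2 / (2*sigma^2))
 * and every atom simply feels f_m = q_i * E_m(t).  The helper below (a
 * hypothetical name) evaluates the envelope with plain doubles; the
 * V/nm -> MD-units conversion (FIELDFAC) is left out.
 */
static double example_field_pulse(double E0, double omega, double t0,
                                  double sigma, double t)
{
    double dt = t - t0;

    return E0*cos(omega*dt)*exp(-dt*dt/(2.0*sigma*sigma));
}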
357 static void calc_virial(FILE *fplog,int start,int homenr,rvec x[],rvec f[],
358 tensor vir_part,t_graph *graph,matrix box,
359 t_nrnb *nrnb,const t_forcerec *fr,int ePBC)
361 int i,j;
362 tensor virtest;
364 /* The short-range virial from surrounding boxes */
365 clear_mat(vir_part);
366 calc_vir(fplog,SHIFTS,fr->shift_vec,fr->fshift,vir_part,ePBC==epbcSCREW,box);
367 inc_nrnb(nrnb,eNR_VIRIAL,SHIFTS);
369 /* Calculate partial virial, for local atoms only, based on short range.
370 * Total virial is computed in global_stat, called from do_md
372 f_calc_vir(fplog,start,start+homenr,x,f,vir_part,graph,box);
373 inc_nrnb(nrnb,eNR_VIRIAL,homenr);
375 /* Add position restraint contribution */
376 for(i=0; i<DIM; i++) {
377 vir_part[i][i] += fr->vir_diag_posres[i];
380 /* Add wall contribution */
381 for(i=0; i<DIM; i++) {
382 vir_part[i][ZZ] += fr->vir_wall_z[i];
385 if (debug)
386 pr_rvecs(debug,0,"vir_part",vir_part,DIM);
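/* Illustrative sketch (not part of GROMACS): calc_vir and f_calc_vir above
 * both accumulate outer products of positions and forces; in the GROMACS
 * convention the virial is
 *   Xi = -1/2 * sum_i x_i (x) f_i
 * to which the position-restraint diagonal and the wall ZZ column are then
 * added.  The helper below (a hypothetical name) shows the bare accumulation
 * for n atoms, assuming an x[i][d] / f[i][d] coordinate layout.
 */
static void example_accumulate_virial(int n, double x[][3], double f[][3],
                                      double vir[3][3])
{
    int i, d, e;

    for (i = 0; i < n; i++)
    {
        for (d = 0; d < 3; d++)
        {
            for (e = 0; e < 3; e++)
            {
                vir[d][e] -= 0.5*x[i][d]*f[i][e];
            }
        }
    }
}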
389 static void print_large_forces(FILE *fp,t_mdatoms *md,t_commrec *cr,
390 gmx_large_int_t step,real pforce,rvec *x,rvec *f)
392 int i;
393 real pf2,fn2;
394 char buf[STEPSTRSIZE];
396 pf2 = sqr(pforce);
397 for(i=md->start; i<md->start+md->homenr; i++) {
398 fn2 = norm2(f[i]);
399 /* We also catch NAN, if the compiler does not optimize this away. */
400 if (fn2 >= pf2 || fn2 != fn2) {
401 fprintf(fp,"step %s atom %6d x %8.3f %8.3f %8.3f force %12.5e\n",
402 gmx_step_str(step,buf),
403 ddglatnr(cr->dd,i),x[i][XX],x[i][YY],x[i][ZZ],sqrt(fn2));
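/* Illustrative note (not part of GROMACS): the "fn2 != fn2" test above uses
 * the IEEE-754 rule that NaN compares unequal to itself, so atoms whose force
 * has become NaN are reported alongside those exceeding the pforce threshold
 * (unless an aggressive compiler optimizes the comparison away).  A minimal
 * standalone version of the same check:
 */
static int example_force_too_large(double fx, double fy, double fz,
                                   double pforce)
{
    double fn2 = fx*fx + fy*fy + fz*fz;

    /* second clause catches NaN */
    return (fn2 >= pforce*pforce) || (fn2 != fn2);
}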
408 void do_force(FILE *fplog,t_commrec *cr,
409 t_inputrec *inputrec,
410 gmx_large_int_t step,t_nrnb *nrnb,gmx_wallcycle_t wcycle,
411 gmx_localtop_t *top,
412 gmx_mtop_t *mtop,
413 gmx_groups_t *groups,
414 matrix box,rvec x[],history_t *hist,
415 rvec f[],
416 tensor vir_force,
417 t_mdatoms *mdatoms,
418 gmx_enerdata_t *enerd,t_fcdata *fcd,
419 real lambda,t_graph *graph,
420 t_forcerec *fr,gmx_vsite_t *vsite,rvec mu_tot,
421 double t,FILE *field,gmx_edsam_t ed,
422 gmx_bool bBornRadii,
423 int flags)
425 int cg0,cg1,i,j;
426 int start,homenr;
427 double mu[2*DIM];
428 gmx_bool bSepDVDL,bStateChanged,bNS,bFillGrid,bCalcCGCM,bBS;
429 gmx_bool bDoLongRange,bDoForces,bSepLRF;
430 matrix boxs;
431 real e,v,dvdl;
432 t_pbc pbc;
433 float cycles_ppdpme,cycles_pme,cycles_seppme,cycles_force;
435 start = mdatoms->start;
436 homenr = mdatoms->homenr;
438 bSepDVDL = (fr->bSepDVDL && do_per_step(step,inputrec->nstlog));
440 clear_mat(vir_force);
442 if (PARTDECOMP(cr))
444 pd_cg_range(cr,&cg0,&cg1);
446 else
448 cg0 = 0;
449 if (DOMAINDECOMP(cr))
451 cg1 = cr->dd->ncg_tot;
453 else
455 cg1 = top->cgs.nr;
457 if (fr->n_tpi > 0)
459 cg1--;
463 bStateChanged = (flags & GMX_FORCE_STATECHANGED);
464 bNS = (flags & GMX_FORCE_NS) && (fr->bAllvsAll==FALSE);
465 bFillGrid = (bNS && bStateChanged);
466 bCalcCGCM = (bFillGrid && !DOMAINDECOMP(cr));
467 bDoLongRange = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DOLR));
468 bDoForces = (flags & GMX_FORCE_FORCES);
469 bSepLRF = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
471 if (bStateChanged)
473 update_forcerec(fplog,fr,box);
475 /* Calculate total (local) dipole moment in a temporary common array.
476 * This makes it possible to sum them over nodes faster.
478 calc_mu(start,homenr,
479 x,mdatoms->chargeA,mdatoms->chargeB,mdatoms->nChargePerturbed,
480 mu,mu+DIM);
483 if (fr->ePBC != epbcNONE) {
484 /* Compute shift vectors every step,
485 * because of pressure coupling or box deformation!
487 if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
488 calc_shifts(box,fr->shift_vec);
490 if (bCalcCGCM) {
491 put_charge_groups_in_box(fplog,cg0,cg1,fr->ePBC,box,
492 &(top->cgs),x,fr->cg_cm);
493 inc_nrnb(nrnb,eNR_CGCM,homenr);
494 inc_nrnb(nrnb,eNR_RESETX,cg1-cg0);
496 else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph) {
497 unshift_self(graph,box,x);
500 else if (bCalcCGCM) {
501 calc_cgcm(fplog,cg0,cg1,&(top->cgs),x,fr->cg_cm);
502 inc_nrnb(nrnb,eNR_CGCM,homenr);
505 if (bCalcCGCM) {
506 if (PAR(cr)) {
507 move_cgcm(fplog,cr,fr->cg_cm);
509 if (gmx_debug_at)
510 pr_rvecs(debug,0,"cgcm",fr->cg_cm,top->cgs.nr);
513 #ifdef GMX_MPI
514 if (!(cr->duty & DUTY_PME)) {
515 /* Send particle coordinates to the pme nodes.
516 * Since this is only implemented for domain decomposition
517 * and domain decomposition does not use the graph,
518 * we do not need to worry about shifting.
521 wallcycle_start(wcycle,ewcPP_PMESENDX);
522 GMX_MPE_LOG(ev_send_coordinates_start);
524 bBS = (inputrec->nwall == 2);
525 if (bBS) {
526 copy_mat(box,boxs);
527 svmul(inputrec->wall_ewald_zfac,boxs[ZZ],boxs[ZZ]);
530 gmx_pme_send_x(cr,bBS ? boxs : box,x,
531 mdatoms->nChargePerturbed,lambda,
532 ( flags & GMX_FORCE_VIRIAL),step);
534 GMX_MPE_LOG(ev_send_coordinates_finish);
535 wallcycle_stop(wcycle,ewcPP_PMESENDX);
537 #endif /* GMX_MPI */
539 /* Communicate coordinates and sum dipole if necessary */
540 if (PAR(cr))
542 wallcycle_start(wcycle,ewcMOVEX);
543 if (DOMAINDECOMP(cr))
545 dd_move_x(cr->dd,box,x);
547 else
549 move_x(fplog,cr,GMX_LEFT,GMX_RIGHT,x,nrnb);
551 /* When we don't need the total dipole we sum it in global_stat */
552 if (bStateChanged && NEED_MUTOT(*inputrec))
554 gmx_sumd(2*DIM,mu,cr);
556 wallcycle_stop(wcycle,ewcMOVEX);
558 if (bStateChanged)
560 for(i=0; i<2; i++)
562 for(j=0;j<DIM;j++)
564 fr->mu_tot[i][j] = mu[i*DIM + j];
568 if (fr->efep == efepNO)
570 copy_rvec(fr->mu_tot[0],mu_tot);
572 else
574 for(j=0; j<DIM; j++)
576 mu_tot[j] =
577 (1.0 - lambda)*fr->mu_tot[0][j] + lambda*fr->mu_tot[1][j];
581 /* Reset energies */
582 reset_enerdata(&(inputrec->opts),fr,bNS,enerd,MASTER(cr));
583 clear_rvecs(SHIFTS,fr->fshift);
585 if (bNS)
587 wallcycle_start(wcycle,ewcNS);
589 if (graph && bStateChanged)
591 /* Calculate intramolecular shift vectors to make molecules whole */
592 mk_mshift(fplog,graph,fr->ePBC,box,x);
595 /* Reset long range forces if necessary */
596 if (fr->bTwinRange)
598 /* Reset the (long-range) forces if necessary */
599 clear_rvecs(fr->natoms_force_constr,bSepLRF ? fr->f_twin : f);
602 /* Do the actual neighbour searching and if twin range electrostatics
603 * also do the calculation of long range forces and energies.
605 dvdl = 0;
606 ns(fplog,fr,x,box,
607 groups,&(inputrec->opts),top,mdatoms,
608 cr,nrnb,lambda,&dvdl,&enerd->grpp,bFillGrid,
609 bDoLongRange,bDoForces,bSepLRF ? fr->f_twin : f);
610 if (bSepDVDL)
612 fprintf(fplog,sepdvdlformat,"LR non-bonded",0.0,dvdl);
614 enerd->dvdl_lin += dvdl;
616 wallcycle_stop(wcycle,ewcNS);
619 if (inputrec->implicit_solvent && bNS)
621 make_gb_nblist(cr,inputrec->gb_algorithm,inputrec->rlist,
622 x,box,fr,&top->idef,graph,fr->born);
625 if (DOMAINDECOMP(cr))
627 if (!(cr->duty & DUTY_PME))
629 wallcycle_start(wcycle,ewcPPDURINGPME);
630 dd_force_flop_start(cr->dd,nrnb);
634 /* Start the force cycle counter.
635 * This counter is stopped in do_forcelow_level.
636 * No parallel communication should occur while this counter is running,
637 * since that will interfere with the dynamic load balancing.
639 wallcycle_start(wcycle,ewcFORCE);
641 if (bDoForces)
643 /* Reset forces for which the virial is calculated separately:
644 * PME/Ewald forces if necessary */
645 if (fr->bF_NoVirSum)
647 if (flags & GMX_FORCE_VIRIAL)
649 fr->f_novirsum = fr->f_novirsum_alloc;
650 GMX_BARRIER(cr->mpi_comm_mygroup);
651 if (fr->bDomDec)
653 clear_rvecs(fr->f_novirsum_n,fr->f_novirsum);
655 else
657 clear_rvecs(homenr,fr->f_novirsum+start);
659 GMX_BARRIER(cr->mpi_comm_mygroup);
661 else
663 /* We are not calculating the pressure so we do not need
664 * a separate array for forces that do not contribute
665 * to the pressure.
667 fr->f_novirsum = f;
671 if (bSepLRF)
673 /* Add the long range forces to the short range forces */
674 for(i=0; i<fr->natoms_force_constr; i++)
676 copy_rvec(fr->f_twin[i],f[i]);
679 else if (!(fr->bTwinRange && bNS))
681 /* Clear the short-range forces */
682 clear_rvecs(fr->natoms_force_constr,f);
685 clear_rvec(fr->vir_diag_posres);
687 GMX_BARRIER(cr->mpi_comm_mygroup);
689 if (inputrec->ePull == epullCONSTRAINT)
691 clear_pull_forces(inputrec->pull);
694 /* update QMMMrec, if necessary */
695 if(fr->bQMMM)
697 update_QMMMrec(cr,fr,x,mdatoms,box,top);
700 if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
702 /* Position restraints always require full pbc */
703 set_pbc(&pbc,inputrec->ePBC,box);
704 v = posres(top->idef.il[F_POSRES].nr,top->idef.il[F_POSRES].iatoms,
705 top->idef.iparams_posres,
706 (const rvec*)x,fr->f_novirsum,fr->vir_diag_posres,
707 inputrec->ePBC==epbcNONE ? NULL : &pbc,lambda,&dvdl,
708 fr->rc_scaling,fr->ePBC,fr->posres_com,fr->posres_comB);
709 if (bSepDVDL)
711 fprintf(fplog,sepdvdlformat,
712 interaction_function[F_POSRES].longname,v,dvdl);
714 enerd->term[F_POSRES] += v;
715 /* This linear lambda dependence assumption is only correct
716 * when only k depends on lambda,
717 * not when the reference position depends on lambda.
718 * grompp checks for this.
720 enerd->dvdl_lin += dvdl;
721 inc_nrnb(nrnb,eNR_POSRES,top->idef.il[F_POSRES].nr/2);
724 /* Compute the bonded and non-bonded energies and optionally forces */
725 do_force_lowlevel(fplog,step,fr,inputrec,&(top->idef),
726 cr,nrnb,wcycle,mdatoms,&(inputrec->opts),
727 x,hist,f,enerd,fcd,mtop,top,fr->born,
728 &(top->atomtypes),bBornRadii,box,
729 lambda,graph,&(top->excls),fr->mu_tot,
730 flags,&cycles_pme);
732 cycles_force = wallcycle_stop(wcycle,ewcFORCE);
733 GMX_BARRIER(cr->mpi_comm_mygroup);
735 if (ed)
737 do_flood(fplog,cr,x,f,ed,box,step);
740 if (DOMAINDECOMP(cr))
742 dd_force_flop_stop(cr->dd,nrnb);
743 if (wcycle)
745 dd_cycles_add(cr->dd,cycles_force-cycles_pme,ddCyclF);
749 if (bDoForces)
751 if (IR_ELEC_FIELD(*inputrec))
753 /* Compute forces due to electric field */
754 calc_f_el(MASTER(cr) ? field : NULL,
755 start,homenr,mdatoms->chargeA,x,fr->f_novirsum,
756 inputrec->ex,inputrec->et,t);
759 /* Communicate the forces */
760 if (PAR(cr))
762 wallcycle_start(wcycle,ewcMOVEF);
763 if (DOMAINDECOMP(cr))
765 dd_move_f(cr->dd,f,fr->fshift);
766 /* Do we need to communicate the separate force array
767 * for terms that do not contribute to the single sum virial?
768 * Position restraints and electric fields do not introduce
769 * inter-cg forces, only full electrostatics methods do.
770 * When we do not calculate the virial, fr->f_novirsum = f,
771 * so we have already communicated these forces.
773 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
774 (flags & GMX_FORCE_VIRIAL))
776 dd_move_f(cr->dd,fr->f_novirsum,NULL);
778 if (bSepLRF)
780 /* We should not update the shift forces here,
781 * since f_twin is already included in f.
783 dd_move_f(cr->dd,fr->f_twin,NULL);
786 else
788 pd_move_f(cr,f,nrnb);
789 if (bSepLRF)
791 pd_move_f(cr,fr->f_twin,nrnb);
794 wallcycle_stop(wcycle,ewcMOVEF);
797 /* If we have NoVirSum forces, but we do not calculate the virial,
* we sum fr->f_novirsum=f later.
800 if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
802 wallcycle_start(wcycle,ewcVSITESPREAD);
803 spread_vsite_f(fplog,vsite,x,f,fr->fshift,nrnb,
804 &top->idef,fr->ePBC,fr->bMolPBC,graph,box,cr);
805 wallcycle_stop(wcycle,ewcVSITESPREAD);
807 if (bSepLRF)
809 wallcycle_start(wcycle,ewcVSITESPREAD);
810 spread_vsite_f(fplog,vsite,x,fr->f_twin,NULL,
811 nrnb,
812 &top->idef,fr->ePBC,fr->bMolPBC,graph,box,cr);
813 wallcycle_stop(wcycle,ewcVSITESPREAD);
817 if (flags & GMX_FORCE_VIRIAL)
819 /* Calculation of the virial must be done after vsites! */
820 calc_virial(fplog,mdatoms->start,mdatoms->homenr,x,f,
821 vir_force,graph,box,nrnb,fr,inputrec->ePBC);
825 /* calculate the forces and increment the velocities of all rigid body groups */
#ifndef RIGID_DEBUG
/* Uncomment the next line to enable per-step rigid-body diagnostics */
/* #define RIGID_DEBUG */
#endif
829 t_rigid *rigid = inputrec->rigid;
830 if (rigid && rigid->stepcnt > 0) {
831 /* add each rigid dimension to the summing buffer */
832 int i,d,count = 0,hasRigid;
833 for(i = 0; i < inputrec->opts.ngfrz; i++) {
834 hasRigid = 0;
835 for(d = 0; d < DIM; d++) {
836 if(inputrec->opts.nFreeze[i][d] == 2) {
837 hasRigid = 1;
838 rigid->dbuf[count++] = rigid->force[i][d];
841 if(hasRigid) {
842 rigid->dbuf[count++] = rigid->grpcnt[i];
843 rigid->dbuf[count++] = rigid->mass[i];
844 #ifdef RIGID_DEBUG
845 printf("%d (%d) BEFORE: group %d has force %f %f %f, vel %f %f %f, mass %f (%d)\n",
846 cr->nodeid, step, i, rigid->force[i][0], rigid->force[i][1], rigid->force[i][2],
847 rigid->vel[i][0], rigid->vel[i][1], rigid->vel[i][2], rigid->mass[i], rigid->grpcnt[i]);
848 #endif
851 /* sum them in parallel */
852 if(cr && PAR(cr)) {
853 gmx_sumd(count, rigid->dbuf, cr);
855 /* extract the summed forces and masses, and increment the velocities */
if(MASTER(cr)) {
double grpMass, grpCnt;
count = 0;
861 for(i = 0; i < inputrec->opts.ngfrz; i++) {
862 hasRigid = 0;
863 for(d = 0; d < DIM; d++) {
864 if(inputrec->opts.nFreeze[i][d] == 2) {
865 hasRigid = 1;
866 rigid->force[i][d] = rigid->dbuf[count++];
869 if(hasRigid) {
870 grpCnt = rigid->dbuf[count++];
871 grpMass = rigid->dbuf[count++];
872 for(d = 0; d < DIM; d++) {
873 /* damp the previous velocity to prevent unstable oscillations */
874 rigid->vel[i][d] = 0.8 * rigid->vel[i][d] + rigid->force[i][d] / (grpMass * rigid->stepcnt);
876 #ifdef RIGID_DEBUG
877 printf("%d (%d) AFTER: group %d has force %f %f %f, vel %f %f %f, mass %f (%d)\n",
878 cr->nodeid, step, i, rigid->force[i][0], rigid->force[i][1], rigid->force[i][2],
879 rigid->vel[i][0], rigid->vel[i][1], rigid->vel[i][2], grpMass, (int)(grpCnt+.001));
880 #endif
881 for(d = 0; d < DIM; d++) rigid->force[i][d] = 0;
884 rigid->stepcnt = 0;
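/* Hedged summary of the reduction above (descriptive only, matching the code
 * rather than specifying it): for every freeze group i that has a rigid
 * dimension (nFreeze[i][d] == 2), rigid->dbuf is packed back to back with
 *   [ accumulated force in each rigid dimension, grpcnt[i], mass[i] ]
 * gmx_sumd() then reduces the buffer over all nodes, and the master applies
 * the damped update
 *   v_new = 0.8*v_old + F_sum/(mass[i]*stepcnt)
 * in each dimension (the accumulated force is non-zero only in the rigid
 * ones) before clearing the force accumulator and resetting stepcnt, so the
 * whole group translates with a single common velocity.
 */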
887 if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
/* Calculate the center of mass forces; this requires communication,
890 * which is why pull_potential is called close to other communication.
891 * The virial contribution is calculated directly,
892 * which is why we call pull_potential after calc_virial.
894 set_pbc(&pbc,inputrec->ePBC,box);
895 dvdl = 0;
896 enerd->term[F_COM_PULL] =
897 pull_potential(inputrec->ePull,inputrec->pull,mdatoms,&pbc,
898 cr,t,lambda,x,f,vir_force,&dvdl);
899 if (bSepDVDL)
901 fprintf(fplog,sepdvdlformat,"Com pull",enerd->term[F_COM_PULL],dvdl);
903 enerd->dvdl_lin += dvdl;
906 if (PAR(cr) && !(cr->duty & DUTY_PME))
908 cycles_ppdpme = wallcycle_stop(wcycle,ewcPPDURINGPME);
909 dd_cycles_add(cr->dd,cycles_ppdpme,ddCyclPPduringPME);
911 /* In case of node-splitting, the PP nodes receive the long-range
912 * forces, virial and energy from the PME nodes here.
914 wallcycle_start(wcycle,ewcPP_PMEWAITRECVF);
915 dvdl = 0;
916 gmx_pme_receive_f(cr,fr->f_novirsum,fr->vir_el_recip,&e,&dvdl,
917 &cycles_seppme);
918 if (bSepDVDL)
920 fprintf(fplog,sepdvdlformat,"PME mesh",e,dvdl);
922 enerd->term[F_COUL_RECIP] += e;
923 enerd->dvdl_lin += dvdl;
924 if (wcycle)
926 dd_cycles_add(cr->dd,cycles_seppme,ddCyclPME);
928 wallcycle_stop(wcycle,ewcPP_PMEWAITRECVF);
931 if (bDoForces && fr->bF_NoVirSum)
933 if (vsite)
935 /* Spread the mesh force on virtual sites to the other particles...
* This is parallelized. MPI communication is performed
937 * if the constructing atoms aren't local.
939 wallcycle_start(wcycle,ewcVSITESPREAD);
940 spread_vsite_f(fplog,vsite,x,fr->f_novirsum,NULL,nrnb,
941 &top->idef,fr->ePBC,fr->bMolPBC,graph,box,cr);
942 wallcycle_stop(wcycle,ewcVSITESPREAD);
944 if (flags & GMX_FORCE_VIRIAL)
946 /* Now add the forces, this is local */
947 if (fr->bDomDec)
949 sum_forces(0,fr->f_novirsum_n,f,fr->f_novirsum);
951 else
953 sum_forces(start,start+homenr,f,fr->f_novirsum);
955 if (EEL_FULL(fr->eeltype))
957 /* Add the mesh contribution to the virial */
958 m_add(vir_force,fr->vir_el_recip,vir_force);
960 if (debug)
962 pr_rvecs(debug,0,"vir_force",vir_force,DIM);
967 /* Sum the potential energy terms from group contributions */
968 sum_epot(&(inputrec->opts),enerd);
970 if (fr->print_force >= 0 && bDoForces)
972 print_large_forces(stderr,mdatoms,cr,step,fr->print_force,x,f);
976 void do_constrain_first(FILE *fplog,gmx_constr_t constr,
977 t_inputrec *ir,t_mdatoms *md,
978 t_state *state,rvec *f,
979 t_graph *graph,t_commrec *cr,t_nrnb *nrnb,
980 t_forcerec *fr, gmx_localtop_t *top, tensor shake_vir)
982 int i,m,start,end;
983 gmx_large_int_t step;
984 double mass,tmass,vcm[4];
985 real dt=ir->delta_t;
986 real dvdlambda;
987 rvec *savex;
989 snew(savex,state->natoms);
991 start = md->start;
992 end = md->homenr + start;
994 if (debug)
995 fprintf(debug,"vcm: start=%d, homenr=%d, end=%d\n",
996 start,md->homenr,end);
997 /* Do a first constrain to reset particles... */
998 step = ir->init_step;
999 if (fplog)
1001 char buf[STEPSTRSIZE];
1002 fprintf(fplog,"\nConstraining the starting coordinates (step %s)\n",
1003 gmx_step_str(step,buf));
1005 dvdlambda = 0;
1007 /* constrain the current position */
1008 constrain(NULL,TRUE,FALSE,constr,&(top->idef),
1009 ir,NULL,cr,step,0,md,
1010 state->x,state->x,NULL,
1011 state->box,state->lambda,&dvdlambda,
1012 NULL,NULL,nrnb,econqCoord,ir->epc==epcMTTK,state->veta,state->veta);
1013 if (EI_VV(ir->eI))
/* constrain the initial velocity, and save it */
1016 /* also may be useful if we need the ekin from the halfstep for velocity verlet */
1017 /* might not yet treat veta correctly */
1018 constrain(NULL,TRUE,FALSE,constr,&(top->idef),
1019 ir,NULL,cr,step,0,md,
1020 state->x,state->v,state->v,
1021 state->box,state->lambda,&dvdlambda,
1022 NULL,NULL,nrnb,econqVeloc,ir->epc==epcMTTK,state->veta,state->veta);
/* constrain the initial velocities at t-dt/2 */
1025 if (EI_STATE_VELOCITY(ir->eI) && ir->eI!=eiVV)
1027 for(i=start; (i<end); i++)
1029 for(m=0; (m<DIM); m++)
1031 /* Reverse the velocity */
1032 state->v[i][m] = -state->v[i][m];
1033 /* Store the position at t-dt in buf */
1034 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
1037 /* Shake the positions at t=-dt with the positions at t=0
1038 * as reference coordinates.
1040 if (fplog)
1042 char buf[STEPSTRSIZE];
1043 fprintf(fplog,"\nConstraining the coordinates at t0-dt (step %s)\n",
1044 gmx_step_str(step,buf));
1046 dvdlambda = 0;
1047 constrain(NULL,TRUE,FALSE,constr,&(top->idef),
1048 ir,NULL,cr,step,-1,md,
1049 state->x,savex,NULL,
1050 state->box,state->lambda,&dvdlambda,
1051 state->v,NULL,nrnb,econqCoord,ir->epc==epcMTTK,state->veta,state->veta);
1053 for(i=start; i<end; i++) {
1054 for(m=0; m<DIM; m++) {
1055 /* Re-reverse the velocities */
1056 state->v[i][m] = -state->v[i][m];
1061 for(m=0; (m<4); m++)
1062 vcm[m] = 0;
1063 for(i=start; i<end; i++) {
1064 mass = md->massT[i];
1065 for(m=0; m<DIM; m++) {
1066 vcm[m] += state->v[i][m]*mass;
1068 vcm[3] += mass;
1071 if (ir->nstcomm != 0 || debug) {
1072 /* Compute the global sum of vcm */
1073 if (debug)
1074 fprintf(debug,"vcm: %8.3f %8.3f %8.3f,"
1075 " total mass = %12.5e\n",vcm[XX],vcm[YY],vcm[ZZ],vcm[3]);
1076 if (PAR(cr))
1077 gmx_sumd(4,vcm,cr);
1078 tmass = vcm[3];
1079 for(m=0; (m<DIM); m++)
1080 vcm[m] /= tmass;
1081 if (debug)
1082 fprintf(debug,"vcm: %8.3f %8.3f %8.3f,"
1083 " total mass = %12.5e\n",vcm[XX],vcm[YY],vcm[ZZ],tmass);
1084 if (ir->nstcomm != 0) {
1085 /* Now we have the velocity of center of mass, let's remove it */
1086 for(i=start; (i<end); i++) {
1087 for(m=0; (m<DIM); m++)
1088 state->v[i][m] -= vcm[m];
1093 sfree(savex);
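/* Illustrative sketch (not part of GROMACS): after constraining x(0) and the
 * back-stepped coordinates at t = -dt, the routine above removes the velocity
 * of the centre of mass,
 *   v_cm = sum_i m_i v_i / sum_i m_i,   then   v_i -= v_cm
 * so the constrained starting velocities carry no net drift.  The helper
 * below (a hypothetical name) restates this in one dimension.
 */
static void example_remove_com_velocity(int n, const double m[], double v[])
{
    double p = 0, mtot = 0, vcm;
    int    i;

    for (i = 0; i < n; i++)
    {
        p    += m[i]*v[i];
        mtot += m[i];
    }
    vcm = p/mtot;
    for (i = 0; i < n; i++)
    {
        v[i] -= vcm;
    }
}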
1096 void calc_enervirdiff(FILE *fplog,int eDispCorr,t_forcerec *fr)
1098 double eners[2],virs[2],enersum,virsum,y0,f,g,h;
1099 double r0,r1,r,rc3,rc9,ea,eb,ec,pa,pb,pc,pd;
1100 double invscale,invscale2,invscale3;
1101 int ri0,ri1,ri,i,offstart,offset;
1102 real scale,*vdwtab;
1104 fr->enershiftsix = 0;
1105 fr->enershifttwelve = 0;
1106 fr->enerdiffsix = 0;
1107 fr->enerdifftwelve = 0;
1108 fr->virdiffsix = 0;
1109 fr->virdifftwelve = 0;
1111 if (eDispCorr != edispcNO) {
1112 for(i=0; i<2; i++) {
1113 eners[i] = 0;
1114 virs[i] = 0;
1116 if ((fr->vdwtype == evdwSWITCH) || (fr->vdwtype == evdwSHIFT)) {
1117 if (fr->rvdw_switch == 0)
1118 gmx_fatal(FARGS,
1119 "With dispersion correction rvdw-switch can not be zero "
1120 "for vdw-type = %s",evdw_names[fr->vdwtype]);
1122 scale = fr->nblists[0].tab.scale;
1123 vdwtab = fr->nblists[0].vdwtab;
1125 /* Round the cut-offs to exact table values for precision */
1126 ri0 = floor(fr->rvdw_switch*scale);
1127 ri1 = ceil(fr->rvdw*scale);
1128 r0 = ri0/scale;
1129 r1 = ri1/scale;
1130 rc3 = r0*r0*r0;
1131 rc9 = rc3*rc3*rc3;
1133 if (fr->vdwtype == evdwSHIFT) {
1134 /* Determine the constant energy shift below rvdw_switch */
1135 fr->enershiftsix = (real)(-1.0/(rc3*rc3)) - vdwtab[8*ri0];
1136 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - vdwtab[8*ri0 + 4];
1138 /* Add the constant part from 0 to rvdw_switch.
1139 * This integration from 0 to rvdw_switch overcounts the number
1140 * of interactions by 1, as it also counts the self interaction.
1141 * We will correct for this later.
1143 eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
1144 eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
1146 invscale = 1.0/(scale);
1147 invscale2 = invscale*invscale;
1148 invscale3 = invscale*invscale2;
1150 /* following summation derived from cubic spline definition,
Numerical Recipes in C, second edition, p. 113-116. Exact
1152 for the cubic spline. We first calculate the negative of
1153 the energy from rvdw to rvdw_switch, assuming that g(r)=1,
1154 and then add the more standard, abrupt cutoff correction to
1155 that result, yielding the long-range correction for a
1156 switched function. We perform both the pressure and energy
1157 loops at the same time for simplicity, as the computational
1158 cost is low. */
1160 for (i=0;i<2;i++) {
1161 enersum = 0.0; virsum = 0.0;
1162 if (i==0)
1163 offstart = 0;
1164 else
1165 offstart = 4;
1166 for (ri=ri0; ri<ri1; ri++) {
1167 r = ri*invscale;
1168 ea = invscale3;
1169 eb = 2.0*invscale2*r;
1170 ec = invscale*r*r;
1172 pa = invscale3;
1173 pb = 3.0*invscale2*r;
1174 pc = 3.0*invscale*r*r;
1175 pd = r*r*r;
1177 /* this "8" is from the packing in the vdwtab array - perhaps
1178 should be #define'ed? */
1179 offset = 8*ri + offstart;
1180 y0 = vdwtab[offset];
1181 f = vdwtab[offset+1];
1182 g = vdwtab[offset+2];
1183 h = vdwtab[offset+3];
1185 enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2)+
1186 g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
1187 virsum += f*(pa/4 + pb/3 + pc/2 + pd) +
1188 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
1191 enersum *= 4.0*M_PI;
1192 virsum *= 4.0*M_PI;
1193 eners[i] -= enersum;
1194 virs[i] -= virsum;
1197 /* now add the correction for rvdw_switch to infinity */
1198 eners[0] += -4.0*M_PI/(3.0*rc3);
1199 eners[1] += 4.0*M_PI/(9.0*rc9);
1200 virs[0] += 8.0*M_PI/rc3;
1201 virs[1] += -16.0*M_PI/(3.0*rc9);
1203 else if ((fr->vdwtype == evdwCUT) || (fr->vdwtype == evdwUSER)) {
1204 if (fr->vdwtype == evdwUSER && fplog)
1205 fprintf(fplog,
1206 "WARNING: using dispersion correction with user tables\n");
1207 rc3 = fr->rvdw*fr->rvdw*fr->rvdw;
1208 rc9 = rc3*rc3*rc3;
1209 eners[0] += -4.0*M_PI/(3.0*rc3);
1210 eners[1] += 4.0*M_PI/(9.0*rc9);
1211 virs[0] += 8.0*M_PI/rc3;
1212 virs[1] += -16.0*M_PI/(3.0*rc9);
1213 } else {
1214 gmx_fatal(FARGS,
1215 "Dispersion correction is not implemented for vdw-type = %s",
1216 evdw_names[fr->vdwtype]);
1218 fr->enerdiffsix = eners[0];
1219 fr->enerdifftwelve = eners[1];
1220 /* The 0.5 is due to the Gromacs definition of the virial */
1221 fr->virdiffsix = 0.5*virs[0];
1222 fr->virdifftwelve = 0.5*virs[1];
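/* Illustrative sketch (not part of GROMACS): for the plain cut-off branch the
 * per-pair tail terms added above follow from integrating the r^-6 and r^-12
 * parts of the potential beyond the cut-off with g(r) = 1:
 *   energy:  int_rc^inf 4*pi*r^2 * (-r^-6)  dr = -4*pi/(3*rc^3)
 *            int_rc^inf 4*pi*r^2 * ( r^-12) dr =  4*pi/(9*rc^9)
 *   virial:  the corresponding integrals of 4*pi*r^2 * r*dU/dr give
 *            8*pi/rc^3 for the r^-6 part and -16*pi/(3*rc^9) for r^-12,
 * which are exactly the constants added to eners[] and virs[].  The helper
 * below (a hypothetical name) evaluates the two energy integrals for a
 * cut-off rc in nm.
 */
static void example_lj_tail_integrals(double rc, double *tail6, double *tail12)
{
    double rc3 = rc*rc*rc;
    double rc9 = rc3*rc3*rc3;

    *tail6  = -4.0*M_PI/(3.0*rc3);
    *tail12 =  4.0*M_PI/(9.0*rc9);
}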
1226 void calc_dispcorr(FILE *fplog,t_inputrec *ir,t_forcerec *fr,
1227 gmx_large_int_t step,int natoms,
1228 matrix box,real lambda,tensor pres,tensor virial,
1229 real *prescorr, real *enercorr, real *dvdlcorr)
1231 gmx_bool bCorrAll,bCorrPres;
1232 real dvdlambda,invvol,dens,ninter,avcsix,avctwelve,enerdiff,svir=0,spres=0;
1233 int m;
1235 *prescorr = 0;
1236 *enercorr = 0;
1237 *dvdlcorr = 0;
1239 clear_mat(virial);
1240 clear_mat(pres);
1242 if (ir->eDispCorr != edispcNO) {
1243 bCorrAll = (ir->eDispCorr == edispcAllEner ||
1244 ir->eDispCorr == edispcAllEnerPres);
1245 bCorrPres = (ir->eDispCorr == edispcEnerPres ||
1246 ir->eDispCorr == edispcAllEnerPres);
1248 invvol = 1/det(box);
1249 if (fr->n_tpi)
1251 /* Only correct for the interactions with the inserted molecule */
1252 dens = (natoms - fr->n_tpi)*invvol;
1253 ninter = fr->n_tpi;
1255 else
1257 dens = natoms*invvol;
1258 ninter = 0.5*natoms;
1261 if (ir->efep == efepNO)
1263 avcsix = fr->avcsix[0];
1264 avctwelve = fr->avctwelve[0];
1266 else
1268 avcsix = (1 - lambda)*fr->avcsix[0] + lambda*fr->avcsix[1];
1269 avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
1272 enerdiff = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
1273 *enercorr += avcsix*enerdiff;
1274 dvdlambda = 0.0;
1275 if (ir->efep != efepNO)
1277 dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
1279 if (bCorrAll)
1281 enerdiff = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
1282 *enercorr += avctwelve*enerdiff;
1283 if (fr->efep != efepNO)
1285 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
1289 if (bCorrPres)
1291 svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
1292 if (ir->eDispCorr == edispcAllEnerPres)
1294 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
1296 /* The factor 2 is because of the Gromacs virial definition */
1297 spres = -2.0*invvol*svir*PRESFAC;
1299 for(m=0; m<DIM; m++) {
1300 virial[m][m] += svir;
1301 pres[m][m] += spres;
1303 *prescorr += spres;
/* Can't currently control when it prints; for now, just print when debugging */
1307 if (debug)
1309 if (bCorrAll) {
1310 fprintf(debug,"Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1311 avcsix,avctwelve);
1313 if (bCorrPres)
1315 fprintf(debug,
1316 "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
1317 *enercorr,spres,svir);
1319 else
1321 fprintf(debug,"Long Range LJ corr.: Epot %10g\n",*enercorr);
1325 if (fr->bSepDVDL && do_per_step(step,ir->nstlog))
1327 fprintf(fplog,sepdvdlformat,"Dispersion correction",
1328 *enercorr,dvdlambda);
1330 if (fr->efep != efepNO)
1332 *dvdlcorr += dvdlambda;
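/* Illustrative sketch (not part of GROMACS): the pressure correction applied
 * above is obtained from the virial correction svir as
 *   P_corr = -2 * svir * PRESFAC / V
 * where the factor 2 compensates for the 1/2 in the GROMACS virial definition
 * and PRESFAC (about 16.6) converts kJ mol^-1 nm^-3 to bar.  Both svir and
 * P_corr are added only to the diagonal of their tensors.  A plain-double
 * restatement (hypothetical helper name):
 */
static double example_pressure_correction(double svir, double volume,
                                          double presfac)
{
    return -2.0*svir*presfac/volume;
}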
1337 void do_pbc_first(FILE *fplog,matrix box,t_forcerec *fr,
1338 t_graph *graph,rvec x[])
1340 if (fplog)
1341 fprintf(fplog,"Removing pbc first time\n");
1342 calc_shifts(box,fr->shift_vec);
1343 if (graph) {
1344 mk_mshift(fplog,graph,fr->ePBC,box,x);
1345 if (gmx_debug_at)
1346 p_graph(debug,"do_pbc_first 1",graph);
1347 shift_self(graph,box,x);
1348 /* By doing an extra mk_mshift the molecules that are broken
* because they were e.g. imported from another software package
1350 * will be made whole again. Such are the healing powers
1351 * of GROMACS.
1353 mk_mshift(fplog,graph,fr->ePBC,box,x);
1354 if (gmx_debug_at)
1355 p_graph(debug,"do_pbc_first 2",graph);
1357 if (fplog)
1358 fprintf(fplog,"Done rmpbc\n");
1361 static void low_do_pbc_mtop(FILE *fplog,int ePBC,matrix box,
1362 gmx_mtop_t *mtop,rvec x[],
1363 gmx_bool bFirst)
1365 t_graph *graph;
1366 int mb,as,mol;
1367 gmx_molblock_t *molb;
1369 if (bFirst && fplog)
1370 fprintf(fplog,"Removing pbc first time\n");
1372 snew(graph,1);
1373 as = 0;
1374 for(mb=0; mb<mtop->nmolblock; mb++) {
1375 molb = &mtop->molblock[mb];
1376 if (molb->natoms_mol == 1 ||
1377 (!bFirst && mtop->moltype[molb->type].cgs.nr == 1)) {
1378 /* Just one atom or charge group in the molecule, no PBC required */
1379 as += molb->nmol*molb->natoms_mol;
1380 } else {
/* Pass NULL instead of fplog to avoid graph prints for each molecule type */
1382 mk_graph_ilist(NULL,mtop->moltype[molb->type].ilist,
1383 0,molb->natoms_mol,FALSE,FALSE,graph);
1385 for(mol=0; mol<molb->nmol; mol++) {
1386 mk_mshift(fplog,graph,ePBC,box,x+as);
1388 shift_self(graph,box,x+as);
1389 /* The molecule is whole now.
1390 * We don't need the second mk_mshift call as in do_pbc_first,
1391 * since we no longer need this graph.
1394 as += molb->natoms_mol;
1396 done_graph(graph);
1399 sfree(graph);
1402 void do_pbc_first_mtop(FILE *fplog,int ePBC,matrix box,
1403 gmx_mtop_t *mtop,rvec x[])
1405 low_do_pbc_mtop(fplog,ePBC,box,mtop,x,TRUE);
1408 void do_pbc_mtop(FILE *fplog,int ePBC,matrix box,
1409 gmx_mtop_t *mtop,rvec x[])
1411 low_do_pbc_mtop(fplog,ePBC,box,mtop,x,FALSE);
1414 void finish_run(FILE *fplog,t_commrec *cr,const char *confout,
1415 t_inputrec *inputrec,
1416 t_nrnb nrnb[],gmx_wallcycle_t wcycle,
1417 gmx_runtime_t *runtime,
1418 gmx_bool bWriteStat)
1420 int i,j;
1421 t_nrnb *nrnb_tot=NULL;
1422 real delta_t;
1423 double nbfs,mflop;
1424 double cycles[ewcNR];
1426 wallcycle_sum(cr,wcycle,cycles);
1428 if (cr->nnodes > 1) {
1429 if (SIMMASTER(cr))
1430 snew(nrnb_tot,1);
1431 #ifdef GMX_MPI
1432 MPI_Reduce(nrnb->n,nrnb_tot->n,eNRNB,MPI_DOUBLE,MPI_SUM,
1433 MASTERRANK(cr),cr->mpi_comm_mysim);
1434 #endif
1435 } else {
1436 nrnb_tot = nrnb;
1439 if (SIMMASTER(cr)) {
1440 print_flop(fplog,nrnb_tot,&nbfs,&mflop);
1441 if (cr->nnodes > 1) {
1442 sfree(nrnb_tot);
1446 if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr)) {
1447 print_dd_statistics(cr,inputrec,fplog);
1450 #ifdef GMX_MPI
1451 if (PARTDECOMP(cr))
1453 if (MASTER(cr))
1455 t_nrnb *nrnb_all;
1456 int s;
1457 MPI_Status stat;
1459 snew(nrnb_all,cr->nnodes);
1460 nrnb_all[0] = *nrnb;
1461 for(s=1; s<cr->nnodes; s++)
1463 MPI_Recv(nrnb_all[s].n,eNRNB,MPI_DOUBLE,s,0,
1464 cr->mpi_comm_mysim,&stat);
1466 pr_load(fplog,cr,nrnb_all);
1467 sfree(nrnb_all);
1469 else
1471 MPI_Send(nrnb->n,eNRNB,MPI_DOUBLE,MASTERRANK(cr),0,
1472 cr->mpi_comm_mysim);
1475 #endif
1477 if (SIMMASTER(cr)) {
1478 wallcycle_print(fplog,cr->nnodes,cr->npmenodes,runtime->realtime,
1479 wcycle,cycles);
1481 if (EI_DYNAMICS(inputrec->eI)) {
1482 delta_t = inputrec->delta_t;
1483 } else {
1484 delta_t = 0;
1487 if (fplog) {
1488 print_perf(fplog,runtime->proctime,runtime->realtime,
1489 cr->nnodes-cr->npmenodes,
1490 runtime->nsteps_done,delta_t,nbfs,mflop);
1492 if (bWriteStat) {
1493 print_perf(stderr,runtime->proctime,runtime->realtime,
1494 cr->nnodes-cr->npmenodes,
1495 runtime->nsteps_done,delta_t,nbfs,mflop);
/* Legacy performance-reporting path, kept for reference but disabled:
 * it refers to variables (nodetime, realtime, ntot, nrnb_all) that no
 * longer exist in this function and would not compile if enabled.
 */
#if 0
    runtime=inputrec->nsteps*inputrec->delta_t;
    if (bWriteStat) {
        if (cr->nnodes == 1)
            fprintf(stderr,"\n\n");
        print_perf(stderr,nodetime,realtime,runtime,&ntot,
                   cr->nnodes-cr->npmenodes,FALSE);
    }
    wallcycle_print(fplog,cr->nnodes,cr->npmenodes,realtime,wcycle,cycles);
    print_perf(fplog,nodetime,realtime,runtime,&ntot,cr->nnodes-cr->npmenodes,
               TRUE);
    if (PARTDECOMP(cr))
        pr_load(fplog,cr,nrnb_all);
    if (cr->nnodes > 1)
        sfree(nrnb_all);
#endif
1517 void init_md(FILE *fplog,
1518 t_commrec *cr,t_inputrec *ir,const output_env_t oenv,
1519 double *t,double *t0,
1520 real *lambda,double *lam0,
1521 t_nrnb *nrnb,gmx_mtop_t *mtop,
1522 gmx_update_t *upd,
1523 int nfile,const t_filenm fnm[],
1524 gmx_mdoutf_t **outf,t_mdebin **mdebin,
1525 tensor force_vir,tensor shake_vir,rvec mu_tot,
1526 gmx_bool *bSimAnn,t_vcm **vcm, t_state *state, unsigned long Flags)
1528 int i,j,n;
1529 real tmpt,mod;
1531 /* Initial values */
1532 *t = *t0 = ir->init_t;
1533 if (ir->efep != efepNO)
1535 *lam0 = ir->init_lambda;
1536 *lambda = *lam0 + ir->init_step*ir->delta_lambda;
1538 else
1540 *lambda = *lam0 = 0.0;
1543 *bSimAnn=FALSE;
1544 for(i=0;i<ir->opts.ngtc;i++)
1546 /* set bSimAnn if any group is being annealed */
1547 if(ir->opts.annealing[i]!=eannNO)
1549 *bSimAnn = TRUE;
1552 if (*bSimAnn)
1554 update_annealing_target_temp(&(ir->opts),ir->init_t);
1557 if (upd)
1559 *upd = init_update(fplog,ir);
1562 if (vcm != NULL)
1564 *vcm = init_vcm(fplog,&mtop->groups,ir);
1567 if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
1569 if (ir->etc == etcBERENDSEN)
1571 please_cite(fplog,"Berendsen84a");
1573 if (ir->etc == etcVRESCALE)
1575 please_cite(fplog,"Bussi2007a");
1579 init_nrnb(nrnb);
1581 if (nfile != -1)
1583 *outf = init_mdoutf(nfile,fnm,Flags,cr,ir,oenv);
1585 *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
1586 mtop,ir, (*outf)->fp_dhdl);
1589 /* Initiate variables */
1590 clear_mat(force_vir);
1591 clear_mat(shake_vir);
1592 clear_rvec(mu_tot);
1594 debug_gmx();