 * This source code is part of
 * GROningen MAchine for Chemical Simulations
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2008, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 * For more info, check our website at http://www.gromacs.org
 * Gallium Rubidium Oxygen Manganese Argon Carbon Silicon
#include "gmx_fatal.h"
#include "mtop_util.h"
#include "chargegroup.h"
  atom_id shell;               /* The shell id                       */
  atom_id nucl1,nucl2,nucl3;   /* The nuclei connected to the shell  */
  /* gmx_bool bInterCG; */     /* Coupled to nuclei outside cg?      */
  real    k;                   /* force constant                     */
  real    k_1;                 /* 1 over force constant              */
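  /* These per-shell records are filled in init_shell_flexcon() below and
   * drive both the position prediction in predict_shells() and the
   * steepest-descent relaxation in shell_pos_sd(). */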
typedef struct gmx_shellfc {
  int      nshell_gl;       /* The number of shells in the system       */
  t_shell  *shell_gl;       /* All the shells (for DD only)             */
  int      *shell_index_gl; /* Global shell index (for DD only)         */
  gmx_bool bInterCG;        /* Are there inter charge-group shells?     */
  int      nshell;          /* The number of local shells               */
  t_shell  *shell;          /* The local shells                         */
  int      shell_nalloc;    /* The allocation size of shell             */
  gmx_bool bPredict;        /* Predict shell positions                  */
  gmx_bool bForceInit;      /* Force initialization of shell positions  */
  int      nflexcon;        /* The number of flexible constraints       */
  rvec     *x[2];           /* Array for iterative minimization         */
  rvec     *f[2];           /* Array for iterative minimization         */
  int      x_nalloc;        /* The allocation size of x and f           */
  rvec     *acc_dir;        /* Acceleration direction for flexcon       */
  rvec     *x_old;          /* Old coordinates for flexcon              */
  int      flex_nalloc;     /* The allocation size of acc_dir and x_old */
  rvec     *adir_xnold;     /* Work space for init_adir                 */
  rvec     *adir_xnew;      /* Work space for init_adir                 */
  int      adir_nalloc;     /* Work space for init_adir                 */
static void pr_shell(FILE *fplog,int ns,t_shell s[])
  fprintf(fplog,"SHELL DATA\n");
  fprintf(fplog,"%5s %8s %5s %5s %5s\n",
          "Shell","Force k","Nucl1","Nucl2","Nucl3");
  for(i=0; (i<ns); i++) {
    fprintf(fplog,"%5d %8.3f %5d",s[i].shell,1.0/s[i].k_1,s[i].nucl1);
    if (s[i].nnucl == 2)
      fprintf(fplog," %5d\n",s[i].nucl2);
    else if (s[i].nnucl == 3)
      fprintf(fplog," %5d %5d\n",s[i].nucl2,s[i].nucl3);
static void predict_shells(FILE *fplog,rvec x[],rvec v[],real dt,
                           int ns,t_shell s[],
                           real mass[],gmx_mtop_t *mtop,gmx_bool bInit)
  real dt_1,dt_2,dt_3,fudge,tm,m1,m2,m3;

  /* We introduce a fudge factor for performance reasons: with this choice
   * the initial force on the shells is about a factor of two lower than
   * without
   */
    fprintf(fplog,"RELAX: Using prediction for initial shell placement\n");
  for(i=0; (i<ns); i++) {
    switch (s[i].nnucl) {
      for(m=0; (m<DIM); m++)
        x[s1][m] += ptr[n1][m]*dt_1;
      /* Not the correct masses with FE, but it is just a prediction... */
      for(m=0; (m<DIM); m++)
        x[s1][m] += (m1*ptr[n1][m]+m2*ptr[n2][m])*tm;
      /* Not the correct masses with FE, but it is just a prediction... */
      gmx_mtop_atomnr_to_atom(mtop,n1,&atom);
      gmx_mtop_atomnr_to_atom(mtop,n2,&atom);
      gmx_mtop_atomnr_to_atom(mtop,n3,&atom);
      tm = dt_1/(m1+m2+m3);
      for(m=0; (m<DIM); m++)
        x[s1][m] += (m1*ptr[n1][m]+m2*ptr[n2][m]+m3*ptr[n3][m])*tm;
      gmx_fatal(FARGS,"Shell %d has %d nuclei!",i,s[i].nnucl);
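      /* In each of the cases above the shell is displaced by the
       * mass-weighted average of ptr[n]*dt_1 over its bonded nuclei,
       * e.g. for three nuclei
       *   x[s1] += (m1*ptr[n1] + m2*ptr[n2] + m3*ptr[n3]) * dt_1/(m1+m2+m3),
       * so with ptr pointing at the velocities the shell is predicted to
       * follow the mean motion of its nuclei over one step. */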
gmx_shellfc_t init_shell_flexcon(FILE *fplog,
                                 gmx_mtop_t *mtop,int nflexcon,
  struct gmx_shellfc *shfc;
  int  *shell_index=NULL,*at2cg;
  int  n[eptNR],ns,nshell,nsi;
  int  i,j,nmol,type,mb,mt,a_offset,cg,mol,ftype,nra;
  int  aS,aN=0; /* Shell and nucleus */
  int  bondtypes[] = { F_BONDS, F_HARMONIC, F_CUBICBONDS, F_POLARIZATION, F_WATER_POL };
#define NBT asize(bondtypes)
  gmx_mtop_atomloop_block_t aloopb;
  gmx_mtop_atomloop_all_t   aloop;
  gmx_ffparams_t *ffparams;
  gmx_molblock_t *molb;

  /* Count number of shells, and find their indices */
  for(i=0; (i<eptNR); i++) {
  aloopb = gmx_mtop_atomloop_block_init(mtop);
  while (gmx_mtop_atomloop_block_next(aloopb,&atom,&nmol)) {
    n[atom->ptype] += nmol;

  /* Print the number of each particle type */
  for(i=0; (i<eptNR); i++) {
      fprintf(fplog,"There are: %d %ss\n",n[i],ptype_str[i]);

  nshell = n[eptShell];

  if (nshell == 0 && nflexcon == 0) {

  shfc->nflexcon = nflexcon;
  /* We have shells: fill the shell data structure */

  /* Global system sized array, this should be avoided */
  snew(shell_index,mtop->natoms);

  aloop = gmx_mtop_atomloop_all_init(mtop);
  while (gmx_mtop_atomloop_all_next(aloop,&i,&atom)) {
    if (atom->ptype == eptShell) {
      shell_index[i] = nshell++;

  /* Initiate the shell structures */
  for(i=0; (i<nshell); i++) {
    shell[i].shell = NO_ATID;
    shell[i].nucl1 = NO_ATID;
    shell[i].nucl2 = NO_ATID;
    shell[i].nucl3 = NO_ATID;
    /* shell[i].bInterCG=FALSE; */

  ffparams = &mtop->ffparams;
  /* Now fill the structures */
  shfc->bInterCG = FALSE;
  for(mb=0; mb<mtop->nmolblock; mb++) {
    molb = &mtop->molblock[mb];
    molt = &mtop->moltype[molb->type];

    snew(at2cg,molt->atoms.nr);
    for(cg=0; cg<cgs->nr; cg++) {
      for(i=cgs->index[cg]; i<cgs->index[cg+1]; i++) {

    atom = molt->atoms.atom;
    for(mol=0; mol<molb->nmol; mol++) {
      for(j=0; (j<NBT); j++) {
        ia = molt->ilist[bondtypes[j]].iatoms;
        for(i=0; (i<molt->ilist[bondtypes[j]].nr); ) {
          ftype = ffparams->functype[type];
          nra   = interaction_function[ftype].nratoms;

          /* Check whether we have a bond with a shell */
          switch (bondtypes[j]) {
            if (atom[ia[1]].ptype == eptShell) {
            else if (atom[ia[2]].ptype == eptShell) {
            aN = ia[4];  /* Dummy */
            aS = ia[5];  /* Shell */
            gmx_fatal(FARGS,"Death Horror: %s, %d",__FILE__,__LINE__);
          /* Check whether one of the particles is a shell... */
          nsi = shell_index[a_offset+aS];
          if ((nsi < 0) || (nsi >= nshell))
            gmx_fatal(FARGS,"nsi is %d should be within 0 - %d. aS = %d",
          if (shell[nsi].shell == NO_ATID) {
            shell[nsi].shell = a_offset + aS;
          else if (shell[nsi].shell != a_offset+aS)
            gmx_fatal(FARGS,"Weird stuff in %s, %d",__FILE__,__LINE__);

          if (shell[nsi].nucl1 == NO_ATID) {
            shell[nsi].nucl1 = a_offset + aN;
          } else if (shell[nsi].nucl2 == NO_ATID) {
            shell[nsi].nucl2 = a_offset + aN;
          } else if (shell[nsi].nucl3 == NO_ATID) {
            shell[nsi].nucl3 = a_offset + aN;
            pr_shell(fplog,ns,shell);
            gmx_fatal(FARGS,"Can not handle more than three bonds per shell\n");

          if (at2cg[aS] != at2cg[aN]) {
            /* shell[nsi].bInterCG = TRUE; */
            shfc->bInterCG = TRUE;
          switch (bondtypes[j]) {
            shell[nsi].k += ffparams->iparams[type].harmonic.krA;
            shell[nsi].k += ffparams->iparams[type].cubic.kb;
            if (!gmx_within_tol(qS, atom[aS].qB, GMX_REAL_EPS*10))
              gmx_fatal(FARGS,"polarize can not be used with qA(%e) != qB(%e) for atom %d of molecule block %d", qS, atom[aS].qB, aS+1, mb+1);
            shell[nsi].k += sqr(qS)*ONE_4PI_EPS0/
              ffparams->iparams[type].polarize.alpha;
            if (!gmx_within_tol(qS, atom[aS].qB, GMX_REAL_EPS*10))
              gmx_fatal(FARGS,"water_pol can not be used with qA(%e) != qB(%e) for atom %d of molecule block %d", qS, atom[aS].qB, aS+1, mb+1);
            alpha = (ffparams->iparams[type].wpol.al_x +
                     ffparams->iparams[type].wpol.al_y +
                     ffparams->iparams[type].wpol.al_z)/3.0;
            shell[nsi].k += sqr(qS)*ONE_4PI_EPS0/alpha;
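            /* In the two polarization cases above the shell force constant
             * follows from the isotropic polarizability,
             *   k = q_S^2 / (4 pi eps0 alpha),
             * with ONE_4PI_EPS0 = 1/(4 pi eps0) in GROMACS units; for
             * water_pol the polarizability is averaged over x, y and z. */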
            gmx_fatal(FARGS,"Death Horror: %s, %d",__FILE__,__LINE__);
    a_offset += molt->atoms.nr;

    /* Done with this molecule type */

  /* Verify whether it's all correct */
    gmx_fatal(FARGS,"Something weird with shells. They may not be bonded to something");

  for(i=0; (i<ns); i++)
    shell[i].k_1 = 1.0/shell[i].k;

    pr_shell(debug,ns,shell);

  shfc->nshell_gl      = ns;
  shfc->shell_gl       = shell;
  shfc->shell_index_gl = shell_index;

  shfc->bPredict   = (getenv("GMX_NOPREDICT") == NULL);
  shfc->bForceInit = FALSE;
  if (!shfc->bPredict) {
      fprintf(fplog,"\nWill never predict shell positions\n");
    shfc->bForceInit = (getenv("GMX_FORCEINIT") != NULL);
    if (shfc->bForceInit && fplog)
      fprintf(fplog,"\nWill always initiate shell positions\n");

  if (shfc->bPredict) {
      predict_shells(fplog,x,NULL,0,shfc->nshell_gl,shfc->shell_gl,
    if (shfc->bInterCG) {
        fprintf(fplog,"\nNOTE: there are shells that are connected to particles outside their own charge group, will not predict shell positions during the run\n\n");
      shfc->bPredict = FALSE;
void make_local_shells(t_commrec *cr,t_mdatoms *md,
                       struct gmx_shellfc *shfc)
  int a0,a1,*ind,nshell,i;
  gmx_domdec_t *dd=NULL;

  if (DOMAINDECOMP(cr)) {
    pd_at_range(cr,&a0,&a1);
    /* Single node: we need all shells, just copy the pointer */
    shfc->nshell = shfc->nshell_gl;
    shfc->shell  = shfc->shell_gl;

  ind = shfc->shell_index_gl;

  for(i=a0; i<a1; i++) {
    if (md->ptype[i] == eptShell) {
      if (nshell+1 > shfc->shell_nalloc) {
        shfc->shell_nalloc = over_alloc_dd(nshell+1);
        srenew(shell,shfc->shell_nalloc);
        shell[nshell] = shfc->shell_gl[ind[dd->gatindex[i]]];
        shell[nshell] = shfc->shell_gl[ind[i]];
      /* With inter-cg shells we can not do shell prediction,
       * so we do not need the nuclei numbers.
       */
      if (!shfc->bInterCG) {
        shell[nshell].nucl1 = i + shell[nshell].nucl1 - shell[nshell].shell;
        if (shell[nshell].nnucl > 1)
          shell[nshell].nucl2 = i + shell[nshell].nucl2 - shell[nshell].shell;
        if (shell[nshell].nnucl > 2)
          shell[nshell].nucl3 = i + shell[nshell].nucl3 - shell[nshell].shell;
      shell[nshell].shell = i;

  shfc->nshell = nshell;
static void do_1pos(rvec xnew,rvec xold,rvec f,real step)

static void do_1pos3(rvec xnew,rvec xold,rvec f,rvec step)
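/* do_1pos displaces a single position along f by a scalar step size and
 * do_1pos3 does the same with a separate step per dimension; presumably
 * xnew = xold + f*step componentwise, as the callers below suggest. */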
static void directional_sd(FILE *log,rvec xold[],rvec xnew[],rvec acc_dir[],
                           int start,int homenr,real step)
  for(i=start; i<homenr; i++)
    do_1pos(xnew[i],xold[i],acc_dir[i],step);
static void shell_pos_sd(FILE *log,rvec xcur[],rvec xnew[],rvec f[],
                         int ns,t_shell s[],int count)
  const real step_scale_min       = 0.8,
             step_scale_increment = 0.2,
             step_scale_max       = 1.2,
             step_scale_multiple  = (step_scale_max - step_scale_min) / step_scale_increment;
  real step_min,step_max;

  for(i=0; (i<ns); i++) {
      for(d=0; d<DIM; d++) {
        s[i].step[d] = s[i].k_1;
        step_min = min(step_min,s[i].step[d]);
        step_max = max(step_max,s[i].step[d]);
      for(d=0; d<DIM; d++) {
        dx = xcur[shell][d] - s[i].xold[d];
        df =    f[shell][d] - s[i].fold[d];
        /* -dx/df gets used to generate an interpolated value, but would
         * cause a NaN if df were binary-equal to zero. Values close to
         * zero won't cause problems (because of the min() and max()), so
         * just testing for binary inequality is OK. */
          /* Scale the step size by a factor interpolated from
           * step_scale_min to step_scale_max, as k_est goes from 0 to
           * step_scale_multiple * s[i].step[d] */
            step_scale_min * s[i].step[d] +
            step_scale_increment * min(step_scale_multiple * s[i].step[d], max(k_est, 0));
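          /* With the constants above, step_scale_multiple = (1.2-0.8)/0.2 = 2,
           * so the new step is 0.8*old when k_est <= 0 and saturates at
           * 0.8*old + 0.2*(2*old) = 1.2*old once k_est reaches twice the old
           * step; the effective scale factor thus stays between
           * step_scale_min and step_scale_max. */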
        if (gmx_numzero(dx)) /* 0 == dx */
          /* Likely this will never happen, but if it does just
           * don't scale the step. */
          s[i].step[d] *= step_scale_max;
        step_min = min(step_min,s[i].step[d]);
        step_max = max(step_max,s[i].step[d]);

    copy_rvec(xcur[shell],s[i].xold);
    copy_rvec(f[shell],   s[i].fold);

    do_1pos3(xnew[shell],xcur[shell],f[shell],s[i].step);

      fprintf(debug,"shell[%d] = %d\n",i,shell);
      pr_rvec(debug,0,"fshell",f[shell],DIM,TRUE);
      pr_rvec(debug,0,"xold",xcur[shell],DIM,TRUE);
      pr_rvec(debug,0,"step",s[i].step,DIM,TRUE);
      pr_rvec(debug,0,"xnew",xnew[shell],DIM,TRUE);

    printf("step %.3e %.3e\n",step_min,step_max);
static void decrease_step_size(int nshell,t_shell s[])
  for(i=0; i<nshell; i++)
    svmul(0.8,s[i].step,s[i].step);
static void print_epot(FILE *fp,gmx_large_int_t mdstep,int count,real epot,real df,
                       int ndir,real sf_dir)
  fprintf(fp,"MDStep=%5s/%2d EPot: %12.8e, rmsF: %6.2e",
          gmx_step_str(mdstep,buf),count,epot,df);
    fprintf(fp,", dir. rmsF: %6.2e\n",sqrt(sf_dir/ndir));
static real rms_force(t_commrec *cr,rvec f[],int ns,t_shell s[],
                      int ndir,real *sf_dir,real *Epot)
  for(i=0; i<ns; i++) {
    buf[0] += norm2(f[shell]);
  ntot = (int)(buf[1] + 0.5);

  return (ntot ? sqrt(buf[0]/ntot) : 0);
static void check_pbc(FILE *fp,rvec x[],int shell)
  for(m=0; (m<DIM); m++)
    if (fabs(x[shell][m]-x[now][m]) > 0.3) {
      pr_rvecs(fp,0,"SHELL-X",x+now,5);
static void dump_shells(FILE *fp,rvec x[],rvec f[],real ftol,int ns,t_shell s[])
  for(i=0; (i<ns); i++) {
    ff2 = iprod(f[shell],f[shell]);
      fprintf(fp,"SHELL %5d, force %10.5f %10.5f %10.5f, |f| %10.5f\n",
              shell,f[shell][XX],f[shell][YY],f[shell][ZZ],sqrt(ff2));
    check_pbc(fp,x,shell);
static void init_adir(FILE *log,gmx_shellfc_t shfc,
                      gmx_constr_t constr,t_idef *idef,t_inputrec *ir,
                      t_commrec *cr,int dd_ac1,
                      gmx_large_int_t step,t_mdatoms *md,int start,int end,
                      rvec *x_old,rvec *x_init,rvec *x,
                      rvec *f,rvec *acc_dir,matrix box,
                      real lambda,real *dvdlambda,t_nrnb *nrnb)
  unsigned short *ptype;

  if (DOMAINDECOMP(cr))
  if (n > shfc->adir_nalloc) {
    shfc->adir_nalloc = over_alloc_dd(n);
    srenew(shfc->adir_xnold,shfc->adir_nalloc);
    srenew(shfc->adir_xnew,shfc->adir_nalloc);
  xnold = shfc->adir_xnold;
  xnew  = shfc->adir_xnew;

  /* Does NOT work with freeze or acceleration groups (yet) */
  for (n=start; n<end; n++) {
    w_dt = md->invmass[n]*dt;
    for (d=0; d<DIM; d++) {
      if ((ptype[n] != eptVSite) && (ptype[n] != eptShell)) {
        xnold[n-start][d] = x[n][d] - (x_init[n][d] - x_old[n][d]);
        xnew[n-start][d]  = 2*x[n][d] - x_old[n][d] + f[n][d]*w_dt*dt;
        xnold[n-start][d] = x[n][d];
        xnew[n-start][d]  = x[n][d];

  constrain(log,FALSE,FALSE,constr,idef,ir,NULL,cr,step,0,md,
            x,xnold-start,NULL,box,
            lambda,dvdlambda,NULL,NULL,nrnb,econqCoord,FALSE,0,0);
  constrain(log,FALSE,FALSE,constr,idef,ir,NULL,cr,step,0,md,
            x,xnew-start,NULL,box,
            lambda,dvdlambda,NULL,NULL,nrnb,econqCoord,FALSE,0,0);

  /* Set xnew to minus the acceleration */
  for (n=start; n<end; n++) {
        -(2*x[n][d]-xnold[n-start][d]-xnew[n-start][d])/sqr(dt)
        - f[n][d]*md->invmass[n];
      clear_rvec(acc_dir[n]);

  /* Project the acceleration on the old bond directions */
  constrain(log,FALSE,FALSE,constr,idef,ir,NULL,cr,step,0,md,
            x_old,xnew-start,acc_dir,box,
            lambda,dvdlambda,NULL,NULL,nrnb,econqDeriv_FlexCon,FALSE,0,0);
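  /* The expression assembled in the loop above is a finite-difference
   * estimate of the acceleration of the constrained trajectory,
   *   (xnew - 2*x + xnold)/dt^2,
   * minus the unconstrained acceleration f/m; the final constrain() call
   * with econqDeriv_FlexCon then presumably projects this residual
   * acceleration onto the flexible-constraint directions, as the comment
   * above indicates. */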
int relax_shell_flexcon(FILE *fplog,t_commrec *cr,gmx_bool bVerbose,
                        gmx_large_int_t mdstep,t_inputrec *inputrec,
                        gmx_bool bDoNS,int force_flags,
                        gmx_enerdata_t *enerd,t_fcdata *fcd,
                        t_state *state,rvec f[],
                        t_nrnb *nrnb,gmx_wallcycle_t wcycle,
                        gmx_groups_t *groups,
                        struct gmx_shellfc *shfc,
                        double t,rvec mu_tot,
                        int natoms,gmx_bool *bConverged,
  rvec *pos[2],*force[2],*acc_dir=NULL,*x_old=NULL;
  real ftol,xiH,xiS,dum=0;
  gmx_bool bCont,bInit;
  int  nat,dd_ac0,dd_ac1=0,i;
  int  start=md->start,homenr=md->homenr,end=start+homenr,cg0,cg1;
  int  nflexcon,g,number_steps,d,Min=0,count=0;
#define Try (1-Min)   /* At start Try = 1 */
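/* Min and Try index the two halves of the pos[2]/force[2] buffers: the best
 * configuration found so far lives in [Min], each new trial step is written
 * to [Try], and the indices are swapped whenever the trial has a lower RMS
 * force than the current minimum (see the loop below). */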
  bCont        = (mdstep == inputrec->init_step) && inputrec->bContinuation;
  bInit        = (mdstep == inputrec->init_step) || shfc->bForceInit;
  ftol         = inputrec->em_tol;
  number_steps = inputrec->niter;
  nshell       = shfc->nshell;

  nflexcon     = shfc->nflexcon;

  if (DOMAINDECOMP(cr)) {
    nat = dd_natoms_vsite(cr->dd);
      dd_get_constraint_range(cr->dd,&dd_ac0,&dd_ac1);
      nat = max(nat,dd_ac1);

  if (nat > shfc->x_nalloc) {
    /* Allocate local arrays */
    shfc->x_nalloc = over_alloc_dd(nat);
    for(i=0; (i<2); i++) {
      srenew(shfc->x[i],shfc->x_nalloc);
      srenew(shfc->f[i],shfc->x_nalloc);
  for(i=0; (i<2); i++) {
    force[i] = shfc->f[i];
  /* With particle decomposition this code only works
   * when all particles involved with each shell are in the same cg.
   */

  if (bDoNS && inputrec->ePBC != epbcNONE && !DOMAINDECOMP(cr)) {
    /* This is the only time where the coordinates are used
     * before do_force is called, which normally puts all
     * charge groups in the box.
     */
    if (PARTDECOMP(cr)) {
      pd_cg_range(cr,&cg0,&cg1);
    put_charge_groups_in_box(fplog,cg0,cg1,fr->ePBC,state->box,
                             &(top->cgs),state->x,fr->cg_cm);
      mk_mshift(fplog,graph,fr->ePBC,state->box,state->x);
    /* After this all coordinate arrays will contain whole molecules */
      shift_self(graph,state->box,state->x);
  if (nat > shfc->flex_nalloc) {
    shfc->flex_nalloc = over_alloc_dd(nat);
    srenew(shfc->acc_dir,shfc->flex_nalloc);
    srenew(shfc->x_old,shfc->flex_nalloc);
  acc_dir = shfc->acc_dir;
  for(i=0; i<homenr; i++) {
        state->x[start+i][d] - state->v[start+i][d]*inputrec->delta_t;

  /* Do a prediction of the shell positions */
  if (shfc->bPredict && !bCont) {
    predict_shells(fplog,state->x,state->v,inputrec->delta_t,nshell,shell,
                   md->massT,NULL,bInit);
  /* do_force expected the charge groups to be in the box */
    unshift_self(graph,state->box,state->x);

  /* Calculate the forces first time around */
    pr_rvecs(debug,0,"x b4 do_force",state->x + start,homenr);
  do_force(fplog,cr,inputrec,mdstep,nrnb,wcycle,top,mtop,groups,
           state->box,state->x,&state->hist,
           force[Min],force_vir,md,enerd,fcd,
           fr,vsite,mu_tot,t,fp_field,NULL,bBornRadii,
           (bDoNS ? GMX_FORCE_NS : 0) | force_flags);

    init_adir(fplog,shfc,
              constr,idef,inputrec,cr,dd_ac1,mdstep,md,start,end,
              shfc->x_old-start,state->x,state->x,force[Min],
              shfc->acc_dir-start,state->box,state->lambda,&dum,nrnb);

    for(i=start; i<end; i++)
      sf_dir += md->massT[i]*norm2(shfc->acc_dir[i-start]);

  Epot[Min] = enerd->term[F_EPOT];

  df[Min] = rms_force(cr,shfc->f[Min],nshell,shell,nflexcon,&sf_dir,&Epot[Min]);
    fprintf(debug,"df = %g %g\n",df[Min],df[Try]);
    pr_rvecs(debug,0,"force0",force[Min],md->nr);
  if (nshell+nflexcon > 0) {
    /* Copy x to pos[Min] & pos[Try]: during minimization only the
     * shell positions are updated, therefore the other particles must
     * be set here.
     */
    memcpy(pos[Min],state->x,nat*sizeof(state->x[0]));
    memcpy(pos[Try],state->x,nat*sizeof(state->x[0]));

  if (bVerbose && MASTER(cr))
    print_epot(stdout,mdstep,0,Epot[Min],df[Min],nflexcon,sf_dir);

    fprintf(debug,"%17s: %14.10e\n",
            interaction_function[F_EKIN].longname,enerd->term[F_EKIN]);
    fprintf(debug,"%17s: %14.10e\n",
            interaction_function[F_EPOT].longname,enerd->term[F_EPOT]);
    fprintf(debug,"%17s: %14.10e\n",
            interaction_function[F_ETOT].longname,enerd->term[F_ETOT]);
    fprintf(debug,"SHELLSTEP %s\n",gmx_step_str(mdstep,sbuf));

  /* First check whether we should do shells, or whether the force is
   * low enough even without minimization.
   */
  *bConverged = (df[Min] < ftol);
  for(count=1; (!(*bConverged) && (count < number_steps)); count++) {
      construct_vsites(fplog,vsite,pos[Min],nrnb,inputrec->delta_t,state->v,
                       idef->iparams,idef->il,
                       fr->ePBC,fr->bMolPBC,graph,cr,state->box);

      init_adir(fplog,shfc,
                constr,idef,inputrec,cr,dd_ac1,mdstep,md,start,end,
                x_old-start,state->x,pos[Min],force[Min],acc_dir-start,
                state->box,state->lambda,&dum,nrnb);

      directional_sd(fplog,pos[Min],pos[Try],acc_dir-start,start,end,

    /* New positions, Steepest descent */
    shell_pos_sd(fplog,pos[Min],pos[Try],force[Min],nshell,shell,count);

    /* do_force expected the charge groups to be in the box */
      unshift_self(graph,state->box,pos[Try]);

      pr_rvecs(debug,0,"RELAX: pos[Min] ",pos[Min] + start,homenr);
      pr_rvecs(debug,0,"RELAX: pos[Try] ",pos[Try] + start,homenr);

    /* Try the new positions */
    do_force(fplog,cr,inputrec,1,nrnb,wcycle,
             top,mtop,groups,state->box,pos[Try],&state->hist,
             force[Try],force_vir,
             md,enerd,fcd,state->lambda,graph,
             fr,vsite,mu_tot,t,fp_field,NULL,bBornRadii,

      pr_rvecs(debug,0,"RELAX: force[Min]",force[Min] + start,homenr);
      pr_rvecs(debug,0,"RELAX: force[Try]",force[Try] + start,homenr);
      init_adir(fplog,shfc,
                constr,idef,inputrec,cr,dd_ac1,mdstep,md,start,end,
                x_old-start,state->x,pos[Try],force[Try],acc_dir-start,
                state->box,state->lambda,&dum,nrnb);

      for(i=start; i<end; i++)
        sf_dir += md->massT[i]*norm2(acc_dir[i-start]);

    Epot[Try] = enerd->term[F_EPOT];

    df[Try] = rms_force(cr,force[Try],nshell,shell,nflexcon,&sf_dir,&Epot[Try]);

      fprintf(debug,"df = %g %g\n",df[Min],df[Try]);

      pr_rvecs(debug,0,"F na do_force",force[Try] + start,homenr);
      fprintf(debug,"SHELL ITER %d\n",count);
      dump_shells(debug,pos[Try],force[Try],ftol,nshell,shell);

    if (bVerbose && MASTER(cr))
      print_epot(stdout,mdstep,count,Epot[Try],df[Try],nflexcon,sf_dir);

    *bConverged = (df[Try] < ftol);
    if ((df[Try] < df[Min])) {
        fprintf(debug,"Swapping Min and Try\n");
        /* Correct the velocities for the flexible constraints */
        invdt = 1/inputrec->delta_t;
        for(i=start; i<end; i++) {
          for(d=0; d<DIM; d++)
            state->v[i][d] += (pos[Try][i][d] - pos[Min][i][d])*invdt;
      decrease_step_size(nshell,shell);
  if (MASTER(cr) && !(*bConverged)) {
    /* Note that the energies and virial are incorrect when not converged */
            "step %s: EM did not converge in %d iterations, RMS force %.3f\n",
            gmx_step_str(mdstep,sbuf),number_steps,df[Min]);
            "step %s: EM did not converge in %d iterations, RMS force %.3f\n",
            gmx_step_str(mdstep,sbuf),number_steps,df[Min]);

  /* Copy back the coordinates and the forces */
  memcpy(state->x,pos[Min],nat*sizeof(state->x[0]));
  memcpy(f,force[Min],nat*sizeof(f[0]));