/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* This file is completely threadsafe - keep it that way! */
#include "gromacs/gmxlib/network.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/rbin.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/group.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/futil.h"
#include "gromacs/utility/smalloc.h"
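/* Zero the half-step (ekinh, ekinh_old) and full-step (ekinf) kinetic
 * energy tensors of every temperature-coupling group. */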
static void init_grptcstat(int ngtc, t_grp_tcstat tcstat[])
{
    int i;

    for (i = 0; (i < ngtc); i++)
    {
        clear_mat(tcstat[i].ekinh);
        clear_mat(tcstat[i].ekinh_old);
        clear_mat(tcstat[i].ekinf);
    }
}
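/* Accumulate the total mass of each acceleration group for both the A and
 * the B topology state; these masses are used later to turn summed group
 * momenta into mean group velocities. */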
static void init_grpstat(gmx_mtop_t *mtop, int ngacc, t_grp_acc gstat[])
{
    gmx_groups_t           *groups;
    gmx_mtop_atomloop_all_t aloop;
    int                     i, grp;
    const t_atom           *atom;

    if (ngacc > 0)
    {
        groups = &mtop->groups;
        aloop  = gmx_mtop_atomloop_all_init(mtop);
        while (gmx_mtop_atomloop_all_next(aloop, &i, &atom))
        {
            grp = ggrpnr(groups, egcACC, i);
            if ((grp < 0) || (grp >= ngacc))
            {
                gmx_incons("Input for acceleration groups wrong");
            }
            /* This will not work for integrator BD */
            gstat[grp].mA += atom->m;
            gstat[grp].mB += atom->mB;
        }
    }
}
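/* Allocate and initialise all kinetic energy data: the per-group
 * temperature-coupling state, the per-thread work buffers used when
 * accumulating kinetic energies, and the acceleration-group statistics. */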
void init_ekindata(FILE gmx_unused *log, gmx_mtop_t *mtop, t_grpopts *opts,
                   gmx_ekindata_t *ekind)
{
    int i;
    int nthread, thread;

#ifdef DEBUG
    fprintf(log, "ngtc: %d, ngacc: %d, ngener: %d\n", opts->ngtc, opts->ngacc,
            opts->ngener);
#endif
    /* bNEMD tells if we should remove the COM velocity
     * from the velocities during velocity scaling in T-coupling.
     * Turn this on when we have multiple acceleration groups
     * or one accelerated group.
     */
    ekind->bNEMD = (opts->ngacc > 1 || norm2(opts->acc[0]) > 0);
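    /* Example: a single acceleration group with a non-zero acceleration
     * vector (ngacc == 1, |acc[0]| > 0) already counts as NEMD here. */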
    ekind->ngtc = opts->ngtc;
    snew(ekind->tcstat, opts->ngtc);
    init_grptcstat(opts->ngtc, ekind->tcstat);
    /* Set Berendsen tcoupl lambda's to 1,
     * so runs without Berendsen coupling are not affected.
     */
    for (i = 0; i < opts->ngtc; i++)
    {
        ekind->tcstat[i].lambda         = 1.0;
        ekind->tcstat[i].vscale_nhc     = 1.0;
        ekind->tcstat[i].ekinscaleh_nhc = 1.0;
        ekind->tcstat[i].ekinscalef_nhc = 1.0;
    }
    nthread = gmx_omp_nthreads_get(emntUpdate);

    snew(ekind->ekin_work_alloc, nthread);
    snew(ekind->ekin_work, nthread);
    snew(ekind->dekindl_work, nthread);
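    /* Each thread accumulates kinetic energy tensors into its own slice of
     * ekin_work; the padding applied below keeps neighbouring slices in
     * separate cache lines so the threads do not slow each other down
     * through false sharing. */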
#pragma omp parallel for num_threads(nthread) schedule(static)
    for (thread = 0; thread < nthread; thread++)
    {
        try
        {
#define EKIN_WORK_BUFFER_SIZE 2
            /* Allocate 2 extra elements on both sides, so in single and
             * double precision we have a
             * EKIN_WORK_BUFFER_SIZE*DIM*DIM*sizeof(real) = 72/144 bytes
             * buffer on both sides to avoid cache pollution.
             */
            snew(ekind->ekin_work_alloc[thread], ekind->ngtc+2*EKIN_WORK_BUFFER_SIZE);
            ekind->ekin_work[thread] = ekind->ekin_work_alloc[thread] + EKIN_WORK_BUFFER_SIZE;
            /* Nasty hack so we can have the per-thread accumulation
             * variable for dekindl in the same thread-local cache lines
             * as the per-thread accumulation tensors for ekin[fh],
             * because they are accumulated in the same loop. */
            ekind->dekindl_work[thread] = &(ekind->ekin_work[thread][ekind->ngtc][0][0]);
#undef EKIN_WORK_BUFFER_SIZE
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
    ekind->ngacc = opts->ngacc;
    snew(ekind->grpstat, opts->ngacc);
    init_grpstat(mtop, opts->ngacc, ekind->grpstat);
}
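/* Sum the per-group momentum accumulators grpstat[g].u over all ranks:
 * the DIM components of every group are packed into one reduction bin,
 * summed with a single communication call and unpacked again. */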
void accumulate_u(t_commrec *cr, t_grpopts *opts, gmx_ekindata_t *ekind)
{
    /* This routine will only be called when it's necessary */
    t_bin *rb;
    int    g;

    rb = mk_bin();

    for (g = 0; (g < opts->ngacc); g++)
    {
        add_binr(rb, DIM, ekind->grpstat[g].u);
    }

    sum_bin(rb, cr);

    for (g = 0; (g < opts->ngacc); g++)
    {
        extract_binr(rb, DIM*g, DIM, ekind->grpstat[g].u);
    }

    destroy_bin(rb);
}
/* I don't think accumulate_ekin is used anymore? */

#if 0
static void accumulate_ekin(t_commrec *cr, t_grpopts *opts,
                            gmx_ekindata_t *ekind)
{
    int g;

    if (PAR(cr))
    {
        for (g = 0; (g < opts->ngtc); g++)
        {
            gmx_sum(DIM*DIM, ekind->tcstat[g].ekinf[0], cr);
        }
    }
}
#endif
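/* Compute the mean velocity u of each acceleration group as the
 * mass-weighted average of the atomic velocities,
 * u[d] = sum_i m_i v_i[d] / M, where the group mass M is interpolated
 * between the A and B topology states: M = (1-lambda)*mA + lambda*mB. */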
void update_ekindata(int start, int homenr, gmx_ekindata_t *ekind,
                     t_grpopts *opts, rvec v[], t_mdatoms *md, real lambda)
{
    int  d, g, n;
    real mv;

    /* calculate mean velocities at whole timestep */
    for (g = 0; (g < opts->ngtc); g++)
    {
        ekind->tcstat[g].T = 0;
    }
    if (ekind->bNEMD)
    {
        for (g = 0; (g < opts->ngacc); g++)
        {
            clear_rvec(ekind->grpstat[g].u);
        }

        g = 0;
        for (n = start; (n < start+homenr); n++)
        {
            if (md->cACC)
            {
                g = md->cACC[n];
            }
            for (d = 0; (d < DIM); d++)
            {
                mv                      = md->massT[n]*v[n][d];
                ekind->grpstat[g].u[d] += mv;
            }
        }

        for (g = 0; (g < opts->ngacc); g++)
        {
            for (d = 0; (d < DIM); d++)
            {
                ekind->grpstat[g].u[d] /=
                    (1-lambda)*ekind->grpstat[g].mA + lambda*ekind->grpstat[g].mB;
            }
        }
    }
}
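/* Combine the per-group kinetic energy tensors into ekind->ekin, compute
 * the half-step and full-step temperature of every group, and return the
 * degrees-of-freedom weighted average temperature of the whole system. */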
real sum_ekin(t_grpopts *opts, gmx_ekindata_t *ekind, real *dekindlambda,
              gmx_bool bEkinAveVel, gmx_bool bScaleEkin)
{
    int           i, j, m, ngtc;
    real          T, nrdf, nd;
    t_grp_tcstat *tcstat;

    ngtc = opts->ngtc;

    T    = 0;
    nrdf = 0;

    clear_mat(ekind->ekin);

    for (i = 0; (i < ngtc); i++)
    {
        nd     = opts->nrdf[i];
        tcstat = &ekind->tcstat[i];
        /* Sometimes a group does not have degrees of freedom, e.g.
         * when it consists of shells and virtual sites, then we just
         * set the temperature to 0 and also neglect the kinetic
         * energy, which should be zero anyway.
         */
        if (nd > 0)
        {
            if (bEkinAveVel)
            {
                if (!bScaleEkin)
                {
                    /* in this case, kinetic energy is from the current velocities already */
                    msmul(tcstat->ekinf, tcstat->ekinscalef_nhc, tcstat->ekinf);
                }
            }
            else
            {
                /* Calculate the full step Ekin as the average of the half steps */
                for (j = 0; (j < DIM); j++)
                {
                    for (m = 0; (m < DIM); m++)
                    {
                        tcstat->ekinf[j][m] =
                            0.5*(tcstat->ekinh[j][m]*tcstat->ekinscaleh_nhc + tcstat->ekinh_old[j][m]);
                    }
                }
            }
            m_add(tcstat->ekinf, ekind->ekin, ekind->ekin);

            tcstat->Th = calc_temp(trace(tcstat->ekinh), nd);
            tcstat->T  = calc_temp(trace(tcstat->ekinf), nd);
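            /* The trace of the kinetic energy tensor is the total kinetic
             * energy of the group, so the temperatures follow from
             * equipartition, T = 2*Ekin/(nd*kB), with nd the number of
             * degrees of freedom of the group. */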
            /* after the scaling factors have been multiplied in, we can remove them */
            if (bEkinAveVel)
            {
                tcstat->ekinscalef_nhc = 1.0;
            }
            else
            {
                tcstat->ekinscaleh_nhc = 1.0;
            }
        }
        else
        {
            tcstat->T  = 0.0;
            tcstat->Th = 0.0;
        }
        T    += nd*tcstat->T;
        nrdf += nd;
    }
    if (nrdf > 0)
    {
        T /= nrdf;
    }
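    /* Report the kinetic-energy contribution to dH/dlambda; with half-step
     * (leap-frog) kinetic energies the two half-step values are averaged,
     * consistent with the averaging of ekinh above. */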
    if (bEkinAveVel)
    {
        *dekindlambda = ekind->dekindl;
    }
    else
    {
        *dekindlambda = 0.5*(ekind->dekindl + ekind->dekindl_old);
    }

    return T;
}