src/gromacs/mdlib/expanded.cpp

/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "expanded.h"

#include <cmath>
#include <cstdio>

#include <algorithm>

#include "gromacs/domdec/domdec.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/fileio/gmxfio.h"
#include "gromacs/fileio/xtcio.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/random/threefry.h"
#include "gromacs/random/uniformrealdistribution.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"

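/* Initialize the lambda-weight history: copy the initial lambda weights from the
   expanded-ensemble input and set the starting Wang-Landau increment. */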
static void init_df_history_weights(df_history_t *dfhist, const t_expanded *expand, int nlim)
{
    int i;
    dfhist->wl_delta = expand->init_wl_delta;
    for (i = 0; i < nlim; i++)
    {
        dfhist->sum_weights[i] = expand->init_lambda_weights[i];
        dfhist->sum_dg[i]      = expand->init_lambda_weights[i];
    }
}

/* Eventually should contain all the functions needed to initialize expanded ensemble
   before the md loop starts */
void init_expanded_ensemble(gmx_bool bStateFromCP, const t_inputrec *ir, df_history_t *dfhist)
{
    if (!bStateFromCP)
    {
        init_df_history_weights(dfhist, ir->expandedvals, ir->fepvals->n_lambda);
    }
}

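/* Compute normalized probabilities p_k[i] proportional to exp(ene[i]) for the
   states in [minfep, maxfep], shifting by the maximum energy first to avoid
   overflow; *pks returns the (shifted) normalization denominator. */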
static void GenerateGibbsProbabilities(const real *ene, double *p_k, double *pks, int minfep, int maxfep)
{
    int  i;
    real maxene;

    *pks   = 0.0;
    maxene = ene[minfep];
    /* find the maximum value */
    for (i = minfep; i <= maxfep; i++)
    {
        if (ene[i] > maxene)
        {
            maxene = ene[i];
        }
    }
    /* find the denominator */
    for (i = minfep; i <= maxfep; i++)
    {
        *pks += std::exp(ene[i]-maxene);
    }
    /* numerators */
    for (i = minfep; i <= maxfep; i++)
    {
        p_k[i] = std::exp(ene[i]-maxene) / *pks;
    }
}

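/* As GenerateGibbsProbabilities, but each state's log-probability is augmented by
   the log of its histogram count nvals[i] (shifted by delta when the count is zero),
   as used by the weighted Wang-Landau update. */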
static void GenerateWeightedGibbsProbabilities(const real *ene, double *p_k, double *pks, int nlim, real *nvals, real delta)
{
    int   i;
    real  maxene;
    real *nene;
    *pks = 0.0;

    snew(nene, nlim);
    for (i = 0; i < nlim; i++)
    {
        if (nvals[i] == 0)
        {
            /* add the delta, since we need to make sure it's greater than zero, and
               we need a non-arbitrary number? */
            nene[i] = ene[i] + std::log(nvals[i]+delta);
        }
        else
        {
            nene[i] = ene[i] + std::log(nvals[i]);
        }
    }

    /* find the maximum value */
    maxene = nene[0];
    for (i = 0; i < nlim; i++)
    {
        if (nene[i] > maxene)
        {
            maxene = nene[i];
        }
    }

    /* subtract off the maximum, avoiding overflow */
    for (i = 0; i < nlim; i++)
    {
        nene[i] -= maxene;
    }

    /* find the denominator */
    for (i = 0; i < nlim; i++)
    {
        *pks += std::exp(nene[i]);
    }

    /* numerators */
    for (i = 0; i < nlim; i++)
    {
        p_k[i] = std::exp(nene[i]) / *pks;
    }
    sfree(nene);
}

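/* Return the index of the smallest entry in min_metric[0..N-1]. */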
static int FindMinimum(const real *min_metric, int N)
{
    real min_val;
    int  min_nval, nval;

    min_nval = 0;
    min_val  = min_metric[0];

    for (nval = 0; nval < N; nval++)
    {
        if (min_metric[nval] < min_val)
        {
            min_val  = min_metric[nval];
            min_nval = nval;
        }
    }
    return min_nval;
}

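/* Return TRUE when every histogram entry lies within a factor of ratio of the
   mean (i.e. the histogram is considered flat); FALSE if there are no samples. */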
static gmx_bool CheckHistogramRatios(int nhisto, const real *histo, real ratio)
{
    int      i;
    real     nmean;
    gmx_bool bIfFlat;

    nmean = 0;
    for (i = 0; i < nhisto; i++)
    {
        nmean += histo[i];
    }

    if (nmean == 0)
    {
        /* no samples! is bad!*/
        bIfFlat = FALSE;
        return bIfFlat;
    }
    nmean /= static_cast<real>(nhisto);

    bIfFlat = TRUE;
    for (i = 0; i < nhisto; i++)
    {
        /* make sure that all points are in the ratio < x < 1/ratio range */
        if (!((histo[i]/nmean < 1.0/ratio) && (histo[i]/nmean > ratio)))
        {
            bIfFlat = FALSE;
            break;
        }
    }
    return bIfFlat;
}

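/* Decide whether the expanded-ensemble weights are equilibrated, according to the
   equilibration criterion selected in expand->elmceq (number of steps, number of
   samples, visits per state, Wang-Landau delta, or histogram flatness). */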
static gmx_bool CheckIfDoneEquilibrating(int nlim, const t_expanded *expand, const df_history_t *dfhist, int64_t step)
{
    int      i, totalsamples;
    gmx_bool bDoneEquilibrating = TRUE;
    gmx_bool bIfFlat;

    /* If we are doing slow growth to get initial values, we haven't finished equilibrating */
    if (expand->lmc_forced_nstart > 0)
    {
        for (i = 0; i < nlim; i++)
        {
            if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
                                                                    done equilibrating*/
            {
                bDoneEquilibrating = FALSE;
                break;
            }
        }
    }
    else
    {
        /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
        bDoneEquilibrating = TRUE;

        /* calculate the total number of samples */
        switch (expand->elmceq)
        {
            case elmceqNO:
                /* We have not equilibrated, and won't, ever. */
                bDoneEquilibrating = FALSE;
                break;
            case elmceqYES:
                /* we have equilibrated -- we're done */
                bDoneEquilibrating = TRUE;
                break;
            case elmceqSTEPS:
                /* first, check if we are equilibrating by steps, if we're still under */
                if (step < expand->equil_steps)
                {
                    bDoneEquilibrating = FALSE;
                }
                break;
            case elmceqSAMPLES:
                totalsamples = 0;
                for (i = 0; i < nlim; i++)
                {
                    totalsamples += dfhist->n_at_lam[i];
                }
                if (totalsamples < expand->equil_samples)
                {
                    bDoneEquilibrating = FALSE;
                }
                break;
            case elmceqNUMATLAM:
                for (i = 0; i < nlim; i++)
                {
                    if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're definitely not
                                                                         done equilibrating*/
                    {
                        bDoneEquilibrating = FALSE;
                        break;
                    }
                }
                break;
            case elmceqWLDELTA:
                if (EWL(expand->elamstats)) /* This check is in readir as well, but
                                               just to be sure */
                {
                    if (dfhist->wl_delta > expand->equil_wl_delta)
                    {
                        bDoneEquilibrating = FALSE;
                    }
                }
                break;
            case elmceqRATIO:
                /* we can use the flatness as a judge of good weights, as long as
                   we're not doing minvar, or Wang-Landau.
                   But turn off for now until we figure out exactly how we do this.
                 */
                if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
                {
                    /* we want to use flatness -avoiding- the forced-through samples. Plus, we need to convert to
                       floats for this histogram function. */

                    real *modhisto;
                    snew(modhisto, nlim);
                    for (i = 0; i < nlim; i++)
                    {
                        modhisto[i] = 1.0*(dfhist->n_at_lam[i]-expand->lmc_forced_nstart);
                    }
                    bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
                    sfree(modhisto);
                    if (!bIfFlat)
                    {
                        bDoneEquilibrating = FALSE;
                    }
                }
                break;
            default:
                bDoneEquilibrating = TRUE;
                break;
        }
    }
    return bDoneEquilibrating;
}

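/* Update the free-energy weights for the current lambda state using the chosen
   estimator (Wang-Landau, weighted Wang-Landau, Barker, Metropolis or minimum
   variance). Returns TRUE on the step where equilibration is first detected,
   FALSE otherwise. */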
static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist,
                              int fep_state, const real *scaled_lamee, const real *weighted_lamee, int64_t step)
{
    gmx_bool  bSufficientSamples;
    int       i;
    int       n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
    real      omega_m1_0, omega_p1_0, clam_osum;
    real      de, de_function;
    real      cnval, zero_sum_weights;
    real     *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
    real      clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
    real     *lam_variance, *lam_dg;
    double   *p_k;
    double    pks = 0;
    real      chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;

    /* if we have equilibrated the weights, exit now */
    if (dfhist->bEquil)
    {
        return FALSE;
    }

    if (CheckIfDoneEquilibrating(nlim, expand, dfhist, step))
    {
        dfhist->bEquil = TRUE;
        /* zero out the visited states so we know how many equilibrated states we have
           from here on out.*/
        for (i = 0; i < nlim; i++)
        {
            dfhist->n_at_lam[i] = 0;
        }
        return TRUE;
    }

    /* If we reached this far, we have not equilibrated yet, keep on
       going resetting the weights */

    if (EWL(expand->elamstats))
    {
        if (expand->elamstats == elamstatsWL)  /* Standard Wang-Landau */
        {
            dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
            dfhist->wl_histo[fep_state]    += 1.0;
        }
        else if (expand->elamstats == elamstatsWWL) /* Weighted Wang-Landau */
        {
            snew(p_k, nlim);

            /* first increment count */
            GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, 0, nlim-1);
            for (i = 0; i < nlim; i++)
            {
                dfhist->wl_histo[i] += static_cast<real>(p_k[i]);
            }

            /* then increment weights (uses count) */
            pks = 0.0;
            GenerateWeightedGibbsProbabilities(weighted_lamee, p_k, &pks, nlim, dfhist->wl_histo, dfhist->wl_delta);

            for (i = 0; i < nlim; i++)
            {
                dfhist->sum_weights[i] -= dfhist->wl_delta*static_cast<real>(p_k[i]);
            }

            /* Alternate definition, using logarithms. Shouldn't make very much difference! */
            /*
               real di;
               for (i=0;i<nlim;i++)
               {
                di = (real)1.0 + dfhist->wl_delta*(real)p_k[i];
                dfhist->sum_weights[i] -= log(di);
               }
             */
            sfree(p_k);
        }

        zero_sum_weights = dfhist->sum_weights[0];
        for (i = 0; i < nlim; i++)
        {
            dfhist->sum_weights[i] -= zero_sum_weights;
        }
    }

    if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMETROPOLIS || expand->elamstats == elamstatsMINVAR)
    {
        de_function = 0;  /* to get rid of warnings, but this value will not be used because of the logic */
        maxc        = 2*expand->c_range+1;

        snew(lam_dg, nlim);
        snew(lam_variance, nlim);

        snew(omegap_array, maxc);
        snew(weightsp_array, maxc);
        snew(varp_array, maxc);
        snew(dwp_array, maxc);

        snew(omegam_array, maxc);
        snew(weightsm_array, maxc);
        snew(varm_array, maxc);
        snew(dwm_array, maxc);

        /* unpack the current lambdas -- we will only update 2 of these */

        for (i = 0; i < nlim-1; i++)
        {   /* only through the second to last */
            lam_dg[i]       = dfhist->sum_dg[i+1] - dfhist->sum_dg[i];
            lam_variance[i] = gmx::square(dfhist->sum_variance[i+1]) - gmx::square(dfhist->sum_variance[i]);
        }

        /* accumulate running averages */
        for (nval = 0; nval < maxc; nval++)
        {
            /* constants for later use */
            cnval = static_cast<real>(nval-expand->c_range);
            /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
            if (fep_state > 0)
            {
                de = std::exp(cnval - (scaled_lamee[fep_state]-scaled_lamee[fep_state-1]));
                if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
                {
                    de_function = 1.0/(1.0+de);
                }
                else if (expand->elamstats == elamstatsMETROPOLIS)
                {
                    if (de < 1.0)
                    {
                        de_function = 1.0;
                    }
                    else
                    {
                        de_function = 1.0/de;
                    }
                }
                dfhist->accum_m[fep_state][nval]  += de_function;
                dfhist->accum_m2[fep_state][nval] += de_function*de_function;
            }

            if (fep_state < nlim-1)
            {
                de = std::exp(-cnval + (scaled_lamee[fep_state+1]-scaled_lamee[fep_state]));
                if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
                {
                    de_function = 1.0/(1.0+de);
                }
                else if (expand->elamstats == elamstatsMETROPOLIS)
                {
                    if (de < 1.0)
                    {
                        de_function = 1.0;
                    }
                    else
                    {
                        de_function = 1.0/de;
                    }
                }
                dfhist->accum_p[fep_state][nval]  += de_function;
                dfhist->accum_p2[fep_state][nval] += de_function*de_function;
            }

            /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */

            n0 = dfhist->n_at_lam[fep_state];
            if (fep_state > 0)
            {
                nm1 = dfhist->n_at_lam[fep_state-1];
            }
            else
            {
                nm1 = 0;
            }
            if (fep_state < nlim-1)
            {
                np1 = dfhist->n_at_lam[fep_state+1];
            }
            else
            {
                np1 = 0;
            }

            /* logic SHOULD keep these all set correctly whatever the logic, but apparently it can't figure it out. */
            chi_m1_0 = chi_p1_0 = chi_m2_0 = chi_p2_0 = chi_p1_m1 = chi_p2_m1 = chi_m1_p1 = chi_m2_p1 = 0;

            if (n0 > 0)
            {
                chi_m1_0 = dfhist->accum_m[fep_state][nval]/n0;
                chi_p1_0 = dfhist->accum_p[fep_state][nval]/n0;
                chi_m2_0 = dfhist->accum_m2[fep_state][nval]/n0;
                chi_p2_0 = dfhist->accum_p2[fep_state][nval]/n0;
            }

            if ((fep_state > 0 ) && (nm1 > 0))
            {
                chi_p1_m1 = dfhist->accum_p[fep_state-1][nval]/nm1;
                chi_p2_m1 = dfhist->accum_p2[fep_state-1][nval]/nm1;
            }

            if ((fep_state < nlim-1) && (np1 > 0))
            {
                chi_m1_p1 = dfhist->accum_m[fep_state+1][nval]/np1;
                chi_m2_p1 = dfhist->accum_m2[fep_state+1][nval]/np1;
            }

            omega_m1_0    = 0;
            omega_p1_0    = 0;
            clam_weightsm = 0;
            clam_weightsp = 0;
            clam_varm     = 0;
            clam_varp     = 0;

            if (fep_state > 0)
            {
                if (n0 > 0)
                {
                    omega_m1_0 = chi_m2_0/(chi_m1_0*chi_m1_0) - 1.0;
                }
                if (nm1 > 0)
                {
                    real omega_p1_m1 = chi_p2_m1/(chi_p1_m1*chi_p1_m1) - 1.0;
                    clam_weightsm = (std::log(chi_m1_0) - std::log(chi_p1_m1)) + cnval;
                    clam_varm     = (1.0/n0)*(omega_m1_0) + (1.0/nm1)*(omega_p1_m1);
                }
            }

            if (fep_state < nlim-1)
            {
                if (n0 > 0)
                {
                    omega_p1_0 = chi_p2_0/(chi_p1_0*chi_p1_0) - 1.0;
                }
                if (np1 > 0)
                {
                    real omega_m1_p1 = chi_m2_p1/(chi_m1_p1*chi_m1_p1) - 1.0;
                    clam_weightsp = (std::log(chi_m1_p1) - std::log(chi_p1_0)) + cnval;
                    clam_varp     = (1.0/np1)*(omega_m1_p1) + (1.0/n0)*(omega_p1_0);
                }
            }

            if (n0 > 0)
            {
                omegam_array[nval] = omega_m1_0;
            }
            else
            {
                omegam_array[nval] = 0;
            }
            weightsm_array[nval] = clam_weightsm;
            varm_array[nval]     = clam_varm;
            if (nm1 > 0)
            {
                dwm_array[nval] = fabs( (cnval + std::log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
            }
            else
            {
                dwm_array[nval] = std::fabs( cnval - lam_dg[fep_state-1] );
            }

            if (n0 > 0)
            {
                omegap_array[nval] = omega_p1_0;
            }
            else
            {
                omegap_array[nval] = 0;
            }
            weightsp_array[nval] = clam_weightsp;
            varp_array[nval]     = clam_varp;
            if ((np1 > 0) && (n0 > 0))
            {
                dwp_array[nval] = fabs( (cnval + std::log((1.0*np1)/n0)) - lam_dg[fep_state] );
            }
            else
            {
                dwp_array[nval] = std::fabs( cnval - lam_dg[fep_state] );
            }
        }

        /* find the C's closest to the old weights value */

        min_nvalm     = FindMinimum(dwm_array, maxc);
        omega_m1_0    = omegam_array[min_nvalm];
        clam_weightsm = weightsm_array[min_nvalm];
        clam_varm     = varm_array[min_nvalm];

        min_nvalp     = FindMinimum(dwp_array, maxc);
        omega_p1_0    = omegap_array[min_nvalp];
        clam_weightsp = weightsp_array[min_nvalp];
        clam_varp     = varp_array[min_nvalp];

        clam_osum   = omega_m1_0 + omega_p1_0;
        clam_minvar = 0;
        if (clam_osum > 0)
        {
            clam_minvar = 0.5*std::log(clam_osum);
        }

        if (fep_state > 0)
        {
            lam_dg[fep_state-1]       = clam_weightsm;
            lam_variance[fep_state-1] = clam_varm;
        }

        if (fep_state < nlim-1)
        {
            lam_dg[fep_state]       = clam_weightsp;
            lam_variance[fep_state] = clam_varp;
        }

        if (expand->elamstats == elamstatsMINVAR)
        {
            bSufficientSamples = TRUE;
            /* make sure they are all past a threshold */
            for (i = 0; i < nlim; i++)
            {
                if (dfhist->n_at_lam[i] < expand->minvarmin)
                {
                    bSufficientSamples = FALSE;
                }
            }
            if (bSufficientSamples)
            {
                dfhist->sum_minvar[fep_state] = clam_minvar;
                if (fep_state == 0)
                {
                    for (i = 0; i < nlim; i++)
                    {
                        dfhist->sum_minvar[i] += (expand->minvar_const-clam_minvar);
                    }
                    expand->minvar_const          = clam_minvar;
                    dfhist->sum_minvar[fep_state] = 0.0;
                }
                else
                {
                    dfhist->sum_minvar[fep_state] -= expand->minvar_const;
                }
            }
        }

        /* we need to rezero minvar now, since it could change at fep_state = 0 */
        dfhist->sum_dg[0]       = 0.0;
        dfhist->sum_variance[0] = 0.0;
        dfhist->sum_weights[0]  = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */

        for (i = 1; i < nlim; i++)
        {
            dfhist->sum_dg[i]       = lam_dg[i-1] + dfhist->sum_dg[i-1];
            dfhist->sum_variance[i] = std::sqrt(lam_variance[i-1] + gmx::square(dfhist->sum_variance[i-1]));
            dfhist->sum_weights[i]  = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
        }

        sfree(lam_dg);
        sfree(lam_variance);

        sfree(omegam_array);
        sfree(weightsm_array);
        sfree(varm_array);
        sfree(dwm_array);

        sfree(omegap_array);
        sfree(weightsp_array);
        sfree(varp_array);
        sfree(dwp_array);
    }
    return FALSE;
}

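/* Choose the next lambda state by Monte Carlo, using the move type in
   expand->elmcmove (Gibbs, Metropolized Gibbs, Metropolis or Barker), and
   accumulate the proposal/acceptance statistics in the transition matrices. */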
static int ChooseNewLambda(int nlim, const t_expanded *expand, df_history_t *dfhist, int fep_state,
                           const real *weighted_lamee, double *p_k,
                           int64_t seed, int64_t step)
{
    /* Choose new lambda value, and update transition matrix */

    int                                  i, ifep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
    real                                 r1, r2, de, trialprob, tprob = 0;
    double                              *propose, *accept, *remainder;
    double                               pks;
    real                                 pnorm;
    gmx::ThreeFry2x64<0>                 rng(seed, gmx::RandomDomain::ExpandedEnsemble); // We only draw once, so zero bits internal counter is fine
    gmx::UniformRealDistribution<real>   dist;

    starting_fep_state = fep_state;
    lamnew             = fep_state; /* so that there is a default setting -- stays the same */

    if (!EWL(expand->elamstats))    /* ignore equilibrating the weights if using WL */
    {
        if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim-1] <= expand->lmc_forced_nstart))
        {
            /* Use a marching method to run through the lambdas and get preliminary free energy data,
               before starting 'free' sampling. We start free sampling when we have enough at each lambda */

            /* if we have enough at this lambda, move on to the next one */

            if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
            {
                lamnew = fep_state+1;
                if (lamnew == nlim)  /* whoops, stepped too far! */
                {
                    lamnew -= 1;
                }
            }
            else
            {
                lamnew = fep_state;
            }
            return lamnew;
        }
    }

    snew(propose, nlim);
    snew(accept, nlim);
    snew(remainder, nlim);

    for (i = 0; i < expand->lmc_repeats; i++)
    {
        rng.restart(step, i);
        dist.reset();

        for (ifep = 0; ifep < nlim; ifep++)
        {
            propose[ifep] = 0;
            accept[ifep]  = 0;
        }

        if ((expand->elmcmove == elmcmoveGIBBS) || (expand->elmcmove == elmcmoveMETGIBBS))
        {
            /* use the Gibbs sampler, with restricted range */
            if (expand->gibbsdeltalam < 0)
            {
                minfep = 0;
                maxfep = nlim-1;
            }
            else
            {
                minfep = fep_state - expand->gibbsdeltalam;
                maxfep = fep_state + expand->gibbsdeltalam;
                if (minfep < 0)
                {
                    minfep = 0;
                }
                if (maxfep > nlim-1)
                {
                    maxfep = nlim-1;
                }
            }

            GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, minfep, maxfep);

            if (expand->elmcmove == elmcmoveGIBBS)
            {
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    propose[ifep] = p_k[ifep];
                    accept[ifep]  = 1.0;
                }
                /* Gibbs sampling */
                r1 = dist(rng);
                for (lamnew = minfep; lamnew <= maxfep; lamnew++)
                {
                    if (r1 <= p_k[lamnew])
                    {
                        break;
                    }
                    r1 -= p_k[lamnew];
                }
            }
            else if (expand->elmcmove == elmcmoveMETGIBBS)
            {
                /* Metropolized Gibbs sampling */
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    remainder[ifep] = 1 - p_k[ifep];
                }

                /* find the proposal probabilities */

                if (remainder[fep_state] == 0)
                {
                    /* only the current state has any probability */
                    /* we have to stay at the current state */
                    lamnew = fep_state;
                }
                else
                {
                    for (ifep = minfep; ifep <= maxfep; ifep++)
                    {
                        if (ifep != fep_state)
                        {
                            propose[ifep] = p_k[ifep]/remainder[fep_state];
                        }
                        else
                        {
                            propose[ifep] = 0;
                        }
                    }

                    r1 = dist(rng);
                    for (lamtrial = minfep; lamtrial <= maxfep; lamtrial++)
                    {
                        pnorm = p_k[lamtrial]/remainder[fep_state];
                        if (lamtrial != fep_state)
                        {
                            if (r1 <= pnorm)
                            {
                                break;
                            }
                            r1 -= pnorm;
                        }
                    }

                    /* we have now selected lamtrial according to p(lamtrial)/1-p(fep_state) */
                    tprob = 1.0;
                    /* trial probability is min{1,\frac{1-p(old)}{1-p(new)}} MRS 1/8/2008 */
                    trialprob = (remainder[fep_state])/(remainder[lamtrial]);
                    if (trialprob < tprob)
                    {
                        tprob = trialprob;
                    }
                    r2 = dist(rng);
                    if (r2 < tprob)
                    {
                        lamnew = lamtrial;
                    }
                    else
                    {
                        lamnew = fep_state;
                    }
                }

                /* now figure out the acceptance probability for each */
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    tprob = 1.0;
                    if (remainder[ifep] != 0)
                    {
                        trialprob = (remainder[fep_state])/(remainder[ifep]);
                    }
                    else
                    {
                        trialprob = 1.0; /* this state is the only choice! */
                    }
                    if (trialprob < tprob)
                    {
                        tprob = trialprob;
                    }
                    /* probability for fep_state=0, but that's fine, it's never proposed! */
                    accept[ifep] = tprob;
                }
            }

            if (lamnew > maxfep)
            {
                /* it's possible some rounding is failing */
                if (gmx_within_tol(remainder[fep_state], 0, 50*GMX_DOUBLE_EPS))
                {
                    /* numerical rounding error -- no state other than the original has weight */
                    lamnew = fep_state;
                }
                else
                {
                    /* probably not a numerical issue */
                    int   loc    = 0;
                    int   nerror = 200+(maxfep-minfep+1)*60;
                    char *errorstr;
                    snew(errorstr, nerror);
                    /* if its greater than maxfep, then something went wrong -- probably underflow in the calculation
                       of sum weights. Generated detailed info for failure */
                    loc += sprintf(errorstr, "Something wrong in choosing new lambda state with a Gibbs move -- probably underflow in weight determination.\nDenominator is: %3d%17.10e\n  i                dE        numerator          weights\n", 0, pks);
                    for (ifep = minfep; ifep <= maxfep; ifep++)
                    {
                        loc += sprintf(&errorstr[loc], "%3d %17.10e%17.10e%17.10e\n", ifep, weighted_lamee[ifep], p_k[ifep], dfhist->sum_weights[ifep]);
                    }
                    gmx_fatal(FARGS, "%s", errorstr);
                }
            }
        }
        else if ((expand->elmcmove == elmcmoveMETROPOLIS) || (expand->elmcmove == elmcmoveBARKER))
        {
            /* use the metropolis sampler with trial +/- 1 */
            r1 = dist(rng);
            if (r1 < 0.5)
            {
                if (fep_state == 0)
                {
                    lamtrial = fep_state;
                }
                else
                {
                    lamtrial = fep_state-1;
                }
            }
            else
            {
                if (fep_state == nlim-1)
                {
                    lamtrial = fep_state;
                }
                else
                {
                    lamtrial = fep_state+1;
                }
            }

            de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
            if (expand->elmcmove == elmcmoveMETROPOLIS)
            {
                tprob     = 1.0;
                trialprob = std::exp(de);
                if (trialprob < tprob)
                {
                    tprob = trialprob;
                }
                propose[fep_state] = 0;
                propose[lamtrial]  = 1.0; /* note that this overwrites the above line if fep_state = ntrial, which only occurs at the ends */
                accept[fep_state]  = 1.0; /* doesn't actually matter, never proposed unless fep_state = ntrial, in which case it's 1.0 anyway */
                accept[lamtrial]   = tprob;
            }
            else if (expand->elmcmove == elmcmoveBARKER)
            {
                tprob = 1.0/(1.0+std::exp(-de));

                propose[fep_state] = (1-tprob);
                propose[lamtrial] += tprob; /* we add, to account for the fact that at the end, they might be the same point */
                accept[fep_state]  = 1.0;
                accept[lamtrial]   = 1.0;
            }

            r2 = dist(rng);
            if (r2 < tprob)
            {
                lamnew = lamtrial;
            }
            else
            {
                lamnew = fep_state;
            }
        }

        for (ifep = 0; ifep < nlim; ifep++)
        {
            dfhist->Tij[fep_state][ifep]      += propose[ifep]*accept[ifep];
            dfhist->Tij[fep_state][fep_state] += propose[ifep]*(1.0-accept[ifep]);
        }
        fep_state = lamnew;
    }

    dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;

    sfree(propose);
    sfree(accept);
    sfree(remainder);

    return lamnew;
}

/* print out the weights to the log, along with current state */
void PrintFreeEnergyInfoToFile(FILE *outfile, const t_lambda *fep, const t_expanded *expand,
                               const t_simtemp *simtemp, const df_history_t *dfhist,
                               int fep_state, int frequency, int64_t step)
{
    int         nlim, i, ifep, jfep;
    real        dw, dg, dv, Tprint;
    const char *print_names[efptNR] = {" FEPL", "MassL", "CoulL", " VdwL", "BondL", "RestT", "Temp.(K)"};
    gmx_bool    bSimTemp            = FALSE;

    nlim = fep->n_lambda;
    if (simtemp != nullptr)
    {
        bSimTemp = TRUE;
    }

    if (step % frequency == 0)
    {
        fprintf(outfile, "             MC-lambda information\n");
        if (EWL(expand->elamstats) && (!(dfhist->bEquil)))
        {
            fprintf(outfile, "  Wang-Landau incrementor is: %11.5g\n", dfhist->wl_delta);
        }
        fprintf(outfile, "  N");
        for (i = 0; i < efptNR; i++)
        {
            if (fep->separate_dvdl[i])
            {
                fprintf(outfile, "%7s", print_names[i]);
            }
            else if ((i == efptTEMPERATURE) && bSimTemp)
            {
                fprintf(outfile, "%10s", print_names[i]); /* more space for temperature formats */
            }
        }
        fprintf(outfile, "    Count   ");
        if (expand->elamstats == elamstatsMINVAR)
        {
            fprintf(outfile, "W(in kT)   G(in kT)  dG(in kT)  dV(in kT)\n");
        }
        else
        {
            fprintf(outfile, "G(in kT)  dG(in kT)\n");
        }
        for (ifep = 0; ifep < nlim; ifep++)
        {
            if (ifep == nlim-1)
            {
                dw = 0.0;
                dg = 0.0;
                dv = 0.0;
            }
            else
            {
                dw = dfhist->sum_weights[ifep+1] - dfhist->sum_weights[ifep];
                dg = dfhist->sum_dg[ifep+1] - dfhist->sum_dg[ifep];
                dv = std::sqrt(gmx::square(dfhist->sum_variance[ifep+1]) - gmx::square(dfhist->sum_variance[ifep]));
            }
            fprintf(outfile, "%3d", (ifep+1));
            for (i = 0; i < efptNR; i++)
            {
                if (fep->separate_dvdl[i])
                {
                    fprintf(outfile, "%7.3f", fep->all_lambda[i][ifep]);
                }
                else if (i == efptTEMPERATURE && bSimTemp)
                {
                    fprintf(outfile, "%9.3f", simtemp->temperatures[ifep]);
                }
            }
            if (EWL(expand->elamstats) && (!(dfhist->bEquil))) /* if performing WL and still haven't equilibrated */
            {
                if (expand->elamstats == elamstatsWL)
                {
                    fprintf(outfile, " %8d", static_cast<int>(dfhist->wl_histo[ifep]));
                }
                else
                {
                    fprintf(outfile, " %8.3f", dfhist->wl_histo[ifep]);
                }
            }
            else   /* we have equilibrated weights */
            {
                fprintf(outfile, " %8d", dfhist->n_at_lam[ifep]);
            }
            if (expand->elamstats == elamstatsMINVAR)
            {
                fprintf(outfile, " %10.5f %10.5f %10.5f %10.5f", dfhist->sum_weights[ifep], dfhist->sum_dg[ifep], dg, dv);
            }
            else
            {
                fprintf(outfile, " %10.5f %10.5f", dfhist->sum_weights[ifep], dw);
            }
            if (ifep == fep_state)
            {
                fprintf(outfile, " <<\n");
            }
            else
            {
                fprintf(outfile, "   \n");
            }
        }
        fprintf(outfile, "\n");

        if ((step % expand->nstTij == 0) && (expand->nstTij > 0) && (step > 0))
        {
            fprintf(outfile, "                     Transition Matrix\n");
            for (ifep = 0; ifep < nlim; ifep++)
            {
                fprintf(outfile, "%12d", (ifep+1));
            }
            fprintf(outfile, "\n");
            for (ifep = 0; ifep < nlim; ifep++)
            {
                for (jfep = 0; jfep < nlim; jfep++)
                {
                    if (dfhist->n_at_lam[ifep] > 0)
                    {
                        if (expand->bSymmetrizedTMatrix)
                        {
                            Tprint = (dfhist->Tij[ifep][jfep]+dfhist->Tij[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
                        }
                        else
                        {
                            Tprint = (dfhist->Tij[ifep][jfep])/(dfhist->n_at_lam[ifep]);
                        }
                    }
                    else
                    {
                        Tprint = 0.0;
                    }
                    fprintf(outfile, "%12.8f", Tprint);
                }
                fprintf(outfile, "%3d\n", (ifep+1));
            }

            fprintf(outfile, "                  Empirical Transition Matrix\n");
            for (ifep = 0; ifep < nlim; ifep++)
            {
                fprintf(outfile, "%12d", (ifep+1));
            }
            fprintf(outfile, "\n");
            for (ifep = 0; ifep < nlim; ifep++)
            {
                for (jfep = 0; jfep < nlim; jfep++)
                {
                    if (dfhist->n_at_lam[ifep] > 0)
                    {
                        if (expand->bSymmetrizedTMatrix)
                        {
                            Tprint = (dfhist->Tij_empirical[ifep][jfep]+dfhist->Tij_empirical[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
                        }
                        else
                        {
                            Tprint = dfhist->Tij_empirical[ifep][jfep]/(dfhist->n_at_lam[ifep]);
                        }
                    }
                    else
                    {
                        Tprint = 0.0;
                    }
                    fprintf(outfile, "%12.8f", Tprint);
                }
                fprintf(outfile, "%3d\n", (ifep+1));
            }
        }
    }
}

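/* Main expanded-ensemble driver, called at the expanded-ensemble move interval:
   accumulates the reduced energies for all lambda states, updates the weights,
   picks the new lambda state, and, for simulated tempering, rescales velocities
   and thermostat variables to the new temperature. Returns the new state index. */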
int ExpandedEnsembleDynamics(FILE *log, const t_inputrec *ir, const gmx_enerdata_t *enerd,
                             t_state *state, t_extmass *MassQ, int fep_state, df_history_t *dfhist,
                             int64_t step,
                             rvec *v, const t_mdatoms *mdatoms)
/* Note that the state variable is only needed for simulated tempering, not
   Hamiltonian expanded ensemble. May be able to remove it after integrator refactoring. */
{
    real       *pfep_lamee, *scaled_lamee, *weighted_lamee;
    double     *p_k;
    int         i, nlim, lamnew, totalsamples;
    real        oneovert, maxscaled = 0, maxweighted = 0;
    t_expanded *expand;
    t_simtemp  *simtemp;
    gmx_bool    bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE;

    expand  = ir->expandedvals;
    simtemp = ir->simtempvals;
    nlim    = ir->fepvals->n_lambda;

    snew(scaled_lamee, nlim);
    snew(weighted_lamee, nlim);
    snew(pfep_lamee, nlim);
    snew(p_k, nlim);

    /* update the count at the current lambda*/
    dfhist->n_at_lam[fep_state]++;

    /* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda state that's
       pressure controlled.*/
    /*
       pVTerm = 0;
       where does this PV term go?
       for (i=0;i<nlim;i++)
       {
       fep_lamee[i] += pVTerm;
       }
     */

    /* determine the minimum value to avoid overflow. Probably a better way to do this */
    /* we don't need to include the pressure term, since the volume is the same between the two.
       is there some term we are neglecting, however? */

    if (ir->efep != efepNO)
    {
        for (i = 0; i < nlim; i++)
        {
            if (ir->bSimTemp)
            {
                /* Note -- this assumes no mass changes, since kinetic energy is not added . . . */
                scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(simtemp->temperatures[i]*BOLTZ)
                    + enerd->term[F_EPOT]*(1.0/(simtemp->temperatures[i])- 1.0/(simtemp->temperatures[fep_state]))/BOLTZ;
            }
            else
            {
                scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(expand->mc_temp*BOLTZ);
                /* mc_temp is currently set to the system reft unless otherwise defined */
            }

            /* save these energies for printing, so they don't get overwritten by the next step */
            /* they aren't overwritten in the non-free energy case, but we always print with these
               for simplicity */
        }
    }
    else
    {
        if (ir->bSimTemp)
        {
            for (i = 0; i < nlim; i++)
            {
                scaled_lamee[i] = enerd->term[F_EPOT]*(1.0/simtemp->temperatures[i] - 1.0/simtemp->temperatures[fep_state])/BOLTZ;
            }
        }
    }

    for (i = 0; i < nlim; i++)
    {
        pfep_lamee[i] = scaled_lamee[i];

        weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
        if (i == 0)
        {
            maxscaled   = scaled_lamee[i];
            maxweighted = weighted_lamee[i];
        }
        else
        {
            if (scaled_lamee[i] > maxscaled)
            {
                maxscaled = scaled_lamee[i];
            }
            if (weighted_lamee[i] > maxweighted)
            {
                maxweighted = weighted_lamee[i];
            }
        }
    }

    for (i = 0; i < nlim; i++)
    {
        scaled_lamee[i]   -= maxscaled;
        weighted_lamee[i] -= maxweighted;
    }

    /* update weights - we decide whether or not to actually do this inside */

    bDoneEquilibrating = UpdateWeights(nlim, expand, dfhist, fep_state, scaled_lamee, weighted_lamee, step);
    if (bDoneEquilibrating)
    {
        if (log)
        {
            fprintf(log, "\nStep %" PRId64 ": Weights have equilibrated, using criteria: %s\n", step, elmceq_names[expand->elmceq]);
        }
    }

    lamnew = ChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, p_k,
                             ir->expandedvals->lmc_seed, step);

    /* if using simulated tempering, we need to adjust the temperatures */
    if (ir->bSimTemp && (lamnew != fep_state)) /* only need to change the temperatures if we change the state */
    {
        int   i, j, n, d;
        real *buf_ngtc;
        real  told;
        int   nstart, nend, gt;

        snew(buf_ngtc, ir->opts.ngtc);

        for (i = 0; i < ir->opts.ngtc; i++)
        {
            if (ir->opts.ref_t[i] > 0)
            {
                told              = ir->opts.ref_t[i];
                ir->opts.ref_t[i] = simtemp->temperatures[lamnew];
                buf_ngtc[i]       = std::sqrt(ir->opts.ref_t[i]/told); /* using the buffer as temperature scaling */
            }
        }

        /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */

        nstart = 0;
        nend   = mdatoms->homenr;
        for (n = nstart; n < nend; n++)
        {
            gt = 0;
            if (mdatoms->cTC)
            {
                gt = mdatoms->cTC[n];
            }
            for (d = 0; d < DIM; d++)
            {
                v[n][d] *= buf_ngtc[gt];
            }
        }

        if (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir))
        {
            /* we need to recalculate the masses if the temperature has changed */
            init_npt_masses(ir, state, MassQ, FALSE);
            for (i = 0; i < state->nnhpres; i++)
            {
                for (j = 0; j < ir->opts.nhchainlength; j++)
                {
                    state->nhpres_vxi[i+j] *= buf_ngtc[i];
                }
            }
            for (i = 0; i < ir->opts.ngtc; i++)
            {
                for (j = 0; j < ir->opts.nhchainlength; j++)
                {
                    state->nosehoover_vxi[i+j] *= buf_ngtc[i];
                }
            }
        }
        sfree(buf_ngtc);
    }

    /* now check on the Wang-Landau updating criteria */

    if (EWL(expand->elamstats))
    {
        bSwitchtoOneOverT = FALSE;
        if (expand->bWLoneovert)
        {
            totalsamples = 0;
            for (i = 0; i < nlim; i++)
            {
                totalsamples += dfhist->n_at_lam[i];
            }
            oneovert = (1.0*nlim)/totalsamples;
            /* oneovert has decreased by a bit since last time, so we actually make sure it's within one of this number */
            /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
            if ((dfhist->wl_delta <= ((totalsamples)/(totalsamples-1.00001))*oneovert) &&
                (dfhist->wl_delta < expand->init_wl_delta))
            {
                bSwitchtoOneOverT = TRUE;
            }
        }
        if (bSwitchtoOneOverT)
        {
            dfhist->wl_delta = oneovert; /* now we reduce by this each time, instead of only at flatness */
        }
        else
        {
            bIfReset = CheckHistogramRatios(nlim, dfhist->wl_histo, expand->wl_ratio);
            if (bIfReset)
            {
                for (i = 0; i < nlim; i++)
                {
                    dfhist->wl_histo[i] = 0;
                }
                dfhist->wl_delta *= expand->wl_scale;
                if (log)
                {
                    fprintf(log, "\nStep %d: weights are now:", static_cast<int>(step));
                    for (i = 0; i < nlim; i++)
                    {
                        fprintf(log, " %.5f", dfhist->sum_weights[i]);
                    }
                    fprintf(log, "\n");
                }
            }
        }
    }
    sfree(pfep_lamee);
    sfree(scaled_lamee);
    sfree(weighted_lamee);
    sfree(p_k);

    return lamnew;
}