/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "expanded.h"

#include <stdio.h>

#include <cmath>

#include <algorithm>

#include "gromacs/domdec/domdec.h"
#include "gromacs/fileio/confio.h"
#include "gromacs/fileio/gmxfio.h"
#include "gromacs/fileio/xtcio.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/listed-forces/disre.h"
#include "gromacs/listed-forces/orires.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/random/threefry.h"
#include "gromacs/random/uniformrealdistribution.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"
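/* Initialize the df_history weights from the mdp options: the Wang-Landau
   incrementor starts at init_wl_delta, and both sum_weights and sum_dg start
   from the user-supplied initial lambda weights. */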
static void init_df_history_weights(df_history_t *dfhist, t_expanded *expand, int nlim)
{
    int i;
    dfhist->wl_delta = expand->init_wl_delta;
    for (i = 0; i < nlim; i++)
    {
        dfhist->sum_weights[i] = expand->init_lambda_weights[i];
        dfhist->sum_dg[i]      = expand->init_lambda_weights[i];
    }
}
/* Eventually should contain all the functions needed to initialize expanded ensemble
   before the md loop starts */
void init_expanded_ensemble(gmx_bool bStateFromCP, t_inputrec *ir, df_history_t *dfhist)
{
    if (!bStateFromCP)
    {
        init_df_history_weights(dfhist, ir->expandedvals, ir->fepvals->n_lambda);
    }
}
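/* Compute normalized Gibbs-sampling probabilities p_k[i] from the (log-space)
   energies ene[i] over the state range [minfep, maxfep]. The maximum is
   subtracted before exponentiating to avoid overflow; *pks returns the
   (shifted) normalization sum. */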
static void GenerateGibbsProbabilities(real *ene, double *p_k, double *pks, int minfep, int maxfep)
{

    int  i;
    real maxene;

    *pks   = 0.0;
    maxene = ene[minfep];
    /* find the maximum value */
    for (i = minfep; i <= maxfep; i++)
    {
        if (ene[i] > maxene)
        {
            maxene = ene[i];
        }
    }
    /* find the denominator */
    for (i = minfep; i <= maxfep; i++)
    {
        *pks += std::exp(ene[i]-maxene);
    }
    /* numerators */
    for (i = minfep; i <= maxfep; i++)
    {
        p_k[i] = std::exp(ene[i]-maxene) / *pks;
    }
}
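/* As GenerateGibbsProbabilities, but each state is additionally weighted by the
   logarithm of its histogram count nvals[i] (with delta added when a bin is empty,
   so the logarithm is defined). Used by the weighted Wang-Landau update. */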
static void GenerateWeightedGibbsProbabilities(real *ene, double *p_k, double *pks, int nlim, real *nvals, real delta)
{

    int   i;
    real  maxene;
    real *nene;
    *pks = 0.0;

    snew(nene, nlim);
    for (i = 0; i < nlim; i++)
    {
        if (nvals[i] == 0)
        {
            /* add the delta, since we need to make sure it's greater than zero, and
               we need a non-arbitrary number? */
            nene[i] = ene[i] + std::log(nvals[i]+delta);
        }
        else
        {
            nene[i] = ene[i] + std::log(nvals[i]);
        }
    }

    /* find the maximum value */
    maxene = nene[0];
    for (i = 0; i < nlim; i++)
    {
        if (nene[i] > maxene)
        {
            maxene = nene[i];
        }
    }

    /* subtract off the maximum, avoiding overflow */
    for (i = 0; i < nlim; i++)
    {
        nene[i] -= maxene;
    }

    /* find the denominator */
    for (i = 0; i < nlim; i++)
    {
        *pks += std::exp(nene[i]);
    }

    /* numerators */
    for (i = 0; i < nlim; i++)
    {
        p_k[i] = std::exp(nene[i]) / *pks;
    }
    sfree(nene);
}
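/* Return the index of the smallest value in min_metric[0..N-1]. */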
static int FindMinimum(real *min_metric, int N)
{

    real min_val;
    int  min_nval, nval;

    min_nval = 0;
    min_val  = min_metric[0];

    for (nval = 0; nval < N; nval++)
    {
        if (min_metric[nval] < min_val)
        {
            min_val  = min_metric[nval];
            min_nval = nval;
        }
    }
    return min_nval;
}
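/* Return TRUE if every histogram bin lies within a factor of 'ratio' of the mean
   occupancy (ratio < histo[i]/mean < 1/ratio), i.e. the histogram is flat enough;
   return FALSE if any bin fails the test or if there are no samples at all. */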
static gmx_bool CheckHistogramRatios(int nhisto, real *histo, real ratio)
{

    int      i;
    real     nmean;
    gmx_bool bIfFlat;

    nmean = 0;
    for (i = 0; i < nhisto; i++)
    {
        nmean += histo[i];
    }

    if (nmean == 0)
    {
        /* no samples! is bad!*/
        bIfFlat = FALSE;
        return bIfFlat;
    }
    nmean /= (real)nhisto;

    bIfFlat = TRUE;
    for (i = 0; i < nhisto; i++)
    {
        /* make sure that all points are in the ratio < x < 1/ratio range */
        if (!((histo[i]/nmean < 1.0/ratio) && (histo[i]/nmean > ratio)))
        {
            bIfFlat = FALSE;
            break;
        }
    }
    return bIfFlat;
}
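/* Decide whether the expanded-ensemble weights are considered equilibrated at this
   step, according to the criterion selected in expand->elmceq (never/always,
   number of steps or samples, samples per state, Wang-Landau delta, or histogram
   flatness). During the initial forced sweep we are never equilibrated. */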
static gmx_bool CheckIfDoneEquilibrating(int nlim, t_expanded *expand, df_history_t *dfhist, gmx_int64_t step)
{

    int      i, totalsamples;
    gmx_bool bDoneEquilibrating = TRUE;
    gmx_bool bIfFlat;

    /* If we are doing slow growth to get initial values, we haven't finished equilibrating */
    if (expand->lmc_forced_nstart > 0)
    {
        for (i = 0; i < nlim; i++)
        {
            if (dfhist->n_at_lam[i] < expand->lmc_forced_nstart) /* we are still doing the initial sweep, so we're definitely not
                                                                    done equilibrating*/
            {
                bDoneEquilibrating = FALSE;
                break;
            }
        }
    }
    else
    {
        /* assume we have equilibrated the weights, then check to see if any of the conditions are not met */
        bDoneEquilibrating = TRUE;

        /* calculate the total number of samples */
        switch (expand->elmceq)
        {
            case elmceqNO:
                /* We have not equilibrated, and won't, ever. */
                bDoneEquilibrating = FALSE;
                break;
            case elmceqYES:
                /* we have equilibrated -- we're done */
                bDoneEquilibrating = TRUE;
                break;
            case elmceqSTEPS:
                /* first, check if we are equilibrating by steps, if we're still under */
                if (step < expand->equil_steps)
                {
                    bDoneEquilibrating = FALSE;
                }
                break;
            case elmceqSAMPLES:
                totalsamples = 0;
                for (i = 0; i < nlim; i++)
                {
                    totalsamples += dfhist->n_at_lam[i];
                }
                if (totalsamples < expand->equil_samples)
                {
                    bDoneEquilibrating = FALSE;
                }
                break;
            case elmceqNUMATLAM:
                for (i = 0; i < nlim; i++)
                {
                    if (dfhist->n_at_lam[i] < expand->equil_n_at_lam) /* we are still doing the initial sweep, so we're definitely not
                                                                         done equilibrating*/
                    {
                        bDoneEquilibrating = FALSE;
                        break;
                    }
                }
                break;
            case elmceqWLDELTA:
                if (EWL(expand->elamstats)) /* This check is in readir as well, but
                                               just to be sure */
                {
                    if (dfhist->wl_delta > expand->equil_wl_delta)
                    {
                        bDoneEquilibrating = FALSE;
                    }
                }
                break;
            case elmceqRATIO:
                /* we can use the flatness as a judge of good weights, as long as
                   we're not doing minvar, or Wang-Landau.
                   But turn off for now until we figure out exactly how we do this.
                 */
                if (!(EWL(expand->elamstats) || expand->elamstats == elamstatsMINVAR))
                {
                    /* we want to use flatness -avoiding- the forced-through samples. Plus, we need to convert to
                       floats for this histogram function. */

                    real *modhisto;
                    snew(modhisto, nlim);
                    for (i = 0; i < nlim; i++)
                    {
                        modhisto[i] = 1.0*(dfhist->n_at_lam[i]-expand->lmc_forced_nstart);
                    }
                    bIfFlat = CheckHistogramRatios(nlim, modhisto, expand->equil_ratio);
                    sfree(modhisto);
                    if (!bIfFlat)
                    {
                        bDoneEquilibrating = FALSE;
                    }
                }
                break;
            default:
                bDoneEquilibrating = TRUE;
                break;
        }
    }
    return bDoneEquilibrating;
}
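/* Perform one update of the lambda-state weights for the chosen scheme
   (Wang-Landau, weighted Wang-Landau, Barker, Metropolis or minimum-variance).
   Returns TRUE exactly once, at the step the weights are detected to have
   equilibrated; after that the weights are frozen and the function exits early. */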
static gmx_bool UpdateWeights(int nlim, t_expanded *expand, df_history_t *dfhist,
                              int fep_state, real *scaled_lamee, real *weighted_lamee, gmx_int64_t step)
{
    gmx_bool  bSufficientSamples;
    int       i;
    int       n0, np1, nm1, nval, min_nvalm, min_nvalp, maxc;
    real      omega_m1_0, omega_p1_0, clam_osum;
    real      de, de_function;
    real      cnval, zero_sum_weights;
    real     *omegam_array, *weightsm_array, *omegap_array, *weightsp_array, *varm_array, *varp_array, *dwp_array, *dwm_array;
    real      clam_varm, clam_varp, clam_weightsm, clam_weightsp, clam_minvar;
    real     *lam_variance, *lam_dg;
    double   *p_k;
    double    pks = 0;
    real      chi_m1_0, chi_p1_0, chi_m2_0, chi_p2_0, chi_p1_m1, chi_p2_m1, chi_m1_p1, chi_m2_p1;

    /* if we have equilibrated the weights, exit now */
    if (dfhist->bEquil)
    {
        return FALSE;
    }

    if (CheckIfDoneEquilibrating(nlim, expand, dfhist, step))
    {
        dfhist->bEquil = TRUE;
        /* zero out the visited states so we know how many equilibrated states we have
           from here on out.*/
        for (i = 0; i < nlim; i++)
        {
            dfhist->n_at_lam[i] = 0;
        }
        return TRUE;
    }

    /* If we reached this far, we have not equilibrated yet, keep on
       going resetting the weights */

    if (EWL(expand->elamstats))
    {
        if (expand->elamstats == elamstatsWL)       /* Standard Wang-Landau */
        {
            dfhist->sum_weights[fep_state] -= dfhist->wl_delta;
            dfhist->wl_histo[fep_state]    += 1.0;
        }
        else if (expand->elamstats == elamstatsWWL) /* Weighted Wang-Landau */
        {
            snew(p_k, nlim);

            /* first increment count */
            GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, 0, nlim-1);
            for (i = 0; i < nlim; i++)
            {
                dfhist->wl_histo[i] += (real)p_k[i];
            }

            /* then increment weights (uses count) */
            pks = 0.0;
            GenerateWeightedGibbsProbabilities(weighted_lamee, p_k, &pks, nlim, dfhist->wl_histo, dfhist->wl_delta);

            for (i = 0; i < nlim; i++)
            {
                dfhist->sum_weights[i] -= dfhist->wl_delta*(real)p_k[i];
            }
            /* Alternate definition, using logarithms. Shouldn't make very much difference! */
            /*
               real di;
               for (i=0;i<nlim;i++)
               {
                di = (real)1.0 + dfhist->wl_delta*(real)p_k[i];
                dfhist->sum_weights[i] -= log(di);
               }
             */
            sfree(p_k);
        }

        zero_sum_weights = dfhist->sum_weights[0];
        for (i = 0; i < nlim; i++)
        {
            dfhist->sum_weights[i] -= zero_sum_weights;
        }
    }

    if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMETROPOLIS || expand->elamstats == elamstatsMINVAR)
    {

        de_function = 0; /* to get rid of warnings, but this value will not be used because of the logic */
        maxc        = 2*expand->c_range+1;

        snew(lam_dg, nlim);
        snew(lam_variance, nlim);

        snew(omegap_array, maxc);
        snew(weightsp_array, maxc);
        snew(varp_array, maxc);
        snew(dwp_array, maxc);

        snew(omegam_array, maxc);
        snew(weightsm_array, maxc);
        snew(varm_array, maxc);
        snew(dwm_array, maxc);

        /* unpack the current lambdas -- we will only update 2 of these */

        for (i = 0; i < nlim-1; i++)
        {   /* only through the second to last */
            lam_dg[i]       = dfhist->sum_dg[i+1] - dfhist->sum_dg[i];
            lam_variance[i] = gmx::square(dfhist->sum_variance[i+1]) - gmx::square(dfhist->sum_variance[i]);
        }

        /* accumulate running averages */
        for (nval = 0; nval < maxc; nval++)
        {
            /* constants for later use */
            cnval = (real)(nval-expand->c_range);
            /* actually, should be able to rewrite it w/o exponential, for better numerical stability */
            if (fep_state > 0)
            {
                de = std::exp(cnval - (scaled_lamee[fep_state]-scaled_lamee[fep_state-1]));
                if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
                {
                    de_function = 1.0/(1.0+de);
                }
                else if (expand->elamstats == elamstatsMETROPOLIS)
                {
                    if (de < 1.0)
                    {
                        de_function = 1.0;
                    }
                    else
                    {
                        de_function = 1.0/de;
                    }
                }
                dfhist->accum_m[fep_state][nval]  += de_function;
                dfhist->accum_m2[fep_state][nval] += de_function*de_function;
            }

            if (fep_state < nlim-1)
            {
                de = std::exp(-cnval + (scaled_lamee[fep_state+1]-scaled_lamee[fep_state]));
                if (expand->elamstats == elamstatsBARKER || expand->elamstats == elamstatsMINVAR)
                {
                    de_function = 1.0/(1.0+de);
                }
                else if (expand->elamstats == elamstatsMETROPOLIS)
                {
                    if (de < 1.0)
                    {
                        de_function = 1.0;
                    }
                    else
                    {
                        de_function = 1.0/de;
                    }
                }
                dfhist->accum_p[fep_state][nval]  += de_function;
                dfhist->accum_p2[fep_state][nval] += de_function*de_function;
            }

            /* Metropolis transition and Barker transition (unoptimized Bennett) acceptance weight determination */

            n0 = dfhist->n_at_lam[fep_state];
            if (fep_state > 0)
            {
                nm1 = dfhist->n_at_lam[fep_state-1];
            }
            else
            {
                nm1 = 0;
            }
            if (fep_state < nlim-1)
            {
                np1 = dfhist->n_at_lam[fep_state+1];
            }
            else
            {
                np1 = 0;
            }

            /* logic SHOULD keep these all set correctly whatever the logic, but apparently it can't figure it out. */
            chi_m1_0 = chi_p1_0 = chi_m2_0 = chi_p2_0 = chi_p1_m1 = chi_p2_m1 = chi_m1_p1 = chi_m2_p1 = 0;

            if (n0 > 0)
            {
                chi_m1_0 = dfhist->accum_m[fep_state][nval]/n0;
                chi_p1_0 = dfhist->accum_p[fep_state][nval]/n0;
                chi_m2_0 = dfhist->accum_m2[fep_state][nval]/n0;
                chi_p2_0 = dfhist->accum_p2[fep_state][nval]/n0;
            }

            if ((fep_state > 0 ) && (nm1 > 0))
            {
                chi_p1_m1 = dfhist->accum_p[fep_state-1][nval]/nm1;
                chi_p2_m1 = dfhist->accum_p2[fep_state-1][nval]/nm1;
            }

            if ((fep_state < nlim-1) && (np1 > 0))
            {
                chi_m1_p1 = dfhist->accum_m[fep_state+1][nval]/np1;
                chi_m2_p1 = dfhist->accum_m2[fep_state+1][nval]/np1;
            }

            omega_m1_0    = 0;
            omega_p1_0    = 0;
            clam_weightsm = 0;
            clam_weightsp = 0;
            clam_varm     = 0;
            clam_varp     = 0;

            if (fep_state > 0)
            {
                if (n0 > 0)
                {
                    omega_m1_0 = chi_m2_0/(chi_m1_0*chi_m1_0) - 1.0;
                }
                if (nm1 > 0)
                {
                    real omega_p1_m1 = chi_p2_m1/(chi_p1_m1*chi_p1_m1) - 1.0;
                    clam_weightsm = (std::log(chi_m1_0) - std::log(chi_p1_m1)) + cnval;
                    clam_varm     = (1.0/n0)*(omega_m1_0) + (1.0/nm1)*(omega_p1_m1);
                }
            }

            if (fep_state < nlim-1)
            {
                if (n0 > 0)
                {
                    omega_p1_0 = chi_p2_0/(chi_p1_0*chi_p1_0) - 1.0;
                }
                if (np1 > 0)
                {
                    real omega_m1_p1 = chi_m2_p1/(chi_m1_p1*chi_m1_p1) - 1.0;
                    clam_weightsp = (std::log(chi_m1_p1) - std::log(chi_p1_0)) + cnval;
                    clam_varp     = (1.0/np1)*(omega_m1_p1) + (1.0/n0)*(omega_p1_0);
                }
            }

            if (n0 > 0)
            {
                omegam_array[nval] = omega_m1_0;
            }
            else
            {
                omegam_array[nval] = 0;
            }
            weightsm_array[nval] = clam_weightsm;
            varm_array[nval]     = clam_varm;
            if (nm1 > 0)
            {
                dwm_array[nval] = fabs( (cnval + std::log((1.0*n0)/nm1)) - lam_dg[fep_state-1] );
            }
            else
            {
                dwm_array[nval] = fabs( cnval - lam_dg[fep_state-1] );
            }

            if (n0 > 0)
            {
                omegap_array[nval] = omega_p1_0;
            }
            else
            {
                omegap_array[nval] = 0;
            }
            weightsp_array[nval] = clam_weightsp;
            varp_array[nval]     = clam_varp;
            if ((np1 > 0) && (n0 > 0))
            {
                dwp_array[nval] = fabs( (cnval + std::log((1.0*np1)/n0)) - lam_dg[fep_state] );
            }
            else
            {
                dwp_array[nval] = fabs( cnval - lam_dg[fep_state] );
            }
        }

        /* find the C's closest to the old weights value */

        min_nvalm     = FindMinimum(dwm_array, maxc);
        omega_m1_0    = omegam_array[min_nvalm];
        clam_weightsm = weightsm_array[min_nvalm];
        clam_varm     = varm_array[min_nvalm];

        min_nvalp     = FindMinimum(dwp_array, maxc);
        omega_p1_0    = omegap_array[min_nvalp];
        clam_weightsp = weightsp_array[min_nvalp];
        clam_varp     = varp_array[min_nvalp];

        clam_osum   = omega_m1_0 + omega_p1_0;
        clam_minvar = 0;
        if (clam_osum > 0)
        {
            clam_minvar = 0.5*std::log(clam_osum);
        }

        if (fep_state > 0)
        {
            lam_dg[fep_state-1]       = clam_weightsm;
            lam_variance[fep_state-1] = clam_varm;
        }

        if (fep_state < nlim-1)
        {
            lam_dg[fep_state]       = clam_weightsp;
            lam_variance[fep_state] = clam_varp;
        }

        if (expand->elamstats == elamstatsMINVAR)
        {
            bSufficientSamples = TRUE;
            /* make sure they are all past a threshold */
            for (i = 0; i < nlim; i++)
            {
                if (dfhist->n_at_lam[i] < expand->minvarmin)
                {
                    bSufficientSamples = FALSE;
                }
            }
            if (bSufficientSamples)
            {
                dfhist->sum_minvar[fep_state] = clam_minvar;
                if (fep_state == 0)
                {
                    for (i = 0; i < nlim; i++)
                    {
                        dfhist->sum_minvar[i] += (expand->minvar_const-clam_minvar);
                    }
                    expand->minvar_const          = clam_minvar;
                    dfhist->sum_minvar[fep_state] = 0.0;
                }
                else
                {
                    dfhist->sum_minvar[fep_state] -= expand->minvar_const;
                }
            }
        }

        /* we need to rezero minvar now, since it could change at fep_state = 0 */
        dfhist->sum_dg[0]       = 0.0;
        dfhist->sum_variance[0] = 0.0;
        dfhist->sum_weights[0]  = dfhist->sum_dg[0] + dfhist->sum_minvar[0]; /* should be zero */

        for (i = 1; i < nlim; i++)
        {
            dfhist->sum_dg[i]       = lam_dg[i-1] + dfhist->sum_dg[i-1];
            dfhist->sum_variance[i] = std::sqrt(lam_variance[i-1] + gmx::square(dfhist->sum_variance[i-1]));
            dfhist->sum_weights[i]  = dfhist->sum_dg[i] + dfhist->sum_minvar[i];
        }

        sfree(lam_dg);
        sfree(lam_variance);

        sfree(omegam_array);
        sfree(weightsm_array);
        sfree(varm_array);
        sfree(dwm_array);

        sfree(omegap_array);
        sfree(weightsp_array);
        sfree(varp_array);
        sfree(dwp_array);
    }
    return FALSE;
}
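/* Pick the next lambda state using the selected Monte Carlo move (Gibbs,
   Metropolized Gibbs, Metropolis or Barker), accumulating the proposal and
   acceptance probabilities into the transition matrix dfhist->Tij. During the
   initial forced "marching" phase (lmc_forced_nstart > 0) the states are simply
   visited in order instead. Returns the index of the new lambda state. */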
static int ChooseNewLambda(int nlim, t_expanded *expand, df_history_t *dfhist, int fep_state, real *weighted_lamee, double *p_k,
                           gmx_int64_t seed, gmx_int64_t step)
{
    /* Choose new lambda value, and update transition matrix */

    int                                i, ifep, minfep, maxfep, lamnew, lamtrial, starting_fep_state;
    real                               r1, r2, de, trialprob, tprob = 0;
    double                            *propose, *accept, *remainder;
    double                             pks;
    real                               pnorm;
    gmx::ThreeFry2x64<0>               rng(seed, gmx::RandomDomain::ExpandedEnsemble); // We only draw once, so zero bits internal counter is fine
    gmx::UniformRealDistribution<real> dist;

    starting_fep_state = fep_state;
    lamnew             = fep_state; /* so that there is a default setting -- stays the same */

    if (!EWL(expand->elamstats))    /* ignore equilibrating the weights if using WL */
    {
        if ((expand->lmc_forced_nstart > 0) && (dfhist->n_at_lam[nlim-1] <= expand->lmc_forced_nstart))
        {
            /* Use a marching method to run through the lambdas and get preliminary free energy data,
               before starting 'free' sampling. We start free sampling when we have enough at each lambda */

            /* if we have enough at this lambda, move on to the next one */

            if (dfhist->n_at_lam[fep_state] == expand->lmc_forced_nstart)
            {
                lamnew = fep_state+1;
                if (lamnew == nlim) /* whoops, stepped too far! */
                {
                    lamnew -= 1;
                }
            }
            else
            {
                lamnew = fep_state;
            }
            return lamnew;
        }
    }

    snew(propose, nlim);
    snew(accept, nlim);
    snew(remainder, nlim);

    for (i = 0; i < expand->lmc_repeats; i++)
    {
        rng.restart(step, i);
        dist.reset();

        for (ifep = 0; ifep < nlim; ifep++)
        {
            propose[ifep] = 0;
            accept[ifep]  = 0;
        }

        if ((expand->elmcmove == elmcmoveGIBBS) || (expand->elmcmove == elmcmoveMETGIBBS))
        {
            /* use the Gibbs sampler, with restricted range */
            if (expand->gibbsdeltalam < 0)
            {
                minfep = 0;
                maxfep = nlim-1;
            }
            else
            {
                minfep = fep_state - expand->gibbsdeltalam;
                maxfep = fep_state + expand->gibbsdeltalam;
                if (minfep < 0)
                {
                    minfep = 0;
                }
                if (maxfep > nlim-1)
                {
                    maxfep = nlim-1;
                }
            }

            GenerateGibbsProbabilities(weighted_lamee, p_k, &pks, minfep, maxfep);

            if (expand->elmcmove == elmcmoveGIBBS)
            {
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    propose[ifep] = p_k[ifep];
                    accept[ifep]  = 1.0;
                }
                /* Gibbs sampling */
                r1 = dist(rng);
                for (lamnew = minfep; lamnew <= maxfep; lamnew++)
                {
                    if (r1 <= p_k[lamnew])
                    {
                        break;
                    }
                    r1 -= p_k[lamnew];
                }
            }
            else if (expand->elmcmove == elmcmoveMETGIBBS)
            {

                /* Metropolized Gibbs sampling */
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    remainder[ifep] = 1 - p_k[ifep];
                }

                /* find the proposal probabilities */

                if (remainder[fep_state] == 0)
                {
                    /* only the current state has any probability */
                    /* we have to stay at the current state */
                    lamnew = fep_state;
                }
                else
                {
                    for (ifep = minfep; ifep <= maxfep; ifep++)
                    {
                        if (ifep != fep_state)
                        {
                            propose[ifep] = p_k[ifep]/remainder[fep_state];
                        }
                        else
                        {
                            propose[ifep] = 0;
                        }
                    }

                    r1 = dist(rng);
                    for (lamtrial = minfep; lamtrial <= maxfep; lamtrial++)
                    {
                        pnorm = p_k[lamtrial]/remainder[fep_state];
                        if (lamtrial != fep_state)
                        {
                            if (r1 <= pnorm)
                            {
                                break;
                            }
                            r1 -= pnorm;
                        }
                    }

                    /* we have now selected lamtrial according to p(lamtrial)/1-p(fep_state) */
                    tprob = 1.0;
                    /* trial probability is min{1,\frac{1-p(old)}{1-p(new)}} MRS 1/8/2008 */
                    trialprob = (remainder[fep_state])/(remainder[lamtrial]);
                    if (trialprob < tprob)
                    {
                        tprob = trialprob;
                    }
                    r2 = dist(rng);
                    if (r2 < tprob)
                    {
                        lamnew = lamtrial;
                    }
                    else
                    {
                        lamnew = fep_state;
                    }
                }

                /* now figure out the acceptance probability for each */
                for (ifep = minfep; ifep <= maxfep; ifep++)
                {
                    tprob = 1.0;
                    if (remainder[ifep] != 0)
                    {
                        trialprob = (remainder[fep_state])/(remainder[ifep]);
                    }
                    else
                    {
                        trialprob = 1.0; /* this state is the only choice! */
                    }
                    if (trialprob < tprob)
                    {
                        tprob = trialprob;
                    }
                    /* probability for fep_state=0, but that's fine, it's never proposed! */
                    accept[ifep] = tprob;
                }
            }

            if (lamnew > maxfep)
            {
                /* it's possible some rounding is failing */
                if (gmx_within_tol(remainder[fep_state], 0, 50*GMX_DOUBLE_EPS))
                {
                    /* numerical rounding error -- no state other than the original has weight */
                    lamnew = fep_state;
                }
                else
                {
                    /* probably not a numerical issue */
                    int   loc    = 0;
                    int   nerror = 200+(maxfep-minfep+1)*60;
                    char *errorstr;
                    snew(errorstr, nerror);
                    /* if it's greater than maxfep, then something went wrong -- probably underflow in the calculation
                       of sum weights. Generate detailed info for failure */
                    loc += sprintf(errorstr, "Something wrong in choosing new lambda state with a Gibbs move -- probably underflow in weight determination.\nDenominator is: %3d%17.10e\n  i  dE  numerator  weights\n", 0, pks);
                    for (ifep = minfep; ifep <= maxfep; ifep++)
                    {
                        loc += sprintf(&errorstr[loc], "%3d %17.10e%17.10e%17.10e\n", ifep, weighted_lamee[ifep], p_k[ifep], dfhist->sum_weights[ifep]);
                    }
                    gmx_fatal(FARGS, "%s", errorstr);
                }
            }
        }
        else if ((expand->elmcmove == elmcmoveMETROPOLIS) || (expand->elmcmove == elmcmoveBARKER))
        {
            /* use the metropolis sampler with trial +/- 1 */
            r1 = dist(rng);
            if (r1 < 0.5)
            {
                if (fep_state == 0)
                {
                    lamtrial = fep_state;
                }
                else
                {
                    lamtrial = fep_state-1;
                }
            }
            else
            {
                if (fep_state == nlim-1)
                {
                    lamtrial = fep_state;
                }
                else
                {
                    lamtrial = fep_state+1;
                }
            }

            de = weighted_lamee[lamtrial] - weighted_lamee[fep_state];
            if (expand->elmcmove == elmcmoveMETROPOLIS)
            {
                tprob     = 1.0;
                trialprob = std::exp(de);
                if (trialprob < tprob)
                {
                    tprob = trialprob;
                }
                propose[fep_state] = 0;
                propose[lamtrial]  = 1.0; /* note that this overwrites the above line if fep_state = ntrial, which only occurs at the ends */
                accept[fep_state]  = 1.0; /* doesn't actually matter, never proposed unless fep_state = ntrial, in which case it's 1.0 anyway */
                accept[lamtrial]   = tprob;
            }
            else if (expand->elmcmove == elmcmoveBARKER)
            {
                tprob = 1.0/(1.0+std::exp(-de));

                propose[fep_state] = (1-tprob);
                propose[lamtrial] += tprob; /* we add, to account for the fact that at the end, they might be the same point */
                accept[fep_state]  = 1.0;
                accept[lamtrial]   = 1.0;
            }

            r2 = dist(rng);
            if (r2 < tprob)
            {
                lamnew = lamtrial;
            }
            else
            {
                lamnew = fep_state;
            }
        }

        for (ifep = 0; ifep < nlim; ifep++)
        {
            dfhist->Tij[fep_state][ifep]      += propose[ifep]*accept[ifep];
            dfhist->Tij[fep_state][fep_state] += propose[ifep]*(1.0-accept[ifep]);
        }
        fep_state = lamnew;
    }

    dfhist->Tij_empirical[starting_fep_state][lamnew] += 1.0;

    sfree(propose);
    sfree(accept);
    sfree(remainder);

    return lamnew;
}
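/* The routine below writes the expanded-ensemble section of the log: the per-state
   MC-lambda table (lambda components, counts or Wang-Landau histogram, and the
   weights/free energies), and, every nstTij steps, the estimated and empirical
   transition matrices, optionally symmetrized. */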
/* print out the weights to the log, along with current state */
void PrintFreeEnergyInfoToFile(FILE *outfile, t_lambda *fep, t_expanded *expand, t_simtemp *simtemp, df_history_t *dfhist,
                               int fep_state, int frequency, gmx_int64_t step)
{
    int         nlim, i, ifep, jfep;
    real        dw, dg, dv, Tprint;
    const char *print_names[efptNR] = {" FEPL", "MassL", "CoulL", " VdwL", "BondL", "RestT", "Temp.(K)"};
    gmx_bool    bSimTemp            = FALSE;

    nlim = fep->n_lambda;
    if (simtemp != nullptr)
    {
        bSimTemp = TRUE;
    }

    if (step % frequency == 0)
    {
        fprintf(outfile, "             MC-lambda information\n");
        if (EWL(expand->elamstats) && (!(dfhist->bEquil)))
        {
            fprintf(outfile, "  Wang-Landau incrementor is: %11.5g\n", dfhist->wl_delta);
        }
        fprintf(outfile, "  N");
        for (i = 0; i < efptNR; i++)
        {
            if (fep->separate_dvdl[i])
            {
                fprintf(outfile, "%7s", print_names[i]);
            }
            else if ((i == efptTEMPERATURE) && bSimTemp)
            {
                fprintf(outfile, "%10s", print_names[i]); /* more space for temperature formats */
            }
        }
        fprintf(outfile, "    Count   ");
        if (expand->elamstats == elamstatsMINVAR)
        {
            fprintf(outfile, "W(in kT)   G(in kT)  dG(in kT)  dV(in kT)\n");
        }
        else
        {
            fprintf(outfile, "G(in kT)  dG(in kT)\n");
        }
        for (ifep = 0; ifep < nlim; ifep++)
        {
            if (ifep == nlim-1)
            {
                dw = 0.0;
                dg = 0.0;
                dv = 0.0;
            }
            else
            {
                dw = dfhist->sum_weights[ifep+1] - dfhist->sum_weights[ifep];
                dg = dfhist->sum_dg[ifep+1] - dfhist->sum_dg[ifep];
                dv = std::sqrt(gmx::square(dfhist->sum_variance[ifep+1]) - gmx::square(dfhist->sum_variance[ifep]));
            }
            fprintf(outfile, "%3d", (ifep+1));
            for (i = 0; i < efptNR; i++)
            {
                if (fep->separate_dvdl[i])
                {
                    fprintf(outfile, "%7.3f", fep->all_lambda[i][ifep]);
                }
                else if (i == efptTEMPERATURE && bSimTemp)
                {
                    fprintf(outfile, "%9.3f", simtemp->temperatures[ifep]);
                }
            }
            if (EWL(expand->elamstats) && (!(dfhist->bEquil))) /* if performing WL and still haven't equilibrated */
            {
                if (expand->elamstats == elamstatsWL)
                {
                    fprintf(outfile, " %8d", (int)dfhist->wl_histo[ifep]);
                }
                else
                {
                    fprintf(outfile, " %8.3f", dfhist->wl_histo[ifep]);
                }
            }
            else /* we have equilibrated weights */
            {
                fprintf(outfile, " %8d", dfhist->n_at_lam[ifep]);
            }
            if (expand->elamstats == elamstatsMINVAR)
            {
                fprintf(outfile, " %10.5f %10.5f %10.5f %10.5f", dfhist->sum_weights[ifep], dfhist->sum_dg[ifep], dg, dv);
            }
            else
            {
                fprintf(outfile, " %10.5f %10.5f", dfhist->sum_weights[ifep], dw);
            }
            if (ifep == fep_state)
            {
                fprintf(outfile, " <<\n");
            }
            else
            {
                fprintf(outfile, "   \n");
            }
        }
        fprintf(outfile, "\n");

        if ((step % expand->nstTij == 0) && (expand->nstTij > 0) && (step > 0))
        {
            fprintf(outfile, "                     Transition Matrix\n");
            for (ifep = 0; ifep < nlim; ifep++)
            {
                fprintf(outfile, "%12d", (ifep+1));
            }
            fprintf(outfile, "\n");
            for (ifep = 0; ifep < nlim; ifep++)
            {
                for (jfep = 0; jfep < nlim; jfep++)
                {
                    if (dfhist->n_at_lam[ifep] > 0)
                    {
                        if (expand->bSymmetrizedTMatrix)
                        {
                            Tprint = (dfhist->Tij[ifep][jfep]+dfhist->Tij[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
                        }
                        else
                        {
                            Tprint = (dfhist->Tij[ifep][jfep])/(dfhist->n_at_lam[ifep]);
                        }
                    }
                    else
                    {
                        Tprint = 0.0;
                    }
                    fprintf(outfile, "%12.8f", Tprint);
                }
                fprintf(outfile, "%3d\n", (ifep+1));
            }

            fprintf(outfile, "                  Empirical Transition Matrix\n");
            for (ifep = 0; ifep < nlim; ifep++)
            {
                fprintf(outfile, "%12d", (ifep+1));
            }
            fprintf(outfile, "\n");
            for (ifep = 0; ifep < nlim; ifep++)
            {
                for (jfep = 0; jfep < nlim; jfep++)
                {
                    if (dfhist->n_at_lam[ifep] > 0)
                    {
                        if (expand->bSymmetrizedTMatrix)
                        {
                            Tprint = (dfhist->Tij_empirical[ifep][jfep]+dfhist->Tij_empirical[jfep][ifep])/(dfhist->n_at_lam[ifep]+dfhist->n_at_lam[jfep]);
                        }
                        else
                        {
                            Tprint = dfhist->Tij_empirical[ifep][jfep]/(dfhist->n_at_lam[ifep]);
                        }
                    }
                    else
                    {
                        Tprint = 0.0;
                    }
                    fprintf(outfile, "%12.8f", Tprint);
                }
                fprintf(outfile, "%3d\n", (ifep+1));
            }
        }
    }
}
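/* Main expanded-ensemble driver, called from the MD loop: it increments the count
   at the current state, builds the scaled and weighted reduced energies, updates
   the weights, chooses the new lambda state, and (for simulated tempering) rescales
   velocities and thermostat variables to the new temperature. Returns the index of
   the new lambda state.

   Illustrative call pattern only -- a sketch, not the exact integrator code; the
   variable names below are placeholders:

     if (bDoExpanded)
     {
         newFepState = ExpandedEnsembleDynamics(fplog, ir, enerd, state, &MassQ,
                                                state->fep_state, state->dfhist,
                                                step, v, mdatoms);
     }
 */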
int ExpandedEnsembleDynamics(FILE *log, t_inputrec *ir, gmx_enerdata_t *enerd,
                             t_state *state, t_extmass *MassQ, int fep_state, df_history_t *dfhist,
                             gmx_int64_t step,
                             rvec *v, t_mdatoms *mdatoms)
/* Note that the state variable is only needed for simulated tempering, not
   Hamiltonian expanded ensemble. May be able to remove it after integrator refactoring. */
{
    real       *pfep_lamee, *scaled_lamee, *weighted_lamee;
    double     *p_k;
    int         i, nlim, lamnew, totalsamples;
    real        oneovert, maxscaled = 0, maxweighted = 0;
    t_expanded *expand;
    t_simtemp  *simtemp;
    gmx_bool    bIfReset, bSwitchtoOneOverT, bDoneEquilibrating = FALSE;

    expand  = ir->expandedvals;
    simtemp = ir->simtempvals;
    nlim    = ir->fepvals->n_lambda;

    snew(scaled_lamee, nlim);
    snew(weighted_lamee, nlim);
    snew(pfep_lamee, nlim);
    snew(p_k, nlim);

    /* update the count at the current lambda*/
    dfhist->n_at_lam[fep_state]++;

    /* need to calculate the PV term somewhere, but not needed here? Not until there's a lambda state that's
       pressure controlled.*/
    /*
       pVTerm = 0;
       where does this PV term go?
       for (i=0;i<nlim;i++)
       {
       fep_lamee[i] += pVTerm;
       }
     */

    /* determine the minimum value to avoid overflow. Probably a better way to do this */
    /* we don't need to include the pressure term, since the volume is the same between the two.
       is there some term we are neglecting, however? */

    if (ir->efep != efepNO)
    {
        for (i = 0; i < nlim; i++)
        {
            if (ir->bSimTemp)
            {
                /* Note -- this assumes no mass changes, since kinetic energy is not added . . . */
                scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(simtemp->temperatures[i]*BOLTZ)
                    + enerd->term[F_EPOT]*(1.0/(simtemp->temperatures[i]) - 1.0/(simtemp->temperatures[fep_state]))/BOLTZ;
            }
            else
            {
                scaled_lamee[i] = (enerd->enerpart_lambda[i+1]-enerd->enerpart_lambda[0])/(expand->mc_temp*BOLTZ);
                /* mc_temp is currently set to the system reft unless otherwise defined */
            }

            /* save these energies for printing, so they don't get overwritten by the next step */
            /* they aren't overwritten in the non-free energy case, but we always print with these
               for simplicity */
        }
    }
    else
    {
        if (ir->bSimTemp)
        {
            for (i = 0; i < nlim; i++)
            {
                scaled_lamee[i] = enerd->term[F_EPOT]*(1.0/simtemp->temperatures[i] - 1.0/simtemp->temperatures[fep_state])/BOLTZ;
            }
        }
    }

    for (i = 0; i < nlim; i++)
    {
        pfep_lamee[i] = scaled_lamee[i];

        weighted_lamee[i] = dfhist->sum_weights[i] - scaled_lamee[i];
        if (i == 0)
        {
            maxscaled   = scaled_lamee[i];
            maxweighted = weighted_lamee[i];
        }
        else
        {
            if (scaled_lamee[i] > maxscaled)
            {
                maxscaled = scaled_lamee[i];
            }
            if (weighted_lamee[i] > maxweighted)
            {
                maxweighted = weighted_lamee[i];
            }
        }
    }

    for (i = 0; i < nlim; i++)
    {
        scaled_lamee[i]   -= maxscaled;
        weighted_lamee[i] -= maxweighted;
    }

    /* update weights - we decide whether or not to actually do this inside */

    bDoneEquilibrating = UpdateWeights(nlim, expand, dfhist, fep_state, scaled_lamee, weighted_lamee, step);
    if (bDoneEquilibrating)
    {
        if (log)
        {
            fprintf(log, "\nStep %d: Weights have equilibrated, using criteria: %s\n", (int)step, elmceq_names[expand->elmceq]);
        }
    }

    lamnew = ChooseNewLambda(nlim, expand, dfhist, fep_state, weighted_lamee, p_k,
                             ir->expandedvals->lmc_seed, step);
    /* if using simulated tempering, we need to adjust the temperatures */
    if (ir->bSimTemp && (lamnew != fep_state)) /* only need to change the temperatures if we change the state */
    {
        int   i, j, n, d;
        real *buf_ngtc;
        real  told;
        int   nstart, nend, gt;

        snew(buf_ngtc, ir->opts.ngtc);

        for (i = 0; i < ir->opts.ngtc; i++)
        {
            if (ir->opts.ref_t[i] > 0)
            {
                told              = ir->opts.ref_t[i];
                ir->opts.ref_t[i] = simtemp->temperatures[lamnew];
                buf_ngtc[i]       = std::sqrt(ir->opts.ref_t[i]/told); /* using the buffer as temperature scaling */
            }
        }

        /* we don't need to manipulate the ekind information, as it isn't due to be reset until the next step anyway */

        nstart = 0;
        nend   = mdatoms->homenr;
        for (n = nstart; n < nend; n++)
        {
            gt = 0;
            if (mdatoms->cTC)
            {
                gt = mdatoms->cTC[n];
            }
            for (d = 0; d < DIM; d++)
            {
                v[n][d] *= buf_ngtc[gt];
            }
        }

        if (inputrecNptTrotter(ir) || inputrecNphTrotter(ir) || inputrecNvtTrotter(ir))
        {
            /* we need to recalculate the masses if the temperature has changed */
            init_npt_masses(ir, state, MassQ, FALSE);
            for (i = 0; i < state->nnhpres; i++)
            {
                for (j = 0; j < ir->opts.nhchainlength; j++)
                {
                    state->nhpres_vxi[i+j] *= buf_ngtc[i];
                }
            }
            for (i = 0; i < ir->opts.ngtc; i++)
            {
                for (j = 0; j < ir->opts.nhchainlength; j++)
                {
                    state->nosehoover_vxi[i+j] *= buf_ngtc[i];
                }
            }
        }
        sfree(buf_ngtc);
    }

    /* now check on the Wang-Landau updating criteria */

    if (EWL(expand->elamstats))
    {
        bSwitchtoOneOverT = FALSE;
        if (expand->bWLoneovert)
        {
            totalsamples = 0;
            for (i = 0; i < nlim; i++)
            {
                totalsamples += dfhist->n_at_lam[i];
            }
            oneovert = (1.0*nlim)/totalsamples;
            /* oneovert has decreased by a bit since last time, so we actually make sure it's within one of this number */
            /* switch to 1/t incrementing when wl_delta has decreased at least once, and wl_delta is now less than 1/t */
            if ((dfhist->wl_delta <= ((totalsamples)/(totalsamples-1.00001))*oneovert) &&
                (dfhist->wl_delta < expand->init_wl_delta))
            {
                bSwitchtoOneOverT = TRUE;
            }
        }
        if (bSwitchtoOneOverT)
        {
            dfhist->wl_delta = oneovert; /* now we reduce by this each time, instead of only at flatness */
        }
        else
        {
            bIfReset = CheckHistogramRatios(nlim, dfhist->wl_histo, expand->wl_ratio);
            if (bIfReset)
            {
                for (i = 0; i < nlim; i++)
                {
                    dfhist->wl_histo[i] = 0;
                }
                dfhist->wl_delta *= expand->wl_scale;
                if (log)
                {
                    fprintf(log, "\nStep %d: weights are now:", (int)step);
                    for (i = 0; i < nlim; i++)
                    {
                        fprintf(log, " %.5f", dfhist->sum_weights[i]);
                    }
                    fprintf(log, "\n");
                }
            }
        }
    }
    sfree(pfep_lamee);
    sfree(scaled_lamee);
    sfree(weighted_lamee);
    sfree(p_k);

    return lamnew;
}