Fix value of Ewald shift
[gromacs.git] / src / gromacs / ewald / pme-load-balancing.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function definitions necessary for
 * managing automatic load balance of PME calculations (Coulomb and
 * LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme-load-balancing.h"

#include "config.h"

#include <assert.h>

#include <cmath>

#include <algorithm>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/legacyheaders/calcgrid.h"
#include "gromacs/legacyheaders/force.h"
#include "gromacs/legacyheaders/md_logging.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/sim_util.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"

#include "pme-internal.h"

/*! \brief Parameters and settings for one PP-PME setup */
struct pme_setup_t {
    real              rcut_coulomb;    /**< Coulomb cut-off */
    real              rlist;           /**< pair-list cut-off */
    real              rlistlong;       /**< LR pair-list cut-off */
    int               nstcalclr;       /**< frequency of evaluating long-range forces for group scheme */
    real              spacing;         /**< (largest) PME grid spacing */
    ivec              grid;            /**< the PME grid dimensions */
    real              grid_efficiency; /**< inefficiency factor for non-uniform grids <= 1 */
    real              ewaldcoeff_q;    /**< Electrostatic Ewald coefficient */
    real              ewaldcoeff_lj;   /**< LJ Ewald coefficient, only for the call to send_switchgrid */
    struct gmx_pme_t *pmedata;         /**< the data structure used in the PME code */
    int               count;           /**< number of times this setup has been timed */
    double            cycles;          /**< the fastest time for this setup in cycles */
};

/*! \brief After 50 nstlist periods of not observing imbalance: never tune PME */
const int  PMETunePeriod = 50;
/*! \brief Trigger PME load balancing at more than 5% PME overload */
const real loadBalanceTriggerFactor = 1.05;
/*! \brief In the initial scan, step by grids that are at least a factor 0.8 coarser */
const real gridScaleFactor = 0.8;
/*! \brief In the initial scan, try to skip grids with uneven x/y/z spacing,
 * checking if the "efficiency" is more than 5% worse than the previous grid.
 */
const real relativeEfficiencyFactor = 1.05;
/*! \brief Only run (and rerun) setups that are at most 12% slower than the fastest run so far */
const real maxRelativeSlowdownAccepted = 1.12;
/*! \brief If setups get more than 2% faster, do another round to avoid
 * choosing a slower setup due to acceleration or fluctuations.
 */
const real maxFluctuationAccepted = 1.02;
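
/* Illustration of how these constants interact: with nstlist = 25 (an example
 * value), tuning is given up after PMETunePeriod*nstlist = 1250 steps without
 * observed PP/PME imbalance, and setups measured to be more than 12%
 * (maxRelativeSlowdownAccepted) slower than the fastest setup so far are
 * excluded from the later tuning stages.
 */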

/*! \brief Enumeration whose values describe the effect limiting the load balancing */
enum epmelb {
    epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimPMEGRID, epmelblimNR
};

/*! \brief Descriptive strings matching ::epmelb */
const char *pmelblim_str[epmelblimNR] =
{ "no", "box size", "domain decomposition", "PME grid restriction" };

struct pme_load_balancing_t {
    gmx_bool     bSepPMERanks;       /**< do we have separate PME ranks? */
    gmx_bool     bActive;            /**< is PME tuning active? */
    gmx_int64_t  step_rel_stop;      /**< stop the tuning after this value of step_rel */
    gmx_bool     bTriggerOnDLB;      /**< trigger balancing only on DD DLB */
    gmx_bool     bBalance;           /**< are we in the balancing phase, i.e. trying different setups? */
    int          nstage;             /**< the current maximum number of stages */

    real         cut_spacing;        /**< the minimum cutoff / PME grid spacing ratio */
    real         rcut_vdw;           /**< Vdw cutoff (does not change) */
    real         rcut_coulomb_start; /**< Initial electrostatics cutoff */
    int          nstcalclr_start;    /**< Initial long-range force update frequency (group scheme) */
    real         rbuf_coulomb;       /**< the pairlist buffer size */
    real         rbuf_vdw;           /**< the pairlist buffer size */
    matrix       box_start;          /**< the initial simulation box */
    int          n;                  /**< the count of setups as well as the allocation size */
    pme_setup_t *setup;              /**< the PME+cutoff setups */
    int          cur;                /**< the index (in setup) of the current setup */
    int          fastest;            /**< index of the fastest setup up till now */
    int          lower_limit;        /**< don't go below this setup index */
    int          start;              /**< start of setup index range to consider in stage>0 */
    int          end;                /**< end of setup index range to consider in stage>0 */
    int          elimited;           /**< was the balancing limited, uses enum above */
    int          cutoff_scheme;      /**< Verlet or group cut-offs */

    int          stage;              /**< the current stage */

    int          cycles_n;           /**< step cycle counter cumulative count */
    double       cycles_c;           /**< step cycle counter cumulative cycles */
};

/* TODO The code in this file should call this getter, rather than
 * read bActive anywhere */
bool pme_loadbal_is_active(const pme_load_balancing_t *pme_lb)
{
    return pme_lb != NULL && pme_lb->bActive;
}

void pme_loadbal_init(pme_load_balancing_t     **pme_lb_p,
                      t_commrec                 *cr,
                      FILE                      *fp_log,
                      const t_inputrec          *ir,
                      matrix                     box,
                      const interaction_const_t *ic,
                      struct gmx_pme_t          *pmedata,
                      gmx_bool                   bUseGPU,
                      gmx_bool                  *bPrinting)
{
    GMX_RELEASE_ASSERT(ir->cutoff_scheme != ecutsGROUP, "PME tuning is not supported with cutoff-scheme=group (because it contains bugs)");

    pme_load_balancing_t *pme_lb;
    real                  spm, sp;
    int                   d;

    snew(pme_lb, 1);

    pme_lb->bSepPMERanks  = !(cr->duty & DUTY_PME);

    /* Initially we turn balancing on directly based on PP/PME imbalance */
    pme_lb->bTriggerOnDLB = FALSE;

    /* Any number of stages >= 2 is supported */
    pme_lb->nstage        = 2;

    pme_lb->cutoff_scheme = ir->cutoff_scheme;

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        pme_lb->rbuf_vdw     = pme_lb->rbuf_coulomb;
    }
    else
    {
        if (ic->rcoulomb > ic->rlist)
        {
            pme_lb->rbuf_coulomb = ic->rlistlong - ic->rcoulomb;
        }
        else
        {
            pme_lb->rbuf_coulomb = ic->rlist - ic->rcoulomb;
        }
        if (ic->rvdw > ic->rlist)
        {
            pme_lb->rbuf_vdw = ic->rlistlong - ic->rvdw;
        }
        else
        {
            pme_lb->rbuf_vdw = ic->rlist - ic->rvdw;
        }
    }
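
    /* Note: rbuf_coulomb/rbuf_vdw store the pair-list buffer, i.e. how much
     * the list cut-off exceeds the interaction cut-off. For example, with
     * rlist = 1.1 nm and rcoulomb = 1.0 nm the buffer is 0.1 nm; this buffer
     * is kept fixed while the Coulomb cut-off is scaled during tuning, so
     * rlist always tracks rcut_coulomb + buffer.
     */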

    copy_mat(box, pme_lb->box_start);
    if (ir->ePBC == epbcXY && ir->nwall == 2)
    {
        svmul(ir->wall_ewald_zfac, pme_lb->box_start[ZZ], pme_lb->box_start[ZZ]);
    }

    pme_lb->n = 1;
    snew(pme_lb->setup, pme_lb->n);

    pme_lb->rcut_vdw               = ic->rvdw;
    pme_lb->rcut_coulomb_start     = ir->rcoulomb;
    pme_lb->nstcalclr_start        = ir->nstcalclr;

    pme_lb->cur                    = 0;
    pme_lb->setup[0].rcut_coulomb  = ic->rcoulomb;
    pme_lb->setup[0].rlist         = ic->rlist;
    pme_lb->setup[0].rlistlong     = ic->rlistlong;
    pme_lb->setup[0].nstcalclr     = ir->nstcalclr;
    pme_lb->setup[0].grid[XX]      = ir->nkx;
    pme_lb->setup[0].grid[YY]      = ir->nky;
    pme_lb->setup[0].grid[ZZ]      = ir->nkz;
    pme_lb->setup[0].ewaldcoeff_q  = ic->ewaldcoeff_q;
    pme_lb->setup[0].ewaldcoeff_lj = ic->ewaldcoeff_lj;

    pme_lb->setup[0].pmedata       = pmedata;

    spm = 0;
    for (d = 0; d < DIM; d++)
    {
        sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
        if (sp > spm)
        {
            spm = sp;
        }
    }
    pme_lb->setup[0].spacing = spm;

    if (ir->fourier_spacing > 0)
    {
        pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
    }
    else
    {
        pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
    }
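
    /* Note: cut_spacing is the fixed ratio rcoulomb/(PME grid spacing).
     * For example, rcoulomb = 1.0 nm with a 0.12 nm Fourier spacing gives
     * cut_spacing of roughly 8.3; every coarser grid tried later gets
     * rcut_coulomb = cut_spacing*spacing, so real-space and reciprocal-space
     * accuracy are scaled together.
     */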

    pme_lb->stage = 0;

    pme_lb->fastest     = 0;
    pme_lb->lower_limit = 0;
    pme_lb->start       = 0;
    pme_lb->end         = 0;
    pme_lb->elimited    = epmelblimNO;

    pme_lb->cycles_n = 0;
    pme_lb->cycles_c = 0;

    /* Tune with GPUs and/or separate PME ranks.
     * When running only on a CPU without PME ranks, PME tuning will only help
     * with small numbers of atoms in the cut-off sphere.
     */
    pme_lb->bActive  = (wallcycle_have_counter() && (bUseGPU ||
                                                     pme_lb->bSepPMERanks));

    /* With GPUs and no separate PME ranks we can't measure the PP/PME
     * imbalance, so we start balancing right away.
     * Otherwise we only start balancing after we observe imbalance.
     */
    pme_lb->bBalance = (pme_lb->bActive && (bUseGPU && !pme_lb->bSepPMERanks));

    pme_lb->step_rel_stop = PMETunePeriod*ir->nstlist;

    /* Delay DD load balancing when GPUs are used */
    if (pme_lb->bActive && DOMAINDECOMP(cr) && cr->dd->nnodes > 1 && bUseGPU)
    {
        /* Lock DLB=auto to off (this does nothing when DLB=yes/no).
         * With GPUs and separate PME nodes, we want to first
         * do PME tuning without DLB, since DLB might limit
         * the cut-off, which never improves performance.
         * We allow for DLB + PME tuning after a first round of tuning.
         */
        dd_dlb_lock(cr->dd);
        if (dd_dlb_is_locked(cr->dd))
        {
            md_print_warn(cr, fp_log, "NOTE: DLB will not turn on during the first phase of PME tuning\n");
        }
    }

    *pme_lb_p = pme_lb;

    *bPrinting = pme_lb->bBalance;
}
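
/* Sketch of the expected driving sequence from mdrun's MD loop; the
 * caller-side variable names are illustrative only:
 *
 *     pme_loadbal_init(&pme_lb, cr, fplog, ir, state->box, fr->ic,
 *                      fr->pmedata, bUseGPU, &bPMETunePrinting);
 *     // every nstlist steps inside the MD loop:
 *     pme_loadbal_do(pme_lb, cr, stderr, fplog, ir, fr, state,
 *                    wcycle, step, step_rel, &bPMETunePrinting);
 *     // after the MD loop:
 *     pme_loadbal_done(pme_lb, cr, fplog, bNonBondedOnGPU);
 */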

/*! \brief Try to increase the cutoff during load balancing */
static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t *pme_lb,
                                            int                   pme_order,
                                            const gmx_domdec_t   *dd)
{
    pme_setup_t *set;
    int          npmeranks_x, npmeranks_y;
    real         fac, sp;
    real         tmpr_coulomb, tmpr_vdw;
    int          d;
    gmx_bool     grid_ok;

    /* Try to add a new setup with next larger cut-off to the list */
    pme_lb->n++;
    srenew(pme_lb->setup, pme_lb->n);
    set          = &pme_lb->setup[pme_lb->n-1];
    set->pmedata = NULL;

    get_pme_nnodes(dd, &npmeranks_x, &npmeranks_y);

    fac = 1;
    do
    {
        /* Avoid infinite while loop, which can occur at the minimum grid size.
         * Note that in practice load balancing will stop before this point.
         * The factor 2.1 allows for the extreme case in which only grids
         * of powers of 2 are allowed (the current code supports more grids).
         */
        if (fac > 2.1)
        {
            pme_lb->n--;

            return FALSE;
        }

        fac *= 1.01;
        clear_ivec(set->grid);
        sp = calc_grid(NULL, pme_lb->box_start,
                       fac*pme_lb->setup[pme_lb->cur].spacing,
                       &set->grid[XX],
                       &set->grid[YY],
                       &set->grid[ZZ]);

        /* As here we can't easily check if one of the PME ranks
         * uses threading, we do a conservative grid check.
         * This means we can't use pme_order or less grid lines
         * per PME rank along x, which is not a strong restriction.
         */
        gmx_pme_check_restrictions(pme_order,
                                   set->grid[XX], set->grid[YY], set->grid[ZZ],
                                   npmeranks_x, npmeranks_y,
                                   TRUE,
                                   FALSE,
                                   &grid_ok);
    }
    while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);
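
    /* Note: the loop above scales the target spacing up by 1% per iteration
     * until calc_grid() returns a grid whose (largest) spacing is at least
     * 0.1% coarser than that of the current setup and that also passes
     * gmx_pme_check_restrictions(), so the new setup is guaranteed to use a
     * genuinely coarser, valid PME grid.
     */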

    set->rcut_coulomb = pme_lb->cut_spacing*sp;
    if (set->rcut_coulomb < pme_lb->rcut_coulomb_start)
    {
        /* This is unlikely, but can happen when e.g. continuing from
         * a checkpoint after equilibration where the box shrank a lot.
         * We want to avoid rcoulomb getting smaller than rvdw
         * and there might be more issues with decreasing rcoulomb.
         */
        set->rcut_coulomb = pme_lb->rcut_coulomb_start;
    }

    if (pme_lb->cutoff_scheme == ecutsVERLET)
    {
        set->rlist     = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        /* We don't use LR lists with Verlet, but this avoids if-statements in further checks */
        set->rlistlong = set->rlist;
    }
    else
    {
        /* TODO Remove these lines and pme_lb->cutoff_scheme */
        tmpr_coulomb   = set->rcut_coulomb + pme_lb->rbuf_coulomb;
        tmpr_vdw       = pme_lb->rcut_vdw + pme_lb->rbuf_vdw;
        /* Two (known) bugs with cutoff-scheme=group here:
         * - This modification of rlist results in incorrect DD communication.
         * - We should set fr->bTwinRange = (fr->rlistlong > fr->rlist).
         */
        set->rlist     = std::min(tmpr_coulomb, tmpr_vdw);
        set->rlistlong = std::max(tmpr_coulomb, tmpr_vdw);

        /* Set the long-range update frequency */
        if (set->rlist == set->rlistlong)
        {
            /* No long-range interactions if the short-/long-range cutoffs are identical */
            set->nstcalclr = 0;
        }
        else if (pme_lb->nstcalclr_start == 0 || pme_lb->nstcalclr_start == 1)
        {
            /* We were not doing long-range before, but now we are since rlist!=rlistlong */
            set->nstcalclr = 1;
        }
        else
        {
            /* We were already doing long-range interactions from the start */
            if (pme_lb->rcut_vdw > pme_lb->rcut_coulomb_start)
            {
                /* We were originally doing long-range VdW-only interactions.
                 * If rvdw is still longer than rcoulomb we keep the original nstcalclr,
                 * but if the coulomb cutoff has become longer we should update the long-range
                 * part every step.
                 */
                set->nstcalclr = (tmpr_vdw > tmpr_coulomb) ? pme_lb->nstcalclr_start : 1;
            }
            else
            {
                /* We were not doing any long-range interaction from the start,
                 * since it is not possible to do twin-range coulomb for the PME interaction.
                 */
                set->nstcalclr = 1;
            }
        }
    }

    set->spacing         = sp;
    /* The grid efficiency is the size wrt a grid with uniform x/y/z spacing */
    set->grid_efficiency = 1;
    for (d = 0; d < DIM; d++)
    {
        set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
    }
    /* The Ewald coefficient is inversely proportional to the cut-off */
    set->ewaldcoeff_q =
        pme_lb->setup[0].ewaldcoeff_q*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
    /* We set ewaldcoeff_lj in set, even when LJ-PME is not used */
    set->ewaldcoeff_lj =
        pme_lb->setup[0].ewaldcoeff_lj*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
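
    /* Note: this rescaling keeps the product ewaldcoeff_q*rcut_coulomb
     * constant, i.e. beta_new = beta_0*rc_0/rc_new. Since erfc(beta*rc)
     * controls the real-space truncation error, the accuracy of the Ewald
     * splitting is preserved while work shifts between the real-space (PP)
     * part and the reciprocal-space (PME mesh) part.
     */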

    set->count  = 0;
    set->cycles = 0;

    if (debug)
    {
        fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
                set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
    }

    return TRUE;
}

/*! \brief Print the PME grid */
static void print_grid(FILE *fp_err, FILE *fp_log,
                       const char *pre,
                       const char *desc,
                       const pme_setup_t *set,
                       double cycles)
{
    char buf[STRLEN], buft[STRLEN];

    if (cycles >= 0)
    {
        sprintf(buft, ": %.1f M-cycles", cycles*1e-6);
    }
    else
    {
        buft[0] = '\0';
    }
    sprintf(buf, "%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
            pre,
            desc, set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb,
            buft);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}

/*! \brief Return the index of the last setup used in PME load balancing */
static int pme_loadbal_end(pme_load_balancing_t *pme_lb)
{
    /* In the initial stage only n is set; end is not set yet */
    if (pme_lb->end > 0)
    {
        return pme_lb->end;
    }
    else
    {
        return pme_lb->n;
    }
}

/*! \brief Print descriptive string about what limits PME load balancing */
static void print_loadbal_limited(FILE *fp_err, FILE *fp_log,
                                  gmx_int64_t step,
                                  pme_load_balancing_t *pme_lb)
{
    char buf[STRLEN], sbuf[22];

    sprintf(buf, "step %4s: the %s limits the PME load balancing to a coulomb cut-off of %.3f",
            gmx_step_str(step, sbuf),
            pmelblim_str[pme_lb->elimited],
            pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
    if (fp_err != NULL)
    {
        fprintf(fp_err, "\r%s\n", buf);
    }
    if (fp_log != NULL)
    {
        fprintf(fp_log, "%s\n", buf);
    }
}

/*! \brief Switch load balancing to stage 1
 *
 * In this stage, only reasonably fast setups are run again. */
static void switch_to_stage1(pme_load_balancing_t *pme_lb)
{
    /* Increase start until we find a setup that is not slower than
     * maxRelativeSlowdownAccepted times the fastest setup.
     */
    pme_lb->start = pme_lb->lower_limit;
    while (pme_lb->start + 1 < pme_lb->n &&
           (pme_lb->setup[pme_lb->start].count == 0 ||
            pme_lb->setup[pme_lb->start].cycles >
            pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted))
    {
        pme_lb->start++;
    }
    /* While increasing start, we might have skipped setups that we did not
     * time during stage 0. We want to extend the range for stage 1 to include
     * any skipped setups that lie between setups that were measured to be
     * acceptably fast and too slow.
     */
    while (pme_lb->start > pme_lb->lower_limit &&
           pme_lb->setup[pme_lb->start - 1].count == 0)
    {
        pme_lb->start--;
    }

    /* Decrease end only with setups that we timed and that are slow. */
    pme_lb->end = pme_lb->n;
    if (pme_lb->setup[pme_lb->end - 1].count > 0 &&
        pme_lb->setup[pme_lb->end - 1].cycles >
        pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted)
    {
        pme_lb->end--;
    }

    pme_lb->stage = 1;

    /* Next we want to choose setup pme_lb->end-1, but as we will decrease
     * pme_lb->cur by one right after returning, we set cur to end.
     */
    pme_lb->cur = pme_lb->end;
}

/*! \brief Process the timings and try to adjust the PME grid and Coulomb cut-off
 *
 * The adjustment is done to generate a different non-bonded PP and PME load.
 * With separate PME ranks (PP and PME on different processes) or with
 * a GPU (PP on GPU, PME on CPU), PP and PME run on different resources
 * and changing the load will affect the load balance and performance.
 * The total time for a set of integration steps is monitored and a range
 * of grid/cut-off setups is scanned. After calling pme_load_balance many
 * times and acquiring enough statistics, the best performing setup is chosen.
 * Here we try to take into account fluctuations and changes due to external
 * factors as well as DD load balancing.
 */
static void
pme_load_balance(pme_load_balancing_t      *pme_lb,
                 t_commrec                 *cr,
                 FILE                      *fp_err,
                 FILE                      *fp_log,
                 t_inputrec                *ir,
                 t_state                   *state,
                 double                     cycles,
                 interaction_const_t       *ic,
                 struct nonbonded_verlet_t *nbv,
                 struct gmx_pme_t **        pmedata,
                 gmx_int64_t                step)
{
    gmx_bool     OK;
    pme_setup_t *set;
    double       cycles_fast;
    char         buf[STRLEN], sbuf[22];
    real         rtab;

    if (PAR(cr))
    {
        gmx_sumd(1, &cycles, cr);
        cycles /= cr->nnodes;
    }

    set = &pme_lb->setup[pme_lb->cur];
    set->count++;

    rtab = ir->rlistlong + ir->tabext;

    if (set->count % 2 == 1)
    {
        /* Skip the first cycle, because the first step after a switch
         * is much slower due to allocation and/or caching effects.
         */
        return;
    }

    sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
    print_grid(fp_err, fp_log, buf, "timed with", set, cycles);

    if (set->count <= 2)
    {
        set->cycles = cycles;
    }
    else
    {
        if (cycles*maxFluctuationAccepted < set->cycles &&
            pme_lb->stage == pme_lb->nstage - 1)
        {
            /* The performance went up a lot (due to e.g. DD load balancing).
             * Add a stage, keep the minima, but rescan all setups.
             */
            pme_lb->nstage++;

            if (debug)
            {
                fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
                        "Increased the number of stages to %d"
                        " and ignoring the previous performance\n",
                        set->grid[XX], set->grid[YY], set->grid[ZZ],
                        set->cycles*1e-6, cycles*1e-6, maxFluctuationAccepted,
                        pme_lb->nstage);
            }
        }
        set->cycles = std::min(set->cycles, cycles);
    }

    if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
    {
        pme_lb->fastest = pme_lb->cur;

        if (DOMAINDECOMP(cr))
        {
            /* We found a new fastest setting, ensure that with subsequent
             * shorter cut-off's the dynamic load balancing does not make
             * the use of the current cut-off impossible. This solution is
             * a trade-off, as the PME load balancing and DD domain size
             * load balancing can interact in complex ways.
             * With the Verlet kernels, DD load imbalance will usually be
             * mainly due to bonded interaction imbalance, which will often
             * quickly push the domain boundaries beyond the limit for the
             * optimal, PME load balanced, cut-off. But it could be that
             * better overall performance can be obtained with a slightly
             * shorter cut-off and better DD load balancing.
             */
            set_dd_dlb_max_cutoff(cr, pme_lb->setup[pme_lb->fastest].rlistlong);
        }
    }
    cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;

    /* Check in stage 0 if we should stop scanning grids.
     * Stop when the time is more than maxRelativeSlowdownAccepted longer than the fastest.
     */
    if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
        cycles > pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted)
    {
        pme_lb->n = pme_lb->cur + 1;
        /* Done with scanning, go to stage 1 */
        switch_to_stage1(pme_lb);
    }

    if (pme_lb->stage == 0)
    {
        int gridsize_start;

        gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];

        do
        {
            if (pme_lb->cur+1 < pme_lb->n)
            {
                /* We had already generated the next setup */
                OK = TRUE;
            }
            else
            {
                /* Find the next setup */
                OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);

                if (!OK)
                {
                    pme_lb->elimited = epmelblimPMEGRID;
                }
            }

            if (OK && ir->ePBC != epbcNONE)
            {
                OK = (sqr(pme_lb->setup[pme_lb->cur+1].rlistlong)
                      <= max_cutoff2(ir->ePBC, state->box));
                if (!OK)
                {
                    pme_lb->elimited = epmelblimBOX;
                }
            }

            if (OK)
            {
                pme_lb->cur++;

                if (DOMAINDECOMP(cr))
                {
                    OK = change_dd_cutoff(cr, state, ir,
                                          pme_lb->setup[pme_lb->cur].rlistlong);
                    if (!OK)
                    {
                        /* Failed: do not use this setup */
                        pme_lb->cur--;
                        pme_lb->elimited = epmelblimDD;
                    }
                }
            }
            if (!OK)
            {
                /* We hit the upper limit for the cut-off,
                 * the setup should not go further than cur.
                 */
                pme_lb->n = pme_lb->cur + 1;
                print_loadbal_limited(fp_err, fp_log, step, pme_lb);
                /* Switch to the next stage */
                switch_to_stage1(pme_lb);
            }
        }
        while (OK &&
               !(pme_lb->setup[pme_lb->cur].grid[XX]*
                 pme_lb->setup[pme_lb->cur].grid[YY]*
                 pme_lb->setup[pme_lb->cur].grid[ZZ] <
                 gridsize_start*gridScaleFactor
                 &&
                 pme_lb->setup[pme_lb->cur].grid_efficiency <
                 pme_lb->setup[pme_lb->cur-1].grid_efficiency*relativeEfficiencyFactor));
    }
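
    /* Note on the loop above: within a single call it keeps advancing to
     * coarser grids until the number of PME grid points has dropped by at
     * least the factor gridScaleFactor (0.8) relative to the setup timed at
     * the start of this call, skipping intermediate grids whose uneven x/y/z
     * spacing makes them noticeably less efficient than the preceding grid.
     */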

    if (pme_lb->stage > 0 && pme_lb->end == 1)
    {
        pme_lb->cur   = pme_lb->lower_limit;
        pme_lb->stage = pme_lb->nstage;
    }
    else if (pme_lb->stage > 0 && pme_lb->end > 1)
    {
        /* If stage = nstage-1:
         *   scan over all setups, rerunning only those setups
         *   which are not much slower than the fastest
         * else:
         *   use the next setup
         * Note that we loop backward to minimize the risk of the cut-off
         * getting limited by DD DLB, since the DLB cut-off limit is set
         * to the fastest PME setup.
         */
        do
        {
            if (pme_lb->cur > pme_lb->start)
            {
                pme_lb->cur--;
            }
            else
            {
                pme_lb->stage++;

                pme_lb->cur = pme_lb->end - 1;
            }
        }
        while (pme_lb->stage == pme_lb->nstage - 1 &&
               pme_lb->setup[pme_lb->cur].count > 0 &&
               pme_lb->setup[pme_lb->cur].cycles > cycles_fast*maxRelativeSlowdownAccepted);

        if (pme_lb->stage == pme_lb->nstage)
        {
            /* We are done optimizing, use the fastest setup we found */
            pme_lb->cur = pme_lb->fastest;
        }
    }

    if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
    {
        OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistlong);
        if (!OK)
        {
            /* For some reason the chosen cut-off is incompatible with DD.
             * We should continue scanning a more limited range of cut-off's.
             */
            if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
            {
                /* stage=nstage says we're finished, but we should continue
                 * balancing, so we set back stage which was just incremented.
                 */
                pme_lb->stage--;
            }
            if (pme_lb->cur <= pme_lb->fastest)
            {
                /* This should not happen, as we set limits on the DLB bounds.
                 * But we implement a complete failsafe solution anyhow.
                 */
                md_print_warn(cr, fp_log, "The fastest PP/PME load balancing setting (cutoff %.3f nm) is no longer available due to DD DLB or box size limitations\n",
                              pme_lb->setup[pme_lb->fastest].rcut_coulomb);
                pme_lb->fastest = pme_lb->lower_limit;
                pme_lb->start   = pme_lb->lower_limit;
            }
            /* Limit the range to below the current cut-off, scan from start */
            pme_lb->end      = pme_lb->cur;
            pme_lb->cur      = pme_lb->start;
            pme_lb->elimited = epmelblimDD;
            print_loadbal_limited(fp_err, fp_log, step, pme_lb);
        }
    }

    /* Change the Coulomb cut-off and the PME grid */

    set = &pme_lb->setup[pme_lb->cur];

    ic->rcoulomb     = set->rcut_coulomb;
    ic->rlist        = set->rlist;
    ic->rlistlong    = set->rlistlong;
    ir->nstcalclr    = set->nstcalclr;
    ic->ewaldcoeff_q = set->ewaldcoeff_q;
    /* TODO: centralize the code that sets the potentials shifts */
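    /* With the potential-shift modifier, the real-space Ewald potential
     * erfc(beta*r)/r is shifted so that it is exactly zero at r = rcoulomb;
     * the shift constant is therefore erfc(beta*rc)/rc and has to be
     * recomputed here because both beta (ewaldcoeff_q) and rc change from
     * one tuning setup to the next.
     */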
    if (ic->coulomb_modifier == eintmodPOTSHIFT)
    {
        GMX_RELEASE_ASSERT(ic->rcoulomb != 0, "Cutoff radius cannot be zero");
        ic->sh_ewald = gmx_erfc(ic->ewaldcoeff_q*ic->rcoulomb) / ic->rcoulomb;
    }
    if (EVDW_PME(ic->vdwtype))
    {
        /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
        ic->rvdw          = set->rcut_coulomb;
        ic->ewaldcoeff_lj = set->ewaldcoeff_lj;
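        /* The potential-shift constants below are the values of the
         * respective potentials at the new cut-off: -rc^-6 and -rc^-12 for
         * plain dispersion/repulsion, and for the real-space LJ-PME term
         * (exp(-x)*(1 + x + x^2/2) - 1)*rc^-6 with x = (ewaldcoeff_lj*rc)^2.
         * They must track rvdw, which was just changed above.
         */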
        if (ic->vdw_modifier == eintmodPOTSHIFT)
        {
            real crc2;

            ic->dispersion_shift.cpot = -std::pow(static_cast<double>(ic->rvdw), -6.0);
            ic->repulsion_shift.cpot  = -std::pow(static_cast<double>(ic->rvdw), -12.0);
            ic->sh_invrc6             = -ic->dispersion_shift.cpot;
            crc2                      = sqr(ic->ewaldcoeff_lj*ic->rvdw);
            ic->sh_lj_ewald           = (exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)*std::pow(static_cast<double>(ic->rvdw), -6.0);
        }
    }

    /* We always re-initialize the tables whether they are used or not */
    init_interaction_const_tables(NULL, ic, rtab);

    nbnxn_gpu_pme_loadbal_update_param(nbv, ic);

    /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
     * also sharing texture references. To keep the code simple, we don't
     * treat texture references as shared resources, but this means that
     * the coulomb_tab texture ref will get updated by multiple threads.
     * Hence, to ensure that the non-bonded kernels don't start before all
     * texture binding operations are finished, we need to wait for all ranks
     * to arrive here before continuing.
     *
     * Note that we could omit this barrier if GPUs are not shared (or
     * texture objects are used), but as this is initialization code, there
     * is no point in complicating things.
     */
#ifdef GMX_THREAD_MPI
    if (PAR(cr) && use_GPU(nbv))
    {
        gmx_barrier(cr);
    }
#endif /* GMX_THREAD_MPI */

    if (!pme_lb->bSepPMERanks)
    {
        if (pme_lb->setup[pme_lb->cur].pmedata == NULL)
        {
            /* Generate a new PME data structure,
             * copying part of the old pointers.
             */
            gmx_pme_reinit(&set->pmedata,
                           cr, pme_lb->setup[0].pmedata, ir,
                           set->grid);
        }
        *pmedata = set->pmedata;
    }
    else
    {
        /* Tell our PME-only rank to switch grid */
        gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
    }

    if (debug)
    {
        print_grid(NULL, debug, "", "switched to", set, -1);
    }

    if (pme_lb->stage == pme_lb->nstage)
    {
        print_grid(fp_err, fp_log, "", "optimal", set, -1);
    }
}

/*! \brief Prepare for another round of PME load balancing
 *
 * \param[in,out] pme_lb       Pointer to PME load balancing struct
 * \param[in]     bDlbUnlocked TRUE if DLB was locked and is now unlocked
 *
 * If the conditions (e.g. DLB off/on, CPU/GPU throttling etc.) changed,
 * the PP/PME balance might change and re-balancing can improve performance.
 * This function adds 2 stages and adjusts the considered setup range.
 */
static void continue_pme_loadbal(pme_load_balancing_t *pme_lb,
                                 gmx_bool              bDlbUnlocked)
{
    /* Add 2 tuning stages, keep the detected end of the setup range */
    pme_lb->nstage += 2;
    if (bDlbUnlocked && pme_lb->bSepPMERanks)
    {
        /* With separate PME ranks, DLB should always lower the PP load and
         * can only increase the PME load (more communication and imbalance),
         * so we only need to scan longer cut-off's.
         */
        pme_lb->lower_limit = pme_lb->cur;
    }
    pme_lb->start = pme_lb->lower_limit;
}

void pme_loadbal_do(pme_load_balancing_t *pme_lb,
                    t_commrec            *cr,
                    FILE                 *fp_err,
                    FILE                 *fp_log,
                    t_inputrec           *ir,
                    t_forcerec           *fr,
                    t_state              *state,
                    gmx_wallcycle_t       wcycle,
                    gmx_int64_t           step,
                    gmx_int64_t           step_rel,
                    gmx_bool             *bPrinting)
{
    int    n_prev;
    double cycles_prev;

    assert(pme_lb != NULL);

    if (!pme_lb->bActive)
    {
        return;
    }

    n_prev      = pme_lb->cycles_n;
    cycles_prev = pme_lb->cycles_c;
    wallcycle_get(wcycle, ewcSTEP, &pme_lb->cycles_n, &pme_lb->cycles_c);

    /* Before the first step we haven't done any steps yet.
     * Also handle cases where ir->init_step % ir->nstlist != 0.
     */
    if (pme_lb->cycles_n < ir->nstlist)
    {
        return;
    }
    /* Sanity check, we expect nstlist cycle counts */
    if (pme_lb->cycles_n - n_prev != ir->nstlist)
    {
        /* We could return here, but it's safer to issue an error and quit */
        gmx_incons("pme_loadbal_do called at an interval != nstlist");
    }

    /* PME grid + cut-off optimization with GPUs or PME ranks */
    if (!pme_lb->bBalance && pme_lb->bSepPMERanks)
    {
        if (pme_lb->bTriggerOnDLB)
        {
            pme_lb->bBalance = dd_dlb_is_on(cr->dd);
        }
        /* We should ignore the first timing to avoid timing allocation
         * overhead. And since the PME load balancing is called just
         * before DD repartitioning, the ratio returned by dd_pme_f_ratio
         * is not over the last nstlist steps, but the nstlist steps before
         * that. So the first useful ratio is available at step_rel=3*nstlist.
         */
        else if (step_rel >= 3*ir->nstlist)
        {
            if (DDMASTER(cr->dd))
            {
                /* If PME rank load is too high, start tuning */
                pme_lb->bBalance =
                    (dd_pme_f_ratio(cr->dd) >= loadBalanceTriggerFactor);
            }
            dd_bcast(cr->dd, sizeof(gmx_bool), &pme_lb->bBalance);
        }

        pme_lb->bActive = (pme_lb->bBalance ||
                           step_rel <= pme_lb->step_rel_stop);
    }
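
    /* Note: with the default loadBalanceTriggerFactor of 1.05, the check
     * above starts tuning once dd_pme_f_ratio() reports the PME ranks to be
     * at least 5% overloaded relative to the PP ranks.
     */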

    /* The location in the code of this balancing termination is strange.
     * You would expect to have it after the call to pme_load_balance()
     * below, since there pme_lb->stage is updated.
     * But when terminating directly after deciding on and selecting the
     * optimal setup, DLB will turn on right away if it was locked before.
     * This might be due to PME reinitialization. So we check stage here
     * to allow for another nstlist steps with DLB locked to stabilize
     * the performance.
     */
    if (pme_lb->bBalance && pme_lb->stage == pme_lb->nstage)
    {
        pme_lb->bBalance = FALSE;

        if (DOMAINDECOMP(cr) && dd_dlb_is_locked(cr->dd))
        {
            /* Unlock the DLB=auto, DLB is allowed to activate */
            dd_dlb_unlock(cr->dd);
            md_print_warn(cr, fp_log, "NOTE: DLB can now turn on, when beneficial\n");

            /* We don't deactivate the tuning yet, since we will balance again
             * after DLB gets turned on, if it does within PMETunePeriod.
             */
            continue_pme_loadbal(pme_lb, TRUE);
            pme_lb->bTriggerOnDLB = TRUE;
            pme_lb->step_rel_stop = step_rel + PMETunePeriod*ir->nstlist;
        }
        else
        {
            /* We're completely done with PME tuning */
            pme_lb->bActive = FALSE;
        }

        if (DOMAINDECOMP(cr))
        {
            /* Set the cut-off limit to the final selected cut-off,
             * so we don't have artificial DLB limits.
             * This also ensures that we won't disable the currently
             * optimal setting during a second round of PME balancing.
             */
            set_dd_dlb_max_cutoff(cr, fr->ic->rlistlong);
        }
    }

    if (pme_lb->bBalance)
    {
        /* We might not have collected nstlist steps in cycles yet,
         * since init_step might not be a multiple of nstlist,
         * but the first data collected is skipped anyhow.
         */
        pme_load_balance(pme_lb, cr,
                         fp_err, fp_log,
                         ir, state, pme_lb->cycles_c - cycles_prev,
                         fr->ic, fr->nbv, &fr->pmedata,
                         step);

        /* Update constants in forcerec/inputrec to keep them in sync with fr->ic */
        fr->ewaldcoeff_q  = fr->ic->ewaldcoeff_q;
        fr->ewaldcoeff_lj = fr->ic->ewaldcoeff_lj;
        fr->rlist         = fr->ic->rlist;
        fr->rlistlong     = fr->ic->rlistlong;
        fr->rcoulomb      = fr->ic->rcoulomb;
        fr->rvdw          = fr->ic->rvdw;
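
        /* The long-range dispersion correction depends on rvdw, which can
         * have changed just above when LJ-PME is in use, so it is recomputed
         * below.
         */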
        if (ir->eDispCorr != edispcNO)
        {
            calc_enervirdiff(NULL, ir->eDispCorr, fr);
        }
    }

    if (!pme_lb->bBalance &&
        (!pme_lb->bSepPMERanks || step_rel > pme_lb->step_rel_stop))
    {
        /* We have just deactivated the balancing and we're not measuring PP/PME
         * imbalance during the first steps of the run: deactivate the tuning.
         */
        pme_lb->bActive = FALSE;
    }

    if (!(pme_lb->bActive) && DOMAINDECOMP(cr) && dd_dlb_is_locked(cr->dd))
    {
        /* Make sure DLB is allowed when we deactivate PME tuning */
        dd_dlb_unlock(cr->dd);
        md_print_warn(cr, fp_log, "NOTE: DLB can now turn on, when beneficial\n");
    }

    *bPrinting = pme_lb->bBalance;
}

/*! \brief Return product of the number of PME grid points in each dimension */
static int pme_grid_points(const pme_setup_t *setup)
{
    return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
}

/*! \brief Return the largest short-range list cut-off radius */
static real pme_loadbal_rlist(const pme_setup_t *setup)
{
    /* With the group cut-off scheme we can have twin-range either
     * for Coulomb or for VdW, so we need a check here.
     * With the Verlet cut-off scheme rlist=rlistlong.
     */
    if (setup->rcut_coulomb > setup->rlist)
    {
        return setup->rlistlong;
    }
    else
    {
        return setup->rlist;
    }
}
1102 static void print_pme_loadbal_setting(FILE *fplog,
1103 const char *name,
1104 const pme_setup_t *setup)
1106 fprintf(fplog,
1107 " %-7s %6.3f nm %6.3f nm %3d %3d %3d %5.3f nm %5.3f nm\n",
1108 name,
1109 setup->rcut_coulomb, pme_loadbal_rlist(setup),
1110 setup->grid[XX], setup->grid[YY], setup->grid[ZZ],
1111 setup->spacing, 1/setup->ewaldcoeff_q);

/*! \brief Print all load-balancing settings */
static void print_pme_loadbal_settings(pme_load_balancing_t *pme_lb,
                                       t_commrec            *cr,
                                       FILE                 *fplog,
                                       gmx_bool              bNonBondedOnGPU)
{
    double pp_ratio, grid_ratio;
    real   pp_ratio_temporary;

    pp_ratio_temporary = pme_loadbal_rlist(&pme_lb->setup[pme_lb->cur])/pme_loadbal_rlist(&pme_lb->setup[0]);
    pp_ratio           = std::pow(static_cast<double>(pp_ratio_temporary), 3.0);
    grid_ratio         = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
        (double)pme_grid_points(&pme_lb->setup[0]);
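
    /* pp_ratio is the cube of the list cut-off ratio, i.e. an estimate of how
     * much the short-range pair work grew (pair cost scales with the cut-off
     * volume), while grid_ratio is the factor by which the number of PME grid
     * points, and thus roughly the FFT work, changed.
     */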

    fprintf(fplog, "\n");
    fprintf(fplog, "       P P   -   P M E   L O A D   B A L A N C I N G\n");
    fprintf(fplog, "\n");
    /* Here we only warn when the optimal setting is the last one */
    if (pme_lb->elimited != epmelblimNO &&
        pme_lb->cur == pme_loadbal_end(pme_lb)-1)
    {
        fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
                pmelblim_str[pme_lb->elimited]);
        fprintf(fplog, "       you might not have reached a good load balance.\n");
        if (pme_lb->elimited == epmelblimDD)
        {
            fprintf(fplog, "       Try different mdrun -dd settings or lower the -dds value.\n");
        }
        fprintf(fplog, "\n");
    }
    fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
    fprintf(fplog, "           particle-particle                    PME\n");
    fprintf(fplog, "            rcoulomb  rlist            grid      spacing   1/beta\n");
    print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
    print_pme_loadbal_setting(fplog, "final", &pme_lb->setup[pme_lb->cur]);
    fprintf(fplog, " cost-ratio           %4.2f             %4.2f\n",
            pp_ratio, grid_ratio);
    fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");

    if (pp_ratio > 1.5 && !bNonBondedOnGPU)
    {
        md_print_warn(cr, fplog,
                      "NOTE: PME load balancing increased the non-bonded workload by more than 50%%.\n"
                      "      For better performance, use (more) PME ranks (mdrun -npme),\n"
                      "      or if you are beyond the scaling limit, use fewer total ranks (or nodes).\n");
    }
    else
    {
        fprintf(fplog, "\n");
    }
}

void pme_loadbal_done(pme_load_balancing_t *pme_lb,
                      t_commrec            *cr,
                      FILE                 *fplog,
                      gmx_bool              bNonBondedOnGPU)
{
    if (fplog != NULL && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
    {
        print_pme_loadbal_settings(pme_lb, cr, fplog, bNonBondedOnGPU);
    }

    /* TODO: Here we should free all pointers in pme_lb,
     * but as it contains pme data structures,
     * we need to first make pme.c free all data.
     */
}