src/gromacs/ewald/pme-load-balancing.cpp
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
35 /*! \internal \file
37 * \brief This file contains function definitions necessary for
38 * managing automatic load balance of PME calculations (Coulomb and
39 * LJ).
41 * \author Berk Hess <hess@kth.se>
42 * \ingroup module_ewald
44 #include "gmxpre.h"
46 #include "pme-load-balancing.h"
48 #include "config.h"
50 #include <assert.h>
52 #include <cmath>
54 #include <algorithm>
56 #include "gromacs/domdec/domdec.h"
57 #include "gromacs/domdec/domdec_network.h"
58 #include "gromacs/domdec/domdec_struct.h"
59 #include "gromacs/ewald/ewald-utils.h"
60 #include "gromacs/ewald/pme.h"
61 #include "gromacs/fft/calcgrid.h"
62 #include "gromacs/gmxlib/network.h"
63 #include "gromacs/math/functions.h"
64 #include "gromacs/math/vec.h"
65 #include "gromacs/mdlib/forcerec.h"
66 #include "gromacs/mdlib/nb_verlet.h"
67 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
68 #include "gromacs/mdlib/nbnxn_pairlist.h"
69 #include "gromacs/mdlib/sim_util.h"
70 #include "gromacs/mdtypes/commrec.h"
71 #include "gromacs/mdtypes/inputrec.h"
72 #include "gromacs/mdtypes/md_enums.h"
73 #include "gromacs/mdtypes/state.h"
74 #include "gromacs/pbcutil/pbc.h"
75 #include "gromacs/timing/wallcycle.h"
76 #include "gromacs/utility/cstringutil.h"
77 #include "gromacs/utility/fatalerror.h"
78 #include "gromacs/utility/gmxassert.h"
79 #include "gromacs/utility/logger.h"
80 #include "gromacs/utility/smalloc.h"
82 #include "pme-internal.h"
84 /*! \brief Parameters and settings for one PP-PME setup */
85 struct pme_setup_t {
86 real rcut_coulomb; /**< Coulomb cut-off */
87 real rlistOuter; /**< cut-off for the outer pair-list */
88 real rlistInner; /**< cut-off for the inner pair-list */
89 real spacing; /**< (largest) PME grid spacing */
90 ivec grid; /**< the PME grid dimensions */
91 real grid_efficiency; /**< inefficiency factor for non-uniform grids; 1 for uniform x/y/z spacing */
92 real ewaldcoeff_q; /**< Electrostatic Ewald coefficient */
93 real ewaldcoeff_lj; /**< LJ Ewald coefficient, only for the call to send_switchgrid */
94 struct gmx_pme_t *pmedata; /**< the data structure used in the PME code */
95 int count; /**< number of times this setup has been timed */
96 double cycles; /**< the fastest time for this setup in cycles */
99 /*! \brief After 50 nstlist periods of not observing imbalance: never tune PME */
100 const int PMETunePeriod = 50;
101 /*! \brief Trigger PME load balancing at more than 5% PME overload */
102 const real loadBalanceTriggerFactor = 1.05;
103 /*! \brief Scale the grid by at most a factor of 1.7.
105 * This still leaves room for about a 4-4.5x decrease in the number of grid points while limiting the cases where
106 * large imbalance leads to extreme cutoff scaling for marginal benefits.
108 * This should help to avoid:
109 * - large increase in power consumption for little performance gain
110 * - increasing communication volume
111 * - limiting DLB
113 const real c_maxSpacingScaling = 1.7;
114 /*! \brief In the initial scan, step by grids that are at least a factor 0.8 coarser */
115 const real gridpointsScaleFactor = 0.8;
116 /*! \brief In the initial scan, try to skip grids with uneven x/y/z spacing,
117 * checking if the "efficiency" is more than 5% worse than the previous grid.
119 const real relativeEfficiencyFactor = 1.05;
121 /*! \brief In later stages, only rerun setups that are at most 12% slower than the fastest run so far (illustrated in the sketch after these constants) */
121 const real maxRelativeSlowdownAccepted = 1.12;
122 /*! \brief If setups get more than 2% faster, do another round to avoid
123 * choosing a slower setup due to acceleration or fluctuations.
125 const real maxFluctuationAccepted = 1.02;
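/* Illustrative sketch (not part of the GROMACS sources): how the two key
 * thresholds above are meant to be applied. The function names are
 * hypothetical and the block is not built; it only restates the trigger and
 * rerun criteria defined by the constants.
 */
#if 0
static bool exampleTriggersBalancing(double pmeToPpForceTimeRatio)
{
    /* Tuning starts once the PME ranks spend >5% more time on the force
     * calculation than the PP ranks (see pme_loadbal_do below). */
    return pmeToPpForceTimeRatio >= loadBalanceTriggerFactor;        /* 1.05 */
}

static bool exampleWorthRerunning(double setupCycles, double fastestCycles)
{
    /* In stages > 0, only setups within 12% of the fastest are rerun. */
    return setupCycles <= fastestCycles*maxRelativeSlowdownAccepted; /* 1.12 */
}
#endif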
127 /*! \brief Enumeration whose values describe the effect limiting the load balancing */
128 enum epmelb {
129 epmelblimNO, epmelblimBOX, epmelblimDD, epmelblimPMEGRID, epmelblimMAXSCALING, epmelblimNR
132 /*! \brief Descriptive strings matching ::epmelb */
133 const char *pmelblim_str[epmelblimNR] =
134 { "no", "box size", "domain decomposition", "PME grid restriction", "maximum allowed grid scaling" };
136 struct pme_load_balancing_t {
137 gmx_bool bSepPMERanks; /**< do we have separate PME ranks? */
138 gmx_bool bActive; /**< is PME tuning active? */
139 gmx_int64_t step_rel_stop; /**< stop the tuning after this value of step_rel */
140 gmx_bool bTriggerOnDLB; /**< trigger balancing only on DD DLB */
141 gmx_bool bBalance; /**< are we in the balancing phase, i.e. trying different setups? */
142 int nstage; /**< the current maximum number of stages */
144 real cut_spacing; /**< the minimum cutoff / PME grid spacing ratio */
145 real rcut_vdw; /**< Vdw cutoff (does not change) */
146 real rcut_coulomb_start; /**< Initial electrostatics cutoff */
147 real rbufOuter_coulomb; /**< the outer pairlist buffer size */
148 real rbufOuter_vdw; /**< the outer pairlist buffer size */
149 real rbufInner_coulomb; /**< the inner pairlist buffer size */
150 real rbufInner_vdw; /**< the inner pairlist buffer size */
151 matrix box_start; /**< the initial simulation box */
152 int n; /**< the number of setups as well as the allocation size */
153 pme_setup_t *setup; /**< the PME+cutoff setups */
154 int cur; /**< the index (in setup) of the current setup */
155 int fastest; /**< index of the fastest setup up till now */
156 int lower_limit; /**< don't go below this setup index */
157 int start; /**< start of setup index range to consider in stage>0 */
158 int end; /**< end of setup index range to consider in stage>0 */
159 int elimited; /**< was the balancing limited, uses enum above */
160 int cutoff_scheme; /**< Verlet or group cut-offs */
162 int stage; /**< the current stage */
164 int cycles_n; /**< step cycle counter cumulative count */
165 double cycles_c; /**< step cycle counter cumulative cycles */
168 /* TODO The code in this file should call this getter, rather than
169 * read bActive anywhere */
170 bool pme_loadbal_is_active(const pme_load_balancing_t *pme_lb)
172 return pme_lb != nullptr && pme_lb->bActive;
175 void pme_loadbal_init(pme_load_balancing_t **pme_lb_p,
176 t_commrec *cr,
177 const gmx::MDLogger &mdlog,
178 const t_inputrec *ir,
179 matrix box,
180 const interaction_const_t *ic,
181 const NbnxnListParameters *listParams,
182 gmx_pme_t *pmedata,
183 gmx_bool bUseGPU,
184 gmx_bool *bPrinting)
186 GMX_RELEASE_ASSERT(ir->cutoff_scheme != ecutsGROUP, "PME tuning is not supported with cutoff-scheme=group (because it contains bugs)");
188 pme_load_balancing_t *pme_lb;
189 real spm, sp;
190 int d;
192 // Note that we don't (yet) support PME load balancing with LJ-PME only.
193 GMX_RELEASE_ASSERT(EEL_PME(ir->coulombtype), "pme_loadbal_init called without PME electrostatics");
194 // To avoid complexity, we require a single cut-off with PME for q+LJ.
195 // This is checked by grompp, but it doesn't hurt to check again.
196 GMX_RELEASE_ASSERT(!(EEL_PME(ir->coulombtype) && EVDW_PME(ir->vdwtype) && ir->rcoulomb != ir->rvdw), "With Coulomb and LJ PME, rcoulomb should be equal to rvdw");
198 snew(pme_lb, 1);
200 pme_lb->bSepPMERanks = !thisRankHasDuty(cr, DUTY_PME);
202 /* Initially we turn on balancing directly based on PP/PME imbalance */
203 pme_lb->bTriggerOnDLB = FALSE;
205 /* Any number of stages >= 2 is supported */
206 pme_lb->nstage = 2;
208 pme_lb->cutoff_scheme = ir->cutoff_scheme;
210 pme_lb->rbufOuter_coulomb = listParams->rlistOuter - ic->rcoulomb;
211 pme_lb->rbufOuter_vdw = listParams->rlistOuter - ic->rvdw;
212 pme_lb->rbufInner_coulomb = listParams->rlistInner - ic->rcoulomb;
213 pme_lb->rbufInner_vdw = listParams->rlistInner - ic->rvdw;
215 /* Scale box with Ewald wall factor; note that we can't always use
216 * pmedata->boxScaler as it's not available with separate PME ranks.
218 EwaldBoxZScaler boxScaler(*ir);
219 boxScaler.scaleBox(box, pme_lb->box_start);
221 pme_lb->n = 1;
222 snew(pme_lb->setup, pme_lb->n);
224 pme_lb->rcut_vdw = ic->rvdw;
225 pme_lb->rcut_coulomb_start = ir->rcoulomb;
227 pme_lb->cur = 0;
228 pme_lb->setup[0].rcut_coulomb = ic->rcoulomb;
229 pme_lb->setup[0].rlistOuter = listParams->rlistOuter;
230 pme_lb->setup[0].rlistInner = listParams->rlistInner;
231 pme_lb->setup[0].grid[XX] = ir->nkx;
232 pme_lb->setup[0].grid[YY] = ir->nky;
233 pme_lb->setup[0].grid[ZZ] = ir->nkz;
234 pme_lb->setup[0].ewaldcoeff_q = ic->ewaldcoeff_q;
235 pme_lb->setup[0].ewaldcoeff_lj = ic->ewaldcoeff_lj;
237 if (!pme_lb->bSepPMERanks)
239 GMX_RELEASE_ASSERT(pmedata, "On ranks doing both PP and PME we need a valid pmedata object");
240 pme_lb->setup[0].pmedata = pmedata;
243 spm = 0;
244 for (d = 0; d < DIM; d++)
246 sp = norm(pme_lb->box_start[d])/pme_lb->setup[0].grid[d];
247 if (sp > spm)
249 spm = sp;
252 pme_lb->setup[0].spacing = spm;
254 if (ir->fourier_spacing > 0)
256 pme_lb->cut_spacing = ir->rcoulomb/ir->fourier_spacing;
258 else
260 pme_lb->cut_spacing = ir->rcoulomb/pme_lb->setup[0].spacing;
263 pme_lb->stage = 0;
265 pme_lb->fastest = 0;
266 pme_lb->lower_limit = 0;
267 pme_lb->start = 0;
268 pme_lb->end = 0;
269 pme_lb->elimited = epmelblimNO;
271 pme_lb->cycles_n = 0;
272 pme_lb->cycles_c = 0;
274 if (!wallcycle_have_counter())
276 GMX_LOG(mdlog.warning).asParagraph().appendText("NOTE: Cycle counters unsupported or not enabled in kernel. Cannot use PME-PP balancing.");
279 /* Tune with GPUs and/or separate PME ranks.
280 * When running only on a CPU without PME ranks, PME tuning will only help
281 * with small numbers of atoms in the cut-off sphere.
283 pme_lb->bActive = (wallcycle_have_counter() && (bUseGPU ||
284 pme_lb->bSepPMERanks));
286 /* With GPUs and no separate PME ranks we can't measure the PP/PME
287 * imbalance, so we start balancing right away.
288 * Otherwise we only start balancing after we observe imbalance.
290 pme_lb->bBalance = (pme_lb->bActive && (bUseGPU && !pme_lb->bSepPMERanks));
292 pme_lb->step_rel_stop = PMETunePeriod*ir->nstlist;
294 /* Delay DD load balancing when GPUs are used */
295 if (pme_lb->bActive && DOMAINDECOMP(cr) && cr->dd->nnodes > 1 && bUseGPU)
297 /* Lock DLB=auto to off (does nothing when DLB=yes/no).
298 * With GPUs and separate PME nodes, we want to first
299 * do PME tuning without DLB, since DLB might limit
300 * the cut-off, which never improves performance.
301 * We allow for DLB + PME tuning after a first round of tuning.
303 dd_dlb_lock(cr->dd);
304 if (dd_dlb_is_locked(cr->dd))
306 GMX_LOG(mdlog.warning).asParagraph().appendText("NOTE: DLB will not turn on during the first phase of PME tuning");
310 *pme_lb_p = pme_lb;
312 *bPrinting = pme_lb->bBalance;
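/* Illustrative sketch (not part of the GROMACS sources): caller-side use of
 * pme_loadbal_init() matching the signature above. The wrapper function and
 * variable names are hypothetical; the members accessed on fr are the ones
 * this file itself uses (fr->ic, fr->nbv, fr->pmedata).
 */
#if 0
static void exampleSetupPmeTuning(t_commrec             *cr,
                                  const gmx::MDLogger   &mdlog,
                                  const t_inputrec      *ir,
                                  t_state               *state,
                                  t_forcerec            *fr,
                                  pme_load_balancing_t **pmeLoadBal,
                                  gmx_bool              *bPmeTunePrinting)
{
    pme_loadbal_init(pmeLoadBal, cr, mdlog, ir, state->box,
                     fr->ic, fr->nbv->listParams.get(), fr->pmedata,
                     use_GPU(fr->nbv), bPmeTunePrinting);
    /* Tuning only becomes active when cycle counters are available and
     * either a GPU or separate PME ranks are in use; query it afterwards
     * with pme_loadbal_is_active(*pmeLoadBal). */
}
#endif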
315 /*! \brief Try to increase the cutoff during load balancing */
316 static gmx_bool pme_loadbal_increase_cutoff(pme_load_balancing_t *pme_lb,
317 int pme_order,
318 const gmx_domdec_t *dd)
320 pme_setup_t *set;
321 int npmeranks_x, npmeranks_y;
322 real fac, sp;
323 real tmpr_coulomb, tmpr_vdw;
324 int d;
325 bool grid_ok;
327 /* Try to add a new setup with next larger cut-off to the list */
328 pme_lb->n++;
329 srenew(pme_lb->setup, pme_lb->n);
330 set = &pme_lb->setup[pme_lb->n-1];
331 set->pmedata = nullptr;
333 get_pme_nnodes(dd, &npmeranks_x, &npmeranks_y);
335 fac = 1;
338 /* Avoid infinite while loop, which can occur at the minimum grid size.
339 * Note that in practice load balancing will stop before this point.
340 * The factor 2.1 allows for the extreme case in which only grids
341 * of powers of 2 are allowed (the current code supports more grids).
343 if (fac > 2.1)
345 pme_lb->n--;
347 return FALSE;
350 fac *= 1.01;
351 clear_ivec(set->grid);
352 sp = calcFftGrid(nullptr, pme_lb->box_start,
353 fac*pme_lb->setup[pme_lb->cur].spacing,
354 minimalPmeGridSize(pme_order),
355 &set->grid[XX],
356 &set->grid[YY],
357 &set->grid[ZZ]);
359 /* As here we can't easily check if one of the PME ranks
360 * uses threading, we do a conservative grid check.
361 * This means we can't use pme_order or fewer grid lines
362 * per PME rank along x, which is not a strong restriction.
364 grid_ok = gmx_pme_check_restrictions(pme_order,
365 set->grid[XX], set->grid[YY], set->grid[ZZ],
366 npmeranks_x,
367 true,
368 false);
370 while (sp <= 1.001*pme_lb->setup[pme_lb->cur].spacing || !grid_ok);
372 set->rcut_coulomb = pme_lb->cut_spacing*sp;
373 if (set->rcut_coulomb < pme_lb->rcut_coulomb_start)
375 /* This is unlikely, but can happen when e.g. continuing from
376 * a checkpoint after equilibration where the box shrank a lot.
377 * We want to avoid rcoulomb getting smaller than rvdw
378 * and there might be more issues with decreasing rcoulomb.
380 set->rcut_coulomb = pme_lb->rcut_coulomb_start;
383 if (pme_lb->cutoff_scheme == ecutsVERLET)
385 /* Never decrease the Coulomb and VdW list buffers */
386 set->rlistOuter = std::max(set->rcut_coulomb + pme_lb->rbufOuter_coulomb,
387 pme_lb->rcut_vdw + pme_lb->rbufOuter_vdw);
388 set->rlistInner = std::max(set->rcut_coulomb + pme_lb->rbufInner_coulomb,
389 pme_lb->rcut_vdw + pme_lb->rbufInner_vdw);
391 else
393 /* TODO Remove these lines and pme_lb->cutoff_scheme */
394 tmpr_coulomb = set->rcut_coulomb + pme_lb->rbufOuter_coulomb;
395 tmpr_vdw = pme_lb->rcut_vdw + pme_lb->rbufOuter_vdw;
396 /* Two (known) bugs with cutoff-scheme=group here:
397 * - This modification of rlist results in incorrect DD communication.
398 * - We should set fr->bTwinRange = (fr->rlistlong > fr->rlist).
400 set->rlistOuter = std::min(tmpr_coulomb, tmpr_vdw);
401 set->rlistInner = set->rlistOuter;
404 set->spacing = sp;
405 /* The grid efficiency is the number of grid points relative to that of a grid with uniform x/y/z spacing */
406 set->grid_efficiency = 1;
407 for (d = 0; d < DIM; d++)
409 set->grid_efficiency *= (set->grid[d]*sp)/norm(pme_lb->box_start[d]);
411 /* The Ewald coefficient is inversely proportional to the cut-off (see the sketch after this function) */
412 set->ewaldcoeff_q =
413 pme_lb->setup[0].ewaldcoeff_q*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
414 /* We set ewaldcoeff_lj in set, even when LJ-PME is not used */
415 set->ewaldcoeff_lj =
416 pme_lb->setup[0].ewaldcoeff_lj*pme_lb->setup[0].rcut_coulomb/set->rcut_coulomb;
418 set->count = 0;
419 set->cycles = 0;
421 if (debug)
423 fprintf(debug, "PME loadbal: grid %d %d %d, coulomb cutoff %f\n",
424 set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb);
426 return TRUE;
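/* Illustrative sketch (not part of the GROMACS sources): the scaling
 * relations used in the function above, written out on their own. The
 * cut-off follows the grid spacing at the fixed ratio cut_spacing, and the
 * Ewald coefficient follows 1/rcut, so beta*rcut and therefore
 * erfc(beta*rcut), i.e. the requested real-space tolerance (ewald_rtol),
 * stay unchanged across setups. The function name is hypothetical.
 */
#if 0
static void exampleScaleSetup(real  cutSpacingRatio, /* rcoulomb/spacing, kept fixed */
                              real  newSpacing,      /* spacing of the coarser grid  */
                              real  rcut0,           /* reference Coulomb cut-off    */
                              real  beta0,           /* reference Ewald coefficient  */
                              real *rcutNew,
                              real *betaNew)
{
    *rcutNew = cutSpacingRatio*newSpacing; /* coarser grid -> longer cut-off       */
    *betaNew = beta0*rcut0/(*rcutNew);     /* keeps beta*rcut (accuracy) constant  */
}
#endif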
429 /*! \brief Print the PME grid */
430 static void print_grid(FILE *fp_err, FILE *fp_log,
431 const char *pre,
432 const char *desc,
433 const pme_setup_t *set,
434 double cycles)
436 char buf[STRLEN], buft[STRLEN];
438 if (cycles >= 0)
440 sprintf(buft, ": %.1f M-cycles", cycles*1e-6);
442 else
444 buft[0] = '\0';
446 sprintf(buf, "%-11s%10s pme grid %d %d %d, coulomb cutoff %.3f%s",
447 pre,
448 desc, set->grid[XX], set->grid[YY], set->grid[ZZ], set->rcut_coulomb,
449 buft);
450 if (fp_err != nullptr)
452 fprintf(fp_err, "\r%s\n", buf);
453 fflush(fp_err);
455 if (fp_log != nullptr)
457 fprintf(fp_log, "%s\n", buf);
461 /*! \brief Return the index of the last setup used in PME load balancing */
462 static int pme_loadbal_end(pme_load_balancing_t *pme_lb)
464 /* In the initial stage only n is set; end is not set yet */
465 if (pme_lb->end > 0)
467 return pme_lb->end;
469 else
471 return pme_lb->n;
475 /*! \brief Print descriptive string about what limits PME load balancing */
476 static void print_loadbal_limited(FILE *fp_err, FILE *fp_log,
477 gmx_int64_t step,
478 pme_load_balancing_t *pme_lb)
480 char buf[STRLEN], sbuf[22];
482 sprintf(buf, "step %4s: the %s limits the PME load balancing to a coulomb cut-off of %.3f",
483 gmx_step_str(step, sbuf),
484 pmelblim_str[pme_lb->elimited],
485 pme_lb->setup[pme_loadbal_end(pme_lb)-1].rcut_coulomb);
486 if (fp_err != nullptr)
488 fprintf(fp_err, "\r%s\n", buf);
489 fflush(fp_err);
491 if (fp_log != nullptr)
493 fprintf(fp_log, "%s\n", buf);
497 /*! \brief Switch load balancing to stage 1
499 * In this stage, only reasonably fast setups are run again. */
500 static void switch_to_stage1(pme_load_balancing_t *pme_lb)
502 /* Increase start until we find a setup that is not slower than
503 * maxRelativeSlowdownAccepted times the fastest setup.
505 pme_lb->start = pme_lb->lower_limit;
506 while (pme_lb->start + 1 < pme_lb->n &&
507 (pme_lb->setup[pme_lb->start].count == 0 ||
508 pme_lb->setup[pme_lb->start].cycles >
509 pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted))
511 pme_lb->start++;
513 /* While increasing start, we might have skipped setups that we did not
514 * time during stage 0. We want to extend the range for stage 1 to include
515 * any skipped setups that lie between setups that were measured to be
516 * acceptably fast and too slow.
518 while (pme_lb->start > pme_lb->lower_limit &&
519 pme_lb->setup[pme_lb->start - 1].count == 0)
521 pme_lb->start--;
524 /* Decrease end only with setups that we timed and that are slow. */
525 pme_lb->end = pme_lb->n;
526 if (pme_lb->setup[pme_lb->end - 1].count > 0 &&
527 pme_lb->setup[pme_lb->end - 1].cycles >
528 pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted)
530 pme_lb->end--;
533 pme_lb->stage = 1;
535 /* Next we want to choose setup pme_lb->end-1, but as we will decrease
536 * pme_lb->cur by one right after returning, we set cur to end.
538 pme_lb->cur = pme_lb->end;
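/* Illustrative sketch (not part of the GROMACS sources): a standalone
 * rendering of the stage-1 window selection above, plus a worked example.
 * The struct and function names are hypothetical; setups with count == 0
 * were never timed during stage 0.
 */
#if 0
#include <vector>

struct ExampleTiming { int count; double cycles; };

static void exampleStage1Window(const std::vector<ExampleTiming> &setups,
                                int fastest, int lowerLimit, int *start, int *end)
{
    const double limit = setups[fastest].cycles*maxRelativeSlowdownAccepted;

    *start = lowerLimit;
    while (*start + 1 < static_cast<int>(setups.size()) &&
           (setups[*start].count == 0 || setups[*start].cycles > limit))
    {
        (*start)++;
    }
    /* Extend the range back over untimed setups adjacent to the accepted ones */
    while (*start > lowerLimit && setups[*start - 1].count == 0)
    {
        (*start)--;
    }

    *end = static_cast<int>(setups.size());
    if (setups[*end - 1].count > 0 && setups[*end - 1].cycles > limit)
    {
        (*end)--;
    }
}
/* Example: cycles of {10.0, 9.0, 8.5, 9.4, 11.0} M-cycles (all timed) with
 * fastest = 2 give a limit of 8.5*1.12 = 9.52 M-cycles, so stage 1 reruns
 * setups 1..3 (start = 1, end = 4). */
#endif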
541 /*! \brief Process the timings and try to adjust the PME grid and Coulomb cut-off
543 * The adjustment is done to generate a different non-bonded PP and PME load.
544 * With separate PME ranks (PP and PME on different processes) or with
545 * a GPU (PP on GPU, PME on CPU), PP and PME run on different resources
546 * and changing the load will affect the load balance and performance.
547 * The total time for a set of integration steps is monitored and a range
548 * of grid/cut-off setups is scanned. After calling pme_load_balance many
549 * times and acquiring enough statistics, the best performing setup is chosen.
550 * Here we try to take into account fluctuations and changes due to external
551 * factors as well as DD load balancing.
553 static void
554 pme_load_balance(pme_load_balancing_t *pme_lb,
555 t_commrec *cr,
556 FILE *fp_err,
557 FILE *fp_log,
558 const gmx::MDLogger &mdlog,
559 const t_inputrec *ir,
560 t_state *state,
561 double cycles,
562 interaction_const_t *ic,
563 struct nonbonded_verlet_t *nbv,
564 struct gmx_pme_t ** pmedata,
565 gmx_int64_t step)
567 gmx_bool OK;
568 pme_setup_t *set;
569 double cycles_fast;
570 char buf[STRLEN], sbuf[22];
571 real rtab;
573 if (PAR(cr))
575 gmx_sumd(1, &cycles, cr);
576 cycles /= cr->nnodes;
579 set = &pme_lb->setup[pme_lb->cur];
580 set->count++;
582 rtab = ir->rlist + ir->tabext;
584 if (set->count % 2 == 1)
586 /* Skip the first cycle, because the first step after a switch
587 * is much slower due to allocation and/or caching effects.
589 return;
592 sprintf(buf, "step %4s: ", gmx_step_str(step, sbuf));
593 print_grid(fp_err, fp_log, buf, "timed with", set, cycles);
595 if (set->count <= 2)
597 set->cycles = cycles;
599 else
601 if (cycles*maxFluctuationAccepted < set->cycles &&
602 pme_lb->stage == pme_lb->nstage - 1)
604 /* The performance went up a lot (due to e.g. DD load balancing).
605 * Add a stage, keep the minima, but rescan all setups.
607 pme_lb->nstage++;
609 if (debug)
611 fprintf(debug, "The performance for grid %d %d %d went from %.3f to %.1f M-cycles, this is more than %f\n"
612 "Increased the number of stages to %d"
613 " and ignoring the previous performance\n",
614 set->grid[XX], set->grid[YY], set->grid[ZZ],
615 set->cycles*1e-6, cycles*1e-6, maxFluctuationAccepted,
616 pme_lb->nstage);
619 set->cycles = std::min(set->cycles, cycles);
622 if (set->cycles < pme_lb->setup[pme_lb->fastest].cycles)
624 pme_lb->fastest = pme_lb->cur;
626 if (DOMAINDECOMP(cr))
628 /* We found a new fastest setting, ensure that with subsequent
629 * shorter cut-off's the dynamic load balancing does not make
630 * the use of the current cut-off impossible. This solution is
631 * a trade-off, as the PME load balancing and DD domain size
632 * load balancing can interact in complex ways.
633 * With the Verlet kernels, DD load imbalance will usually be
634 * mainly due to bonded interaction imbalance, which will often
635 * quickly push the domain boundaries beyond the limit for the
636 * optimal, PME load balanced, cut-off. But it could be that
637 * better overall performance can be obtained with a slightly
638 * shorter cut-off and better DD load balancing.
640 set_dd_dlb_max_cutoff(cr, pme_lb->setup[pme_lb->fastest].rlistOuter);
643 cycles_fast = pme_lb->setup[pme_lb->fastest].cycles;
645 /* Check in stage 0 if we should stop scanning grids.
646 * Stop when the time is more than maxRelativeSlowdownAccepted times that of the fastest.
648 if (pme_lb->stage == 0 && pme_lb->cur > 0 &&
649 cycles > pme_lb->setup[pme_lb->fastest].cycles*maxRelativeSlowdownAccepted)
651 pme_lb->n = pme_lb->cur + 1;
652 /* Done with scanning, go to stage 1 */
653 switch_to_stage1(pme_lb);
656 if (pme_lb->stage == 0)
658 int gridsize_start;
660 gridsize_start = set->grid[XX]*set->grid[YY]*set->grid[ZZ];
664 if (pme_lb->cur+1 < pme_lb->n)
666 /* We had already generated the next setup */
667 OK = TRUE;
669 else
671 /* Find the next setup */
672 OK = pme_loadbal_increase_cutoff(pme_lb, ir->pme_order, cr->dd);
674 if (!OK)
676 pme_lb->elimited = epmelblimPMEGRID;
680 if (OK &&
681 pme_lb->setup[pme_lb->cur+1].spacing > c_maxSpacingScaling*pme_lb->setup[0].spacing)
683 OK = FALSE;
684 pme_lb->elimited = epmelblimMAXSCALING;
687 if (OK && ir->ePBC != epbcNONE)
689 OK = (gmx::square(pme_lb->setup[pme_lb->cur+1].rlistOuter)
690 <= max_cutoff2(ir->ePBC, state->box));
691 if (!OK)
693 pme_lb->elimited = epmelblimBOX;
697 if (OK)
699 pme_lb->cur++;
701 if (DOMAINDECOMP(cr))
703 OK = change_dd_cutoff(cr, state, ir,
704 pme_lb->setup[pme_lb->cur].rlistOuter);
705 if (!OK)
707 /* Failed: do not use this setup */
708 pme_lb->cur--;
709 pme_lb->elimited = epmelblimDD;
713 if (!OK)
715 /* We hit the upper limit for the cut-off;
716 * the setup range should not go beyond cur.
718 pme_lb->n = pme_lb->cur + 1;
719 print_loadbal_limited(fp_err, fp_log, step, pme_lb);
720 /* Switch to the next stage */
721 switch_to_stage1(pme_lb);
724 while (OK &&
725 !(pme_lb->setup[pme_lb->cur].grid[XX]*
726 pme_lb->setup[pme_lb->cur].grid[YY]*
727 pme_lb->setup[pme_lb->cur].grid[ZZ] <
728 gridsize_start*gridpointsScaleFactor
729 &&
730 pme_lb->setup[pme_lb->cur].grid_efficiency <
731 pme_lb->setup[pme_lb->cur-1].grid_efficiency*relativeEfficiencyFactor));
734 if (pme_lb->stage > 0 && pme_lb->end == 1)
736 pme_lb->cur = pme_lb->lower_limit;
737 pme_lb->stage = pme_lb->nstage;
739 else if (pme_lb->stage > 0 && pme_lb->end > 1)
741 /* If stage = nstage-1:
742 * scan over all setups, rerunning only those setups
743 * which are not much slower than the fastest
744 * else:
745 * use the next setup
746 * Note that we loop backward to minimize the risk of the cut-off
747 * getting limited by DD DLB, since the DLB cut-off limit is set
748 * to the fastest PME setup.
752 if (pme_lb->cur > pme_lb->start)
754 pme_lb->cur--;
756 else
758 pme_lb->stage++;
760 pme_lb->cur = pme_lb->end - 1;
763 while (pme_lb->stage == pme_lb->nstage - 1 &&
764 pme_lb->setup[pme_lb->cur].count > 0 &&
765 pme_lb->setup[pme_lb->cur].cycles > cycles_fast*maxRelativeSlowdownAccepted);
767 if (pme_lb->stage == pme_lb->nstage)
769 /* We are done optimizing, use the fastest setup we found */
770 pme_lb->cur = pme_lb->fastest;
774 if (DOMAINDECOMP(cr) && pme_lb->stage > 0)
776 OK = change_dd_cutoff(cr, state, ir, pme_lb->setup[pme_lb->cur].rlistOuter);
777 if (!OK)
779 /* For some reason the chosen cut-off is incompatible with DD.
780 * We should continue scanning a more limited range of cut-off's.
782 if (pme_lb->cur > 1 && pme_lb->stage == pme_lb->nstage)
784 /* stage=nstage says we're finished, but we should continue
785 * balancing, so we set stage back, undoing the increment just done.
787 pme_lb->stage--;
789 if (pme_lb->cur <= pme_lb->fastest)
791 /* This should not happen, as we set limits on the DLB bounds.
792 * But we implement a complete failsafe solution anyhow.
794 GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
795 "The fastest PP/PME load balancing setting (cutoff %.3f nm) is no longer available due to DD DLB or box size limitations", pme_lb->setup[pme_lb->fastest].rcut_coulomb);
796 pme_lb->fastest = pme_lb->lower_limit;
797 pme_lb->start = pme_lb->lower_limit;
799 /* Limit the range to below the current cut-off, scan from start */
800 pme_lb->end = pme_lb->cur;
801 pme_lb->cur = pme_lb->start;
802 pme_lb->elimited = epmelblimDD;
803 print_loadbal_limited(fp_err, fp_log, step, pme_lb);
807 /* Change the Coulomb cut-off and the PME grid */
809 set = &pme_lb->setup[pme_lb->cur];
811 NbnxnListParameters *listParams = nbv->listParams.get();
813 ic->rcoulomb = set->rcut_coulomb;
814 listParams->rlistOuter = set->rlistOuter;
815 listParams->rlistInner = set->rlistInner;
816 ic->ewaldcoeff_q = set->ewaldcoeff_q;
817 /* TODO: centralize the code that sets the potentials shifts */
818 if (ic->coulomb_modifier == eintmodPOTSHIFT)
820 GMX_RELEASE_ASSERT(ic->rcoulomb != 0, "Cutoff radius cannot be zero");
821 ic->sh_ewald = std::erfc(ic->ewaldcoeff_q*ic->rcoulomb) / ic->rcoulomb;
823 if (EVDW_PME(ic->vdwtype))
825 /* We have PME for both Coulomb and VdW, set rvdw equal to rcoulomb */
826 ic->rvdw = set->rcut_coulomb;
827 ic->ewaldcoeff_lj = set->ewaldcoeff_lj;
828 if (ic->vdw_modifier == eintmodPOTSHIFT)
830 real crc2;
832 ic->dispersion_shift.cpot = -1.0/gmx::power6(static_cast<double>(ic->rvdw));
833 ic->repulsion_shift.cpot = -1.0/gmx::power12(static_cast<double>(ic->rvdw));
834 ic->sh_invrc6 = -ic->dispersion_shift.cpot;
835 crc2 = gmx::square(ic->ewaldcoeff_lj*ic->rvdw);
836 ic->sh_lj_ewald = (std::exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)/gmx::power6(ic->rvdw);
840 /* We always re-initialize the tables whether they are used or not */
841 init_interaction_const_tables(nullptr, ic, rtab);
843 nbnxn_gpu_pme_loadbal_update_param(nbv, ic, listParams);
845 // TODO: with the texture reference support removed, this barrier is
846 // in principle not needed. Remove now or do it in a follow-up?
847 /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
848 * also sharing texture references. To keep the code simple, we don't
849 * treat texture references as shared resources, but this means that
850 * the coulomb_tab texture ref will get updated by multiple threads.
851 * Hence, to ensure that the non-bonded kernels don't start before all
852 * texture binding operations are finished, we need to wait for all ranks
853 * to arrive here before continuing.
855 * Note that we could omit this barrier if GPUs are not shared (or
856 * texture objects are used), but as this is initialization code, there
857 * is no point in complicating things.
859 #if GMX_THREAD_MPI
860 if (PAR(cr) && use_GPU(nbv))
862 gmx_barrier(cr);
864 #endif /* GMX_THREAD_MPI */
866 if (!pme_lb->bSepPMERanks)
868 /* FIXME:
869 * CPU PME keeps a list of allocated pmedata's, that's why pme_lb->setup[pme_lb->cur].pmedata is not always nullptr.
870 * GPU PME, however, currently needs the gmx_pme_reinit always called on load balancing
871 * (pme_gpu_reinit might be not sufficiently decoupled from gmx_pme_init).
872 * This can lead to a lot of reallocations for PME GPU.
873 * Would be nicer if the allocated grid list was hidden within a single pmedata structure.
875 if ((pme_lb->setup[pme_lb->cur].pmedata == nullptr) || pme_gpu_task_enabled(pme_lb->setup[pme_lb->cur].pmedata))
877 /* Generate a new PME data structure,
878 * copying part of the old pointers.
880 gmx_pme_reinit(&set->pmedata,
881 cr, pme_lb->setup[0].pmedata, ir,
882 set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
884 *pmedata = set->pmedata;
886 else
888 /* Tell our PME-only rank to switch grid */
889 gmx_pme_send_switchgrid(cr, set->grid, set->ewaldcoeff_q, set->ewaldcoeff_lj);
892 if (debug)
894 print_grid(nullptr, debug, "", "switched to", set, -1);
897 if (pme_lb->stage == pme_lb->nstage)
899 print_grid(fp_err, fp_log, "", "optimal", set, -1);
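/* Illustrative sketch (not part of the GROMACS sources): the shift constants
 * recomputed above whenever the cut-off changes, in standalone form. With
 * the potential-shift modifier the (LJ-)Ewald potentials are shifted so that
 * they are zero at the cut-off, so the shifts depend on beta and the cut-off.
 * Function names are hypothetical; only <cmath> is used.
 */
#if 0
#include <cmath>

static double exampleCoulombEwaldShift(double beta, double rc)
{
    /* corresponds to ic->sh_ewald above */
    return std::erfc(beta*rc)/rc;
}

static double exampleLjEwaldShift(double betaLj, double rvdw)
{
    /* corresponds to ic->sh_lj_ewald above */
    const double crc2 = betaLj*rvdw*betaLj*rvdw; /* (beta_LJ*rvdw)^2 */
    return (std::exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)/std::pow(rvdw, 6);
}
#endif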
903 /*! \brief Prepare for another round of PME load balancing
905 * \param[in,out] pme_lb Pointer to PME load balancing struct
906 * \param[in] bDlbUnlocked TRUE if DLB was locked and is now unlocked
908 * If the conditions (e.g. DLB off/on, CPU/GPU throttling etc.) changed,
909 * the PP/PME balance might change and re-balancing can improve performance.
910 * This function adds 2 stages and adjusts the considered setup range.
912 static void continue_pme_loadbal(pme_load_balancing_t *pme_lb,
913 gmx_bool bDlbUnlocked)
915 /* Add 2 tuning stages, keep the detected end of the setup range */
916 pme_lb->nstage += 2;
917 if (bDlbUnlocked && pme_lb->bSepPMERanks)
919 /* With separate PME ranks, DLB should always lower the PP load and
920 * can only increase the PME load (more communication and imbalance),
921 * so we only need to scan longer cut-off's.
923 pme_lb->lower_limit = pme_lb->cur;
925 pme_lb->start = pme_lb->lower_limit;
928 void pme_loadbal_do(pme_load_balancing_t *pme_lb,
929 t_commrec *cr,
930 FILE *fp_err,
931 FILE *fp_log,
932 const gmx::MDLogger &mdlog,
933 const t_inputrec *ir,
934 t_forcerec *fr,
935 t_state *state,
936 gmx_wallcycle_t wcycle,
937 gmx_int64_t step,
938 gmx_int64_t step_rel,
939 gmx_bool *bPrinting)
941 int n_prev;
942 double cycles_prev;
944 assert(pme_lb != nullptr);
946 if (!pme_lb->bActive)
948 return;
951 n_prev = pme_lb->cycles_n;
952 cycles_prev = pme_lb->cycles_c;
953 wallcycle_get(wcycle, ewcSTEP, &pme_lb->cycles_n, &pme_lb->cycles_c);
955 /* Before the first step we haven't done any steps yet.
956 * Also handle cases where ir->init_step % ir->nstlist != 0.
958 if (pme_lb->cycles_n < ir->nstlist)
960 return;
962 /* Sanity check, we expect nstlist cycle counts */
963 if (pme_lb->cycles_n - n_prev != ir->nstlist)
965 /* We could return here, but it's safer to issue an error and quit */
966 gmx_incons("pme_loadbal_do called at an interval != nstlist");
969 /* PME grid + cut-off optimization with GPUs or PME ranks */
970 if (!pme_lb->bBalance && pme_lb->bSepPMERanks)
972 if (pme_lb->bTriggerOnDLB)
974 pme_lb->bBalance = dd_dlb_is_on(cr->dd);
976 /* We should ignore the first timing to avoid timing allocation
977 * overhead. And since the PME load balancing is called just
978 * before DD repartitioning, the ratio returned by dd_pme_f_ratio
979 * is not over the last nstlist steps, but the nstlist steps before
980 * that. So the first useful ratio is available at step_rel=3*nstlist.
982 else if (step_rel >= 3*ir->nstlist)
984 if (DDMASTER(cr->dd))
986 /* If PME rank load is too high, start tuning */
987 pme_lb->bBalance =
988 (dd_pme_f_ratio(cr->dd) >= loadBalanceTriggerFactor);
990 dd_bcast(cr->dd, sizeof(gmx_bool), &pme_lb->bBalance);
993 pme_lb->bActive = (pme_lb->bBalance ||
994 step_rel <= pme_lb->step_rel_stop);
997 /* The location in the code of this balancing termination is strange.
998 * You would expect to have it after the call to pme_load_balance()
999 * below, since there pme_lb->stage is updated.
1000 * But when terminating directly after deciding on and selecting the
1001 * optimal setup, DLB will turn on right away if it was locked before.
1002 * This might be due to PME reinitialization. So we check stage here
1003 * to allow for another nstlist steps with DLB locked to stabilize
1004 * the performance.
1006 if (pme_lb->bBalance && pme_lb->stage == pme_lb->nstage)
1008 pme_lb->bBalance = FALSE;
1010 if (DOMAINDECOMP(cr) && dd_dlb_is_locked(cr->dd))
1012 /* Unlock the DLB=auto, DLB is allowed to activate */
1013 dd_dlb_unlock(cr->dd);
1014 GMX_LOG(mdlog.warning).asParagraph().appendText("NOTE: DLB can now turn on, when beneficial");
1016 /* We don't deactivate the tuning yet, since we will balance again
1017 * after DLB gets turned on, if it does within PMETunePeriod*nstlist steps.
1019 continue_pme_loadbal(pme_lb, TRUE);
1020 pme_lb->bTriggerOnDLB = TRUE;
1021 pme_lb->step_rel_stop = step_rel + PMETunePeriod*ir->nstlist;
1023 else
1025 /* We're completely done with PME tuning */
1026 pme_lb->bActive = FALSE;
1029 if (DOMAINDECOMP(cr))
1031 /* Set the cut-off limit to the final selected cut-off,
1032 * so we don't have artificial DLB limits.
1033 * This also ensures that we won't disable the currently
1034 * optimal setting during a second round of PME balancing.
1036 set_dd_dlb_max_cutoff(cr, fr->nbv->listParams->rlistOuter);
1040 if (pme_lb->bBalance)
1042 /* We might not have collected nstlist steps in cycles yet,
1043 * since init_step might not be a multiple of nstlist,
1044 * but the first data collected is skipped anyhow.
1046 pme_load_balance(pme_lb, cr,
1047 fp_err, fp_log, mdlog,
1048 ir, state, pme_lb->cycles_c - cycles_prev,
1049 fr->ic, fr->nbv, &fr->pmedata,
1050 step);
1052 /* Update deprecated rlist in forcerec to stay in sync with fr->nbv */
1053 fr->rlist = fr->nbv->listParams->rlistOuter;
1055 if (ir->eDispCorr != edispcNO)
1057 calc_enervirdiff(nullptr, ir->eDispCorr, fr);
1061 if (!pme_lb->bBalance &&
1062 (!pme_lb->bSepPMERanks || step_rel > pme_lb->step_rel_stop))
1064 /* We have just deactivated the balancing and we're not measuring PP/PME
1065 * imbalance during the first steps of the run: deactivate the tuning.
1067 pme_lb->bActive = FALSE;
1070 if (!(pme_lb->bActive) && DOMAINDECOMP(cr) && dd_dlb_is_locked(cr->dd))
1072 /* Make sure DLB is allowed when we deactivate PME tuning */
1073 dd_dlb_unlock(cr->dd);
1074 GMX_LOG(mdlog.warning).asParagraph().appendText("NOTE: DLB can now turn on, when beneficial");
1077 *bPrinting = pme_lb->bBalance;
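/* Illustrative sketch (not part of the GROMACS sources): the expected call
 * cadence for pme_loadbal_do(). The cycle-count sanity check above assumes
 * exactly one call per nstlist steps, i.e. at every pair-search step. The
 * wrapper and variable names are hypothetical, not the actual md.cpp code.
 */
#if 0
static void exampleDriveTuning(pme_load_balancing_t *pmeLoadBal,
                               t_commrec *cr, FILE *fpErr, FILE *fpLog,
                               const gmx::MDLogger &mdlog,
                               const t_inputrec *ir, t_forcerec *fr,
                               t_state *state, gmx_wallcycle_t wcycle,
                               gmx_int64_t step, gmx_int64_t stepRel,
                               gmx_bool *bPrinting, bool isPairSearchStep)
{
    if (isPairSearchStep)
    {
        /* Returns immediately once tuning has been deactivated */
        pme_loadbal_do(pmeLoadBal, cr, fpErr, fpLog, mdlog, ir, fr, state,
                       wcycle, step, stepRel, bPrinting);
    }
}
#endif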
1080 /*! \brief Return product of the number of PME grid points in each dimension */
1081 static int pme_grid_points(const pme_setup_t *setup)
1083 return setup->grid[XX]*setup->grid[YY]*setup->grid[ZZ];
1086 /*! \brief Print one load-balancing setting */
1087 static void print_pme_loadbal_setting(FILE *fplog,
1088 const char *name,
1089 const pme_setup_t *setup)
1091 fprintf(fplog,
1092 " %-7s %6.3f nm %6.3f nm %3d %3d %3d %5.3f nm %5.3f nm\n",
1093 name,
1094 setup->rcut_coulomb, setup->rlistInner,
1095 setup->grid[XX], setup->grid[YY], setup->grid[ZZ],
1096 setup->spacing, 1/setup->ewaldcoeff_q);
1099 /*! \brief Print all load-balancing settings */
1100 static void print_pme_loadbal_settings(pme_load_balancing_t *pme_lb,
1101 FILE *fplog,
1102 const gmx::MDLogger &mdlog,
1103 gmx_bool bNonBondedOnGPU)
1105 double pp_ratio, grid_ratio;
1106 real pp_ratio_temporary;
1108 pp_ratio_temporary = pme_lb->setup[pme_lb->cur].rlistInner / pme_lb->setup[0].rlistInner;
1109 pp_ratio = gmx::power3(pp_ratio_temporary);
1110 grid_ratio = pme_grid_points(&pme_lb->setup[pme_lb->cur])/
1111 (double)pme_grid_points(&pme_lb->setup[0]);
1113 fprintf(fplog, "\n");
1114 fprintf(fplog, " P P - P M E L O A D B A L A N C I N G\n");
1115 fprintf(fplog, "\n");
1116 /* Here we only warn when the optimal setting is the last one */
1117 if (pme_lb->elimited != epmelblimNO &&
1118 pme_lb->cur == pme_loadbal_end(pme_lb)-1)
1120 fprintf(fplog, " NOTE: The PP/PME load balancing was limited by the %s,\n",
1121 pmelblim_str[pme_lb->elimited]);
1122 fprintf(fplog, " you might not have reached a good load balance.\n");
1123 if (pme_lb->elimited == epmelblimDD)
1125 fprintf(fplog, " Try different mdrun -dd settings or lower the -dds value.\n");
1127 fprintf(fplog, "\n");
1129 fprintf(fplog, " PP/PME load balancing changed the cut-off and PME settings:\n");
1130 fprintf(fplog, " particle-particle PME\n");
1131 fprintf(fplog, " rcoulomb rlist grid spacing 1/beta\n");
1132 print_pme_loadbal_setting(fplog, "initial", &pme_lb->setup[0]);
1133 print_pme_loadbal_setting(fplog, "final", &pme_lb->setup[pme_lb->cur]);
1134 fprintf(fplog, " cost-ratio %4.2f %4.2f\n",
1135 pp_ratio, grid_ratio);
1136 fprintf(fplog, " (note that these numbers concern only part of the total PP and PME load)\n");
1138 if (pp_ratio > 1.5 && !bNonBondedOnGPU)
1140 GMX_LOG(mdlog.warning).asParagraph().appendText(
1141 "NOTE: PME load balancing increased the non-bonded workload by more than 50%.\n"
1142 " For better performance, use (more) PME ranks (mdrun -npme),\n"
1143 " or if you are beyond the scaling limit, use fewer total ranks (or nodes).");
1145 else
1147 fprintf(fplog, "\n");
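/* Illustrative sketch (not part of the GROMACS sources): what the cost-ratio
 * line printed above expresses, with made-up numbers. The particle-particle
 * (PP) cost grows with the volume of the cut-off sphere, i.e. with rlist^3,
 * while the PME cost scales with the number of grid points.
 */
#if 0
#include <cmath>

static void exampleCostRatios()
{
    const double rlistInitial = 1.0, rlistFinal = 1.2;            /* nm          */
    const double gridInitial  = 64*64*64, gridFinal = 48*48*48;   /* grid points */

    const double ppRatio   = std::pow(rlistFinal/rlistInitial, 3); /* ~1.73 */
    const double gridRatio = gridFinal/gridInitial;                /* ~0.42 */
    (void)ppRatio;
    (void)gridRatio;
}
#endif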
1151 void pme_loadbal_done(pme_load_balancing_t *pme_lb,
1152 FILE *fplog,
1153 const gmx::MDLogger &mdlog,
1154 gmx_bool bNonBondedOnGPU)
1156 if (fplog != nullptr && (pme_lb->cur > 0 || pme_lb->elimited != epmelblimNO))
1158 print_pme_loadbal_settings(pme_lb, fplog, mdlog, bNonBondedOnGPU);
1161 /* TODO: Here we should free all pointers in pme_lb,
1162 * but as it contains pme data structures,
1163 * we need to first make pme.c free all data.