Apply clang-tidy-8 readability-uppercase-literal-suffix
gromacs.git: src/gromacs/domdec/dlbtiming.cpp (blob 8466bfc69c192289f96082d6a220d252bfe0112d)
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "dlbtiming.h"

#include <cstring>

#include "gromacs/domdec/domdec.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/utility/gmxassert.h"

#include "domdec_internal.h"
/*! \brief Struct for timing the region for dynamic load balancing */
struct BalanceRegion
{
    /*! \brief Constructor */
    BalanceRegion() :
        isOpen(false),
        isOpenOnCpu(false),
        isOpenOnGpu(false),
        cyclesOpenCpu(0),
        cyclesLastCpu(0)
    {
    }

    bool         isOpen;        /**< Are we in an open balancing region? */
    bool         isOpenOnCpu;   /**< Is the currently open region still open on the CPU side? */
    bool         isOpenOnGpu;   /**< Is the currently open region open on the GPU side? */
    gmx_cycles_t cyclesOpenCpu; /**< Cycle count when opening the CPU region */
    gmx_cycles_t cyclesLastCpu; /**< Cycle count at the last call to \p closeRegionCpuImpl() */
};
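
/* The handlers below keep these flags consistent: isOpenOnCpu and isOpenOnGpu
 * are only set while isOpen is true, and the CPU side of a region is always
 * closed before the GPU side (see the assertions in the close functions).
 */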
BalanceRegion *ddBalanceRegionAllocate()
{
    return new BalanceRegion;
}

/*! \brief Returns the pointer to the balance region.
 *
 * This should be replaced by a properly managed BalanceRegion class,
 * but that requires a lot of refactoring in domdec.cpp.
 */
static BalanceRegion *getBalanceRegion(const gmx_domdec_t *dd)
{
    GMX_ASSERT(dd != nullptr && dd->comm != nullptr, "Balance regions should only be used with DD");
    BalanceRegion *region = dd->comm->balanceRegion;
    GMX_ASSERT(region != nullptr, "Balance region should be initialized before use");
    return region;
}
void DDBalanceRegionHandler::openRegionCpuImpl(DdAllowBalanceRegionReopen gmx_unused allowReopen) const
{
    BalanceRegion *reg = getBalanceRegion(dd_);
    if (dd_->comm->bRecordLoad)
    {
        GMX_ASSERT(allowReopen == DdAllowBalanceRegionReopen::yes || !reg->isOpen,
                   "Should not open an already opened region");

        reg->cyclesOpenCpu = gmx_cycles_read();
        reg->isOpen        = true;
        reg->isOpenOnCpu   = true;
        reg->isOpenOnGpu   = false;
    }
}

void DDBalanceRegionHandler::openRegionGpuImpl() const
{
    BalanceRegion *reg = getBalanceRegion(dd_);
    GMX_ASSERT(reg->isOpen, "Can only open a GPU region inside an open CPU region");
    GMX_ASSERT(!reg->isOpenOnGpu, "Can not re-open a GPU balance region");
    reg->isOpenOnGpu = true;
}

void ddReopenBalanceRegionCpu(const gmx_domdec_t *dd)
{
    BalanceRegion *reg = getBalanceRegion(dd);
    /* If the GPU is busy, don't reopen as we are overlapping with work */
    if (reg->isOpen && !reg->isOpenOnGpu)
    {
        reg->cyclesOpenCpu = gmx_cycles_read();
    }
}
void DDBalanceRegionHandler::closeRegionCpuImpl() const
{
    BalanceRegion *reg = getBalanceRegion(dd_);
    if (reg->isOpen && reg->isOpenOnCpu)
    {
        GMX_ASSERT(reg->isOpenOnCpu, "Can only close an open region");
        gmx_cycles_t cycles = gmx_cycles_read();
        reg->isOpenOnCpu    = false;

        if (reg->isOpenOnGpu)
        {
            /* Store the cycles for estimating the GPU/CPU overlap time */
            reg->cyclesLastCpu = cycles;
        }
        else
        {
            /* We can close the region */
            float cyclesCpu = cycles - reg->cyclesOpenCpu;
            dd_cycles_add(dd_, cyclesCpu, ddCyclF);
            reg->isOpen = false;
        }
    }
}
void DDBalanceRegionHandler::closeRegionGpuImpl(float                       waitGpuCyclesInCpuRegion,
                                                DdBalanceRegionWaitedForGpu waitedForGpu) const
{
    BalanceRegion *reg = getBalanceRegion(dd_);
    if (reg->isOpen)
    {
        GMX_ASSERT(reg->isOpenOnGpu, "Can not close a non-open GPU balance region");
        GMX_ASSERT(!reg->isOpenOnCpu, "The GPU region should be closed after closing the CPU region");

        float waitGpuCyclesEstimate = gmx_cycles_read() - reg->cyclesLastCpu;
        if (waitedForGpu == DdBalanceRegionWaitedForGpu::no)
        {
            /* The actual time could be anywhere between 0 and
             * waitGpuCyclesEstimate. Using half is the best we can do.
             */
            const float unknownWaitEstimateFactor = 0.5F;
            waitGpuCyclesEstimate *= unknownWaitEstimateFactor;
        }

        float cyclesCpu = reg->cyclesLastCpu - reg->cyclesOpenCpu;
        dd_cycles_add(dd_, cyclesCpu + waitGpuCyclesEstimate, ddCyclF);

        /* Register the total GPU wait time, to redistribute with GPU sharing */
        dd_cycles_add(dd_, waitGpuCyclesInCpuRegion + waitGpuCyclesEstimate, ddCyclWaitGPU);

        /* Close the region */
        reg->isOpenOnGpu = false;
        reg->isOpen      = false;
    }
}
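
/* For orientation, a minimal sketch of the call ordering that the assertions
 * above enforce over one MD step. It is illustrative only: in the real code
 * these *Impl methods are meant to be reached through the handler's public
 * interface declared in dlbtiming.h, and the handler and cycle-count
 * variables below are hypothetical placeholders.
 *
 *   handler.openRegionCpuImpl(DdAllowBalanceRegionReopen::no); // start timing CPU force work
 *   ddReopenBalanceRegionCpu(dd);    // restart CPU timing after e.g. halo communication
 *   handler.openRegionGpuImpl();     // GPU work launched inside the region
 *                                    //   (further reopen calls are now no-ops)
 *   handler.closeRegionCpuImpl();    // CPU work done; stores cyclesLastCpu for the overlap estimate
 *   handler.closeRegionGpuImpl(waitGpuCyclesInCpuRegion,
 *                              DdBalanceRegionWaitedForGpu::yes); // after waiting for GPU results
 */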
//! Accumulates flop counts for force calculations.
static double force_flop_count(const t_nrnb *nrnb)
{
    int         i;
    double      sum;
    const char *name;

    sum = 0;
    for (i = 0; i < eNR_NBKERNEL_FREE_ENERGY; i++)
    {
        /* To get closer to the real timings, we halve the count
         * for the normal loops and halve it again for water loops.
         */
        name = nrnb_str(i);
        if (strstr(name, "W3") != nullptr || strstr(name, "W4") != nullptr)
        {
            sum += nrnb->n[i]*0.25*cost_nrnb(i);
        }
        else
        {
            sum += nrnb->n[i]*0.50*cost_nrnb(i);
        }
    }
    for (i = eNR_NBKERNEL_FREE_ENERGY; i <= eNR_NB14; i++)
    {
        name = nrnb_str(i);
        if (strstr(name, "W3") != nullptr || strstr(name, "W4") != nullptr)
        {
            sum += nrnb->n[i]*cost_nrnb(i);
        }
    }
    for (i = eNR_BONDS; i <= eNR_WALLS; i++)
    {
        sum += nrnb->n[i]*cost_nrnb(i);
    }

    return sum;
}
void dd_force_flop_start(gmx_domdec_t *dd, t_nrnb *nrnb)
{
    if (dd->comm->eFlop)
    {
        dd->comm->flop -= force_flop_count(nrnb);
    }
}

void dd_force_flop_stop(gmx_domdec_t *dd, t_nrnb *nrnb)
{
    if (dd->comm->eFlop)
    {
        dd->comm->flop += force_flop_count(nrnb);
        dd->comm->flop_n++;
    }
}
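
/* A minimal usage sketch (hypothetical call site, not part of this file):
 * the start/stop pair brackets the force computation, so only the flops
 * counted in nrnb between the two calls are accumulated into comm->flop:
 *
 *   dd_force_flop_start(dd, nrnb);   // subtracts the current count
 *   computeForces(...);              // hypothetical force-computation call
 *   dd_force_flop_stop(dd, nrnb);    // adds the new count; the net is the flops done in between
 */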
void clear_dd_cycle_counts(gmx_domdec_t *dd)
{
    int i;

    for (i = 0; i < ddCyclNr; i++)
    {
        dd->comm->cycl[i]     = 0;
        dd->comm->cycl_n[i]   = 0;
        dd->comm->cycl_max[i] = 0;
    }
    dd->comm->flop   = 0;
    dd->comm->flop_n = 0;
}