/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "gmxpre.h"

#include "resource-division.h"

#include "config.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <string>
#include <vector>

#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/detecthardware.h"
#include "gromacs/hardware/gpu_hw_info.h"
#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"


/* DISCLAIMER: All the atom count and thread numbers below are heuristic.
 * The real switching points will depend on the simulated system,
 * the algorithms used and the hardware it's running on, as well as on
 * whether other jobs are running on the same machine. We try to take into
 * account factors that have a large influence, such as recent Intel CPUs
 * being much better at wide multi-threading. The remaining factors should
 * (hopefully) have a small influence, such that the performance just before
 * and after a switch point doesn't change too much.
 */

static const bool bHasOmpSupport = GMX_OPENMP;

#if GMX_THREAD_MPI
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
 * the number of threads will get lowered.
 */
static const int min_atoms_per_mpi_thread =  90;
static const int min_atoms_per_gpu        = 900;
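/* For example, a 9000-atom system would be limited to at most
 * 9000/90 = 100 tMPI ranks, or 9000/900 = 10 ranks when each rank
 * drives a GPU.
 */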
#endif /* GMX_THREAD_MPI */

/* TODO choose nthreads_omp based on hardware topology
   when we have a hardware topology detection library */
/* First we consider the case of no MPI (1 MPI rank).
 * In general, when running up to 8 threads, OpenMP should be faster.
 * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
 * On Intel>=Nehalem running OpenMP on a single CPU is always faster,
 * even on two CPUs it's usually faster (but with many OpenMP threads
 * it could be faster not to use HT, currently we always use HT).
 * On Nehalem/Westmere we want to avoid running 16 threads over
 * two CPUs with HT, so we need a limit<16; thus we use 12.
 * A reasonable limit for Intel Sandy and Ivy Bridge,
 * not knowing the topology, is 16 threads.
 * Below we check for Intel and AVX, which for now includes
 * Sandy/Ivy Bridge, Haswell/Broadwell. By checking for AVX instead of
 * model numbers we also cover future Intel CPUs.
 */
const int nthreads_omp_faster_default   =  8;
const int nthreads_omp_faster_Nehalem   = 12;
const int nthreads_omp_faster_Intel_AVX = 16;

/* For CPU-only runs the fastest options are usually MPI or OpenMP only.
 * With one GPU, using MPI only is almost never optimal, so we need to
 * compare running pure OpenMP with combined MPI+OpenMP. This means higher
 * OpenMP thread counts can still be ok. Multiplying the numbers above
 * by a factor of 2 seems to be a good estimate.
 */
const int nthreads_omp_faster_gpu_fac = 2;

/* This is the case with MPI (2 or more MPI PP ranks).
 * By default we will terminate with a fatal error when more than 8
 * OpenMP threads are (indirectly) requested, since using fewer threads
 * nearly always results in better performance.
 * With thread-MPI and multiple GPUs or one GPU and too many threads
 * we first try 6 OpenMP threads and then fewer until the number of MPI ranks
 * is divisible by the number of GPUs.
 */
#if GMX_OPENMP && GMX_MPI
const int nthreads_omp_mpi_ok_max     = 8;
const int nthreads_omp_mpi_ok_min_cpu = 1;
#endif
const int nthreads_omp_mpi_ok_min_gpu = 2;
const int nthreads_omp_mpi_target_max = 6;


/* Returns the maximum OpenMP thread count for which using a single MPI rank
 * should be faster than using multiple ranks with the same total thread count.
 */
static int nthreads_omp_faster(const gmx::CpuInfo &cpuInfo, gmx_bool bUseGPU)
{
    int nth;

    if (cpuInfo.vendor() == gmx::CpuInfo::Vendor::Intel &&
        cpuInfo.feature(gmx::CpuInfo::Feature::X86_Avx))
    {
        nth = nthreads_omp_faster_Intel_AVX;
    }
    else if (gmx::cpuIsX86Nehalem(cpuInfo))
    {
        // Intel Nehalem
        nth = nthreads_omp_faster_Nehalem;
    }
    else
    {
        nth = nthreads_omp_faster_default;
    }

    if (bUseGPU)
    {
        nth *= nthreads_omp_faster_gpu_fac;
    }

    nth = std::min(nth, GMX_OPENMP_MAX_THREADS);
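
    /* For example, an Intel CPU with AVX driving a GPU yields
     * min(16*2, GMX_OPENMP_MAX_THREADS) threads.
     */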

    return nth;
}

/* Returns the maximum OpenMP thread count that passes the efficiency check */
static int nthreads_omp_efficient_max(int gmx_unused      nrank,
                                      const gmx::CpuInfo &cpuInfo,
                                      gmx_bool            bUseGPU)
{
#if GMX_OPENMP && GMX_MPI
    if (nrank > 1)
    {
        return nthreads_omp_mpi_ok_max;
    }
    else
#endif
    {
        return nthreads_omp_faster(cpuInfo, bUseGPU);
    }
}

/* Return the number of thread-MPI ranks to use.
 * This is chosen such that we can always obey our own efficiency checks.
 */
static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        int                  nthreads_tot,
                                        int                  ngpu)
{
    int                 nrank;
    const gmx::CpuInfo &cpuInfo = *hwinfo->cpuInfo;

    GMX_RELEASE_ASSERT(nthreads_tot > 0, "There must be at least one thread per rank");

    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
    if (ngpu > 0)
    {
        nrank = ngpu;

        /* When the user sets nthreads_omp, we can end up oversubscribing CPU cores
         * if we simply start as many ranks as GPUs. To avoid this, we start as few
         * tMPI ranks as necessary to avoid oversubscription and instead leave GPUs idle.
         * If the user does not set the number of OpenMP threads, nthreads_omp==0 and
         * this code has no effect.
         */
        GMX_RELEASE_ASSERT(hw_opt->nthreads_omp >= 0, "nthreads_omp is negative, but previous checks should have prevented this");
        while (nrank*hw_opt->nthreads_omp > hwinfo->nthreads_hw_avail && nrank > 1)
        {
            nrank--;
        }

        if (nthreads_tot < nrank)
        {
            /* #thread < #gpu is very unlikely, but if so: waste gpu(s) */
            nrank = nthreads_tot;
        }
        else if (gmx_gpu_sharing_supported() &&
                 (nthreads_tot > nthreads_omp_faster(cpuInfo, ngpu > 0) ||
                  (ngpu > 1 && nthreads_tot/ngpu > nthreads_omp_mpi_target_max)))
        {
            /* The high OpenMP thread count will likely result in sub-optimal
             * performance. Increase the rank count to reduce the thread count
             * per rank. This will lead to GPU sharing by MPI ranks/threads.
             */
            int nshare;

            /* Increase the rank count as long as we have more than 6 OpenMP
             * threads per rank or the number of hardware threads is not
             * divisible by the rank count. Don't go below 2 OpenMP threads.
             */
            nshare = 1;
            do
            {
                nshare++;
                nrank = ngpu*nshare;
            }
            while (nthreads_tot/nrank > nthreads_omp_mpi_target_max ||
                   (nthreads_tot/(ngpu*(nshare + 1)) >= nthreads_omp_mpi_ok_min_gpu && nthreads_tot % nrank != 0));
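
            /* For example, with 1 GPU and nthreads_tot = 32 this loop stops
             * at nshare = 8: 8 ranks with 4 OpenMP threads each share the
             * GPU, since then 32/nrank <= 6 and 32 % nrank == 0.
             */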
        }
    }
    else if (hw_opt->nthreads_omp > 0)
    {
        /* Here we could oversubscribe; when we do, we issue a warning later */
        nrank = std::max(1, nthreads_tot/hw_opt->nthreads_omp);
    }
    else
    {
        if (nthreads_tot <= nthreads_omp_faster(cpuInfo, ngpu > 0))
        {
            /* Use pure OpenMP parallelization */
            nrank = 1;
        }
        else
        {
            /* Don't use OpenMP parallelization */
            nrank = nthreads_tot;
        }
    }

    return nrank;
}


static int getMaxGpuUsable(const gmx::MDLogger &mdlog, const gmx_hw_info_t *hwinfo,
                           int cutoff_scheme, gmx_bool bUseGpu)
{
    /* This code relies on the fact that GPUs are not detected when GPU
     * acceleration was disabled at run time by the user.
     */
    if (cutoff_scheme == ecutsVERLET &&
        bUseGpu &&
        hwinfo->gpu_info.n_dev_compatible > 0)
    {
        if (gmx_multiple_gpu_per_node_supported())
        {
            return hwinfo->gpu_info.n_dev_compatible;
        }
        else
        {
            if (hwinfo->gpu_info.n_dev_compatible > 1)
            {
                GMX_LOG(mdlog.warning).asParagraph().appendText("More than one compatible GPU is available, but GROMACS can only use one of them. Using a single thread-MPI rank.");
            }
            return 1;
        }
    }
    else
    {
        return 0;
    }
}


#if GMX_THREAD_MPI
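
/* Returns whether simultaneous multithreading (SMT, Hyper-Threading) is
 * enabled, judged from the first core of the first socket; the check
 * assumes the SMT setting is uniform across the machine.
 */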
static bool
gmxSmtIsEnabled(const gmx::HardwareTopology &hwTop)
{
    return (hwTop.supportLevel() >= gmx::HardwareTopology::SupportLevel::Basic && hwTop.machine().sockets[0].cores[0].hwThreads.size() > 1);
}

namespace
{

class SingleRankChecker
{
    public:
        //! Constructor
        SingleRankChecker() : value_(false), reasons_() {}
        /*! \brief Call this function for each possible condition
            under which a single rank is required, along with a string
            describing the constraint when it is applied. */
        void applyConstraint(bool condition, const char *description)
        {
            if (condition)
            {
                value_ = true;
                reasons_.push_back(gmx::formatString("%s only supports a single rank.", description));
            }
        }
        //! After applying any conditions, is a single rank required?
        bool mustUseOneRank() const
        {
            return value_;
        }
        /*! \brief Return a formatted string to use when writing a
            message when a single rank is required (or empty if no
            constraint exists). */
        std::string getMessage() const
        {
            return formatAndJoin(reasons_, "\n", gmx::IdentityFormatter());
        }
    private:
        bool                     value_;
        std::vector<std::string> reasons_;
};

} // namespace

/* Get the number of MPI ranks to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than the GPU count.
 */
int get_nthreads_mpi(const gmx_hw_info_t *hwinfo,
                     gmx_hw_opt_t        *hw_opt,
                     const t_inputrec    *inputrec,
                     const gmx_mtop_t    *mtop,
                     const gmx::MDLogger &mdlog,
                     gmx_bool             bUseGpu,
                     bool                 doMembed)
{
    int                          nthreads_hw, nthreads_tot_max, nrank, ngpu;
    int                          min_atoms_per_mpi_rank;

    const gmx::CpuInfo          &cpuInfo = *hwinfo->cpuInfo;
    const gmx::HardwareTopology &hwTop   = *hwinfo->hardwareTopology;

    {
        /* Check if an algorithm does not support parallel simulation. */
        // TODO This might work better if e.g. implemented algorithms
        // had to define a function that returns such requirements,
        // and a description string.
        SingleRankChecker checker;
        checker.applyConstraint(inputrec->eI == eiLBFGS, "L-BFGS minimization");
        checker.applyConstraint(inputrec->coulombtype == eelEWALD, "Plain Ewald electrostatics");
        checker.applyConstraint(doMembed, "Membrane embedding");
        if (checker.mustUseOneRank())
        {
            std::string message = checker.getMessage();
            if (hw_opt->nthreads_tmpi > 1)
            {
                gmx_fatal(FARGS, "%s However, you asked for more than 1 thread-MPI rank, so mdrun cannot continue. Choose a single rank, or a different algorithm.", message.c_str());
            }
            GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("%s Choosing to use only a single thread-MPI rank.", message.c_str());
            return 1;
        }
    }

    if (hw_opt->nthreads_tmpi > 0)
    {
        /* Trivial, return right away */
        return hw_opt->nthreads_tmpi;
    }

    nthreads_hw = hwinfo->nthreads_hw_avail;

    if (nthreads_hw <= 0)
    {
        /* This should normally not happen, but if it does, we handle it */
        gmx_fatal(FARGS, "The number of available hardware threads can not be detected, please specify the number of MPI ranks and the number of OpenMP threads (if supported) manually with options -ntmpi and -ntomp, respectively");
    }

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
    {
        nthreads_tot_max = hw_opt->nthreads_tot;
    }
    else
    {
        nthreads_tot_max = nthreads_hw;
    }

    ngpu = getMaxGpuUsable(mdlog, hwinfo, inputrec->cutoff_scheme, bUseGpu);

    if (inputrec->cutoff_scheme == ecutsGROUP)
    {
        /* We checked this before, but it doesn't hurt to do it once more */
        GMX_RELEASE_ASSERT(hw_opt->nthreads_omp == 1, "The group scheme only supports one OpenMP thread per rank");
    }

    nrank =
        get_tmpi_omp_thread_division(hwinfo, hw_opt, nthreads_tot_max, ngpu);

    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
    {
        /* Dims/steps are divided over the nodes instead of splitting the atoms.
         * With NM we can't have more ranks than #atoms*#dim. With TPI it's
         * unlikely we have fewer atoms than ranks, and if so, communication
         * would become a bottleneck, so we set the limit to 1 atom/rank.
         */
        min_atoms_per_mpi_rank = 1;
    }
    else
    {
        if (ngpu >= 1)
        {
            min_atoms_per_mpi_rank = min_atoms_per_gpu;
        }
        else
        {
            min_atoms_per_mpi_rank = min_atoms_per_mpi_thread;
        }
    }

    if (mtop->natoms/nrank < min_atoms_per_mpi_rank)
    {
        int nrank_new;

        /* the rank number was chosen automatically, but there are too few
           atoms per rank, so we need to reduce the rank count */
        nrank_new = std::max(1, mtop->natoms/min_atoms_per_mpi_rank);

        /* Avoid partial use of Hyper-Threading */
        if (gmxSmtIsEnabled(hwTop) &&
            nrank_new > nthreads_hw/2 && nrank_new < nthreads_hw)
        {
            nrank_new = nthreads_hw/2;
        }
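
        /* For example, with 16 cores and 32 hardware threads, nrank_new = 20
         * would put two ranks on some cores and one on others; reducing it
         * to 16 gives every rank a full physical core.
         */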

        /* If the user specified the total thread count, ensure this is
         * divisible by the number of ranks.
         * It is quite likely that we have too many total threads compared
         * to the size of the system, but if the user asked for this many
         * threads we should respect that.
         */
        while (hw_opt->nthreads_tot > 0 &&
               hw_opt->nthreads_tot % nrank_new != 0)
        {
            nrank_new--;
        }

        /* Avoid large prime numbers in the rank count */
        if (nrank_new >= 6)
        {
            /* Use only 6,8,10 with additional factors of 2 */
            int fac;

            fac = 2;
            while (3*fac*2 <= nrank_new)
            {
                fac *= 2;
            }

            nrank_new = (nrank_new/fac)*fac;
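
            /* For example, nrank_new = 11 gives fac = 2 and reduces to 10,
             * while nrank_new = 25 gives fac = 8 and reduces to 24.
             */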
        }
        else
        {
            /* Avoid 5, since a small system won't fit 5 domains along
             * a dimension. This might lead to wasting some cores, but this
             * will have a small impact in this regime of very small systems.
             */
            if (nrank_new == 5)
            {
                nrank_new = 4;
            }
        }

        nrank = nrank_new;

        /* We reduced the number of tMPI ranks, which means we might violate
         * our own efficiency checks if we simply use all hardware threads.
         */
        if (bHasOmpSupport && hw_opt->nthreads_omp <= 0 && hw_opt->nthreads_tot <= 0)
        {
            /* The user set neither the total nor the OpenMP thread count,
             * we should use all hardware threads, unless we will violate
             * our own efficiency limitation on the thread count.
             */
            int  nt_omp_max;

            nt_omp_max = nthreads_omp_efficient_max(nrank, cpuInfo, ngpu >= 1);

            if (nrank*nt_omp_max < hwinfo->nthreads_hw_avail)
            {
                /* Limit the number of OpenMP threads to start */
                hw_opt->nthreads_omp = nt_omp_max;
            }
        }

        fprintf(stderr, "\n");
        fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr, "      only starting %d thread-MPI ranks.\n", nrank);
        fprintf(stderr, "      You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");
    }

    return nrank;
}
#endif /* GMX_THREAD_MPI */


void check_resource_division_efficiency(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        gmx_bool             bNtOmpOptionSet,
                                        t_commrec           *cr,
                                        const gmx::MDLogger &mdlog)
{
#if GMX_OPENMP && GMX_MPI
    int         nth_omp_min, nth_omp_max, ngpu;
    char        buf[1000];
#if GMX_THREAD_MPI
    const char *mpi_option = " (option -ntmpi)";
#else
    const char *mpi_option = "";
#endif

    /* This function should be called after thread-MPI (when configured) and
     * OpenMP have been initialized. Check that here.
     */
#if GMX_THREAD_MPI
    GMX_RELEASE_ASSERT(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max, "Inconsistent OpenMP thread count default values");
    GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");
#endif
    GMX_RELEASE_ASSERT(gmx_omp_nthreads_get(emntDefault) >= 1, "Must have at least one OpenMP thread");

    nth_omp_min = gmx_omp_nthreads_get(emntDefault);
    nth_omp_max = gmx_omp_nthreads_get(emntDefault);
    ngpu        = hw_opt->gpu_opt.n_dev_use;

    /* Thread-MPI seems to have a bug with reduce on 1 node, so use a cond. */
    if (cr->nnodes + cr->npmenodes > 1)
    {
        int count[3], count_max[3];

        count[0] = -nth_omp_min;
        count[1] =  nth_omp_max;
        count[2] =  ngpu;
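
        /* Negating the minimum lets a single MPI_MAX reduction recover both
         * the minimum (as -count_max[0]) and the maximum across the ranks.
         */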
        MPI_Allreduce(count, count_max, 3, MPI_INT, MPI_MAX, cr->mpi_comm_mysim);

        /* In case of an inhomogeneous run setup we use the maximum counts */
        nth_omp_min = -count_max[0];
        nth_omp_max =  count_max[1];
        ngpu        =  count_max[2];
    }

    int nthreads_omp_mpi_ok_min;

    if (ngpu == 0)
    {
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_cpu;
    }
    else
    {
        /* With GPUs we set the minimum number of OpenMP threads to 2 to catch
         * cases where the user specifies #ranks == #cores.
         */
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_gpu;
    }

    if (DOMAINDECOMP(cr) && cr->nnodes > 1)
    {
        if (nth_omp_max < nthreads_omp_mpi_ok_min ||
            (!(ngpu > 0 && !gmx_gpu_sharing_supported()) &&
             nth_omp_max > nthreads_omp_mpi_ok_max))
        {
            /* Note that we print target_max here, not ok_max */
            sprintf(buf, "Your choice of number of MPI ranks and amount of resources results in using %d OpenMP threads per rank, which is most likely inefficient. The optimum is usually between %d and %d threads per rank.",
                    nth_omp_max,
                    nthreads_omp_mpi_ok_min,
                    nthreads_omp_mpi_target_max);

            if (bNtOmpOptionSet)
            {
                GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("NOTE: %s", buf);
            }
            else
            {
                /* This fatal error, and the one below, is nasty, but it's
                 * probably the only way to ensure that all users don't waste
                 * a lot of resources, since many users don't read logs/stderr.
                 */
                gmx_fatal(FARGS, "%s If you want to run with this setup, specify the -ntomp option. But we suggest to change the number of MPI ranks%s.", buf, mpi_option);
            }
        }
    }
    else
    {
        const gmx::CpuInfo &cpuInfo = *hwinfo->cpuInfo;

        /* No domain decomposition (or only one domain) */
        if (!(ngpu > 0 && !gmx_gpu_sharing_supported()) &&
            nth_omp_max > nthreads_omp_faster(cpuInfo, ngpu > 0))
        {
            /* To arrive here, the user/system set #ranks and/or #OMPthreads */
            gmx_bool bEnvSet;
            char     buf2[256];

            bEnvSet = (getenv("OMP_NUM_THREADS") != nullptr);

            if (bNtOmpOptionSet || bEnvSet)
            {
                sprintf(buf2, "You requested %d OpenMP threads", nth_omp_max);
            }
            else
            {
                sprintf(buf2, "Your choice of %d MPI rank%s and the use of %d total threads %sleads to the use of %d OpenMP threads",
                        cr->nnodes + cr->npmenodes,
                        cr->nnodes + cr->npmenodes == 1 ? "" : "s",
                        hw_opt->nthreads_tot > 0 ? hw_opt->nthreads_tot : hwinfo->nthreads_hw_avail,
                        hwinfo->nphysicalnode > 1 ? "on a node " : "",
                        nth_omp_max);
            }
            sprintf(buf, "%s, whereas we expect the optimum to be with more MPI ranks with %d to %d OpenMP threads.",
                    buf2, nthreads_omp_mpi_ok_min, nthreads_omp_mpi_target_max);

            /* We cannot quit with a fatal error when OMP_NUM_THREADS is set
             * with different values per rank or node, since in that case
             * the user cannot set -ntomp to override the error.
             */
            if (bNtOmpOptionSet || (bEnvSet && nth_omp_min != nth_omp_max))
            {
                GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted("NOTE: %s", buf);
            }
            else
            {
                gmx_fatal(FARGS, "%s If you want to run with this many OpenMP threads, specify the -ntomp option. But we suggest to increase the number of MPI ranks%s.", buf, mpi_option);
            }
        }
    }
#else /* GMX_OPENMP && GMX_MPI */
    /* No OpenMP and/or MPI: it doesn't make much sense to check */
    GMX_UNUSED_VALUE(hw_opt);
    GMX_UNUSED_VALUE(bNtOmpOptionSet);
    GMX_UNUSED_VALUE(cr);
    /* Check if we have more than 1 physical core, if detected,
     * or more than 1 hardware thread if physical cores were not detected.
     */
    if (!GMX_OPENMP && !GMX_MPI && hwinfo->hardwareTopology->numberOfCores() > 1)
    {
        GMX_LOG(mdlog.warning).asParagraph().appendText("NOTE: GROMACS was compiled without OpenMP and (thread-)MPI support, can only use a single CPU core");
    }
#endif /* GMX_OPENMP && GMX_MPI */
}


static void print_hw_opt(FILE *fp, const gmx_hw_opt_t *hw_opt)
{
    fprintf(fp, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s'\n",
            hw_opt->nthreads_tot,
            hw_opt->nthreads_tmpi,
            hw_opt->nthreads_omp,
            hw_opt->nthreads_omp_pme,
            hw_opt->gpu_opt.gpu_id != nullptr ? hw_opt->gpu_opt.gpu_id : "");
}

/* Checks we can do when we don't (yet) know the cut-off scheme */
void check_and_update_hw_opt_1(gmx_hw_opt_t    *hw_opt,
                               const t_commrec *cr,
                               int              nPmeRanks)
{
    /* Currently hw_opt only contains default settings or settings supplied
     * by the user on the command line.
     */
    if (hw_opt->nthreads_omp < 0)
    {
        gmx_fatal(FARGS, "The number of OpenMP threads supplied on the command line is %d, which is negative and not allowed", hw_opt->nthreads_omp);
    }

    /* Check for OpenMP settings stored in environment variables, which can
     * potentially be different on different MPI ranks.
     */
    gmx_omp_nthreads_read_env(&hw_opt->nthreads_omp, SIMMASTER(cr));

    /* Check restrictions on the user supplied options before modifying them.
     * TODO: Put the user values in a const struct and preserve them.
     */
#if !GMX_THREAD_MPI
    if (hw_opt->nthreads_tot > 0)
    {
        gmx_fatal(FARGS, "Setting the total number of threads is only supported with thread-MPI and GROMACS was compiled without thread-MPI");
    }
    if (hw_opt->nthreads_tmpi > 0)
    {
        gmx_fatal(FARGS, "Setting the number of thread-MPI ranks is only supported with thread-MPI and GROMACS was compiled without thread-MPI");
    }
#endif

    if (bHasOmpSupport)
    {
        /* Check restrictions on PME thread related options set by the user */

        if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
        {
            gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");
        }

        if (hw_opt->nthreads_omp_pme >= 1 &&
            hw_opt->nthreads_omp_pme != hw_opt->nthreads_omp &&
            nPmeRanks <= 0)
        {
            /* This can result in a fatal error on many MPI ranks,
             * but since the thread count can differ per rank,
             * we can't easily avoid this.
             */
            gmx_fatal(FARGS, "You need to explicitly specify the number of PME ranks (-npme) when using a different number of OpenMP threads for PP and PME ranks");
        }
    }
    else
    {
        /* GROMACS was configured without OpenMP support */

        if (hw_opt->nthreads_omp > 1 || hw_opt->nthreads_omp_pme > 1)
        {
            gmx_fatal(FARGS, "More than 1 OpenMP thread requested, but GROMACS was compiled without OpenMP support");
        }
        hw_opt->nthreads_omp     = 1;
        hw_opt->nthreads_omp_pme = 1;
    }

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
    {
        /* We have the same number of OpenMP threads for PP and PME ranks,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) does not match the thread-MPI ranks (%d) times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of thread-MPI ranks requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);
        }

        if (hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of OpenMP threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_omp);
        }
    }

    if (hw_opt->nthreads_tot == 1)
    {
        hw_opt->nthreads_tmpi = 1;

        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "You requested %d OpenMP threads with %d total threads",
                      hw_opt->nthreads_omp, hw_opt->nthreads_tot);
        }
        hw_opt->nthreads_omp     = 1;
        hw_opt->nthreads_omp_pme = 1;
    }

    /* Parse GPU IDs, if provided.
     * We check consistency with the tMPI thread count later.
     */
    gmx_parse_gpu_ids(&hw_opt->gpu_opt);

#if GMX_THREAD_MPI
    if (hw_opt->gpu_opt.n_dev_use > 0 && hw_opt->nthreads_tmpi == 0)
    {
        /* Set the number of MPI threads equal to the number of GPUs */
        hw_opt->nthreads_tmpi = hw_opt->gpu_opt.n_dev_use;

        if (hw_opt->nthreads_tot > 0 &&
            hw_opt->nthreads_tmpi > hw_opt->nthreads_tot)
        {
            /* We have more GPUs than total threads requested.
             * We choose to (later) generate a mismatch error,
             * instead of launching more threads than requested.
             */
            hw_opt->nthreads_tmpi = hw_opt->nthreads_tot;
        }
    }
#endif

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }

    /* Asserting this simplifies the hardware resource division later
     * on. */
    GMX_RELEASE_ASSERT(!(hw_opt->nthreads_omp_pme >= 1 && hw_opt->nthreads_omp <= 0),
                       "PME thread count should only be set when the normal thread count is also set");
}

/* Checks we can do when we know the cut-off scheme */
void check_and_update_hw_opt_2(gmx_hw_opt_t *hw_opt,
                               int           cutoff_scheme)
{
    if (cutoff_scheme == ecutsGROUP)
    {
        /* We only have OpenMP support for PME-only nodes */
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "OpenMP threads have been requested with cut-off scheme %s, but these are only supported with cut-off scheme %s",
                      ecutscheme_names[cutoff_scheme],
                      ecutscheme_names[ecutsVERLET]);
        }
        hw_opt->nthreads_omp = 1;
    }
}

/* Checks we can do when we know the thread-MPI rank count */
void check_and_update_hw_opt_3(gmx_hw_opt_t gmx_unused *hw_opt)
{
#if GMX_THREAD_MPI
    GMX_RELEASE_ASSERT(hw_opt->nthreads_tmpi >= 1, "Must have at least one thread-MPI rank");

    /* If the user set the total number of threads on the command line
     * and did not specify the number of OpenMP threads, set the latter here.
     */
    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp <= 0)
    {
        hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
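
        /* Note that this is integer division: when -nt is not a multiple of
         * the rank count, the remainder hardware threads stay unused.
         */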

        if (!bHasOmpSupport && hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "You (indirectly) asked for OpenMP threads by setting -nt > -ntmpi, but GROMACS was compiled without OpenMP support");
        }
    }
#endif

    GMX_RELEASE_ASSERT(bHasOmpSupport || hw_opt->nthreads_omp == 1, "Without OpenMP support, only one thread per rank can be used");

    /* We are done with updating nthreads_omp, we can set nthreads_omp_pme */
    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }
}