Obey OpenMP thread count limit with tMPI
[gromacs.git] / src / programs / mdrun / resource-division.cpp
blob 9042aee8a9c860ac8a4399d3abf55c3357de6c8e
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

#include "gmxpre.h"

#include "resource-division.h"

#include "config.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>

#include "gromacs/legacyheaders/gmx_detect_hardware.h"
#include "gromacs/legacyheaders/gmx_omp_nthreads.h"
#include "gromacs/legacyheaders/md_logging.h"
#include "gromacs/legacyheaders/names.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/utility/fatalerror.h"

/* DISCLAIMER: All the atom count and thread numbers below are heuristic.
 * The real switching points will depend on the simulated system,
 * the algorithms used and the hardware it's running on, as well as on
 * whether other jobs are running on the same machine. We try to take into
 * account factors that have a large influence, such as recent Intel CPUs
 * being much better at wide multi-threading. The remaining factors should
 * (hopefully) have a small influence, such that the performance just before
 * and after a switch point doesn't change too much.
 */

#ifdef GMX_OPENMP
static const bool bOMP = true;
#else
static const bool bOMP = false;
#endif

#ifdef GMX_THREAD_MPI
/* The minimum number of atoms per tMPI thread. With fewer atoms than this,
 * the number of threads will get lowered.
 */
static const int min_atoms_per_mpi_thread = 90;
static const int min_atoms_per_gpu        = 900;
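
/* Illustrative example of these limits (heuristic, not exact): a 9000-atom
 * system would be capped at 9000/90 = 100 tMPI ranks on CPUs only, or at
 * 9000/900 = 10 ranks when every rank drives a GPU.
 */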
#endif /* GMX_THREAD_MPI */

/* TODO choose nthreads_omp based on hardware topology
   when we have a hardware topology detection library */
/* First we consider the case of no MPI (1 MPI rank).
 * In general, when running up to 8 threads, OpenMP should be faster.
 * Note: on AMD Bulldozer we should avoid running OpenMP over two dies.
 * On Intel>=Nehalem running OpenMP on a single CPU is always faster,
 * even on two CPUs it's usually faster (but with many OpenMP threads
 * it could be faster not to use HT, currently we always use HT).
 * On Nehalem/Westmere we want to avoid running 16 threads over
 * two CPUs with HT, so we need a limit < 16; thus we use 12.
 * A reasonable limit for Intel Sandy and Ivy Bridge,
 * not knowing the topology, is 16 threads.
 * Below we check for Intel and AVX, which for now includes
 * Sandy/Ivy Bridge, Haswell and Broadwell. By checking for AVX instead of
 * model numbers we ensure also future Intel CPUs are covered.
 */
const int nthreads_omp_faster_default   =  8;
const int nthreads_omp_faster_Nehalem   = 12;
const int nthreads_omp_faster_Intel_AVX = 16;

/* For CPU-only runs the fastest options are usually MPI or OpenMP only.
 * With one GPU, using MPI only is almost never optimal, so we need to
 * compare running pure OpenMP with combined MPI+OpenMP. This means higher
 * OpenMP thread counts can still be ok. Multiplying the numbers above
 * by a factor of 2 seems to be a good estimate.
 */
const int nthreads_omp_faster_gpu_fac = 2;

/* This is the case with MPI (2 or more MPI PP ranks).
 * By default we will terminate with a fatal error when more than 8
 * OpenMP threads are (indirectly) requested, since using fewer threads
 * nearly always results in better performance.
 * With thread-MPI and multiple GPUs, or one GPU and too many threads,
 * we first try 6 OpenMP threads and then less until the number of MPI ranks
 * is divisible by the number of GPUs.
 */
#if defined GMX_OPENMP && defined GMX_MPI
const int nthreads_omp_mpi_ok_max     = 8;
const int nthreads_omp_mpi_ok_min_cpu = 1;
#endif
const int nthreads_omp_mpi_ok_min_gpu = 2;
const int nthreads_omp_mpi_target_max = 6;

/* Returns the maximum OpenMP thread count for which using a single MPI rank
 * should be faster than using multiple ranks with the same total thread count.
 */
static int nthreads_omp_faster(gmx_cpuid_t cpuid_info, gmx_bool bUseGPU)
{
    int nth;

    if (gmx_cpuid_vendor(cpuid_info) == GMX_CPUID_VENDOR_INTEL &&
        gmx_cpuid_feature(cpuid_info, GMX_CPUID_FEATURE_X86_AVX))
    {
        nth = nthreads_omp_faster_Intel_AVX;
    }
    else if (gmx_cpuid_is_intel_nehalem(cpuid_info))
    {
        nth = nthreads_omp_faster_Nehalem;
    }
    else
    {
        nth = nthreads_omp_faster_default;
    }

    if (bUseGPU)
    {
        nth *= nthreads_omp_faster_gpu_fac;
    }

    nth = std::min(nth, GMX_OPENMP_MAX_THREADS);

    return nth;
}
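
/* For example, on an Intel CPU with AVX the function above returns 16 for a
 * CPU-only run and 2*16 = 32 when a GPU is used, clamped to
 * GMX_OPENMP_MAX_THREADS.
 */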

/* Returns the maximum OpenMP thread count that passes the efficiency check */
static int nthreads_omp_efficient_max(int gmx_unused nrank,
                                      gmx_cpuid_t    cpuid_info,
                                      gmx_bool       bUseGPU)
{
#if defined GMX_OPENMP && defined GMX_MPI
    if (nrank > 1)
    {
        return nthreads_omp_mpi_ok_max;
    }
    else
#endif
    {
        return nthreads_omp_faster(cpuid_info, bUseGPU);
    }
}

/* Return the number of thread-MPI ranks to use.
 * This is chosen such that we can always obey our own efficiency checks.
 */
static int get_tmpi_omp_thread_division(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        int                  nthreads_tot,
                                        int                  ngpu)
{
    int nrank;

    assert(nthreads_tot > 0);

    /* There are no separate PME nodes here, as we ensured in
     * check_and_update_hw_opt that nthreads_tmpi>0 with PME nodes
     * and a conditional ensures we would not have ended up here.
     * Note that separate PME nodes might be switched on later.
     */
    if (ngpu > 0)
    {
        nrank = ngpu;
        if (nthreads_tot < nrank)
        {
            /* #thread < #gpu is very unlikely, but if so: waste gpu(s) */
            nrank = nthreads_tot;
        }
        else if (gmx_gpu_sharing_supported() &&
                 (nthreads_tot > nthreads_omp_faster(hwinfo->cpuid_info,
                                                     ngpu > 0) ||
                  (ngpu > 1 && nthreads_tot/ngpu > nthreads_omp_mpi_target_max)))
        {
            /* The high OpenMP thread count will likely result in sub-optimal
             * performance. Increase the rank count to reduce the thread count
             * per rank. This will lead to GPU sharing by MPI ranks/threads.
             */
            int nshare;

            /* Increase the rank count as long as we have more than 6 OpenMP
             * threads per rank or the number of hardware threads is not
             * divisible by the rank count. Don't go below 2 OpenMP threads.
             */
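            /* Illustrative trace of the loop below: with nthreads_tot = 32
             * and ngpu = 2, nshare goes 2 -> 3 -> 4 (nrank 4 -> 6 -> 8):
             * 32/4 = 8 exceeds 6 threads per rank, 32 % 6 != 0, but
             * 32/8 = 4 threads per rank with 32 % 8 == 0, so we end up
             * with 8 ranks sharing the 2 GPUs, 4 ranks per GPU.
             */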
            nshare = 1;
            do
            {
                nshare++;
                nrank = ngpu*nshare;
            }
            while (nthreads_tot/nrank > nthreads_omp_mpi_target_max ||
                   (nthreads_tot/(ngpu*(nshare + 1)) >= nthreads_omp_mpi_ok_min_gpu && nthreads_tot % nrank != 0));
        }
    }
    else if (hw_opt->nthreads_omp > 0)
    {
        /* Here we could oversubscribe; when we do, we issue a warning later */
        nrank = std::max(1, nthreads_tot/hw_opt->nthreads_omp);
    }
    else
    {
        if (nthreads_tot <= nthreads_omp_faster(hwinfo->cpuid_info, ngpu > 0))
        {
            /* Use pure OpenMP parallelization */
            nrank = 1;
        }
        else
        {
            /* Don't use OpenMP parallelization */
            nrank = nthreads_tot;
        }
    }

    return nrank;
}

static int getMaxGpuUsable(FILE *fplog, const t_commrec *cr,
                           const gmx_hw_info_t *hwinfo, int cutoff_scheme)
{
    /* This code relies on the fact that GPUs are not detected when GPU
     * acceleration was disabled at run time by the user.
     */
    if (cutoff_scheme == ecutsVERLET &&
        hwinfo->gpu_info.n_dev_compatible > 0)
    {
        if (gmx_multiple_gpu_per_node_supported())
        {
            return hwinfo->gpu_info.n_dev_compatible;
        }
        else
        {
            if (hwinfo->gpu_info.n_dev_compatible > 1)
            {
                md_print_warn(cr, fplog, "More than one compatible GPU is available, but GROMACS can only use one of them. Using a single thread-MPI rank.\n");
            }
            return 1;
        }
    }
    else
    {
        return 0;
    }
}

#ifdef GMX_THREAD_MPI
/* Get the number of MPI ranks to use for thread-MPI based on how many
 * were requested, which algorithms we're using,
 * and how many particles there are.
 * At this point we have already called check_and_update_hw_opt.
 * Thus all options should be internally consistent and consistent
 * with the hardware, except that ntmpi could be larger than #GPU.
 */
int get_nthreads_mpi(const gmx_hw_info_t *hwinfo,
                     gmx_hw_opt_t        *hw_opt,
                     const t_inputrec    *inputrec,
                     const gmx_mtop_t    *mtop,
                     const t_commrec     *cr,
                     FILE                *fplog)
{
    int nthreads_hw, nthreads_tot_max, nrank, ngpu;
    int min_atoms_per_mpi_rank;

    /* Check if an algorithm does not support parallel simulation. */
    if (inputrec->eI == eiLBFGS ||
        inputrec->coulombtype == eelEWALD)
    {
        md_print_warn(cr, fplog, "The integration or electrostatics algorithm doesn't support parallel runs. Using a single thread-MPI rank.\n");
        if (hw_opt->nthreads_tmpi > 1)
        {
            gmx_fatal(FARGS, "You asked for more than 1 thread-MPI rank, but an algorithm doesn't support that");
        }

        return 1;
    }

    if (hw_opt->nthreads_tmpi > 0)
    {
        /* Trivial, return right away */
        return hw_opt->nthreads_tmpi;
    }

    nthreads_hw = hwinfo->nthreads_hw_avail;

    if (nthreads_hw <= 0)
    {
        /* This should normally not happen, but if it does, we handle it */
        gmx_fatal(FARGS, "The number of available hardware threads can not be detected, please specify the number of MPI ranks and the number of OpenMP threads (if supported) manually with options -ntmpi and -ntomp, respectively");
    }

    /* How many total (#tMPI*#OpenMP) threads can we start? */
    if (hw_opt->nthreads_tot > 0)
    {
        nthreads_tot_max = hw_opt->nthreads_tot;
    }
    else
    {
        nthreads_tot_max = nthreads_hw;
    }

    ngpu = getMaxGpuUsable(fplog, cr, hwinfo, inputrec->cutoff_scheme);

    if (inputrec->cutoff_scheme == ecutsGROUP)
    {
        /* We checked this before, but it doesn't hurt to do it once more */
        assert(hw_opt->nthreads_omp == 1);
    }

    nrank =
        get_tmpi_omp_thread_division(hwinfo, hw_opt, nthreads_tot_max, ngpu);

    if (inputrec->eI == eiNM || EI_TPI(inputrec->eI))
    {
        /* Dims/steps are divided over the nodes instead of splitting the
         * atoms. With NM we can't have more ranks than #atoms*#dim. With TPI
         * it's unlikely we have fewer atoms than ranks, and if so,
         * communication would become a bottleneck, so we set the limit to
         * 1 atom/rank.
         */
        min_atoms_per_mpi_rank = 1;
    }
    else
    {
        if (ngpu >= 1)
        {
            min_atoms_per_mpi_rank = min_atoms_per_gpu;
        }
        else
        {
            min_atoms_per_mpi_rank = min_atoms_per_mpi_thread;
        }
    }

    if (mtop->natoms/nrank < min_atoms_per_mpi_rank)
    {
        int nrank_new;

        /* The rank count was chosen automatically, but there are too few
         * atoms per rank, so we need to reduce the rank count.
         */
        nrank_new = std::max(1, mtop->natoms/min_atoms_per_mpi_rank);
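
        /* Example of the Hyper-Threading clamp below: on 8 cores with HT
         * (16 hardware threads), nrank_new = 11 would keep both hardware
         * threads busy on some cores but only one on others. Rounding down
         * to nthreads_hw/2 = 8 gives one rank per physical core.
         */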
        /* Avoid partial use of Hyper-Threading */
        if (gmx_cpuid_x86_smt(hwinfo->cpuid_info) == GMX_CPUID_X86_SMT_ENABLED &&
            nrank_new > nthreads_hw/2 && nrank_new < nthreads_hw)
        {
            nrank_new = nthreads_hw/2;
        }

        /* If the user specified the total thread count, ensure this is
         * divisible by the number of ranks.
         * It is quite likely that we have too many total threads compared
         * to the size of the system, but if the user asked for this many
         * threads we should respect that.
         */
        while (hw_opt->nthreads_tot > 0 &&
               hw_opt->nthreads_tot % nrank_new != 0)
        {
            nrank_new--;
        }

        /* Avoid large prime numbers in the rank count */
        if (nrank_new >= 6)
        {
            /* Use only 6,8,10 with additional factors of 2 */
            int fac;

            fac = 2;
            while (3*fac*2 <= nrank_new)
            {
                fac *= 2;
            }

            nrank_new = (nrank_new/fac)*fac;
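
            /* This yields 3, 4 or 5 times a power of two, e.g.
             * nrank_new = 7 -> 6, 11 -> 10 and 13 -> 12, which avoids
             * prime rank counts that decompose poorly into domains.
             */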
        }
        else
        {
            /* Avoid 5, since small systems won't fit 5 domains along
             * a dimension. This might lead to wasting some cores, but it
             * will have a small impact in this regime of very small systems.
             */
            if (nrank_new == 5)
            {
                nrank_new = 4;
            }
        }

        nrank = nrank_new;

        /* We reduced the number of tMPI ranks, which means we might violate
         * our own efficiency checks if we simply use all hardware threads.
         */
        if (bOMP && hw_opt->nthreads_omp <= 0 && hw_opt->nthreads_tot <= 0)
        {
            /* The user set neither the total nor the OpenMP thread count,
             * we should use all hardware threads, unless we will violate
             * our own efficiency limitation on the thread count.
             */
            int nt_omp_max;

            nt_omp_max = nthreads_omp_efficient_max(nrank, hwinfo->cpuid_info, ngpu >= 1);

            if (nrank*nt_omp_max < hwinfo->nthreads_hw_avail)
            {
                /* Limit the number of OpenMP threads to start */
                hw_opt->nthreads_omp = nt_omp_max;
            }
        }

        fprintf(stderr, "\n");
        fprintf(stderr, "NOTE: Parallelization is limited by the small number of atoms,\n");
        fprintf(stderr, "      only starting %d thread-MPI ranks.\n", nrank);
        fprintf(stderr, "      You can use the -nt and/or -ntmpi option to optimize the number of threads.\n\n");
    }

    return nrank;
}
#endif /* GMX_THREAD_MPI */

void check_resource_division_efficiency(const gmx_hw_info_t *hwinfo,
                                        const gmx_hw_opt_t  *hw_opt,
                                        gmx_bool             bNtOmpOptionSet,
                                        t_commrec           *cr,
                                        FILE                *fplog)
{
#if defined GMX_OPENMP && defined GMX_MPI
    int         nth_omp_min, nth_omp_max, ngpu;
    char        buf[1000];
#ifdef GMX_THREAD_MPI
    const char *mpi_option = " (option -ntmpi)";
#else
    const char *mpi_option = "";
#endif

    /* This function should be called after thread-MPI (when configured) and
     * OpenMP have been initialized. Check that here.
     */
#ifdef GMX_THREAD_MPI
    assert(nthreads_omp_faster_default >= nthreads_omp_mpi_ok_max);
    assert(hw_opt->nthreads_tmpi >= 1);
#endif
    assert(gmx_omp_nthreads_get(emntDefault) >= 1);

    nth_omp_min = gmx_omp_nthreads_get(emntDefault);
    nth_omp_max = gmx_omp_nthreads_get(emntDefault);
    ngpu        = hw_opt->gpu_opt.n_dev_use;

    /* Thread-MPI seems to have a bug with reduce on 1 node, so use a conditional */
    if (cr->nnodes + cr->npmenodes > 1)
    {
        int count[3], count_max[3];
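
        /* Storing -nth_omp_min lets a single MPI_MAX reduction return both
         * the minimum of nth_omp_min and the maximum of nth_omp_max across
         * all ranks, since max(-x) == -min(x).
         */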
        count[0] = -nth_omp_min;
        count[1] =  nth_omp_max;
        count[2] =  ngpu;

        MPI_Allreduce(count, count_max, 3, MPI_INT, MPI_MAX, cr->mpi_comm_mysim);

        /* In case of an inhomogeneous run setup we use the maximum counts */
        nth_omp_min = -count_max[0];
        nth_omp_max =  count_max[1];
        ngpu        =  count_max[2];
    }

    int nthreads_omp_mpi_ok_min;

    if (ngpu == 0)
    {
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_cpu;
    }
    else
    {
        /* With GPUs we set the minimum number of OpenMP threads to 2 to catch
         * cases where the user specifies #ranks == #cores.
         */
        nthreads_omp_mpi_ok_min = nthreads_omp_mpi_ok_min_gpu;
    }

    if (DOMAINDECOMP(cr) && cr->nnodes > 1)
    {
        if (nth_omp_max < nthreads_omp_mpi_ok_min ||
            (!(ngpu > 0 && !gmx_gpu_sharing_supported()) &&
             nth_omp_max > nthreads_omp_mpi_ok_max))
        {
            /* Note that we print target_max here, not ok_max */
            sprintf(buf, "Your choice of number of MPI ranks and amount of resources results in using %d OpenMP threads per rank, which is most likely inefficient. The optimum is usually between %d and %d threads per rank.",
                    nth_omp_max,
                    nthreads_omp_mpi_ok_min,
                    nthreads_omp_mpi_target_max);

            if (bNtOmpOptionSet)
            {
                md_print_warn(cr, fplog, "NOTE: %s\n", buf);
            }
            else
            {
                /* This fatal error, and the one below, is nasty, but it's
                 * probably the only way to ensure that all users don't waste
                 * a lot of resources, since many users don't read logs/stderr.
                 */
                gmx_fatal(FARGS, "%s If you want to run with this setup, specify the -ntomp option. But we suggest changing the number of MPI ranks%s.", buf, mpi_option);
            }
        }
    }
    else
    {
        /* No domain decomposition (or only one domain) */
        if (!(ngpu > 0 && !gmx_gpu_sharing_supported()) &&
            nth_omp_max > nthreads_omp_faster(hwinfo->cpuid_info, ngpu > 0))
        {
            /* To arrive here, the user/system set #ranks and/or #OMPthreads */
            gmx_bool bEnvSet;
            char     buf2[256];

            bEnvSet = (getenv("OMP_NUM_THREADS") != NULL);

            if (bNtOmpOptionSet || bEnvSet)
            {
                sprintf(buf2, "You requested %d OpenMP threads", nth_omp_max);
            }
            else
            {
                sprintf(buf2, "Your choice of %d MPI rank%s and the use of %d total threads %sleads to the use of %d OpenMP threads",
                        cr->nnodes + cr->npmenodes,
                        cr->nnodes + cr->npmenodes == 1 ? "" : "s",
                        hw_opt->nthreads_tot > 0 ? hw_opt->nthreads_tot : hwinfo->nthreads_hw_avail,
                        hwinfo->nphysicalnode > 1 ? "on a node " : "",
                        nth_omp_max);
            }
            sprintf(buf, "%s, whereas we expect the optimum to be with more MPI ranks with %d to %d OpenMP threads.",
                    buf2, nthreads_omp_mpi_ok_min, nthreads_omp_mpi_target_max);

            /* We cannot quit with a fatal error when OMP_NUM_THREADS is set
             * with different values per rank or node, since in that case
             * the user cannot set -ntomp to override the error.
             */
            if (bNtOmpOptionSet || (bEnvSet && nth_omp_min != nth_omp_max))
            {
                md_print_warn(cr, fplog, "NOTE: %s\n", buf);
            }
            else
            {
                gmx_fatal(FARGS, "%s If you want to run with this many OpenMP threads, specify the -ntomp option. But we suggest increasing the number of MPI ranks%s.", buf, mpi_option);
            }
        }
    }
#else /* GMX_OPENMP && GMX_MPI */
    /* No OpenMP and/or MPI: it doesn't make much sense to check */
    GMX_UNUSED_VALUE(hw_opt);
    GMX_UNUSED_VALUE(bNtOmpOptionSet);

    /* Check if we have more than 1 physical core, if detected,
     * or more than 1 hardware thread if physical cores were not detected.
     */
    if ((hwinfo->ncore > 1) ||
        (hwinfo->ncore == 0 && hwinfo->nthreads_hw_avail > 1))
    {
        md_print_warn(cr, fplog, "NOTE: GROMACS was compiled without OpenMP and (thread-)MPI support, can only use a single CPU core\n");
    }
#endif /* GMX_OPENMP && GMX_MPI */
}

static void print_hw_opt(FILE *fp, const gmx_hw_opt_t *hw_opt)
{
    fprintf(fp, "hw_opt: nt %d ntmpi %d ntomp %d ntomp_pme %d gpu_id '%s'\n",
            hw_opt->nthreads_tot,
            hw_opt->nthreads_tmpi,
            hw_opt->nthreads_omp,
            hw_opt->nthreads_omp_pme,
            hw_opt->gpu_opt.gpu_id != NULL ? hw_opt->gpu_opt.gpu_id : "");
}

/* Checks we can do when we don't (yet) know the cut-off scheme */
void check_and_update_hw_opt_1(gmx_hw_opt_t *hw_opt,
                               gmx_bool      bIsSimMaster)
{
    gmx_omp_nthreads_read_env(&hw_opt->nthreads_omp, bIsSimMaster);

#ifndef GMX_THREAD_MPI
    if (hw_opt->nthreads_tot > 0)
    {
        gmx_fatal(FARGS, "Setting the total number of threads is only supported with thread-MPI and GROMACS was compiled without thread-MPI");
    }
    if (hw_opt->nthreads_tmpi > 0)
    {
        gmx_fatal(FARGS, "Setting the number of thread-MPI ranks is only supported with thread-MPI and GROMACS was compiled without thread-MPI");
    }
#endif

    if (!bOMP)
    {
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "More than 1 OpenMP thread requested, but GROMACS was compiled without OpenMP support");
        }
        hw_opt->nthreads_omp = 1;
    }

    if (hw_opt->nthreads_tot > 0 && hw_opt->nthreads_omp_pme <= 0)
    {
        /* We have the same number of OpenMP threads for PP and PME processes,
         * thus we can perform several consistency checks.
         */
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot != hw_opt->nthreads_tmpi*hw_opt->nthreads_omp)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) does not match the thread-MPI ranks (%d) times the OpenMP threads (%d) requested",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi, hw_opt->nthreads_omp);
        }

        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_tmpi != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of thread-MPI ranks requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_tmpi);
        }

        if (hw_opt->nthreads_omp > 0 &&
            hw_opt->nthreads_tot % hw_opt->nthreads_omp != 0)
        {
            gmx_fatal(FARGS, "The total number of threads requested (%d) is not divisible by the number of OpenMP threads requested (%d)",
                      hw_opt->nthreads_tot, hw_opt->nthreads_omp);
        }
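
        /* When -nt and -ntmpi are given but -ntomp is not, the OpenMP thread
         * count follows from the values that just passed the checks above,
         * e.g. "-nt 8 -ntmpi 2" implies 8/2 = 4 OpenMP threads per rank,
         * as derived below.
         */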
        if (hw_opt->nthreads_tmpi > 0 &&
            hw_opt->nthreads_omp <= 0)
        {
            hw_opt->nthreads_omp = hw_opt->nthreads_tot/hw_opt->nthreads_tmpi;
        }
    }

    if (!bOMP && hw_opt->nthreads_omp > 1)
    {
        gmx_fatal(FARGS, "OpenMP threads are requested, but GROMACS was compiled without OpenMP support");
    }

    if (hw_opt->nthreads_omp_pme > 0 && hw_opt->nthreads_omp <= 0)
    {
        gmx_fatal(FARGS, "You need to specify -ntomp in addition to -ntomp_pme");
    }

    if (hw_opt->nthreads_tot == 1)
    {
        hw_opt->nthreads_tmpi = 1;

        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "You requested %d OpenMP threads with %d total threads",
                      hw_opt->nthreads_omp, hw_opt->nthreads_tot);
        }
        hw_opt->nthreads_omp = 1;
    }

    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    /* Parse GPU IDs, if provided.
     * We check consistency with the tMPI thread count later.
     */
    gmx_parse_gpu_ids(&hw_opt->gpu_opt);

#ifdef GMX_THREAD_MPI
    if (hw_opt->gpu_opt.n_dev_use > 0 && hw_opt->nthreads_tmpi == 0)
    {
        /* Set the number of MPI threads equal to the number of GPUs */
        hw_opt->nthreads_tmpi = hw_opt->gpu_opt.n_dev_use;

        if (hw_opt->nthreads_tot > 0 &&
            hw_opt->nthreads_tmpi > hw_opt->nthreads_tot)
        {
            /* We have more GPUs than total threads requested.
             * We choose to (later) generate a mismatch error,
             * instead of launching more threads than requested.
             */
            hw_opt->nthreads_tmpi = hw_opt->nthreads_tot;
        }
    }
#endif

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }
}

/* Checks we can do when we know the cut-off scheme */
void check_and_update_hw_opt_2(gmx_hw_opt_t *hw_opt,
                               int           cutoff_scheme)
{
    if (cutoff_scheme == ecutsGROUP)
    {
        /* We only have OpenMP support for PME-only nodes */
        if (hw_opt->nthreads_omp > 1)
        {
            gmx_fatal(FARGS, "OpenMP threads have been requested with cut-off scheme %s, but these are only supported with cut-off scheme %s",
                      ecutscheme_names[cutoff_scheme],
                      ecutscheme_names[ecutsVERLET]);
        }
        hw_opt->nthreads_omp = 1;
    }

    if (hw_opt->nthreads_omp_pme <= 0 && hw_opt->nthreads_omp > 0)
    {
        hw_opt->nthreads_omp_pme = hw_opt->nthreads_omp;
    }

    if (debug)
    {
        print_hw_opt(debug, hw_opt);
    }
}