/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "gromacs/legacyheaders/gmx_detect_hardware.h"

#include "config.h"

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#include <algorithm>
#include <string>
#include <vector>

#ifdef HAVE_UNISTD_H
/* For sysconf */
#include <unistd.h>
#endif
#ifdef GMX_NATIVE_WINDOWS
#include <windows.h>
#endif

#include "thread_mpi/threads.h"

#include "gromacs/gmxlib/gpu_utils/gpu_utils.h"
#include "gromacs/legacyheaders/copyrite.h"
#include "gromacs/legacyheaders/gmx_cpuid.h"
#include "gromacs/legacyheaders/md_logging.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/legacyheaders/types/enums.h"
#include "gromacs/legacyheaders/types/hw_info.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/sysinfo.h"
#ifdef GMX_GPU
const gmx_bool bGPUBinary = TRUE;
#else
const gmx_bool bGPUBinary = FALSE;
#endif

static const char * invalid_gpuid_hint =
    "A delimiter-free sequence of valid numeric IDs of available GPUs is expected.";

/* The globally shared hwinfo structure. */
static gmx_hw_info_t      *hwinfo_g;
/* A reference counter for the hwinfo structure */
static int                 n_hwinfo = 0;
/* A lock to protect the hwinfo structure */
static tMPI_Thread_mutex_t hw_info_lock = TMPI_THREAD_MUTEX_INITIALIZER;

/* FW decl. */
static void limit_num_gpus_used(gmx_gpu_opt_t *gpu_opt, int count);
static int gmx_count_gpu_dev_unique(const gmx_gpu_info_t *gpu_info,
                                    const gmx_gpu_opt_t  *gpu_opt);
static void sprint_gpus(char *sbuf, const gmx_gpu_info_t *gpu_info)
{
    int  i, ndev;
    char stmp[STRLEN];

    ndev = gpu_info->ncuda_dev;

    sbuf[0] = '\0';
    for (i = 0; i < ndev; i++)
    {
        get_gpu_device_info_string(stmp, gpu_info, i);
        strcat(sbuf, "  ");
        strcat(sbuf, stmp);
        if (i < ndev - 1)
        {
            strcat(sbuf, "\n");
        }
    }
}
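
/* Report the GPU detection results to the log and, with real (library) MPI,
 * include the host name, since detection is per physical node. Prints
 * nothing if GPU detection was skipped. */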
static void print_gpu_detection_stats(FILE                 *fplog,
                                      const gmx_gpu_info_t *gpu_info,
                                      const t_commrec      *cr)
{
    char onhost[266], stmp[STRLEN];
    int  ngpu;

    if (!gpu_info->bDetectGPUs)
    {
        /* We skipped the detection, so don't print detection stats */
        return;
    }

    ngpu = gpu_info->ncuda_dev;

#if defined GMX_MPI && !defined GMX_THREAD_MPI
    /* We only print the detection on one, of possibly multiple, nodes */
    strncpy(onhost, " on host ", 10);
    gmx_gethostname(onhost + 9, 256);
#else
    /* We detect all relevant GPUs */
    strncpy(onhost, "", 1);
#endif

    if (ngpu > 0)
    {
        sprint_gpus(stmp, gpu_info);
        md_print_warn(cr, fplog, "%d GPU%s detected%s:\n%s\n",
                      ngpu, (ngpu > 1) ? "s" : "", onhost, stmp);
    }
    else
    {
        md_print_warn(cr, fplog, "No GPUs detected%s\n", onhost);
    }
}
/*! \brief Helper function for reporting GPU usage information
 * in the mdrun log file
 *
 * \param[in] gpu_info    Pointer to per-node GPU info struct
 * \param[in] gpu_opt     Pointer to per-node GPU options struct
 * \param[in] numPpRanks  Number of PP ranks per node
 * \return                String to write to the log file
 * \throws                std::bad_alloc if out of memory */
static std::string
makeGpuUsageReport(const gmx_gpu_info_t *gpu_info,
                   const gmx_gpu_opt_t  *gpu_opt,
                   size_t                numPpRanks)
{
    int ngpu_use  = gpu_opt->ncuda_dev_use;
    int ngpu_comp = gpu_info->ncuda_dev_compatible;

    /* Issue a note if GPUs are available but not used */
    if (ngpu_comp > 0 && ngpu_use < 1)
    {
        return gmx::formatString("%d compatible GPU%s detected in the system, but none will be used.\n"
                                 "Consider trying GPU acceleration with the Verlet scheme!\n",
                                 ngpu_comp, (ngpu_comp > 1) ? "s" : "");
    }

    std::string output;
    if (!gpu_opt->bUserSet)
    {
        // gpu_opt->cuda_dev_compatible is only populated during auto-selection
        std::string gpuIdsString =
            formatAndJoin(gmx::constArrayRefFromArray(gpu_opt->cuda_dev_compatible,
                                                      gpu_opt->ncuda_dev_compatible),
                          ",", gmx::StringFormatter("%d"));
        bool        bPluralGpus = gpu_opt->ncuda_dev_compatible > 1;

        output += gmx::formatString("%d compatible GPU%s %s present, with ID%s %s\n",
                                    gpu_opt->ncuda_dev_compatible,
                                    bPluralGpus ? "s" : "",
                                    bPluralGpus ? "are" : "is",
                                    bPluralGpus ? "s" : "",
                                    gpuIdsString.c_str());
    }

    {
        std::vector<int> gpuIdsInUse;
        for (int i = 0; i < ngpu_use; i++)
        {
            gpuIdsInUse.push_back(get_gpu_device_id(gpu_info, gpu_opt, i));
        }
        std::string gpuIdsString =
            formatAndJoin(gpuIdsInUse, ",", gmx::StringFormatter("%d"));
        int         numGpusInUse = gmx_count_gpu_dev_unique(gpu_info, gpu_opt);
        bool        bPluralGpus  = numGpusInUse > 1;

        output += gmx::formatString("%d GPU%s %sselected for this run.\n"
                                    "Mapping of GPU ID%s to the %d PP rank%s in this node: %s\n",
                                    numGpusInUse, bPluralGpus ? "s" : "",
                                    gpu_opt->bUserSet ? "user-" : "auto-",
                                    bPluralGpus ? "s" : "",
                                    numPpRanks,
                                    (numPpRanks > 1) ? "s" : "",
                                    gpuIdsString.c_str());
    }

    return output;
}
/* Give a suitable fatal error or warning if the build configuration
   and runtime CPU do not match. */
static void
check_use_of_rdtscp_on_this_cpu(FILE                *fplog,
                                const t_commrec     *cr,
                                const gmx_hw_info_t *hwinfo)
{
    gmx_bool bCpuHasRdtscp, bBinaryUsesRdtscp;
#ifdef HAVE_RDTSCP
    bBinaryUsesRdtscp = TRUE;
#else
    bBinaryUsesRdtscp = FALSE;
#endif

    bCpuHasRdtscp = gmx_cpuid_feature(hwinfo->cpuid_info, GMX_CPUID_FEATURE_X86_RDTSCP);

    if (!bCpuHasRdtscp && bBinaryUsesRdtscp)
    {
        gmx_fatal(FARGS, "The %s executable was compiled to use the rdtscp CPU instruction. "
                  "However, this is not supported by the current hardware and continuing would lead to a crash. "
                  "Please rebuild GROMACS with the GMX_USE_RDTSCP=OFF CMake option.",
                  ShortProgram());
    }

    if (bCpuHasRdtscp && !bBinaryUsesRdtscp)
    {
        md_print_warn(cr, fplog, "The current CPU can measure timings more accurately than the code in\n"
                      "%s was configured to use. This might affect your simulation\n"
                      "speed as accurate timings are needed for load-balancing.\n"
                      "Please consider rebuilding %s with the GMX_USE_RDTSCP=ON CMake option.\n",
                      ShortProgram(), ShortProgram());
    }
}
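
/* Check the consistency of the detected hardware with the run configuration:
 * SIMD level and rdtscp support of the binary, GPU detection results, and the
 * mapping of GPUs to PP ranks on this node. Issues warnings for sub-optimal
 * setups and fatal errors for impossible ones. */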
void gmx_check_hw_runconf_consistency(FILE                *fplog,
                                      const gmx_hw_info_t *hwinfo,
                                      const t_commrec     *cr,
                                      const gmx_hw_opt_t  *hw_opt,
                                      gmx_bool             bUseGPU)
{
    int      npppn;
    char     th_or_proc[STRLEN], th_or_proc_plural[STRLEN], pernode[STRLEN];
    gmx_bool btMPI, bMPI, bMaxMpiThreadsSet, bNthreadsAuto, bEmulateGPU;

    assert(hwinfo);
    assert(cr);

    /* Below we only do consistency checks for PP and GPUs,
     * this is irrelevant for PME only nodes, so in that case we return
     * here.
     */
    if (!(cr->duty & DUTY_PP))
    {
        return;
    }

#if defined(GMX_THREAD_MPI)
    bMPI          = FALSE;
    btMPI         = TRUE;
    bNthreadsAuto = (hw_opt->nthreads_tmpi < 1);
#elif defined(GMX_LIB_MPI)
    bMPI          = TRUE;
    btMPI         = FALSE;
    bNthreadsAuto = FALSE;
#else
    bMPI          = FALSE;
    btMPI         = FALSE;
    bNthreadsAuto = FALSE;
#endif

    /* GPU emulation detection is done later, but we need it here as well
     * -- uncool, but there's no elegant workaround */
    bEmulateGPU       = (getenv("GMX_EMULATE_GPU") != NULL);
    bMaxMpiThreadsSet = (getenv("GMX_MAX_MPI_THREADS") != NULL);

    /* check the SIMD level mdrun is compiled with against hardware
       capabilities */
    /* TODO: Here we assume homogeneous hardware which is not necessarily
             the case! Might not hurt to add an extra check over MPI. */
    gmx_cpuid_simd_check(hwinfo->cpuid_info, fplog, SIMMASTER(cr));

    check_use_of_rdtscp_on_this_cpu(fplog, cr, hwinfo);

    /* NOTE: this print is only for and on one physical node */
    print_gpu_detection_stats(fplog, &hwinfo->gpu_info, cr);

    if (hwinfo->gpu_info.ncuda_dev_compatible > 0)
    {
        std::string gpuUsageReport;
        try
        {
            gpuUsageReport = makeGpuUsageReport(&hwinfo->gpu_info,
                                                &hw_opt->gpu_opt,
                                                cr->nrank_pp_intranode);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

        /* NOTE: this print is only for and on one physical node */
        md_print_info(cr, fplog, gpuUsageReport.c_str());
    }

    /* Need to ensure that we have enough GPUs:
     * - need one GPU per PP node
     * - no GPU oversubscription with tMPI
     * */
    /* number of PP processes per node */
    npppn = cr->nrank_pp_intranode;

    pernode[0]           = '\0';
    th_or_proc_plural[0] = '\0';
    if (btMPI)
    {
        sprintf(th_or_proc, "thread-MPI thread");
        if (npppn > 1)
        {
            sprintf(th_or_proc_plural, "s");
        }
    }
    else if (bMPI)
    {
        sprintf(th_or_proc, "MPI process");
        if (npppn > 1)
        {
            sprintf(th_or_proc_plural, "es");
        }
        sprintf(pernode, " per node");
    }
    else
    {
        /* neither MPI nor tMPI */
        sprintf(th_or_proc, "process");
    }

    if (bUseGPU && hwinfo->gpu_info.ncuda_dev_compatible > 0 &&
        !bEmulateGPU)
    {
        int  ngpu_comp, ngpu_use;
        char gpu_comp_plural[2], gpu_use_plural[2];

        ngpu_comp = hwinfo->gpu_info.ncuda_dev_compatible;
        ngpu_use  = hw_opt->gpu_opt.ncuda_dev_use;

        sprintf(gpu_comp_plural, "%s", (ngpu_comp > 1) ? "s" : "");
        sprintf(gpu_use_plural,  "%s", (ngpu_use > 1) ? "s" : "");

        /* number of tMPI threads auto-adjusted */
        if (btMPI && bNthreadsAuto)
        {
            if (hw_opt->gpu_opt.bUserSet && npppn < ngpu_use)
            {
                /* The user manually provided more GPUs than threads we
                   could automatically start. */
                gmx_fatal(FARGS,
                          "%d GPU%s provided, but only %d PP thread-MPI thread%s could be started.\n"
                          "%s requires one PP thread-MPI thread per GPU; use fewer GPUs%s.",
                          ngpu_use, gpu_use_plural,
                          npppn, th_or_proc_plural,
                          ShortProgram(), bMaxMpiThreadsSet ? "\nor allow more threads to be used" : "");
            }

            if (!hw_opt->gpu_opt.bUserSet && npppn < ngpu_comp)
            {
                /* There are more GPUs than tMPI threads; we have
                   limited the number of GPUs used. */
                md_print_warn(cr, fplog,
                              "NOTE: %d GPU%s were detected, but only %d PP thread-MPI thread%s can be started.\n"
                              "      %s can use one GPU per PP thread-MPI thread, so only %d GPU%s will be used.%s\n",
                              ngpu_comp, gpu_comp_plural,
                              npppn, th_or_proc_plural,
                              ShortProgram(), npppn,
                              npppn > 1 ? "s" : "",
                              bMaxMpiThreadsSet ? "\n      Also, you can allow more threads to be used by increasing GMX_MAX_MPI_THREADS" : "");
            }
        }

        if (hw_opt->gpu_opt.bUserSet)
        {
            if (ngpu_use != npppn)
            {
                gmx_fatal(FARGS,
                          "Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
                          "%s was started with %d PP %s%s%s, but you provided %d GPU%s.",
                          th_or_proc, btMPI ? "s" : "es", pernode,
                          ShortProgram(), npppn, th_or_proc,
                          th_or_proc_plural, pernode,
                          ngpu_use, gpu_use_plural);
            }
        }
        else
        {
            if (ngpu_comp > npppn)
            {
                md_print_warn(cr, fplog,
                              "NOTE: potentially sub-optimal launch configuration, %s started with less\n"
                              "      PP %s%s%s than GPU%s available.\n"
                              "      Each PP %s can use only one GPU, %d GPU%s%s will be used.\n",
                              ShortProgram(), th_or_proc,
                              th_or_proc_plural, pernode, gpu_comp_plural,
                              th_or_proc, npppn, gpu_use_plural, pernode);
            }

            if (ngpu_use != npppn)
            {
                /* Avoid duplicate error messages.
                 * Unfortunately we can only do this at the physical node
                 * level, since the hardware setup and MPI process count
                 * might differ between physical nodes.
                 */
                if (cr->rank_pp_intranode == 0)
                {
                    gmx_fatal(FARGS,
                              "Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
                              "%s was started with %d PP %s%s%s, but only %d GPU%s were detected.",
                              th_or_proc, btMPI ? "s" : "es", pernode,
                              ShortProgram(), npppn, th_or_proc,
                              th_or_proc_plural, pernode,
                              ngpu_use, gpu_use_plural);
                }
            }
        }

        {
            int same_count;

            same_count = gmx_count_gpu_dev_shared(&hw_opt->gpu_opt);

            if (same_count > 0)
            {
                md_print_info(cr, fplog,
                              "NOTE: You assigned %s to multiple %s%s.\n",
                              same_count > 1 ? "GPUs" : "a GPU", th_or_proc, btMPI ? "s" : "es");
            }
        }
    }

#ifdef GMX_MPI
    if (PAR(cr))
    {
        /* Prevent other ranks from continuing after an
           inconsistency was found */
        MPI_Barrier(cr->mpi_comm_mygroup);
    }
#endif
}
/* Return 0 if none of the GPUs (per node) are shared among PP ranks.
 *
 * Sharing GPUs among multiple PP ranks is possible when the user passes
 * GPU IDs. Here we check for sharing and return a non-zero value when
 * this is detected. Note that the return value represents the number of
 * PP rank pairs that share a device.
 */
int gmx_count_gpu_dev_shared(const gmx_gpu_opt_t *gpu_opt)
{
    int same_count = 0;
    int ngpu       = gpu_opt->ncuda_dev_use;

    if (gpu_opt->bUserSet)
    {
        int i, j;

        for (i = 0; i < ngpu - 1; i++)
        {
            for (j = i + 1; j < ngpu; j++)
            {
                same_count += (gpu_opt->cuda_dev_use[i] ==
                               gpu_opt->cuda_dev_use[j]);
            }
        }
    }

    return same_count;
}
/* Count and return the number of unique GPUs (per node) selected.
 *
 * As sharing GPUs among multiple PP ranks is possible when the user passes
 * GPU IDs, the number of GPUs used (per node) can be different from the
 * number of GPU IDs selected.
 */
static int gmx_count_gpu_dev_unique(const gmx_gpu_info_t *gpu_info,
                                    const gmx_gpu_opt_t  *gpu_opt)
{
    int  i, uniq_count, ngpu;
    int *uniq_ids;

    assert(gpu_info);
    assert(gpu_opt);

    ngpu       = gpu_info->ncuda_dev;
    uniq_count = 0;

    snew(uniq_ids, ngpu);

    /* Each element in uniq_ids will be set to 0 or 1. The n-th element set
     * to 1 indicates that the respective GPU was selected to be used. */
    for (i = 0; i < gpu_opt->ncuda_dev_use; i++)
    {
        uniq_ids[get_gpu_device_id(gpu_info, gpu_opt, i)] = 1;
    }
    /* Count the devices used. */
    for (i = 0; i < ngpu; i++)
    {
        uniq_count += uniq_ids[i];
    }

    sfree(uniq_ids);

    return uniq_count;
}
/* Return the number of hardware threads supported by the current CPU.
 * We assume that this is equal to the number of "processors"
 * reported to be online by the OS at the time of the call. The
 * definition of "processor" is according to an old POSIX standard.
 *
 * Note that the number of hardware threads is generally greater than
 * the number of cores (e.g. x86 hyper-threading, Power). The mapping
 * of software threads to hardware threads is managed elsewhere. */
static int get_nthreads_hw_avail(FILE gmx_unused *fplog, const t_commrec gmx_unused *cr)
{
    int ret = 0;

#if ((defined(WIN32) || defined( _WIN32 ) || defined(WIN64) || defined( _WIN64 )) && !(defined (__CYGWIN__) || defined (__CYGWIN32__)))
    /* Windows */
    SYSTEM_INFO sysinfo;
    GetSystemInfo( &sysinfo );
    ret = sysinfo.dwNumberOfProcessors;
#elif defined HAVE_SYSCONF
    /* We are probably on Unix.
     * Now check if we have the argument to use before executing the call
     */
#if defined(_SC_NPROCESSORS_ONLN)
    ret = sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(_SC_NPROC_ONLN)
    ret = sysconf(_SC_NPROC_ONLN);
#elif defined(_SC_NPROCESSORS_CONF)
    ret = sysconf(_SC_NPROCESSORS_CONF);
#elif defined(_SC_NPROC_CONF)
    ret = sysconf(_SC_NPROC_CONF);
#else
#warning "No valid sysconf argument value found. Executables will not be able to determine the number of hardware threads: mdrun will use 1 thread by default!"
#endif /* End of check for sysconf argument values */

#else
    /* Neither Windows nor Unix; we cannot determine the number of hardware threads. */
    ret = -1;
#endif

    if (debug)
    {
        fprintf(debug, "Detected %d hardware threads to use.\n", ret);
    }

#ifdef GMX_OPENMP
    if (ret != gmx_omp_get_num_procs())
    {
        md_print_warn(cr, fplog,
                      "Number of hardware threads detected (%d) does not match the number reported by OpenMP (%d).\n"
                      "Consider setting the launch configuration manually!",
                      ret, gmx_omp_get_num_procs());
    }
#endif

    return ret;
}
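
/* Run GPU detection on one rank per physical node and, with library MPI,
 * broadcast the result to the other ranks on that node. */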
static void gmx_detect_gpus(FILE *fplog, const t_commrec *cr)
{
#ifdef GMX_LIB_MPI
    int      rank_world;
    MPI_Comm physicalnode_comm;
#endif
    int      rank_local;

    /* Under certain circumstances MPI ranks on the same physical node
     * can not simultaneously access the same GPU(s). Therefore we run
     * the detection only on one MPI rank per node and broadcast the info.
     * Note that with thread-MPI only a single thread runs this code.
     *
     * TODO: We should also do CPU hardware detection only once on each
     * physical node and broadcast it, instead of doing it on every MPI rank.
     */
#ifdef GMX_LIB_MPI
    /* A split of MPI_COMM_WORLD over physical nodes is only required here,
     * so we create and destroy it locally.
     */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
    MPI_Comm_split(MPI_COMM_WORLD, gmx_physicalnode_id_hash(),
                   rank_world, &physicalnode_comm);
    MPI_Comm_rank(physicalnode_comm, &rank_local);
#else
    /* Here there should be only one process, check this */
    assert(cr->nnodes == 1 && cr->sim_nodeid == 0);

    rank_local = 0;
#endif

    if (rank_local == 0)
    {
        char detection_error[STRLEN] = "", sbuf[STRLEN];

        if (detect_cuda_gpus(&hwinfo_g->gpu_info, detection_error) != 0)
        {
            if (detection_error[0] != '\0')
            {
                sprintf(sbuf, ":\n      %s\n", detection_error);
            }
            else
            {
                sprintf(sbuf, ".");
            }
            md_print_warn(cr, fplog,
                          "NOTE: Error occurred during GPU detection%s"
                          "      Can not use GPU acceleration, will fall back to CPU kernels.\n",
                          sbuf);
        }
    }

#ifdef GMX_LIB_MPI
    /* Broadcast the GPU info to the other ranks within this node */
    MPI_Bcast(&hwinfo_g->gpu_info.ncuda_dev, 1, MPI_INT, 0, physicalnode_comm);

    if (hwinfo_g->gpu_info.ncuda_dev > 0)
    {
        int cuda_dev_size;

        cuda_dev_size = hwinfo_g->gpu_info.ncuda_dev*sizeof_cuda_dev_info();

        if (rank_local > 0)
        {
            hwinfo_g->gpu_info.cuda_dev =
                (cuda_dev_info_ptr_t)malloc(cuda_dev_size);
        }
        MPI_Bcast(hwinfo_g->gpu_info.cuda_dev, cuda_dev_size, MPI_BYTE,
                  0, physicalnode_comm);
        MPI_Bcast(&hwinfo_g->gpu_info.ncuda_dev_compatible, 1, MPI_INT,
                  0, physicalnode_comm);
    }

    MPI_Comm_free(&physicalnode_comm);
#endif
}
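
/* Typical call sequence from the mdrun setup code (a sketch only; the exact
 * call sites and arguments live in the runner, not in this file):
 *
 *     gmx_hw_info_t *hwinfo = gmx_detect_hardware(fplog, cr, TRUE);
 *     gmx_parse_gpu_ids(&hw_opt->gpu_opt);
 *     gmx_select_gpu_ids(fplog, cr, &hwinfo->gpu_info, bForceUseGPU, &hw_opt->gpu_opt);
 *     gmx_check_hw_runconf_consistency(fplog, hwinfo, cr, hw_opt, bUseGPU);
 *     ...
 *     gmx_hardware_info_free(hwinfo);
 */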
gmx_hw_info_t *gmx_detect_hardware(FILE *fplog, const t_commrec *cr,
                                   gmx_bool bDetectGPUs)
{
    int ret;

    /* make sure no one else is doing the same thing */
    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
    }

    /* only initialize the hwinfo structure if it is not already initialized */
    if (n_hwinfo == 0)
    {
        snew(hwinfo_g, 1);

        /* detect CPUID info; no fuss, we don't detect system-wide
         * -- sloppy, but that's it for now */
        if (gmx_cpuid_init(&hwinfo_g->cpuid_info) != 0)
        {
            gmx_fatal_collective(FARGS, cr, NULL, "CPUID detection failed!");
        }

        /* detect number of hardware threads */
        hwinfo_g->nthreads_hw_avail = get_nthreads_hw_avail(fplog, cr);

        /* detect GPUs */
        hwinfo_g->gpu_info.ncuda_dev            = 0;
        hwinfo_g->gpu_info.cuda_dev             = NULL;
        hwinfo_g->gpu_info.ncuda_dev_compatible = 0;

        /* Run the detection if the binary was compiled with GPU support
         * and we requested detection.
         */
        hwinfo_g->gpu_info.bDetectGPUs =
            (bGPUBinary && bDetectGPUs &&
             getenv("GMX_DISABLE_GPU_DETECTION") == NULL);
        if (hwinfo_g->gpu_info.bDetectGPUs)
        {
            gmx_detect_gpus(fplog, cr);
        }
    }
    /* increase the reference counter */
    n_hwinfo++;

    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
    }

    return hwinfo_g;
}
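
/* Parse the user-provided GPU ID string, taken from mdrun -gpu_id or the
 * GMX_GPU_ID environment variable (using both is an error), into
 * gpu_opt->cuda_dev_use and mark the selection as user-set. */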
void gmx_parse_gpu_ids(gmx_gpu_opt_t *gpu_opt)
{
    char *env;

    if (gpu_opt->gpu_id != NULL && !bGPUBinary)
    {
        gmx_fatal(FARGS, "GPU ID string set, but %s was compiled without GPU support!", ShortProgram());
    }

    env = getenv("GMX_GPU_ID");
    if (env != NULL && gpu_opt->gpu_id != NULL)
    {
        gmx_fatal(FARGS, "GMX_GPU_ID and -gpu_id can not be used at the same time");
    }
    if (env == NULL)
    {
        env = gpu_opt->gpu_id;
    }

    /* parse GPU IDs if the user passed any */
    if (env != NULL)
    {
        /* Parse a "plain" GPU ID string which contains a sequence of
         * digits corresponding to GPU IDs; the order will indicate
         * the process/tMPI thread - GPU assignment. */
        parse_digits_from_plain_string(env,
                                       &gpu_opt->ncuda_dev_use,
                                       &gpu_opt->cuda_dev_use);

        if (gpu_opt->ncuda_dev_use == 0)
        {
            gmx_fatal(FARGS, "Empty GPU ID string encountered.\n%s\n",
                      invalid_gpuid_hint);
        }

        gpu_opt->bUserSet = TRUE;
    }
}
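
/* Either validate the user-provided GPU IDs against the detected devices,
 * or automatically pick compatible GPUs and limit their number to the PP
 * ranks on this node. */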
void gmx_select_gpu_ids(FILE *fplog, const t_commrec *cr,
                        const gmx_gpu_info_t *gpu_info,
                        gmx_bool bForceUseGPU,
                        gmx_gpu_opt_t *gpu_opt)
{
    int  i;
    char sbuf[STRLEN], stmp[STRLEN];

    /* Bail if binary is not compiled with GPU acceleration, but this is either
     * explicitly (-nb gpu) or implicitly (gpu ID passed) requested. */
    if (bForceUseGPU && !bGPUBinary)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but %s was compiled without GPU support!", ShortProgram());
    }

    if (gpu_opt->bUserSet)
    {
        /* Check the GPU IDs passed by the user.
         * (GPU IDs have been parsed by gmx_parse_gpu_ids before)
         */
        int *checkres;
        int  res;

        snew(checkres, gpu_opt->ncuda_dev_use);

        res = check_selected_cuda_gpus(checkres, gpu_info, gpu_opt);

        if (!res)
        {
            print_gpu_detection_stats(fplog, gpu_info, cr);

            sprintf(sbuf, "Some of the requested GPUs do not exist, behave strangely, or are not compatible:\n");
            for (i = 0; i < gpu_opt->ncuda_dev_use; i++)
            {
                if (checkres[i] != egpuCompatible)
                {
                    sprintf(stmp, "    GPU #%d: %s\n",
                            gpu_opt->cuda_dev_use[i],
                            gpu_detect_res_str[checkres[i]]);
                    strcat(sbuf, stmp);
                }
            }
            gmx_fatal(FARGS, "%s", sbuf);
        }

        sfree(checkres);
    }
    else
    {
        pick_compatible_gpus(&hwinfo_g->gpu_info, gpu_opt);
        limit_num_gpus_used(gpu_opt, cr->nrank_pp_intranode);
    }

    /* If the user asked for a GPU, check whether we have a GPU */
    if (bForceUseGPU && gpu_info->ncuda_dev_compatible == 0)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but no compatible GPUs were detected.");
    }
}
/* If we detected more compatible GPUs than we can use, limit the
 * number. We print detailed messages about this later in
 * gmx_check_hw_runconf_consistency.
 */
static void limit_num_gpus_used(gmx_gpu_opt_t *gpu_opt, int maxNumberToUse)
{
    GMX_RELEASE_ASSERT(gpu_opt, "Invalid gpu_opt pointer passed");
    GMX_RELEASE_ASSERT(maxNumberToUse >= 1,
                       gmx::formatString("Invalid limit (%d) for the number of GPUs (detected %d compatible GPUs)",
                                         maxNumberToUse, gpu_opt->ncuda_dev_compatible).c_str());

    /* Don't increase the number of GPUs used beyond (e.g.) the number
       of PP ranks */
    gpu_opt->ncuda_dev_use = std::min(gpu_opt->ncuda_dev_compatible, maxNumberToUse);
    snew(gpu_opt->cuda_dev_use, gpu_opt->ncuda_dev_use);
    for (int i = 0; i != gpu_opt->ncuda_dev_use; ++i)
    {
        /* TODO: improve this implementation: either sort GPUs or remove the weakest here */
        gpu_opt->cuda_dev_use[i] = gpu_opt->cuda_dev_compatible[i];
    }
}
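
/* Decrease the reference count on the globally shared hwinfo structure and
 * release the CPU and GPU information when the last reference is dropped. */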
void gmx_hardware_info_free(gmx_hw_info_t *hwinfo)
{
    int ret;

    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
    }

    /* decrease the reference counter */
    n_hwinfo--;

    if (hwinfo != hwinfo_g)
    {
        gmx_incons("hwinfo != hwinfo_g");
    }

    if (n_hwinfo < 0)
    {
        gmx_incons("n_hwinfo < 0");
    }

    if (n_hwinfo == 0)
    {
        gmx_cpuid_done(hwinfo_g->cpuid_info);
        free_gpu_info(&hwinfo_g->gpu_info);
        sfree(hwinfo_g);
    }

    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
    }
}