src/gromacs/gmxlib/gmx_detect_hardware.c
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdlib.h>
#include <assert.h>
#include <string.h>
/* For errno, used with strerror() when reporting mutex failures below */
#include <errno.h>

#ifdef HAVE_UNISTD_H
/* For sysconf */
#include <unistd.h>
#endif

#include "types/enums.h"
#include "types/hw_info.h"
#include "types/commrec.h"
#include "gmx_fatal.h"
#include "gmx_fatal_collective.h"
#include "md_logging.h"
#include "gmx_cpuid.h"
#include "gromacs/utility/smalloc.h"
#include "gpu_utils.h"
#include "copyrite.h"
#include "gmx_detect_hardware.h"
#include "main.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/cstringutil.h"

#include "thread_mpi/threads.h"

#ifdef GMX_NATIVE_WINDOWS
#include <windows.h>
#endif
#ifdef GMX_GPU
const gmx_bool bGPUBinary = TRUE;
#else
const gmx_bool bGPUBinary = FALSE;
#endif

static const char *invalid_gpuid_hint =
    "A delimiter-free sequence of valid numeric IDs of available GPUs is expected.";

/* The globally shared hwinfo structure. */
static gmx_hw_info_t      *hwinfo_g;
/* A reference counter for the hwinfo structure */
static int                 n_hwinfo = 0;
/* A lock to protect the hwinfo structure */
static tMPI_Thread_mutex_t hw_info_lock = TMPI_THREAD_MUTEX_INITIALIZER;

/* FW decl. */
static void limit_num_gpus_used(gmx_gpu_opt_t *gpu_opt, int count);
static int  gmx_count_gpu_dev_unique(const gmx_gpu_info_t *gpu_info,
                                     const gmx_gpu_opt_t  *gpu_opt);
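
/*
 * Minimal usage sketch for the shared, reference-counted hwinfo data above
 * (illustrative only, assuming the usual mdrun-style call sequence): every
 * gmx_detect_hardware() call is paired with a gmx_hardware_info_free() call,
 * and the mutex above makes concurrent calls from thread-MPI threads safe.
 *
 *     gmx_hw_info_t *hwinfo = gmx_detect_hardware(fplog, cr, TRUE);
 *     ... use hwinfo->nthreads_hw_avail, hwinfo->gpu_info, ...
 *     gmx_hardware_info_free(hwinfo);
 */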
static void sprint_gpus(char *sbuf, const gmx_gpu_info_t *gpu_info)
{
    int  i, ndev;
    char stmp[STRLEN];

    ndev = gpu_info->ncuda_dev;

    sbuf[0] = '\0';
    for (i = 0; i < ndev; i++)
    {
        get_gpu_device_info_string(stmp, gpu_info, i);
        strcat(sbuf, " ");
        strcat(sbuf, stmp);
        if (i < ndev - 1)
        {
            strcat(sbuf, "\n");
        }
    }
}
static void print_gpu_detection_stats(FILE                 *fplog,
                                      const gmx_gpu_info_t *gpu_info,
                                      const t_commrec      *cr)
{
    char onhost[266], stmp[STRLEN];
    int  ngpu;

    if (!gpu_info->bDetectGPUs)
    {
        /* We skipped the detection, so don't print detection stats */
        return;
    }

    ngpu = gpu_info->ncuda_dev;

#if defined GMX_MPI && !defined GMX_THREAD_MPI
    /* We only print the detection on one, of possibly multiple, nodes */
    strncpy(onhost, " on host ", 10);
    gmx_gethostname(onhost+9, 256);
#else
    /* We detect all relevant GPUs */
    strncpy(onhost, "", 1);
#endif

    if (ngpu > 0)
    {
        sprint_gpus(stmp, gpu_info);
        md_print_warn(cr, fplog, "%d GPU%s detected%s:\n%s\n",
                      ngpu, (ngpu > 1) ? "s" : "", onhost, stmp);
    }
    else
    {
        md_print_warn(cr, fplog, "No GPUs detected%s\n", onhost);
    }
}
static void print_gpu_use_stats(FILE                 *fplog,
                                const gmx_gpu_info_t *gpu_info,
                                const gmx_gpu_opt_t  *gpu_opt,
                                const t_commrec      *cr)
{
    char sbuf[STRLEN], stmp[STRLEN];
    int  i, ngpu_comp, ngpu_use;

    ngpu_comp = gpu_info->ncuda_dev_compatible;
    ngpu_use  = gpu_opt->ncuda_dev_use;

    /* Issue a note if GPUs are available but not used */
    if (ngpu_comp > 0 && ngpu_use < 1)
    {
        sprintf(sbuf,
                "%d compatible GPU%s detected in the system, but none will be used.\n"
                "Consider trying GPU acceleration with the Verlet scheme!",
                ngpu_comp, (ngpu_comp > 1) ? "s" : "");
    }
    else
    {
        int ngpu_use_uniq;

        ngpu_use_uniq = gmx_count_gpu_dev_unique(gpu_info, gpu_opt);

        sprintf(sbuf, "%d GPU%s %sselected for this run.\n"
                "Mapping of GPU%s to the %d PP rank%s in this node: ",
                ngpu_use_uniq, (ngpu_use_uniq > 1) ? "s" : "",
                gpu_opt->bUserSet ? "user-" : "auto-",
                (ngpu_use > 1) ? "s" : "",
                cr->nrank_pp_intranode,
                (cr->nrank_pp_intranode > 1) ? "s" : "");

        for (i = 0; i < ngpu_use; i++)
        {
            sprintf(stmp, "#%d", get_gpu_device_id(gpu_info, gpu_opt, i));
            if (i < ngpu_use - 1)
            {
                strcat(stmp, ", ");
            }
            strcat(sbuf, stmp);
        }
    }
    md_print_info(cr, fplog, "%s\n\n", sbuf);
}
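
/*
 * For illustration, the format strings above produce a report along these
 * lines (the numbers here are made up, not taken from a real run):
 *
 *     2 GPUs auto-selected for this run.
 *     Mapping of GPUs to the 2 PP ranks in this node: #0, #1
 */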
/* Give a suitable fatal error or warning if the build configuration
   and runtime CPU do not match. */
static void
check_use_of_rdtscp_on_this_cpu(FILE                *fplog,
                                const t_commrec     *cr,
                                const gmx_hw_info_t *hwinfo)
{
    gmx_bool bCpuHasRdtscp, bBinaryUsesRdtscp;
#ifdef HAVE_RDTSCP
    bBinaryUsesRdtscp = TRUE;
#else
    bBinaryUsesRdtscp = FALSE;
#endif

    bCpuHasRdtscp = gmx_cpuid_feature(hwinfo->cpuid_info, GMX_CPUID_FEATURE_X86_RDTSCP);

    if (!bCpuHasRdtscp && bBinaryUsesRdtscp)
    {
        gmx_fatal(FARGS, "The %s executable was compiled to use the rdtscp CPU instruction. "
                  "However, this is not supported by the current hardware and continuing would lead to a crash. "
                  "Please rebuild GROMACS with the GMX_USE_RDTSCP=OFF CMake option.",
                  ShortProgram());
    }

    if (bCpuHasRdtscp && !bBinaryUsesRdtscp)
    {
        md_print_warn(cr, fplog, "The current CPU can measure timings more accurately than the code in\n"
                      "%s was configured to use. This might affect your simulation\n"
                      "speed as accurate timings are needed for load-balancing.\n"
                      "Please consider rebuilding %s with the GMX_USE_RDTSCP=ON CMake option.\n",
                      ShortProgram(), ShortProgram());
    }
}
void gmx_check_hw_runconf_consistency(FILE                *fplog,
                                      const gmx_hw_info_t *hwinfo,
                                      const t_commrec     *cr,
                                      const gmx_hw_opt_t  *hw_opt,
                                      gmx_bool             bUseGPU)
{
    int      npppn, ntmpi_pp;
    char     sbuf[STRLEN], th_or_proc[STRLEN], th_or_proc_plural[STRLEN], pernode[STRLEN];
    gmx_bool btMPI, bMPI, bMaxMpiThreadsSet, bNthreadsAuto, bEmulateGPU;

    assert(hwinfo);
    assert(cr);

    /* Below we only do consistency checks for PP and GPUs,
     * this is irrelevant for PME-only nodes, so in that case we return
     * here.
     */
    if (!(cr->duty & DUTY_PP))
    {
        return;
    }

    btMPI         = bMPI = FALSE;
    bNthreadsAuto = FALSE;
#if defined(GMX_THREAD_MPI)
    btMPI         = TRUE;
    bNthreadsAuto = (hw_opt->nthreads_tmpi < 1);
#elif defined(GMX_LIB_MPI)
    bMPI          = TRUE;
#endif

    /* GPU emulation detection is done later, but we need it here as well
     * -- uncool, but there's no elegant workaround */
    bEmulateGPU       = (getenv("GMX_EMULATE_GPU") != NULL);
    bMaxMpiThreadsSet = (getenv("GMX_MAX_MPI_THREADS") != NULL);

    /* check the SIMD level mdrun is compiled with against hardware
       capabilities */
    /* TODO: Here we assume homogeneous hardware which is not necessarily
             the case! Might not hurt to add an extra check over MPI. */
    gmx_cpuid_simd_check(hwinfo->cpuid_info, fplog, SIMMASTER(cr));

    check_use_of_rdtscp_on_this_cpu(fplog, cr, hwinfo);

    /* NOTE: this print is only for and on one physical node */
    print_gpu_detection_stats(fplog, &hwinfo->gpu_info, cr);

    if (hwinfo->gpu_info.ncuda_dev_compatible > 0)
    {
        /* NOTE: this print is only for and on one physical node */
        print_gpu_use_stats(fplog, &hwinfo->gpu_info, &hw_opt->gpu_opt, cr);
    }

    /* Need to ensure that we have enough GPUs:
     * - need one GPU per PP node
     * - no GPU oversubscription with tMPI
     */
    /* number of PP processes per node */
    npppn = cr->nrank_pp_intranode;

    pernode[0]           = '\0';
    th_or_proc_plural[0] = '\0';
    if (btMPI)
    {
        sprintf(th_or_proc, "thread-MPI thread");
        if (npppn > 1)
        {
            sprintf(th_or_proc_plural, "s");
        }
    }
    else if (bMPI)
    {
        sprintf(th_or_proc, "MPI process");
        if (npppn > 1)
        {
            sprintf(th_or_proc_plural, "es");
        }
        sprintf(pernode, " per node");
    }
    else
    {
        /* neither MPI nor tMPI */
        sprintf(th_or_proc, "process");
    }

    if (bUseGPU && hwinfo->gpu_info.ncuda_dev_compatible > 0 &&
        !bEmulateGPU)
    {
        int  ngpu_comp, ngpu_use;
        char gpu_comp_plural[2], gpu_use_plural[2];

        ngpu_comp = hwinfo->gpu_info.ncuda_dev_compatible;
        ngpu_use  = hw_opt->gpu_opt.ncuda_dev_use;

        sprintf(gpu_comp_plural, "%s", (ngpu_comp > 1) ? "s" : "");
        sprintf(gpu_use_plural,  "%s", (ngpu_use > 1) ? "s" : "");

        /* number of tMPI threads auto-adjusted */
        if (btMPI && bNthreadsAuto)
        {
            if (hw_opt->gpu_opt.bUserSet && npppn < ngpu_use)
            {
                /* The user manually provided more GPUs than threads we
                   could automatically start. */
                gmx_fatal(FARGS,
                          "%d GPU%s provided, but only %d PP thread-MPI thread%s could be started.\n"
                          "%s requires one PP thread-MPI thread per GPU; use fewer GPUs%s.",
                          ngpu_use, gpu_use_plural,
                          npppn, th_or_proc_plural,
                          ShortProgram(), bMaxMpiThreadsSet ? "\nor allow more threads to be used" : "");
            }

            if (!hw_opt->gpu_opt.bUserSet && npppn < ngpu_comp)
            {
                /* There are more GPUs than tMPI threads; we have
                   limited the number of GPUs used. */
                md_print_warn(cr, fplog,
                              "NOTE: %d GPU%s were detected, but only %d PP thread-MPI thread%s can be started.\n"
                              " %s can use one GPU per PP thread-MPI thread, so only %d GPU%s will be used.%s\n",
                              ngpu_comp, gpu_comp_plural,
                              npppn, th_or_proc_plural,
                              ShortProgram(), npppn,
                              npppn > 1 ? "s" : "",
                              bMaxMpiThreadsSet ? "\n Also, you can allow more threads to be used by increasing GMX_MAX_MPI_THREADS" : "");
            }
        }

        if (hw_opt->gpu_opt.bUserSet)
        {
            if (ngpu_use != npppn)
            {
                gmx_fatal(FARGS,
                          "Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
                          "%s was started with %d PP %s%s%s, but you provided %d GPU%s.",
                          th_or_proc, btMPI ? "s" : "es", pernode,
                          ShortProgram(), npppn, th_or_proc,
                          th_or_proc_plural, pernode,
                          ngpu_use, gpu_use_plural);
            }
        }
        else
        {
            if (ngpu_comp > npppn)
            {
                md_print_warn(cr, fplog,
                              "NOTE: potentially sub-optimal launch configuration, %s started with fewer\n"
                              " PP %s%s%s than GPU%s available.\n"
                              " Each PP %s can use only one GPU, %d GPU%s%s will be used.\n",
                              ShortProgram(), th_or_proc,
                              th_or_proc_plural, pernode, gpu_comp_plural,
                              th_or_proc, npppn, gpu_use_plural, pernode);
            }

            if (ngpu_use != npppn)
            {
                /* Avoid duplicate error messages.
                 * Unfortunately we can only do this at the physical node
                 * level, since the hardware setup and MPI process count
                 * might differ between physical nodes.
                 */
                if (cr->rank_pp_intranode == 0)
                {
                    gmx_fatal(FARGS,
                              "Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
                              "%s was started with %d PP %s%s%s, but only %d GPU%s were detected.",
                              th_or_proc, btMPI ? "s" : "es", pernode,
                              ShortProgram(), npppn, th_or_proc,
                              th_or_proc_plural, pernode,
                              ngpu_use, gpu_use_plural);
                }
            }
        }

        {
            int same_count;

            same_count = gmx_count_gpu_dev_shared(&hw_opt->gpu_opt);

            if (same_count > 0)
            {
                md_print_info(cr, fplog,
                              "NOTE: You assigned %s to multiple %s%s.\n",
                              same_count > 1 ? "GPUs" : "a GPU", th_or_proc, btMPI ? "s" : "es");
            }
        }
    }

#ifdef GMX_MPI
    if (PAR(cr))
    {
        /* Prevent other ranks from continuing after an inconsistency */
        MPI_Barrier(cr->mpi_comm_mygroup);
    }
#endif
}
/* Return 0 if none of the GPUs (per node) are shared among PP ranks.
 *
 * Sharing GPUs among multiple PP ranks is possible when the user passes
 * GPU IDs. Here we check for sharing and return a non-zero value when
 * this is detected. Note that the return value represents the number of
 * PP rank pairs that share a device.
 */
int gmx_count_gpu_dev_shared(const gmx_gpu_opt_t *gpu_opt)
{
    int same_count = 0;
    int ngpu       = gpu_opt->ncuda_dev_use;

    if (gpu_opt->bUserSet)
    {
        int i, j;

        for (i = 0; i < ngpu - 1; i++)
        {
            for (j = i + 1; j < ngpu; j++)
            {
                same_count += (gpu_opt->cuda_dev_use[i] ==
                               gpu_opt->cuda_dev_use[j]);
            }
        }
    }

    return same_count;
}
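
/*
 * Worked example for gmx_count_gpu_dev_shared(), using a hypothetical
 * user-set GPU ID string "0011": cuda_dev_use is {0, 0, 1, 1}, the rank
 * pairs (0,1) and (2,3) map to the same device, and the function returns 2.
 */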
/* Count and return the number of unique GPUs (per node) selected.
 *
 * As sharing GPUs among multiple PP ranks is possible when the user passes
 * GPU IDs, the number of GPUs used (per node) can be different from the
 * number of GPU IDs selected.
 */
static int gmx_count_gpu_dev_unique(const gmx_gpu_info_t *gpu_info,
                                    const gmx_gpu_opt_t  *gpu_opt)
{
    int  i, uniq_count, ngpu;
    int *uniq_ids;

    assert(gpu_info);
    assert(gpu_opt);

    ngpu       = gpu_info->ncuda_dev;
    uniq_count = 0;

    snew(uniq_ids, ngpu);

    /* Each element in uniq_ids will be set to 0 or 1. The n-th element set
     * to 1 indicates that the respective GPU was selected to be used. */
    for (i = 0; i < gpu_opt->ncuda_dev_use; i++)
    {
        uniq_ids[get_gpu_device_id(gpu_info, gpu_opt, i)] = 1;
    }
    /* Count the devices used. */
    for (i = 0; i < ngpu; i++)
    {
        uniq_count += uniq_ids[i];
    }

    sfree(uniq_ids);

    return uniq_count;
}
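
/*
 * Continuing the hypothetical "0011" example above: only two distinct
 * devices (#0 and #1) appear among the four selected GPU IDs, so
 * gmx_count_gpu_dev_unique() returns 2.
 */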
/* Return the number of hardware threads supported by the current CPU.
 * We assume that this is equal to the number of CPUs reported to be
 * online by the OS at the time of the call.
 */
static int get_nthreads_hw_avail(FILE gmx_unused *fplog, const t_commrec gmx_unused *cr)
{
    int ret = 0;

#if ((defined(WIN32) || defined( _WIN32 ) || defined(WIN64) || defined( _WIN64 )) && !(defined (__CYGWIN__) || defined (__CYGWIN32__)))
    /* Windows */
    SYSTEM_INFO sysinfo;
    GetSystemInfo( &sysinfo );
    ret = sysinfo.dwNumberOfProcessors;
#elif defined HAVE_SYSCONF
    /* We are probably on Unix.
     * Now check if we have the argument to use before executing the call
     */
#if defined(_SC_NPROCESSORS_ONLN)
    ret = sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(_SC_NPROC_ONLN)
    ret = sysconf(_SC_NPROC_ONLN);
#elif defined(_SC_NPROCESSORS_CONF)
    ret = sysconf(_SC_NPROCESSORS_CONF);
#elif defined(_SC_NPROC_CONF)
    ret = sysconf(_SC_NPROC_CONF);
#else
#warning "No valid sysconf argument value found. Executables will not be able to determine the number of hardware threads: mdrun will use 1 thread by default!"
#endif /* End of check for sysconf argument values */

#else
    /* Neither Windows nor Unix: we have no way to determine the CPU count. */
    ret = -1;
#endif

    if (debug)
    {
        fprintf(debug, "Detected %d processors, will use this as the number "
                "of supported hardware threads.\n", ret);
    }

#ifdef GMX_OPENMP
    if (ret != gmx_omp_get_num_procs())
    {
        md_print_warn(cr, fplog,
                      "Number of CPUs detected (%d) does not match the number reported by OpenMP (%d).\n"
                      "Consider setting the launch configuration manually!",
                      ret, gmx_omp_get_num_procs());
    }
#endif

    return ret;
}
static void gmx_detect_gpus(FILE *fplog, const t_commrec *cr)
{
#ifdef GMX_LIB_MPI
    int      rank_world;
    MPI_Comm physicalnode_comm;
#endif
    int      rank_local;

    /* Under certain circumstances MPI ranks on the same physical node
     * can not simultaneously access the same GPU(s). Therefore we run
     * the detection only on one MPI rank per node and broadcast the info.
     * Note that with thread-MPI only a single thread runs this code.
     *
     * TODO: We should also do CPU hardware detection only once on each
     * physical node and broadcast it, instead of doing it on every MPI rank.
     */
#ifdef GMX_LIB_MPI
    /* A split of MPI_COMM_WORLD over physical nodes is only required here,
     * so we create and destroy it locally.
     */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
    MPI_Comm_split(MPI_COMM_WORLD, gmx_physicalnode_id_hash(),
                   rank_world, &physicalnode_comm);
    MPI_Comm_rank(physicalnode_comm, &rank_local);
#else
    /* Here there should be only one process, check this */
    assert(cr->nnodes == 1 && cr->sim_nodeid == 0);

    rank_local = 0;
#endif

    if (rank_local == 0)
    {
        char detection_error[STRLEN] = "", sbuf[STRLEN];

        if (detect_cuda_gpus(&hwinfo_g->gpu_info, detection_error) != 0)
        {
            if (detection_error[0] != '\0')
            {
                sprintf(sbuf, ":\n %s\n", detection_error);
            }
            else
            {
                sprintf(sbuf, ".");
            }
            md_print_warn(cr, fplog,
                          "NOTE: Error occurred during GPU detection%s"
                          " Can not use GPU acceleration, will fall back to CPU kernels.\n",
                          sbuf);
        }
    }

#ifdef GMX_LIB_MPI
    /* Broadcast the GPU info to the other ranks within this node */
    MPI_Bcast(&hwinfo_g->gpu_info.ncuda_dev, 1, MPI_INT, 0, physicalnode_comm);

    if (hwinfo_g->gpu_info.ncuda_dev > 0)
    {
        int cuda_dev_size;

        cuda_dev_size = hwinfo_g->gpu_info.ncuda_dev*sizeof_cuda_dev_info();

        if (rank_local > 0)
        {
            hwinfo_g->gpu_info.cuda_dev =
                (cuda_dev_info_ptr_t)malloc(cuda_dev_size);
        }
        MPI_Bcast(hwinfo_g->gpu_info.cuda_dev, cuda_dev_size, MPI_BYTE,
                  0, physicalnode_comm);
        MPI_Bcast(&hwinfo_g->gpu_info.ncuda_dev_compatible, 1, MPI_INT,
                  0, physicalnode_comm);
    }

    MPI_Comm_free(&physicalnode_comm);
#endif
}
gmx_hw_info_t *gmx_detect_hardware(FILE *fplog, const t_commrec *cr,
                                   gmx_bool bDetectGPUs)
{
    gmx_hw_info_t *hw;
    int            ret;

    /* make sure no one else is doing the same thing */
    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
    }

    /* only initialize the hwinfo structure if it is not already initialized */
    if (n_hwinfo == 0)
    {
        snew(hwinfo_g, 1);

        /* detect CPUID info; no fuss, we don't detect system-wide
         * -- sloppy, but that's it for now */
        if (gmx_cpuid_init(&hwinfo_g->cpuid_info) != 0)
        {
            gmx_fatal_collective(FARGS, cr, NULL, "CPUID detection failed!");
        }

        /* detect number of hardware threads */
        hwinfo_g->nthreads_hw_avail = get_nthreads_hw_avail(fplog, cr);

        /* detect GPUs */
        hwinfo_g->gpu_info.ncuda_dev            = 0;
        hwinfo_g->gpu_info.cuda_dev             = NULL;
        hwinfo_g->gpu_info.ncuda_dev_compatible = 0;

        /* Run the detection if the binary was compiled with GPU support
         * and we requested detection.
         */
        hwinfo_g->gpu_info.bDetectGPUs =
            (bGPUBinary && bDetectGPUs &&
             getenv("GMX_DISABLE_GPU_DETECTION") == NULL);
        if (hwinfo_g->gpu_info.bDetectGPUs)
        {
            gmx_detect_gpus(fplog, cr);
        }
    }
    /* increase the reference counter */
    n_hwinfo++;

    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
    }

    return hwinfo_g;
}
void gmx_parse_gpu_ids(gmx_gpu_opt_t *gpu_opt)
{
    char *env;

    if (gpu_opt->gpu_id != NULL && !bGPUBinary)
    {
        gmx_fatal(FARGS, "GPU ID string set, but %s was compiled without GPU support!", ShortProgram());
    }

    env = getenv("GMX_GPU_ID");
    if (env != NULL && gpu_opt->gpu_id != NULL)
    {
        gmx_fatal(FARGS, "GMX_GPU_ID and -gpu_id can not be used at the same time");
    }
    if (env == NULL)
    {
        env = gpu_opt->gpu_id;
    }

    /* parse GPU IDs if the user passed any */
    if (env != NULL)
    {
        /* Parse a "plain" GPU ID string which contains a sequence of
         * digits corresponding to GPU IDs; the order will indicate
         * the process/tMPI thread - GPU assignment. */
        parse_digits_from_plain_string(env,
                                       &gpu_opt->ncuda_dev_use,
                                       &gpu_opt->cuda_dev_use);

        if (gpu_opt->ncuda_dev_use == 0)
        {
            gmx_fatal(FARGS, "Empty GPU ID string encountered.\n%s\n",
                      invalid_gpuid_hint);
        }

        gpu_opt->bUserSet = TRUE;
    }
}
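
/*
 * Example (hypothetical input): running with "-gpu_id 01" or GMX_GPU_ID=01
 * yields ncuda_dev_use = 2 and cuda_dev_use = {0, 1}, i.e. the first PP rank
 * (or thread-MPI thread) on a node is assigned GPU #0 and the second GPU #1.
 */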
void gmx_select_gpu_ids(FILE *fplog, const t_commrec *cr,
                        const gmx_gpu_info_t *gpu_info,
                        gmx_bool bForceUseGPU,
                        gmx_gpu_opt_t *gpu_opt)
{
    int         i;
    const char *env;
    char        sbuf[STRLEN], stmp[STRLEN];

    /* Bail if binary is not compiled with GPU acceleration, but this is either
     * explicitly (-nb gpu) or implicitly (gpu ID passed) requested. */
    if (bForceUseGPU && !bGPUBinary)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but %s was compiled without GPU support!", ShortProgram());
    }

    if (gpu_opt->bUserSet)
    {
        /* Check the GPU IDs passed by the user.
         * (GPU IDs have been parsed by gmx_parse_gpu_ids before)
         */
        int *checkres;
        int  res;

        snew(checkres, gpu_opt->ncuda_dev_use);

        res = check_selected_cuda_gpus(checkres, gpu_info, gpu_opt);

        if (!res)
        {
            print_gpu_detection_stats(fplog, gpu_info, cr);

            sprintf(sbuf, "Some of the requested GPUs do not exist, behave strangely, or are not compatible:\n");
            for (i = 0; i < gpu_opt->ncuda_dev_use; i++)
            {
                if (checkres[i] != egpuCompatible)
                {
                    sprintf(stmp, " GPU #%d: %s\n",
                            gpu_opt->cuda_dev_use[i],
                            gpu_detect_res_str[checkres[i]]);
                    strcat(sbuf, stmp);
                }
            }
            gmx_fatal(FARGS, "%s", sbuf);
        }

        sfree(checkres);
    }
    else
    {
        pick_compatible_gpus(&hwinfo_g->gpu_info, gpu_opt);

        if (gpu_opt->ncuda_dev_use > cr->nrank_pp_intranode)
        {
            /* We picked more GPUs than we can use: limit the number.
             * We print detailed messages about this later in
             * gmx_check_hw_runconf_consistency.
             */
            limit_num_gpus_used(gpu_opt, cr->nrank_pp_intranode);
        }

        gpu_opt->bUserSet = FALSE;
    }

    /* If the user asked for a GPU, check whether we have a GPU */
    if (bForceUseGPU && gpu_info->ncuda_dev_compatible == 0)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but no compatible GPUs were detected.");
    }
}
static void limit_num_gpus_used(gmx_gpu_opt_t *gpu_opt, int count)
{
    int ndev_use;

    assert(gpu_opt);

    ndev_use = gpu_opt->ncuda_dev_use;

    if (count > ndev_use)
    {
        /* won't increase the # of GPUs */
        return;
    }

    if (count < 1)
    {
        char sbuf[STRLEN];
        sprintf(sbuf, "Limiting the number of GPUs to <1 doesn't make sense (detected %d, %d requested)!",
                ndev_use, count);
        gmx_incons(sbuf);
    }

    /* TODO: improve this implementation: either sort GPUs or remove the weakest here */
    gpu_opt->ncuda_dev_use = count;
}
void gmx_hardware_info_free(gmx_hw_info_t *hwinfo)
{
    int ret;

    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
    }

    /* decrease the reference counter */
    n_hwinfo--;

    if (hwinfo != hwinfo_g)
    {
        gmx_incons("hwinfo != hwinfo_g");
    }

    if (n_hwinfo < 0)
    {
        gmx_incons("n_hwinfo < 0");
    }

    if (n_hwinfo == 0)
    {
        gmx_cpuid_done(hwinfo_g->cpuid_info);
        free_gpu_info(&hwinfo_g->gpu_info);
        sfree(hwinfo_g);
    }

    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
    }
}