/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
48 #include "types/enums.h"
49 #include "types/hw_info.h"
50 #include "types/commrec.h"
51 #include "gmx_fatal.h"
52 #include "gmx_fatal_collective.h"
53 #include "md_logging.h"
54 #include "gmx_cpuid.h"
55 #include "gromacs/utility/smalloc.h"
56 #include "gpu_utils.h"
58 #include "gmx_detect_hardware.h"
60 #include "md_logging.h"
61 #include "gromacs/utility/gmxomp.h"
62 #include "gromacs/utility/cstringutil.h"
64 #include "thread_mpi/threads.h"
66 #ifdef GMX_NATIVE_WINDOWS
#ifdef GMX_GPU
const gmx_bool bGPUBinary = TRUE;
#else
const gmx_bool bGPUBinary = FALSE;
#endif
static const char *invalid_gpuid_hint =
    "A delimiter-free sequence of valid numeric IDs of available GPUs is expected.";
/* The globally shared hwinfo structure. */
static gmx_hw_info_t      *hwinfo_g;
/* A reference counter for the hwinfo structure */
static int                 n_hwinfo = 0;
/* A lock to protect the hwinfo structure */
static tMPI_Thread_mutex_t hw_info_lock = TMPI_THREAD_MUTEX_INITIALIZER;
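/* Usage sketch (assuming a single simulation context): callers obtain the
 * shared structure through gmx_detect_hardware() and release it with
 * gmx_hardware_info_free(); both take hw_info_lock, so concurrent thread-MPI
 * ranks trigger only one detection pass.
 *
 *     gmx_hw_info_t *hwinfo = gmx_detect_hardware(fplog, cr, TRUE);
 *     ... use hwinfo->cpuid_info, hwinfo->gpu_info, ...
 *     gmx_hardware_info_free(hwinfo);
 */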
static void limit_num_gpus_used(gmx_gpu_opt_t *gpu_opt, int count);
static int  gmx_count_gpu_dev_unique(const gmx_gpu_info_t *gpu_info,
                                     const gmx_gpu_opt_t  *gpu_opt);
static void sprint_gpus(char *sbuf, const gmx_gpu_info_t *gpu_info)
{
    int  i, ndev;
    char stmp[STRLEN];

    ndev = gpu_info->ncuda_dev;

    sbuf[0] = '\0';
    for (i = 0; i < ndev; i++)
    {
        get_gpu_device_info_string(stmp, gpu_info, i);
        strcat(sbuf, "  ");
        strcat(sbuf, stmp);
        if (i < ndev - 1)
        {
            strcat(sbuf, "\n");
        }
    }
}
static void print_gpu_detection_stats(FILE                 *fplog,
                                      const gmx_gpu_info_t *gpu_info,
                                      const t_commrec      *cr)
{
    char onhost[266], stmp[STRLEN];
    int  ngpu;

    if (!gpu_info->bDetectGPUs)
    {
        /* We skipped the detection, so don't print detection stats */
        return;
    }

    ngpu = gpu_info->ncuda_dev;

#if defined GMX_MPI && !defined GMX_THREAD_MPI
    /* We only print the detection on one, of possibly multiple, nodes */
    strncpy(onhost, " on host ", 10);
    gmx_gethostname(onhost + 9, 256);
#else
    /* We detect all relevant GPUs */
    strncpy(onhost, "", 1);
#endif

    if (ngpu > 0)
    {
        sprint_gpus(stmp, gpu_info);
        md_print_warn(cr, fplog, "%d GPU%s detected%s:\n%s\n",
                      ngpu, (ngpu > 1) ? "s" : "", onhost, stmp);
    }
    else
    {
        md_print_warn(cr, fplog, "No GPUs detected%s\n", onhost);
    }
}
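/* Example (illustrative): on a node with two detected devices the note above
 * reads something like
 *     "2 GPUs detected on host node001:
 *        #0: NVIDIA Tesla K20c, compute cap.: 3.5, ECC: yes, stat: compatible
 *        #1: ..."
 * where each per-device line comes from get_gpu_device_info_string().
 */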
static void print_gpu_use_stats(FILE                 *fplog,
                                const gmx_gpu_info_t *gpu_info,
                                const gmx_gpu_opt_t  *gpu_opt,
                                const t_commrec      *cr)
{
    char sbuf[STRLEN], stmp[STRLEN];
    int  i, ngpu_comp, ngpu_use;

    ngpu_comp = gpu_info->ncuda_dev_compatible;
    ngpu_use  = gpu_opt->ncuda_dev_use;

    /* Issue a note if GPUs are available but not used */
    if (ngpu_comp > 0 && ngpu_use < 1)
    {
        sprintf(sbuf,
                "%d compatible GPU%s detected in the system, but none will be used.\n"
                "Consider trying GPU acceleration with the Verlet scheme!",
                ngpu_comp, (ngpu_comp > 1) ? "s" : "");
    }
    else
    {
        int ngpu_use_uniq;

        ngpu_use_uniq = gmx_count_gpu_dev_unique(gpu_info, gpu_opt);

        sprintf(sbuf, "%d GPU%s %sselected for this run.\n"
                "Mapping of GPU%s to the %d PP rank%s in this node: ",
                ngpu_use_uniq, (ngpu_use_uniq > 1) ? "s" : "",
                gpu_opt->bUserSet ? "user-" : "auto-",
                (ngpu_use > 1) ? "s" : "",
                cr->nrank_pp_intranode,
                (cr->nrank_pp_intranode > 1) ? "s" : "");

        for (i = 0; i < ngpu_use; i++)
        {
            sprintf(stmp, "#%d", get_gpu_device_id(gpu_info, gpu_opt, i));
            if (i < ngpu_use - 1)
            {
                strcat(stmp, ", ");
            }
            strcat(sbuf, stmp);
        }
    }
    md_print_info(cr, fplog, "%s\n\n", sbuf);
}
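/* Example (illustrative): with two auto-selected GPUs and two PP ranks on the
 * node, the message built above reads roughly
 *     "2 GPUs auto-selected for this run.
 *      Mapping of GPUs to the 2 PP ranks in this node: #0, #1"
 */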
/* Give a suitable fatal error or warning if the build configuration
   and runtime CPU do not match. */
static void
check_use_of_rdtscp_on_this_cpu(FILE                *fplog,
                                const t_commrec     *cr,
                                const gmx_hw_info_t *hwinfo)
{
    gmx_bool bCpuHasRdtscp, bBinaryUsesRdtscp;

#ifdef HAVE_RDTSCP
    bBinaryUsesRdtscp = TRUE;
#else
    bBinaryUsesRdtscp = FALSE;
#endif

    bCpuHasRdtscp = gmx_cpuid_feature(hwinfo->cpuid_info, GMX_CPUID_FEATURE_X86_RDTSCP);

    if (!bCpuHasRdtscp && bBinaryUsesRdtscp)
    {
        gmx_fatal(FARGS, "The %s executable was compiled to use the rdtscp CPU instruction. "
                  "However, this is not supported by the current hardware and continuing would lead to a crash. "
                  "Please rebuild GROMACS with the GMX_USE_RDTSCP=OFF CMake option.",
                  ShortProgram());
    }

    if (bCpuHasRdtscp && !bBinaryUsesRdtscp)
    {
        md_print_warn(cr, fplog, "The current CPU can measure timings more accurately than the code in\n"
                      "%s was configured to use. This might affect your simulation\n"
                      "speed as accurate timings are needed for load-balancing.\n"
                      "Please consider rebuilding %s with the GMX_USE_RDTSCP=ON CMake option.\n",
                      ShortProgram(), ShortProgram());
    }
}
void gmx_check_hw_runconf_consistency(FILE                *fplog,
                                      const gmx_hw_info_t *hwinfo,
                                      const t_commrec     *cr,
                                      const gmx_hw_opt_t  *hw_opt,
                                      gmx_bool             bUseGPU)
{
    int      npppn;
    char     sbuf[STRLEN], th_or_proc[STRLEN], th_or_proc_plural[STRLEN], pernode[STRLEN];
    gmx_bool btMPI, bMPI, bMaxMpiThreadsSet, bNthreadsAuto, bEmulateGPU;

    /* Below we only do consistency checks for PP and GPUs,
     * this is irrelevant for PME only nodes, so in that case we return
     * here.
     */
    if (!(cr->duty & DUTY_PP))
    {
        return;
    }

    btMPI         = bMPI = FALSE;
    bNthreadsAuto = FALSE;
#if defined(GMX_THREAD_MPI)
    btMPI         = TRUE;
    bNthreadsAuto = (hw_opt->nthreads_tmpi < 1);
#elif defined(GMX_LIB_MPI)
    bMPI = TRUE;
#endif

    /* GPU emulation detection is done later, but we need it here as well
     * -- uncool, but there's no elegant workaround */
    bEmulateGPU       = (getenv("GMX_EMULATE_GPU") != NULL);
    bMaxMpiThreadsSet = (getenv("GMX_MAX_MPI_THREADS") != NULL);

    /* check the SIMD level mdrun is compiled with against hardware
       capabilities */
    /* TODO: Here we assume homogeneous hardware which is not necessarily
             the case! Might not hurt to add an extra check over MPI. */
    gmx_cpuid_simd_check(hwinfo->cpuid_info, fplog, SIMMASTER(cr));

    check_use_of_rdtscp_on_this_cpu(fplog, cr, hwinfo);

    /* NOTE: this print is only for and on one physical node */
    print_gpu_detection_stats(fplog, &hwinfo->gpu_info, cr);

    if (hwinfo->gpu_info.ncuda_dev_compatible > 0)
    {
        /* NOTE: this print is only for and on one physical node */
        print_gpu_use_stats(fplog, &hwinfo->gpu_info, &hw_opt->gpu_opt, cr);
    }

    /* Need to ensure that we have enough GPUs:
     * - need one GPU per PP node
     * - no GPU oversubscription with tMPI
     */
    /* number of PP processes per node */
    npppn = cr->nrank_pp_intranode;

    pernode[0]           = '\0';
    th_or_proc_plural[0] = '\0';
    if (btMPI)
    {
        sprintf(th_or_proc, "thread-MPI thread");
        if (npppn > 1)
        {
            sprintf(th_or_proc_plural, "s");
        }
    }
    else if (bMPI)
    {
        sprintf(th_or_proc, "MPI process");
        if (npppn > 1)
        {
            sprintf(th_or_proc_plural, "es");
        }
        sprintf(pernode, " per node");
    }
    else
    {
        /* neither MPI nor tMPI */
        sprintf(th_or_proc, "process");
    }

    if (bUseGPU && hwinfo->gpu_info.ncuda_dev_compatible > 0 &&
        !bEmulateGPU)
    {
        int  ngpu_comp, ngpu_use;
        char gpu_comp_plural[2], gpu_use_plural[2];

        ngpu_comp = hwinfo->gpu_info.ncuda_dev_compatible;
        ngpu_use  = hw_opt->gpu_opt.ncuda_dev_use;

        sprintf(gpu_comp_plural, "%s", (ngpu_comp > 1) ? "s" : "");
        sprintf(gpu_use_plural,  "%s", (ngpu_use  > 1) ? "s" : "");

        /* number of tMPI threads auto-adjusted */
        if (btMPI && bNthreadsAuto)
        {
            if (hw_opt->gpu_opt.bUserSet && npppn < ngpu_use)
            {
                /* The user manually provided more GPUs than threads we
                   could automatically start. */
                gmx_fatal(FARGS,
                          "%d GPU%s provided, but only %d PP thread-MPI thread%s could be started.\n"
                          "%s requires one PP thread-MPI thread per GPU; use fewer GPUs%s.",
                          ngpu_use, gpu_use_plural,
                          npppn, th_or_proc_plural,
                          ShortProgram(), bMaxMpiThreadsSet ? "\nor allow more threads to be used" : "");
            }

            if (!hw_opt->gpu_opt.bUserSet && npppn < ngpu_comp)
            {
                /* There are more GPUs than tMPI threads; we have
                   limited the number of GPUs used. */
                md_print_warn(cr, fplog,
                              "NOTE: %d GPU%s were detected, but only %d PP thread-MPI thread%s can be started.\n"
                              "      %s can use one GPU per PP thread-MPI thread, so only %d GPU%s will be used.%s\n",
                              ngpu_comp, gpu_comp_plural,
                              npppn, th_or_proc_plural,
                              ShortProgram(), npppn,
                              npppn > 1 ? "s" : "",
                              bMaxMpiThreadsSet ? "\n      Also, you can allow more threads to be used by increasing GMX_MAX_MPI_THREADS" : "");
            }
        }

        if (hw_opt->gpu_opt.bUserSet)
        {
            if (ngpu_use != npppn)
            {
                gmx_fatal(FARGS,
                          "Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
                          "%s was started with %d PP %s%s%s, but you provided %d GPU%s.",
                          th_or_proc, btMPI ? "s" : "es", pernode,
                          ShortProgram(), npppn, th_or_proc,
                          th_or_proc_plural, pernode,
                          ngpu_use, gpu_use_plural);
            }
        }
        else
        {
            if (ngpu_comp > npppn)
            {
                md_print_warn(cr, fplog,
                              "NOTE: potentially sub-optimal launch configuration, %s started with fewer\n"
                              "      PP %s%s%s than GPU%s available.\n"
                              "      Each PP %s can use only one GPU, %d GPU%s%s will be used.\n",
                              ShortProgram(), th_or_proc,
                              th_or_proc_plural, pernode, gpu_comp_plural,
                              th_or_proc, npppn, gpu_use_plural, pernode);
            }

            if (ngpu_use != npppn)
            {
                /* Avoid duplicate error messages.
                 * Unfortunately we can only do this at the physical node
                 * level, since the hardware setup and MPI process count
                 * might differ between physical nodes.
                 */
                if (cr->rank_pp_intranode == 0)
                {
                    gmx_fatal(FARGS,
                              "Incorrect launch configuration: mismatching number of PP %s%s and GPUs%s.\n"
                              "%s was started with %d PP %s%s%s, but only %d GPU%s were detected.",
                              th_or_proc, btMPI ? "s" : "es", pernode,
                              ShortProgram(), npppn, th_or_proc,
                              th_or_proc_plural, pernode,
                              ngpu_use, gpu_use_plural);
                }
            }
        }

        {
            int same_count;

            same_count = gmx_count_gpu_dev_shared(&hw_opt->gpu_opt);

            if (same_count > 0)
            {
                md_print_info(cr, fplog,
                              "NOTE: You assigned %s to multiple %s%s.\n",
                              same_count > 1 ? "GPUs" : "a GPU", th_or_proc, btMPI ? "s" : "es");
            }
        }
    }

#ifdef GMX_MPI
    if (PAR(cr))
    {
        /* Prevent other ranks from continuing after an inconsistency
           was found on this one. */
        MPI_Barrier(cr->mpi_comm_mygroup);
    }
#endif
}
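/* Illustration (hypothetical numbers): with user-set GPU IDs "01" and a single
 * PP rank on the node, the checks above abort because ngpu_use (2) does not
 * match npppn (1); with auto-selection, two compatible GPUs and one PP rank,
 * the run continues after limit_num_gpus_used() with a note about the
 * sub-optimal launch configuration.
 */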
/* Return 0 if none of the GPUs (per node) are shared among PP ranks.
 *
 * Sharing GPUs among multiple PP ranks is possible when the user passes
 * GPU IDs. Here we check for sharing and return a non-zero value when
 * this is detected. Note that the return value represents the number of
 * PP rank pairs that share a device.
 */
int gmx_count_gpu_dev_shared(const gmx_gpu_opt_t *gpu_opt)
{
    int same_count = 0;
    int ngpu       = gpu_opt->ncuda_dev_use;

    if (gpu_opt->bUserSet)
    {
        int i, j;

        for (i = 0; i < ngpu - 1; i++)
        {
            for (j = i + 1; j < ngpu; j++)
            {
                same_count += (gpu_opt->cuda_dev_use[i] ==
                               gpu_opt->cuda_dev_use[j]);
            }
        }
    }

    return same_count;
}
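/* Example (illustrative): a user-supplied ID string "0011" assigns GPU 0 to
 * two ranks and GPU 1 to two ranks, giving two sharing pairs, so the function
 * returns 2; "0123" returns 0.
 */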
/* Count and return the number of unique GPUs (per node) selected.
 *
 * As sharing GPUs among multiple PP ranks is possible when the user passes
 * GPU IDs, the number of GPUs used (per node) can be different from the
 * number of GPU IDs selected.
 */
static int gmx_count_gpu_dev_unique(const gmx_gpu_info_t *gpu_info,
                                    const gmx_gpu_opt_t  *gpu_opt)
{
    int  i, uniq_count, ngpu;
    int *uniq_ids;

    ngpu       = gpu_info->ncuda_dev;
    uniq_count = 0;

    snew(uniq_ids, ngpu);

    /* Each element in uniq_ids will be set to 0 or 1. The n-th element set
     * to 1 indicates that the respective GPU was selected to be used. */
    for (i = 0; i < gpu_opt->ncuda_dev_use; i++)
    {
        uniq_ids[get_gpu_device_id(gpu_info, gpu_opt, i)] = 1;
    }
    /* Count the devices used. */
    for (i = 0; i < ngpu; i++)
    {
        uniq_count += uniq_ids[i];
    }

    sfree(uniq_ids);

    return uniq_count;
}
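/* Example (illustrative): for the same "0011" selection, four IDs are passed
 * but only devices 0 and 1 get marked in uniq_ids, so uniq_count ends up as 2.
 */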
/* Return the number of hardware threads supported by the current CPU.
 * We assume that this is equal to the number of CPUs reported to be
 * online by the OS at the time of the call.
 */
static int get_nthreads_hw_avail(FILE gmx_unused *fplog, const t_commrec gmx_unused *cr)
{
    int ret = 0;

#if ((defined(WIN32) || defined( _WIN32 ) || defined(WIN64) || defined( _WIN64 )) && !(defined (__CYGWIN__) || defined (__CYGWIN32__)))
    /* Windows */
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    ret = sysinfo.dwNumberOfProcessors;
#elif defined HAVE_SYSCONF
    /* We are probably on Unix.
     * Now check if we have the argument to use before executing the call
     */
#if defined(_SC_NPROCESSORS_ONLN)
    ret = sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(_SC_NPROC_ONLN)
    ret = sysconf(_SC_NPROC_ONLN);
#elif defined(_SC_NPROCESSORS_CONF)
    ret = sysconf(_SC_NPROCESSORS_CONF);
#elif defined(_SC_NPROC_CONF)
    ret = sysconf(_SC_NPROC_CONF);
#else
#warning "No valid sysconf argument value found. Executables will not be able to determine the number of hardware threads: mdrun will use 1 thread by default!"
#endif /* End of check for sysconf argument values */
#else
    /* Neither windows nor Unix. No fscking idea how many CPUs we have! */
    ret = 1;
#endif

    if (debug)
    {
        fprintf(debug, "Detected %d processors, will use this as the number "
                "of supported hardware threads.\n", ret);
    }

#ifdef GMX_OPENMP
    if (ret != gmx_omp_get_num_procs())
    {
        md_print_warn(cr, fplog,
                      "Number of CPUs detected (%d) does not match the number reported by OpenMP (%d).\n"
                      "Consider setting the launch configuration manually!",
                      ret, gmx_omp_get_num_procs());
    }
#endif

    return ret;
}
static void gmx_detect_gpus(FILE *fplog, const t_commrec *cr)
{
#ifdef GMX_LIB_MPI
    int      rank_world;
    MPI_Comm physicalnode_comm;
#endif
    int      rank_local;

    /* Under certain circumstances MPI ranks on the same physical node
     * can not simultaneously access the same GPU(s). Therefore we run
     * the detection only on one MPI rank per node and broadcast the info.
     * Note that with thread-MPI only a single thread runs this code.
     *
     * TODO: We should also do CPU hardware detection only once on each
     * physical node and broadcast it, instead of doing it on every MPI rank.
     */
#ifdef GMX_LIB_MPI
    /* A split of MPI_COMM_WORLD over physical nodes is only required here,
     * so we create and destroy it locally.
     */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
    MPI_Comm_split(MPI_COMM_WORLD, gmx_physicalnode_id_hash(),
                   rank_world, &physicalnode_comm);
    MPI_Comm_rank(physicalnode_comm, &rank_local);
#else
    /* Here there should be only one process, check this */
    assert(cr->nnodes == 1 && cr->sim_nodeid == 0);

    rank_local = 0;
#endif

    if (rank_local == 0)
    {
        char detection_error[STRLEN] = "", sbuf[STRLEN];

        if (detect_cuda_gpus(&hwinfo_g->gpu_info, detection_error) != 0)
        {
            if (detection_error[0] != '\0')
            {
                sprintf(sbuf, ":\n      %s\n", detection_error);
            }
            else
            {
                sprintf(sbuf, ".");
            }
            md_print_warn(cr, fplog,
                          "NOTE: Error occurred during GPU detection%s"
                          "      Can not use GPU acceleration, will fall back to CPU kernels.\n",
                          sbuf);
        }
    }

#ifdef GMX_LIB_MPI
    /* Broadcast the GPU info to the other ranks within this node */
    MPI_Bcast(&hwinfo_g->gpu_info.ncuda_dev, 1, MPI_INT, 0, physicalnode_comm);

    if (hwinfo_g->gpu_info.ncuda_dev > 0)
    {
        int cuda_dev_size;

        cuda_dev_size = hwinfo_g->gpu_info.ncuda_dev*sizeof_cuda_dev_info();

        if (rank_local > 0)
        {
            hwinfo_g->gpu_info.cuda_dev =
                (cuda_dev_info_ptr_t)malloc(cuda_dev_size);
        }
        MPI_Bcast(hwinfo_g->gpu_info.cuda_dev, cuda_dev_size, MPI_BYTE,
                  0, physicalnode_comm);
        MPI_Bcast(&hwinfo_g->gpu_info.ncuda_dev_compatible, 1, MPI_INT,
                  0, physicalnode_comm);
    }

    MPI_Comm_free(&physicalnode_comm);
#endif
}
*gmx_detect_hardware(FILE *fplog
, const t_commrec
*cr
,
624 gmx_bool bDetectGPUs
)
629 /* make sure no one else is doing the same thing */
630 ret
= tMPI_Thread_mutex_lock(&hw_info_lock
);
633 gmx_fatal(FARGS
, "Error locking hwinfo mutex: %s", strerror(errno
));
636 /* only initialize the hwinfo structure if it is not already initalized */
641 /* detect CPUID info; no fuss, we don't detect system-wide
642 * -- sloppy, but that's it for now */
643 if (gmx_cpuid_init(&hwinfo_g
->cpuid_info
) != 0)
645 gmx_fatal_collective(FARGS
, cr
, NULL
, "CPUID detection failed!");
648 /* detect number of hardware threads */
649 hwinfo_g
->nthreads_hw_avail
= get_nthreads_hw_avail(fplog
, cr
);
652 hwinfo_g
->gpu_info
.ncuda_dev
= 0;
653 hwinfo_g
->gpu_info
.cuda_dev
= NULL
;
654 hwinfo_g
->gpu_info
.ncuda_dev_compatible
= 0;
656 /* Run the detection if the binary was compiled with GPU support
657 * and we requested detection.
659 hwinfo_g
->gpu_info
.bDetectGPUs
=
660 (bGPUBinary
&& bDetectGPUs
&&
661 getenv("GMX_DISABLE_GPU_DETECTION") == NULL
);
662 if (hwinfo_g
->gpu_info
.bDetectGPUs
)
664 gmx_detect_gpus(fplog
, cr
);
667 /* increase the reference counter */
670 ret
= tMPI_Thread_mutex_unlock(&hw_info_lock
);
673 gmx_fatal(FARGS
, "Error unlocking hwinfo mutex: %s", strerror(errno
));
void gmx_parse_gpu_ids(gmx_gpu_opt_t *gpu_opt)
{
    char *env;

    if (gpu_opt->gpu_id != NULL && !bGPUBinary)
    {
        gmx_fatal(FARGS, "GPU ID string set, but %s was compiled without GPU support!", ShortProgram());
    }

    env = getenv("GMX_GPU_ID");
    if (env != NULL && gpu_opt->gpu_id != NULL)
    {
        gmx_fatal(FARGS, "GMX_GPU_ID and -gpu_id can not be used at the same time");
    }
    if (env == NULL)
    {
        env = gpu_opt->gpu_id;
    }

    /* parse GPU IDs if the user passed any */
    if (env != NULL)
    {
        /* Parse a "plain" GPU ID string which contains a sequence of
         * digits corresponding to GPU IDs; the order will indicate
         * the process/tMPI thread - GPU assignment. */
        parse_digits_from_plain_string(env,
                                       &gpu_opt->ncuda_dev_use,
                                       &gpu_opt->cuda_dev_use);

        if (gpu_opt->ncuda_dev_use == 0)
        {
            gmx_fatal(FARGS, "Empty GPU ID string encountered.\n%s\n",
                      invalid_gpuid_hint);
        }

        gpu_opt->bUserSet = TRUE;
    }
}
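/* Example (illustrative): setting GMX_GPU_ID=01 in the environment is handled
 * the same way as passing -gpu_id 01 on the command line; supplying both at
 * once triggers the fatal error above.
 */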
void gmx_select_gpu_ids(FILE *fplog, const t_commrec *cr,
                        const gmx_gpu_info_t *gpu_info,
                        gmx_bool bForceUseGPU,
                        gmx_gpu_opt_t *gpu_opt)
{
    int  i;
    char sbuf[STRLEN], stmp[STRLEN];

    /* Bail if binary is not compiled with GPU acceleration, but this is either
     * explicitly (-nb gpu) or implicitly (gpu ID passed) requested. */
    if (bForceUseGPU && !bGPUBinary)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but %s was compiled without GPU support!", ShortProgram());
    }

    if (gpu_opt->bUserSet)
    {
        /* Check the GPU IDs passed by the user.
         * (GPU IDs have been parsed by gmx_parse_gpu_ids before)
         */
        int *checkres;
        int  res;

        snew(checkres, gpu_opt->ncuda_dev_use);

        res = check_selected_cuda_gpus(checkres, gpu_info, gpu_opt);

        if (!res)
        {
            print_gpu_detection_stats(fplog, gpu_info, cr);

            sprintf(sbuf, "Some of the requested GPUs do not exist, behave strangely, or are not compatible:\n");
            for (i = 0; i < gpu_opt->ncuda_dev_use; i++)
            {
                if (checkres[i] != egpuCompatible)
                {
                    sprintf(stmp, "    GPU #%d: %s\n",
                            gpu_opt->cuda_dev_use[i],
                            gpu_detect_res_str[checkres[i]]);
                    strcat(sbuf, stmp);
                }
            }
            gmx_fatal(FARGS, "%s", sbuf);
        }

        sfree(checkres);
    }
    else
    {
        pick_compatible_gpus(&hwinfo_g->gpu_info, gpu_opt);

        if (gpu_opt->ncuda_dev_use > cr->nrank_pp_intranode)
        {
            /* We picked more GPUs than we can use: limit the number.
             * We print detailed messages about this later in
             * gmx_check_hw_runconf_consistency.
             */
            limit_num_gpus_used(gpu_opt, cr->nrank_pp_intranode);
        }

        gpu_opt->bUserSet = FALSE;
    }

    /* If the user asked for a GPU, check whether we have a GPU */
    if (bForceUseGPU && gpu_info->ncuda_dev_compatible == 0)
    {
        gmx_fatal(FARGS, "GPU acceleration requested, but no compatible GPUs were detected.");
    }
}
static void limit_num_gpus_used(gmx_gpu_opt_t *gpu_opt, int count)
{
    int ndev_use;

    ndev_use = gpu_opt->ncuda_dev_use;

    if (count > ndev_use)
    {
        /* won't increase the # of GPUs */
        return;
    }

    if (count < 1)
    {
        char sbuf[STRLEN];
        sprintf(sbuf, "Limiting the number of GPUs to <1 doesn't make sense (detected %d, %d requested)!",
                ndev_use, count);
        gmx_incons(sbuf);
    }

    /* TODO: improve this implementation: either sort GPUs or remove the weakest here */
    gpu_opt->ncuda_dev_use = count;
}
void gmx_hardware_info_free(gmx_hw_info_t *hwinfo)
{
    int ret;

    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
    }

    /* decrease the reference counter */
    n_hwinfo--;

    if (hwinfo != hwinfo_g)
    {
        gmx_incons("hwinfo != hwinfo_g");
    }

    if (n_hwinfo < 0)
    {
        gmx_incons("n_hwinfo < 0");
    }

    if (n_hwinfo == 0)
    {
        gmx_cpuid_done(hwinfo_g->cpuid_info);
        free_gpu_info(&hwinfo_g->gpu_info);
        sfree(hwinfo_g);
    }

    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
    }
}