Separate canDetectGpus and findGpus further, and fix tests
[gromacs.git] / src / gromacs / hardware / detecthardware.cpp
blob f72237eab67ebfe9328168d1a4505161528b5bb7
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "detecthardware.h"

#include "config.h"

#include <cerrno>
#include <cstdlib>
#include <cstring>

#include <algorithm>
#include <array>
#include <chrono>
#include <memory>
#include <string>
#include <thread>
#include <vector>

#include "thread_mpi/threads.h"

#include "gromacs/compat/make_unique.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/cpuinfo.h"
#include "gromacs/hardware/hardwaretopology.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/simd/support.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/basenetwork.h"
#include "gromacs/utility/baseversion.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/programcontext.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/utility/sysinfo.h"

#include "architecture.h"

#ifdef HAVE_UNISTD_H
#    include <unistd.h>       // sysconf()
#endif
namespace gmx
{

//! Convenience macro to help us avoid ifdefs each time we use sysconf
#if !defined(_SC_NPROCESSORS_ONLN) && defined(_SC_NPROC_ONLN)
#    define _SC_NPROCESSORS_ONLN _SC_NPROC_ONLN
#endif

//! Convenience macro to help us avoid ifdefs each time we use sysconf
#if !defined(_SC_NPROCESSORS_CONF) && defined(_SC_NPROC_CONF)
#    define _SC_NPROCESSORS_CONF _SC_NPROC_CONF
#endif

//! Constant used to help minimize preprocessed code
static const bool bGPUBinary = GMX_GPU != GMX_GPU_NONE;

/*! \brief The hwinfo structure (common to all threads in this process).
 *
 * \todo This should become a shared_ptr owned by e.g. Mdrunner::runner()
 * that is shared across any threads as needed (e.g. for thread-MPI). That
 * offers about the same run time performance as we get here, and avoids a
 * lot of custom code.
 */
static std::unique_ptr<gmx_hw_info_t> hwinfo_g;
//! A reference counter for the hwinfo structure
static int                            n_hwinfo = 0;
//! A lock to protect the hwinfo structure
static tMPI_Thread_mutex_t            hw_info_lock = TMPI_THREAD_MUTEX_INITIALIZER;
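
/* Together, the three statics above implement a mutex-protected,
 * reference-counted singleton: gmx_detect_hardware() fills hwinfo_g on its
 * first call and increments n_hwinfo, and gmx_hardware_info_free() releases
 * the detected resources once the count drops back to zero. */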

//! Detect GPUs, if that makes sense to attempt.
static void gmx_detect_gpus(const gmx::MDLogger &mdlog, const t_commrec *cr)
{
#if GMX_LIB_MPI
    int      rank_world;
    MPI_Comm physicalnode_comm;
#endif
    bool     isMasterRankOfNode;

    hwinfo_g->gpu_info.bDetectGPUs =
        (bGPUBinary && getenv("GMX_DISABLE_GPU_DETECTION") == nullptr);
    if (!hwinfo_g->gpu_info.bDetectGPUs)
    {
        return;
    }

    /* Under certain circumstances MPI ranks on the same physical node
     * can not simultaneously access the same GPU(s). Therefore we run
     * the detection only on one MPI rank per node and broadcast the info.
     * Note that with thread-MPI only a single thread runs this code.
     *
     * NOTE: We can't broadcast gpu_info with OpenCL as the device and platform
     * ID stored in the structure are unique for each rank (even if a device
     * is shared by multiple ranks).
     *
     * TODO: We should also do CPU hardware detection only once on each
     * physical node and broadcast it, instead of doing it on every MPI rank.
     */
#if GMX_LIB_MPI
    /* A split of MPI_COMM_WORLD over physical nodes is only required here,
     * so we create and destroy it locally.
     */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
    MPI_Comm_split(MPI_COMM_WORLD, gmx_physicalnode_id_hash(),
                   rank_world, &physicalnode_comm);
    {
        int rankOnNode = -1;
        MPI_Comm_rank(physicalnode_comm, &rankOnNode);
        isMasterRankOfNode = (rankOnNode == 0);
    }
    GMX_UNUSED_VALUE(cr);
#else
    // Here there should be only one process, because if we are using
    // thread-MPI, only one thread is active so far. So we check this.
    GMX_RELEASE_ASSERT(cr->nnodes == 1 && cr->sim_nodeid == 0, "Only a single (master) process should execute here");
    isMasterRankOfNode = true;
#endif

    /* With CUDA we detect only on one rank per host, with OpenCL we need to
     * do the detection on all PP ranks */
    bool isOpenclPpRank = ((GMX_GPU == GMX_GPU_OPENCL) && thisRankHasDuty(cr, DUTY_PP));

    bool gpusCanBeDetected = false;
    if (isMasterRankOfNode || isOpenclPpRank)
    {
        std::string errorMessage;
        gpusCanBeDetected = canDetectGpus(&errorMessage);
        if (!gpusCanBeDetected)
        {
            GMX_LOG(mdlog.warning).asParagraph().appendTextFormatted(
                    "NOTE: GPUs cannot be detected:\n"
                    "      %s\n"
                    "      Can not use GPU acceleration, will fall back to CPU kernels.",
                    errorMessage.c_str());
        }
    }
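
    /* canDetectGpus() above only checks whether the GPU runtime/driver layer
     * is usable at all, reporting a diagnostic message if it is not, while
     * findGpus() below does the actual device enumeration and fills gpu_info.
     * Keeping the two steps separate lets a failed detection degrade
     * gracefully to a CPU-only run instead of aborting. */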

    if (gpusCanBeDetected)
    {
        findGpus(&hwinfo_g->gpu_info);
        // No need to tell the user anything at this point, they get a
        // hardware report later.
    }

#if GMX_LIB_MPI
    if (!isOpenclPpRank)
    {
        /* Broadcast the GPU info to the other ranks within this node */
        MPI_Bcast(&hwinfo_g->gpu_info.n_dev, 1, MPI_INT, 0, physicalnode_comm);

        if (hwinfo_g->gpu_info.n_dev > 0)
        {
            int dev_size;

            dev_size = hwinfo_g->gpu_info.n_dev*sizeof_gpu_dev_info();

            if (!isMasterRankOfNode)
            {
                hwinfo_g->gpu_info.gpu_dev =
                    (struct gmx_device_info_t *)malloc(dev_size);
            }
            MPI_Bcast(hwinfo_g->gpu_info.gpu_dev, dev_size, MPI_BYTE,
                      0, physicalnode_comm);
            MPI_Bcast(&hwinfo_g->gpu_info.n_dev_compatible, 1, MPI_INT,
                      0, physicalnode_comm);
        }
    }

    MPI_Comm_free(&physicalnode_comm);
#endif
}

//! Reduce the locally collected \p hwinfo_g over MPI ranks
static void gmx_collect_hardware_mpi(const gmx::CpuInfo &cpuInfo)
{
    const int  ncore       = hwinfo_g->hardwareTopology->numberOfCores();
    /* Zen has family=23, for now we treat future AMD CPUs like Zen */
    const bool cpuIsAmdZen = (cpuInfo.vendor() == CpuInfo::Vendor::Amd &&
                              cpuInfo.family() >= 23);

#if GMX_LIB_MPI
    int       rank_id;
    int       nrank, rank, nhwthread, ngpu, i;
    int       gpu_hash;
    int      *buf, *all;

    rank_id   = gmx_physicalnode_id_hash();
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nrank);
    nhwthread = hwinfo_g->nthreads_hw_avail;
    ngpu      = hwinfo_g->gpu_info.n_dev_compatible;
    /* Create a unique hash of the GPU type(s) in this node */
    gpu_hash  = 0;
    /* Here it might be better to only loop over the compatible GPUs, but we
     * don't have that information available and it would also require
     * removing the device ID from the device info string.
     */
    for (i = 0; i < hwinfo_g->gpu_info.n_dev; i++)
    {
        char stmp[STRLEN];

        /* Since the device ID is incorporated in the hash, the order of
         * the GPUs affects the hash. Also two identical GPUs won't give
         * a gpu_hash of zero after XORing.
         */
        get_gpu_device_info_string(stmp, hwinfo_g->gpu_info, i);
        gpu_hash ^= gmx_string_fullhash_func(stmp, gmx_string_hash_init);
    }

    snew(buf, nrank);
    snew(all, nrank);
    buf[rank] = rank_id;

    MPI_Allreduce(buf, all, nrank, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
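
    /* buf is zero-initialized by snew() and only buf[rank] was set, so after
     * the sum reduction all[r] holds the physical-node hash of rank r. The
     * loop below lets only the first rank on each node contribute non-zero
     * counts, so the subsequent sum reduction counts each node exactly once. */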

    gmx_bool bFound;
    int      nnode0, ncore0, nhwthread0, ngpu0, r;

    bFound     = FALSE;
    ncore0     = 0;
    nnode0     = 0;
    nhwthread0 = 0;
    ngpu0      = 0;
    for (r = 0; r < nrank; r++)
    {
        if (all[r] == rank_id)
        {
            if (!bFound && r == rank)
            {
                /* We are the first rank in this physical node */
                nnode0     = 1;
                ncore0     = ncore;
                nhwthread0 = nhwthread;
                ngpu0      = ngpu;
            }
            bFound = TRUE;
        }
    }

    sfree(buf);
    sfree(all);

    constexpr int                      numElementsCounts = 4;
    std::array<int, numElementsCounts> countsReduced;
    {
        std::array<int, numElementsCounts> countsLocal;
        /* Sum values from only intra-rank 0 so we get the sum over all nodes */
        countsLocal[0] = nnode0;
        countsLocal[1] = ncore0;
        countsLocal[2] = nhwthread0;
        countsLocal[3] = ngpu0;

        MPI_Allreduce(countsLocal.data(), countsReduced.data(), countsLocal.size(),
                      MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    }

    constexpr int                   numElementsMax = 11;
    std::array<int, numElementsMax> maxMinReduced;
    {
        std::array<int, numElementsMax> maxMinLocal;
        /* Store + and - values for all ranks,
         * so we can get max+min with one MPI call.
         */
        maxMinLocal[0]  = ncore;
        maxMinLocal[1]  = nhwthread;
        maxMinLocal[2]  = ngpu;
        maxMinLocal[3]  = static_cast<int>(gmx::simdSuggested(cpuInfo));
        maxMinLocal[4]  = gpu_hash;
        maxMinLocal[5]  = -maxMinLocal[0];
        maxMinLocal[6]  = -maxMinLocal[1];
        maxMinLocal[7]  = -maxMinLocal[2];
        maxMinLocal[8]  = -maxMinLocal[3];
        maxMinLocal[9]  = -maxMinLocal[4];
        maxMinLocal[10] = (cpuIsAmdZen ? 1 : 0);

        MPI_Allreduce(maxMinLocal.data(), maxMinReduced.data(), maxMinLocal.size(),
                      MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    }
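
    /* Entries 0-4 of maxMinReduced now hold the maxima and entries 5-9 the
     * negated minima (MPI_MAX over -x gives -min(x)), so a single Allreduce
     * delivered both extremes. Comparing entry 4 against the negation of
     * entry 9 checks max(gpu_hash) == min(gpu_hash), i.e. whether every node
     * reported an identical GPU configuration. */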

    hwinfo_g->nphysicalnode       = countsReduced[0];
    hwinfo_g->ncore_tot           = countsReduced[1];
    hwinfo_g->ncore_min           = -maxMinReduced[5];
    hwinfo_g->ncore_max           = maxMinReduced[0];
    hwinfo_g->nhwthread_tot       = countsReduced[2];
    hwinfo_g->nhwthread_min       = -maxMinReduced[6];
    hwinfo_g->nhwthread_max       = maxMinReduced[1];
    hwinfo_g->ngpu_compatible_tot = countsReduced[3];
    hwinfo_g->ngpu_compatible_min = -maxMinReduced[7];
    hwinfo_g->ngpu_compatible_max = maxMinReduced[2];
    hwinfo_g->simd_suggest_min    = -maxMinReduced[8];
    hwinfo_g->simd_suggest_max    = maxMinReduced[3];
    hwinfo_g->bIdenticalGPUs      = (maxMinReduced[4] == -maxMinReduced[9]);
    hwinfo_g->haveAmdZenCpu       = (maxMinReduced[10] > 0);
#else
    /* All ranks use the same pointer, protected by a mutex in the caller */
    hwinfo_g->nphysicalnode       = 1;
    hwinfo_g->ncore_tot           = ncore;
    hwinfo_g->ncore_min           = ncore;
    hwinfo_g->ncore_max           = ncore;
    hwinfo_g->nhwthread_tot       = hwinfo_g->nthreads_hw_avail;
    hwinfo_g->nhwthread_min       = hwinfo_g->nthreads_hw_avail;
    hwinfo_g->nhwthread_max       = hwinfo_g->nthreads_hw_avail;
    hwinfo_g->ngpu_compatible_tot = hwinfo_g->gpu_info.n_dev_compatible;
    hwinfo_g->ngpu_compatible_min = hwinfo_g->gpu_info.n_dev_compatible;
    hwinfo_g->ngpu_compatible_max = hwinfo_g->gpu_info.n_dev_compatible;
    hwinfo_g->simd_suggest_min    = static_cast<int>(simdSuggested(cpuInfo));
    hwinfo_g->simd_suggest_max    = static_cast<int>(simdSuggested(cpuInfo));
    hwinfo_g->bIdenticalGPUs      = TRUE;
    hwinfo_g->haveAmdZenCpu       = cpuIsAmdZen;
#endif
}

/*! \brief Utility that does dummy computing for max 2 seconds to spin up cores
 *
 * This routine will check the number of cores configured and online
 * (using sysconf), and then spins doing dummy compute operations for up to
 * 2 seconds, or until all cores have come online. This can be used prior to
 * hardware detection for platforms that take unused processors offline.
 *
 * This routine will not throw exceptions.
 */
static void
spinUpCore() noexcept
{
#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF) && defined(_SC_NPROCESSORS_ONLN)
    float dummy           = 0.1;
    int   countConfigured = sysconf(_SC_NPROCESSORS_CONF);    // noexcept
    auto  start           = std::chrono::steady_clock::now(); // noexcept

    while (sysconf(_SC_NPROCESSORS_ONLN) < countConfigured &&
           std::chrono::steady_clock::now() - start < std::chrono::seconds(2))
    {
        for (int i = 1; i < 10000; i++)
        {
            dummy /= i;
        }
    }

    if (dummy < 0)
    {
        printf("This cannot happen, but prevents loop from being optimized away.");
    }
#endif
}

/*! \brief Prepare the system before hardware topology detection
 *
 * This routine should perform any actions needed to put the system in the
 * state we want it to be in before detecting the hardware topology. For most
 * processors there is nothing to do, but some architectures (in particular ARM)
 * have support for taking configured cores offline, which will make them disappear
 * from the online processor count.
 *
 * This routine checks if there is a mismatch between the number of cores
 * configured and online, and in that case we issue a small workload that
 * attempts to wake sleeping cores before doing the actual detection.
 *
 * This type of mismatch can also occur for x86 or PowerPC on Linux, if SMT has only
 * been disabled in the kernel (rather than the BIOS). Since those cores will never
 * come online automatically, we currently skip this test for x86 & PowerPC to
 * avoid wasting 2 seconds. We also skip the test if there is no thread support.
 *
 * \note Cores will sleep relatively quickly again, so it's important to issue
 *       the real detection code directly after this routine.
 */
static void
hardwareTopologyPrepareDetection()
{
#if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF) && \
    (defined(THREAD_PTHREADS) || defined(THREAD_WINDOWS))

    // Modify this conditional when/if x86 or PowerPC starts to sleep some cores
    if (c_architecture != Architecture::X86 &&
        c_architecture != Architecture::PowerPC)
    {
        int                      countConfigured = sysconf(_SC_NPROCESSORS_CONF);
        std::vector<std::thread> workThreads(countConfigured);

        for (auto &t : workThreads)
        {
            t = std::thread(spinUpCore);
        }

        for (auto &t : workThreads)
        {
            t.join();
        }
    }
#endif
}

/*! \brief Sanity check hardware topology and print some notes to log
 *
 * \param mdlog            Logger.
 * \param hardwareTopology Reference to hardwareTopology object.
 */
static void
hardwareTopologyDoubleCheckDetection(const gmx::MDLogger gmx_unused         &mdlog,
                                     const gmx::HardwareTopology gmx_unused &hardwareTopology)
{
#if defined HAVE_SYSCONF && defined(_SC_NPROCESSORS_CONF)
    if (hardwareTopology.supportLevel() < gmx::HardwareTopology::SupportLevel::LogicalProcessorCount)
    {
        return;
    }

    int countFromDetection = hardwareTopology.machine().logicalProcessorCount;
    int countConfigured    = sysconf(_SC_NPROCESSORS_CONF);

    /* BIOS, kernel or user actions can take physical processors
     * offline. We already cater for some of the cases inside the hardwareTopology
     * by trying to spin up cores just before we detect, but there could be other
     * cases where it is worthwhile to hint that there might be more resources available.
     */
    if (countConfigured >= 0 && countConfigured != countFromDetection)
    {
        GMX_LOG(mdlog.info).
            appendTextFormatted("Note: %d CPUs configured, but only %d were detected to be online.\n", countConfigured, countFromDetection);

        if (c_architecture == Architecture::X86 &&
            countConfigured == 2*countFromDetection)
        {
            GMX_LOG(mdlog.info).
                appendText("      X86 Hyperthreading is likely disabled; enable it for better performance.");
        }
        // For PowerPC (likely Power8) it is possible to set SMT to either 2,4, or 8-way hardware threads.
        // We only warn if it is completely disabled since default performance drops with SMT8.
        if (c_architecture == Architecture::PowerPC &&
            countConfigured == 8*countFromDetection)
        {
            GMX_LOG(mdlog.info).
                appendText("      PowerPC SMT is likely disabled; enable SMT2/SMT4 for better performance.");
        }
    }
#endif
}
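
/* Typical usage (an illustrative sketch only; the real call sites are outside
 * this file):
 *
 *     gmx_hw_info_t *hwinfo = gmx_detect_hardware(mdlog, cr);
 *     // ... use hwinfo->cpuInfo, hwinfo->hardwareTopology, hwinfo->gpu_info ...
 *     gmx_hardware_info_free();
 *
 * Every call to gmx_detect_hardware() must be paired with a call to
 * gmx_hardware_info_free(), because the structure is reference counted. */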

gmx_hw_info_t *gmx_detect_hardware(const gmx::MDLogger &mdlog, const t_commrec *cr)
{
    int ret;

    /* make sure no one else is doing the same thing */
    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
    }

    /* only initialize the hwinfo structure if it is not already initialized */
    if (n_hwinfo == 0)
    {
        hwinfo_g = compat::make_unique<gmx_hw_info_t>();

        hwinfo_g->cpuInfo = new gmx::CpuInfo(gmx::CpuInfo::detect());

        hardwareTopologyPrepareDetection();
        hwinfo_g->hardwareTopology = new gmx::HardwareTopology(gmx::HardwareTopology::detect());

        // If we detected the topology on this system, double-check that it makes sense
        if (hwinfo_g->hardwareTopology->isThisSystem())
        {
            hardwareTopologyDoubleCheckDetection(mdlog, *(hwinfo_g->hardwareTopology));
        }

        // TODO: Get rid of this altogether.
        hwinfo_g->nthreads_hw_avail = hwinfo_g->hardwareTopology->machine().logicalProcessorCount;

        /* detect GPUs */
        hwinfo_g->gpu_info.n_dev            = 0;
        hwinfo_g->gpu_info.n_dev_compatible = 0;
        hwinfo_g->gpu_info.gpu_dev          = nullptr;

        gmx_detect_gpus(mdlog, cr);
        gmx_collect_hardware_mpi(*hwinfo_g->cpuInfo);
    }
    /* increase the reference counter */
    n_hwinfo++;

    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
    }

    return hwinfo_g.get();
}

bool compatibleGpusFound(const gmx_gpu_info_t &gpu_info)
{
    return gpu_info.n_dev_compatible > 0;
}

void gmx_hardware_info_free()
{
    int ret;

    ret = tMPI_Thread_mutex_lock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error locking hwinfo mutex: %s", strerror(errno));
    }

    /* decrease the reference counter */
    n_hwinfo--;

    if (n_hwinfo < 0)
    {
        gmx_incons("n_hwinfo < 0");
    }

    if (n_hwinfo == 0)
    {
        delete hwinfo_g->cpuInfo;
        delete hwinfo_g->hardwareTopology;
        free_gpu_info(&hwinfo_g->gpu_info);
        hwinfo_g.reset();
    }

    ret = tMPI_Thread_mutex_unlock(&hw_info_lock);
    if (ret != 0)
    {
        gmx_fatal(FARGS, "Error unlocking hwinfo mutex: %s", strerror(errno));
    }
}

}  // namespace gmx