Access the device status directly, remove the getter
[gromacs.git] / src / gromacs / hardware / hw_info.h
blob b7b796f8b6da8ee93d13841d03fdbe92a3dfa6ac
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
 * Copyright (c) 2017,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef GMX_HARDWARE_HWINFO_H
#define GMX_HARDWARE_HWINFO_H

#include <memory>
#include <string>
#include <vector>

#include "gromacs/hardware/device_management.h"
#include "gromacs/utility/basedefinitions.h"

namespace gmx
{
class CpuInfo;
class HardwareTopology;
} // namespace gmx
struct DeviceInformation;
/* Hardware information structure with CPU and GPU information.
 * It is initialized by gmx_detect_hardware().
 * NOTE: this structure may only contain structures that are
 *       valid over the whole process (i.e. must be able to
 *       be shared among all threads) */
struct gmx_hw_info_t
{
    gmx_hw_info_t(std::unique_ptr<gmx::CpuInfo>          cpuInfo,
                  std::unique_ptr<gmx::HardwareTopology> hardwareTopology);
    ~gmx_hw_info_t();

    /* Data for our local physical node */

    /*! \brief Number of hardware threads available.
     *
     * This number is based on the number of CPUs reported as
     * available by the OS at the time of detection. */
    int nthreads_hw_avail;

    std::unique_ptr<gmx::CpuInfo>          cpuInfo;          /* Information about CPU capabilities */
    std::unique_ptr<gmx::HardwareTopology> hardwareTopology; /* Information about hardware topology */
    std::vector<std::unique_ptr<DeviceInformation>> deviceInfoList; /* Information about GPUs detected on this physical node */

    /* Data reduced through MPI over all physical nodes */
    int nphysicalnode;       /* Number of physical nodes */
    int ncore_tot;           /* Sum of #cores over all nodes, can be 0 */
    int ncore_min;           /* Min #cores over all nodes */
    int ncore_max;           /* Max #cores over all nodes */
    int nhwthread_tot;       /* Sum of #hwthreads over all nodes */
    int nhwthread_min;       /* Min #hwthreads over all nodes */
    int nhwthread_max;       /* Max #hwthreads over all nodes */
    int ngpu_compatible_tot; /* Sum of #GPUs over all nodes */
    int ngpu_compatible_min; /* Min #GPUs over all nodes */
    int ngpu_compatible_max; /* Max #GPUs over all nodes */

    int simd_suggest_min; /* Highest SIMD instruction set supported by all ranks */
    int simd_suggest_max; /* Highest SIMD instruction set supported by at least one rank */

    gmx_bool bIdenticalGPUs; /* TRUE if all ranks have the same type(s) and order of GPUs */
    bool     haveAmdZen1Cpu; /* TRUE when at least one CPU in any of the nodes is AMD Zen of the first generation */
};
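
/* Illustrative sketch, not part of the original header: one way the
 * MPI-reduced fields above might be consulted after detection. Only members
 * declared in gmx_hw_info_t are used; the helper name reportNodeUniformity
 * is hypothetical.
 *
 *     #include <string>
 *
 *     std::string reportNodeUniformity(const gmx_hw_info_t& hwinfo)
 *     {
 *         std::string report;
 *         if (hwinfo.ncore_min != hwinfo.ncore_max)
 *         {
 *             report += "Core counts differ between physical nodes.\n";
 *         }
 *         if (hwinfo.ngpu_compatible_min != hwinfo.ngpu_compatible_max || !hwinfo.bIdenticalGPUs)
 *         {
 *             report += "Compatible GPU availability is not uniform across ranks.\n";
 *         }
 *         if (report.empty())
 *         {
 *             report = "Hardware looks uniform across all "
 *                      + std::to_string(hwinfo.nphysicalnode) + " physical node(s).\n";
 *         }
 *         return report;
 *     }
 */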

/* The options for the thread affinity setting, default: auto */
enum class ThreadAffinity
{
    Select,
    Auto,
    On,
    Off,
    Count
};
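
/* Illustrative sketch, not part of the original header: converting a
 * ThreadAffinity value to a human-readable string, e.g. for log output.
 * The helper name threadAffinityDescription and the descriptions are
 * assumptions, not taken from this file.
 *
 *     const char* threadAffinityDescription(ThreadAffinity affinity)
 *     {
 *         switch (affinity)
 *         {
 *             case ThreadAffinity::Select: return "pin to user-selected cores";
 *             case ThreadAffinity::Auto:   return "pinning chosen automatically";
 *             case ThreadAffinity::On:     return "pinning enabled";
 *             case ThreadAffinity::Off:    return "pinning disabled";
 *             default:                     return "unknown";
 *         }
 *     }
 */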

/*! \internal \brief Threading and GPU options, can be set automatically or by the user
 *
 * \todo During mdrunner(), if the user has left any of these values
 * at their defaults (which tends to mean "choose automatically"),
 * then those values are over-written with the result of such
 * automation. This creates problems for the subsequent code in
 * knowing what was done, why, and reporting correctly to the
 * user. Find a way to improve this.
 */
struct gmx_hw_opt_t
{
    //! Total number of threads requested (thread-MPI + OpenMP).
    int nthreads_tot = 0;
    //! Number of thread-MPI threads requested.
    int nthreads_tmpi = 0;
    //! Number of OpenMP threads requested.
    int nthreads_omp = 0;
    //! Number of OpenMP threads to use on PME-only ranks.
    int nthreads_omp_pme = 0;
    //! Thread affinity switch, see enum above.
    ThreadAffinity threadAffinity = ThreadAffinity::Select;
    //! Logical core pinning stride.
    int core_pinning_stride = 0;
    //! Logical core pinning offset.
    int core_pinning_offset = 0;
    //! Empty, or a string provided by the user declaring (unique) GPU IDs available for mdrun to use.
    std::string gpuIdsAvailable = "";
    //! Empty, or a string provided by the user mapping GPU tasks to devices.
    std::string userGpuTaskAssignment = "";
    //! Tells whether mdrun is free to choose the total number of threads (by choosing the number of OpenMP and/or thread-MPI threads).
    bool totNumThreadsIsAuto;
};
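
/* Illustrative sketch, not part of the original header: the zero/empty
 * defaults above generally mean "choose automatically", as the todo note
 * explains. A caller wanting explicit control could fill in selected fields;
 * the values and the GPU ID string format shown here are arbitrary
 * assumptions.
 *
 *     gmx_hw_opt_t hw_opt;
 *     hw_opt.nthreads_omp        = 4;                    // request 4 OpenMP threads per rank
 *     hw_opt.threadAffinity      = ThreadAffinity::Auto; // let mdrun decide on pinning
 *     hw_opt.gpuIdsAvailable     = "01";                 // restrict mdrun to GPU IDs 0 and 1
 *     hw_opt.totNumThreadsIsAuto = true;                 // total thread count still chosen by mdrun
 */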

#endif