2 # This file is part of the GROMACS molecular simulation package.
4 # Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
5 # Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 # and including many others, as listed in the AUTHORS file in the
7 # top-level source directory and at http://www.gromacs.org.
9 # GROMACS is free software; you can redistribute it and/or
10 # modify it under the terms of the GNU Lesser General Public License
11 # as published by the Free Software Foundation; either version 2.1
12 # of the License, or (at your option) any later version.
14 # GROMACS is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 # Lesser General Public License for more details.
19 # You should have received a copy of the GNU Lesser General Public
20 # License along with GROMACS; if not, see
21 # http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 # If you want to redistribute modifications to GROMACS, please
25 # consider that scientific software is very special. Version
26 # control is crucial - bugs must be traceable. We will be happy to
27 # consider code for inclusion in the official distribution, but
28 # derived work must not be called official GROMACS. Details are found
29 # in the README & COPYING files - if they are missing, get the
30 # official version at http://www.gromacs.org.
32 # To help us fund GROMACS development, we humbly ask that you cite
33 # the research papers on the package. Check out http://www.gromacs.org.
35 # Manage CUDA nvcc compilation configuration, try to be smart to ease the users'
36 # pain as much as possible:
37 # - use the CUDA_HOST_COMPILER if defined by the user, otherwise
38 # - auto-detect compatible nvcc host compiler and set nvcc -ccbin (if not MPI wrapper)
39 # - set icc compatibility mode to gcc 4.6
40 # - (advanced) variables set:
41 # * CUDA_HOST_COMPILER - the host compiler for nvcc (only with cmake <2.8.10)
42 # * CUDA_HOST_COMPILER_OPTIONS - the full host-compiler related option list passed to nvcc
44 # Note that from CMake 2.8.10 FindCUDA defines CUDA_HOST_COMPILER internally,
45 # so we won't set it ourselves, but hope that the module does a good job.
# Detect whether the user has changed CUDA_HOST_COMPILER since the previous
# configure run; if they overrode a value we picked automatically, drop the
# internal marker so their choice is respected from now on.
47 gmx_check_if_changed(CUDA_HOST_COMPILER_CHANGED CUDA_HOST_COMPILER)
49 # CUDA_HOST_COMPILER changed hence it is not auto-set anymore
50 if (CUDA_HOST_COMPILER_CHANGED AND CUDA_HOST_COMPILER_AUTOSET)
51 unset(CUDA_HOST_COMPILER_AUTOSET CACHE)
# NOTE(review): the matching endif() for the if() above is not visible in this
# excerpt — presumably dropped when the file was extracted; confirm against the
# original file.
54 # Explicitly set the host compiler for nvcc if the current compiler is
55 # supported and it's not an MPI compiler wrapper, otherwise warn the user.
57 # Note that even though nvcc compiles host code as C++, we use the
58 # CMAKE_C_COMPILER as host compiler. We do this because CUDA versions
59 # preceding 5.0 only recognize icc, but not icpc. However, both gcc and icc
60 # (i.e. all supported compilers) happily compile C++ code.
62 # Also note that with MSVC nvcc sets the -compiler-bindir option behind the
63 # scenes; to avoid conflicts we don't set -ccbin automatically.
65 # This will be executed only with cmake <v2.8.10 as later versions set the
66 # host compiler in FindCUDA.
# NOTE(review): several structural lines appear to be missing from this
# excerpt — in particular the `message(WARNING "` openers that should precede
# the two multi-line warning texts below, and the closing `")`/endif() lines.
# The warning texts are string literals at runtime, so they are left untouched.
67 if (NOT DEFINED CUDA_HOST_COMPILER AND NOT MSVC)
68 if (NOT CMAKE_COMPILER_IS_GNUCC AND
69 NOT (CMAKE_C_COMPILER_ID MATCHES "Intel" AND UNIX AND NOT APPLE))
71 Will not set the nvcc host compiler because the current C compiler is not
73 ${CMAKE_C_COMPILER} (ID: ${CMAKE_C_COMPILER_ID})
74 Compatible compilers are: gcc on Linux and Mac OS X, the Intel Compiler on 64-bit
75 Linux and MSVC on Windows. Note that with newer CUDA releases this might change,
76 for up-to-date compatibility information check the NVIDIA documentation.
77 If nothing specified, nvcc will automatically pick the platform-default compiler;
78 Note that mixing compilers can cause errors.
79 To manually set the nvcc host compiler, edit CUDA_NVCC_FLAGS or re-configure
80 setting CUDA_HOST_COMPILER to the full path of a compatible compiler.
83 # do not use MPI compiler wrappers, as these are prone to break nvcc
84 if (GMX_MPI AND NOT "${MPI_C_FOUND}") # FindMPI-based detection
86 Will not set the nvcc host compiler because the current C compiler is an MPI
87 compiler wrapper: ${CMAKE_C_COMPILER}
88 MPI compiler wrappers are prone to not work with nvcc. You might get lucky,
89 but the safest is to use the C compiler that the MPI compiler wrapper uses
90 (if this is compatible).
91 To manually set the nvcc host compiler, edit CUDA_NVCC_FLAGS or re-configure
92 setting CUDA_HOST_COMPILER to the full path of a compatible compiler.
# Compiler is supported and is not an MPI wrapper: auto-set it as the nvcc
# host compiler and record that we did so (INTERNAL cache marker, checked by
# the change-detection logic above on later configure runs).
95 set(CUDA_HOST_COMPILER "${CMAKE_C_COMPILER}")
96 set(CUDA_HOST_COMPILER_AUTOSET TRUE CACHE INTERNAL
97 "True if CUDA_HOST_COMPILER is automatically set")
102 # set up host compiler and its options
103 if(CUDA_HOST_COMPILER_CHANGED)
104 # FindCUDA in CMake 2.8.10 sets the host compiler internally
105 if (CMAKE_VERSION VERSION_LESS "2.8.10")
106 message(STATUS "Setting the nvcc host compiler to: ${CUDA_HOST_COMPILER}")
107 set(CUDA_HOST_COMPILER ${CUDA_HOST_COMPILER}
108 CACHE PATH "Host compiler for nvcc")
111 # On *nix force icc in gcc 4.6 compatibility mode. This is needed
112 # as even with icc used as host compiler, when icc's gcc compatibility
113 # mode is higher than the max gcc version officially supported by CUDA,
114 # nvcc will freak out.
115 set(CUDA_HOST_COMPILER_OPTIONS "")
# NOTE(review): the `if(` opener for the compound Intel-compiler condition
# below is missing from this excerpt (the condition starts bare at the
# `((CMAKE_C_COMPILER_ID ...` line) — restore it from the original file.
# Also note "compatibity" in the STATUS message below is a typo in a runtime
# string; fixing it is a behavior change, so it is only flagged here.
117 ((CMAKE_C_COMPILER_ID MATCHES "Intel" AND
118 (CUDA_HOST_COMPILER_AUTOSET OR CMAKE_C_COMPILER STREQUAL CUDA_HOST_COMPILER)) OR
119 (CMAKE_CXX_COMPILER_ID MATCHES "Intel" AND CMAKE_CXX_COMPILER STREQUAL CUDA_HOST_COMPILER))
121 message(STATUS "Setting Intel Compiler compatibity mode to gcc 4.6 for nvcc host compilation")
122 list(APPEND CUDA_HOST_COMPILER_OPTIONS "-Xcompiler;-gcc-version=460")
125 if(APPLE AND CMAKE_C_COMPILER_ID MATCHES "GNU")
126 # Some versions of gcc-4.8 and gcc-4.9 produce errors (in particular on OS X)
127 # if we do not use -D__STRICT_ANSI__. It is harmless, so we might as well add it for all versions.
128 list(APPEND CUDA_HOST_COMPILER_OPTIONS "-D__STRICT_ANSI__")
# Cache the assembled option list so later configure runs see the same value;
# marked "do not edit" because it is derived, not user-facing.
131 set(CUDA_HOST_COMPILER_OPTIONS "${CUDA_HOST_COMPILER_OPTIONS}"
132 CACHE STRING "Options for nvcc host compiler (do not edit!).")
134 mark_as_advanced(CUDA_HOST_COMPILER CUDA_HOST_COMPILER_OPTIONS)
137 # If any of these manual override variables for target CUDA GPU architectures
138 # or virtual architecture is set, parse the values and assemble the nvcc
139 # command line for these. Otherwise use our defaults.
140 # Note that the manual override variables require a semicolon separated
# architecture codes.
142 if (GMX_CUDA_TARGET_SM OR GMX_CUDA_TARGET_COMPUTE)
143 set(GMX_CUDA_NVCC_GENCODE_FLAGS)
144 set(_target_sm_list ${GMX_CUDA_TARGET_SM})
# One -gencode SASS entry per user-requested physical architecture.
145 foreach(_target ${_target_sm_list})
146 list(APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_${_target},code=sm_${_target}")
# NOTE(review): endforeach()/endif() lines for this section are not visible in
# this excerpt — presumably dropped during extraction; confirm against the
# original file.
# One -gencode PTX entry per user-requested virtual architecture.
148 set(_target_compute_list ${GMX_CUDA_TARGET_COMPUTE})
149 foreach(_target ${_target_compute_list})
150 list(APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_${_target},code=compute_${_target}")
153 # Set the CUDA GPU architectures to compile for:
154 # - with CUDA >=5.0 <6.5: CC <=3.5 is supported
155 # => compile sm_20, sm_30, sm_35 SASS, and compute_35 PTX
156 # - with CUDA ==6.5: CC <=3.7 and 5.0 are supported
# => compile sm_20, sm_30, sm_35, sm_37, sm_50 SASS, and compute_50 PTX
158 # - with CUDA >=7.0 CC 5.2 is supported (5.3, Tegra X1 we don't generate code for)
159 # => compile sm_20, sm_30, sm_35, sm_37, sm_50, & sm_52 SASS, and compute_52 PTX
161 # Note that CUDA 6.5.19 second patch release supports cc 5.2 too, but
162 # CUDA_VERSION does not contain patch version and having PTX 5.0 JIT-ed is
# equally fast as compiling with sm_52 anyway.
165 # First add flags that trigger SASS (binary) code generation for physical arch
166 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_20,code=sm_20")
167 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_30,code=sm_30")
168 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_35,code=sm_35")
170 if(NOT CUDA_VERSION VERSION_LESS "6.5") # >= 6.5
171 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_37,code=sm_37")
172 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_50,code=sm_50")
174 if(NOT CUDA_VERSION VERSION_LESS "7.0") # >= 7.0
175 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_52,code=sm_52")
178 # Next add flags that trigger PTX code generation for the newest supported virtual arch
179 # that's useful to JIT to future architectures
180 if(CUDA_VERSION VERSION_LESS "6.5")
181 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_35,code=compute_35")
182 elseif(CUDA_VERSION VERSION_LESS "7.0")
183 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_50,code=compute_50")
184 else() # version >= 7.0
185 list (APPEND GMX_CUDA_NVCC_GENCODE_FLAGS "-gencode;arch=compute_52,code=compute_52")
# Make the override variables visible as (dependent) cache entries so the user
# can inspect/edit them from the cache.
189 gmx_dependent_cache_variable(GMX_CUDA_TARGET_SM "List of CUDA GPU architecture codes to compile for (without the sm_ prefix)" STRING "" GMX_CUDA_TARGET_SM)
190 gmx_dependent_cache_variable(GMX_CUDA_TARGET_COMPUTE "List of CUDA virtual architecture codes to compile for (without the compute_ prefix)" STRING "" GMX_CUDA_TARGET_COMPUTE)
192 # assemble the CUDA flags
# Start from the per-architecture -gencode flags computed above, and always
# enable fast math for the CUDA kernels.
193 list(APPEND GMX_CUDA_NVCC_FLAGS "${GMX_CUDA_NVCC_GENCODE_FLAGS}")
194 list(APPEND GMX_CUDA_NVCC_FLAGS "-use_fast_math")
196 # assemble the CUDA host compiler flags
197 # with CMake <2.8.10 the host compiler needs to be set on the nvcc command line
198 if (CMAKE_VERSION VERSION_LESS "2.8.10")
199 list(APPEND GMX_CUDA_NVCC_FLAGS "-ccbin=${CUDA_HOST_COMPILER}")
# NOTE(review): the endif() closing the version check above is not visible in
# this excerpt — presumably dropped during extraction.
201 list(APPEND GMX_CUDA_NVCC_FLAGS "${CUDA_HOST_COMPILER_OPTIONS}")
203 # The flags are set as local variables which shadow the cache variables. The cache variables
204 # (can be set by the user) are appended. This is done in a macro to set the flags when all
205 # host compiler flags are already set.
# NOTE(review): the closing endmacro() (and several endif()/endforeach() lines)
# are not visible in this excerpt; the macro definition is incomplete from here.
206 macro(GMX_SET_CUDA_NVCC_FLAGS)
207 if(CUDA_PROPAGATE_HOST_FLAGS)
# Take over flag propagation from FindCUDA so we can filter what gets passed.
208 set(CUDA_PROPAGATE_HOST_FLAGS OFF)
210 # When CUDA 6.5 is required we should use C++11 also for CUDA and also propagate
211 # the C++11 flag to CUDA. Then we can use the solution implemented in FindCUDA
212 # (starting with 3.3 - can be backported). For now we need to remove the C++11
213 # flag which means we need to manually propagate all other flags.
214 string(REGEX REPLACE "[-]+std=c\\+\\+0x" "" _CMAKE_CXX_FLAGS_NOCXX11 "${CMAKE_CXX_FLAGS}")
216 # The IBM xlc compiler chokes if we use both altivec and Cuda. Solve
217 # this by not propagating the flag in this case.
218 if(CMAKE_CXX_COMPILER_ID MATCHES "XL")
# BUG(review): the REGEX REPLACE below is malformed — it is missing the empty
# replacement-string argument ("") after "-qaltivec", so CMake treats
# _CMAKE_CXX_FLAGS_NOCXX11 as the replacement and the literal string
# "$_CMAKE_CXX_FLAGS_NOCXX11" as the output variable name. Also
# "$_CMAKE_CXX_FLAGS_NOCXX11" should be "${_CMAKE_CXX_FLAGS_NOCXX11}".
# Intended form (cf. string(REGEX REPLACE <regex> <replace> <out> <input>)):
#   string(REGEX REPLACE "-qaltivec" "" _CMAKE_CXX_FLAGS_NOCXX11 "${_CMAKE_CXX_FLAGS_NOCXX11}")
219 string(REGEX REPLACE "-qaltivec" _CMAKE_CXX_FLAGS_NOCXX11 "$_CMAKE_CXX_FLAGS_NOCXX11")
# nvcc takes comma-separated values after -Xcompiler, so convert the
# space-separated host flags accordingly before appending them.
222 string(REPLACE " " "," _flags "${_CMAKE_CXX_FLAGS_NOCXX11}")
223 set(CUDA_NVCC_FLAGS "${GMX_CUDA_NVCC_FLAGS};${CUDA_NVCC_FLAGS};-Xcompiler;${_flags}")
225 # Create list of all possible configurations. For multi-configuration this is CMAKE_CONFIGURATION_TYPES
226 # and for single configuration CMAKE_BUILD_TYPE. Not sure why to add the default ones, but FindCUDA
228 set(CUDA_configuration_types ${CMAKE_CONFIGURATION_TYPES} ${CMAKE_BUILD_TYPE} Debug MinSizeRel Release RelWithDebInfo)
229 list(REMOVE_DUPLICATES CUDA_configuration_types)
# Propagate the per-configuration C++ flags to the matching per-configuration
# nvcc flag variables, again comma-joined for -Xcompiler.
231 foreach(_config ${CUDA_configuration_types})
232 string(TOUPPER ${_config} _config_upper)
233 string(REPLACE " " "," _flags "${CMAKE_CXX_FLAGS_${_config_upper}}")
234 set(CUDA_NVCC_FLAGS_${_config_upper} "${CUDA_NVCC_FLAGS_${_config_upper}};-Xcompiler;${_flags}")
# Fallback when host flags are not propagated: just combine our flags with any
# user-supplied CUDA_NVCC_FLAGS.
237 set(CUDA_NVCC_FLAGS "${GMX_CUDA_NVCC_FLAGS};${CUDA_NVCC_FLAGS}")