/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *  \brief Define CUDA implementation of nbnxn_gpu.h
 *
 *  \author Szilard Pall <pall.szilard@gmail.com>
 */
#include "gmxpre.h"

#include "nbnxn_cuda.h"

#include "config.h"

#include <assert.h>
#include <stdlib.h>

#include "gromacs/mdlib/nbnxn_gpu.h"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_gpu_common.h"
#include "gromacs/mdlib/nbnxn_gpu_common_utils.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxn_cuda_types.h"

/***** The kernel declarations/definitions come here *****/

/* Top-level kernel declaration generation: will generate through multiple
 * inclusion the following flavors for all kernel declarations:
 * - force-only output;
 * - force and energy output;
 * - force-only with pair list pruning;
 * - force and energy output with pair list pruning.
 */
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
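
/* Illustration of the multiple-inclusion technique used above: a minimal
 * sketch, NOT the actual contents of nbnxn_cuda_kernels.cuh. The kernel
 * source keys its name and body off the flavor macros, e.g.:
 *
 *     // flavor-generating header (hypothetical, simplified)
 *     #ifdef CALC_ENERGIES
 *     __global__ void k_VF(...);   // accumulates energies as well as forces
 *     #else
 *     __global__ void k_F(...);    // forces only
 *     #endif
 *
 * so including the header once per macro combination emits all four
 * declaration flavors from a single kernel source file.
 */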

/* Now generate the function definitions if we are using a single compilation unit. */
#if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_F_prune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_noprune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_VF_prune.cu"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_kernel_pruneonly.cu"
#endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */

/*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t,
                                     const cu_nbparam_t,
                                     const cu_plist_t,
                                     bool);

/*********************************/

/* XXX switch between chevron and cudaLaunchKernel (supported only in CUDA >=7.0)
   -- only for benchmarking purposes */
static const bool bUseCudaLaunchKernel =
    (GMX_CUDA_VERSION >= 7000) && (getenv("GMX_DISABLE_CUDALAUNCH") == NULL);
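
/* Usage example for the switch above: on a CUDA >= 7.0 build, the chevron
 * launch path can be benchmarked against cudaLaunchKernel() by setting the
 * environment variable, e.g.
 *     GMX_DISABLE_CUDALAUNCH=1 gmx mdrun ...
 */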

/*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo)
{
    int max_grid_x_size;

    assert(dinfo);
    /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
       empty domain) and that case should be handled before this point. */
    assert(nwork_units > 0);

    max_grid_x_size = dinfo->prop.maxGridSize[0];

    /* do we exceed the grid x dimension limit? */
    if (nwork_units > max_grid_x_size)
    {
        gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                  "The number of nonbonded work units (=number of super-clusters) exceeds the "
                  "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
    }

    return nwork_units;
}

/* Constant arrays listing all kernel function pointers and enabling selection
   of a kernel in an elegant manner. */

/*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
 *  electrostatics and VDW type.
 *
 *  Note that the row- and column-order of function pointers has to match the
 *  order of corresponding enumerated electrostatics and vdw types, resp.,
 *  defined in nbnxn_cuda_types.h.
 */

/*! Force-only kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
};

/*! Force + energy kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
};

/*! Force + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
};

/*! Force + energy + pruning kernel function pointers. */
static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
{
    { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
    { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
};
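
/* Example of how the tables resolve (assuming the row/column order matches
 * the electrostatics and VdW enums in nbnxn_cuda_types.h, as required above):
 * reaction-field electrostatics with plain cut-off LJ, with energies and
 * pruning requested, selects
 *     nb_kfunc_ener_prune_ptr[eelCuRF][evdwCuCUT]
 * i.e. nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda.
 */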

/*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int                                 eeltype,
                                                       int                                 evdwtype,
                                                       bool                                bDoEne,
                                                       bool                                bDoPrune,
                                                       const gmx_device_info_t gmx_unused *devInfo)
{
    nbnxn_cu_kfunc_ptr_t res;

    GMX_ASSERT(eeltype < eelCuNR,
               "The electrostatics type requested is not implemented in the CUDA kernels.");
    GMX_ASSERT(evdwtype < evdwCuNR,
               "The VdW type requested is not implemented in the CUDA kernels.");

    /* assert assumptions made by the kernels */
    GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize,
               "The CUDA kernels require the cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the architecture targeted.");

    if (bDoEne)
    {
        if (bDoPrune)
        {
            res = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
        }
        else
        {
            res = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
        }
    }
    else
    {
        if (bDoPrune)
        {
            res = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
        }
        else
        {
            res = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
        }
    }

    return res;
}

/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
static inline int calc_shmem_required_nonbonded(const int num_threads_z, const gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp)
{
    int shmem;

    assert(dinfo);

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    if (dinfo->prop.major >= 3)
    {
        if (nbp->vdwtype == evdwCuCUTCOMBGEOM ||
            nbp->vdwtype == evdwCuCUTCOMBLB)
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(float2);
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int);
        }
    }
    if (dinfo->prop.major < 3)
    {
        /* force reduction buffers in shared memory */
        shmem += c_clSize * c_clSize * 3 * sizeof(float);
    }

    return shmem;
}
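
/* Worked example (assuming the usual defaults c_numClPerSupercl = 8,
 * c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4;
 * check nbnxn_pairlist.h for the build in use) with num_threads_z = 1 on an
 * sm_3.0+ device running a non-combination-rule kernel:
 *     xq preload:   8*8*sizeof(float4) = 1024 bytes
 *     cj preload:   1*2*4*sizeof(int)  =   32 bytes
 *     atom types:   8*8*sizeof(int)    =  256 bytes
 * i.e. 1312 bytes of dynamic shared memory per thread block.
 */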

/*! As we execute nonbonded workload in separate streams, before launching
   the kernel we need to make sure that the following operations have completed:
    - atomdata allocation and related H2D transfers (every nstlist step);
    - pair list H2D transfer (every nstlist step);
    - shift vector H2D transfer (every nstlist step);
    - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local stream at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local on the same device/context, hence it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops") on pre-GK110 devices with a single hardware queue, but on
   later devices with multiple hardware queues the dependency needs to be
   enforced. We use the misc_ops_and_local_H2D_done event to record the point
   where the local x+q H2D (and all preceding) tasks are complete and
   synchronize with this event in the non-local stream before launching the
   non-bonded kernel.
 */
void nbnxn_gpu_launch_kernel(gmx_nbnxn_cuda_t       *nb,
                             const nbnxn_atomdata_t *nbatom,
                             int                     flags,
                             int                     iloc)
{
    cudaError_t          stat;
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* CUDA kernel launch-related stuff */
    int                  nblock;
    int                  shmem;
    dim3                 dim_block, dim_grid;
    nbnxn_cu_kfunc_ptr_t nb_kernel = NULL; /* fn pointer to the nonbonded kernel */

    cu_atomdata_t       *adat   = nb->atdat;
    cu_nbparam_t        *nbp    = nb->nbparam;
    cu_plist_t          *plist  = nb->plist[iloc];
    cu_timers_t         *t      = nb->timers;
    cudaStream_t         stream = nb->stream[iloc];

    bool                 bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool                 bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = nb->bDoTime;

    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, the local x+q copy and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipWork(nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }

    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->nb_h2d[iloc].openTimingRegion(stream);
    }

    /* HtoD x, q */
    cu_copy_H2D_async(adat->xq + adat_begin, nbatom->x + adat_begin * 4,
                      adat_len * sizeof(*adat->xq), stream);

    if (bDoTime)
    {
        t->nb_h2d[iloc].closeTimingRegion(stream);
    }

    /* When we get here all misc operations issued in the local stream as well
       as the local xq H2D are done, so we record that in the local stream and
       wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
            CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
        }
        else
        {
            stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
            CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
        }
    }

    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (TODO: ATM that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune, maybe we need a better way?).
         */
        nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with CUDA) */
        return;
    }

    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->nb_k[iloc].openTimingRegion(stream);
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    (plist->haveFreshList && !nb->timers->didPrune[iloc]),
                                    nb->dev_info);

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int num_threads_z = 1;
    if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7)
    {
        num_threads_z = 2;
    }
    nblock    = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);
    dim_block = dim3(c_clSize, c_clSize, num_threads_z);
    dim_grid  = dim3(nblock, 1, 1);
    shmem     = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp);
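
    /* Example: with the default c_clSize = 8 this gives an 8x8x1 block of
     * 64 threads (two warps); on sm_3.7 (e.g. Tesla K80) num_threads_z = 2
     * doubles that to 8x8x2 = 128 threads to make better use of that
     * architecture's larger register file and shared memory.
     */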

    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %ux%ux%u\n\t"
                "\tGrid: %ux%u\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                shmem);
    }

    if (bUseCudaLaunchKernel)
    {
        gmx_unused void* kernel_args[4];
        kernel_args[0] = adat;
        kernel_args[1] = nbp;
        kernel_args[2] = plist;
        kernel_args[3] = &bCalcFshift;
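
        /* Note: cudaLaunchKernel() expects an array of pointers to the
         * argument values. The kernels take the parameter structs by value,
         * so the struct pointers adat/nbp/plist already point at the argument
         * values, while bCalcFshift needs an explicit '&'.
         */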

#if GMX_CUDA_VERSION >= 7000
        cudaLaunchKernel((void *)nb_kernel, dim_grid, dim_block, kernel_args, shmem, stream);
#endif
    }
    else
    {
        /* launch async nonbonded calculations */
        nb_kernel<<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, bCalcFshift);
    }
    CU_LAUNCH_ERR("k_calc_nb");

    if (bDoTime)
    {
        t->nb_k[iloc].closeTimingRegion(stream);
    }

#if (defined(WIN32) || defined( _WIN32 ))
    /* Windows: force flushing WDDM queue */
    stat = cudaStreamQuery(stream);
#endif
}

/*! Calculates the amount of shared memory required by the CUDA prune kernel in use. */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float4);
    /* cj in shared memory, for each warp separately */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);

    return shmem;
}
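
/* Worked example (same default constants as above) with the prune kernel's
 * num_threads_z = c_cudaPruneKernelJ4Concurrency, typically 4:
 *     x preload:  8*8*sizeof(float4) = 1024 bytes
 *     cj preload: 4*2*4*sizeof(int)  =  128 bytes
 * i.e. 1152 bytes of dynamic shared memory per thread block.
 */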

void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t *nb,
                                       int               iloc,
                                       int               numParts)
{
    cu_atomdata_t *adat   = nb->atdat;
    cu_nbparam_t  *nbp    = nb->nbparam;
    cu_plist_t    *plist  = nb->plist[iloc];
    cu_timers_t   *t      = nb->timers;
    cudaStream_t   stream = nb->stream[iloc];

    bool           bDoTime = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;
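
    /* Example: with nsci = 1000 list entries and numParts = 4, each call
     * prunes ~250 entries while part cycles 0,1,2,3,0,..., so the whole list
     * is re-pruned once per four rolling-prune calls, i.e. the work is spread
     * over the steps between two list generations.
     */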

    /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }

    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->prune_k[iloc] : t->rollingPrune_k[iloc]);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int  num_threads_z = c_cudaPruneKernelJ4Concurrency;
    int  nblock        = calc_nb_kernel_nblock(numSciInPart, nb->dev_info);
    dim3 dim_block     = dim3(c_clSize, c_clSize, num_threads_z);
    dim3 dim_grid      = dim3(nblock, 1, 1);
    int  shmem         = calc_shmem_required_prune(num_threads_z);

    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %ux%ux%u\n\t"
                "\tGrid: %ux%u\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %d\n",
                dim_block.x, dim_block.y, dim_block.z,
                dim_grid.x, dim_grid.y, numSciInPart*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c,
                shmem);
    }

    if (bUseCudaLaunchKernel)
    {
        gmx_unused void* kernel_args[5];
        kernel_args[0] = adat;
        kernel_args[1] = nbp;
        kernel_args[2] = plist;
        kernel_args[3] = &numParts;
        kernel_args[4] = &part;

#if GMX_CUDA_VERSION >= 7000
        if (plist->haveFreshList)
        {
            cudaLaunchKernel((void *)nbnxn_kernel_prune_cuda<true>, dim_grid, dim_block, kernel_args, shmem, stream);
        }
        else
        {
            cudaLaunchKernel((void *)nbnxn_kernel_prune_cuda<false>, dim_grid, dim_block, kernel_args, shmem, stream);
        }
#endif
    }
    else
    {
        if (plist->haveFreshList)
        {
            nbnxn_kernel_prune_cuda<true><<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, numParts, part);
        }
        else
        {
            nbnxn_kernel_prune_cuda<false><<< dim_grid, dim_block, shmem, stream>>> (*adat, *nbp, *plist, numParts, part);
        }
    }
    CU_LAUNCH_ERR("k_pruneonly");

    /* TODO: consider a more elegant way to track which kernel has been called
       (combined or separate 1st pass prune, rolling prune). */
    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->didPrune[iloc] = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->didRollingPrune[iloc] = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }

#if (defined(WIN32) || defined( _WIN32 ))
    /* Windows: force flushing WDDM queue */
    cudaError_t gmx_unused stat = cudaStreamQuery(stream);
#endif
}

void nbnxn_gpu_launch_cpyback(gmx_nbnxn_cuda_t       *nb,
                              const nbnxn_atomdata_t *nbatom,
                              int                     flags,
                              int                     aloc)
{
    cudaError_t stat;
    int         adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    int            iloc = gpuAtomToInteractionLocality(aloc);

    cu_atomdata_t *adat    = nb->atdat;
    cu_timers_t   *t       = nb->timers;
    bool           bDoTime = nb->bDoTime;
    cudaStream_t   stream  = nb->stream[iloc];

    bool           bCalcEner   = flags & GMX_FORCE_ENERGY;
    bool           bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch non-local copy-back if there was no non-local work to do */
    if (canSkipWork(nb, iloc))
    {
        return;
    }

    getGpuAtomRange(adat, aloc, adat_begin, adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->nb_d2h[iloc].openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       kernel has finished. */
    if (iloc == eintLocal && nb->bUseTwoStreams)
    {
        stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
        CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
    }

    /* DtoH f */
    cu_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f + adat_begin,
                      (adat_len)*sizeof(*adat->f), stream);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we want the non-local data
       back first. */
    if (iloc == eintNonlocal)
    {
        stat = cudaEventRecord(nb->nonlocal_done, stream);
        CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
    }

    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
                              SHIFTS * sizeof(*nb->nbst.fshift), stream);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj,
                              sizeof(*nb->nbst.e_lj), stream);
            cu_copy_D2H_async(nb->nbst.e_el, adat->e_el,
                              sizeof(*nb->nbst.e_el), stream);
        }
    }

    if (bDoTime)
    {
        t->nb_d2h[iloc].closeTimingRegion(stream);
    }
}
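
/* Stream/event ordering sketch for a two-stream (domain-decomposition) step,
 * as enforced by the events used above (illustrative, not from the original
 * file; evt_h2d = misc_ops_and_local_H2D_done, evt_nl = nonlocal_done):
 *
 *   local:     [misc ops][local x+q H2D][record evt_h2d][local kernel] ... [wait evt_nl][local f D2H]
 *   non-local: [non-local x+q H2D][wait evt_h2d][non-local kernel][non-local f D2H][record evt_nl]
 *
 * This makes the non-local force results available before the local ones,
 * which is the order in which the halo exchange needs them.
 */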

void nbnxn_cuda_set_cacheconfig(const gmx_device_info_t *devinfo)
{
    cudaError_t stat;

    for (int i = 0; i < eelCuNR; i++)
    {
        for (int j = 0; j < evdwCuNR; j++)
        {
            if (devinfo->prop.major >= 3)
            {
                /* Default kernel on sm_3.x and later: 32/32 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j],   cudaFuncCachePreferEqual);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
            }
            else
            {
                /* On Fermi prefer L1 gives 2% higher performance */
                /* Default kernel on sm_2.x: 16/48 kB Shared/L1 */
                cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j],   cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferL1);
                cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferL1);
                stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferL1);
            }
            CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
        }
    }
}