2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * \brief Define CUDA implementation of nbnxn_gpu.h
38 * \author Szilard Pall <pall.szilard@gmail.com>
47 #include "gromacs/nbnxm/nbnxm_gpu.h"
54 #include "nbnxm_cuda.h"
56 #include "gromacs/gpu_utils/cudautils.cuh"
57 #include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
58 #include "gromacs/gpu_utils/vectype_ops.cuh"
59 #include "gromacs/mdlib/ppforceworkload.h"
60 #include "gromacs/nbnxm/atomdata.h"
61 #include "gromacs/nbnxm/gpu_common.h"
62 #include "gromacs/nbnxm/gpu_common_utils.h"
63 #include "gromacs/nbnxm/gpu_data_mgmt.h"
64 #include "gromacs/nbnxm/grid.h"
65 #include "gromacs/nbnxm/nbnxm.h"
66 #include "gromacs/nbnxm/pairlist.h"
67 #include "gromacs/nbnxm/cuda/nbnxm_buffer_ops_kernels.cuh"
68 #include "gromacs/timing/gpu_timing.h"
69 #include "gromacs/utility/cstringutil.h"
70 #include "gromacs/utility/gmxassert.h"
72 #include "nbnxm_cuda_types.h"
74 /***** The kernel declarations/definitions come here *****/
76 /* Top-level kernel declaration generation: will generate through multiple
77 * inclusion the following flavors for all kernel declarations:
78 * - force-only output;
79 * - force and energy output;
80 * - force-only with pair list pruning;
81 * - force and energy output with pair list pruning.
#define FUNCTION_DECLARATION_ONLY
/** Force only **/
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES

/*** Pair-list pruning kernels ***/
/** Force only **/
#define PRUNE_NBL
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
/** Force & energy **/
#define CALC_ENERGIES
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernels.cuh"
#undef CALC_ENERGIES
#undef PRUNE_NBL

/* Prune-only kernels */
#include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cuh"
#undef FUNCTION_DECLARATION_ONLY
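/* Illustration (not generated code): each inclusion of nbnxm_cuda_kernels.cuh
 * above declares one flavor of every kernel listed in the tables below. For the
 * plain Ewald electrostatics + LJ cut-off combination, the four passes declare
 * nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJ_VF_cuda,
 * nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda and nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda,
 * assuming the CALC_ENERGIES/PRUNE_NBL defines map to the _VF/_prune name suffixes.
 */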
105 /* Now generate the function definitions if we are using a single compilation unit. */
106 #if GMX_CUDA_NB_SINGLE_COMPILATION_UNIT
107 #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_noprune.cu"
108 #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_F_prune.cu"
109 #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_noprune.cu"
110 #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_VF_prune.cu"
111 #include "gromacs/nbnxm/cuda/nbnxm_cuda_kernel_pruneonly.cu"
112 #endif /* GMX_CUDA_NB_SINGLE_COMPILATION_UNIT */
118 //! Number of CUDA threads in a block
119 //TODO Optimize this through experimentation
120 constexpr static int c_bufOpsThreadsPerBlock = 128;
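/* Sketch of how this constant is used by the buffer-ops launches below: the
 * x/f buffer-ops kernels use blockSize[0] = c_bufOpsThreadsPerBlock and round
 * the grid x-dimension up with a ceiling division, e.g. (numAtoms is a
 * placeholder name here):
 *   gridSize[0] = (numAtoms + c_bufOpsThreadsPerBlock - 1)/c_bufOpsThreadsPerBlock;
 * so with 128 threads per block, 1000 atoms map to 8 blocks.
 */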
122 /*! Nonbonded kernel function pointer type */
typedef void (*nbnxn_cu_kfunc_ptr_t)(const cu_atomdata_t, const cu_nbparam_t,
                                     const cu_plist_t, bool);
128 /*********************************/
130 /*! Returns the number of blocks to be used for the nonbonded GPU kernel. */
131 static inline int calc_nb_kernel_nblock(int nwork_units, const gmx_device_info_t *dinfo)
136 /* CUDA does not accept grid dimension of 0 (which can happen e.g. with an
137 empty domain) and that case should be handled before this point. */
138 assert(nwork_units > 0);
140 max_grid_x_size = dinfo->prop.maxGridSize[0];
142 /* do we exceed the grid x dimension limit? */
143 if (nwork_units > max_grid_x_size)
145 gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
146 "The number of nonbonded work units (=number of super-clusters) exceeds the"
147 "maximum grid size in x dimension (%d > %d)!", nwork_units, max_grid_x_size);
154 /* Constant arrays listing all kernel function pointers and enabling selection
155 of a kernel in an elegant manner. */
157 /*! Pointers to the non-bonded kernels organized in 2-dim arrays by:
158 * electrostatics and VDW type.
160 * Note that the row- and column-order of function pointers has to match the
161 * order of corresponding enumerated electrostatics and vdw types, resp.,
162 * defined in nbnxn_cuda_types.h.
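 *
 * Example (for illustration): with the row/column order matching the enums,
 * nb_kfunc_ener_prune_ptr[1][0] is the reaction-field electrostatics + plain
 * LJ cut-off flavor, i.e. nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda below.
 */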
165 /*! Force-only kernel function pointers. */
166 static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_noprune_ptr[eelCuNR][evdwCuNR] =
168 { nbnxn_kernel_ElecCut_VdwLJ_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_cuda },
169 { nbnxn_kernel_ElecRF_VdwLJ_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_cuda },
170 { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_cuda },
171 { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_cuda },
172 { nbnxn_kernel_ElecEw_VdwLJ_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_cuda },
173 { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_cuda }
176 /*! Force + energy kernel function pointers. */
177 static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_noprune_ptr[eelCuNR][evdwCuNR] =
179 { nbnxn_kernel_ElecCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_cuda },
180 { nbnxn_kernel_ElecRF_VdwLJ_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_cuda },
181 { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_cuda },
182 { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_cuda },
183 { nbnxn_kernel_ElecEw_VdwLJ_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_cuda },
184 { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_cuda }
187 /*! Force + pruning kernel function pointers. */
188 static const nbnxn_cu_kfunc_ptr_t nb_kfunc_noener_prune_ptr[eelCuNR][evdwCuNR] =
190 { nbnxn_kernel_ElecCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_cuda },
191 { nbnxn_kernel_ElecRF_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_cuda },
192 { nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_cuda },
193 { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_cuda },
194 { nbnxn_kernel_ElecEw_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_cuda },
195 { nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_cuda }
198 /*! Force + energy + pruning kernel function pointers. */
199 static const nbnxn_cu_kfunc_ptr_t nb_kfunc_ener_prune_ptr[eelCuNR][evdwCuNR] =
201 { nbnxn_kernel_ElecCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_cuda },
202 { nbnxn_kernel_ElecRF_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_cuda },
203 { nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_cuda },
204 { nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_cuda },
205 { nbnxn_kernel_ElecEw_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_cuda },
206 { nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_cuda, nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_cuda }
209 /*! Return a pointer to the kernel version to be executed at the current step. */
static inline nbnxn_cu_kfunc_ptr_t select_nbnxn_kernel(int eeltype, int evdwtype,
                                                       bool bDoEne, bool bDoPrune,
                                                       const gmx_device_info_t gmx_unused *devInfo)
216 nbnxn_cu_kfunc_ptr_t res;
218 GMX_ASSERT(eeltype < eelCuNR,
219 "The electrostatics type requested is not implemented in the CUDA kernels.");
220 GMX_ASSERT(evdwtype < evdwCuNR,
221 "The VdW type requested is not implemented in the CUDA kernels.");
223 /* assert assumptions made by the kernels */
224 GMX_ASSERT(c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit == devInfo->prop.warpSize,
225 "The CUDA kernels require the cluster_size_i*cluster_size_j/nbnxn_gpu_clusterpair_split to match the warp size of the architecture targeted.");
231 res = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
235 res = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
242 res = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
246 res = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
253 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use. */
254 static inline int calc_shmem_required_nonbonded(const int num_threads_z, const gmx_device_info_t gmx_unused *dinfo, const cu_nbparam_t *nbp)
260 /* size of shmem (force-buffers/xq/atom type preloading) */
261 /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
262 /* i-atom x+q in shared memory */
263 shmem = c_numClPerSupercl * c_clSize * sizeof(float4);
264 /* cj in shared memory, for each warp separately */
265 shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
267 if (nbp->vdwtype == evdwCuCUTCOMBGEOM ||
268 nbp->vdwtype == evdwCuCUTCOMBLB)
270 /* i-atom LJ combination parameters in shared memory */
271 shmem += c_numClPerSupercl * c_clSize * sizeof(float2);
275 /* i-atom types in shared memory */
276 shmem += c_numClPerSupercl * c_clSize * sizeof(int);
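/* Worked example (assuming the usual CUDA values c_numClPerSupercl = 8,
   c_clSize = 8, c_nbnxnGpuClusterpairSplit = 2, c_nbnxnGpuJgroupSize = 4 and
   num_threads_z = 1): xq preload 8*8*16 B = 1024 B, cj preload 1*2*4*4 B = 32 B,
   plus either LJ parameters 8*8*8 B = 512 B or atom types 8*8*4 B = 256 B,
   i.e. roughly 1.3 kB of shared memory per block. */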
282 /*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
284 * As the point where the local stream tasks can be considered complete happens
285 * at the same call point where the nonlocal stream should be synced with the
 * local, this function records the event if called with the local stream as
 * argument and, when called with the nonlocal stream, inserts into it a wait
 * on that event.
 */
289 void nbnxnInsertNonlocalGpuDependency(const gmx_nbnxn_cuda_t *nb,
290 const InteractionLocality interactionLocality)
292 cudaStream_t stream = nb->stream[interactionLocality];
294 /* When we get here all misc operations issued in the local stream as well as
295 the local xq H2D are done,
296 so we record that in the local stream and wait for it in the nonlocal one.
297 This wait needs to precede any PP tasks, bonded or nonbonded, that may
298 compute on interactions between local and nonlocal atoms.
300 if (nb->bUseTwoStreams)
302 if (interactionLocality == InteractionLocality::Local)
304 cudaError_t stat = cudaEventRecord(nb->misc_ops_and_local_H2D_done, stream);
305 CU_RET_ERR(stat, "cudaEventRecord on misc_ops_and_local_H2D_done failed");
309 cudaError_t stat = cudaStreamWaitEvent(stream, nb->misc_ops_and_local_H2D_done, 0);
310 CU_RET_ERR(stat, "cudaStreamWaitEvent on misc_ops_and_local_H2D_done failed");
315 /*! \brief Launch asynchronously the xq buffer host to device copy. */
316 void gpu_copy_xq_to_gpu(gmx_nbnxn_cuda_t *nb,
317 const nbnxn_atomdata_t *nbatom,
318 const AtomLocality atomLocality)
320 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
322 GMX_ASSERT(atomLocality == AtomLocality::Local || atomLocality == AtomLocality::NonLocal,
323 "Only local and non-local xq transfers are supported");
325 const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
327 int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
329 cu_atomdata_t *adat = nb->atdat;
330 cu_plist_t *plist = nb->plist[iloc];
331 cu_timers_t *t = nb->timers;
332 cudaStream_t stream = nb->stream[iloc];
334 bool bDoTime = nb->bDoTime;
/* Don't launch the non-local H2D copy if there is no dependent
   work to do: neither non-local nor other (e.g. bonded) work
   that has the nbnxn coordinates as input.
   Doing the same for the local kernel is more complicated, since the
   local part of the force array also depends on the non-local kernel.
   So to avoid complicating the code and to reduce the risk of bugs,
   we always call the local x+q copy (and the rest of the local
   work in nbnxn_gpu_launch_kernel()).
 */
345 if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
347 plist->haveFreshList = false;
352 /* calculate the atom data index range based on locality */
353 if (atomLocality == AtomLocality::Local)
356 adat_len = adat->natoms_local;
360 adat_begin = adat->natoms_local;
361 adat_len = adat->natoms - adat->natoms_local;
365 /* beginning of timed HtoD section */
368 t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
371 cu_copy_H2D_async(adat->xq + adat_begin, static_cast<const void *>(nbatom->x().data() + adat_begin * 4),
372 adat_len * sizeof(*adat->xq), stream);
376 t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
379 /* When we get here all misc operations issued in the local stream as well as
380 the local xq H2D are done,
381 so we record that in the local stream and wait for it in the nonlocal one.
382 This wait needs to precede any PP tasks, bonded or nonbonded, that may
383 compute on interactions between local and nonlocal atoms.
385 nbnxnInsertNonlocalGpuDependency(nb, iloc);
388 /*! As we execute nonbonded workload in separate streams, before launching
the kernel we need to make sure that the following operations have completed:
390 - atomdata allocation and related H2D transfers (every nstlist step);
391 - pair list H2D transfer (every nstlist step);
392 - shift vector H2D transfer (every nstlist step);
393 - force (+shift force and energy) output clearing (every step).
395 These operations are issued in the local stream at the beginning of the step
396 and therefore always complete before the local kernel launch. The non-local
397 kernel is launched after the local on the same device/context hence it is
398 inherently scheduled after the operations in the local stream (including the
399 above "misc_ops") on pre-GK110 devices with single hardware queue, but on later
400 devices with multiple hardware queues the dependency needs to be enforced.
401 We use the misc_ops_and_local_H2D_done event to record the point where
402 the local x+q H2D (and all preceding) tasks are complete and synchronize
403 with this event in the non-local stream before launching the non-bonded kernel.
405 void gpu_launch_kernel(gmx_nbnxn_cuda_t *nb,
406 const gmx::ForceFlags &forceFlags,
407 const InteractionLocality iloc)
409 cu_atomdata_t *adat = nb->atdat;
410 cu_nbparam_t *nbp = nb->nbparam;
411 cu_plist_t *plist = nb->plist[iloc];
412 cu_timers_t *t = nb->timers;
413 cudaStream_t stream = nb->stream[iloc];
415 bool bDoTime = nb->bDoTime;
417 /* Don't launch the non-local kernel if there is no work to do.
418 Doing the same for the local kernel is more complicated, since the
419 local part of the force array also depends on the non-local kernel.
420 So to avoid complicating the code and to reduce the risk of bugs,
421 we always call the local kernel, and later (not in
422 this function) the stream wait, local f copyback and the f buffer
423 clearing. All these operations, except for the local interaction kernel,
424 are needed for the non-local interactions. The skip of the local kernel
425 call is taken care of later in this function. */
426 if (canSkipNonbondedWork(*nb, iloc))
428 plist->haveFreshList = false;
433 if (nbp->useDynamicPruning && plist->haveFreshList)
435 /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
436 (TODO: ATM that's the way the timing accounting can distinguish between
437 separate prune kernel and combined force+prune, maybe we need a better way?).
439 gpu_launch_kernel_pruneonly(nb, iloc, 1);
442 if (plist->nsci == 0)
444 /* Don't launch an empty local kernel (not allowed with CUDA) */
448 /* beginning of timed nonbonded calculation section */
451 t->interaction[iloc].nb_k.openTimingRegion(stream);
454 /* Kernel launch config:
455 * - The thread block dimensions match the size of i-clusters, j-clusters,
456 * and j-cluster concurrency, in x, y, and z, respectively.
457 * - The 1D block-grid contains as many blocks as super-clusters.
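 *
 * For example, with c_clSize = 8 (assumed here; the constant is defined in the
 * pair-list headers) and num_threads_z = 1 this yields an 8x8x1 block of 64
 * threads, i.e. two warps, one per half of the split cluster-pair list.
 */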
459 int num_threads_z = 1;
460 if (nb->dev_info->prop.major == 3 && nb->dev_info->prop.minor == 7)
464 int nblock = calc_nb_kernel_nblock(plist->nsci, nb->dev_info);
467 KernelLaunchConfig config;
468 config.blockSize[0] = c_clSize;
469 config.blockSize[1] = c_clSize;
470 config.blockSize[2] = num_threads_z;
471 config.gridSize[0] = nblock;
472 config.sharedMemorySize = calc_shmem_required_nonbonded(num_threads_z, nb->dev_info, nbp);
473 config.stream = stream;
477 fprintf(debug, "Non-bonded GPU launch configuration:\n\tThread block: %zux%zux%zu\n\t"
478 "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
480 config.blockSize[0], config.blockSize[1], config.blockSize[2],
481 config.gridSize[0], config.gridSize[1], plist->nsci*c_numClPerSupercl,
482 c_numClPerSupercl, plist->na_c,
483 config.sharedMemorySize);
486 auto *timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
487 const auto kernel = select_nbnxn_kernel(nbp->eeltype,
489 forceFlags.computeEnergy,
490 (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune),
492 const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &forceFlags.computeVirial);
493 launchGpuKernel(kernel, config, timingEvent, "k_calc_nb", kernelArgs);
497 t->interaction[iloc].nb_k.closeTimingRegion(stream);
500 if (GMX_NATIVE_WINDOWS)
502 /* Windows: force flushing WDDM queue */
503 cudaStreamQuery(stream);
507 /*! Calculates the amount of shared memory required by the CUDA kernel in use. */
508 static inline int calc_shmem_required_prune(const int num_threads_z)
512 /* i-atom x in shared memory */
513 shmem = c_numClPerSupercl * c_clSize * sizeof(float4);
514 /* cj in shared memory, for each warp separately */
515 shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
520 void gpu_launch_kernel_pruneonly(gmx_nbnxn_cuda_t *nb,
521 const InteractionLocality iloc,
524 cu_atomdata_t *adat = nb->atdat;
525 cu_nbparam_t *nbp = nb->nbparam;
526 cu_plist_t *plist = nb->plist[iloc];
527 cu_timers_t *t = nb->timers;
528 cudaStream_t stream = nb->stream[iloc];
530 bool bDoTime = nb->bDoTime;
532 if (plist->haveFreshList)
534 GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
536 /* Set rollingPruningNumParts to signal that it is not set */
537 plist->rollingPruningNumParts = 0;
538 plist->rollingPruningPart = 0;
542 if (plist->rollingPruningNumParts == 0)
544 plist->rollingPruningNumParts = numParts;
548 GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
552 /* Use a local variable for part and update in plist, so we can return here
553 * without duplicating the part increment code.
555 int part = plist->rollingPruningPart;
557 plist->rollingPruningPart++;
558 if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
560 plist->rollingPruningPart = 0;
563 /* Compute the number of list entries to prune in this pass */
564 int numSciInPart = (plist->nsci - part)/numParts;
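/* Illustration: with nsci = 1000 and numParts = 4, each pass handles roughly a
   quarter (~250) of the pair-list entries, so the whole list gets re-pruned
   once over numParts consecutive rolling-prune steps. */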
566 /* Don't launch the kernel if there is no work to do (not allowed with CUDA) */
567 if (numSciInPart <= 0)
569 plist->haveFreshList = false;
574 GpuRegionTimer *timer = nullptr;
577 timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
580 /* beginning of timed prune calculation section */
583 timer->openTimingRegion(stream);
586 /* Kernel launch config:
587 * - The thread block dimensions match the size of i-clusters, j-clusters,
588 * and j-cluster concurrency, in x, y, and z, respectively.
589 * - The 1D block-grid contains as many blocks as super-clusters.
591 int num_threads_z = c_cudaPruneKernelJ4Concurrency;
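/* Unlike the force kernels above (num_threads_z of 1, or 2 on sm_37), the
   prune-only kernel stacks several j4-wide chunks of the pair list per block in
   the z-dimension; c_cudaPruneKernelJ4Concurrency sets that concurrency. */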
592 int nblock = calc_nb_kernel_nblock(numSciInPart, nb->dev_info);
593 KernelLaunchConfig config;
594 config.blockSize[0] = c_clSize;
595 config.blockSize[1] = c_clSize;
596 config.blockSize[2] = num_threads_z;
597 config.gridSize[0] = nblock;
598 config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
599 config.stream = stream;
603 fprintf(debug, "Pruning GPU kernel launch configuration:\n\tThread block: %zux%zux%zu\n\t"
604 "\tGrid: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
606 config.blockSize[0], config.blockSize[1], config.blockSize[2],
607 config.gridSize[0], config.gridSize[1], numSciInPart*c_numClPerSupercl,
608 c_numClPerSupercl, plist->na_c,
609 config.sharedMemorySize);
612 auto *timingEvent = bDoTime ? timer->fetchNextEvent() : nullptr;
613 constexpr char kernelName[] = "k_pruneonly";
614 const auto kernel = plist->haveFreshList ? nbnxn_kernel_prune_cuda<true> : nbnxn_kernel_prune_cuda<false>;
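/* The boolean template parameter above distinguishes the two pruning flavors:
   <true> is the first-pass prune done right after pair-list generation (fresh
   list), <false> the rolling prune executed in parts on later steps. */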
615 const auto kernelArgs = prepareGpuKernelArguments(kernel, config, adat, nbp, plist, &numParts, &part);
616 launchGpuKernel(kernel, config, timingEvent, kernelName, kernelArgs);
618 /* TODO: consider a more elegant way to track which kernel has been called
619 (combined or separate 1st pass prune, rolling prune). */
620 if (plist->haveFreshList)
622 plist->haveFreshList = false;
623 /* Mark that pruning has been done */
624 nb->timers->interaction[iloc].didPrune = true;
628 /* Mark that rolling pruning has been done */
629 nb->timers->interaction[iloc].didRollingPrune = true;
634 timer->closeTimingRegion(stream);
637 if (GMX_NATIVE_WINDOWS)
639 /* Windows: force flushing WDDM queue */
640 cudaStreamQuery(stream);
644 void gpu_launch_cpyback(gmx_nbnxn_cuda_t *nb,
645 nbnxn_atomdata_t *nbatom,
646 const gmx::ForceFlags &forceFlags,
647 const AtomLocality atomLocality,
648 const bool copyBackNbForce)
650 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
653 int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
655 /* determine interaction locality from atom locality */
656 const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
658 /* extract the data */
659 cu_atomdata_t *adat = nb->atdat;
660 cu_timers_t *t = nb->timers;
661 bool bDoTime = nb->bDoTime;
662 cudaStream_t stream = nb->stream[iloc];
664 /* don't launch non-local copy-back if there was no non-local work to do */
665 if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
670 getGpuAtomRange(adat, atomLocality, &adat_begin, &adat_len);
672 /* beginning of timed D2H section */
675 t->xf[atomLocality].nb_d2h.openTimingRegion(stream);
678 /* With DD the local D2H transfer can only start after the non-local
679 kernel has finished. */
680 if (iloc == InteractionLocality::Local && nb->bUseTwoStreams)
682 stat = cudaStreamWaitEvent(stream, nb->nonlocal_done, 0);
683 CU_RET_ERR(stat, "cudaStreamWaitEvent on nonlocal_done failed");
689 cu_copy_D2H_async(nbatom->out[0].f.data() + adat_begin * 3, adat->f + adat_begin,
690 (adat_len)*sizeof(*adat->f), stream);
693 /* After the non-local D2H is launched the nonlocal_done event can be
694 recorded which signals that the local D2H can proceed. This event is not
placed after the non-local kernel because we want the non-local data
back first. */
697 if (iloc == InteractionLocality::NonLocal)
699 stat = cudaEventRecord(nb->nonlocal_done, stream);
700 CU_RET_ERR(stat, "cudaEventRecord on nonlocal_done failed");
703 /* only transfer energies in the local stream */
704 if (iloc == InteractionLocality::Local)
706 /* DtoH fshift when virial is needed */
707 if (forceFlags.computeVirial)
709 cu_copy_D2H_async(nb->nbst.fshift, adat->fshift,
710 SHIFTS * sizeof(*nb->nbst.fshift), stream);
714 if (forceFlags.computeEnergy)
716 cu_copy_D2H_async(nb->nbst.e_lj, adat->e_lj,
717 sizeof(*nb->nbst.e_lj), stream);
718 cu_copy_D2H_async(nb->nbst.e_el, adat->e_el,
719 sizeof(*nb->nbst.e_el), stream);
725 t->xf[atomLocality].nb_d2h.closeTimingRegion(stream);
729 void cuda_set_cacheconfig()
733 for (int i = 0; i < eelCuNR; i++)
735 for (int j = 0; j < evdwCuNR; j++)
737 /* Default kernel 32/32 kB Shared/L1 */
738 cudaFuncSetCacheConfig(nb_kfunc_ener_prune_ptr[i][j], cudaFuncCachePreferEqual);
739 cudaFuncSetCacheConfig(nb_kfunc_ener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
740 cudaFuncSetCacheConfig(nb_kfunc_noener_prune_ptr[i][j], cudaFuncCachePreferEqual);
741 stat = cudaFuncSetCacheConfig(nb_kfunc_noener_noprune_ptr[i][j], cudaFuncCachePreferEqual);
742 CU_RET_ERR(stat, "cudaFuncSetCacheConfig failed");
747 /* X buffer operations on GPU: copies coordinates to the GPU in rvec format. */
748 void nbnxn_gpu_copy_x_to_gpu(const Nbnxm::Grid &grid,
750 const Nbnxm::AtomLocality locality,
751 const rvec *coordinatesHost)
753 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
754 GMX_ASSERT(coordinatesHost, "Need a valid host pointer");
756 bool bDoTime = nb->bDoTime;
758 Nbnxm::InteractionLocality interactionLoc = gpuAtomToInteractionLocality(locality);
759 int nCopyAtoms = grid.srcAtomEnd() - grid.srcAtomBegin();
760 int copyAtomStart = grid.srcAtomBegin();
762 cudaStream_t stream = nb->stream[interactionLoc];
// an empty domain: avoid launching a zero-byte copy
767 if (interactionLoc == Nbnxm::InteractionLocality::Local)
769 nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
776 nb->timers->xf[locality].nb_h2d.openTimingRegion(stream);
779 rvec *devicePtrDest = reinterpret_cast<rvec *> (nb->xrvec[copyAtomStart]);
780 const rvec *devicePtrSrc = reinterpret_cast<const rvec *> (coordinatesHost[copyAtomStart]);
781 copyToDeviceBuffer(&devicePtrDest, devicePtrSrc, 0, nCopyAtoms,
782 stream, GpuApiCallBehavior::Async, nullptr);
786 nb->timers->xf[locality].nb_h2d.closeTimingRegion(stream);
790 DeviceBuffer<float> nbnxn_gpu_get_x_gpu(gmx_nbnxn_gpu_t *nb)
792 return reinterpret_cast< DeviceBuffer<float> >(nb->xrvec);
795 /* X buffer operations on GPU: performs conversion from rvec to nb format. */
796 void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid &grid,
797 bool setFillerCoords,
799 DeviceBuffer<float> coordinatesDevice,
800 const Nbnxm::AtomLocality locality,
804 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
806 cu_atomdata_t *adat = nb->atdat;
808 const int numColumns = grid.numColumns();
809 const int cellOffset = grid.cellOffset();
810 const int numAtomsPerCell = grid.numAtomsPerCell();
811 Nbnxm::InteractionLocality interactionLoc = gpuAtomToInteractionLocality(locality);
813 cudaStream_t stream = nb->stream[interactionLoc];
815 // TODO: This will only work with CUDA
816 GMX_ASSERT(coordinatesDevice, "Need a valid device pointer");
818 int numAtoms = grid.srcAtomEnd() - grid.srcAtomBegin();
819 // avoid empty kernel launch, skip to inserting stream dependency
822 KernelLaunchConfig config;
823 config.blockSize[0] = c_bufOpsThreadsPerBlock;
824 config.blockSize[1] = 1;
825 config.blockSize[2] = 1;
826 config.gridSize[0] = (grid.numCellsColumnMax()*numAtomsPerCell + c_bufOpsThreadsPerBlock - 1)/c_bufOpsThreadsPerBlock;
827 config.gridSize[1] = numColumns;
828 config.gridSize[2] = 1;
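/* The 2D grid set up above: the y-dimension indexes the grid columns and the
   x-dimension covers the cells of the tallest column (numCellsColumnMax() times
   numAtomsPerCell atoms) rounded up to whole blocks; shorter columns are
   expected to leave their excess threads without work. */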
829 GMX_ASSERT(config.gridSize[0] > 0, "Can not have empty grid, early return above avoids this");
830 config.sharedMemorySize = 0;
831 config.stream = stream;
833 auto kernelFn = nbnxn_gpu_x_to_nbat_x_kernel;
834 float *xqPtr = &(adat->xq->x);
835 const int *d_atomIndices = nb->atomIndices;
836 const int *d_cxy_na = &nb->cxy_na[numColumnsMax*gridId];
837 const int *d_cxy_ind = &nb->cxy_ind[numColumnsMax*gridId];
838 const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config,
848 launchGpuKernel(kernelFn, config, nullptr, "XbufferOps", kernelArgs);
851 nbnxnInsertNonlocalGpuDependency(nb, interactionLoc);
854 /* F buffer operations on GPU: performs force summations and conversion from nb to rvec format. */
855 void nbnxn_gpu_add_nbat_f_to_f(const AtomLocality atomLocality,
856 DeviceBuffer<float> totalForcesDevice,
858 void *pmeForcesDevice,
859 GpuEventSynchronizer *pmeForcesReady,
862 bool useGpuFPmeReduction,
863 bool accumulateForce)
865 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
867 const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
868 cudaStream_t stream = nb->stream[iLocality];
869 cu_atomdata_t *adat = nb->atdat;
871 if (useGpuFPmeReduction)
873 //Stream must wait for PME force completion
874 pmeForcesReady->enqueueWaitEvent(stream);
879 KernelLaunchConfig config;
880 config.blockSize[0] = c_bufOpsThreadsPerBlock;
881 config.blockSize[1] = 1;
882 config.blockSize[2] = 1;
883 config.gridSize[0] = ((numAtoms+1)+c_bufOpsThreadsPerBlock-1)/c_bufOpsThreadsPerBlock;
884 config.gridSize[1] = 1;
885 config.gridSize[2] = 1;
886 config.sharedMemorySize = 0;
887 config.stream = stream;
889 auto kernelFn = accumulateForce ?
890 nbnxn_gpu_add_nbat_f_to_f_kernel<true, false> :
891 nbnxn_gpu_add_nbat_f_to_f_kernel<false, false>;
893 if (useGpuFPmeReduction)
895 kernelFn = accumulateForce ?
896 nbnxn_gpu_add_nbat_f_to_f_kernel<true, true> :
897 nbnxn_gpu_add_nbat_f_to_f_kernel<false, true>;
900 const float3 *d_fNB = adat->f;
901 const float3 *d_fPme = (float3*) pmeForcesDevice;
902 float3 *d_fTotal = (float3*) totalForcesDevice;
903 const int *d_cell = nb->cell;
905 const auto kernelArgs = prepareGpuKernelArguments(kernelFn, config,
913 launchGpuKernel(kernelFn, config, nullptr, "FbufferOps", kernelArgs);
917 DeviceBuffer<float> nbnxn_gpu_get_f_gpu(gmx_nbnxn_gpu_t *nb)
919 return reinterpret_cast< DeviceBuffer<float> >(nb->frvec);
922 void nbnxn_launch_copy_f_to_gpu(const AtomLocality atomLocality,
923 const Nbnxm::GridSet &gridSet,
927 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
928 GMX_ASSERT(f, "Need a valid f pointer");
930 const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
931 cudaStream_t stream = nb->stream[iLocality];
933 bool bDoTime = nb->bDoTime;
934 cu_timers_t *t = nb->timers;
936 int atomStart = 0, nAtoms = 0;
938 nbnxn_get_atom_range(atomLocality, gridSet, &atomStart, &nAtoms);
942 t->xf[atomLocality].nb_h2d.openTimingRegion(stream);
945 rvec *ptrDest = reinterpret_cast<rvec *> (nb->frvec[atomStart]);
946 rvec *ptrSrc = reinterpret_cast<rvec *> (f[atomStart]);
947 //copyToDeviceBuffer(&ptrDest, ptrSrc, 0, nAtoms,
948 // stream, GpuApiCallBehavior::Async, nullptr);
949 //TODO use above API call rather than direct memcpy when force has been implemented in a hostvector
950 cudaMemcpyAsync(ptrDest, ptrSrc, nAtoms*sizeof(rvec), cudaMemcpyHostToDevice,
955 t->xf[atomLocality].nb_h2d.closeTimingRegion(stream);
961 void nbnxn_launch_copy_f_from_gpu(const AtomLocality atomLocality,
962 const Nbnxm::GridSet &gridSet,
966 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
967 GMX_ASSERT(f, "Need a valid f pointer");
969 const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
970 cudaStream_t stream = nb->stream[iLocality];
972 bool bDoTime = nb->bDoTime;
973 cu_timers_t *t = nb->timers;
974 int atomStart, nAtoms;
976 nbnxn_get_atom_range(atomLocality, gridSet, &atomStart, &nAtoms);
980 t->xf[atomLocality].nb_d2h.openTimingRegion(stream);
983 GMX_ASSERT(nb->frvec, "Need a valid nb->frvec pointer");
984 rvec *ptrDest = reinterpret_cast<rvec *> (f[atomStart]);
985 rvec *ptrSrc = reinterpret_cast<rvec *> (nb->frvec[atomStart]);
986 //copyFromDeviceBuffer(ptrDest, &ptrSrc, 0, nAtoms,
987 // stream, GpuApiCallBehavior::Async, nullptr);
988 //TODO use above API call rather than direct memcpy when force has been implemented in a hostvector
989 cudaMemcpyAsync(ptrDest, ptrSrc, nAtoms*sizeof(rvec), cudaMemcpyDeviceToHost,
994 t->xf[atomLocality].nb_d2h.closeTimingRegion(stream);
1000 void nbnxn_wait_for_gpu_force_reduction(const AtomLocality gmx_unused atomLocality,
1001 gmx_nbnxn_gpu_t *nb)
1003 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
1005 const InteractionLocality iLocality = gpuAtomToInteractionLocality(atomLocality);
1007 cudaStream_t stream = nb->stream[iLocality];
1009 cudaStreamSynchronize(stream);
1013 } // namespace Nbnxm