Make cl_nbparam into a struct
src/gromacs/nbnxm/opencl/nbnxm_ocl.cpp
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
5 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
6 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 * and including many others, as listed in the AUTHORS file in the
8 * top-level source directory and at http://www.gromacs.org.
10 * GROMACS is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public License
12 * as published by the Free Software Foundation; either version 2.1
13 * of the License, or (at your option) any later version.
15 * GROMACS is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with GROMACS; if not, see
22 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 * If you want to redistribute modifications to GROMACS, please
26 * consider that scientific software is very special. Version
27 * control is crucial - bugs must be traceable. We will be happy to
28 * consider code for inclusion in the official distribution, but
29 * derived work must not be called official GROMACS. Details are found
30 * in the README & COPYING files - if they are missing, get the
31 * official version at http://www.gromacs.org.
33 * To help us fund GROMACS development, we humbly ask that you cite
34 * the research papers on the package. Check out http://www.gromacs.org.
36 /*! \internal \file
37 * \brief Define OpenCL implementation of nbnxm_gpu.h
39 * \author Anca Hamuraru <anca@streamcomputing.eu>
40 * \author Teemu Virolainen <teemu@streamcomputing.eu>
41 * \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
42 * \author Szilárd Páll <pall.szilard@gmail.com>
43 * \ingroup module_nbnxm
45 * TODO (psz):
46 * - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
47 * - Rework the copying of OCL data structures done before every invocation of both
48 * nb and prune kernels (using fillin_ocl_structures); also consider at the same
49 * time calling clSetKernelArg only on the updated parameters (if tracking changed
50 * parameters is feasible);
51 * - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
52 * dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
53 * on the misc_ops_and_local_H2D_done event could be better expressed this way.
55 * - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
56 * - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
57 * is identical in the two implementations, so a 3-way split might allow sharing
58 * code;
59 * -
62 #include "gmxpre.h"
64 #include <assert.h>
65 #include <stdlib.h>
67 #if defined(_MSVC)
68 # include <limits>
69 #endif
71 #include "thread_mpi/atomic.h"
73 #include "gromacs/gpu_utils/device_context.h"
74 #include "gromacs/gpu_utils/gputraits_ocl.h"
75 #include "gromacs/gpu_utils/oclutils.h"
76 #include "gromacs/hardware/hw_info.h"
77 #include "gromacs/mdtypes/simulation_workload.h"
78 #include "gromacs/nbnxm/atomdata.h"
79 #include "gromacs/nbnxm/gpu_common.h"
80 #include "gromacs/nbnxm/gpu_common_utils.h"
81 #include "gromacs/nbnxm/gpu_data_mgmt.h"
82 #include "gromacs/nbnxm/nbnxm.h"
83 #include "gromacs/nbnxm/nbnxm_gpu.h"
84 #include "gromacs/nbnxm/pairlist.h"
85 #include "gromacs/pbcutil/ishift.h"
86 #include "gromacs/timing/gpu_timing.h"
87 #include "gromacs/utility/cstringutil.h"
88 #include "gromacs/utility/fatalerror.h"
89 #include "gromacs/utility/gmxassert.h"
91 #include "nbnxm_ocl_types.h"
93 namespace Nbnxm
96 /*! \brief Convenience constants */
97 //@{
98 static constexpr int c_clSize = c_nbnxnGpuClusterSize;
99 //@}
102 /*! \brief Validates the input global work size parameter.
104 static inline void validate_global_work_size(const KernelLaunchConfig& config,
105 int work_dim,
106 const DeviceInformation* dinfo)
108 cl_uint device_size_t_size_bits;
109 cl_uint host_size_t_size_bits;
111 GMX_ASSERT(dinfo, "Need a valid device info object");
113 size_t global_work_size[3];
114 GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
115 for (int i = 0; i < work_dim; i++)
117 global_work_size[i] = config.blockSize[i] * config.gridSize[i];
120 /* Each component of a global_work_size must not exceed the range given by the
121 sizeof(device size_t) for the device on which the kernel execution will
122 be enqueued. See:
123 https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
125 device_size_t_size_bits = dinfo->adress_bits;
126 host_size_t_size_bits = static_cast<cl_uint>(sizeof(size_t) * 8);
128 /* If sizeof(host size_t) <= sizeof(device size_t)
129 => global_work_size components will always be valid
130 else
131 => get device limit for global work size and
132 compare it against each component of global_work_size.
134 if (host_size_t_size_bits > device_size_t_size_bits)
136 size_t device_limit;
138 device_limit = (1ULL << device_size_t_size_bits) - 1;
140 for (int i = 0; i < work_dim; i++)
142 if (global_work_size[i] > device_limit)
144 gmx_fatal(
145 FARGS,
146 "Watch out, the input system is too large to simulate!\n"
147 "The number of nonbonded work units (=number of super-clusters) exceeds the"
148 "device capabilities. Global work size limit exceeded (%zu > %zu)!",
149 global_work_size[i], device_limit);
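/* Illustrative example of the check above, assuming a hypothetical 32-bit device
 * (dinfo->adress_bits == 32) driven from a 64-bit host; the numbers are made up for
 * illustration and do not describe any particular device:
 *
 *   device_limit        = (1ULL << 32) - 1 = 4294967295
 *   global_work_size[0] = blockSize[0] * gridSize[0] = 8 * 536870912 = 4294967296
 *
 * Since 4294967296 > 4294967295, such a launch would trigger the gmx_fatal() call above;
 * with gridSize[0] <= 536870911 the component stays within the device's range.
 */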
155 /* Constant arrays listing non-bonded kernel function names. The arrays are
156 * organized as two-dimensional arrays indexed by electrostatics and VdW type.
158 * Note that the row- and column-order of function pointers has to match the
159 * order of corresponding enumerated electrostatics and vdw types, resp.,
160 * defined in nbnxm_ocl_types.h.
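/* Example of how the tables below are indexed (a sketch: the exact enumerator names are
 * those defined in nbnxm_ocl_types.h; "eelTypeRF" and "evdwTypeLJCombGeom" are used here
 * as illustrative names for the reaction-field and geometric-combination-rule entries):
 *
 *   // force + energy, no pruning:
 *   const char* name = nb_kfunc_ener_noprune_ptr[eelTypeRF][evdwTypeLJCombGeom];
 *   // name == "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl"
 */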
163 /*! \brief Force-only kernel function names. */
164 static const char* nb_kfunc_noener_noprune_ptr[eelTypeNR][evdwTypeNR] = {
165 { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl",
166 "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl",
167 "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl",
168 "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
169 { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl",
170 "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl",
171 "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl",
172 "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
173 { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl",
174 "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl",
175 "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl",
176 "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl",
177 "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
178 { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl",
179 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl",
180 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl",
181 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl",
182 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl",
183 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl",
184 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
185 { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl",
186 "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl",
187 "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl",
188 "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
189 { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl",
190 "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl",
191 "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl",
192 "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl",
193 "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl",
194 "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
197 /*! \brief Force + energy kernel function pointers. */
198 static const char* nb_kfunc_ener_noprune_ptr[eelTypeNR][evdwTypeNR] = {
199 { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl",
200 "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl",
201 "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl",
202 "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
203 { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl",
204 "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl",
205 "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl",
206 "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
207 { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl",
208 "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl",
209 "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl",
210 "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl",
211 "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
212 { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl",
213 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl",
214 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl",
215 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl",
216 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl",
217 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl",
218 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
219 { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl",
220 "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl",
221 "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl",
222 "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
223 { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl",
224 "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl",
225 "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl",
226 "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl",
227 "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl",
228 "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl",
229 "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
232 /*! \brief Force + pruning kernel function pointers. */
233 static const char* nb_kfunc_noener_prune_ptr[eelTypeNR][evdwTypeNR] = {
234 { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl",
235 "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl",
236 "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl",
237 "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl",
238 "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl",
239 "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
240 { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl",
241 "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl",
242 "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl",
243 "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl",
244 "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
245 { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl",
246 "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl",
247 "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl",
248 "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl",
249 "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl",
250 "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl",
251 "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
252 { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl",
253 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl",
254 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl",
255 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl",
256 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl",
257 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl",
258 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
259 { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl",
260 "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl",
261 "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl",
262 "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl",
263 "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
264 { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl",
265 "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl",
266 "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl",
267 "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl",
268 "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl",
269 "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl",
270 "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
273 /*! \brief Force + energy + pruning kernel function pointers. */
274 static const char* nb_kfunc_ener_prune_ptr[eelTypeNR][evdwTypeNR] = {
275 { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl",
276 "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl",
277 "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl",
278 "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl",
279 "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl",
280 "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl",
281 "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
282 { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl",
283 "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl",
284 "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl",
285 "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl",
286 "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl",
287 "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
288 { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl",
289 "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl",
290 "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl",
291 "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl",
292 "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl",
293 "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl",
294 "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
295 { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl",
296 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl",
297 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl",
298 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl",
299 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl",
300 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
301 "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
302 { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl",
303 "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl",
304 "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl",
305 "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl",
306 "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl",
307 "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
308 { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl",
309 "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl",
310 "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl",
311 "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl",
312 "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl",
313 "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl",
314 "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
317 /*! \brief Return a pointer to the prune kernel version to be executed at the current invocation.
319 * \param[in] kernel_pruneonly array of prune kernel objects
320 * \param[in] firstPrunePass true if the first pruning pass is being executed
322 static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[], bool firstPrunePass)
324 cl_kernel* kernelPtr;
326 if (firstPrunePass)
328 kernelPtr = &(kernel_pruneonly[epruneFirst]);
330 else
332 kernelPtr = &(kernel_pruneonly[epruneRolling]);
334 // TODO: consider creating the prune kernel object here to avoid a
335 // clCreateKernel for the rolling prune kernel if this is not needed.
336 return *kernelPtr;
339 /*! \brief Return a pointer to the kernel version to be executed at the current step.
340 * OpenCL kernel objects are cached in nb. If the requested kernel is not
341 * found in the cache, it will be created and the cache will be updated.
343 static inline cl_kernel select_nbnxn_kernel(NbnxmGpu* nb, int eeltype, int evdwtype, bool bDoEne, bool bDoPrune)
345 const char* kernel_name_to_run;
346 cl_kernel* kernel_ptr;
347 cl_int cl_error;
349 GMX_ASSERT(eeltype < eelTypeNR,
350 "The electrostatics type requested is not implemented in the OpenCL kernels.");
351 GMX_ASSERT(evdwtype < evdwTypeNR,
352 "The VdW type requested is not implemented in the OpenCL kernels.");
354 if (bDoEne)
356 if (bDoPrune)
358 kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
359 kernel_ptr = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
361 else
363 kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
364 kernel_ptr = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
367 else
369 if (bDoPrune)
371 kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
372 kernel_ptr = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
374 else
376 kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
377 kernel_ptr = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
381 if (nullptr == kernel_ptr[0])
383 *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
384 GMX_ASSERT(cl_error == CL_SUCCESS,
385 ("clCreateKernel failed: " + ocl_get_error_string(cl_error)).c_str());
388 return *kernel_ptr;
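/* Usage sketch, mirroring the call made from gpu_launch_kernel() further below: on the
 * first request for a given (eeltype, evdwtype, energy, prune) combination the kernel
 * object is created with clCreateKernel() and stored in the cache held in NbnxmGpu;
 * subsequent requests return the cached cl_kernel without touching the OpenCL API.
 *
 *   cl_kernel k = select_nbnxn_kernel(nb, nbp->eeltype, nbp->vdwtype,
 *                                     true,   // bDoEne:   compute energies
 *                                     false); // bDoPrune: no fused pruning
 */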
391 /*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
393 static inline int calc_shmem_required_nonbonded(int vdwType, bool bPrefetchLjParam)
395 int shmem;
397 /* size of shmem (force-buffers/xq/atom type preloading) */
398 /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
399 /* i-atom x+q in shared memory */
400 shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4; /* xqib */
401 /* cj in shared memory, for both warps separately
402 * TODO: in the "nowarp" kernels we load cj only once, so the factor 2 is not needed.
404 shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int); /* cjs */
405 if (bPrefetchLjParam)
407 if (useLjCombRule(vdwType))
409 /* i-atom LJ combination parameters in shared memory */
410 shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * 2
411 * sizeof(float); /* atib abused for ljcp, float2 */
413 else
415 /* i-atom types in shared memory */
416 shmem += c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(int); /* atib */
419 /* force reduction buffers in shared memory */
420 shmem += c_clSize * c_clSize * 3 * sizeof(float); /* f_buf */
421 /* Warp vote. Strictly this should be multiplied by the number of warps per block. */
422 shmem += sizeof(cl_uint) * 2; /* warp_any */
423 return shmem;
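/* Worked example of the sizes above, assuming the default values c_clSize == 8,
 * c_nbnxnGpuNumClusterPerSupercluster == 8 and c_nbnxnGpuJgroupSize == 4 (the actual
 * values come from the pairlist constants in use):
 *
 *   xqib      : 8 * 8 * sizeof(float) * 4  = 1024 bytes
 *   cjs       : 2 * 4 * sizeof(int)        =   32 bytes
 *   atib      : 8 * 8 * sizeof(int)        =  256 bytes  (type prefetch, no comb. rule)
 *     or ljcp : 8 * 8 * 2 * sizeof(float)  =  512 bytes  (LJ combination-rule prefetch)
 *   f_buf     : 8 * 8 * 3 * sizeof(float)  =  768 bytes
 *   warp_any  : 2 * sizeof(cl_uint)        =    8 bytes
 *
 * i.e. roughly 2 kB of local memory per work group for the default setup.
 */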
426 /*! \brief Initializes data structures that are going to be sent to the OpenCL device.
428 * The device can't use the same data structures as the host for two main reasons:
429 * - OpenCL restrictions (pointers are not accepted inside data structures)
430 * - some host side fields are not needed for the OpenCL kernels.
432 * This function is called before the launch of both nbnxn and prune kernels.
434 static void fillin_ocl_structures(NBParamGpu* nbp, cl_nbparam_params_t* nbparams_params)
436 nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
437 nbparams_params->c_rf = nbp->c_rf;
438 nbparams_params->dispersion_shift = nbp->dispersion_shift;
439 nbparams_params->eeltype = nbp->eeltype;
440 nbparams_params->epsfac = nbp->epsfac;
441 nbparams_params->ewaldcoeff_lj = nbp->ewaldcoeff_lj;
442 nbparams_params->ewald_beta = nbp->ewald_beta;
443 nbparams_params->rcoulomb_sq = nbp->rcoulomb_sq;
444 nbparams_params->repulsion_shift = nbp->repulsion_shift;
445 nbparams_params->rlistOuter_sq = nbp->rlistOuter_sq;
446 nbparams_params->rvdw_sq = nbp->rvdw_sq;
447 nbparams_params->rlistInner_sq = nbp->rlistInner_sq;
448 nbparams_params->rvdw_switch = nbp->rvdw_switch;
449 nbparams_params->sh_ewald = nbp->sh_ewald;
450 nbparams_params->sh_lj_ewald = nbp->sh_lj_ewald;
451 nbparams_params->two_k_rf = nbp->two_k_rf;
452 nbparams_params->vdwtype = nbp->vdwtype;
453 nbparams_params->vdw_switch = nbp->vdw_switch;
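/* The struct filled in above is passed to the kernels by value. In plain OpenCL terms
 * (a sketch only; in this file the argument handling goes through
 * prepareGpuKernelArguments()/launchGpuKernel() below, and "kernel"/"argIndex" are
 * placeholders), this amounts to:
 *
 *   cl_nbparam_params_t nbparams_params;
 *   fillin_ocl_structures(nbp, &nbparams_params);
 *   cl_int err = clSetKernelArg(kernel, argIndex, sizeof(nbparams_params), &nbparams_params);
 *
 * which is why cl_nbparam_params_t has to stay a plain-old-data struct without pointers,
 * as noted above.
 */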
456 /*! \brief Enqueues a wait for event completion.
458 * Then it releases the event and resets it to nullptr.
459 * Don't use this function when more than one wait will be issued for the event.
460 * Equivalent to a CUDA stream synchronization. */
461 static void sync_ocl_event(cl_command_queue stream, cl_event* ocl_event)
463 cl_int gmx_unused cl_error;
465 /* Enqueue wait */
466 cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, nullptr);
467 GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());
469 /* Release the event and reset it to nullptr. It is OK to release it here, as clEnqueueBarrierWithWaitList performs an implicit retain of the events in its wait list. */
470 cl_error = clReleaseEvent(*ocl_event);
471 GMX_ASSERT(cl_error == CL_SUCCESS,
472 ("clReleaseEvent failed: " + ocl_get_error_string(cl_error)).c_str());
473 *ocl_event = nullptr;
476 /*! \brief Asynchronously launches the x+q buffer host-to-device copy. */
477 void gpu_copy_xq_to_gpu(NbnxmGpu* nb, const nbnxn_atomdata_t* nbatom, const AtomLocality atomLocality)
479 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
481 const InteractionLocality iloc = gpuAtomToInteractionLocality(atomLocality);
483 /* local/nonlocal offset and length used for xq and f */
484 int adat_begin, adat_len;
486 cl_atomdata_t* adat = nb->atdat;
487 cl_plist_t* plist = nb->plist[iloc];
488 cl_timers_t* t = nb->timers;
489 const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
491 bool bDoTime = nb->bDoTime;
493 /* Don't launch the non-local H2D copy if there is no dependent
494 work to do: neither non-local nor other (e.g. bonded) work
495 to do that has as input the nbnxn coordinates.
496 Doing the same for the local kernel is more complicated, since the
497 local part of the force array also depends on the non-local kernel.
498 So to avoid complicating the code and to reduce the risk of bugs,
499 we always call the local x+q copy (and the rest of the local
500 work) in nbnxn_gpu_launch_kernel().
502 if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
504 plist->haveFreshList = false;
506 return;
509 /* calculate the atom data index range based on locality */
510 if (atomLocality == AtomLocality::Local)
512 adat_begin = 0;
513 adat_len = adat->natoms_local;
515 else
517 adat_begin = adat->natoms_local;
518 adat_len = adat->natoms - adat->natoms_local;
521 /* beginning of timed HtoD section */
522 if (bDoTime)
524 t->xf[atomLocality].nb_h2d.openTimingRegion(deviceStream);
527 /* HtoD x, q */
528 GMX_ASSERT(sizeof(float) == sizeof(*nbatom->x().data()),
529 "The size of the xyzq buffer element should be equal to the size of float4.");
530 copyToDeviceBuffer(&adat->xq, nbatom->x().data() + adat_begin * 4, adat_begin * 4, adat_len * 4,
531 deviceStream, GpuApiCallBehavior::Async,
532 bDoTime ? t->xf[atomLocality].nb_h2d.fetchNextEvent() : nullptr);
534 if (bDoTime)
536 t->xf[atomLocality].nb_h2d.closeTimingRegion(deviceStream);
539 /* When we get here, all misc operations issued in the local stream as well as
540 the local xq H2D are done,
541 so we record that in the local stream and wait for it in the nonlocal one. */
542 if (nb->bUseTwoStreams)
544 if (iloc == InteractionLocality::Local)
546 cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(
547 deviceStream.stream(), 0, nullptr, &(nb->misc_ops_and_local_H2D_done));
548 GMX_ASSERT(cl_error == CL_SUCCESS,
549 ("clEnqueueMarkerWithWaitList failed: " + ocl_get_error_string(cl_error)).c_str());
551 /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
552 * in the local stream in order to be able to sync with the above event
553 * from the non-local stream.
555 cl_error = clFlush(deviceStream.stream());
556 GMX_ASSERT(cl_error == CL_SUCCESS,
557 ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());
559 else
561 sync_ocl_event(deviceStream.stream(), &(nb->misc_ops_and_local_H2D_done));
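/* Sketch of the cross-queue synchronization pattern used above (illustrative only;
 * "localQueue" and "nonlocalQueue" are placeholder names for the local and non-local
 * device streams):
 *
 *   cl_event done;
 *   clEnqueueMarkerWithWaitList(localQueue, 0, nullptr, &done);     // record completion point
 *   clFlush(localQueue);                                            // make it visible to the other queue
 *   clEnqueueBarrierWithWaitList(nonlocalQueue, 1, &done, nullptr); // wait in the non-local queue
 *   clReleaseEvent(done);
 *
 * The clFlush() is needed because, as the comment above notes (OpenCL 1.2, section 5.13),
 * commands are only guaranteed to be submitted to the device after the issuing queue is
 * flushed.
 */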
567 /*! \brief Launch GPU kernel
569 As we execute the nonbonded workload in separate queues, before launching
570 the kernel we need to make sure that the following operations have completed:
571 - atomdata allocation and related H2D transfers (every nstlist step);
572 - pair list H2D transfer (every nstlist step);
573 - shift vector H2D transfer (every nstlist step);
574 - force (+shift force and energy) output clearing (every step).
576 These operations are issued in the local queue at the beginning of the step
577 and therefore always complete before the local kernel launch. The non-local
578 kernel is launched after the local on the same device/context, so this is
579 inherently scheduled after the operations in the local stream (including the
580 above "misc_ops").
581 However, for the sake of having a future-proof implementation, we use the
582 misc_ops_done event to record the point in time when the above operations
583 are finished and synchronize with this event in the non-local stream.
585 void gpu_launch_kernel(NbnxmGpu* nb, const gmx::StepWorkload& stepWork, const Nbnxm::InteractionLocality iloc)
587 cl_atomdata_t* adat = nb->atdat;
588 NBParamGpu* nbp = nb->nbparam;
589 cl_plist_t* plist = nb->plist[iloc];
590 cl_timers_t* t = nb->timers;
591 const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
593 bool bDoTime = nb->bDoTime;
595 cl_nbparam_params_t nbparams_params;
597 /* Don't launch the non-local kernel if there is no work to do.
598 Doing the same for the local kernel is more complicated, since the
599 local part of the force array also depends on the non-local kernel.
600 So to avoid complicating the code and to reduce the risk of bugs,
601 we always call the local kernel and later (not in
602 this function) the stream wait, local f copyback and the f buffer
603 clearing. All these operations, except for the local interaction kernel,
604 are needed for the non-local interactions. The skip of the local kernel
605 call is taken care of later in this function. */
606 if (canSkipNonbondedWork(*nb, iloc))
608 plist->haveFreshList = false;
610 return;
613 if (nbp->useDynamicPruning && plist->haveFreshList)
615 /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
616 (that's the way the timing accounting can distinguish between
617 separate prune kernel and combined force+prune).
619 Nbnxm::gpu_launch_kernel_pruneonly(nb, iloc, 1);
622 if (plist->nsci == 0)
624 /* Don't launch an empty local kernel (this is not allowed with OpenCL).
626 return;
629 /* beginning of timed nonbonded calculation section */
630 if (bDoTime)
632 t->interaction[iloc].nb_k.openTimingRegion(deviceStream);
635 /* kernel launch config */
637 KernelLaunchConfig config;
638 config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
639 config.blockSize[0] = c_clSize;
640 config.blockSize[1] = c_clSize;
641 config.gridSize[0] = plist->nsci;
643 validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());
645 if (debug)
647 fprintf(debug,
648 "Non-bonded GPU launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
649 "Global work size : %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n",
650 config.blockSize[0], config.blockSize[1], config.blockSize[2],
651 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
652 plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
653 c_nbnxnGpuNumClusterPerSupercluster, plist->na_c);
656 fillin_ocl_structures(nbp, &nbparams_params);
658 auto* timingEvent = bDoTime ? t->interaction[iloc].nb_k.fetchNextEvent() : nullptr;
659 constexpr char kernelName[] = "k_calc_nb";
660 const auto kernel =
661 select_nbnxn_kernel(nb, nbp->eeltype, nbp->vdwtype, stepWork.computeEnergy,
662 (plist->haveFreshList && !nb->timers->interaction[iloc].didPrune));
665 // The OpenCL kernel takes int as second to last argument because bool is
666 // not supported as a kernel argument type (sizeof(bool) is implementation defined).
667 const int computeFshift = static_cast<int>(stepWork.computeVirial);
668 if (useLjCombRule(nb->nbparam->vdwtype))
670 const auto kernelArgs = prepareGpuKernelArguments(
671 kernel, config, &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el,
672 &adat->fshift, &adat->lj_comb, &adat->shift_vec, &nbp->nbfp, &nbp->nbfp_comb,
673 &nbp->coulomb_tab, &plist->sci, &plist->cj4, &plist->excl, &computeFshift);
675 launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
677 else
679 const auto kernelArgs = prepareGpuKernelArguments(
680 kernel, config, &adat->ntypes, &nbparams_params, &adat->xq, &adat->f, &adat->e_lj,
681 &adat->e_el, &adat->fshift, &adat->atom_types, &adat->shift_vec, &nbp->nbfp, &nbp->nbfp_comb,
682 &nbp->coulomb_tab, &plist->sci, &plist->cj4, &plist->excl, &computeFshift);
683 launchGpuKernel(kernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
686 if (bDoTime)
688 t->interaction[iloc].nb_k.closeTimingRegion(deviceStream);
693 /*! \brief Calculates the amount of shared memory required by the prune kernel.
695 * Note that for the sake of simplicity we use the CUDA terminology "shared memory"
696 * for OpenCL local memory.
698 * \param[in] num_threads_z cj4 concurrency, equal to the number of threads/work items in the third dimension.
699 * \returns the amount of local memory in bytes required by the pruning kernel
701 static inline int calc_shmem_required_prune(const int num_threads_z)
703 int shmem;
705 /* i-atom x in shared memory (for convenience we load all 4 components including q) */
706 shmem = c_nbnxnGpuNumClusterPerSupercluster * c_clSize * sizeof(float) * 4;
707 /* cj in shared memory, for each warp separately
708 * Note: only need to load once per wavefront, but to keep the code simple,
709 * for now we load twice on AMD.
711 shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
712 /* Warp vote, requires one uint per warp/32 threads per block. */
713 shmem += sizeof(cl_uint) * 2 * num_threads_z;
715 return shmem;
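/* Worked example, assuming the defaults c_clSize == 8,
 * c_nbnxnGpuNumClusterPerSupercluster == 8, c_nbnxnGpuJgroupSize == 4,
 * c_nbnxnGpuClusterpairSplit == 2 and num_threads_z == 4 (the value of
 * c_oclPruneKernelJ4ConcurrencyDEFAULT may differ):
 *
 *   i-atom x+q : 8 * 8 * sizeof(float) * 4  = 1024 bytes
 *   cj         : 4 * 2 * 4 * sizeof(int)    =  128 bytes
 *   warp vote  : 4 * 2 * sizeof(cl_uint)    =   32 bytes
 *                                     total = 1184 bytes
 */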
718 /*! \brief
719 * Launch the pairlist prune only kernel for the given locality.
720 * \p numParts tells in how many parts, i.e. calls, the list will be pruned.
722 void gpu_launch_kernel_pruneonly(NbnxmGpu* nb, const InteractionLocality iloc, const int numParts)
724 cl_atomdata_t* adat = nb->atdat;
725 NBParamGpu* nbp = nb->nbparam;
726 cl_plist_t* plist = nb->plist[iloc];
727 cl_timers_t* t = nb->timers;
728 const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
729 bool bDoTime = nb->bDoTime;
731 if (plist->haveFreshList)
733 GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");
735 /* Set rollingPruningNumParts to signal that it is not set */
736 plist->rollingPruningNumParts = 0;
737 plist->rollingPruningPart = 0;
739 else
741 if (plist->rollingPruningNumParts == 0)
743 plist->rollingPruningNumParts = numParts;
745 else
747 GMX_ASSERT(numParts == plist->rollingPruningNumParts,
748 "It is not allowed to change numParts in between list generation steps");
752 /* Use a local variable for part and update in plist, so we can return here
753 * without duplicating the part increment code.
755 int part = plist->rollingPruningPart;
757 plist->rollingPruningPart++;
758 if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
760 plist->rollingPruningPart = 0;
763 /* Compute the number of list entries to prune in this pass */
764 int numSciInPart = (plist->nsci - part) / numParts;
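/* For example, with numParts == 2 the list is pruned in two passes over successive
 * rolling-prune invocations: part 0, then part 1, then wrapping back to part 0.
 * Evaluating the formula above for nsci == 1000 gives 500 work groups for part 0 and
 * 499 for part 1 (integer division).
 */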
766 /* Don't launch the kernel if there is no work to do. */
767 if (numSciInPart <= 0)
769 plist->haveFreshList = false;
771 return;
774 GpuRegionTimer* timer = nullptr;
775 if (bDoTime)
777 timer = &(plist->haveFreshList ? t->interaction[iloc].prune_k : t->interaction[iloc].rollingPrune_k);
780 /* beginning of timed prune calculation section */
781 if (bDoTime)
783 timer->openTimingRegion(deviceStream);
786 /* Kernel launch config:
787 * - The thread block dimensions match the size of i-clusters, j-clusters,
788 * and j-cluster concurrency, in x, y, and z, respectively.
789 * - The 1D block-grid contains as many blocks as super-clusters.
791 int num_threads_z = c_oclPruneKernelJ4ConcurrencyDEFAULT;
794 /* kernel launch config */
795 KernelLaunchConfig config;
796 config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
797 config.blockSize[0] = c_clSize;
798 config.blockSize[1] = c_clSize;
799 config.blockSize[2] = num_threads_z;
800 config.gridSize[0] = numSciInPart;
802 validate_global_work_size(config, 3, &nb->deviceContext_->deviceInfo());
804 if (debug)
806 fprintf(debug,
807 "Pruning GPU kernel launch configuration:\n\tLocal work size: %zux%zux%zu\n\t"
808 "\tGlobal work size: %zux%zu\n\t#Super-clusters/clusters: %d/%d (%d)\n"
809 "\tShMem: %zu\n",
810 config.blockSize[0], config.blockSize[1], config.blockSize[2],
811 config.blockSize[0] * config.gridSize[0], config.blockSize[1] * config.gridSize[1],
812 plist->nsci * c_nbnxnGpuNumClusterPerSupercluster,
813 c_nbnxnGpuNumClusterPerSupercluster, plist->na_c, config.sharedMemorySize);
816 cl_nbparam_params_t nbparams_params;
817 fillin_ocl_structures(nbp, &nbparams_params);
819 auto* timingEvent = bDoTime ? timer->fetchNextEvent() : nullptr;
820 constexpr char kernelName[] = "k_pruneonly";
821 const auto pruneKernel = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);
822 const auto kernelArgs = prepareGpuKernelArguments(pruneKernel, config, &nbparams_params,
823 &adat->xq, &adat->shift_vec, &plist->sci,
824 &plist->cj4, &plist->imask, &numParts, &part);
825 launchGpuKernel(pruneKernel, config, deviceStream, timingEvent, kernelName, kernelArgs);
827 if (plist->haveFreshList)
829 plist->haveFreshList = false;
830 /* Mark that pruning has been done */
831 nb->timers->interaction[iloc].didPrune = true;
833 else
835 /* Mark that rolling pruning has been done */
836 nb->timers->interaction[iloc].didRollingPrune = true;
839 if (bDoTime)
841 timer->closeTimingRegion(deviceStream);
845 /*! \brief
846 * Asynchronously launches the download of nonbonded forces from the GPU
847 * (and energies/shift forces if required).
849 void gpu_launch_cpyback(NbnxmGpu* nb,
850 struct nbnxn_atomdata_t* nbatom,
851 const gmx::StepWorkload& stepWork,
852 const AtomLocality aloc)
854 GMX_ASSERT(nb, "Need a valid nbnxn_gpu object");
856 cl_int gmx_unused cl_error;
857 int adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
859 /* determine interaction locality from atom locality */
860 const InteractionLocality iloc = gpuAtomToInteractionLocality(aloc);
862 cl_atomdata_t* adat = nb->atdat;
863 cl_timers_t* t = nb->timers;
864 bool bDoTime = nb->bDoTime;
865 const DeviceStream& deviceStream = *nb->deviceStreams[iloc];
867 /* don't launch non-local copy-back if there was no non-local work to do */
868 if ((iloc == InteractionLocality::NonLocal) && !haveGpuShortRangeWork(*nb, iloc))
870 /* TODO An alternative way to signal that non-local work is
871 complete is to use a clEnqueueMarker+clEnqueueBarrier
872 pair. However, the use of bNonLocalStreamActive has the
873 advantage of being local to the host, so probably minimizes
874 overhead. Curiously, for NVIDIA OpenCL with an empty-domain
875 test case, overall simulation performance was higher with
876 the API calls, but this has not been tested on AMD OpenCL,
877 so could be worth considering in future. */
878 nb->bNonLocalStreamActive = CL_FALSE;
879 return;
882 getGpuAtomRange(adat, aloc, &adat_begin, &adat_len);
884 /* beginning of timed D2H section */
885 if (bDoTime)
887 t->xf[aloc].nb_d2h.openTimingRegion(deviceStream);
890 /* With DD the local D2H transfer can only start after the non-local
891 has been launched. */
892 if (iloc == InteractionLocality::Local && nb->bNonLocalStreamActive)
894 sync_ocl_event(deviceStream.stream(), &(nb->nonlocal_done));
897 /* DtoH f */
898 GMX_ASSERT(sizeof(*nbatom->out[0].f.data()) == sizeof(float),
899 "The host force buffer should be in single precision to match device data size.");
900 copyFromDeviceBuffer(&nbatom->out[0].f.data()[adat_begin * DIM], &adat->f, adat_begin * DIM,
901 adat_len * DIM, deviceStream, GpuApiCallBehavior::Async,
902 bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
904 /* kick off work */
905 cl_error = clFlush(deviceStream.stream());
906 GMX_ASSERT(cl_error == CL_SUCCESS, ("clFlush failed: " + ocl_get_error_string(cl_error)).c_str());
908 /* After the non-local D2H is launched the nonlocal_done event can be
909 recorded which signals that the local D2H can proceed. This event is not
910 placed after the non-local kernel because we need the non-local
911 data back first. */
912 if (iloc == InteractionLocality::NonLocal)
914 cl_error = clEnqueueMarkerWithWaitList(deviceStream.stream(), 0, nullptr, &(nb->nonlocal_done));
915 GMX_ASSERT(cl_error == CL_SUCCESS,
916 ("clEnqueueMarkerWithWaitList failed: " + ocl_get_error_string(cl_error)).c_str());
917 nb->bNonLocalStreamActive = CL_TRUE;
920 /* only transfer energies in the local stream */
921 if (iloc == InteractionLocality::Local)
923 /* DtoH fshift when virial is needed */
924 if (stepWork.computeVirial)
926 GMX_ASSERT(sizeof(*nb->nbst.fshift) == DIM * sizeof(float),
927 "Sizes of host- and device-side shift vector elements should be the same.");
928 copyFromDeviceBuffer(reinterpret_cast<float*>(nb->nbst.fshift), &adat->fshift, 0,
929 SHIFTS * DIM, deviceStream, GpuApiCallBehavior::Async,
930 bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
933 /* DtoH energies */
934 if (stepWork.computeEnergy)
936 GMX_ASSERT(sizeof(*nb->nbst.e_lj) == sizeof(float),
937 "Sizes of host- and device-side LJ energy terms should be the same.");
938 copyFromDeviceBuffer(nb->nbst.e_lj, &adat->e_lj, 0, 1, deviceStream, GpuApiCallBehavior::Async,
939 bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
940 GMX_ASSERT(sizeof(*nb->nbst.e_el) == sizeof(float),
941 "Sizes of host- and device-side electrostatic energy terms should be the "
942 "same.");
943 copyFromDeviceBuffer(nb->nbst.e_el, &adat->e_el, 0, 1, deviceStream, GpuApiCallBehavior::Async,
944 bDoTime ? t->xf[aloc].nb_d2h.fetchNextEvent() : nullptr);
948 if (bDoTime)
950 t->xf[aloc].nb_d2h.closeTimingRegion(deviceStream);
955 /*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
956 int nbnxn_gpu_pick_ewald_kernel_type(const interaction_const_t& ic)
958 bool bTwinCut = (ic.rcoulomb != ic.rvdw);
959 bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
960 int kernel_type;
962 /* Benchmarking/development environment variables to force the use of
963 analytical or tabulated Ewald kernel. */
964 bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != nullptr);
965 bForceTabulatedEwald = (getenv("GMX_OCL_NB_TAB_EWALD") != nullptr);
967 if (bForceAnalyticalEwald && bForceTabulatedEwald)
969 gmx_incons(
970 "Both analytical and tabulated Ewald OpenCL non-bonded kernels "
971 "requested through environment variables.");
974 /* OpenCL: By default, use analytical Ewald
975 * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
978 /* By default use analytical Ewald. */
979 bUseAnalyticalEwald = true;
980 if (bForceAnalyticalEwald)
982 if (debug)
984 fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
987 else if (bForceTabulatedEwald)
989 bUseAnalyticalEwald = false;
991 if (debug)
993 fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
997 /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
998 forces it (use it for debugging/benchmarking only). */
999 if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == nullptr))
1001 kernel_type = bUseAnalyticalEwald ? eelTypeEWALD_ANA : eelTypeEWALD_TAB;
1003 else
1005 kernel_type = bUseAnalyticalEwald ? eelTypeEWALD_ANA_TWIN : eelTypeEWALD_TAB_TWIN;
1008 return kernel_type;
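/* Summary of the resulting kernel type (assuming none of GMX_OCL_NB_ANA_EWALD,
 * GMX_OCL_NB_TAB_EWALD and GMX_OCL_NB_EWALD_TWINCUT is set):
 *
 *   ic.rcoulomb == ic.rvdw  ->  eelTypeEWALD_ANA       (analytical, single cut-off)
 *   ic.rcoulomb != ic.rvdw  ->  eelTypeEWALD_ANA_TWIN  (analytical, twin cut-off)
 *
 * The tabulated variants (eelTypeEWALD_TAB, eelTypeEWALD_TAB_TWIN) are only selected
 * when GMX_OCL_NB_TAB_EWALD is set.
 */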
1011 } // namespace Nbnxm