prepareGpuKernelArguments() and launchGpuKernel() are added
[gromacs.git] / src / gromacs / mdlib / nbnxn_ocl / nbnxn_ocl.cpp
blob af56d62424676d64ed338b6ddc074dc550591d43
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Define OpenCL implementation of nbnxn_gpu.h
 *
 * \author Anca Hamuraru <anca@streamcomputing.eu>
 * \author Teemu Virolainen <teemu@streamcomputing.eu>
 * \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \ingroup module_mdlib
 *
 * TODO (psz):
 * - Add a static const cl_uint c_pruneKernelWorkDim / c_nbnxnKernelWorkDim = 3;
 * - Rework the copying of OCL data structures done before every invocation of both
 *   nb and prune kernels (using fillin_ocl_structures); also consider at the same
 *   time calling clSetKernelArg only on the updated parameters (if tracking changed
 *   parameters is feasible);
 * - Consider using the event_wait_list argument to clEnqueueNDRangeKernel to mark
 *   dependencies on the kernel launched: e.g. the non-local nb kernel's dependency
 *   on the misc_ops_and_local_H2D_done event could be better expressed this way.
 * - Consider extracting common sections of the OpenCL and CUDA nbnxn logic, e.g.:
 *   - in nbnxn_gpu_launch_kernel_pruneonly() the pre- and post-kernel launch logic
 *     is identical in the two implementations, so a 3-way split might allow sharing
 *     code.
 */
#include "gmxpre.h"

#include <assert.h>
#include <stdlib.h>

#if defined(_MSVC)
#include <limits>
#endif

#include "thread_mpi/atomic.h"

#include "gromacs/gpu_utils/gputraits_ocl.h"
#include "gromacs/gpu_utils/oclutils.h"
#include "gromacs/hardware/hw_info.h"
#include "gromacs/mdlib/force_flags.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdlib/nbnxn_gpu.h"
#include "gromacs/mdlib/nbnxn_gpu_common.h"
#include "gromacs/mdlib/nbnxn_gpu_common_utils.h"
#include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"

#include "nbnxn_ocl_internal.h"
#include "nbnxn_ocl_types.h"
/*! \brief Convenience constants */
//@{
static const int c_numClPerSupercl = c_nbnxnGpuNumClusterPerSupercluster;
static const int c_clSize          = c_nbnxnGpuClusterSize;
//@}
/*! \brief Validates the input global work size parameter.
 */
static inline void validate_global_work_size(const KernelLaunchConfig &config, int work_dim, const gmx_device_info_t *dinfo)
{
    cl_uint device_size_t_size_bits;
    cl_uint host_size_t_size_bits;

    assert(dinfo);

    size_t global_work_size[3];
    GMX_ASSERT(work_dim <= 3, "Not supporting hyper-grids just yet");
    for (int i = 0; i < work_dim; i++)
    {
        global_work_size[i] = config.blockSize[i] * config.gridSize[i];
    }

    /* Each component of a global_work_size must not exceed the range given by the
       sizeof(device size_t) for the device on which the kernel execution will
       be enqueued. See:
       https://www.khronos.org/registry/cl/sdk/1.0/docs/man/xhtml/clEnqueueNDRangeKernel.html
     */
    device_size_t_size_bits = dinfo->adress_bits;
    host_size_t_size_bits   = (cl_uint)(sizeof(size_t) * 8);

    /* If sizeof(host size_t) <= sizeof(device size_t)
       => global_work_size components will always be valid
       else
       => get device limit for global work size and
       compare it against each component of global_work_size.
     */
    if (host_size_t_size_bits > device_size_t_size_bits)
    {
        size_t device_limit;

        device_limit = (((size_t)1) << device_size_t_size_bits) - 1;

        for (int i = 0; i < work_dim; i++)
        {
            if (global_work_size[i] > device_limit)
            {
                gmx_fatal(FARGS, "Watch out, the input system is too large to simulate!\n"
                          "The number of nonbonded work units (=number of super-clusters) exceeds the "
                          "device capabilities. Global work size limit exceeded (%zu > %zu)!",
                          global_work_size[i], device_limit);
            }
        }
    }
}
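
/* Illustrative use, mirroring the nonbonded launch code below (the value
 * c_clSize == 8 is an assumption used only for the numbers): an 8x8 work-group
 * and a 1D grid of plist->nsci blocks give global_work_size[0] = 8*plist->nsci,
 * which is checked against the device's size_t range:
 *
 *     KernelLaunchConfig config;
 *     config.blockSize[0] = c_clSize;
 *     config.blockSize[1] = c_clSize;
 *     config.gridSize[0]  = plist->nsci;
 *     validate_global_work_size(config, 3, nb->dev_info);
 */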
/* Constant arrays listing non-bonded kernel function names. The arrays are
 * organized in 2-dim arrays by: electrostatics and VDW type.
 *
 * Note that the row- and column-order of function pointers has to match the
 * order of the corresponding enumerated electrostatics and vdw types, resp.,
 * defined in nbnxn_ocl_types.h.
 */
/*! \brief Force-only kernel function names. */
static const char* nb_kfunc_noener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_opencl" }
};
/*! \brief Force + energy kernel function pointers. */
static const char* nb_kfunc_ener_noprune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_opencl" }
};
/*! \brief Force + pruning kernel function pointers. */
static const char* nb_kfunc_noener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_F_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_F_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_F_prune_opencl" }
};
/*! \brief Force + energy + pruning kernel function pointers. */
static const char* nb_kfunc_ener_prune_ptr[eelOclNR][evdwOclNR] =
{
    { "nbnxn_kernel_ElecCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecRF_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecRF_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTab_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTab_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwQSTabTwinCut_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEw_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEw_VdwLJEwCombLB_VF_prune_opencl" },
    { "nbnxn_kernel_ElecEwTwinCut_VdwLJ_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJCombLB_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJFsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJPsw_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombGeom_VF_prune_opencl", "nbnxn_kernel_ElecEwTwinCut_VdwLJEwCombLB_VF_prune_opencl" }
};
/*! \brief Return the prune kernel version to be executed at the current invocation.
 *
 * \param[in] kernel_pruneonly  array of prune kernel objects
 * \param[in] firstPrunePass    true if the first pruning pass is being executed
 */
static inline cl_kernel selectPruneKernel(cl_kernel kernel_pruneonly[],
                                          bool      firstPrunePass)
{
    cl_kernel *kernelPtr;

    if (firstPrunePass)
    {
        kernelPtr = &(kernel_pruneonly[epruneFirst]);
    }
    else
    {
        kernelPtr = &(kernel_pruneonly[epruneRolling]);
    }
    // TODO: consider creating the prune kernel object here to avoid a
    // clCreateKernel for the rolling prune kernel if this is not needed.
    return *kernelPtr;
}
/*! \brief Return the kernel version to be executed at the current step.
 *
 * OpenCL kernel objects are cached in nb. If the requested kernel is not
 * found in the cache, it will be created and the cache will be updated.
 */
static inline cl_kernel select_nbnxn_kernel(gmx_nbnxn_ocl_t *nb,
                                            int              eeltype,
                                            int              evdwtype,
                                            bool             bDoEne,
                                            bool             bDoPrune)
{
    const char* kernel_name_to_run;
    cl_kernel  *kernel_ptr;
    cl_int      cl_error;

    assert(eeltype < eelOclNR);
    assert(evdwtype < evdwOclNR);

    if (bDoEne)
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_ener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_ener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_ener_noprune_ptr[eeltype][evdwtype]);
        }
    }
    else
    {
        if (bDoPrune)
        {
            kernel_name_to_run = nb_kfunc_noener_prune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_prune_ptr[eeltype][evdwtype]);
        }
        else
        {
            kernel_name_to_run = nb_kfunc_noener_noprune_ptr[eeltype][evdwtype];
            kernel_ptr         = &(nb->kernel_noener_noprune_ptr[eeltype][evdwtype]);
        }
    }

    if (NULL == kernel_ptr[0])
    {
        *kernel_ptr = clCreateKernel(nb->dev_rundata->program, kernel_name_to_run, &cl_error);
        assert(cl_error == CL_SUCCESS);
        // TODO: handle errors
    }

    return *kernel_ptr;
}
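
/* Illustrative lookup (assuming the enum names eelOclRF and evdwOclCUT from
 * nbnxn_ocl_types.h): for reaction-field electrostatics with plain LJ,
 * energies requested and no pruning, the tables above resolve to
 * "nbnxn_kernel_ElecRF_VdwLJ_VF_opencl"; the kernel object is created once
 * with clCreateKernel() and cached in nb for subsequent steps. */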
/*! \brief Calculates the amount of shared memory required by the nonbonded kernel in use.
 */
static inline int calc_shmem_required_nonbonded(int  vdwType,
                                                bool bPrefetchLjParam)
{
    int shmem;

    /* size of shmem (force-buffers/xq/atom type preloading) */
    /* NOTE: with the default kernel on sm3.0 we need shmem only for pre-loading */
    /* i-atom x+q in shared memory */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float) * 4; /* xqib */
    /* cj in shared memory, for both warps separately
     * TODO: in the "nowarp" kernels we load cj only once so the factor 2 is not needed.
     */
    shmem += 2 * c_nbnxnGpuJgroupSize * sizeof(int);           /* cjs */
    if (bPrefetchLjParam)
    {
        if (useLjCombRule(vdwType))
        {
            /* i-atom LJ combination parameters in shared memory */
            shmem += c_numClPerSupercl * c_clSize * 2*sizeof(float); /* atib abused for ljcp, float2 */
        }
        else
        {
            /* i-atom types in shared memory */
            shmem += c_numClPerSupercl * c_clSize * sizeof(int); /* atib */
        }
    }
    /* force reduction buffers in shared memory */
    shmem += c_clSize * c_clSize * 3 * sizeof(float);          /* f_buf */
    /* Warp vote; strictly this should be multiplied by the number of warps in the block. */
    shmem += sizeof(cl_uint) * 2;                              /* warp_any */

    return shmem;
}
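
/* Worked example (the constant values are assumptions corresponding to the
 * usual 8x8 cluster setup: c_numClPerSupercl == 8, c_clSize == 8,
 * c_nbnxnGpuJgroupSize == 4, with LJ parameter prefetch and a combination
 * rule in use):
 *     xqib:     8*8*4*sizeof(float) = 1024 bytes
 *     cjs:      2*4*sizeof(int)     =   32 bytes
 *     ljcp:     8*8*2*sizeof(float) =  512 bytes
 *     f_buf:    8*8*3*sizeof(float) =  768 bytes
 *     warp_any: 2*sizeof(cl_uint)   =    8 bytes
 * for a total of 2344 bytes of local memory per work-group. */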
/*! \brief Initializes data structures that are going to be sent to the OpenCL device.
 *
 * The device can't use the same data structures as the host for two main reasons:
 * - OpenCL restrictions (pointers are not accepted inside data structures)
 * - some host side fields are not needed for the OpenCL kernels.
 *
 * This function is called before the launch of both nbnxn and prune kernels.
 */
static void fillin_ocl_structures(cl_nbparam_t        *nbp,
                                  cl_nbparam_params_t *nbparams_params)
{
    nbparams_params->coulomb_tab_scale = nbp->coulomb_tab_scale;
    nbparams_params->c_rf              = nbp->c_rf;
    nbparams_params->dispersion_shift  = nbp->dispersion_shift;
    nbparams_params->eeltype           = nbp->eeltype;
    nbparams_params->epsfac            = nbp->epsfac;
    nbparams_params->ewaldcoeff_lj     = nbp->ewaldcoeff_lj;
    nbparams_params->ewald_beta        = nbp->ewald_beta;
    nbparams_params->rcoulomb_sq       = nbp->rcoulomb_sq;
    nbparams_params->repulsion_shift   = nbp->repulsion_shift;
    nbparams_params->rlistOuter_sq     = nbp->rlistOuter_sq;
    nbparams_params->rvdw_sq           = nbp->rvdw_sq;
    nbparams_params->rlistInner_sq     = nbp->rlistInner_sq;
    nbparams_params->rvdw_switch       = nbp->rvdw_switch;
    nbparams_params->sh_ewald          = nbp->sh_ewald;
    nbparams_params->sh_lj_ewald       = nbp->sh_lj_ewald;
    nbparams_params->two_k_rf          = nbp->two_k_rf;
    nbparams_params->vdwtype           = nbp->vdwtype;
    nbparams_params->vdw_switch        = nbp->vdw_switch;
}
/*! \brief Enqueues a wait for event completion.
 *
 * Then it releases the event and sets it to 0.
 * Don't use this function when more than one wait will be issued for the event.
 * Equivalent to a CUDA stream synchronization. */
static void sync_ocl_event(cl_command_queue stream, cl_event *ocl_event)
{
    cl_int gmx_unused cl_error;

    /* Enqueue wait */
    cl_error = clEnqueueBarrierWithWaitList(stream, 1, ocl_event, NULL);
    GMX_RELEASE_ASSERT(CL_SUCCESS == cl_error, ocl_get_error_string(cl_error).c_str());

    /* Release the event and reset it to 0. Releasing here is safe because the
       enqueued barrier performs an implicit retain on the event. */
    cl_error = clReleaseEvent(*ocl_event);
    assert(CL_SUCCESS == cl_error);
    *ocl_event = 0;
}
/*! \brief Launch GPU kernel

   As we execute nonbonded workload in separate queues, before launching
   the kernel we need to make sure that the following operations have completed:
   - atomdata allocation and related H2D transfers (every nstlist step);
   - pair list H2D transfer (every nstlist step);
   - shift vector H2D transfer (every nstlist step);
   - force (+shift force and energy) output clearing (every step).

   These operations are issued in the local queue at the beginning of the step
   and therefore always complete before the local kernel launch. The non-local
   kernel is launched after the local one on the same device/context, so it is
   inherently scheduled after the operations in the local stream (including the
   above "misc_ops").
   However, for the sake of having a future-proof implementation, we use the
   misc_ops_done event to record the point in time when the above operations
   are finished and synchronize with this event in the non-local stream.
 */
void nbnxn_gpu_launch_kernel(gmx_nbnxn_ocl_t               *nb,
                             const struct nbnxn_atomdata_t *nbatom,
                             int                            flags,
                             int                            iloc)
{
    int                  adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */
    /* OpenCL kernel launch-related stuff */
    cl_kernel            nb_kernel = NULL;     /* fn pointer to the nonbonded kernel */

    cl_atomdata_t       *adat   = nb->atdat;
    cl_nbparam_t        *nbp    = nb->nbparam;
    cl_plist_t          *plist  = nb->plist[iloc];
    cl_timers_t         *t      = nb->timers;
    cl_command_queue     stream = nb->stream[iloc];

    bool                 bCalcEner   = flags & GMX_FORCE_ENERGY;
    int                  bCalcFshift = flags & GMX_FORCE_VIRIAL;
    bool                 bDoTime     = nb->bDoTime;

    cl_nbparam_params_t  nbparams_params;
    /* Don't launch the non-local kernel if there is no work to do.
       Doing the same for the local kernel is more complicated, since the
       local part of the force array also depends on the non-local kernel.
       So to avoid complicating the code and to reduce the risk of bugs,
       we always call the local kernel, the local x+q copy and later (not in
       this function) the stream wait, local f copyback and the f buffer
       clearing. All these operations, except for the local interaction kernel,
       are needed for the non-local interactions. The skip of the local kernel
       call is taken care of later in this function. */
    if (canSkipWork(nb, iloc))
    {
        plist->haveFreshList = false;

        return;
    }
    /* calculate the atom data index range based on locality */
    if (LOCAL_I(iloc))
    {
        adat_begin = 0;
        adat_len   = adat->natoms_local;
    }
    else
    {
        adat_begin = adat->natoms_local;
        adat_len   = adat->natoms - adat->natoms_local;
    }

    /* beginning of timed HtoD section */
    if (bDoTime)
    {
        t->nb_h2d[iloc].openTimingRegion(stream);
    }

    /* HtoD x, q */
    ocl_copy_H2D_async(adat->xq, nbatom->x + adat_begin * 4, adat_begin*sizeof(float)*4,
                       adat_len * sizeof(float) * 4, stream, bDoTime ? t->nb_h2d[iloc].fetchNextEvent() : nullptr);

    if (bDoTime)
    {
        t->nb_h2d[iloc].closeTimingRegion(stream);
    }
    /* When we get here all misc operations issued in the local stream as well as
       the local xq H2D are done,
       so we record that in the local stream and wait for it in the nonlocal one. */
    if (nb->bUseTwoStreams)
    {
        if (iloc == eintLocal)
        {
            cl_int gmx_used_in_debug cl_error = clEnqueueMarkerWithWaitList(stream, 0, NULL, &(nb->misc_ops_and_local_H2D_done));
            assert(CL_SUCCESS == cl_error);

            /* Based on the v1.2 section 5.13 of the OpenCL spec, a flush is needed
             * in the local stream in order to be able to sync with the above event
             * from the non-local stream.
             */
            cl_error = clFlush(stream);
            assert(CL_SUCCESS == cl_error);
        }
        else
        {
            sync_ocl_event(stream, &(nb->misc_ops_and_local_H2D_done));
        }
    }
    if (nbp->useDynamicPruning && plist->haveFreshList)
    {
        /* Prunes for rlistOuter and rlistInner, sets plist->haveFreshList=false
           (that's the way the timing accounting can distinguish between
           separate prune kernel and combined force+prune).
         */
        nbnxn_gpu_launch_kernel_pruneonly(nb, iloc, 1);
    }

    if (plist->nsci == 0)
    {
        /* Don't launch an empty local kernel (not allowed with OpenCL).
         * TODO: Separate H2D and kernel launch into separate functions.
         */
        return;
    }
    /* beginning of timed nonbonded calculation section */
    if (bDoTime)
    {
        t->nb_k[iloc].openTimingRegion(stream);
    }

    /* get the pointer to the kernel flavor we need to use */
    nb_kernel = select_nbnxn_kernel(nb,
                                    nbp->eeltype,
                                    nbp->vdwtype,
                                    bCalcEner,
                                    (plist->haveFreshList && !nb->timers->didPrune[iloc]));

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_nonbonded(nbp->vdwtype, nb->bPrefetchLjParam);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.gridSize[0]      = plist->nsci;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug, "Non-bonded GPU launch configuration:\n\tLocal work size: %dx%dx%d\n\t"
                "Global work size : %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n",
                (int)(config.blockSize[0]), (int)(config.blockSize[1]), (int)(config.blockSize[2]),
                (int)(config.blockSize[0] * config.gridSize[0]), (int)(config.blockSize[1] * config.gridSize[1]), plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c);
    }
    fillin_ocl_structures(nbp, &nbparams_params);

    auto          *timingEvent  = bDoTime ? t->nb_k[iloc].fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_calc_nb";
    if (useLjCombRule(nb->nbparam->vdwtype))
    {
        const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
                                                          &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
                                                          &adat->lj_comb,
                                                          &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
                                                          &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);

        launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
    }
    else
    {
        const auto kernelArgs = prepareGpuKernelArguments(nb_kernel, config,
                                                          &adat->ntypes,
                                                          &nbparams_params, &adat->xq, &adat->f, &adat->e_lj, &adat->e_el, &adat->fshift,
                                                          &adat->atom_types,
                                                          &adat->shift_vec, &nbp->nbfp_climg2d, &nbp->nbfp_comb_climg2d, &nbp->coulomb_tab_climg2d,
                                                          &plist->sci, &plist->cj4, &plist->excl, &bCalcFshift);

        launchGpuKernel(nb_kernel, config, timingEvent, kernelName, kernelArgs);
    }

    if (bDoTime)
    {
        t->nb_k[iloc].closeTimingRegion(stream);
    }
}
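
/* The prepareGpuKernelArguments()/launchGpuKernel() pair used above wraps the
 * raw OpenCL launch sequence. As an illustrative sketch only (not the actual
 * implementation, which lives with the gpu_utils helpers included above), an
 * equivalent manual launch would look like:
 *
 *     clSetKernelArg(nb_kernel, 0, sizeof(nbparams_params), &nbparams_params);
 *     ...                                // one clSetKernelArg() per argument
 *     const size_t local[3]  = { c_clSize, c_clSize, 1 };
 *     const size_t global[3] = { c_clSize * plist->nsci, c_clSize, 1 };
 *     clEnqueueNDRangeKernel(stream, nb_kernel, 3, NULL, global, local,
 *                            0, NULL, timingEvent);
 */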
/*! \brief Calculates the amount of shared memory required by the prune kernel.
 *
 * Note that for the sake of simplicity we use the CUDA terminology "shared memory"
 * for OpenCL local memory.
 *
 * \param[in] num_threads_z cj4 concurrency equal to the number of threads/work items in the 3-rd dimension.
 * \returns   the amount of local memory in bytes required by the pruning kernel
 */
static inline int calc_shmem_required_prune(const int num_threads_z)
{
    int shmem;

    /* i-atom x in shared memory (for convenience we load all 4 components including q) */
    shmem  = c_numClPerSupercl * c_clSize * sizeof(float)*4;
    /* cj in shared memory, for each warp separately
     * Note: only need to load once per wavefront, but to keep the code simple,
     * for now we load twice on AMD.
     */
    shmem += num_threads_z * c_nbnxnGpuClusterpairSplit * c_nbnxnGpuJgroupSize * sizeof(int);
    /* Warp vote, requires one uint per warp/32 threads per block. */
    shmem += sizeof(cl_uint) * 2*num_threads_z;

    return shmem;
}
void nbnxn_gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t *nb,
                                       int              iloc,
                                       int              numParts)
{
    cl_atomdata_t   *adat    = nb->atdat;
    cl_nbparam_t    *nbp     = nb->nbparam;
    cl_plist_t      *plist   = nb->plist[iloc];
    cl_timers_t     *t       = nb->timers;
    cl_command_queue stream  = nb->stream[iloc];
    bool             bDoTime = nb->bDoTime;

    if (plist->haveFreshList)
    {
        GMX_ASSERT(numParts == 1, "With first pruning we expect 1 part");

        /* Set rollingPruningNumParts to signal that it is not set */
        plist->rollingPruningNumParts = 0;
        plist->rollingPruningPart     = 0;
    }
    else
    {
        if (plist->rollingPruningNumParts == 0)
        {
            plist->rollingPruningNumParts = numParts;
        }
        else
        {
            GMX_ASSERT(numParts == plist->rollingPruningNumParts, "It is not allowed to change numParts in between list generation steps");
        }
    }

    /* Use a local variable for part and update in plist, so we can return here
     * without duplicating the part increment code.
     */
    int part = plist->rollingPruningPart;

    plist->rollingPruningPart++;
    if (plist->rollingPruningPart >= plist->rollingPruningNumParts)
    {
        plist->rollingPruningPart = 0;
    }

    /* Compute the number of list entries to prune in this pass */
    int numSciInPart = (plist->nsci - part)/numParts;

    /* Don't launch the kernel if there is no work to do. */
    if (numSciInPart <= 0)
    {
        plist->haveFreshList = false;

        return;
    }
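
    /* Illustrative scheduling (assuming numParts == 2): successive rolling-prune
     * calls between two list generation steps alternate part = 0, 1, 0, 1, ...,
     * so each pass prunes roughly plist->nsci/2 super-cluster list entries. */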
    GpuRegionTimer *timer = nullptr;
    if (bDoTime)
    {
        timer = &(plist->haveFreshList ? t->prune_k[iloc] : t->rollingPrune_k[iloc]);
    }

    /* beginning of timed prune calculation section */
    if (bDoTime)
    {
        timer->openTimingRegion(stream);
    }

    /* Kernel launch config:
     * - The thread block dimensions match the size of i-clusters, j-clusters,
     *   and j-cluster concurrency, in x, y, and z, respectively.
     * - The 1D block-grid contains as many blocks as super-clusters.
     */
    int       num_threads_z = getOclPruneKernelJ4Concurrency(nb->dev_info->vendor_e);
    cl_kernel pruneKernel   = selectPruneKernel(nb->kernel_pruneonly, plist->haveFreshList);

    /* kernel launch config */
    KernelLaunchConfig config;
    config.sharedMemorySize = calc_shmem_required_prune(num_threads_z);
    config.stream           = stream;
    config.blockSize[0]     = c_clSize;
    config.blockSize[1]     = c_clSize;
    config.blockSize[2]     = num_threads_z;
    config.gridSize[0]      = numSciInPart;

    validate_global_work_size(config, 3, nb->dev_info);

    if (debug)
    {
        fprintf(debug, "Pruning GPU kernel launch configuration:\n\tLocal work size: %dx%dx%d\n\t"
                "\tGlobal work size: %dx%d\n\t#Super-clusters/clusters: %d/%d (%d)\n"
                "\tShMem: %zu\n",
                (int)(config.blockSize[0]), (int)(config.blockSize[1]), (int)(config.blockSize[2]),
                (int)(config.blockSize[0] * config.gridSize[0]), (int)(config.blockSize[1] * config.gridSize[1]), plist->nsci*c_numClPerSupercl,
                c_numClPerSupercl, plist->na_c, config.sharedMemorySize);
    }
    cl_nbparam_params_t nbparams_params;
    fillin_ocl_structures(nbp, &nbparams_params);

    auto          *timingEvent  = bDoTime ? timer->fetchNextEvent() : nullptr;
    constexpr char kernelName[] = "k_pruneonly";
    const auto     kernelArgs   = prepareGpuKernelArguments(pruneKernel, config,
                                                            &nbparams_params, &adat->xq, &adat->shift_vec,
                                                            &plist->sci, &plist->cj4, &plist->imask, &numParts, &part);
    launchGpuKernel(pruneKernel, config, timingEvent, kernelName, kernelArgs);

    if (plist->haveFreshList)
    {
        plist->haveFreshList = false;
        /* Mark that pruning has been done */
        nb->timers->didPrune[iloc] = true;
    }
    else
    {
        /* Mark that rolling pruning has been done */
        nb->timers->didRollingPrune[iloc] = true;
    }

    if (bDoTime)
    {
        timer->closeTimingRegion(stream);
    }
}
/*! \brief
 * Launch asynchronously the download of nonbonded forces from the GPU
 * (and energies/shift forces if required).
 */
void nbnxn_gpu_launch_cpyback(gmx_nbnxn_ocl_t               *nb,
                              const struct nbnxn_atomdata_t *nbatom,
                              int                            flags,
                              int                            aloc)
{
    cl_int gmx_unused cl_error;
    int               adat_begin, adat_len; /* local/nonlocal offset and length used for xq and f */

    /* determine interaction locality from atom locality */
    int iloc = gpuAtomToInteractionLocality(aloc);

    cl_atomdata_t   *adat    = nb->atdat;
    cl_timers_t     *t       = nb->timers;
    bool             bDoTime = nb->bDoTime;
    cl_command_queue stream  = nb->stream[iloc];

    bool bCalcEner   = flags & GMX_FORCE_ENERGY;
    int  bCalcFshift = flags & GMX_FORCE_VIRIAL;

    /* don't launch non-local copy-back if there was no non-local work to do */
    if (canSkipWork(nb, iloc))
    {
        /* TODO An alternative way to signal that non-local work is
           complete is to use a clEnqueueMarker+clEnqueueBarrier
           pair. However, the use of bNonLocalStreamActive has the
           advantage of being local to the host, so it probably minimizes
           overhead. Curiously, for NVIDIA OpenCL with an empty-domain
           test case, overall simulation performance was higher with
           the API calls, but this has not been tested on AMD OpenCL,
           so it could be worth considering in future. */
        nb->bNonLocalStreamActive = false;
        return;
    }
    getGpuAtomRange(adat, aloc, adat_begin, adat_len);

    /* beginning of timed D2H section */
    if (bDoTime)
    {
        t->nb_d2h[iloc].openTimingRegion(stream);
    }

    /* With DD the local D2H transfer can only start after the non-local
       kernel has been launched. */
    if (iloc == eintLocal && nb->bNonLocalStreamActive)
    {
        sync_ocl_event(stream, &(nb->nonlocal_done));
    }

    /* DtoH f */
    ocl_copy_D2H_async(nbatom->out[0].f + adat_begin * 3, adat->f, adat_begin*3*sizeof(float),
                       (adat_len)* adat->f_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);

    /* kick off work */
    cl_error = clFlush(stream);
    assert(CL_SUCCESS == cl_error);

    /* After the non-local D2H is launched the nonlocal_done event can be
       recorded which signals that the local D2H can proceed. This event is not
       placed after the non-local kernel because we first need the non-local
       data back. */
    if (iloc == eintNonlocal)
    {
        cl_error = clEnqueueMarkerWithWaitList(stream, 0, NULL, &(nb->nonlocal_done));
        assert(CL_SUCCESS == cl_error);
        nb->bNonLocalStreamActive = true;
    }
    /* only transfer energies in the local stream */
    if (LOCAL_I(iloc))
    {
        /* DtoH fshift */
        if (bCalcFshift)
        {
            ocl_copy_D2H_async(nb->nbst.fshift, adat->fshift, 0,
                               SHIFTS * adat->fshift_elem_size, stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
        }

        /* DtoH energies */
        if (bCalcEner)
        {
            ocl_copy_D2H_async(nb->nbst.e_lj, adat->e_lj, 0,
                               sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
            ocl_copy_D2H_async(nb->nbst.e_el, adat->e_el, 0,
                               sizeof(float), stream, bDoTime ? t->nb_d2h[iloc].fetchNextEvent() : nullptr);
        }
    }

    if (bDoTime)
    {
        t->nb_d2h[iloc].closeTimingRegion(stream);
    }
}
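
/* Illustrative ordering enforced above when two streams are in use (with DD,
 * assuming the non-local copy-back is launched before the local one):
 *
 *     non-local stream: [nb kernel] -> [D2H f] -> marker(nonlocal_done)
 *     local stream:     wait(nonlocal_done) -> [D2H f] -> [D2H fshift/energies]
 */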
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
int nbnxn_gpu_pick_ewald_kernel_type(bool bTwinCut)
{
    bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
    int  kernel_type;

    /* Benchmarking/development environment variables to force the use of
       analytical or tabulated Ewald kernel. */
    bForceAnalyticalEwald = (getenv("GMX_OCL_NB_ANA_EWALD") != NULL);
    bForceTabulatedEwald  = (getenv("GMX_OCL_NB_TAB_EWALD") != NULL);

    if (bForceAnalyticalEwald && bForceTabulatedEwald)
    {
        gmx_incons("Both analytical and tabulated Ewald OpenCL non-bonded kernels "
                   "requested through environment variables.");
    }

    /* OpenCL: By default, use analytical Ewald
     * TODO: tabulated does not work, it needs fixing, see init_nbparam() in nbnxn_ocl_data_mgmt.cpp
     *
     * TODO: decide if dev_info parameter should be added to recognize NVIDIA CC>=3.0 devices.
     */
    //if ((dev_info->prop.major >= 3 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
    if ((1 || bForceAnalyticalEwald) && !bForceTabulatedEwald)
    {
        bUseAnalyticalEwald = true;

        if (debug)
        {
            fprintf(debug, "Using analytical Ewald OpenCL kernels\n");
        }
    }
    else
    {
        bUseAnalyticalEwald = false;

        if (debug)
        {
            fprintf(debug, "Using tabulated Ewald OpenCL kernels\n");
        }
    }

    /* Use twin cut-off kernels if requested by bTwinCut or the env. var.
       forces it (use it for debugging/benchmarking only). */
    if (!bTwinCut && (getenv("GMX_OCL_NB_EWALD_TWINCUT") == NULL))
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA : eelOclEWALD_TAB;
    }
    else
    {
        kernel_type = bUseAnalyticalEwald ? eelOclEWALD_ANA_TWIN : eelOclEWALD_TAB_TWIN;
    }

    return kernel_type;
}
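
/* Example (debugging/benchmarking only, using the environment variables read
 * above): forcing the tabulated twin cut-off Ewald flavor from the shell:
 *
 *     GMX_OCL_NB_TAB_EWALD=1 GMX_OCL_NB_EWALD_TWINCUT=1 gmx mdrun ...
 *
 * Note that tabulated Ewald kernels are flagged as broken in the TODO above,
 * so this is useful mainly for development. */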