StatePropagatorDataGpu object to manage GPU forces, positions and velocities buffers
[gromacs.git] / src / gromacs / nbnxm / nbnxm_gpu.h
blobf661a19ca1a4ea4759c2b295435677f7d5fd20ef
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
35 /*! \libinternal \file
36 * \brief Declare interface for GPU execution for NBNXN module
38 * \author Szilard Pall <pall.szilard@gmail.com>
39 * \author Mark Abraham <mark.j.abraham@gmail.com>
40 * \ingroup module_nbnxm
43 #ifndef GMX_NBNXM_NBNXM_GPU_H
44 #define GMX_NBNXM_NBNXM_GPU_H
46 #include "gromacs/gpu_utils/gpu_macros.h"
47 #include "gromacs/math/vectypes.h"
48 #include "gromacs/nbnxm/atomdata.h"
49 #include "gromacs/utility/basedefinitions.h"
50 #include "gromacs/utility/real.h"
52 #include "gpu_types.h"
53 #include "locality.h"
55 struct interaction_const_t;
56 struct nbnxn_atomdata_t;
57 struct gmx_wallcycle;
58 enum class GpuTaskCompletion;
60 namespace gmx
62 class GpuBonded;
63 class StepWorkload;
66 namespace Nbnxm
69 class Grid;
71 /*! \brief
72 * Launch asynchronously the xq buffer host to device copy.
74 * The nonlocal copy is skipped if there is no dependent work to do,
75 * neither non-local nonbonded interactions nor bonded GPU work.
77 * \param [in] nb GPU nonbonded data.
78 * \param [in] nbdata Host-side atom data structure.
79 * \param [in] aloc Atom locality flag.
81 GPU_FUNC_QUALIFIER
82 void gpu_copy_xq_to_gpu(gmx_nbnxn_gpu_t gmx_unused *nb,
83 const struct nbnxn_atomdata_t gmx_unused *nbdata,
84 AtomLocality gmx_unused aloc) GPU_FUNC_TERM;
86 /*! \brief
87 * Launch asynchronously the nonbonded force calculations.
89 * Also launches the initial pruning of a fresh list after search.
91 * The local and non-local interaction calculations are launched in two
92 * separate streams. If there is no work (i.e. empty pair list), the
93 * force kernel launch is omitted.
96 GPU_FUNC_QUALIFIER
97 void gpu_launch_kernel(gmx_nbnxn_gpu_t gmx_unused *nb,
98 const gmx::StepWorkload gmx_unused &stepWork,
99 InteractionLocality gmx_unused iloc) GPU_FUNC_TERM;
101 /*! \brief
102 * Launch asynchronously the nonbonded prune-only kernel.
104 * The local and non-local list pruning are launched in their separate streams.
106 * Notes for future scheduling tuning:
107 * Currently we schedule the dynamic pruning between two MD steps *after* both local and
108 * nonlocal force D2H transfers completed. We could launch already after the cpyback
109 * is launched, but we want to avoid prune kernels (especially in the non-local
110 * high prio-stream) competing with nonbonded work.
112 * However, this is not ideal as this schedule does not expose the available
113 * concurrency. The dynamic pruning kernel:
114 * - should be allowed to overlap with any task other than force compute, including
115 * transfers (F D2H and the next step's x H2D as well as force clearing).
116 * - we'd prefer to avoid competition with non-bonded force kernels belonging
117 * to the same rank and ideally other ranks too.
119 * In the most general case, the former would require scheduling pruning in a separate
120 * stream and adding additional event sync points to ensure that force kernels read
121 * consistent pair list data. This would lead to some overhead (due to extra
122 * cudaStreamWaitEvent calls, 3-5 us/call) which we might be able to live with.
123 * The gains from additional overlap might not be significant as long as
124 * update+constraints anyway takes longer than pruning, but there will still
125 * be use-cases where more overlap may help (e.g. multiple ranks per GPU,
126 * no/hbonds only constraints).
127 * The above second point is harder to address given that multiple ranks will often
128 * share a GPU. Ranks that complete their nonbondeds sooner can schedule pruning earlier
129 * and without a third priority level it is difficult to avoid some interference of
130 * prune kernels with force tasks (in particular preemption of low-prio local force task).
132 * \param [inout] nb GPU nonbonded data.
133 * \param [in] iloc Interaction locality flag.
134 * \param [in] numParts Number of parts the pair list is split into in the rolling kernel.
136 GPU_FUNC_QUALIFIER
137 void gpu_launch_kernel_pruneonly(gmx_nbnxn_gpu_t gmx_unused *nb,
138 InteractionLocality gmx_unused iloc,
139 int gmx_unused numParts) GPU_FUNC_TERM;
141 /*! \brief
142 * Launch asynchronously the download of short-range forces from the GPU
143 * (and energies/shift forces if required).
145 GPU_FUNC_QUALIFIER
146 void gpu_launch_cpyback(gmx_nbnxn_gpu_t gmx_unused *nb,
147 nbnxn_atomdata_t gmx_unused *nbatom,
148 const gmx::StepWorkload gmx_unused &stepWork,
149 AtomLocality gmx_unused aloc,
150 bool gmx_unused copyBackNbForce) GPU_FUNC_TERM;
152 /*! \brief Attempts to complete nonbonded GPU task.
154 * This function attempts to complete the nonbonded task (both GPU and CPU auxiliary work).
155 * Success, i.e. that the tasks completed and results are ready to be consumed, is signaled
156 * by the return value (always true if blocking wait mode requested).
158 * The \p completionKind parameter controls whether the behavior is non-blocking
159 * (achieved by passing GpuTaskCompletion::Check) or blocking wait until the results
160 * are ready (when GpuTaskCompletion::Wait is passed).
161 * In "Check" mode the function returns immediately if the GPU stream
162 * still contains tasks that have not completed; this allows more flexible overlapping
163 * of work on the CPU with GPU execution.
165 * Note that it is only safe to use the results, and to continue to the next MD
166 * step when this function has returned true which indicates successful completion of
167 * - All nonbonded GPU tasks: both compute and device transfer(s)
168 * - auxiliary tasks: updating the internal module state (timing accumulation, list pruning states) and
169 * - internal staging reduction of (\p fshift, \p e_el, \p e_lj).
171 * In GpuTaskCompletion::Check mode this function does the timing and keeps correct count
172 * for the nonbonded task (incrementing only once per task), in the GpuTaskCompletion::Wait mode
173 * timing is expected to be done in the caller.
175 * TODO: improve the handling of outputs e.g. by ensuring that this function explicitly returns the
176 * force buffer (instead of that being passed only to nbnxn_gpu_launch_cpyback()) and by returning
177 * the energy and Fshift contributions for some external/centralized reduction.
179 * \param[in] nb The nonbonded data GPU structure
180 * \param[in] stepWork Step schedule flags
181 * \param[in] aloc Atom locality identifier
182 * \param[out] e_lj Pointer to the LJ energy output to accumulate into
183 * \param[out] e_el Pointer to the electrostatics energy output to accumulate into
184 * \param[out] shiftForces Shift forces buffer to accumulate into
185 * \param[in] completionKind Indicates whether nonbonded task completion should only be checked rather than waited for
186 * \param[out] wcycle Pointer to wallcycle data structure
187 * \returns True if the nonbonded tasks associated with \p aloc locality have completed
189 GPU_FUNC_QUALIFIER
190 bool gpu_try_finish_task(gmx_nbnxn_gpu_t gmx_unused *nb,
191 const gmx::StepWorkload gmx_unused &stepWork,
192 AtomLocality gmx_unused aloc,
193 real gmx_unused *e_lj,
194 real gmx_unused *e_el,
195 gmx::ArrayRef<gmx::RVec> gmx_unused shiftForces,
196 GpuTaskCompletion gmx_unused completionKind,
197 gmx_wallcycle gmx_unused *wcycle) GPU_FUNC_TERM_WITH_RETURN(false);
199 /*! \brief Completes the nonbonded GPU task, blocking until GPU tasks and data
200 * transfers have finished.
202 * Also does timing accounting and reduction of the internal staging buffers.
203 * As this is called at the end of the step, it also resets the pair list and
204 * pruning flags.
206 * \param[in] nb The nonbonded data GPU structure
207 * \param[in] stepWork Step schedule flags
208 * \param[in] aloc Atom locality identifier
209 * \param[out] e_lj Pointer to the LJ energy output to accumulate into
210 * \param[out] e_el Pointer to the electrostatics energy output to accumulate into
211 * \param[out] shiftForces Shift forces buffer to accumulate into
213 GPU_FUNC_QUALIFIER
214 float gpu_wait_finish_task(gmx_nbnxn_gpu_t gmx_unused *nb,
215 const gmx::StepWorkload gmx_unused &stepWork,
216 AtomLocality gmx_unused aloc,
217 real gmx_unused *e_lj,
218 real gmx_unused *e_el,
219 gmx::ArrayRef<gmx::RVec> gmx_unused shiftForces,
220 gmx_wallcycle gmx_unused *wcycle) GPU_FUNC_TERM_WITH_RETURN(0.0);
222 /*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
223 GPU_FUNC_QUALIFIER
224 int nbnxn_gpu_pick_ewald_kernel_type(const interaction_const_t gmx_unused &ic) GPU_FUNC_TERM_WITH_RETURN(-1);
226 /*! \brief Initialization for X buffer operations on GPU.
227 * Called on the NS step and performs (re-)allocations and memory copies. */
228 CUDA_FUNC_QUALIFIER
229 void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet gmx_unused &gridSet,
230 gmx_nbnxn_gpu_t gmx_unused *gpu_nbv) CUDA_FUNC_TERM;
232 /*! \brief X buffer operations on GPU: performs conversion from rvec to nb format.
234 * \param[in] grid Grid to be converted.
235 * \param[in] setFillerCoords If the filler coordinates are used.
236 * \param[in,out] gpu_nbv The nonbonded data GPU structure.
237 * \param[in] d_x Device-side coordinates in plain rvec format.
238 * \param[in] locality Copy coordinates for local or non-local atoms.
239 * \param[in] gridId Index of the grid being converted.
240 * \param[in] numColumnsMax Maximum number of columns in the grid.
242 CUDA_FUNC_QUALIFIER
243 void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid gmx_unused &grid,
244 bool gmx_unused setFillerCoords,
245 gmx_nbnxn_gpu_t gmx_unused *gpu_nbv,
246 DeviceBuffer<float> gmx_unused d_x,
247 Nbnxm::AtomLocality gmx_unused locality,
248 int gmx_unused gridId,
249 int gmx_unused numColumnsMax) CUDA_FUNC_TERM;
251 /*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
252 * \param[in] nb The nonbonded data GPU structure
253 * \param[in] interactionLocality Local or NonLocal sync point
255 CUDA_FUNC_QUALIFIER
256 void nbnxnInsertNonlocalGpuDependency(const gmx_nbnxn_gpu_t gmx_unused *nb,
257 InteractionLocality gmx_unused interactionLocality) CUDA_FUNC_TERM;
259 /*! \brief Set up internal flags that indicate what type of short-range work there is.
261 * As nonbondeds and bondeds share input/output buffers and GPU queues,
262 * both are considered when checking for work in the current domain.
264 * This function is expected to be called every time the work-distribution
265 * can change (i.e. at search/domain decomposition steps).
267 * \param[inout] nb Pointer to the nonbonded GPU data structure
268 * \param[in] gpuBonded Pointer to the GPU bonded data structure
269 * \param[in] iLocality Interaction locality identifier
271 GPU_FUNC_QUALIFIER
272 void setupGpuShortRangeWork(gmx_nbnxn_gpu_t gmx_unused *nb,
273 const gmx::GpuBonded gmx_unused *gpuBonded,
274 Nbnxm::InteractionLocality gmx_unused iLocality) GPU_FUNC_TERM;
276 /*! \brief Returns true if there is GPU short-range work for the given atom locality.
278 * Note that as, unlike nonbonded tasks, bonded tasks are not split into local/nonlocal,
279 * and therefore if there are GPU offloaded bonded interactions, this function will return
280 * true for both local and nonlocal atom range.
282 * \param[inout] nb Pointer to the nonbonded GPU data structure
283 * \param[in] aLocality Atom locality identifier
285 GPU_FUNC_QUALIFIER
286 bool haveGpuShortRangeWork(const gmx_nbnxn_gpu_t gmx_unused *nb,
287 Nbnxm::AtomLocality gmx_unused aLocality) GPU_FUNC_TERM_WITH_RETURN(false);
289 /*! \brief Initialization for F buffer operations on GPU */
290 CUDA_FUNC_QUALIFIER
291 void nbnxn_gpu_init_add_nbat_f_to_f(const int gmx_unused *cell,
292 gmx_nbnxn_gpu_t gmx_unused *gpu_nbv,
293 int gmx_unused natoms_total) CUDA_FUNC_TERM;
295 /*! \brief Force buffer operations on GPU.
297 * Transforms non-bonded forces into plain rvec format and add all the force components to the total
298 * force buffer
300 * \param[in] atomLocality If the reduction should be performed on local or non-local atoms.
301 * \param[in] totalForcesDevice Device buffer to accumulate resulting force.
302 * \param[in] gpu_nbv The NBNXM GPU data structure.
303 * \param[in] pmeForcesDevice Device buffer with PME forces.
304 * \param[in] pmeForcesReady Event that signals when the PME forces are ready for the reduction.
305 * \param[in] atomStart Index of the first atom to reduce forces for.
306 * \param[in] numAtoms Number of atoms to reduce forces for.
307 * \param[in] useGpuFPmeReduction Whether PME forces should be added.
308 * \param[in] accumulateForce Whether there are useful data already in the total force buffer.
311 CUDA_FUNC_QUALIFIER
312 void nbnxn_gpu_add_nbat_f_to_f(AtomLocality gmx_unused atomLocality,
313 DeviceBuffer<float> gmx_unused totalForcesDevice,
314 gmx_nbnxn_gpu_t gmx_unused *gpu_nbv,
315 void gmx_unused *pmeForcesDevice,
316 GpuEventSynchronizer gmx_unused *pmeForcesReady,
317 int gmx_unused atomStart,
318 int gmx_unused numAtoms,
319 bool gmx_unused useGpuFPmeReduction,
320 bool gmx_unused accumulateForce) CUDA_FUNC_TERM;
322 /*! \brief Wait for GPU stream to complete */
323 CUDA_FUNC_QUALIFIER
324 void nbnxn_wait_for_gpu_force_reduction(AtomLocality gmx_unused atomLocality,
325 gmx_nbnxn_gpu_t gmx_unused *nb) CUDA_FUNC_TERM;
327 /*! \brief sync CPU thread on coordinate copy to device
328 * \param[in] nb The nonbonded data GPU structure
330 CUDA_FUNC_QUALIFIER
331 void nbnxn_wait_x_on_device(gmx_nbnxn_gpu_t gmx_unused *nb) CUDA_FUNC_TERM;
333 /*! \brief return pointer to event recorded when coordinates have been copied to device
334 * \param[in] nb The nonbonded data GPU structure
336 CUDA_FUNC_QUALIFIER
337 void* nbnxn_get_x_on_device_event(const gmx_nbnxn_gpu_t gmx_unused *nb) CUDA_FUNC_TERM_WITH_RETURN(nullptr);
339 /*! \brief Wait for non-local copy of coordinate buffer from device to host
340 * \param[in] nb The nonbonded data GPU structure
342 CUDA_FUNC_QUALIFIER
343 void nbnxn_wait_nonlocal_x_copy_D2H_done(gmx_nbnxn_gpu_t gmx_unused *nb) CUDA_FUNC_TERM;
345 /*! \brief Ensure local stream waits for non-local stream
346 * \param[in] nb The nonbonded data GPU structure
348 CUDA_FUNC_QUALIFIER
349 void nbnxn_stream_local_wait_for_nonlocal(gmx_nbnxn_gpu_t gmx_unused *nb) CUDA_FUNC_TERM;
351 } // namespace Nbnxm
352 #endif