/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \libinternal \file
 * \brief Declare interface for GPU execution for NBNXN module
 *
 * \author Szilard Pall <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_nbnxm
 */
#ifndef GMX_NBNXM_NBNXM_GPU_H
#define GMX_NBNXM_NBNXM_GPU_H

#include "gromacs/gpu_utils/gpu_macros.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/locality.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/real.h"
struct interaction_const_t;
struct nbnxn_atomdata_t;
struct gmx_wallcycle;
enum class GpuTaskCompletion;
namespace gmx
{
class GpuBonded;
class StepWorkload;
} // namespace gmx
namespace Nbnxm
{

class Grid;
/*! \brief
 * Launch asynchronously the xq buffer host to device copy.
 *
 * The nonlocal copy is skipped if there is no dependent work to do,
 * neither non-local nonbonded interactions nor bonded GPU work.
 *
 * \param [in] nb        GPU nonbonded data.
 * \param [in] nbdata    Host-side atom data structure.
 * \param [in] aloc      Atom locality flag.
 */
GPU_FUNC_QUALIFIER
void gpu_copy_xq_to_gpu(NbnxmGpu gmx_unused* nb,
                        const struct nbnxn_atomdata_t gmx_unused* nbdata,
                        gmx::AtomLocality gmx_unused aloc) GPU_FUNC_TERM;
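/* Illustrative call sequence (a minimal sketch, not part of this header;
 * `nb` and `nbat` are assumed to have been set up by the NBNXM module, and
 * `havePPDomainDecomposition` is a hypothetical flag of the caller):
 *
 *     // The local copy is issued every step; the non-local one only when
 *     // there is dependent non-local nonbonded or bonded GPU work.
 *     gpu_copy_xq_to_gpu(nb, nbat, gmx::AtomLocality::Local);
 *     if (havePPDomainDecomposition)
 *     {
 *         gpu_copy_xq_to_gpu(nb, nbat, gmx::AtomLocality::NonLocal);
 *     }
 */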
/*! \brief
 * Launch asynchronously the nonbonded force calculations.
 *
 * Also launches the initial pruning of a fresh list after search.
 *
 * The local and non-local interaction calculations are launched in two
 * separate streams. If there is no work (i.e. empty pair list), the
 * force kernel launch is omitted.
 */
GPU_FUNC_QUALIFIER
void gpu_launch_kernel(NbnxmGpu gmx_unused* nb,
                       const gmx::StepWorkload gmx_unused& stepWork,
                       gmx::InteractionLocality gmx_unused iloc) GPU_FUNC_TERM;
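/* A sketch of how the kernel launches typically follow the corresponding
 * coordinate copies, one launch per interaction locality (hypothetical
 * control flow; `stepWork` is assumed to come from the MD scheduler):
 *
 *     gpu_launch_kernel(nb, stepWork, gmx::InteractionLocality::Local);
 *     if (havePPDomainDecomposition)
 *     {
 *         gpu_launch_kernel(nb, stepWork, gmx::InteractionLocality::NonLocal);
 *     }
 */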
/*! \brief
 * Launch asynchronously the nonbonded prune-only kernel.
 *
 * The local and non-local list pruning are launched in their separate streams.
 *
 * Notes for future scheduling tuning:
 * Currently we schedule the dynamic pruning between two MD steps *after* both local and
 * nonlocal force D2H transfers have completed. We could launch already after the copy-back
 * is launched, but we want to avoid prune kernels (especially in the non-local
 * high-priority stream) competing with nonbonded work.
 *
 * However, this is not ideal as this schedule does not expose the available
 * concurrency. The dynamic pruning kernel:
 * - should be allowed to overlap with any task other than force compute, including
 *   transfers (F D2H and the next step's x H2D as well as force clearing);
 * - should ideally avoid competing with the non-bonded force kernels belonging
 *   to the same rank, and preferably with those of other ranks too.
 *
 * In the most general case, the former would require scheduling pruning in a separate
 * stream and adding additional event sync points to ensure that force kernels read
 * consistent pair list data. This would lead to some overhead (due to extra
 * cudaStreamWaitEvent calls, 3-5 us/call) which we might be able to live with.
 * The gains from additional overlap might not be significant as long as
 * update+constraints anyway takes longer than pruning, but there will still
 * be use-cases where more overlap may help (e.g. multiple ranks per GPU,
 * no constraints or h-bonds-only constraints).
 * The second point above is harder to address given that multiple ranks will often
 * share a GPU. Ranks that complete their nonbondeds sooner can schedule pruning earlier,
 * and without a third priority level it is difficult to avoid some interference of
 * prune kernels with force tasks (in particular preemption of the low-priority local force task).
 *
 * \param [inout] nb        GPU nonbonded data.
 * \param [in]    iloc      Interaction locality flag.
 * \param [in]    numParts  Number of parts the pair list is split into in the rolling kernel.
 */
GPU_FUNC_QUALIFIER
void gpu_launch_kernel_pruneonly(NbnxmGpu gmx_unused* nb,
                                 gmx::InteractionLocality gmx_unused iloc,
                                 int gmx_unused numParts) GPU_FUNC_TERM;
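/* Rolling-pruning sketch: with numParts > 1 the pair list is pruned in
 * chunks spread over consecutive steps (illustrative only; the part count
 * and per-step scheduling are decided by the caller):
 *
 *     const int numRollingPruningParts = 2; // hypothetical choice
 *     gpu_launch_kernel_pruneonly(nb, gmx::InteractionLocality::Local,
 *                                 numRollingPruningParts);
 */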
/*! \brief
 * Launch asynchronously the download of short-range forces from the GPU
 * (and energies/shift forces if required).
 */
GPU_FUNC_QUALIFIER
void gpu_launch_cpyback(NbnxmGpu gmx_unused* nb,
                        nbnxn_atomdata_t gmx_unused* nbatom,
                        const gmx::StepWorkload gmx_unused& stepWork,
                        gmx::AtomLocality gmx_unused aloc) GPU_FUNC_TERM;
/*! \brief Attempts to complete nonbonded GPU task.
 *
 * This function attempts to complete the nonbonded task (both GPU and CPU auxiliary work).
 * Success, i.e. that the tasks completed and results are ready to be consumed, is signaled
 * by the return value (always true if blocking wait mode is requested).
 *
 * The \p completionKind parameter controls whether the behavior is non-blocking
 * (achieved by passing GpuTaskCompletion::Check) or a blocking wait until the results
 * are ready (when GpuTaskCompletion::Wait is passed).
 * Because in "Check" mode the function returns immediately if the GPU stream
 * still contains tasks that have not completed, it allows more flexible overlapping
 * of work on the CPU with GPU execution.
 *
 * Note that it is only safe to use the results, and to continue to the next MD
 * step, when this function has returned true, which indicates successful completion of
 * - all nonbonded GPU tasks: both compute and device transfer(s);
 * - auxiliary tasks: updating the internal module state (timing accumulation, list pruning states); and
 * - internal staging reduction of (\p fshift, \p e_el, \p e_lj).
 *
 * In GpuTaskCompletion::Check mode this function does the timing and keeps the correct count
 * for the nonbonded task (incrementing only once per task); in GpuTaskCompletion::Wait mode
 * timing is expected to be done in the caller.
 *
 * TODO: improve the handling of outputs e.g. by ensuring that this function explicitly returns the
 * force buffer (instead of that being passed only to nbnxn_gpu_launch_cpyback()) and by returning
 * the energy and Fshift contributions for some external/centralized reduction.
 *
 * \param[in]  nb             The nonbonded data GPU structure
 * \param[in]  stepWork       Step schedule flags
 * \param[in]  aloc           Atom locality identifier
 * \param[out] e_lj           Pointer to the LJ energy output to accumulate into
 * \param[out] e_el           Pointer to the electrostatics energy output to accumulate into
 * \param[out] shiftForces    Shift forces buffer to accumulate into
 * \param[in]  completionKind Indicates whether nonbonded task completion should only be checked rather than waited for
 * \param[out] wcycle         Pointer to wallcycle data structure
 * \returns                   True if the nonbonded tasks associated with \p aloc locality have completed
 */
GPU_FUNC_QUALIFIER
bool gpu_try_finish_task(NbnxmGpu gmx_unused* nb,
                         const gmx::StepWorkload gmx_unused& stepWork,
                         gmx::AtomLocality gmx_unused aloc,
                         real gmx_unused* e_lj,
                         real gmx_unused* e_el,
                         gmx::ArrayRef<gmx::RVec> gmx_unused shiftForces,
                         GpuTaskCompletion gmx_unused completionKind,
                         gmx_wallcycle gmx_unused* wcycle) GPU_FUNC_TERM_WITH_RETURN(false);
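/* Non-blocking polling sketch: overlap CPU work with the GPU until the
 * nonbonded task completes (surrounding variable setup assumed;
 * doCpuWorkChunk() is a hypothetical stand-in for useful CPU-side work):
 *
 *     while (!gpu_try_finish_task(nb, stepWork, gmx::AtomLocality::Local,
 *                                 &e_lj, &e_el, shiftForces,
 *                                 GpuTaskCompletion::Check, wcycle))
 *     {
 *         doCpuWorkChunk();
 *     }
 */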
/*! \brief Completes the nonbonded GPU task, blocking until GPU tasks and data
 * transfers finish.
 *
 * Also does timing accounting and reduction of the internal staging buffers.
 * As this is called at the end of the step, it also resets the pair list and
 * pruning flags.
 *
 * \param[in]  nb          The nonbonded data GPU structure
 * \param[in]  stepWork    Step schedule flags
 * \param[in]  aloc        Atom locality identifier
 * \param[out] e_lj        Pointer to the LJ energy output to accumulate into
 * \param[out] e_el        Pointer to the electrostatics energy output to accumulate into
 * \param[out] shiftForces Shift forces buffer to accumulate into
 * \param[out] wcycle      Pointer to wallcycle data structure */
GPU_FUNC_QUALIFIER
float gpu_wait_finish_task(NbnxmGpu gmx_unused* nb,
                           const gmx::StepWorkload gmx_unused& stepWork,
                           gmx::AtomLocality gmx_unused aloc,
                           real gmx_unused* e_lj,
                           real gmx_unused* e_el,
                           gmx::ArrayRef<gmx::RVec> gmx_unused shiftForces,
                           gmx_wallcycle gmx_unused* wcycle) GPU_FUNC_TERM_WITH_RETURN(0.0);
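/* Blocking-wait sketch (illustrative; how the caller uses the float return
 * value is an assumption of this example, not prescribed by this header):
 *
 *     const float gpuWaitTime = gpu_wait_finish_task(nb, stepWork, gmx::AtomLocality::Local,
 *                                                    &e_lj, &e_el, shiftForces, wcycle);
 */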
/*! \brief Selects the Ewald kernel type, analytical or tabulated, single or twin cut-off. */
GPU_FUNC_QUALIFIER
int nbnxn_gpu_pick_ewald_kernel_type(const interaction_const_t gmx_unused& ic)
        GPU_FUNC_TERM_WITH_RETURN(-1);
/*! \brief Initialization for X buffer operations on GPU.
 * Called on the NS step and performs (re-)allocations and memory copies. */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet gmx_unused& gridSet,
                                NbnxmGpu gmx_unused* gpu_nbv) CUDA_FUNC_TERM;
/*! \brief X buffer operations on GPU: performs conversion from rvec to nb format.
 *
 * \param[in]     grid            Grid to be converted.
 * \param[in]     setFillerCoords If the filler coordinates are used.
 * \param[in,out] gpu_nbv         The nonbonded data GPU structure.
 * \param[in]     d_x             Device-side coordinates in plain rvec format.
 * \param[in]     xReadyOnDevice  Event synchronizer indicating that the coordinates are ready in the device memory.
 * \param[in]     locality        Copy coordinates for local or non-local atoms.
 * \param[in]     gridId          Index of the grid being converted.
 * \param[in]     numColumnsMax   Maximum number of columns in the grid.
 */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_x_to_nbat_x(const Nbnxm::Grid gmx_unused& grid,
                           bool gmx_unused setFillerCoords,
                           NbnxmGpu gmx_unused* gpu_nbv,
                           DeviceBuffer<gmx::RVec> gmx_unused d_x,
                           GpuEventSynchronizer gmx_unused* xReadyOnDevice,
                           gmx::AtomLocality gmx_unused locality,
                           int gmx_unused gridId,
                           int gmx_unused numColumnsMax) CUDA_FUNC_TERM;
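/* Per-grid conversion sketch: after nbnxn_gpu_init_x_to_nbat_x() on a search
 * step, the conversion is launched once per grid (the loop shape and the
 * `grids`/`numGrids` variables are assumptions of this example, not part of
 * this header):
 *
 *     for (int gridId = 0; gridId < numGrids; gridId++)
 *     {
 *         nbnxn_gpu_x_to_nbat_x(grids[gridId], setFillerCoords, gpu_nbv, d_x,
 *                               xReadyOnDevice, locality, gridId, numColumnsMax);
 *     }
 */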
/*! \brief Sync the nonlocal stream with dependent tasks in the local queue.
 * \param[in] nb                  The nonbonded data GPU structure
 * \param[in] interactionLocality Local or NonLocal sync point
 */
CUDA_FUNC_QUALIFIER
void nbnxnInsertNonlocalGpuDependency(const NbnxmGpu gmx_unused* nb,
                                      gmx::InteractionLocality gmx_unused interactionLocality) CUDA_FUNC_TERM;
/*! \brief Set up internal flags that indicate what type of short-range work there is.
 *
 * As nonbondeds and bondeds share input/output buffers and GPU queues,
 * both are considered when checking for work in the current domain.
 *
 * This function is expected to be called every time the work-distribution
 * can change (i.e. at search/domain decomposition steps).
 *
 * \param[inout] nb        Pointer to the nonbonded GPU data structure
 * \param[in]    gpuBonded Pointer to the GPU bonded data structure
 * \param[in]    iLocality Interaction locality identifier
 */
GPU_FUNC_QUALIFIER
void setupGpuShortRangeWork(NbnxmGpu gmx_unused* nb,
                            const gmx::GpuBonded gmx_unused* gpuBonded,
                            gmx::InteractionLocality gmx_unused iLocality) GPU_FUNC_TERM;
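/* Typical call pattern (sketch; `isSearchStep` is a hypothetical flag of the
 * caller indicating a search/domain-decomposition step):
 *
 *     if (isSearchStep)
 *     {
 *         setupGpuShortRangeWork(nb, gpuBonded, gmx::InteractionLocality::Local);
 *         setupGpuShortRangeWork(nb, gpuBonded, gmx::InteractionLocality::NonLocal);
 *     }
 */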
/*! \brief Returns true if there is GPU short-range work for the given atom locality.
 *
 * Note that, unlike nonbonded tasks, bonded tasks are not split into local/nonlocal;
 * therefore, if there are GPU offloaded bonded interactions, this function will return
 * true for both the local and the nonlocal atom range.
 *
 * \param[inout] nb        Pointer to the nonbonded GPU data structure
 * \param[in]    aLocality Atom locality identifier
 */
GPU_FUNC_QUALIFIER
bool haveGpuShortRangeWork(const NbnxmGpu gmx_unused* nb, gmx::AtomLocality gmx_unused aLocality)
        GPU_FUNC_TERM_WITH_RETURN(false);
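/* Example gate on the flags set by setupGpuShortRangeWork() (illustrative):
 *
 *     if (haveGpuShortRangeWork(nb, gmx::AtomLocality::NonLocal))
 *     {
 *         // issue non-local copies and kernel launches only when there is work
 *     }
 */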
/*! \brief Initialization for F buffer operations on GPU */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_init_add_nbat_f_to_f(const int gmx_unused* cell,
                                    NbnxmGpu gmx_unused* gpu_nbv,
                                    int gmx_unused natoms_total,
                                    GpuEventSynchronizer gmx_unused* localReductionDone) CUDA_FUNC_TERM;
/*! \brief Force buffer operations on GPU.
 *
 * Transforms non-bonded forces into plain rvec format and adds all the force components to the
 * total force buffer.
 *
 * \param[in] atomLocality        If the reduction should be performed on local or non-local atoms.
 * \param[in] totalForcesDevice   Device buffer to accumulate resulting force.
 * \param[in] gpu_nbv             The NBNXM GPU data structure.
 * \param[in] pmeForcesDevice     Device buffer with PME forces.
 * \param[in] dependencyList      List of synchronizers that represent the dependencies the reduction task needs to sync on.
 * \param[in] atomStart           Index of the first atom to reduce forces for.
 * \param[in] numAtoms            Number of atoms to reduce forces for.
 * \param[in] useGpuFPmeReduction Whether PME forces should be added.
 * \param[in] accumulateForce     Whether there are useful data already in the total force buffer.
 */
CUDA_FUNC_QUALIFIER
void nbnxn_gpu_add_nbat_f_to_f(gmx::AtomLocality gmx_unused atomLocality,
                               DeviceBuffer<gmx::RVec> gmx_unused totalForcesDevice,
                               NbnxmGpu gmx_unused* gpu_nbv,
                               void gmx_unused* pmeForcesDevice,
                               gmx::ArrayRef<GpuEventSynchronizer* const> gmx_unused dependencyList,
                               int gmx_unused atomStart,
                               int gmx_unused numAtoms,
                               bool gmx_unused useGpuFPmeReduction,
                               bool gmx_unused accumulateForce) CUDA_FUNC_TERM;
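/* Reduction sketch: convert nbat-format forces and accumulate them into the
 * total force buffer, optionally folding in PME forces (all variables are
 * assumed to be prepared by the caller; the `d_fTotal`/`d_fPme` names are
 * illustrative):
 *
 *     nbnxn_gpu_add_nbat_f_to_f(gmx::AtomLocality::Local, d_fTotal, gpu_nbv,
 *                               d_fPme, dependencyList, atomStart, numAtoms,
 *                               useGpuFPmeReduction, accumulateForce);
 */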
/*! \brief Sync the CPU thread on the coordinate copy to the device.
 * \param[in] nb The nonbonded data GPU structure
 */
CUDA_FUNC_QUALIFIER
void nbnxn_wait_x_on_device(NbnxmGpu gmx_unused* nb) CUDA_FUNC_TERM;
} // namespace Nbnxm

#endif