Make cl_nbparam into a struct
[gromacs.git] / src / gromacs / nbnxm / cuda / nbnxm_cuda_types.h
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2012, The GROMACS development team.
 * Copyright (c) 2013-2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Data types used internally in the nbnxn_cuda module.
 *
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \ingroup module_nbnxm
 */
#ifndef NBNXM_CUDA_TYPES_H
#define NBNXM_CUDA_TYPES_H

#include "gromacs/gpu_utils/cuda_arch_utils.cuh"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.h"
#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/nbnxm/gpu_types_common.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/enumerationhelpers.h"
/*! \brief Macro defining the default for the prune kernel's j4 processing concurrency.
 *
 * The GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY macro allows compile-time override.
 */
#ifndef GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY
#    define GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY 4
#endif
/*! \brief Default for the prune kernel's j4 processing concurrency.
 *
 * Initialized using the #GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY macro which allows compile-time override.
 */
const int c_cudaPruneKernelJ4Concurrency = GMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY;
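/* A minimal sketch of the compile-time override mentioned above (the macro
 * name is the one defined here; the build line itself is illustrative only):
 *
 *   nvcc -DGMX_NBNXN_PRUNE_KERNEL_J4_CONCURRENCY=8 -c <prune kernel source>
 */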
/* TODO: consider moving this to kernel_utils */
/* Convenience defines */
/*! \brief cluster size = number of atoms per cluster. */
static constexpr int c_clSize = c_nbnxnGpuClusterSize;

/* All structs prefixed with "cu_" hold data used in GPU calculations and
 * are passed to the kernels, except cu_timers_t. */
/*! \cond */
typedef struct cu_atomdata cu_atomdata_t;
/*! \endcond */
/** \internal
 * \brief Staging area for temporary data downloaded from the GPU.
 *
 * The energies/shift forces get downloaded here first, before getting added
 * to the CPU-side aggregate values.
 */
struct nb_staging_t
{
    //! LJ energy
    float* e_lj = nullptr;
    //! electrostatic energy
    float* e_el = nullptr;
    //! shift forces
    float3* fshift = nullptr;
};
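/* A minimal sketch of how the staging area is meant to be used, assuming a
 * hypothetical caller holding an NbnxmGpu* nb and CPU-side accumulators
 * (illustrative only; only the member names come from this header):
 *
 *   nb_staging_t& nbst = nb->nbst;
 *   // after the D2H copies of the energy accumulators have completed:
 *   *enerLj += *nbst.e_lj;
 *   *enerEl += *nbst.e_el;
 */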
/** \internal
 * \brief Nonbonded atom data - both inputs and outputs.
 */
struct cu_atomdata
{
    //! number of atoms
    int natoms;
    //! number of local atoms
    int natoms_local;
    //! allocation size for the atom data (xq, f)
    int nalloc;

    //! atom coordinates + charges, size natoms
    DeviceBuffer<float4> xq;
    //! force output array, size natoms
    DeviceBuffer<float3> f;

    //! LJ energy output, size 1
    DeviceBuffer<float> e_lj;
    //! Electrostatics energy output, size 1
    DeviceBuffer<float> e_el;

    //! shift forces
    DeviceBuffer<float3> fshift;

    //! number of atom types
    int ntypes;
    //! atom type indices, size natoms
    DeviceBuffer<int> atom_types;
    //! sqrt(c6), sqrt(c12), size natoms
    DeviceBuffer<float2> lj_comb;

    //! shifts
    DeviceBuffer<float3> shift_vec;
    //! true if the shift vector has been uploaded
    bool bShiftVecUploaded;
};
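/* A minimal allocation sketch for the DeviceBuffer members above, assuming the
 * allocateDeviceBuffer() helper declared in gromacs/gpu_utils/devicebuffer.h
 * and a caller-provided deviceContext (illustrative only):
 *
 *   cu_atomdata_t* atdat = ...;
 *   atdat->nalloc = numAtoms;
 *   allocateDeviceBuffer(&atdat->xq, numAtoms, deviceContext);
 *   allocateDeviceBuffer(&atdat->f, numAtoms, deviceContext);
 */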
/** \internal
 * \brief Pair list data.
 */
using cu_plist_t = Nbnxm::gpu_plist;

/** \internal
 * \brief Typedef of actual timer type.
 */
typedef struct Nbnxm::gpu_timers_t cu_timers_t;

class GpuEventSynchronizer;
/*! \internal
 * \brief Main data structure for CUDA nonbonded force calculations.
 */
struct NbnxmGpu
{
    /*! \brief GPU device context.
     *
     * \todo Make it constant reference, once NbnxmGpu is a proper class.
     */
    const DeviceContext* deviceContext_;
    /*! \brief true if doing both local/non-local NB work on GPU */
    bool bUseTwoStreams = false;
    /*! \brief atom data */
    cu_atomdata_t* atdat = nullptr;
    /*! \brief f buf ops cell index mapping */
    int* cell = nullptr;
    /*! \brief number of indices in cell buffer */
    int ncell = 0;
    /*! \brief number of indices allocated in cell buffer */
    int ncell_alloc = 0;
    /*! \brief array of atom indices */
    int* atomIndices = nullptr;
    /*! \brief size of atom indices */
    int atomIndicesSize = 0;
    /*! \brief size of atom indices allocated in device buffer */
    int atomIndicesSize_alloc = 0;
    /*! \brief x buf ops num of atoms */
    int* cxy_na = nullptr;
    /*! \brief number of elements in cxy_na */
    int ncxy_na = 0;
    /*! \brief number of elements allocated in device buffer */
    int ncxy_na_alloc = 0;
    /*! \brief x buf ops cell index mapping */
    int* cxy_ind = nullptr;
    /*! \brief number of elements in cxy_ind */
    int ncxy_ind = 0;
    /*! \brief number of elements allocated in device buffer */
    int ncxy_ind_alloc = 0;
    /*! \brief parameters required for the non-bonded calc. */
    NBParamGpu* nbparam = nullptr;
    /*! \brief pair-list data structures (local and non-local) */
    gmx::EnumerationArray<Nbnxm::InteractionLocality, cu_plist_t*> plist = { { nullptr } };
    /*! \brief staging area where fshift/energies get downloaded */
    nb_staging_t nbst;
    /*! \brief local and non-local GPU streams */
    gmx::EnumerationArray<Nbnxm::InteractionLocality, const DeviceStream*> deviceStreams;

    /*! \brief Events used for synchronization */
    /*! \{ */
    /*! \brief Event triggered when the non-local non-bonded
     * kernel is done (and the local transfer can proceed) */
    cudaEvent_t nonlocal_done = nullptr;
    /*! \brief Event triggered when the tasks issued in the local
     * stream that need to precede the non-local force or buffer
     * operation calculations are done (e.g. f buffer zeroing, local
     * x/q H2D, buffer op initialization in the local stream that is
     * required also by the non-local stream) */
    cudaEvent_t misc_ops_and_local_H2D_done = nullptr;
    /*! \} */
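    /* A minimal synchronization sketch for the events above, assuming raw
     * cudaStream_t handles localStream/nonLocalStream provided by the caller
     * (illustrative only; the calls themselves are standard CUDA runtime API):
     *
     *   cudaEventRecord(nb->nonlocal_done, nonLocalStream);
     *   cudaStreamWaitEvent(localStream, nb->nonlocal_done, 0);
     */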
    /*! \brief True if there is work for the current domain in the
     * respective locality.
     *
     * This includes local/nonlocal GPU work, either bonded or
     * nonbonded, scheduled to be executed in the current
     * domain. As long as bonded work is not split up into
     * local/nonlocal, if there is bonded GPU work, both flags
     * will be true. */
    gmx::EnumerationArray<Nbnxm::InteractionLocality, bool> haveWork = { { false } };
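    /* A minimal sketch of querying the per-locality flags above; an
     * EnumerationArray is indexed directly by the enum value (hypothetical
     * caller code):
     *
     *   if (nb->haveWork[Nbnxm::InteractionLocality::NonLocal])
     *   {
     *       // schedule the non-local kernel
     *   }
     */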
    /*! \brief Pointer to event synchronizer triggered when the local
     * GPU buffer ops / reduction is complete
     *
     * \note The synchronizer is managed outside of this module
     * in StatePropagatorDataGpu.
     */
    GpuEventSynchronizer* localFReductionDone = nullptr;

    /*! \brief Event triggered when non-local coordinate buffer
     * has been copied from device to host. */
    GpuEventSynchronizer* xNonLocalCopyD2HDone = nullptr;

    /* NOTE: With current CUDA versions (<=5.0) timing doesn't work with multiple
     * concurrent streams, so we won't time if both l/nl work is done on GPUs.
     * Timer init/uninit is still done even with timing off so only the condition
     * setting bDoTime needs to be changed if this CUDA "feature" gets fixed. */
    /*! \brief True if event-based timing is enabled. */
    bool bDoTime = false;
    /*! \brief CUDA event-based timers. */
    cu_timers_t* timers = nullptr;
    /*! \brief Timing data. TODO: deprecate this and query timers for accumulated data instead */
    gmx_wallclock_gpu_nbnxn_t* timings = nullptr;
};
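/* A minimal sketch of selecting a stream by locality from NbnxmGpu
 * (hypothetical caller code; dereferencing assumes the streams were set up):
 *
 *   const DeviceStream& localStream = *nb->deviceStreams[Nbnxm::InteractionLocality::Local];
 *   if (nb->bUseTwoStreams)
 *   {
 *       const DeviceStream& nonLocalStream = *nb->deviceStreams[Nbnxm::InteractionLocality::NonLocal];
 *   }
 */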
#endif /* NBNXM_CUDA_TYPES_H */