/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

// FIXME: remove the "__" prefix in front of the group def when we move the
//        nonbonded code into separate dir.

/*! \libinternal \defgroup __module_nbnxm Short-range non-bonded interaction module
 * \ingroup group_mdrun
 *
 * \brief Computes forces and energies for short-range pair-interactions
 * based on the Verlet algorithm. The algorithm uses pair-lists generated
 * at fixed intervals as well as various flavors of pair interaction kernels
 * implemented for a wide range of CPU and GPU architectures.
 *
 * The module includes support for flavors of Coulomb and Lennard-Jones interaction
 * treatment implemented for a large range of SIMD instruction sets for CPU
 * architectures as well as in CUDA and OpenCL for GPU architectures.
 * Additionally, there is a reference non-SIMD CPU kernel and a reference
 * plain-C CPU kernel that operates on the GPU pair-list layout (used for GPU emulation).
 *
 * The implementation of the kernels is based on the cluster non-bonded algorithm
 * which in the code is referred to as the NxM algorithm ("nbnxm_" prefix);
 * for details of the algorithm see DOI:10.1016/j.cpc.2013.06.003.
 *
 * Algorithmically, the non-bonded computation has two different modes:
 * A "classical" mode: generate a list every nstlist steps containing at least
 * all atom pairs up to a distance of rlistOuter and compute pair interactions
 * for all pairs that are within the interaction cut-off.
 * A "dynamic pruning" mode: generate an "outer-list" up to cut-off rlistOuter
 * every nstlist steps and prune the outer-list using a cut-off of rlistInner
 * every nstlistPrune steps to obtain a smaller "inner-list". This
 * results in fewer interaction computations and allows for a larger nstlist.
 * On a GPU, this dynamic pruning is performed in a rolling fashion, pruning
 * only a sub-part of the list each (second) step. This way it can often
 * overlap with integration and constraints on the CPU.
 * Currently a simple heuristic determines which mode will be used; a usage
 * sketch of the pruning-related calls follows this comment block.
 *
 * TODO: add a summary list and brief descriptions of the different submodules:
 * search, CPU kernels, GPU glue code + kernels.
 *
 * \author Berk Hess <hess@kth.se>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \author Anca Hamuraru <anca@streamcomputing.eu>
 * \author Teemu Virolainen <teemu@streamcomputing.eu>
 * \author Dimitrios Karkoulis <dimitris.karkoulis@gmail.com>
 *
 * TODO: add more authors!
 */
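
/* Illustrative sketch, not part of the library API: how the "dynamic pruning"
 * mode described above maps onto the nonbonded_verlet_t interface declared
 * later in this file. The object nbv, the step counter and the shift vectors
 * are assumed to be provided by the caller.
 *
 * \code
 * if (nbv->pairlistIsSimple())
 * {
 *     if (nbv->isDynamicPruningStepCpu(step))
 *     {
 *         // Prune the CPU outer-list down to the inner cut-off for this step
 *         nbv->dispatchPruneKernelCpu(gmx::InteractionLocality::Local, shift_vec);
 *     }
 * }
 * else if (nbv->useGpu() && nbv->isDynamicPruningStepGpu(step))
 * {
 *     // Rolling pruning on the GPU, which can overlap with CPU work
 *     nbv->dispatchPruneKernelGpu(step);
 * }
 * \endcode
 */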

/*! \libinternal
 * \defgroup module_nbnxm Non-bonded pair interactions
 * \ingroup group_mdrun
 * \brief
 * Implements non-bonded pair interaction functionality for NxM atom clusters.
 *
 * This module provides methods to compute non-bonded pair interactions very
 * efficiently on CPUs as well as on accelerators. It also provides a method to
 * construct the NxM atom-cluster pair-list required for computing these
 * non-bonded interactions.
 */

/*! \libinternal \file
 *
 * \brief This file contains the public interface of the nbnxm module
 * that implements the NxM atom cluster non-bonded algorithm to efficiently
 * compute pair forces.
 *
 * \author Berk Hess <hess@kth.se>
 * \author Szilárd Páll <pall.szilard@gmail.com>
 *
 * \inlibraryapi
 * \ingroup module_nbnxm
 */

#ifndef GMX_NBNXM_NBNXM_H
#define GMX_NBNXM_NBNXM_H

#include <memory>

#include "gromacs/gpu_utils/devicebuffer_datatype.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdtypes/locality.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/enumerationhelpers.h"
#include "gromacs/utility/real.h"

struct gmx_device_info_t;
struct gmx_domdec_zones_t;
struct gmx_enerdata_t;
struct gmx_hw_info_t;
struct gmx_mtop_t;
struct NbnxmGpu;
struct gmx_wallcycle;
struct interaction_const_t;
struct nbnxn_atomdata_t;
struct nonbonded_verlet_t;
class PairSearch;
class PairlistSets;
struct t_commrec;
struct t_lambda;
struct t_mdatoms;
struct t_nrnb;
struct t_forcerec;
struct t_inputrec;

class GpuEventSynchronizer;

namespace gmx
{
class ForceWithShiftForces;
class GpuBonded;
template<typename>
class ListOfLists;
class MDLogger;
template<typename>
class Range;
class StepWorkload;
class UpdateGroupsCog;
} // namespace gmx

namespace Nbnxm
{
enum class KernelType;
} // namespace Nbnxm

namespace Nbnxm
{

/*! \brief Nonbonded NxN kernel types: plain C, CPU SIMD, GPU, GPU emulation */
enum class KernelType : int
{
    NotSet = 0,
    Cpu4x4_PlainC,
    Cpu4xN_Simd_4xN,
    Cpu4xN_Simd_2xNN,
    Gpu8x8x8,
    Cpu8x8x8_PlainC,
    Count
};

/*! \brief Ewald exclusion types */
enum class EwaldExclusionType : int
{
    NotSet = 0,
    Table,
    Analytical,
    DecidedByGpuModule
};

/*! \brief The non-bonded setup, also affects the pairlist construction kernel */
struct KernelSetup
{
    //! The non-bonded type, also affects the pairlist construction kernel
    KernelType kernelType = KernelType::NotSet;
    //! Ewald exclusion computation handling type, currently only used for CPU
    EwaldExclusionType ewaldExclusionType = EwaldExclusionType::NotSet;
};

/*! \brief Return a string identifying the kernel type.
 *
 * \param [in] kernelType   nonbonded kernel type, takes values from the Nbnxm::KernelType enum
 * \returns                 a string identifying the kernel corresponding to the type passed as argument
 */
const char* lookup_kernel_name(Nbnxm::KernelType kernelType);

} // namespace Nbnxm
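
/* Illustrative sketch: querying which kernel flavor a nonbonded_verlet_t
 * object (declared below) was set up with. The object nbv is assumed to be
 * provided by the caller; the print statement is only for illustration.
 *
 * \code
 * const Nbnxm::KernelSetup& setup      = nbv->kernelSetup();
 * const char*               kernelName = Nbnxm::lookup_kernel_name(setup.kernelType);
 * fprintf(stderr, "Using non-bonded kernel: %s\n", kernelName);
 * \endcode
 */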

/*! \brief Flag to tell the nonbonded kernels whether to clear the force output buffers */
enum
{
    enbvClearFNo,
    enbvClearFYes
};

/*! \libinternal
 *  \brief Top-level non-bonded data structure for the Verlet-type cut-off scheme. */
struct nonbonded_verlet_t
{
public:
    //! Constructs an object from its components
    nonbonded_verlet_t(std::unique_ptr<PairlistSets>     pairlistSets,
                       std::unique_ptr<PairSearch>       pairSearch,
                       std::unique_ptr<nbnxn_atomdata_t> nbat,
                       const Nbnxm::KernelSetup&         kernelSetup,
                       NbnxmGpu*                         gpu_nbv,
                       gmx_wallcycle*                    wcycle);

    ~nonbonded_verlet_t();

    //! Returns whether a GPU is used for the non-bonded calculations
    bool useGpu() const { return kernelSetup_.kernelType == Nbnxm::KernelType::Gpu8x8x8; }

    //! Returns whether a GPU is emulated for the non-bonded calculations
    bool emulateGpu() const
    {
        return kernelSetup_.kernelType == Nbnxm::KernelType::Cpu8x8x8_PlainC;
    }

    //! Return whether the pairlist is of simple, CPU type
    bool pairlistIsSimple() const { return !useGpu() && !emulateGpu(); }

    //! Initialize the pair list sets, TODO this should be private
    void initPairlistSets(bool haveMultipleDomains);

    //! Returns the order of the local atoms on the grid
    gmx::ArrayRef<const int> getLocalAtomOrder() const;

    //! Sets the order of the local atoms to the grid atom ordering
    void setLocalAtomOrder();

    //! Returns the index position of the atoms on the search grid
    gmx::ArrayRef<const int> getGridIndices() const;

    /*! \brief Constructs the pairlist for the given locality
     *
     * When there are no non-self exclusions, \p exclusions can be empty.
     * Otherwise the number of lists in \p exclusions should match the number
     * of atoms when not using DD, or the total number of atoms in the i-zones
     * when using DD.
     *
     * \param[in]     iLocality   The interaction locality: local or non-local
     * \param[in]     exclusions  Lists of exclusions for every atom.
     * \param[in]     step        Used to set the list creation step
     * \param[in,out] nrnb        Flop accounting struct, can be nullptr
     */
    void constructPairlist(gmx::InteractionLocality     iLocality,
                           const gmx::ListOfLists<int>& exclusions,
                           int64_t                      step,
                           t_nrnb*                      nrnb);
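
    /* Illustrative sketch: constructing the pairlists at a search step. The
     * object nbv, the exclusions, the step counter, the flop counter and the
     * havePPDomainDecomposition flag are assumed to come from the caller.
     *
     * \code
     * nbv->constructPairlist(gmx::InteractionLocality::Local, exclusions, step, nrnb);
     * if (havePPDomainDecomposition)
     * {
     *     nbv->constructPairlist(gmx::InteractionLocality::NonLocal, exclusions, step, nrnb);
     * }
     * \endcode
     */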

    //! Updates all the atom properties in Nbnxm
    void setAtomProperties(const t_mdatoms& mdatoms, gmx::ArrayRef<const int> atomInfo);

    /*! \brief Convert the coordinates to NBNXM format for the given locality.
     *
     * The API function for the transformation of the coordinates from one layout to another.
     *
     * \param[in] locality     Whether coordinates for local or non-local atoms should be transformed.
     * \param[in] fillLocal    If the coordinates for filler particles should be zeroed.
     * \param[in] coordinates  Coordinates in plain rvec format to be transformed.
     */
    void convertCoordinates(gmx::AtomLocality locality, bool fillLocal, gmx::ArrayRef<const gmx::RVec> coordinates);
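
    /* Illustrative sketch: converting the local coordinates to the NBNXM
     * layout before dispatching a kernel; the second argument requests zeroing
     * of filler-particle coordinates. The object nbv and the coordinate array
     * x are assumed to come from the caller.
     *
     * \code
     * nbv->convertCoordinates(gmx::AtomLocality::Local, true, x);
     * \endcode
     */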

    /*! \brief Convert the coordinates to NBNXM format on the GPU for the given locality
     *
     * The API function for the transformation of the coordinates from one layout to another in the GPU memory.
     *
     * \param[in] locality        Whether coordinates for local or non-local atoms should be transformed.
     * \param[in] fillLocal       If the coordinates for filler particles should be zeroed.
     * \param[in] d_x             GPU coordinates buffer in plain rvec format to be transformed.
     * \param[in] xReadyOnDevice  Event synchronizer indicating that the coordinates are ready in the device memory.
     */
    void convertCoordinatesGpu(gmx::AtomLocality     locality,
                               bool                  fillLocal,
                               DeviceBuffer<float>   d_x,
                               GpuEventSynchronizer* xReadyOnDevice);

    //! Init for GPU version of setup coordinates in Nbnxm
    void atomdata_init_copy_x_to_nbat_x_gpu();

    //! Sync the nonlocal GPU stream with dependent tasks in the local queue.
    void insertNonlocalGpuDependency(gmx::InteractionLocality interactionLocality);

    //! Returns a reference to the pairlist sets
    const PairlistSets& pairlistSets() const { return *pairlistSets_; }

    //! Returns whether step is a dynamic list pruning step, for CPU lists
    bool isDynamicPruningStepCpu(int64_t step) const;

    //! Returns whether step is a dynamic list pruning step, for GPU lists
    bool isDynamicPruningStepGpu(int64_t step) const;

    //! Dispatches the dynamic pruning kernel for the given locality, for CPU lists
    void dispatchPruneKernelCpu(gmx::InteractionLocality iLocality, const rvec* shift_vec);

    //! Dispatches the dynamic pruning kernel for GPU lists
    void dispatchPruneKernelGpu(int64_t step);

    //! \brief Executes the non-bonded kernel on the CPU or launches it on the GPU
    void dispatchNonbondedKernel(gmx::InteractionLocality   iLocality,
                                 const interaction_const_t& ic,
                                 const gmx::StepWorkload&   stepWork,
                                 int                        clearF,
                                 const t_forcerec&          fr,
                                 gmx_enerdata_t*            enerd,
                                 t_nrnb*                    nrnb);
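
    /* Illustrative sketch: dispatching the local non-bonded kernel and asking
     * it to clear the force output buffers first, using the enbvClearF* flags
     * declared above. The objects nbv, ic, stepWork, fr, enerd and nrnb are
     * assumed to come from the caller.
     *
     * \code
     * nbv->dispatchNonbondedKernel(gmx::InteractionLocality::Local, ic, stepWork,
     *                              enbvClearFYes, fr, enerd, nrnb);
     * \endcode
     */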

    //! Executes the non-bonded free-energy kernel, always runs on the CPU
    void dispatchFreeEnergyKernel(gmx::InteractionLocality   iLocality,
                                  const t_forcerec*          fr,
                                  rvec                       x[],
                                  gmx::ForceWithShiftForces* forceWithShiftForces,
                                  const t_mdatoms&           mdatoms,
                                  t_lambda*                  fepvals,
                                  real*                      lambda,
                                  gmx_enerdata_t*            enerd,
                                  const gmx::StepWorkload&   stepWork,
                                  t_nrnb*                    nrnb);

    /*! \brief Add the forces stored in nbat to f, zeros the forces in nbat
     *
     * \param[in]     locality  Local or non-local
     * \param[in,out] force     Force to be added to
     */
    void atomdata_add_nbat_f_to_f(gmx::AtomLocality locality, gmx::ArrayRef<gmx::RVec> force);
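
    /* Illustrative sketch: after the CPU kernel has run, reduce the
     * NBNXM-layout forces into the plain rvec force buffer for the local
     * atoms. The object nbv and the force buffer are assumed to come from the
     * caller.
     *
     * \code
     * nbv->atomdata_add_nbat_f_to_f(gmx::AtomLocality::Local, force);
     * \endcode
     */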

    /*! \brief Add the forces stored in nbat to the total force using GPU buffer ops
     *
     * \param[in]     locality             Local or non-local
     * \param[in,out] totalForcesDevice    Force to be added to
     * \param[in]     forcesPmeDevice      Device buffer with PME forces
     * \param[in]     dependencyList       List of synchronizers that represent the dependencies the reduction task needs to sync on.
     * \param[in]     useGpuFPmeReduction  Whether PME forces should be added
     * \param[in]     accumulateForce      If the total force buffer already contains data
     */
    void atomdata_add_nbat_f_to_f_gpu(gmx::AtomLocality                          locality,
                                      DeviceBuffer<float>                        totalForcesDevice,
                                      void*                                      forcesPmeDevice,
                                      gmx::ArrayRef<GpuEventSynchronizer* const> dependencyList,
                                      bool                                       useGpuFPmeReduction,
                                      bool                                       accumulateForce);

    /*! \brief Outer body of function to perform initialization for F buffer operations on GPU.
     *
     * \param localReductionDone  Pointer to an event synchronizer that marks the completion of the local f buffer ops kernel.
     */
    void atomdata_init_add_nbat_f_to_f_gpu(GpuEventSynchronizer* localReductionDone);
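
    /* Illustrative sketch: launching the GPU force-buffer reduction for the
     * local atoms, adding PME forces and accumulating into existing data. The
     * device buffers d_ftotal and d_fpme, the synchronizer list and a prior
     * call to atomdata_init_add_nbat_f_to_f_gpu() are assumed to be handled by
     * the caller.
     *
     * \code
     * nbv->atomdata_add_nbat_f_to_f_gpu(gmx::AtomLocality::Local, d_ftotal, d_fpme,
     *                                   dependencyList, true, true);
     * \endcode
     */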

    /*! \brief Returns the GPU pointer to f in rvec format */
    void* get_gpu_frvec();

    //! Return the kernel setup
    const Nbnxm::KernelSetup& kernelSetup() const { return kernelSetup_; }

    //! Returns the inner radius for the pair list
    real pairlistInnerRadius() const;

    //! Returns the outer radius for the pair list
    real pairlistOuterRadius() const;

    //! Changes the pair-list outer and inner radius
    void changePairlistRadii(real rlistOuter, real rlistInner);

    //! Set up internal flags that indicate what type of short-range work there is.
    void setupGpuShortRangeWork(const gmx::GpuBonded* gpuBonded, gmx::InteractionLocality iLocality);

    // TODO: Make all data members private
public:
    //! All data related to the pair lists
    std::unique_ptr<PairlistSets> pairlistSets_;
    //! Working data for constructing the pairlists
    std::unique_ptr<PairSearch> pairSearch_;
    //! Atom data
    std::unique_ptr<nbnxn_atomdata_t> nbat;

private:
    //! The non-bonded setup, also affects the pairlist construction kernel
    Nbnxm::KernelSetup kernelSetup_;
    //! \brief Pointer to wallcycle structure.
    gmx_wallcycle* wcycle_;

public:
    //! GPU Nbnxm data, only used with a physical GPU (TODO: use unique_ptr)
    NbnxmGpu* gpu_nbv;
};

namespace Nbnxm
{

/*! \brief Creates an Nbnxm object */
std::unique_ptr<nonbonded_verlet_t> init_nb_verlet(const gmx::MDLogger&     mdlog,
                                                   gmx_bool                 bFEP_NonBonded,
                                                   const t_inputrec*        ir,
                                                   const t_forcerec*        fr,
                                                   const t_commrec*         cr,
                                                   const gmx_hw_info_t&     hardwareInfo,
                                                   const gmx_device_info_t* deviceInfo,
                                                   const gmx_mtop_t*        mtop,
                                                   matrix                   box,
                                                   gmx_wallcycle*           wcycle);

} // namespace Nbnxm

/*! \brief Put the atoms on the pair search grid.
 *
 * Only atoms with indices within \p atomRange in x are put on the grid.
 * When \p updateGroupsCog != nullptr, atoms are put on the grid
 * based on the center of geometry of the group they belong to.
 * Atoms or COGs of groups should be within the bounding box provided,
 * this is checked in debug builds when not using update groups.
 * The atom density is used to determine the grid size when \p gridIndex = 0.
 * When \p atomDensity <= 0, the density is determined from atomEnd-atomStart
 * and the bounding box corners.
 * With domain decomposition, part of the atoms might have migrated,
 * but have not been removed yet. This count is given by \p numAtomsMoved.
 * When \p move[i] < 0 particle i has migrated and will not be put on the grid.
 *
 * \param[in,out] nb_verlet        The non-bonded object
 * \param[in]     box              Box used for periodic distance calculations
 * \param[in]     gridIndex        The index of the grid to spread to, always 0 except with test particle insertion
 * \param[in]     lowerCorner      Atom groups to be gridded should have coordinates >= this corner
 * \param[in]     upperCorner      Atom groups to be gridded should have coordinates <= this corner
 * \param[in]     updateGroupsCog  Centers of geometry for update groups, pass nullptr when not using update groups
 * \param[in]     atomRange        Range of atoms to grid
 * \param[in]     atomDensity      An estimate of the atom density, used for performance optimization and only with \p gridIndex = 0
 * \param[in]     atomInfo         Atom information flags
 * \param[in]     x                Coordinates for atoms to grid
 * \param[in]     numAtomsMoved    The number of atoms that will move to another domain, pass 0 without DD
 * \param[in]     move             Move flags for atoms, pass nullptr without DD
 */
void nbnxn_put_on_grid(nonbonded_verlet_t*            nb_verlet,
                       const matrix                   box,
                       int                            gridIndex,
                       const rvec                     lowerCorner,
                       const rvec                     upperCorner,
                       const gmx::UpdateGroupsCog*    updateGroupsCog,
                       gmx::Range<int>                atomRange,
                       real                           atomDensity,
                       gmx::ArrayRef<const int>       atomInfo,
                       gmx::ArrayRef<const gmx::RVec> x,
                       int                            numAtomsMoved,
                       const int*                     move);
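
/* Illustrative sketch: gridding all local atoms on grid 0 without domain
 * decomposition and without update groups, letting the density be estimated
 * from the bounding box. The unique_ptr nbv (e.g. from Nbnxm::init_nb_verlet),
 * the box, the corner vectors, atomInfo, x and numAtoms are assumed to come
 * from the caller.
 *
 * \code
 * nbnxn_put_on_grid(nbv.get(), box, 0, lowerCorner, upperCorner,
 *                   nullptr,                      // no update groups
 *                   gmx::Range<int>(0, numAtoms), // grid the whole atom range
 *                   -1,                           // estimate density from the bounding box
 *                   atomInfo, x,
 *                   0,                            // no atoms have migrated (no DD)
 *                   nullptr);                     // no move flags (no DD)
 * \endcode
 */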

/*! \brief As nbnxn_put_on_grid, but for the non-local atoms
 *
 * with domain decomposition. Should be called after calling
 * nbnxn_put_on_grid for the local atoms / home zone.
 */
void nbnxn_put_on_grid_nonlocal(nonbonded_verlet_t*              nb_verlet,
                                const struct gmx_domdec_zones_t* zones,
                                gmx::ArrayRef<const int>         atomInfo,
                                gmx::ArrayRef<const gmx::RVec>   x);

#endif // GMX_NBNXM_NBNXM_H