Remove extra semicolons causing clang warnings.
[gromacs.git] / src / gromacs / mdlib / nbnxn_pairlist.h
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#ifndef _nbnxn_pairlist_h
#define _nbnxn_pairlist_h

#include "config.h"

#include <cstddef>

#include <vector>

#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/math/vectypes.h"
#include "gromacs/mdlib/nbnxn_consts.h"
#include "gromacs/mdtypes/nblist.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/bitmask.h"
#include "gromacs/utility/defaultinitializationallocator.h"
#include "gromacs/utility/real.h"
struct NbnxnPairlistCpuWork;
struct NbnxnPairlistGpuWork;
struct tMPI_Atomic;
/* Convenience type for vector with aligned memory */
template<typename T>
using AlignedVector = std::vector<T, gmx::AlignedAllocator<T>>;

/* Convenience type for vector that avoids initialization at resize() */
template<typename T>
using FastVector = std::vector<T, gmx::DefaultInitializationAllocator<T>>;
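/* Illustrative sketch (not part of the original header): FastVector is meant
 * for buffers that are resized every search step and then fully overwritten,
 * so skipping value-initialization at resize() saves a pass over the memory:
 *
 *     FastVector<nbnxn_cj_t> cjList;
 *     cjList.resize(ncj);  // elements are left default-initialized, not zeroed
 */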
/*! \cond INTERNAL */

/*! \brief The setup for generating and pruning the nbnxn pair list.
 *
 * Without dynamic pruning rlistOuter=rlistInner.
 */
struct NbnxnListParameters
{
    /*! \brief Constructor producing a struct with dynamic pruning disabled */
    NbnxnListParameters(real rlist) :
        useDynamicPruning(false),
        nstlistPrune(-1),
        rlistOuter(rlist),
        rlistInner(rlist),
        numRollingParts(1)
    {
    }

    bool useDynamicPruning; //!< Are we using dynamic pair-list pruning
    int  nstlistPrune;      //!< Pair-list dynamic pruning interval
    real rlistOuter;        //!< Cut-off of the larger, outer pair-list
    real rlistInner;        //!< Cut-off of the smaller, inner pair-list
    int  numRollingParts;   //!< The number of parts to divide the pair-list into for rolling pruning, a value of 1 gives no rolling pruning
};
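/* Illustrative usage (hypothetical, not part of the original header): the
 * single-argument constructor disables dynamic pruning, so both cut-offs
 * equal the input radius:
 *
 *     NbnxnListParameters listParams(1.0);
 *     // listParams.rlistOuter == listParams.rlistInner == 1.0
 */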
/*! \endcond */
/* With CPU kernels the i-cluster size is always 4 atoms. */
static constexpr int c_nbnxnCpuIClusterSize = 4;

/* With GPU kernels the i and j cluster size is 8 atoms for CUDA and can be set at compile time for OpenCL */
#if GMX_GPU == GMX_GPU_OPENCL
static constexpr int c_nbnxnGpuClusterSize = GMX_OPENCL_NB_CLUSTER_SIZE;
#else
static constexpr int c_nbnxnGpuClusterSize = 8;
#endif

/* The number of clusters in a pair-search cell, used for GPU */
static constexpr int c_gpuNumClusterPerCellZ = 2;
static constexpr int c_gpuNumClusterPerCellY = 2;
static constexpr int c_gpuNumClusterPerCellX = 2;
static constexpr int c_gpuNumClusterPerCell  = c_gpuNumClusterPerCellZ*c_gpuNumClusterPerCellY*c_gpuNumClusterPerCellX;

/* In CUDA the number of threads in a warp is 32 and we have cluster pairs
 * of 8*8=64 atoms, so it's convenient to store data for cluster pair halves.
 */
static constexpr int c_nbnxnGpuClusterpairSplit = 2;

/* The fixed size of the exclusion mask array for a half cluster pair */
static constexpr int c_nbnxnGpuExclSize = c_nbnxnGpuClusterSize*c_nbnxnGpuClusterSize/c_nbnxnGpuClusterpairSplit;
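/* Worked example (illustrative): with the CUDA defaults above, a cluster
 * pair covers 8*8 = 64 atom pairs, split over 2 warps of 32 threads, so
 * c_nbnxnGpuExclSize = 8*8/2 = 32 mask entries per half cluster pair.
 */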
/* A buffer data structure of 64 bytes
 * to be placed at the beginning and end of structs
 * to avoid cache invalidation of the real contents
 * of the struct by writes to neighboring memory.
 */
typedef struct {
    int dummy[16];
} gmx_cache_protect_t;
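/* Illustrative check (assumes the usual 4-byte int): 16 ints occupy
 * 16*4 = 64 bytes, one full x86 cache line, e.g.:
 *
 *     static_assert(sizeof(gmx_cache_protect_t) == 64, "one cache line");
 */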
/* Abstract type for pair searching data */
typedef struct nbnxn_search * nbnxn_search_t;
/* Function that should return a pointer *ptr to memory
 * of size nbytes.
 * Error handling should be done within this function.
 */
typedef void nbnxn_alloc_t (void **ptr, size_t nbytes);

/* Function that should free the memory pointed to by *ptr.
 * NULL should not be passed to this function.
 */
typedef void nbnxn_free_t (void *ptr);
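/* Minimal sketch of a conforming pair (hypothetical names, not part of
 * GROMACS); a real implementation would allocate aligned memory and handle
 * allocation failure internally rather than returning an error:
 *
 *     static void exampleAlloc(void **ptr, size_t nbytes) { *ptr = malloc(nbytes); }
 *     static void exampleFree(void *ptr)                  { free(ptr); }
 */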
/* This is the actual cluster-pair list j-entry.
 * cj is the j-cluster.
 * The interaction bits in excl are indexed i-major, j-minor.
 * The cj entries are sorted such that ones with exclusions come first.
 * This means that once a full mask (=NBNXN_INTERACTION_MASK_ALL)
 * is found, all subsequent j-entries in the i-entry also have full masks.
 */
struct nbnxn_cj_t
{
    int          cj;   /* The j-cluster                    */
    unsigned int excl; /* The exclusion (interaction) bits */
};
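/* Illustrative sketch (hypothetical loop, not part of the original header):
 * the sort order lets a kernel stop testing masks at the first full mask,
 * since all remaining j-entries then also have full masks:
 *
 *     int cjind = cjIndStart;
 *     while (cjind < cjIndEnd && cjList[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
 *     {
 *         cjind++;  // entries up to here need per-pair mask checks
 *     }
 *     // entries from cjind to cjIndEnd can take the mask-free fast path
 */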
/* In nbnxn_ci_t the integer shift contains the shift in the lower 7 bits.
 * The upper bits contain information for non-bonded kernel optimization.
 * Simply calculating LJ and Coulomb for all pairs in a cluster pair is fine.
 * But three flags can be used to skip interactions, currently only for subc=0
 * !(shift & NBNXN_CI_DO_LJ(subc))   => we can skip LJ for all pairs
 * shift & NBNXN_CI_HALF_LJ(subc)    => we can skip LJ for the second half of i
 * !(shift & NBNXN_CI_DO_COUL(subc)) => we can skip Coulomb for all pairs
 */
#define NBNXN_CI_SHIFT          127
#define NBNXN_CI_DO_LJ(subc)    (1<<(7+3*(subc)))
#define NBNXN_CI_HALF_LJ(subc)  (1<<(8+3*(subc)))
#define NBNXN_CI_DO_COUL(subc)  (1<<(9+3*(subc)))
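/* Illustrative usage (hypothetical, not part of the original header):
 *
 *     const int shiftIndex = ciEntry.shift & NBNXN_CI_SHIFT;  // lower 7 bits
 *     if (!(ciEntry.shift & NBNXN_CI_DO_LJ(0)))
 *     {
 *         // LJ can be skipped for all pairs of this i-entry (subc=0)
 *     }
 */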
/* Cluster-pair Interaction masks
 * Bit i*j-cluster-size + j tells if atom i and j interact.
 */
// TODO: Rename according to convention when moving into Nbnxn namespace
/* All interaction mask is the same for all kernels */
constexpr unsigned int NBNXN_INTERACTION_MASK_ALL       = 0xffffffffU;
/* 4x4 kernel diagonal mask */
constexpr unsigned int NBNXN_INTERACTION_MASK_DIAG      = 0x08ceU;
/* 4x2 kernel diagonal masks */
constexpr unsigned int NBNXN_INTERACTION_MASK_DIAG_J2_0 = 0x0002U;
constexpr unsigned int NBNXN_INTERACTION_MASK_DIAG_J2_1 = 0x002fU;
/* 4x8 kernel diagonal masks */
constexpr unsigned int NBNXN_INTERACTION_MASK_DIAG_J8_0 = 0xf0f8fcfeU;
constexpr unsigned int NBNXN_INTERACTION_MASK_DIAG_J8_1 = 0x0080c0e0U;
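/* Worked example (illustrative): for the 4x4 diagonal mask 0x08ceU, bit
 * i*4 + j is set exactly when j > i:
 *     i=0 -> bits 1,2,3 (0x000e); i=1 -> bits 6,7 (0x00c0);
 *     i=2 -> bit 11 (0x0800); i=3 -> none; sum = 0x08ce.
 */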
/* Simple pair-list i-unit */
struct nbnxn_ci_t
{
    int ci;           /* i-cluster             */
    int shift;        /* Shift vector index plus possible flags, see above */
    int cj_ind_start; /* Start index into cj   */
    int cj_ind_end;   /* End index into cj     */
};
/* Grouped pair-list i-unit */
typedef struct {
    /* Returns the number of j-cluster groups in this entry */
    int numJClusterGroups() const
    {
        return cj4_ind_end - cj4_ind_start;
    }

    int sci;           /* i-super-cluster       */
    int shift;         /* Shift vector index plus possible flags */
    int cj4_ind_start; /* Start index into cj4  */
    int cj4_ind_end;   /* End index into cj4    */
} nbnxn_sci_t;
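/* Illustrative usage (hypothetical, not part of the original header):
 *
 *     for (int cj4Ind = sciEntry.cj4_ind_start; cj4Ind < sciEntry.cj4_ind_end; cj4Ind++)
 *     {
 *         // process cj4List[cj4Ind], one group of c_nbnxnGpuJgroupSize j-clusters
 *     }
 *     // sciEntry.numJClusterGroups() gives the trip count of this loop
 */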
/* Interaction data for a j-group for one warp */
struct nbnxn_im_ei_t
{
    // The i-cluster interactions mask for 1 warp
    unsigned int imask    = 0U;
    // Index into the exclusion array for 1 warp, default index 0 which means no exclusions
    int          excl_ind = 0;
};
typedef struct {
    int           cj[c_nbnxnGpuJgroupSize];         /* The 4 j-clusters */
    nbnxn_im_ei_t imei[c_nbnxnGpuClusterpairSplit]; /* The i-cluster mask data for 2 warps */
} nbnxn_cj4_t;
/* Struct for storing the atom-pair interaction bits for a cluster pair in a GPU pairlist */
struct nbnxn_excl_t
{
    /* Constructor, sets no exclusions, so all atom pairs interacting */
    nbnxn_excl_t()
    {
        for (unsigned int &pairEntry : pair)
        {
            pairEntry = NBNXN_INTERACTION_MASK_ALL;
        }
    }

    /* Topology exclusion interaction bits per warp */
    unsigned int pair[c_nbnxnGpuExclSize];
};
/* Cluster pairlist type for use on CPUs */
struct NbnxnPairlistCpu
{
    gmx_cache_protect_t    cp0;

    int                    na_ci;    /* The number of atoms per i-cluster    */
    int                    na_cj;    /* The number of atoms per j-cluster    */
    real                   rlist;    /* The radius for constructing the list */
    FastVector<nbnxn_ci_t> ci;       /* The i-cluster list                   */
    FastVector<nbnxn_ci_t> ciOuter;  /* The outer, unpruned i-cluster list   */

    FastVector<nbnxn_cj_t> cj;       /* The j-cluster list, size ncj         */
    FastVector<nbnxn_cj_t> cjOuter;  /* The outer, unpruned j-cluster list   */
    int                    ncjInUse; /* The number of j-clusters that are used by ci entries in this list, will be <= cj.size() */

    int                    nci_tot;  /* The total number of i clusters       */

    NbnxnPairlistCpuWork  *work;

    gmx_cache_protect_t    cp1;
};
/* Cluster pairlist type, with extra hierarchies, for on the GPU
 *
 * NOTE: for better performance when combining lists over threads,
 * all vectors should use default initialization. But when
 * changing this, excl should be initialized when adding entries.
 */
struct NbnxnPairlistGpu
{
    /* Constructor
     *
     * \param[in] pinningPolicy  Sets the pinning policy for all buffers used on the GPU
     */
    NbnxnPairlistGpu(gmx::PinningPolicy pinningPolicy);

    gmx_cache_protect_t           cp0;

    int                           na_ci; /* The number of atoms per i-cluster     */
    int                           na_cj; /* The number of atoms per j-cluster     */
    int                           na_sc; /* The number of atoms per super cluster */
    real                          rlist; /* The radius for constructing the list  */
    // The i-super-cluster list, indexes into cj4;
    gmx::HostVector<nbnxn_sci_t>  sci;
    // The list of 4*j-cluster groups
    gmx::HostVector<nbnxn_cj4_t>  cj4;
    // Atom interaction bits (non-exclusions)
    gmx::HostVector<nbnxn_excl_t> excl;
    // The total number of i-clusters
    int                           nci_tot;

    NbnxnPairlistGpuWork         *work;

    gmx_cache_protect_t           cp1;
};
typedef struct {
    int                nnbl;                  /* number of lists                            */
    NbnxnPairlistCpu **nbl;                   /* lists for CPU                              */
    NbnxnPairlistCpu **nbl_work;              /* work space for rebalancing lists           */
    NbnxnPairlistGpu **nblGpu;                /* lists for GPU                              */
    gmx_bool           bCombined;             /* TRUE if lists get combined into one (the 1st) */
    gmx_bool           bSimple;               /* TRUE if the list is of type "simple"
                                                 (na_sc=na_s, no super-clusters used)       */
    int                natpair_ljq;           /* Total number of atom pairs for LJ+Q kernel */
    int                natpair_lj;            /* Total number of atom pairs for LJ kernel   */
    int                natpair_q;             /* Total number of atom pairs for Q kernel    */
    t_nblist         **nbl_fep;               /* List of free-energy atom pair interactions */
    int64_t            outerListCreationStep; /* Step at which the outer list was created   */
} nbnxn_pairlist_set_t;
enum {
    nbatXYZ, nbatXYZQ, nbatX4, nbatX8
};
// Struct that holds force and energy output buffers
struct nbnxn_atomdata_output_t
{
    /* Constructor
     *
     * \param[in] nb_kernel_type          Type of non-bonded kernel
     * \param[in] numEnergyGroups         The number of energy groups
     * \param[in] simdEnergyBufferStride  Stride for entries in the energy buffers for SIMD kernels
     * \param[in] pinningPolicy           Sets the pinning policy for all buffers used on the GPU
     */
    nbnxn_atomdata_output_t(int                nb_kernel_type,
                            int                numEnergyGroups,
                            int                simdEnergyBufferStride,
                            gmx::PinningPolicy pinningPolicy);

    gmx::HostVector<real> f;      // f, size natoms*fstride
    gmx::HostVector<real> fshift; // Shift force array, size SHIFTS*DIM
    gmx::HostVector<real> Vvdw;   // Temporary Van der Waals group energy storage
    gmx::HostVector<real> Vc;     // Temporary Coulomb group energy storage
    AlignedVector<real>   VSvdw;  // Temporary SIMD Van der Waals group energy storage
    AlignedVector<real>   VSc;    // Temporary SIMD Coulomb group energy storage
};
/* Block size in atoms for the non-bonded thread force-buffer reduction,
 * should be a multiple of all cell and x86 SIMD sizes (i.e. 2, 4 and 8).
 * Should be small to reduce the reduction and zeroing cost,
 * but too small will result in overhead.
 * Currently the block size is NBNXN_BUFFERFLAG_SIZE*3*sizeof(real)=192 bytes.
 */
#if GMX_DOUBLE
#define NBNXN_BUFFERFLAG_SIZE   8
#else
#define NBNXN_BUFFERFLAG_SIZE  16
#endif
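/* Worked check (illustrative): in single precision 16 atoms * 3 reals * 4
 * bytes = 192 bytes; in double precision 8 * 3 * 8 = 192 bytes, so the block
 * size in bytes is identical in both precisions.
 */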
/* We store the reduction flags as gmx_bitmask_t.
 * This limits the number of flags to BITMASK_SIZE.
 */
#define NBNXN_BUFFERFLAG_MAX_THREADS  (BITMASK_SIZE)

/* Flags for telling if threads write to force output buffers */
typedef struct {
    int            nflag;       /* The number of flag blocks                         */
    gmx_bitmask_t *flag;        /* Bit i is set when thread i writes to a cell-block */
    int            flag_nalloc; /* Allocation size of flag                           */
} nbnxn_buffer_flags_t;
/* LJ combination rules: geometric, Lorentz-Berthelot, none */
enum {
    ljcrGEOM, ljcrLB, ljcrNONE, ljcrNR
};
/* Struct that stores atom related data for the nbnxn module
 *
 * Note: performance would improve slightly when all std::vector containers
 * in this struct would not initialize during resize().
 */
struct nbnxn_atomdata_t
{   //NOLINT(clang-analyzer-optin.performance.Padding)
    struct Params
    {
        /* Constructor
         *
         * \param[in] pinningPolicy  Sets the pinning policy for all data that might be transferred to a GPU
         */
        Params(gmx::PinningPolicy pinningPolicy);

        // The number of different atom types
        int                   numTypes;
        // Lennard-Jones 6*C6 and 12*C12 parameters, size numTypes*numTypes*2
        gmx::HostVector<real> nbfp;
        // Combination rule, see enum defined above
        int                   comb_rule;
        // LJ parameters per atom type, size numTypes*2
        gmx::HostVector<real> nbfp_comb;
        // As nbfp, but with a stride for the present SIMD architecture
        AlignedVector<real>   nbfp_aligned;
        // Atom types per atom
        gmx::HostVector<int>  type;
        // LJ parameters per atom for fast SIMD loading
        gmx::HostVector<real> lj_comb;
        // Charges per atom, not set with format nbatXYZQ
        gmx::HostVector<real> q;
        // The number of energy groups
        int                   nenergrp;
        // 2log(nenergrp)
        int                   neg_2log;
        // The energy groups, one int entry per cluster, only set when needed
        gmx::HostVector<int>  energrp;
    };
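    /* Illustrative indexing sketch (hypothetical access, not part of the
     * header): with the layout above, the 6*C6 and 12*C12 entries for the
     * atom-type pair (ti, tj) would be read as:
     *
     *     real c6  = params.nbfp[(ti*params.numTypes + tj)*2];
     *     real c12 = params.nbfp[(ti*params.numTypes + tj)*2 + 1];
     */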
    // Diagonal and topology exclusion helper data for all SIMD kernels
    struct SimdMasks
    {
        SimdMasks();

        // Helper data for setting up diagonal exclusion masks in the SIMD 4xN kernels
        AlignedVector<real>     diagonal_4xn_j_minus_i;
        // Helper data for setting up diagonal exclusion masks in the SIMD 2xNN kernels
        AlignedVector<real>     diagonal_2xnn_j_minus_i;
        // Filters for topology exclusion masks for the SIMD kernels
        AlignedVector<uint32_t> exclusion_filter;
        // Filters for topology exclusion masks for double SIMD kernels without SIMD int32 logical support
        AlignedVector<uint64_t> exclusion_filter64;
        // Array of masks needed for exclusions
        AlignedVector<real>     interaction_array;
    };
    /* Constructor
     *
     * \param[in] pinningPolicy  Sets the pinning policy for all data that might be transferred to a GPU
     */
    nbnxn_atomdata_t(gmx::PinningPolicy pinningPolicy);

    /* Returns a const reference to the parameters */
    const Params &params() const
    {
        return params_;
    }

    /* Returns a non-const reference to the parameters */
    Params &paramsDeprecated()
    {
        return params_;
    }

    /* Returns the current total number of atoms stored */
    int numAtoms() const
    {
        return numAtoms_;
    }

    /* Return the coordinate buffer, and q with xFormat==nbatXYZQ */
    gmx::ArrayRef<const real> x() const
    {
        return x_;
    }

    /* Return the coordinate buffer, and q with xFormat==nbatXYZQ */
    gmx::ArrayRef<real> x()
    {
        return x_;
    }

    /* Resizes the coordinate buffer and sets the number of atoms */
    void resizeCoordinateBuffer(int numAtoms);

    /* Resizes the force buffers for the current number of atoms */
    void resizeForceBuffers();
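    /* Illustrative usage sketch (hypothetical, not part of the header):
     *
     *     gmx::ArrayRef<const real> coords = nbat.x();
     *     // With XFormat == nbatXYZQ, atom a starts at coords[a*nbat.xstride]
     *     // (xstride == 4) and the fourth component holds the charge q.
     */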
    private:
        // The LJ and charge parameters
        Params                     params_;
        // The total number of atoms currently stored
        int                        numAtoms_;
    public:
        int                        natoms_local; /* Number of local atoms                         */
        int                        XFormat;      /* The format of x (and q), enum                 */
        int                        FFormat;      /* The format of f, enum                         */
        gmx_bool                   bDynamicBox;  /* Do we need to update shift_vec every step?    */
        gmx::HostVector<gmx::RVec> shift_vec;    /* Shift vectors, copied from t_forcerec         */
        int                        xstride;      /* stride for a coordinate in x (usually 3 or 4) */
        int                        fstride;      /* stride for a coordinate in f (usually 3 or 4) */
    private:
        gmx::HostVector<real>      x_;           /* x and possibly q, size natoms*xstride         */

    public:
        // Masks for handling exclusions in the SIMD kernels
        const SimdMasks            simdMasks;
        /* Output data */
        std::vector<nbnxn_atomdata_output_t> out;             /* Output data structures, 1 per thread */

        /* Reduction related data */
        gmx_bool                             bUseBufferFlags; /* Use the flags or operate on all atoms */
        nbnxn_buffer_flags_t                 buffer_flags;    /* Flags for buffer zeroing+reduc.       */
        gmx_bool                             bUseTreeReduce;  /* Use tree for force reduction          */
        tMPI_Atomic                         *syncStep;        /* Synchronization step for tree reduce  */
};

#endif