Simplify PME GPU constants
[gromacs.git] / src / gromacs / ewald / pme_internal.h
blob686d63e42456f163c7bac2bf5f126cb60d4d5639
1 /*
2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
7 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
8 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9 * and including many others, as listed in the AUTHORS file in the
10 * top-level source directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
38 /*! \internal \file
40 * \brief This file contains function declarations necessary for
41 * computing energies and forces for the PME long-ranged part (Coulomb
42 * and LJ).
44 * \author Berk Hess <hess@kth.se>
45 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_ewald
 */
49 /* TODO This file is a temporary holding area for stuff local to the
50 * PME code, before it acquires some more normal ewald/file.c and
51 * ewald/file.h structure. In future clean up, get rid of this file,
52 * to build more normal. */
54 #ifndef GMX_EWALD_PME_INTERNAL_H
55 #define GMX_EWALD_PME_INTERNAL_H
57 #include "config.h"
59 #include <vector>
61 #include "gromacs/math/gmxcomplex.h"
62 #include "gromacs/utility/alignedallocator.h"
63 #include "gromacs/utility/arrayref.h"
64 #include "gromacs/utility/basedefinitions.h"
65 #include "gromacs/utility/defaultinitializationallocator.h"
66 #include "gromacs/utility/gmxmpi.h"
68 #include "spline_vectors.h"
//! A repeat of typedef from parallel_3dfft.h
typedef struct gmx_parallel_3dfft* gmx_parallel_3dfft_t;

//! Forward declarations of types defined in other modules
struct t_commrec;
struct t_inputrec;
struct PmeGpu;

//! Forward declaration of the PME run-mode enum (CPU/GPU/mixed codepath selector)
enum class PmeRunMode;
//@{
//! Grid indices for A state for charge and Lennard-Jones C6
#define PME_GRID_QA 0
#define PME_GRID_C6A 2
//@}

//@{
/*! \brief Flags that indicate the number of PME grids in use */
#define DO_Q 2           /* Electrostatic grids have index q<2 */
#define DO_Q_AND_LJ 4    /* non-LB LJ grids have index 2 <= q < 4 */
#define DO_Q_AND_LJ_LB 9 /* With LB rules we need a total of 2+7 grids */
//@}
/*! \brief Pascal triangle coefficients scaled with (1/2)^6 for LJ-PME with LB-rules */
static const real lb_scale_factor[] = { 1.0 / 64, 6.0 / 64, 15.0 / 64, 20.0 / 64,
                                        15.0 / 64, 6.0 / 64, 1.0 / 64 };

/*! \brief Pascal triangle coefficients used in solve_pme_lj_yzx, only need to do 4 calculations due to symmetry */
static const real lb_scale_factor_symm[] = { 2.0 / 64, 12.0 / 64, 30.0 / 64, 20.0 / 64 };

/*! \brief We only define a maximum to be able to use local arrays without allocation.
 * An order larger than 12 should never be needed, even for test cases.
 * If needed it can be changed here.
 */
#define PME_ORDER_MAX 12
106 /* Temporary suppression until these structs become opaque and don't live in
107 * a header that is included by other headers. Also, until then I have no
108 * idea what some of the names mean. */
110 //! @cond Doxygen_Suppress
112 /*! \brief Data structure for grid communication */
113 struct pme_grid_comm_t
115 int send_id; //!< Source rank id
116 int send_index0;
117 int send_nindex;
118 int recv_id; //!< Destination rank id
119 int recv_index0;
120 int recv_nindex;
121 int recv_size = 0; //!< Receive buffer width, used with OpenMP
124 /*! \brief Data structure for grid overlap communication in a single dimension */
125 struct pme_overlap_t
127 MPI_Comm mpi_comm; //!< MPI communcator
128 int nnodes; //!< Number of ranks
129 int nodeid; //!< Unique rank identifcator
130 std::vector<int> s2g0; //!< The local interpolation grid start
131 std::vector<int> s2g1; //!< The local interpolation grid end
132 int send_size; //!< Send buffer width, used with OpenMP
133 std::vector<pme_grid_comm_t> comm_data; //!< All the individual communication data for each rank
134 std::vector<real> sendbuf; //!< Shared buffer for sending
135 std::vector<real> recvbuf; //!< Shared buffer for receiving
//! Vector with SIMD-aligned storage (via gmx::AlignedAllocator)
template<typename T>
using AlignedVector = std::vector<T, gmx::AlignedAllocator<T>>;

//! Vector whose elements are default-initialized (not zeroed) on resize, avoiding
//! the value-initialization cost of plain std::vector (via gmx::DefaultInitializationAllocator)
template<typename T>
using FastVector = std::vector<T, gmx::DefaultInitializationAllocator<T>>;
144 /*! \brief Data structure for organizing particle allocation to threads */
145 struct AtomToThreadMap
147 //! Cumulative counts of the number of particles per thread
148 int* n = nullptr;
149 //! Storage buffer for n
150 std::vector<int> nBuffer;
151 //! Particle indices ordered on thread index (n)
152 FastVector<int> i;
155 /*! \internal
156 * \brief Coefficients for theta or dtheta
158 class SplineCoefficients
160 public:
161 //! Reallocate for use with up to nalloc coefficients
162 void realloc(int nalloc);
164 //! Pointers to the coefficient buffer for x, y, z
165 splinevec coefficients = { nullptr };
167 private:
168 //! Storage for x coefficients
169 std::vector<real> bufferX_;
170 //! Storage for y coefficients
171 std::vector<real> bufferY_;
172 //! Storage for z coefficients, aligned for SIMD load
173 AlignedVector<real> bufferZ_;
176 /*! \brief Data structure for beta-spline interpolation */
177 struct splinedata_t
179 int n = 0;
180 FastVector<int> ind;
181 SplineCoefficients theta;
182 SplineCoefficients dtheta;
183 int nalloc = 0;
/*! \brief PME slab MPI communication setup */
struct SlabCommSetup
{
    //! The nodes to send x and q to with DD
    int node_dest;
    //! The nodes to receive x and q from with DD
    int node_src;
    //! Index for commnode into the buffers
    int buf_index;
    //! The number of atoms to receive
    int rcount;
};
199 /*! \internal
200 * \brief Data structure for coordinating transfers between PME ranks along one dimension
202 * Also used for passing coordinates, coefficients and forces to and from PME routines.
204 class PmeAtomComm
206 public:
207 //! Constructor, \p PmeMpiCommunicator is the communicator for this dimension
208 PmeAtomComm(MPI_Comm PmeMpiCommunicator, int numThreads, int pmeOrder, int dimIndex, bool doSpread);
210 //! Set the atom count and when necessary resizes atom buffers
211 void setNumAtoms(int numAtoms);
213 //! Returns the atom count
214 int numAtoms() const { return numAtoms_; }
216 //! Returns the number of atoms to send to each rank
217 gmx::ArrayRef<int> sendCount()
219 GMX_ASSERT(!count_thread.empty(), "Need at least one thread_count");
220 return count_thread[0];
223 //! The index of the dimension, 0=x, 1=y
224 int dimind = 0;
225 //! The number of slabs and ranks this dimension is decomposed over
226 int nslab = 1;
227 //! Our MPI rank index
228 int nodeid = 0;
229 //! Communicator for this dimension
230 MPI_Comm mpi_comm;
232 //! Communication setup for each slab, only present with nslab > 1
233 std::vector<SlabCommSetup> slabCommSetup;
234 //! The maximum communication distance counted in MPI ranks
235 int maxshift = 0;
237 //! The target slab index for each particle
238 FastVector<int> pd;
239 //! Target particle counts for each slab, for each thread
240 std::vector<std::vector<int>> count_thread;
242 private:
243 //! The number of atoms
244 int numAtoms_ = 0;
246 public:
247 //! The coordinates
248 gmx::ArrayRef<const gmx::RVec> x;
249 //! The coefficient, charges or LJ C6
250 gmx::ArrayRef<const real> coefficient;
251 //! The forces
252 gmx::ArrayRef<gmx::RVec> f;
253 //! Coordinate buffer, used only with nslab > 1
254 FastVector<gmx::RVec> xBuffer;
255 //! Coefficient buffer, used only with nslab > 1
256 FastVector<real> coefficientBuffer;
257 //! Force buffer, used only with nslab > 1
258 FastVector<gmx::RVec> fBuffer;
259 //! Tells whether these coordinates are used for spreading
260 bool bSpread;
261 //! The PME order
262 int pme_order;
263 //! The grid index per atom
264 FastVector<gmx::IVec> idx;
265 //! Fractional atom coordinates relative to the lower cell boundary
266 FastVector<gmx::RVec> fractx;
268 //! The number of threads to use in PME
269 int nthread;
270 //! Thread index for each atom
271 FastVector<int> thread_idx;
272 std::vector<AtomToThreadMap> threadMap;
273 std::vector<splinedata_t> spline;
276 /*! \brief Data structure for a single PME grid */
277 struct pmegrid_t
279 ivec ci; /* The spatial location of this grid */
280 ivec n; /* The used size of *grid, including order-1 */
281 ivec offset; /* The grid offset from the full node grid */
282 int order; /* PME spreading order */
283 ivec s; /* The allocated size of *grid, s >= n */
284 real* grid; /* The grid local thread, size n */
287 /*! \brief Data structures for PME grids */
288 struct pmegrids_t
290 pmegrid_t grid; /* The full node grid (non thread-local) */
291 int nthread; /* The number of threads operating on this grid */
292 ivec nc; /* The local spatial decomposition over the threads */
293 pmegrid_t* grid_th; /* Array of grids for each thread */
294 real* grid_all; /* Allocated array for the grids in *grid_th */
295 int* g2t[DIM]; /* The grid to thread index */
296 ivec nthread_comm; /* The number of threads to communicate with */
/*! \brief Data structure for spline-interpolation working buffers (opaque here) */
struct pme_spline_work;

/*! \brief Data structure for working buffers (opaque here) */
struct pme_solve_work_t;
305 /*! \brief Master PME data structure */
306 struct gmx_pme_t
307 { //NOLINT(clang-analyzer-optin.performance.Padding)
308 int ndecompdim; /* The number of decomposition dimensions */
309 int nodeid; /* Our nodeid in mpi->mpi_comm */
310 int nodeid_major;
311 int nodeid_minor;
312 int nnodes; /* The number of nodes doing PME */
313 int nnodes_major;
314 int nnodes_minor;
316 MPI_Comm mpi_comm;
317 MPI_Comm mpi_comm_d[2]; /* Indexed on dimension, 0=x, 1=y */
318 #if GMX_MPI
319 MPI_Datatype rvec_mpi; /* the pme vector's MPI type */
320 #endif
322 gmx_bool bUseThreads; /* Does any of the PME ranks have nthread>1 ? */
323 int nthread; /* The number of threads doing PME on our rank */
325 gmx_bool bPPnode; /* Node also does particle-particle forces */
326 bool doCoulomb; /* Apply PME to electrostatics */
327 bool doLJ; /* Apply PME to Lennard-Jones r^-6 interactions */
328 gmx_bool bFEP; /* Compute Free energy contribution */
329 gmx_bool bFEP_q;
330 gmx_bool bFEP_lj;
331 int nkx, nky, nkz; /* Grid dimensions */
332 gmx_bool bP3M; /* Do P3M: optimize the influence function */
333 int pme_order;
334 real ewaldcoeff_q; /* Ewald splitting coefficient for Coulomb */
335 real ewaldcoeff_lj; /* Ewald splitting coefficient for r^-6 */
336 real epsilon_r;
339 enum PmeRunMode runMode; /* Which codepath is the PME runner taking - CPU, GPU, mixed;
340 * TODO: this is the information that should be owned by the task
341 * scheduler, and ideally not be duplicated here.
344 PmeGpu* gpu; /* A pointer to the GPU data.
345 * TODO: this should be unique or a shared pointer.
346 * Currently in practice there is a single gmx_pme_t instance while a code
347 * is partially set up for many of them. The PME tuning calls gmx_pme_reinit()
348 * which fully reinitializes the one and only PME structure anew while maybe
349 * keeping the old grid buffers if they were already large enough.
350 * This small choice should be made clear in the later refactoring -
351 * do we store many PME objects for different grid sizes,
352 * or a single PME object that handles different grid sizes gracefully.
356 class EwaldBoxZScaler* boxScaler; /**< The scaling data Ewald uses with walls (set at pme_init constant for the entire run) */
359 int ljpme_combination_rule; /* Type of combination rule in LJ-PME */
361 int ngrids; /* number of grids we maintain for pmegrid, (c)fftgrid and pfft_setups*/
363 pmegrids_t pmegrid[DO_Q_AND_LJ_LB]; /* Grids on which we do spreading/interpolation,
364 * includes overlap Grid indices are ordered as
365 * follows:
366 * 0: Coloumb PME, state A
367 * 1: Coloumb PME, state B
368 * 2-8: LJ-PME
369 * This can probably be done in a better way
370 * but this simple hack works for now
373 /* The PME coefficient spreading grid sizes/strides, includes pme_order-1 */
374 int pmegrid_nx, pmegrid_ny, pmegrid_nz;
375 /* pmegrid_nz might be larger than strictly necessary to ensure
376 * memory alignment, pmegrid_nz_base gives the real base size.
378 int pmegrid_nz_base;
379 /* The local PME grid starting indices */
380 int pmegrid_start_ix, pmegrid_start_iy, pmegrid_start_iz;
382 /* Work data for spreading and gathering */
383 pme_spline_work* spline_work;
385 real** fftgrid; /* Grids for FFT. With 1D FFT decomposition this can be a pointer */
386 /* inside the interpolation grid, but separate for 2D PME decomp. */
387 int fftgrid_nx, fftgrid_ny, fftgrid_nz;
389 t_complex** cfftgrid; /* Grids for complex FFT data */
391 int cfftgrid_nx, cfftgrid_ny, cfftgrid_nz;
393 gmx_parallel_3dfft_t* pfft_setup;
395 int * nnx, *nny, *nnz;
396 real *fshx, *fshy, *fshz;
398 std::vector<PmeAtomComm> atc; /* Indexed on decomposition index */
399 matrix recipbox;
400 real boxVolume;
401 splinevec bsp_mod;
402 /* Buffers to store data for local atoms for L-B combination rule
403 * calculations in LJ-PME. lb_buf1 stores either the coefficients
404 * for spreading/gathering (in serial), or the C6 coefficient for
405 * local atoms (in parallel). lb_buf2 is only used in parallel,
406 * and stores the sigma values for local atoms. */
407 FastVector<real> lb_buf1;
408 FastVector<real> lb_buf2;
410 pme_overlap_t overlap[2]; /* Indexed on dimension, 0=x, 1=y */
412 /* Atom step for energy only calculation in gmx_pme_calc_energy() */
413 std::unique_ptr<PmeAtomComm> atc_energy;
415 /* Communication buffers */
416 rvec* bufv; /* Communication buffer */
417 real* bufr; /* Communication buffer */
418 int buf_nalloc; /* The communication buffer size */
420 /* thread local work data for solve_pme */
421 struct pme_solve_work_t* solve_work;
423 /* Work data for sum_qgrid */
424 real* sum_qgrid_tmp;
425 real* sum_qgrid_dd_tmp;
428 //! @endcond
430 #endif