/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file contains function declarations necessary for
 * computing energies and forces for the PME long-ranged part (Coulomb
 * and LJ).
 *
 * \author Berk Hess <hess@kth.se>
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_ewald
 */
/* TODO This file is a temporary holding area for stuff local to the
 * PME code, before it acquires some more normal ewald/file.c and
 * ewald/file.h structure. In a future clean-up, get rid of this file
 * in favour of that more normal structure. */
#ifndef GMX_EWALD_PME_INTERNAL_H
#define GMX_EWALD_PME_INTERNAL_H
#include "gromacs/math/gmxcomplex.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/gmxmpi.h"
//! A repeat of typedef from parallel_3dfft.h
typedef struct gmx_parallel_3dfft *gmx_parallel_3dfft_t;
//! Grid indices for the A state for charge and Lennard-Jones C6
#define PME_GRID_QA    0
#define PME_GRID_C6A   2
/*! \brief Flags that indicate the number of PME grids in use */
#define DO_Q           2 /* Electrostatic grids have index q<2 */
#define DO_Q_AND_LJ    4 /* non-LB LJ grids have index 2 <= q < 4 */
#define DO_Q_AND_LJ_LB 9 /* With LB rules we need a total of 2+7 grids */
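/* A minimal sketch (not actual GROMACS logic) of how these counts map onto
 * the ngrids field of the PME data structure below: electrostatics alone uses
 * the two charge grids (states A and B), LJ-PME with geometric combination
 * rules adds two more, and LB combination rules need 2+7 grids in total.
 * The helper and its parameters are purely illustrative.
 *
 *     int ngrids_needed(int useLJPME, int useLBRules)
 *     {
 *         if (!useLJPME)
 *         {
 *             return DO_Q;
 *         }
 *         return useLBRules ? DO_Q_AND_LJ_LB : DO_Q_AND_LJ;
 *     }
 */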
/*! \brief Pascal triangle coefficients scaled with (1/2)^6 for LJ-PME with LB-rules */
static const real lb_scale_factor[] = {
    1.0/64, 6.0/64, 15.0/64, 20.0/64,
    15.0/64, 6.0/64, 1.0/64
};
/*! \brief Pascal triangle coefficients used in solve_pme_lj_yzx, only need to do 4 calculations due to symmetry */
static const real lb_scale_factor_symm[] = { 2.0/64, 12.0/64, 30.0/64, 20.0/64 };
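/* Where these numbers come from (illustration only): row 6 of Pascal's
 * triangle, binomial(6,k), scaled by 1/2^6 = 1/64. The symmetric variant
 * folds the two equal halves together, so for k < 3 it is twice the
 * corresponding lb_scale_factor entry, and for the middle term (k = 3) it is
 * that entry itself:
 *
 *     // C(6,k) for k = 0..6; lb_scale_factor[k]      == c6[k]/64.0 and
 *     //                      lb_scale_factor_symm[k] == (k < 3 ? 2 : 1)*c6[k]/64.0
 *     static const int c6[7] = { 1, 6, 15, 20, 15, 6, 1 };
 */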
/*! \brief We only define a maximum to be able to use local arrays without allocation.
 * An order larger than 12 should never be needed, even for test cases.
 * If needed it can be changed here.
 */
#define PME_ORDER_MAX 12
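/* Illustration (not GROMACS code): the fixed maximum lets inner loops keep
 * per-atom spline data on the stack instead of allocating, e.g.
 *
 *     real theta_z[PME_ORDER_MAX];    // hypothetical local buffer
 *
 * of which only the first pme_order entries are actually used.
 */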
/*! \brief As gmx_pme_init, but takes most settings, except the grid/Ewald coefficients, from pme_src.
 * This is only called when the PME cut-off/grid size changes.
 */
int gmx_pme_reinit(struct gmx_pme_t **pmedata,
                   t_commrec *        cr,
                   struct gmx_pme_t * pme_src,
                   const t_inputrec * ir,
                   ivec               grid_size,
                   real               ewaldcoeff_q,
                   real               ewaldcoeff_lj);
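/* Hedged usage sketch (illustrative names, not actual GROMACS code): when PME
 * tuning selects a new cut-off, the matching grid and Ewald coefficients can
 * be installed by re-initializing from the existing PME data:
 *
 *     ivec new_grid = { 64, 64, 64 };
 *     gmx_pme_reinit(&pme_new, cr, pme_old, ir, new_grid,
 *                    new_ewaldcoeff_q, new_ewaldcoeff_lj);
 */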
/* The following routines are for PME/PP node splitting in pme_pp.c */
/*! \brief Abstract type for PME <-> PP communication */
typedef struct gmx_pme_pp *gmx_pme_pp_t;
/* Temporary suppression until these structs become opaque and don't live in
 * a header that is included by other headers. Also, until then I have no
 * idea what some of the names mean. */

//! @cond Doxygen_Suppress
/*! \brief Data structure for grid communication */
typedef struct {
    int recv_size;   /* Receive buffer width, used with OpenMP */
} pme_grid_comm_t;
/*! \brief Data structure for grid overlap communication */
typedef struct {
    int             *send_id, *recv_id; /* Source/destination ranks for overlap communication */
    int              send_size;         /* Send buffer width, used with OpenMP */
    pme_grid_comm_t *comm_data;         /* Per-node communication data */
} pme_overlap_t;
/*! \brief Data structure for organizing particle allocation to threads */
typedef struct {
    int *n;      /* Cumulative counts of the number of particles per thread */
    int  nalloc; /* Allocation size of i */
    int *i;      /* Particle indices ordered on thread index (n) */
} thread_plist_t;
/*! \brief Helper typedef for spline vectors */
typedef real *splinevec[DIM];
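/* Illustration: a splinevec holds one pointer per dimension (XX, YY, ZZ), so
 * spline weights are addressed per dimension. The indexing below is only a
 * sketch of a typical layout, not a documented invariant:
 *
 *     splinevec theta;
 *     real      w = theta[XX][i*order + k];   // k-th weight of atom i along x
 */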
/*! \brief Data structure for beta-spline interpolation */
/*! \brief Data structure for coordinating transfer between PP and PME ranks */
typedef struct {
    int             dimind;       /* The index of the dimension, 0=x, 1=y */
    int            *node_dest;    /* The nodes to send x and q to with DD */
    int            *node_src;     /* The nodes to receive x and q from with DD */
    int            *buf_index;    /* Index for commnode into the buffers */
    int            *count;        /* The number of atoms to send to each node */
    int            *rcount;       /* The number of atoms to receive */
    gmx_bool        bSpread;      /* These coordinates are used for spreading */
    rvec           *fractx;       /* Fractional coordinates relative to
                                   * the lower cell boundary */
    int            *thread_idx;   /* Which thread should spread which coefficient */
    thread_plist_t *thread_plist; /* Per-thread particle index lists */
    splinedata_t   *spline;       /* B-spline interpolation data, one entry per thread */
} pme_atomcomm_t;
/*! \brief Data structure for a single PME grid */
typedef struct {
    ivec  ci;     /* The spatial location of this grid */
    ivec  n;      /* The used size of *grid, including order-1 */
    ivec  offset; /* The grid offset from the full node grid */
    int   order;  /* PME spreading order */
    ivec  s;      /* The allocated size of *grid, s >= n */
    real *grid;   /* The local grid for this thread, size n */
} pmegrid_t;
/*! \brief Data structures for PME grids */
typedef struct {
    pmegrid_t  grid;         /* The full node grid (non thread-local) */
    int        nthread;      /* The number of threads operating on this grid */
    ivec       nc;           /* The local spatial decomposition over the threads */
    pmegrid_t *grid_th;      /* Array of grids for each thread */
    real      *grid_all;     /* Allocated array for the grids in *grid_th */
    int       *g2t[DIM];     /* The grid to thread index */
    ivec       nthread_comm; /* The number of threads to communicate with */
} pmegrids_t;
/*! \brief Data structure for spline-interpolation working buffers */
struct pme_spline_work;

/*! \brief Data structure for working buffers */
struct pme_solve_work_t;
/*! \brief Master PME data structure */
typedef struct gmx_pme_t {
    int           ndecompdim;    /* The number of decomposition dimensions */
    int           nodeid;        /* Our nodeid in mpi->mpi_comm */
    int           nnodes;        /* The number of nodes doing PME */

    MPI_Comm      mpi_comm_d[2]; /* Indexed on dimension, 0=x, 1=y */
    MPI_Datatype  rvec_mpi;      /* the pme vector's MPI type */

    gmx_bool      bUseThreads;   /* Does any of the PME ranks have nthread>1 ? */
    int           nthread;       /* The number of threads doing PME on our rank */

    gmx_bool      bPPnode;       /* Node also does particle-particle forces */
    bool          doCoulomb;     /* Apply PME to electrostatics */
    bool          doLJ;          /* Apply PME to Lennard-Jones r^-6 interactions */
    gmx_bool      bFEP;          /* Compute free energy contribution */
    int           nkx, nky, nkz; /* Grid dimensions */

    gmx_bool      bP3M;          /* Do P3M: optimize the influence function */

    real          ewaldcoeff_q;  /* Ewald splitting coefficient for Coulomb */
    real          ewaldcoeff_lj; /* Ewald splitting coefficient for r^-6 */

    int           ljpme_combination_rule; /* Type of combination rule in LJ-PME */

    int           ngrids;        /* number of grids we maintain for pmegrid, (c)fftgrid and pfft_setups */

    pmegrids_t    pmegrid[DO_Q_AND_LJ_LB]; /* Grids on which we do spreading/interpolation,
                                            * includes overlap. Grid indices are ordered as
                                            * follows:
                                            * 0: Coulomb PME, state A
                                            * 1: Coulomb PME, state B
                                            * 2-8: LJ-PME
                                            * This can probably be done in a better way
                                            * but this simple hack works for now
                                            */
    /* The PME coefficient spreading grid sizes/strides, includes pme_order-1 */
    int           pmegrid_nx, pmegrid_ny, pmegrid_nz;
    /* pmegrid_nz might be larger than strictly necessary to ensure
     * memory alignment, pmegrid_nz_base gives the real base size.
     */
    int           pmegrid_nz_base;
    /* The local PME grid starting indices */
    int           pmegrid_start_ix, pmegrid_start_iy, pmegrid_start_iz;

    /* Work data for spreading and gathering */
    struct pme_spline_work *spline_work;

    real        **fftgrid; /* Grids for FFT. With 1D FFT decomposition this can be a pointer */
                           /* inside the interpolation grid, but separate for 2D PME decomp. */
    int           fftgrid_nx, fftgrid_ny, fftgrid_nz;
    t_complex   **cfftgrid; /* Grids for complex FFT data */

    int           cfftgrid_nx, cfftgrid_ny, cfftgrid_nz;

    gmx_parallel_3dfft_t *pfft_setup;

    int          *nnx, *nny, *nnz;    /* Grid index lookup tables, one per dimension */
    real         *fshx, *fshy, *fshz; /* Fractional shift lookup tables, one per dimension */

    pme_atomcomm_t atc[2]; /* Indexed on decomposition index */
    /* Buffers to store data for local atoms for L-B combination rule
     * calculations in LJ-PME. lb_buf1 stores either the coefficients
     * for spreading/gathering (in serial), or the C6 coefficient for
     * local atoms (in parallel). lb_buf2 is only used in parallel,
     * and stores the sigma values for local atoms. See the expansion
     * sketch after this struct for how these combine into seven grids. */
    real         *lb_buf1, *lb_buf2;
    int           lb_buf_nalloc; /* Allocation size for the above buffers. */
    pme_overlap_t overlap[2];    /* Indexed on dimension, 0=x, 1=y */

    pme_atomcomm_t atc_energy;   /* Only for gmx_pme_calc_energy */

    rvec         *bufv;          /* Communication buffer */
    real         *bufr;          /* Communication buffer */
    int           buf_nalloc;    /* The communication buffer size */

    /* thread local work data for solve_pme */
    struct pme_solve_work_t *solve_work;

    /* Work data for sum_qgrid */
    real         *sum_qgrid_tmp;
    real         *sum_qgrid_dd_tmp;
} t_gmx_pme_t;

//! @endcond
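/* Sketch (illustration only, using pow() from <math.h>) of why LB combination
 * rules need seven LJ grids: with sigma_ij = (sigma_i+sigma_j)/2 and
 * eps_ij = sqrt(eps_i*eps_j), the dispersion coefficient is proportional to
 * eps_ij*sigma_ij^6, which expands binomially into seven separable terms
 * weighted by lb_scale_factor; this is the role of the C6 and sigma values
 * kept in lb_buf1 and lb_buf2 above. Any constant prefactor in the usual LJ
 * convention factors out of the sum.
 *
 *     double lb_separable(double sqrt_eps_i, double sigma_i,
 *                         double sqrt_eps_j, double sigma_j)
 *     {
 *         double s = 0;
 *         for (int k = 0; k <= 6; k++)
 *         {
 *             s += lb_scale_factor[k]
 *                  * sqrt_eps_i*pow(sigma_i, k)
 *                  * sqrt_eps_j*pow(sigma_j, 6 - k);
 *         }
 *         return s;   // == sqrt(eps_i*eps_j)*pow((sigma_i+sigma_j)/2, 6)
 *     }
 */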
/*! \brief Initialize the PME-only side of the PME <-> PP communication */
gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr);
/*! \brief Tell our PME-only node to switch to a new grid size */
void gmx_pme_send_switchgrid(t_commrec *cr, ivec grid_size,
                             real ewaldcoeff_q, real ewaldcoeff_lj);
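/* Hedged usage sketch (illustrative values, not actual GROMACS code): during
 * PME tuning a PP rank could announce a coarser grid together with the
 * matching Ewald coefficients to its PME-only rank:
 *
 *     ivec grid = { 48, 48, 48 };
 *     gmx_pme_send_switchgrid(cr, grid, ewaldcoeff_q, ewaldcoeff_lj);
 */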
/*! \brief Return values for gmx_pme_recv_coeffs_coords */
enum {
    pmerecvqxX,             /* calculate PME mesh interactions for new x */
    pmerecvqxFINISH,        /* the simulation should finish, we should quit */
    pmerecvqxSWITCHGRID,    /* change the PME grid size */
    pmerecvqxRESETCOUNTERS  /* reset the cycle and flop counters */
};
/*! \brief Called by PME-only ranks to receive coefficients and coordinates
 *
 * The return value is used to control further processing, with meanings:
 * pmerecvqxX:             all parameters set, chargeA and chargeB can be NULL
 * pmerecvqxFINISH:        no parameters set
 * pmerecvqxSWITCHGRID:    only grid_size and *ewaldcoeff are set
 * pmerecvqxRESETCOUNTERS: *step is set
 */
int gmx_pme_recv_coeffs_coords(struct gmx_pme_pp *pme_pp,
                               real **chargeA, real **chargeB,
                               real **sqrt_c6A, real **sqrt_c6B,
                               real **sigmaA, real **sigmaB,
                               matrix box, rvec **x, rvec **f,
                               int *maxshift_x, int *maxshift_y,
                               real *lambda_q, real *lambda_lj,
                               ivec grid_size, real *ewaldcoeff_q, real *ewaldcoeff_lj);
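/* Control-flow sketch (not actual GROMACS code) of how a PME-only rank might
 * act on the return values documented above; receive_from_pp() is a
 * hypothetical wrapper standing in for a full gmx_pme_recv_coeffs_coords()
 * call with all of its output arguments:
 *
 *     for (;;)
 *     {
 *         int ret = receive_from_pp(pme_pp);   // hypothetical helper
 *         if (ret == pmerecvqxX)
 *         {
 *             // spread, FFT, solve, gather; then send forces/energies back
 *         }
 *         else if (ret == pmerecvqxSWITCHGRID)
 *         {
 *             // re-initialize for the new grid size and Ewald coefficients
 *         }
 *         else if (ret == pmerecvqxRESETCOUNTERS)
 *         {
 *             // reset cycle and FLOP counters at the received step
 *         }
 *         else if (ret == pmerecvqxFINISH)
 *         {
 *             break;   // the simulation is done
 *         }
 *     }
 */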
/*! \brief Send the PME mesh force, virial and energy to the PP-only nodes */
void gmx_pme_send_force_vir_ener(struct gmx_pme_pp *pme_pp,
                                 rvec *f, matrix vir_q, real energy_q,
                                 matrix vir_lj, real energy_lj,
                                 real dvdlambda_q, real dvdlambda_lj,