/*
 * This source code is part of
 *
 * GROningen MAchine for Chemical Simulations
 *
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * Gromacs Runs On Most of All Computer Systems
 */
40 #include "visibility.h"
42 #include "gmxcomplex.h"
43 #include "gmx_wallcycle.h"
49 typedef real
*splinevec
[DIM
];
/* Direction of the charge-grid summation between PME nodes. */
enum {
    GMX_SUM_QGRID_FORWARD, GMX_SUM_QGRID_BACKWARD
};
54 int gmx_pme_init(gmx_pme_t
*pmedata
,t_commrec
*cr
,
55 int nnodes_major
,int nnodes_minor
,
56 t_inputrec
*ir
,int homenr
,
57 gmx_bool bFreeEnergy
, gmx_bool bReproducible
, int nthread
);
58 /* Initialize the pme data structures resepectively.
59 * Return value 0 indicates all well, non zero is an error code.
63 int gmx_pme_reinit(gmx_pme_t
* pmedata
,
66 const t_inputrec
* ir
,
68 /* As gmx_pme_init, but takes most settings, except the grid, from pme_src */
70 int gmx_pme_destroy(FILE *log
,gmx_pme_t
*pmedata
);
71 /* Destroy the pme data structures resepectively.
72 * Return value 0 indicates all well, non zero is an error code.
/* Flags selecting which parts of the PME calculation gmx_pme_do performs. */
#define GMX_PME_SPREAD_Q      (1<<0)
#define GMX_PME_SOLVE         (1<<1)
#define GMX_PME_CALC_F        (1<<2)
#define GMX_PME_CALC_ENER_VIR (1<<3)
/* This forces the grid to be backtransformed even without GMX_PME_CALC_F */
#define GMX_PME_CALC_POT      (1<<4)
#define GMX_PME_DO_ALL_F  (GMX_PME_SPREAD_Q | GMX_PME_SOLVE | GMX_PME_CALC_F)
83 int gmx_pme_do(gmx_pme_t pme
,
84 int start
, int homenr
,
86 real chargeA
[], real chargeB
[],
87 matrix box
, t_commrec
*cr
,
88 int maxshift_x
, int maxshift_y
,
89 t_nrnb
*nrnb
, gmx_wallcycle_t wcycle
,
90 matrix lrvir
, real ewaldcoeff
,
91 real
*energy
, real lambda
,
92 real
*dvdlambda
, int flags
);
93 /* Do a PME calculation for the long range electrostatics.
94 * flags, defined above, determine which parts of the calculation are performed.
95 * Return value 0 indicates all well, non zero is an error code.
99 int gmx_pmeonly(gmx_pme_t pme
,
100 t_commrec
*cr
, t_nrnb
*mynrnb
,
101 gmx_wallcycle_t wcycle
,
102 real ewaldcoeff
, gmx_bool bGatherOnly
,
104 /* Called on the nodes that do PME exclusively (as slaves)
107 void gmx_pme_calc_energy(gmx_pme_t pme
,int n
,rvec
*x
,real
*q
,real
*V
);
108 /* Calculate the PME grid energy V for n charges with a potential
109 * in the pme struct determined before with a call to gmx_pme_do
110 * with at least GMX_PME_SPREAD_Q and GMX_PME_SOLVE specified.
111 * Note that the charges are not spread on the grid in the pme struct.
112 * Currently does not work in parallel or with free energy.
/* The following three routines are for PME/PP node splitting in pme_pp.c */
/* Abstract type for PME <-> PP communication */
typedef struct gmx_pme_pp *gmx_pme_pp_t;
120 gmx_pme_pp_t
gmx_pme_pp_init(t_commrec
*cr
);
121 /* Initialize the PME-only side of the PME <-> PP communication */
123 void gmx_pme_send_q(t_commrec
*cr
,
124 gmx_bool bFreeEnergy
, real
*chargeA
, real
*chargeB
,
125 int maxshift_x
, int maxshift_y
);
126 /* Send the charges and maxshift to out PME-only node. */
128 void gmx_pme_send_x(t_commrec
*cr
, matrix box
, rvec
*x
,
129 gmx_bool bFreeEnergy
, real lambda
,
131 gmx_large_int_t step
);
132 /* Send the coordinates to our PME-only node and request a PME calculation */
135 void gmx_pme_send_finish(t_commrec
*cr
);
136 /* Tell our PME-only node to finish */
139 void gmx_pme_send_switch(t_commrec
*cr
, ivec grid_size
, real ewaldcoeff
);
140 /* Tell our PME-only node to switch to a new grid size */
142 void gmx_pme_receive_f(t_commrec
*cr
,
143 rvec f
[], matrix vir
,
144 real
*energy
, real
*dvdlambda
,
146 /* PP nodes receive the long range forces from the PME nodes */
148 int gmx_pme_recv_q_x(gmx_pme_pp_t pme_pp
,
149 real
**chargeA
, real
**chargeB
,
150 matrix box
, rvec
**x
,rvec
**f
,
151 int *maxshift_x
, int *maxshift_y
,
152 gmx_bool
*bFreeEnergy
, real
*lambda
,
154 gmx_large_int_t
*step
,
155 ivec grid_size
, real
*ewaldcoeff
);
157 /* Receive charges and/or coordinates from the PP-only nodes.
158 * Returns the number of atoms, or -1 when the run is finished.
159 * In the special case of a PME grid size switch request, -2 is returned
160 * and grid_size and *ewaldcoeff are set, which are otherwise not set.
163 void gmx_pme_send_force_vir_ener(gmx_pme_pp_t pme_pp
,
165 real energy
, real dvdlambda
,
167 /* Send the PME mesh force, virial and energy to the PP-only nodes */