/*
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 *                        VERSION 3.2.0
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * Gromacs Runs On Most of All Computer Systems
 */
#ifndef _pme_h
#define _pme_h

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include "typedefs.h"
#include "gmxcomplex.h"
#include "fftgrid.h"
#include "gmx_wallcycle.h"

#ifdef __cplusplus
extern "C" {
#endif
typedef real *splinevec[DIM];

enum { GMX_SUM_QGRID_FORWARD, GMX_SUM_QGRID_BACKWARD };
extern int pme_inconvenient_nnodes(int nkx, int nky, int nnodes);
/* Checks for FFT + solve_pme load imbalance, returns:
 * 0 when no or negligible load imbalance is expected
 * 1 when a slight load imbalance is expected
 * 2 when using fewer PME nodes is expected to be faster
 */
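/* Illustrative use of the return code above (a sketch, not taken from the
 * GROMACS sources; nkx, nky and nnodes are assumed to be provided by the
 * caller):
 *
 *   if (pme_inconvenient_nnodes(nkx, nky, nnodes) == 2)
 *   {
 *       fprintf(stderr, "Note: fewer PME nodes are expected to be faster\n");
 *   }
 */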
extern int gmx_pme_init(gmx_pme_t *pmedata, t_commrec *cr, int nnodes_major,
                        t_inputrec *ir, int homenr,
                        bool bFreeEnergy, bool bReproducible);

extern int gmx_pme_destroy(FILE *log, gmx_pme_t *pmedata);
/* Initialize and destroy the PME data structures, respectively.
 * A return value of 0 indicates success; non-zero is an error code.
 */
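/* Minimal setup/teardown sketch (illustrative only; cr, ir, homenr and log
 * are assumed to come from the caller's setup, and nnodes_major is taken as
 * 1 here purely for illustration):
 *
 *   gmx_pme_t pme = NULL;
 *
 *   if (gmx_pme_init(&pme, cr, 1, ir, homenr, FALSE, FALSE) != 0)
 *   {
 *       // handle the error code
 *   }
 *   // ... use pme, e.g. via gmx_pme_do ...
 *   gmx_pme_destroy(log, &pme);
 */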
#define GMX_PME_SPREAD_Q (1<<0)
#define GMX_PME_SOLVE    (1<<1)
#define GMX_PME_CALC_F   (1<<2)
#define GMX_PME_DO_ALL   (GMX_PME_SPREAD_Q | GMX_PME_SOLVE | GMX_PME_CALC_F)
extern int gmx_pme_do(gmx_pme_t pme,
                      int start, int homenr,
                      rvec x[], rvec f[],
                      real chargeA[], real chargeB[],
                      matrix box, t_commrec *cr,
                      int maxshift0, int maxshift1,
                      t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                      matrix lrvir, real ewaldcoeff,
                      real *energy, real lambda,
                      real *dvdlambda, int flags);
/* Do a PME calculation for the long range electrostatics.
 * The flags, defined above, determine which parts of the calculation are performed.
 * A return value of 0 indicates success; non-zero is an error code.
 */
extern int gmx_pmeonly(gmx_pme_t pme,
                       t_commrec *cr, t_nrnb *mynrnb,
                       gmx_wallcycle_t wcycle,
                       real ewaldcoeff, bool bGatherOnly,
                       t_inputrec *ir);
/* Called on the nodes that do PME exclusively (as slaves).
 */
extern void gmx_sum_qgrid(gmx_pme_t pme, t_commrec *cr, t_fftgrid *grid,
                          int direction);
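/* Illustrative call (a sketch): communicate the local charge grid between
 * nodes in the forward direction, using the enum defined above:
 *
 *   gmx_sum_qgrid(pme, cr, grid, GMX_SUM_QGRID_FORWARD);
 */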
extern void gmx_pme_calc_energy(gmx_pme_t pme, int n, rvec *x, real *q, real *V);
/* Calculate the PME grid energy V for n charges with a potential
 * in the pme struct determined before with a call to gmx_pme_do
 * with at least GMX_PME_SPREAD_Q and GMX_PME_SOLVE specified.
 * Note that the charges are not spread on the grid in the pme struct.
 * Currently does not work in parallel or with free energy.
 */
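/* Illustrative use (a sketch): after a gmx_pme_do call with at least
 * GMX_PME_SPREAD_Q | GMX_PME_SOLVE, evaluate the grid potential energy for
 * n test charges q at positions x:
 *
 *   real V;
 *
 *   gmx_pme_calc_energy(pme, n, x, q, &V);
 */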
/* The following routines are for PME/PP node splitting in pme_pp.c */

/* Abstract type for PME <-> PP communication */
typedef struct gmx_pme_pp *gmx_pme_pp_t;

extern gmx_pme_pp_t gmx_pme_pp_init(t_commrec *cr);
/* Initialize the PME-only side of the PME <-> PP communication */
extern void gmx_pme_send_q(t_commrec *cr,
                           bool bFreeEnergy, real *chargeA, real *chargeB,
                           int maxshift0, int maxshift1);
/* Send the charges and maxshift to our PME-only node. */
extern void gmx_pme_send_x(t_commrec *cr, matrix box, rvec *x,
                           bool bFreeEnergy, real lambda, gmx_large_int_t step);
/* Send the coordinates to our PME-only node and request a PME calculation */

extern void gmx_pme_finish(t_commrec *cr);
/* Tell our PME-only node to finish */

extern void gmx_pme_receive_f(t_commrec *cr,
                              rvec f[], matrix vir,
                              real *energy, real *dvdlambda,
                              float *pme_cycles);
/* PP nodes receive the long range forces from the PME nodes */
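/* Illustrative PP-side sequence for one run (a sketch of how the routines
 * above fit together; all variables are assumed to come from the MD loop):
 *
 *   // once, after the charges are known:
 *   gmx_pme_send_q(cr, bFreeEnergy, chargeA, chargeB, maxshift0, maxshift1);
 *
 *   // every step:
 *   gmx_pme_send_x(cr, box, x, bFreeEnergy, lambda, step);
 *   // ... do the short-range work while the PME-only nodes run ...
 *   gmx_pme_receive_f(cr, f_pme, vir, &energy, &dvdlambda, &pme_cycles);
 *
 *   // at the end of the run:
 *   gmx_pme_finish(cr);
 */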
extern int gmx_pme_recv_q_x(gmx_pme_pp_t pme_pp,
                            real **chargeA, real **chargeB,
                            matrix box, rvec **x, rvec **f,
                            int *maxshift0, int *maxshift1,
                            bool *bFreeEnergy, real *lambda,
                            gmx_large_int_t *step);
/* Receive charges and/or coordinates from the PP-only nodes.
 * Returns the number of atoms, or -1 when the run is finished.
 */
extern void gmx_pme_send_force_vir_ener(gmx_pme_pp_t pme_pp,
                                        rvec *f, matrix vir,
                                        real energy, real dvdlambda,
                                        float cycles,
                                        bool bGotStopNextStepSignal,
                                        bool bGotStopNextNbrStepSignal);
/* Send the PME mesh force, virial and energy to the PP-only nodes */
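/* Illustrative PME-only receive/compute/send loop (a sketch; the compute
 * step and the stop signals are placeholders):
 *
 *   gmx_pme_pp_t pme_pp = gmx_pme_pp_init(cr);
 *   int          natoms;
 *
 *   do
 *   {
 *       natoms = gmx_pme_recv_q_x(pme_pp, &chargeA, &chargeB, box, &x, &f,
 *                                 &maxshift0, &maxshift1, &bFreeEnergy,
 *                                 &lambda, &step);
 *       if (natoms >= 0)
 *       {
 *           // ... run gmx_pme_do on the received coordinates ...
 *           gmx_pme_send_force_vir_ener(pme_pp, f, vir, energy, dvdlambda,
 *                                       cycles, FALSE, FALSE);
 *       }
 *   }
 *   while (natoms >= 0);   // -1 signals that the run is finished
 */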
#ifdef __cplusplus
}
#endif

#endif