/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 * This file is part of Gromacs        Copyright (c) 1991-2008
 * David van der Spoel, Erik Lindahl, Berk Hess, University of Groningen.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org
 *
 * And Hey:
 * Gnomes, ROck Monsters And Chili Sauce
 */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <string.h>
#include "domdec_network.h"

#ifdef GMX_LIB_MPI
#include <mpi.h>
#endif
#ifdef GMX_THREADS
#include "tmpi.h"
#endif


#define DDMASTERRANK(dd)   (dd->masterrank)

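/* The dd_sendrecv_* routines below exchange data with the two neighboring
 * domain-decomposition cells along dimension ddimind: n_s elements of buf_s
 * are sent to the neighbor in the requested direction and n_r elements are
 * received into buf_r from the neighbor in the opposite direction.
 * Without MPI the bodies are compiled away and the calls are no-ops.
 */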
void dd_sendrecv_int(const gmx_domdec_t *dd,
                     int ddimind,int direction,
                     int *buf_s,int n_s,
                     int *buf_r,int n_r)
{
#ifdef GMX_MPI
    int        rank_s,rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction==dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction==dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        MPI_Sendrecv(buf_s,n_s*sizeof(int),MPI_BYTE,rank_s,0,
                     buf_r,n_r*sizeof(int),MPI_BYTE,rank_r,0,
                     dd->mpi_comm_all,&stat);
    }
    else if (n_s)
    {
        MPI_Send(    buf_s,n_s*sizeof(int),MPI_BYTE,rank_s,0,
                     dd->mpi_comm_all);
    }
    else if (n_r)
    {
        MPI_Recv(    buf_r,n_r*sizeof(int),MPI_BYTE,rank_r,0,
                     dd->mpi_comm_all,&stat);
    }
#endif
}

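/* As dd_sendrecv_int, but for buffers of real values. */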
void dd_sendrecv_real(const gmx_domdec_t *dd,
                      int ddimind,int direction,
                      real *buf_s,int n_s,
                      real *buf_r,int n_r)
{
#ifdef GMX_MPI
    int        rank_s,rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction==dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction==dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        MPI_Sendrecv(buf_s,n_s*sizeof(real),MPI_BYTE,rank_s,0,
                     buf_r,n_r*sizeof(real),MPI_BYTE,rank_r,0,
                     dd->mpi_comm_all,&stat);
    }
    else if (n_s)
    {
        MPI_Send(    buf_s,n_s*sizeof(real),MPI_BYTE,rank_s,0,
                     dd->mpi_comm_all);
    }
    else if (n_r)
    {
        MPI_Recv(    buf_r,n_r*sizeof(real),MPI_BYTE,rank_r,0,
                     dd->mpi_comm_all,&stat);
    }
#endif
}

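/* As dd_sendrecv_int, but for rvec buffers; buf[0] is passed to MPI because
 * an rvec array is a contiguous block of real triplets.
 *
 * Illustrative call (the buffer and count names here are hypothetical):
 *
 *     dd_sendrecv_rvec(dd,d,dddirForward,x_send,n_send,x_recv,n_recv);
 *
 * sends n_send coordinate vectors to the forward neighbor along DD
 * dimension d and receives n_recv vectors from the backward neighbor.
 */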
void dd_sendrecv_rvec(const gmx_domdec_t *dd,
                      int ddimind,int direction,
                      rvec *buf_s,int n_s,
                      rvec *buf_r,int n_r)
{
#ifdef GMX_MPI
    int        rank_s,rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction==dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction==dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        MPI_Sendrecv(buf_s[0],n_s*sizeof(rvec),MPI_BYTE,rank_s,0,
                     buf_r[0],n_r*sizeof(rvec),MPI_BYTE,rank_r,0,
                     dd->mpi_comm_all,&stat);
    }
    else if (n_s)
    {
        MPI_Send(    buf_s[0],n_s*sizeof(rvec),MPI_BYTE,rank_s,0,
                     dd->mpi_comm_all);
    }
    else if (n_r)
    {
        MPI_Recv(    buf_r[0],n_r*sizeof(rvec),MPI_BYTE,rank_r,0,
                     dd->mpi_comm_all,&stat);
    }
#endif
}

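/* Exchange rvec data in both directions along dimension ddimind in one call:
 * buf_s_fw is sent to the forward neighbor while buf_r_fw is received from
 * the backward neighbor, and correspondingly for the _bw buffers.
 * Depending on dd->bSendRecv2 this is done either with four non-blocking
 * calls or with two ordered MPI_Sendrecv phases; as the comment in the body
 * notes, the two-phase mode can be enabled with the GMX_DD_SENDRECV2
 * environment variable (e.g. "export GMX_DD_SENDRECV2=1" in the shell;
 * the exact value that is checked is not visible in this file).
 */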
void dd_sendrecv2_rvec(const gmx_domdec_t *dd,
                       int ddimind,
                       rvec *buf_s_fw,int n_s_fw,
                       rvec *buf_r_fw,int n_r_fw,
                       rvec *buf_s_bw,int n_s_bw,
                       rvec *buf_r_bw,int n_r_bw)
{
#ifdef GMX_MPI
    int         rank_fw,rank_bw,nreq;
    MPI_Request req[4];
    MPI_Status  stat[4];

    rank_fw = dd->neighbor[ddimind][0];
    rank_bw = dd->neighbor[ddimind][1];

    if (!dd->bSendRecv2)
    {
        /* Try to send and receive in two directions simultaneously.
         * Should be faster, especially on machines
         * with full 3D communication networks.
         * However, it could be that communication libraries are
         * optimized for MPI_Sendrecv and non-blocking MPI calls
         * are slower.
         * SendRecv2 can be turned on with the env.var. GMX_DD_SENDRECV2
         */
        nreq = 0;
        if (n_r_fw)
        {
            MPI_Irecv(buf_r_fw[0],n_r_fw*sizeof(rvec),MPI_BYTE,
                      rank_bw,0,dd->mpi_comm_all,&req[nreq++]);
        }
        if (n_r_bw)
        {
            MPI_Irecv(buf_r_bw[0],n_r_bw*sizeof(rvec),MPI_BYTE,
                      rank_fw,1,dd->mpi_comm_all,&req[nreq++]);
        }
        if (n_s_fw)
        {
            MPI_Isend(buf_s_fw[0],n_s_fw*sizeof(rvec),MPI_BYTE,
                      rank_fw,0,dd->mpi_comm_all,&req[nreq++]);
        }
        if (n_s_bw)
        {
            MPI_Isend(buf_s_bw[0],n_s_bw*sizeof(rvec),MPI_BYTE,
                      rank_bw,1,dd->mpi_comm_all,&req[nreq++]);
        }
        if (nreq)
        {
            MPI_Waitall(nreq,req,stat);
        }
    }
    else
    {
        /* Communicate in two ordered phases.
         * This is slower, even on a dual-core Opteron cluster
         * with a single full-duplex network connection per machine.
         */
        /* Forward */
        MPI_Sendrecv(buf_s_fw[0],n_s_fw*sizeof(rvec),MPI_BYTE,rank_fw,0,
                     buf_r_fw[0],n_r_fw*sizeof(rvec),MPI_BYTE,rank_bw,0,
                     dd->mpi_comm_all,&stat[0]);
        /* Backward */
        MPI_Sendrecv(buf_s_bw[0],n_s_bw*sizeof(rvec),MPI_BYTE,rank_bw,0,
                     buf_r_bw[0],n_r_bw*sizeof(rvec),MPI_BYTE,rank_fw,0,
                     dd->mpi_comm_all,&stat[0]);
    }
#endif
}

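/* Broadcast nbytes of data from the DD master rank to all ranks
 * in dd->mpi_comm_all.
 */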
void dd_bcast(gmx_domdec_t *dd,int nbytes,void *data)
{
#ifdef GMX_MPI
    MPI_Bcast(data,nbytes,MPI_BYTE,
              DDMASTERRANK(dd),dd->mpi_comm_all);
#endif
}

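/* Broadcast with copy: the master copies nbytes from src to dest and dest is
 * then broadcast, so every rank ends up with the master's data in dest.
 */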
void dd_bcastc(gmx_domdec_t *dd,int nbytes,void *src,void *dest)
{
    if (DDMASTER(dd))
    {
        memcpy(dest,src,nbytes);
    }
#ifdef GMX_MPI
    MPI_Bcast(dest,nbytes,MPI_BYTE,
              DDMASTERRANK(dd),dd->mpi_comm_all);
#endif
}

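/* Scatter nbytes per rank from consecutive blocks of src on the master;
 * each rank receives its own block in dest.
 */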
void dd_scatter(gmx_domdec_t *dd,int nbytes,void *src,void *dest)
{
#ifdef GMX_MPI
    MPI_Scatter(src,nbytes,MPI_BYTE,
                dest,nbytes,MPI_BYTE,
                DDMASTERRANK(dd),dd->mpi_comm_all);
#endif
}

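/* Gather nbytes from src on every rank into consecutive blocks of dest
 * on the master.
 */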
void dd_gather(gmx_domdec_t *dd,int nbytes,void *src,void *dest)
{
#ifdef GMX_MPI
    MPI_Gather(src,nbytes,MPI_BYTE,
               dest,nbytes,MPI_BYTE,
               DDMASTERRANK(dd),dd->mpi_comm_all);
#endif
}

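/* Variable-count scatter: scounts and disps give the byte count and
 * displacement in sbuf on the master for each rank, and rcount bytes are
 * received into rbuf. A dummy buffer is substituted when rcount is zero,
 * since MPI does not accept NULL pointers.
 */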
void dd_scatterv(gmx_domdec_t *dd,
                 int *scounts,int *disps,void *sbuf,
                 int rcount,void *rbuf)
{
#ifdef GMX_MPI
    int dum;

    if (rcount == 0)
    {
        /* MPI does not allow NULL pointers */
        rbuf = &dum;
    }
    MPI_Scatterv(sbuf,scounts,disps,MPI_BYTE,
                 rbuf,rcount,MPI_BYTE,
                 DDMASTERRANK(dd),dd->mpi_comm_all);
#endif
}

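/* Variable-count gather: each rank contributes scount bytes from sbuf,
 * collected on the master into rbuf according to rcounts and disps.
 * A dummy buffer is substituted when scount is zero.
 */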
void dd_gatherv(gmx_domdec_t *dd,
                int scount,void *sbuf,
                int *rcounts,int *disps,void *rbuf)
{
#ifdef GMX_MPI
    int dum;

    if (scount == 0)
    {
        /* MPI does not allow NULL pointers */
        sbuf = &dum;
    }
    MPI_Gatherv(sbuf,scount,MPI_BYTE,
                rbuf,rcounts,disps,MPI_BYTE,
                DDMASTERRANK(dd),dd->mpi_comm_all);
#endif
}