/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2008,2009,2010,2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "config.h"

#include <string.h>
#include "types/commrec.h"
#include "domdec_network.h"

#include "gromacs/utility/gmxmpi.h"


#define DDMASTERRANK(dd)   (dd->masterrank)
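
/* Pairwise neighbor exchange along one domain-decomposition dimension.
 * The dd_sendrecv_int/real/rvec functions below send n_s elements from
 * buf_s to the neighboring rank selected by ddimind and direction,
 * while receiving n_r elements into buf_r from the opposite neighbor.
 * A zero count skips the corresponding send or receive; without
 * GMX_MPI the calls are no-ops.
 */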

void dd_sendrecv_int(const gmx_domdec_t gmx_unused *dd,
                     int gmx_unused ddimind, int gmx_unused direction,
                     int gmx_unused *buf_s, int gmx_unused n_s,
                     int gmx_unused *buf_r, int gmx_unused n_r)
{
#ifdef GMX_MPI
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        MPI_Sendrecv(buf_s, n_s*sizeof(int), MPI_BYTE, rank_s, 0,
                     buf_r, n_r*sizeof(int), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
    else if (n_s)
    {
        MPI_Send(    buf_s, n_s*sizeof(int), MPI_BYTE, rank_s, 0,
                     dd->mpi_comm_all);
    }
    else if (n_r)
    {
        MPI_Recv(    buf_r, n_r*sizeof(int), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
#endif
}

void dd_sendrecv_real(const gmx_domdec_t gmx_unused *dd,
                      int gmx_unused ddimind, int gmx_unused direction,
                      real gmx_unused *buf_s, int gmx_unused n_s,
                      real gmx_unused *buf_r, int gmx_unused n_r)
{
#ifdef GMX_MPI
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        MPI_Sendrecv(buf_s, n_s*sizeof(real), MPI_BYTE, rank_s, 0,
                     buf_r, n_r*sizeof(real), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
    else if (n_s)
    {
        MPI_Send(    buf_s, n_s*sizeof(real), MPI_BYTE, rank_s, 0,
                     dd->mpi_comm_all);
    }
    else if (n_r)
    {
        MPI_Recv(    buf_r, n_r*sizeof(real), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
#endif
}

void dd_sendrecv_rvec(const gmx_domdec_t gmx_unused *dd,
                      int gmx_unused ddimind, int gmx_unused direction,
                      rvec gmx_unused *buf_s, int gmx_unused n_s,
                      rvec gmx_unused *buf_r, int gmx_unused n_r)
{
#ifdef GMX_MPI
    int        rank_s, rank_r;
    MPI_Status stat;

    rank_s = dd->neighbor[ddimind][direction == dddirForward ? 0 : 1];
    rank_r = dd->neighbor[ddimind][direction == dddirForward ? 1 : 0];

    if (n_s && n_r)
    {
        MPI_Sendrecv(buf_s[0], n_s*sizeof(rvec), MPI_BYTE, rank_s, 0,
                     buf_r[0], n_r*sizeof(rvec), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
    else if (n_s)
    {
        MPI_Send(    buf_s[0], n_s*sizeof(rvec), MPI_BYTE, rank_s, 0,
                     dd->mpi_comm_all);
    }
    else if (n_r)
    {
        MPI_Recv(    buf_r[0], n_r*sizeof(rvec), MPI_BYTE, rank_r, 0,
                     dd->mpi_comm_all, &stat);
    }
#endif
}
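
/* Bidirectional variant of the exchange above: dd_sendrecv2_rvec moves
 * rvec data both forward and backward along dimension ddimind in one
 * call. By default it posts non-blocking sends and receives and waits
 * on all of them; setting the GMX_DD_SENDRECV2 environment variable
 * switches to two ordered MPI_Sendrecv phases instead (see the
 * comments inside the function).
 */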

void dd_sendrecv2_rvec(const gmx_domdec_t gmx_unused *dd,
                       int gmx_unused ddimind,
                       rvec gmx_unused *buf_s_fw, int gmx_unused n_s_fw,
                       rvec gmx_unused *buf_r_fw, int gmx_unused n_r_fw,
                       rvec gmx_unused *buf_s_bw, int gmx_unused n_s_bw,
                       rvec gmx_unused *buf_r_bw, int gmx_unused n_r_bw)
{
#ifdef GMX_MPI
    int         rank_fw, rank_bw, nreq;
    MPI_Request req[4];
    MPI_Status  stat[4];

    rank_fw = dd->neighbor[ddimind][0];
    rank_bw = dd->neighbor[ddimind][1];

    if (!dd->bSendRecv2)
    {
        /* Try to send and receive in two directions simultaneously.
         * Should be faster, especially on machines
         * with full 3D communication networks.
         * However, it could be that communication libraries are
         * optimized for MPI_Sendrecv and non-blocking MPI calls
         * are slower.
         * SendRecv2 can be turned on with the env.var. GMX_DD_SENDRECV2
         */
        nreq = 0;
        if (n_r_fw)
        {
            MPI_Irecv(buf_r_fw[0], n_r_fw*sizeof(rvec), MPI_BYTE,
                      rank_bw, 0, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_r_bw)
        {
            MPI_Irecv(buf_r_bw[0], n_r_bw*sizeof(rvec), MPI_BYTE,
                      rank_fw, 1, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_s_fw)
        {
            MPI_Isend(buf_s_fw[0], n_s_fw*sizeof(rvec), MPI_BYTE,
                      rank_fw, 0, dd->mpi_comm_all, &req[nreq++]);
        }
        if (n_s_bw)
        {
            MPI_Isend(buf_s_bw[0], n_s_bw*sizeof(rvec), MPI_BYTE,
                      rank_bw, 1, dd->mpi_comm_all, &req[nreq++]);
        }
        if (nreq)
        {
            MPI_Waitall(nreq, req, stat);
        }
    }
    else
    {
        /* Communicate in two ordered phases.
         * This is slower, even on a dual-core Opteron cluster
         * with a single full-duplex network connection per machine.
         */
        /* Forward */
        MPI_Sendrecv(buf_s_fw[0], n_s_fw*sizeof(rvec), MPI_BYTE, rank_fw, 0,
                     buf_r_fw[0], n_r_fw*sizeof(rvec), MPI_BYTE, rank_bw, 0,
                     dd->mpi_comm_all, &stat[0]);
        /* Backward */
        MPI_Sendrecv(buf_s_bw[0], n_s_bw*sizeof(rvec), MPI_BYTE, rank_bw, 0,
                     buf_r_bw[0], n_r_bw*sizeof(rvec), MPI_BYTE, rank_fw, 0,
                     dd->mpi_comm_all, &stat[0]);
    }
#endif
}
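
/* Illustrative sketch only, not the code this file or GROMACS actually
 * contains: the bSendRecv2 flag tested above is set once during DD
 * setup, presumably from the environment, roughly like
 *
 *     dd->bSendRecv2 = (getenv("GMX_DD_SENDRECV2") != NULL);
 *
 * after which every call to dd_sendrecv2_rvec() takes the ordered
 * two-phase path.
 */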

/* IBM's BlueGene(/L) MPI_Bcast dereferences the data pointer
 * even when 0 == nbytes, so we protect calls to it on BlueGene.
 * Fortunately dd_bcast() and dd_bcastc() are only
 * called during DD setup and partition.
 */

void dd_bcast(gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *data)
{
#ifdef GMX_MPI
#ifdef GMX_BLUEGENE
    if (nbytes > 0)
    {
#endif
    MPI_Bcast(data, nbytes, MPI_BYTE,
              DDMASTERRANK(dd), dd->mpi_comm_all);
#ifdef GMX_BLUEGENE
    }
#endif
#endif
}

void dd_bcastc(gmx_domdec_t *dd, int nbytes, void *src, void *dest)
{
    if (DDMASTER(dd))
    {
        memcpy(dest, src, nbytes);
    }
#ifdef GMX_MPI
#ifdef GMX_BLUEGENE
    if (nbytes > 0)
    {
#endif
    MPI_Bcast(dest, nbytes, MPI_BYTE,
              DDMASTERRANK(dd), dd->mpi_comm_all);
#ifdef GMX_BLUEGENE
    }
#endif
#endif
}
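
/* The wrappers below map directly onto the corresponding MPI
 * collectives over dd->mpi_comm_all, with DDMASTERRANK(dd) as the
 * root rank; all counts and displacements are given in bytes.
 * Without GMX_MPI they do nothing.
 */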

void dd_scatter(gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *src, void gmx_unused *dest)
{
#ifdef GMX_MPI
    MPI_Scatter(src, nbytes, MPI_BYTE,
                dest, nbytes, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}

void dd_gather(gmx_domdec_t gmx_unused *dd, int gmx_unused nbytes, void gmx_unused *src, void gmx_unused *dest)
{
#ifdef GMX_MPI
    MPI_Gather(src, nbytes, MPI_BYTE,
               dest, nbytes, MPI_BYTE,
               DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}

void dd_scatterv(gmx_domdec_t gmx_unused *dd,
                 int gmx_unused *scounts, int gmx_unused *disps, void gmx_unused *sbuf,
                 int gmx_unused rcount, void gmx_unused *rbuf)
{
#ifdef GMX_MPI
    int dum;

    if (rcount == 0)
    {
        /* MPI does not allow NULL pointers */
        rbuf = &dum;
    }
    MPI_Scatterv(sbuf, scounts, disps, MPI_BYTE,
                 rbuf, rcount, MPI_BYTE,
                 DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}

void dd_gatherv(gmx_domdec_t gmx_unused *dd,
                int gmx_unused scount, void gmx_unused *sbuf,
                int gmx_unused *rcounts, int gmx_unused *disps, void gmx_unused *rbuf)
{
#ifdef GMX_MPI
    int dum;

    if (scount == 0)
    {
        /* MPI does not allow NULL pointers */
        sbuf = &dum;
    }
    MPI_Gatherv(sbuf, scount, MPI_BYTE,
                rbuf, rcounts, disps, MPI_BYTE,
                DDMASTERRANK(dd), dd->mpi_comm_all);
#endif
}
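
/* Usage sketch with hypothetical buffer and count names (for
 * illustration only; not code from this file): the master rank
 * distributes a variable number of bytes to every DD rank and later
 * gathers the results back.
 *
 *     dd_scatterv(dd, scounts, displs, master_buf,
 *                 nbytes_local, local_buf);
 *     ... work on local_buf on every rank ...
 *     dd_gatherv(dd, nbytes_local, local_buf,
 *                rcounts, displs, master_buf);
 *
 * As with MPI_Scatterv/MPI_Gatherv, the count/displacement arrays and
 * the master-side buffer only need to be valid on DDMASTERRANK(dd).
 */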