Convert gmx_domdec_master_t to C++
[gromacs.git] / src / gromacs / domdec / collect.cpp
blob f3a90cce4f148fb86a29f6d84d1bde5ba4e7bffa
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements functions to collect state data to the master rank.
 *
 * \author Berk Hess <hess@kth.se>
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "collect.h"

#include "config.h"

#include "gromacs/domdec/domdec_network.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"

#include "atomdistribution.h"
#include "distribute.h"
#include "domdec_internal.h"
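
/*! \brief Gathers the global charge-group indices and atom counts from all
 * ranks onto the master, unless the master's copy is already in sync with
 * \p state_local (tracked via master_cg_ddp_count). */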
static void dd_collect_cg(gmx_domdec_t  *dd,
                          const t_state *state_local)
{
    if (state_local->ddp_count == dd->comm->master_cg_ddp_count)
    {
        /* The master has the correct distribution */
        return;
    }

    gmx::ArrayRef<const int> atomGroups;
    int                      nat_home = 0;

    if (state_local->ddp_count == dd->ddp_count)
    {
        /* The local state and DD are in sync, use the DD indices */
        atomGroups = gmx::constArrayRefFromArray(dd->index_gl, dd->ncg_home);
        nat_home   = dd->nat_home;
    }
    else if (state_local->ddp_count_cg_gl == state_local->ddp_count)
    {
        /* The DD is out of sync with the local state, but we have stored
         * the cg indices with the local state, so we can use those.
         */
        const t_block &cgs_gl = dd->comm->cgs_gl;

        atomGroups = state_local->cg_gl;
        nat_home   = 0;
        for (const int &i : atomGroups)
        {
            nat_home += cgs_gl.index[i + 1] - cgs_gl.index[i];
        }
    }
    else
    {
        gmx_incons("Attempted to collect a vector for a state for which the charge group distribution is unknown");
    }

    AtomDistribution *ma = dd->ma;

    /* Collect the charge group and atom counts on the master */
    int localBuffer[2] = { static_cast<int>(atomGroups.size()), nat_home };
    dd_gather(dd, 2*sizeof(int), localBuffer,
              DDMASTER(dd) ? ma->intBuffer.data() : nullptr);

    if (DDMASTER(dd))
    {
        int groupOffset = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            auto &domainGroups       = ma->domainGroups[rank];
            int   numGroups          = ma->intBuffer[2*rank];
            domainGroups.atomGroups  = gmx::constArrayRefFromArray(ma->atomGroups.data() + groupOffset, numGroups);
            domainGroups.numAtoms    = ma->intBuffer[2*rank + 1];
            groupOffset             += numGroups;
        }

        if (debug)
        {
            fprintf(debug, "Initial charge group distribution: ");
            for (int rank = 0; rank < dd->nnodes; rank++)
            {
                fprintf(debug, " %zu", ma->domainGroups[rank].atomGroups.size());
            }
            fprintf(debug, "\n");
        }

        /* Make byte counts and indices */
        int offset = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            int numGroups                    = ma->domainGroups[rank].atomGroups.size();
            ma->intBuffer[rank]              = numGroups*sizeof(int);
            ma->intBuffer[dd->nnodes + rank] = offset*sizeof(int);
            offset                          += numGroups;
        }
    }

    /* Collect the charge group indices on the master */
    dd_gatherv(dd,
               atomGroups.size()*sizeof(int), atomGroups.data(),
               DDMASTER(dd) ? ma->intBuffer.data() : nullptr,
               DDMASTER(dd) ? ma->intBuffer.data() + dd->nnodes : nullptr,
               DDMASTER(dd) ? ma->atomGroups.data() : nullptr);

    dd->comm->master_cg_ddp_count = state_local->ddp_count;
}
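
/*! \brief Collects the local rvec vectors \p lv into the global vector \p v
 * on the master using one MPI_Send/MPI_Recv pair per non-master rank;
 * preferred over a collective gather at small rank counts. */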
static void dd_collect_vec_sendrecv(gmx_domdec_t                  *dd,
                                    gmx::ArrayRef<const gmx::RVec> lv,
                                    gmx::ArrayRef<gmx::RVec>       v)
{
    AtomDistribution *ma = dd->ma;

    if (!DDMASTER(dd))
    {
#if GMX_MPI
        MPI_Send(const_cast<void *>(static_cast<const void *>(lv.data())), dd->nat_home*sizeof(rvec), MPI_BYTE,
                 dd->masterrank, dd->rank, dd->mpi_comm_all);
#endif
    }
    else
    {
        /* Copy the master coordinates to the global array */
        const t_block &cgs_gl    = dd->comm->cgs_gl;

        int            rank      = dd->masterrank;
        int            localAtom = 0;
        for (const int &i : ma->domainGroups[rank].atomGroups)
        {
            for (int globalAtom = cgs_gl.index[i]; globalAtom < cgs_gl.index[i + 1]; globalAtom++)
            {
                copy_rvec(lv[localAtom++], v[globalAtom]);
            }
        }

        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            if (rank != dd->rank)
            {
                const auto &domainGroups = ma->domainGroups[rank];

                GMX_RELEASE_ASSERT(v.data() != ma->rvecBuffer.data(), "We need different communication and return buffers");

                /* When we send/recv instead of scatter/gather, we might need
                 * to increase the communication buffer size here.
                 */
                if (static_cast<size_t>(domainGroups.numAtoms) > ma->rvecBuffer.size())
                {
                    ma->rvecBuffer.resize(domainGroups.numAtoms);
                }

#if GMX_MPI
                MPI_Recv(ma->rvecBuffer.data(), domainGroups.numAtoms*sizeof(rvec), MPI_BYTE, rank,
                         rank, dd->mpi_comm_all, MPI_STATUS_IGNORE);
#endif
                int localAtom = 0;
                for (const int &cg : domainGroups.atomGroups)
                {
                    for (int globalAtom = cgs_gl.index[cg]; globalAtom < cgs_gl.index[cg + 1]; globalAtom++)
                    {
                        copy_rvec(ma->rvecBuffer[localAtom++], v[globalAtom]);
                    }
                }
            }
        }
    }
}
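
/*! \brief Collects the local rvec vectors \p lv into the global vector \p v
 * on the master using a single collective gatherv over all ranks. */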
static void dd_collect_vec_gatherv(gmx_domdec_t                  *dd,
                                   gmx::ArrayRef<const gmx::RVec> lv,
                                   gmx::ArrayRef<gmx::RVec>       v)
{
    int *recvCounts    = nullptr;
    int *displacements = nullptr;

    if (DDMASTER(dd))
    {
        get_commbuffer_counts(dd->ma, &recvCounts, &displacements);
    }

    dd_gatherv(dd, dd->nat_home*sizeof(rvec), lv.data(), recvCounts, displacements,
               DDMASTER(dd) ? dd->ma->rvecBuffer.data() : nullptr);

    if (DDMASTER(dd))
    {
        const AtomDistribution &ma     = *dd->ma;

        const t_block          &cgs_gl = dd->comm->cgs_gl;

        int bufferAtom = 0;
        for (int rank = 0; rank < dd->nnodes; rank++)
        {
            const auto &domainGroups = ma.domainGroups[rank];
            for (const int &cg : domainGroups.atomGroups)
            {
                for (int globalAtom = cgs_gl.index[cg]; globalAtom < cgs_gl.index[cg + 1]; globalAtom++)
                {
                    copy_rvec(ma.rvecBuffer[bufferAtom++], v[globalAtom]);
                }
            }
        }
    }
}
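
/* Declared in collect.h: collects a local state vector on the master,
 * choosing between the send/recv and gatherv implementations above
 * based on the number of ranks. */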
void dd_collect_vec(gmx_domdec_t                  *dd,
                    const t_state                 *state_local,
                    gmx::ArrayRef<const gmx::RVec> lv,
                    gmx::ArrayRef<gmx::RVec>       v)
{
    dd_collect_cg(dd, state_local);

    if (dd->nnodes <= c_maxNumRanksUseSendRecvForScatterAndGather)
    {
        dd_collect_vec_sendrecv(dd, lv, v);
    }
    else
    {
        dd_collect_vec_gatherv(dd, lv, v);
    }
}
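
/* Declared in collect.h: collects the complete local state (scalar state,
 * box matrices, thermostat and barostat variables, and whichever of the
 * x, v and cg_p vectors are present) into the global state on the master. */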
void dd_collect_state(gmx_domdec_t *dd,
                      const t_state *state_local, t_state *state)
{
    int nh = state_local->nhchainlength;

    if (DDMASTER(dd))
    {
        GMX_RELEASE_ASSERT(state->nhchainlength == nh, "The global and local Nose-Hoover chain lengths should match");

        for (int i = 0; i < efptNR; i++)
        {
            state->lambda[i] = state_local->lambda[i];
        }
        state->fep_state = state_local->fep_state;
        state->veta      = state_local->veta;
        state->vol0      = state_local->vol0;
        copy_mat(state_local->box, state->box);
        copy_mat(state_local->boxv, state->boxv);
        copy_mat(state_local->svir_prev, state->svir_prev);
        copy_mat(state_local->fvir_prev, state->fvir_prev);
        copy_mat(state_local->pres_prev, state->pres_prev);

        for (int i = 0; i < state_local->ngtc; i++)
        {
            for (int j = 0; j < nh; j++)
            {
                state->nosehoover_xi[i*nh+j]  = state_local->nosehoover_xi[i*nh+j];
                state->nosehoover_vxi[i*nh+j] = state_local->nosehoover_vxi[i*nh+j];
            }
            state->therm_integral[i] = state_local->therm_integral[i];
        }
        for (int i = 0; i < state_local->nnhpres; i++)
        {
            for (int j = 0; j < nh; j++)
            {
                state->nhpres_xi[i*nh+j]  = state_local->nhpres_xi[i*nh+j];
                state->nhpres_vxi[i*nh+j] = state_local->nhpres_vxi[i*nh+j];
            }
        }
        state->baros_integral = state_local->baros_integral;
    }
    if (state_local->flags & (1 << estX))
    {
        gmx::ArrayRef<gmx::RVec> globalXRef = state ? gmx::makeArrayRef(state->x) : gmx::EmptyArrayRef();
        dd_collect_vec(dd, state_local, state_local->x, globalXRef);
    }
    if (state_local->flags & (1 << estV))
    {
        gmx::ArrayRef<gmx::RVec> globalVRef = state ? gmx::makeArrayRef(state->v) : gmx::EmptyArrayRef();
        dd_collect_vec(dd, state_local, state_local->v, globalVRef);
    }
    if (state_local->flags & (1 << estCGP))
    {
        gmx::ArrayRef<gmx::RVec> globalCgpRef = state ? gmx::makeArrayRef(state->cg_p) : gmx::EmptyArrayRef();
        dd_collect_vec(dd, state_local, state_local->cg_p, globalCgpRef);
    }
}