Partial conversion of gmx_domdec_t to C++
[gromacs.git] / src / gromacs / domdec / domdec_specatomcomm.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2006,2007,2008,2009,2010,2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief This file implements functions for domdec to use
 * while managing inter-atomic constraints.
 *
 * \author Berk Hess <hess@kth.se>
 *
 * \ingroup module_domdec
 */
#include "gmxpre.h"

#include "domdec_specatomcomm.h"

#include <assert.h>

#include <algorithm>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_network.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/ga2la.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"

#include "hash.h"
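
/* Communicate the forces on "special" atoms (constraint- and vsite-connected
 * atoms that live on neighboring domains) back to their home ranks and
 * accumulate them there. This is the reverse operation of
 * dd_move_x_specat() below.
 */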
void dd_move_f_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
                      rvec *f, rvec *fshift)
{
    gmx_specatsend_t *spas;
    rvec             *vbuf;
    int               n, n0, n1, d, dim, dir, i;
    ivec              vis;
    int               is;
    gmx_bool          bPBC, bScrew;

    n = spac->at_end;
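    /* Loop over the decomposition dimensions in the reverse order of
     * dd_move_x_specat(), peeling the received atom blocks off the end of
     * the special-atom range [spac->at_start, spac->at_end).
     */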
    for (d = dd->ndim-1; d >= 0; d--)
    {
        dim = dd->dim[d];
        if (dd->nc[dim] > 2)
        {
            /* Pulse the grid forward and backward */
            spas = spac->spas[d];
            n0   = spas[0].nrecv;
            n1   = spas[1].nrecv;
            n   -= n1 + n0;
            vbuf = spac->vbuf;
            /* Send and receive the forces */
            dd_sendrecv2_rvec(dd, d,
                              f+n+n1, n0, vbuf, spas[0].nsend,
                              f+n, n1, vbuf+spas[0].nsend, spas[1].nsend);
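            /* vbuf now holds one force contribution for every atom this
             * rank sent out during the coordinate pulse; sum them into
             * the local force array (and into the shift forces if needed).
             */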
            for (dir = 0; dir < 2; dir++)
            {
                bPBC   = ((dir == 0 && dd->ci[dim] == 0) ||
                          (dir == 1 && dd->ci[dim] == dd->nc[dim]-1));
                bScrew = (bPBC && dd->bScrewPBC && dim == XX);

                spas = &spac->spas[d][dir];
                /* Sum the buffer into the required forces */
                if (!bPBC || (!bScrew && fshift == nullptr))
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        rvec_inc(f[spas->a[i]], *vbuf);
                        vbuf++;
                    }
                }
                else
                {
                    clear_ivec(vis);
                    vis[dim] = (dir == 0 ? 1 : -1);
                    is       = IVEC2IS(vis);
                    if (!bScrew)
                    {
                        /* Sum and add to shift forces */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            rvec_inc(f[spas->a[i]], *vbuf);
                            rvec_inc(fshift[is], *vbuf);
                            vbuf++;
                        }
                    }
                    else
                    {
                        /* Rotate the forces */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            f[spas->a[i]][XX] += (*vbuf)[XX];
                            f[spas->a[i]][YY] -= (*vbuf)[YY];
                            f[spas->a[i]][ZZ] -= (*vbuf)[ZZ];
                            if (fshift)
                            {
                                rvec_inc(fshift[is], *vbuf);
                            }
                            vbuf++;
                        }
                    }
                }
            }
        }
        else
        {
            /* Two cells, so we only need to communicate one way */
            spas = &spac->spas[d][0];
            n   -= spas->nrecv;
            /* Send and receive the forces */
            dd_sendrecv_rvec(dd, d, dddirForward,
                             f+n, spas->nrecv, spac->vbuf, spas->nsend);
            /* Sum the buffer into the required forces */
            if (dd->bScrewPBC && dim == XX &&
                (dd->ci[dim] == 0 ||
                 dd->ci[dim] == dd->nc[dim]-1))
            {
                for (i = 0; i < spas->nsend; i++)
                {
                    /* Rotate the force */
                    f[spas->a[i]][XX] += spac->vbuf[i][XX];
                    f[spas->a[i]][YY] -= spac->vbuf[i][YY];
                    f[spas->a[i]][ZZ] -= spac->vbuf[i][ZZ];
                }
            }
            else
            {
                for (i = 0; i < spas->nsend; i++)
                {
                    rvec_inc(f[spas->a[i]], spac->vbuf[i]);
                }
            }
        }
    }
}
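
/* Communicate the coordinates of the special atoms to the ranks that
 * requested them. With screw PBC the y- and z-coordinates of atoms
 * crossing the periodic boundary along x are rotated as well as shifted.
 */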
void dd_move_x_specat(gmx_domdec_t *dd, gmx_domdec_specat_comm_t *spac,
                      const matrix box,
                      rvec *x0,
                      rvec *x1, gmx_bool bX1IsCoord)
{
    gmx_specatsend_t *spas;
    rvec             *x, *vbuf, *rbuf;
    int               nvec, v, n, nn, ns0, ns1, nr0, nr1, nr, d, dim, dir, i;
    gmx_bool          bPBC, bScrew = FALSE;
    rvec              shift = {0, 0, 0};

    nvec = 1;
    if (x1 != nullptr)
    {
        nvec++;
    }

    n = spac->at_start;
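    /* Pulse the coordinates dimension by dimension; atoms received from
     * the neighbors are appended to x0 (and x1) starting at spac->at_start.
     */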
    for (d = 0; d < dd->ndim; d++)
    {
        dim = dd->dim[d];
        if (dd->nc[dim] > 2)
        {
            /* Pulse the grid forward and backward */
            vbuf = spac->vbuf;
            for (dir = 0; dir < 2; dir++)
            {
                if (dir == 0 && dd->ci[dim] == 0)
                {
                    bPBC   = TRUE;
                    bScrew = (dd->bScrewPBC && dim == XX);
                    copy_rvec(box[dim], shift);
                }
                else if (dir == 1 && dd->ci[dim] == dd->nc[dim]-1)
                {
                    bPBC   = TRUE;
                    bScrew = (dd->bScrewPBC && dim == XX);
                    for (i = 0; i < DIM; i++)
                    {
                        shift[i] = -box[dim][i];
                    }
                }
                else
                {
                    bPBC   = FALSE;
                    bScrew = FALSE;
                }
                spas = &spac->spas[d][dir];
                for (v = 0; v < nvec; v++)
                {
                    x = (v == 0 ? x0 : x1);
                    /* Copy the required coordinates to the send buffer */
                    if (!bPBC || (v == 1 && !bX1IsCoord))
                    {
                        /* Only copy */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            copy_rvec(x[spas->a[i]], *vbuf);
                            vbuf++;
                        }
                    }
                    else if (!bScrew)
                    {
                        /* Shift coordinates */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            rvec_add(x[spas->a[i]], shift, *vbuf);
                            vbuf++;
                        }
                    }
                    else
                    {
                        /* Shift and rotate coordinates */
                        for (i = 0; i < spas->nsend; i++)
                        {
                            (*vbuf)[XX] = x[spas->a[i]][XX] + shift[XX];
                            (*vbuf)[YY] = box[YY][YY] - x[spas->a[i]][YY] + shift[YY];
                            (*vbuf)[ZZ] = box[ZZ][ZZ] - x[spas->a[i]][ZZ] + shift[ZZ];
                            vbuf++;
                        }
                    }
                }
            }
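            /* With two vectors the send buffer is packed per direction as
             * nsend entries of x0 followed by nsend entries of x1, so all
             * counts below are simply doubled.
             */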
            /* Send and receive the coordinates */
            spas = spac->spas[d];
            ns0  = spas[0].nsend;
            nr0  = spas[0].nrecv;
            ns1  = spas[1].nsend;
            nr1  = spas[1].nrecv;
            if (nvec == 1)
            {
                dd_sendrecv2_rvec(dd, d,
                                  spac->vbuf+ns0, ns1, x0+n, nr1,
                                  spac->vbuf, ns0, x0+n+nr1, nr0);
            }
            else
            {
                /* Communicate both vectors in one buffer */
                rbuf = spac->vbuf2;
                dd_sendrecv2_rvec(dd, d,
                                  spac->vbuf+2*ns0, 2*ns1, rbuf, 2*nr1,
                                  spac->vbuf, 2*ns0, rbuf+2*nr1, 2*nr0);
                /* Split the buffer into the two vectors */
                nn = n;
                for (dir = 1; dir >= 0; dir--)
                {
                    nr = spas[dir].nrecv;
                    for (v = 0; v < 2; v++)
                    {
                        x = (v == 0 ? x0 : x1);
                        for (i = 0; i < nr; i++)
                        {
                            copy_rvec(*rbuf, x[nn+i]);
                            rbuf++;
                        }
                    }
                    nn += nr;
                }
            }
            n += nr0 + nr1;
        }
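        /* With only two cells along this dimension the forward and backward
         * neighbor are the same rank, so a single pulse is sufficient.
         */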
        else
        {
            spas = &spac->spas[d][0];
            /* Copy the required coordinates to the send buffer */
            vbuf = spac->vbuf;
            for (v = 0; v < nvec; v++)
            {
                x = (v == 0 ? x0 : x1);
                if (dd->bScrewPBC && dim == XX &&
                    (dd->ci[XX] == 0 || dd->ci[XX] == dd->nc[XX]-1))
                {
                    /* Here we only perform the rotation, the rest of the pbc
                     * is handled in the constraint or vsite routines.
                     */
                    for (i = 0; i < spas->nsend; i++)
                    {
                        (*vbuf)[XX] = x[spas->a[i]][XX];
                        (*vbuf)[YY] = box[YY][YY] - x[spas->a[i]][YY];
                        (*vbuf)[ZZ] = box[ZZ][ZZ] - x[spas->a[i]][ZZ];
                        vbuf++;
                    }
                }
                else
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        copy_rvec(x[spas->a[i]], *vbuf);
                        vbuf++;
                    }
                }
            }
            /* Send and receive the coordinates */
            if (nvec == 1)
            {
                dd_sendrecv_rvec(dd, d, dddirBackward,
                                 spac->vbuf, spas->nsend, x0+n, spas->nrecv);
            }
            else
            {
                /* Communicate both vectors in one buffer */
                rbuf = spac->vbuf2;
                dd_sendrecv_rvec(dd, d, dddirBackward,
                                 spac->vbuf, 2*spas->nsend, rbuf, 2*spas->nrecv);
                /* Split the buffer into the two vectors */
                nr = spas[0].nrecv;
                for (v = 0; v < 2; v++)
                {
                    x = (v == 0 ? x0 : x1);
                    for (i = 0; i < nr; i++)
                    {
                        copy_rvec(*rbuf, x[n+i]);
                        rbuf++;
                    }
                }
            }
            n += spas->nrecv;
        }
    }
}
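
/* Set up the communication for special atoms. This proceeds in two phases:
 * first the lists of requested global atom indices are passed on to the
 * neighboring ranks, then the search runs in the opposite direction and
 * the matching atoms found on each rank are sent back. Returns the new
 * total number of home plus special atoms on this rank.
 */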
int setup_specat_communication(gmx_domdec_t             *dd,
                               ind_req_t                *ireq,
                               gmx_domdec_specat_comm_t *spac,
                               gmx_hash_t               *ga2la_specat,
                               int                       at_start,
                               int                       vbuf_fac,
                               const char               *specat_type,
                               const char               *add_err)
{
    int               nsend[2], nlast, nsend_zero[2] = {0, 0}, *nsend_ptr;
    int               d, dim, ndir, dir, nr, ns, i, nrecv_local, n0, start, indr, ind, buf[2];
    int               nat_tot_specat, nat_tot_prev, nalloc_old;
    gmx_bool          bPBC;
    gmx_specatsend_t *spas;

    if (debug)
    {
        fprintf(debug, "Begin setup_specat_communication for %s\n", specat_type);
    }
    /* nsend[0]: the number of atoms requested by this node only,
     *           we communicate this for more efficient checks
     * nsend[1]: the total number of requested atoms
     */
    nsend[0] = ireq->n;
    nsend[1] = nsend[0];
    nlast    = nsend[1];
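    /* Phase 1: pass the requested indices on to the neighboring ranks.
     * Each pulse appends the neighbor's requests to ireq->ind, so after
     * this loop the list holds our own requests plus everything the
     * downstream ranks asked for.
     */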
    for (d = dd->ndim-1; d >= 0; d--)
    {
        /* Pulse the grid forward and backward */
        dim  = dd->dim[d];
        bPBC = (dim < dd->npbcdim);
        if (dd->nc[dim] == 2)
        {
            /* Only 2 cells, so we only need to communicate once */
            ndir = 1;
        }
        else
        {
            ndir = 2;
        }
        for (dir = 0; dir < ndir; dir++)
        {
            if (!bPBC &&
                dd->nc[dim] > 2 &&
                ((dir == 0 && dd->ci[dim] == dd->nc[dim] - 1) ||
                 (dir == 1 && dd->ci[dim] == 0)))
            {
                /* No pbc: the first/last cell should not request atoms */
                nsend_ptr = nsend_zero;
            }
            else
            {
                nsend_ptr = nsend;
            }
            /* Communicate the number of indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirForward : dddirBackward,
                            nsend_ptr, 2, spac->nreq[d][dir], 2);
            nr = spac->nreq[d][dir][1];
            if (nlast+nr > ireq->nalloc)
            {
                ireq->nalloc = over_alloc_dd(nlast+nr);
                srenew(ireq->ind, ireq->nalloc);
            }
            /* Communicate the indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirForward : dddirBackward,
                            ireq->ind, nsend_ptr[1], ireq->ind+nlast, nr);
            nlast += nr;
        }
        nsend[1] = nlast;
    }
    if (debug)
    {
        fprintf(debug, "Communicated the counts\n");
    }
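    /* Phase 2: walk the dimensions in the opposite order. For each pulse,
     * look up the requested indices among the home atoms and the special
     * atoms already communicated, and return the matches to the requesting
     * neighbor.
     */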
    /* Search for the requested atoms and communicate the indices we have */
    nat_tot_specat = at_start;
    nrecv_local    = 0;
    for (d = 0; d < dd->ndim; d++)
    {
        /* Pulse the grid forward and backward */
        if (dd->dim[d] >= dd->npbcdim || dd->nc[dd->dim[d]] > 2)
        {
            ndir = 2;
        }
        else
        {
            ndir = 1;
        }
        nat_tot_prev = nat_tot_specat;
        for (dir = ndir-1; dir >= 0; dir--)
        {
            if (nat_tot_specat > spac->bSendAtom_nalloc)
            {
                nalloc_old             = spac->bSendAtom_nalloc;
                spac->bSendAtom_nalloc = over_alloc_dd(nat_tot_specat);
                srenew(spac->bSendAtom, spac->bSendAtom_nalloc);
                for (i = nalloc_old; i < spac->bSendAtom_nalloc; i++)
                {
                    spac->bSendAtom[i] = FALSE;
                }
            }
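            /* bSendAtom flags atoms already scheduled for this pulse, so
             * that indirectly requested atoms are sent at most once;
             * directly requested atoms (i < n0) are always sent.
             */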
            spas = &spac->spas[d][dir];
            n0   = spac->nreq[d][dir][0];
            nr   = spac->nreq[d][dir][1];
            if (debug)
            {
                fprintf(debug, "dim=%d, dir=%d, searching for %d atoms\n",
                        d, dir, nr);
            }
            start       = nlast - nr;
            spas->nsend = 0;
            nsend[0]    = 0;
            for (i = 0; i < nr; i++)
            {
                indr = ireq->ind[start+i];
                ind  = -1;
                /* Check if this is a home atom and if so ind will be set */
                if (!ga2la_get_home(dd->ga2la, indr, &ind))
                {
                    /* Search in the communicated atoms */
                    ind = gmx_hash_get_minone(ga2la_specat, indr);
                }
                if (ind >= 0)
                {
                    if (i < n0 || !spac->bSendAtom[ind])
                    {
                        if (spas->nsend+1 > spas->a_nalloc)
                        {
                            spas->a_nalloc = over_alloc_large(spas->nsend+1);
                            srenew(spas->a, spas->a_nalloc);
                        }
                        /* Store the local index so we know which coordinates
                         * to send out later.
                         */
                        spas->a[spas->nsend] = ind;
                        spac->bSendAtom[ind] = TRUE;
                        if (spas->nsend+1 > spac->ibuf_nalloc)
                        {
                            spac->ibuf_nalloc = over_alloc_large(spas->nsend+1);
                            srenew(spac->ibuf, spac->ibuf_nalloc);
                        }
                        /* Store the global index so we can send it now */
                        spac->ibuf[spas->nsend] = indr;
                        if (i < n0)
                        {
                            nsend[0]++;
                        }
                        spas->nsend++;
                    }
                }
            }
            nlast = start;
            /* Clear the local flags */
            for (i = 0; i < spas->nsend; i++)
            {
                spac->bSendAtom[spas->a[i]] = FALSE;
            }
            /* Send and receive the number of indices to communicate */
            nsend[1] = spas->nsend;
            dd_sendrecv_int(dd, d, dir == 0 ? dddirBackward : dddirForward,
                            nsend, 2, buf, 2);
            if (debug)
            {
                fprintf(debug, "Send to rank %d, %d (%d) indices, "
                        "receive from rank %d, %d (%d) indices\n",
                        dd->neighbor[d][1-dir], nsend[1], nsend[0],
                        dd->neighbor[d][dir], buf[1], buf[0]);
                if (gmx_debug_at)
                {
                    for (i = 0; i < spas->nsend; i++)
                    {
                        fprintf(debug, " %d", spac->ibuf[i]+1);
                    }
                    fprintf(debug, "\n");
                }
            }
            nrecv_local += buf[0];
            spas->nrecv  = buf[1];
            dd->globalAtomIndices.resize(nat_tot_specat + spas->nrecv);
            /* Send and receive the indices */
            dd_sendrecv_int(dd, d, dir == 0 ? dddirBackward : dddirForward,
                            spac->ibuf, spas->nsend,
                            dd->globalAtomIndices.data() + nat_tot_specat, spas->nrecv);
            nat_tot_specat += spas->nrecv;
        }
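        /* vbuf_fac is 2 when two coordinate vectors are communicated per
         * atom (see dd_move_x_specat), in which case the separate receive
         * buffer vbuf2 is needed for splitting them apart afterwards.
         */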
        /* Allocate the x/f communication buffers */
        ns = spac->spas[d][0].nsend;
        nr = spac->spas[d][0].nrecv;
        if (ndir == 2)
        {
            ns += spac->spas[d][1].nsend;
            nr += spac->spas[d][1].nrecv;
        }
        if (vbuf_fac*ns > spac->vbuf_nalloc)
        {
            spac->vbuf_nalloc = over_alloc_dd(vbuf_fac*ns);
            srenew(spac->vbuf, spac->vbuf_nalloc);
        }
        if (vbuf_fac == 2 && vbuf_fac*nr > spac->vbuf2_nalloc)
        {
            spac->vbuf2_nalloc = over_alloc_dd(vbuf_fac*nr);
            srenew(spac->vbuf2, spac->vbuf2_nalloc);
        }

        /* Make a global to local index for the communication atoms */
        for (i = nat_tot_prev; i < nat_tot_specat; i++)
        {
            gmx_hash_change_or_set(ga2la_specat, dd->globalAtomIndices[i], i);
        }
    }
    /* Check that in the end we got the number of atoms we asked for */
    if (nrecv_local != ireq->n)
    {
        if (debug)
        {
            fprintf(debug, "Requested %d, received %d (tot recv %d)\n",
                    ireq->n, nrecv_local, nat_tot_specat-at_start);
            if (gmx_debug_at)
            {
                for (i = 0; i < ireq->n; i++)
                {
                    ind = gmx_hash_get_minone(ga2la_specat, ireq->ind[i]);
                    fprintf(debug, " %s%d",
                            (ind >= 0) ? "" : "!",
                            ireq->ind[i]+1);
                }
                fprintf(debug, "\n");
            }
        }
        fprintf(stderr, "\nDD cell %d %d %d: Neighboring cells do not have atoms:",
                dd->ci[XX], dd->ci[YY], dd->ci[ZZ]);
        for (i = 0; i < ireq->n; i++)
        {
            if (gmx_hash_get_minone(ga2la_specat, ireq->ind[i]) < 0)
            {
                fprintf(stderr, " %d", ireq->ind[i]+1);
            }
        }
        fprintf(stderr, "\n");
        gmx_fatal(FARGS, "DD cell %d %d %d could only obtain %d of the %d atoms that are connected via %ss from the neighboring cells. This probably means your %s lengths are too long compared to the domain decomposition cell size. Decrease the number of domain decomposition grid cells%s%s.",
                  dd->ci[XX], dd->ci[YY], dd->ci[ZZ],
                  nrecv_local, ireq->n, specat_type,
                  specat_type, add_err,
                  dd_dlb_is_on(dd) ? " or use the -rcon option of mdrun" : "");
    }
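    /* All requests were satisfied: record the range of special atoms so
     * dd_move_x_specat() and dd_move_f_specat() know which part of the
     * arrays to communicate.
     */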
    spac->at_start = at_start;
    spac->at_end   = nat_tot_specat;

    if (debug)
    {
        fprintf(debug, "Done setup_specat_communication\n");
    }

    return nat_tot_specat;
}
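
/* Allocate and initialize a special-atom communication struct with one
 * index request list per thread.
 */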
gmx_domdec_specat_comm_t *specat_comm_init(int nthread)
{
    gmx_domdec_specat_comm_t *spac;

    snew(spac, 1);
    spac->nthread = nthread;
    snew(spac->ireq, spac->nthread);

    return spac;
}