Fix use of config.h in halo exchange tests
[gromacs.git] / src / gromacs / domdec / tests / haloexchange_mpi.cpp
blob 224d54e934a8880e1ed56104b42c827d0afd0106
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Tests for the halo exchange
 *
 * The test sets up the rank topology and performs a coordinate halo
 * exchange (for both CPU and GPU codepaths) for several 1D and 2D
 * pulse configurations. Each pulse involves a few non-contiguous
 * indices. The sending rank, atom number and spatial 3D index are
 * encoded in the x values, to allow correctness checking following
 * the halo exchange.
 *
 * \todo Add 3D case
 *
 * \author Alan Gray <alang@nvidia.com>
 * \ingroup module_domdec
 */

#include "gmxpre.h"

#include "config.h"
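// The config.h include above provides the GMX_GPU_CUDA and GMX_THREAD_MPI
// macros that are used below to select the GPU halo-exchange code path.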

#include <array>

#include <gtest/gtest.h>

#include "gromacs/domdec/atomdistribution.h"
#include "gromacs/domdec/domdec_internal.h"
#include "gromacs/domdec/gpuhaloexchange.h"
#if GMX_GPU_CUDA
#    include "gromacs/gpu_utils/device_stream.h"
#    include "gromacs/gpu_utils/devicebuffer.h"
#    include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#endif
#include "gromacs/gpu_utils/hostallocator.h"
#include "gromacs/mdtypes/inputrec.h"

#include "testutils/mpitest.h"
#include "testutils/test_hardware_environment.h"

namespace gmx
{
namespace test
{
namespace
{

/*! \brief Get encoded numerical value for sending rank, atom number and spatial 3D index
 *
 * \param [in] sendRank       MPI rank of sender
 * \param [in] atomNumber     Atom number
 * \param [in] spatial3dIndex Spatial 3D Index
 *
 * \returns                   Encoded value
 */
float encodedValue(const int sendRank, const int atomNumber, const int spatial3dIndex)
{
    return sendRank * 1000 + atomNumber * 100 + spatial3dIndex;
}
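
// Example: encodedValue(2, 5, 1) == 2501 encodes rank 2, atom 5, y component.
// With at most 4 ranks and home-atom indices 0-9 in these tests, the three
// fields occupy separate decimal digits, so the encoding is unambiguous.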

/*! \brief Initialize halo array
 *
 * \param [in] x              Atom coordinate data array
 * \param [in] numHomeAtoms   Number of home atoms
 * \param [in] numAtomsTotal  Total number of atoms, including halo
 */
void initHaloData(RVec* x, const int numHomeAtoms, const int numAtomsTotal)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    for (int i = 0; i < numAtomsTotal; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            x[i][j] = i < numHomeAtoms ? encodedValue(rank, i, j) : -1;
        }
    }
}
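
// Halo slots (i >= numHomeAtoms) are set to -1 so that any slot the halo
// exchange fails to overwrite shows up as a mismatch in the checks below.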

/*! \brief Perform GPU halo exchange, including required setup and data transfers
 *
 * \param [in] dd             Domain decomposition object
 * \param [in] box            Box matrix
 * \param [in] h_x            Atom coordinate data array on host
 * \param [in] numAtomsTotal  Total number of atoms, including halo
 */
void gpuHalo(gmx_domdec_t* dd, matrix box, RVec* h_x, int numAtomsTotal)
{
#if (GMX_GPU_CUDA && GMX_THREAD_MPI)
    // Set up GPU hardware environment and assign this MPI rank to a device
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    int         numDevices = getTestHardwareEnvironment()->getTestDeviceList().size();
    const auto& testDevice = getTestHardwareEnvironment()->getTestDeviceList()[rank % numDevices];
    const auto& deviceContext = testDevice->deviceContext();
    setActiveDevice(testDevice->deviceInfo());
    DeviceStream deviceStream(deviceContext, DeviceStreamPriority::Normal, false);

    // Set up GPU buffer and copy input data from host
    DeviceBuffer<RVec> d_x;
    int                d_x_size       = -1;
    int                d_x_size_alloc = -1;
    reallocateDeviceBuffer(&d_x, numAtomsTotal, &d_x_size, &d_x_size_alloc, deviceContext);

    copyToDeviceBuffer(&d_x, h_x, 0, numAtomsTotal, deviceStream, GpuApiCallBehavior::Sync, nullptr);

    GpuEventSynchronizer coordinatesReadyOnDeviceEvent;
    coordinatesReadyOnDeviceEvent.markEvent(deviceStream);

    // Perform GPU halo exchange
    for (int d = 0; d < dd->ndim; d++)
    {
        for (int pulse = 0; pulse < dd->comm->cd[d].numPulses(); pulse++)
        {
            GpuHaloExchange gpuHaloExchange(dd, d, MPI_COMM_WORLD, deviceContext, deviceStream,
                                            deviceStream, pulse, nullptr);
            gpuHaloExchange.reinitHalo(d_x, nullptr);
            gpuHaloExchange.communicateHaloCoordinates(box, &coordinatesReadyOnDeviceEvent);
        }
    }

    GpuEventSynchronizer haloCompletedEvent;
    haloCompletedEvent.markEvent(deviceStream);
    haloCompletedEvent.waitForEvent();

    // Copy results back to host
    copyFromDeviceBuffer(h_x, &d_x, 0, numAtomsTotal, deviceStream, GpuApiCallBehavior::Sync, nullptr);

    freeDeviceBuffer(&d_x);
#else
    GMX_UNUSED_VALUE(dd);
    GMX_UNUSED_VALUE(box);
    GMX_UNUSED_VALUE(h_x);
    GMX_UNUSED_VALUE(numAtomsTotal);
#endif
}
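
// gpuHalo() passes the same stream as both the local and non-local stream of
// GpuHaloExchange and waits on an event marked in that stream, so the
// device-to-host copy only runs after all pulses have completed. In builds
// without CUDA or thread-MPI the body compiles to a no-op, matching the
// guards around the GPU repeats in the tests below.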

/*! \brief Define 1D rank topology with 4 MPI tasks
 *
 * \param [in] dd  Domain decomposition object
 */
void define1dRankTopology(gmx_domdec_t* dd)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    dd->neighbor[0][0] = (rank + 1) % 4;
    dd->neighbor[0][1] = (rank == 0) ? 3 : rank - 1;
}
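
// With 4 ranks this forms a periodic ring: forward neighbours are 1, 2, 3, 0
// and backward neighbours are 3, 0, 1, 2 for ranks 0-3 respectively.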

/*! \brief Define 2D rank topology with 4 MPI tasks
 *
 *    -----
 *   | 2 3 |
 *   | 0 1 |
 *    -----
 *
 * \param [in] dd  Domain decomposition object
 */
void define2dRankTopology(gmx_domdec_t* dd)
{

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    switch (rank)
    {
        case 0:
            dd->neighbor[0][0] = 1;
            dd->neighbor[0][1] = 1;
            dd->neighbor[1][0] = 2;
            dd->neighbor[1][1] = 2;
            break;
        case 1:
            dd->neighbor[0][0] = 0;
            dd->neighbor[0][1] = 0;
            dd->neighbor[1][0] = 3;
            dd->neighbor[1][1] = 3;
            break;
        case 2:
            dd->neighbor[0][0] = 3;
            dd->neighbor[0][1] = 3;
            dd->neighbor[1][0] = 0;
            dd->neighbor[1][1] = 0;
            break;
        case 3:
            dd->neighbor[0][0] = 2;
            dd->neighbor[0][1] = 2;
            dd->neighbor[1][0] = 1;
            dd->neighbor[1][1] = 1;
            break;
    }
}
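
// In this 2x2 layout each dimension has only two domains, so the forward and
// backward neighbours coincide in each dimension (neighbor[d][0] == neighbor[d][1]).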

/*! \brief Define a 1D halo with 1 pulse
 *
 * \param [in] dd      Domain decomposition object
 * \param [in] indvec  Vector of index vectors
 */
void define1dHaloWith1Pulse(gmx_domdec_t* dd, std::vector<gmx_domdec_ind_t>* indvec)
{

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    std::vector<int> indexvec;
    gmx_domdec_ind_t ind;

    dd->ndim     = 1;
    int nzone    = 1;
    int dimIndex = 0;

    // Set up indices involved in halo
    indexvec.clear();
    indvec->clear();

    dd->comm->cd[dimIndex].receiveInPlace = true;
    dd->dim[dimIndex]                     = 0;
    dd->ci[dimIndex]                      = rank;

    // First pulse involves (arbitrary) indices 1 and 3
    indexvec.push_back(1);
    indexvec.push_back(3);

    ind.index            = indexvec;
    ind.nsend[nzone + 1] = 2;
    ind.nrecv[nzone + 1] = 2;
    indvec->push_back(ind);

    dd->comm->cd[dimIndex].ind = *indvec;
}
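
// Layout produced above: dd_move_x() appends the forward neighbour's atoms 1
// and 3 after the home atoms, in slots numHomeAtoms and numHomeAtoms + 1
// (checked in checkResults1dHaloWith1Pulse below).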

/*! \brief Define a 1D halo with 2 pulses
 *
 * \param [in] dd      Domain decomposition object
 * \param [in] indvec  Vector of index vectors
 */
void define1dHaloWith2Pulses(gmx_domdec_t* dd, std::vector<gmx_domdec_ind_t>* indvec)
{

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    std::vector<int> indexvec;
    gmx_domdec_ind_t ind;

    dd->ndim     = 1;
    int nzone    = 1;
    int dimIndex = 0;

    // Set up indices involved in halo
    indexvec.clear();
    indvec->clear();

    dd->comm->cd[dimIndex].receiveInPlace = true;
    dd->dim[dimIndex]                     = 0;
    dd->ci[dimIndex]                      = rank;

    // First pulse involves (arbitrary) indices 1 and 3
    indexvec.push_back(1);
    indexvec.push_back(3);

    ind.index            = indexvec;
    ind.nsend[nzone + 1] = 2;
    ind.nrecv[nzone + 1] = 2;
    indvec->push_back(ind);

    // Add another pulse with (arbitrary) indices 4,5,7
    indexvec.clear();

    indexvec.push_back(4);
    indexvec.push_back(5);
    indexvec.push_back(7);

    ind.index            = indexvec;
    ind.nsend[nzone + 1] = 3;
    ind.nrecv[nzone + 1] = 3;
    indvec->push_back(ind);

    dd->comm->cd[dimIndex].ind = *indvec;
}
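
// Layout produced above: the two pulses append 2 + 3 = 5 halo atoms after the
// home atoms, in the order 1, 3 (first pulse) then 4, 5, 7 (second pulse), all
// received from the forward neighbour in the single decomposed dimension.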

/*! \brief Define a 2D halo with 1 pulse in each dimension
 *
 * \param [in] dd      Domain decomposition object
 * \param [in] indvec  Vector of index vectors
 */
void define2dHaloWith1PulseInEachDim(gmx_domdec_t* dd, std::vector<gmx_domdec_ind_t>* indvec)
{

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    std::vector<int> indexvec;
    gmx_domdec_ind_t ind;

    dd->ndim  = 2;
    int nzone = 1;
    for (int dimIndex = 0; dimIndex < dd->ndim; dimIndex++)
    {

        // Set up indices involved in halo
        indexvec.clear();
        indvec->clear();

        dd->comm->cd[dimIndex].receiveInPlace = true;
        dd->dim[dimIndex]                     = 0;
        dd->ci[dimIndex]                      = rank;

        // Single pulse involving (arbitrary) indices 1 and 3
        indexvec.push_back(1);
        indexvec.push_back(3);

        ind.index            = indexvec;
        ind.nsend[nzone + 1] = 2;
        ind.nrecv[nzone + 1] = 2;
        indvec->push_back(ind);

        dd->comm->cd[dimIndex].ind = *indvec;

        nzone += nzone;
    }
}
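
// Layout produced above: 2 halo atoms from the first dimension's forward
// neighbour followed by 2 from the second dimension's, 4 halo atoms in total.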

/*! \brief Define a 2D halo with 2 pulses in the first dimension
 *
 * \param [in] dd      Domain decomposition object
 * \param [in] indvec  Vector of index vectors
 */
void define2dHaloWith2PulsesInDim1(gmx_domdec_t* dd, std::vector<gmx_domdec_ind_t>* indvec)
{

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    std::vector<int> indexvec;
    gmx_domdec_ind_t ind;

    dd->ndim  = 2;
    int nzone = 1;
    for (int dimIndex = 0; dimIndex < dd->ndim; dimIndex++)
    {

        // Set up indices involved in halo
        indexvec.clear();
        indvec->clear();

        dd->comm->cd[dimIndex].receiveInPlace = true;
        dd->dim[dimIndex]                     = 0;
        dd->ci[dimIndex]                      = rank;

        // First pulse involves (arbitrary) indices 1 and 3
        indexvec.push_back(1);
        indexvec.push_back(3);

        ind.index            = indexvec;
        ind.nsend[nzone + 1] = 2;
        ind.nrecv[nzone + 1] = 2;
        indvec->push_back(ind);

        if (dimIndex == 0) // Add another pulse with (arbitrary) indices 4,5,7
        {
            indexvec.clear();

            indexvec.push_back(4);
            indexvec.push_back(5);
            indexvec.push_back(7);

            ind.index            = indexvec;
            ind.nsend[nzone + 1] = 3;
            ind.nrecv[nzone + 1] = 3;
            indvec->push_back(ind);
        }

        dd->comm->cd[dimIndex].ind = *indvec;

        nzone += nzone;
    }
}
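
// Layout produced above: 5 halo atoms from the first dimension (2 + 3 over its
// two pulses) followed by 2 from the second dimension, 7 halo atoms in total.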

/*! \brief Check results for above-defined 1D halo with 1 pulse
 *
 * \param [in] x             Atom coordinate data array
 * \param [in] dd            Domain decomposition object
 * \param [in] numHomeAtoms  Number of home atoms
 */
void checkResults1dHaloWith1Pulse(const RVec* x, const gmx_domdec_t* dd, const int numHomeAtoms)
{
    // Check results are expected from values encoded in x data
    for (int j = 0; j < DIM; j++)
    {
        // First Pulse in first dim: atoms 1 and 3 from forward horizontal neighbour
        EXPECT_EQ(x[numHomeAtoms][j], encodedValue(dd->neighbor[0][0], 1, j));
        EXPECT_EQ(x[numHomeAtoms + 1][j], encodedValue(dd->neighbor[0][0], 3, j));
    }
}

/*! \brief Check results for above-defined 1D halo with 2 pulses
 *
 * \param [in] x             Atom coordinate data array
 * \param [in] dd            Domain decomposition object
 * \param [in] numHomeAtoms  Number of home atoms
 */
void checkResults1dHaloWith2Pulses(const RVec* x, const gmx_domdec_t* dd, const int numHomeAtoms)
{
    // Check results are expected from values encoded in x data
    for (int j = 0; j < DIM; j++)
    {
        // First Pulse in first dim: atoms 1 and 3 from forward horizontal neighbour
        EXPECT_EQ(x[numHomeAtoms][j], encodedValue(dd->neighbor[0][0], 1, j));
        EXPECT_EQ(x[numHomeAtoms + 1][j], encodedValue(dd->neighbor[0][0], 3, j));
        // Second Pulse in first dim: atoms 4,5,7 from forward horizontal neighbour
        EXPECT_EQ(x[numHomeAtoms + 2][j], encodedValue(dd->neighbor[0][0], 4, j));
        EXPECT_EQ(x[numHomeAtoms + 3][j], encodedValue(dd->neighbor[0][0], 5, j));
        EXPECT_EQ(x[numHomeAtoms + 4][j], encodedValue(dd->neighbor[0][0], 7, j));
    }
}

/*! \brief Check results for above-defined 2D halo with 1 pulse in each dimension
 *
 * \param [in] x             Atom coordinate data array
 * \param [in] dd            Domain decomposition object
 * \param [in] numHomeAtoms  Number of home atoms
 */
void checkResults2dHaloWith1PulseInEachDim(const RVec* x, const gmx_domdec_t* dd, const int numHomeAtoms)
{
    // Check results are expected from values encoded in x data
    for (int j = 0; j < DIM; j++)
    {
        // First Pulse in first dim: atoms 1 and 3 from forward horizontal neighbour
        EXPECT_EQ(x[numHomeAtoms][j], encodedValue(dd->neighbor[0][0], 1, j));
        EXPECT_EQ(x[numHomeAtoms + 1][j], encodedValue(dd->neighbor[0][0], 3, j));
        // First Pulse in second dim: atoms 1 and 3 from forward vertical neighbour
        EXPECT_EQ(x[numHomeAtoms + 2][j], encodedValue(dd->neighbor[1][0], 1, j));
        EXPECT_EQ(x[numHomeAtoms + 3][j], encodedValue(dd->neighbor[1][0], 3, j));
    }
}

/*! \brief Check results for above-defined 2D halo with 2 pulses in the first dimension
 *
 * \param [in] x             Atom coordinate data array
 * \param [in] dd            Domain decomposition object
 * \param [in] numHomeAtoms  Number of home atoms
 */
void checkResults2dHaloWith2PulsesInDim1(const RVec* x, const gmx_domdec_t* dd, const int numHomeAtoms)
{
    // Check results are expected from values encoded in x data
    for (int j = 0; j < DIM; j++)
    {
        // First Pulse in first dim: atoms 1 and 3 from forward horizontal neighbour
        EXPECT_EQ(x[numHomeAtoms][j], encodedValue(dd->neighbor[0][0], 1, j));
        EXPECT_EQ(x[numHomeAtoms + 1][j], encodedValue(dd->neighbor[0][0], 3, j));
        // Second Pulse in first dim: atoms 4,5,7 from forward horizontal neighbour
        EXPECT_EQ(x[numHomeAtoms + 2][j], encodedValue(dd->neighbor[0][0], 4, j));
        EXPECT_EQ(x[numHomeAtoms + 3][j], encodedValue(dd->neighbor[0][0], 5, j));
        EXPECT_EQ(x[numHomeAtoms + 4][j], encodedValue(dd->neighbor[0][0], 7, j));
        // First Pulse in second dim: atoms 1 and 3 from forward vertical neighbour
        EXPECT_EQ(x[numHomeAtoms + 5][j], encodedValue(dd->neighbor[1][0], 1, j));
        EXPECT_EQ(x[numHomeAtoms + 6][j], encodedValue(dd->neighbor[1][0], 3, j));
    }
}

TEST(HaloExchangeTest, Coordinates1dHaloWith1Pulse)
{
    GMX_MPI_TEST(4);

    // Set up atom data
    const int        numHomeAtoms  = 10;
    const int        numHaloAtoms  = 2;
    const int        numAtomsTotal = numHomeAtoms + numHaloAtoms;
    HostVector<RVec> h_x;
    changePinningPolicy(&h_x, PinningPolicy::PinnedIfSupported);
    h_x.resize(numAtomsTotal);

    initHaloData(h_x.data(), numHomeAtoms, numAtomsTotal);

    // Set up dd
    t_inputrec   ir;
    gmx_domdec_t dd(ir);
    dd.mpi_comm_all = MPI_COMM_WORLD;
    gmx_domdec_comm_t comm;
    dd.comm                      = &comm;
    dd.unitCellInfo.haveScrewPBC = false;

    DDAtomRanges atomRanges;
    atomRanges.setEnd(DDAtomRanges::Type::Home, numHomeAtoms);
    dd.comm->atomRanges = atomRanges;

    define1dRankTopology(&dd);

    std::vector<gmx_domdec_ind_t> indvec;
    define1dHaloWith1Pulse(&dd, &indvec);

    // Perform halo exchange
    matrix box = { { 0., 0., 0. } };
    dd_move_x(&dd, box, static_cast<ArrayRef<RVec>>(h_x), nullptr);

    // Check results
    checkResults1dHaloWith1Pulse(h_x.data(), &dd, numHomeAtoms);

    if (GMX_GPU_CUDA && GMX_THREAD_MPI) // repeat with GPU halo codepath
    {
        // Re-initialize input
        initHaloData(h_x.data(), numHomeAtoms, numAtomsTotal);

        // Perform GPU halo exchange
        gpuHalo(&dd, box, h_x.data(), numAtomsTotal);

        // Check results
        checkResults1dHaloWith1Pulse(h_x.data(), &dd, numHomeAtoms);
    }
}
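
// In each test the GPU repeat re-initializes the input and calls the same check
// function, so the CUDA halo path is validated against exactly the same encoded
// expectations as the CPU dd_move_x() path.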

TEST(HaloExchangeTest, Coordinates1dHaloWith2Pulses)
{
    GMX_MPI_TEST(4);

    // Set up atom data
    const int        numHomeAtoms  = 10;
    const int        numHaloAtoms  = 5;
    const int        numAtomsTotal = numHomeAtoms + numHaloAtoms;
    HostVector<RVec> h_x;
    changePinningPolicy(&h_x, PinningPolicy::PinnedIfSupported);
    h_x.resize(numAtomsTotal);

    initHaloData(h_x.data(), numHomeAtoms, numAtomsTotal);

    // Set up dd
    t_inputrec   ir;
    gmx_domdec_t dd(ir);
    dd.mpi_comm_all = MPI_COMM_WORLD;
    gmx_domdec_comm_t comm;
    dd.comm                      = &comm;
    dd.unitCellInfo.haveScrewPBC = false;

    DDAtomRanges atomRanges;
    atomRanges.setEnd(DDAtomRanges::Type::Home, numHomeAtoms);
    dd.comm->atomRanges = atomRanges;

    define1dRankTopology(&dd);

    std::vector<gmx_domdec_ind_t> indvec;
    define1dHaloWith2Pulses(&dd, &indvec);

    // Perform halo exchange
    matrix box = { { 0., 0., 0. } };
    dd_move_x(&dd, box, static_cast<ArrayRef<RVec>>(h_x), nullptr);

    // Check results
    checkResults1dHaloWith2Pulses(h_x.data(), &dd, numHomeAtoms);

    if (GMX_GPU_CUDA && GMX_THREAD_MPI) // repeat with GPU halo codepath
    {
        // Re-initialize input
        initHaloData(h_x.data(), numHomeAtoms, numAtomsTotal);

        // Perform GPU halo exchange
        gpuHalo(&dd, box, h_x.data(), numAtomsTotal);

        // Check results
        checkResults1dHaloWith2Pulses(h_x.data(), &dd, numHomeAtoms);
    }
}

TEST(HaloExchangeTest, Coordinates2dHaloWith1PulseInEachDim)
{
    GMX_MPI_TEST(4);

    // Set up atom data
    const int        numHomeAtoms  = 10;
    const int        numHaloAtoms  = 4;
    const int        numAtomsTotal = numHomeAtoms + numHaloAtoms;
    HostVector<RVec> h_x;
    changePinningPolicy(&h_x, PinningPolicy::PinnedIfSupported);
    h_x.resize(numAtomsTotal);

    initHaloData(h_x.data(), numHomeAtoms, numAtomsTotal);

    // Set up dd
    t_inputrec   ir;
    gmx_domdec_t dd(ir);
    dd.mpi_comm_all = MPI_COMM_WORLD;
    gmx_domdec_comm_t comm;
    dd.comm                      = &comm;
    dd.unitCellInfo.haveScrewPBC = false;

    DDAtomRanges atomRanges;
    atomRanges.setEnd(DDAtomRanges::Type::Home, numHomeAtoms);
    dd.comm->atomRanges = atomRanges;

    define2dRankTopology(&dd);

    std::vector<gmx_domdec_ind_t> indvec;
    define2dHaloWith1PulseInEachDim(&dd, &indvec);

    // Perform halo exchange
    matrix box = { { 0., 0., 0. } };
    dd_move_x(&dd, box, static_cast<ArrayRef<RVec>>(h_x), nullptr);

    // Check results
    checkResults2dHaloWith1PulseInEachDim(h_x.data(), &dd, numHomeAtoms);

    if (GMX_GPU_CUDA && GMX_THREAD_MPI) // repeat with GPU halo codepath
    {
        // Re-initialize input
        initHaloData(h_x.data(), numHomeAtoms, numAtomsTotal);

        // Perform GPU halo exchange
        gpuHalo(&dd, box, h_x.data(), numAtomsTotal);

        // Check results
        checkResults2dHaloWith1PulseInEachDim(h_x.data(), &dd, numHomeAtoms);
    }
}

TEST(HaloExchangeTest, Coordinates2dHaloWith2PulsesInDim1)
{
    GMX_MPI_TEST(4);

    // Set up atom data
    const int        numHomeAtoms  = 10;
    const int        numHaloAtoms  = 7;
    const int        numAtomsTotal = numHomeAtoms + numHaloAtoms;
    HostVector<RVec> h_x;
    changePinningPolicy(&h_x, PinningPolicy::PinnedIfSupported);
    h_x.resize(numAtomsTotal);

    initHaloData(h_x.data(), numHomeAtoms, numAtomsTotal);

    // Set up dd
    t_inputrec   ir;
    gmx_domdec_t dd(ir);
    dd.mpi_comm_all = MPI_COMM_WORLD;
    gmx_domdec_comm_t comm;
    dd.comm                      = &comm;
    dd.unitCellInfo.haveScrewPBC = false;

    DDAtomRanges atomRanges;
    atomRanges.setEnd(DDAtomRanges::Type::Home, numHomeAtoms);
    dd.comm->atomRanges = atomRanges;

    define2dRankTopology(&dd);

    std::vector<gmx_domdec_ind_t> indvec;
    define2dHaloWith2PulsesInDim1(&dd, &indvec);

    // Perform halo exchange
    matrix box = { { 0., 0., 0. } };
    dd_move_x(&dd, box, static_cast<ArrayRef<RVec>>(h_x), nullptr);

    // Check results
    checkResults2dHaloWith2PulsesInDim1(h_x.data(), &dd, numHomeAtoms);

#if (GMX_GPU_CUDA && GMX_THREAD_MPI) // repeat with GPU halo codepath
    // Re-initialize input
    initHaloData(h_x.data(), numHomeAtoms, numAtomsTotal);

    // Perform GPU halo exchange
    gpuHalo(&dd, box, h_x.data(), numAtomsTotal);

    // Check results
    checkResults2dHaloWith2PulsesInDim1(h_x.data(), &dd, numHomeAtoms);
#endif
}

} // namespace
} // namespace test
} // namespace gmx