/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Implements common routines for PME tests.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pmetestcommon.h"

#include <cstring>
#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme_gather.h"
#include "gromacs/ewald/pme_gpu_calculate_splines.h"
#include "gromacs/ewald/pme_gpu_constants.h"
#include "gromacs/ewald/pme_gpu_internal.h"
#include "gromacs/ewald/pme_gpu_staging.h"
#include "gromacs/ewald/pme_grid.h"
#include "gromacs/ewald/pme_internal.h"
#include "gromacs/ewald/pme_redistribute.h"
#include "gromacs/ewald/pme_solve.h"
#include "gromacs/ewald/pme_spread.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#include "testutils/testasserts.h"

#include "testhardwarecontexts.h"
namespace gmx
{
namespace test
{

bool pmeSupportsInputForMode(const gmx_hw_info_t& hwinfo, const t_inputrec* inputRec, CodePath mode)
{
    bool       implemented;
    gmx_mtop_t mtop;
    switch (mode)
    {
        case CodePath::CPU: implemented = true; break;

        case CodePath::GPU:
            implemented = (pme_gpu_supports_build(nullptr) && pme_gpu_supports_hardware(hwinfo, nullptr)
                           && pme_gpu_supports_input(*inputRec, mtop, nullptr));
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return implemented;
}
uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
{
    /* Arbitrary ulp tolerance for sine/cosine implementation. It's
     * hard to know what to pick without testing lots of
     * implementations. */
    const uint64_t sineUlps = 10;
    return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
}
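// As a sanity check on the formula above: for the default PME interpolation
// order of 4, the tolerance evaluates to 4 * (4 - 2) + 2 * 10 * 4 = 88 ulps.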
//! PME initialization
PmeSafePointer pmeInitWrapper(const t_inputrec*        inputRec,
                              const CodePath           mode,
                              const DeviceInformation* deviceInfo,
                              const PmeGpuProgram*     pmeGpuProgram,
                              const Matrix3x3&         box,
                              const real               ewaldCoeff_q,
                              const real               ewaldCoeff_lj)
{
    const MDLogger dummyLogger;
    const auto     runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::Mixed;
    t_commrec      dummyCommrec  = { 0 };
    NumPmeDomains  numPmeDomains = { 1, 1 };
    gmx_pme_t*     pmeDataRaw =
            gmx_pme_init(&dummyCommrec, numPmeDomains, inputRec, false, false, true, ewaldCoeff_q,
                         ewaldCoeff_lj, 1, runMode, nullptr, deviceInfo, pmeGpuProgram, dummyLogger);
    PmeSafePointer pme(pmeDataRaw); // taking ownership

    // TODO get rid of this with proper matrix type
    matrix boxTemp;
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            boxTemp[i][j] = box[i * DIM + j];
        }
    }
    const char* boxError = check_box(PbcType::Unset, boxTemp);
    GMX_RELEASE_ASSERT(boxError == nullptr, boxError);

    switch (mode)
    {
        case CodePath::CPU: invertBoxMatrix(boxTemp, pme->recipbox); break;

        case CodePath::GPU:
            pme_gpu_set_testing(pme->gpu, true);
            pme_gpu_update_input_box(pme->gpu, boxTemp);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pme;
}
//! Simple PME initialization based on input, no atom data
PmeSafePointer pmeInitEmpty(const t_inputrec*        inputRec,
                            const CodePath           mode,
                            const DeviceInformation* deviceInfo,
                            const PmeGpuProgram*     pmeGpuProgram,
                            const Matrix3x3&         box,
                            const real               ewaldCoeff_q,
                            const real               ewaldCoeff_lj)
{
    return pmeInitWrapper(inputRec, mode, deviceInfo, pmeGpuProgram, box, ewaldCoeff_q, ewaldCoeff_lj);
    // hiding the fact that PME actually needs to know the number of atoms in advance
}
PmeSafePointer pmeInitEmpty(const t_inputrec* inputRec)
{
    const Matrix3x3 defaultBox = { { 1.0F, 0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 1.0F } };
    return pmeInitWrapper(inputRec, CodePath::CPU, nullptr, nullptr, defaultBox, 0.0F, 0.0F);
}
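// A minimal usage sketch (hypothetical test code, not part of this file):
// CPU-path initialization from an input record alone, with RAII cleanup via
// PmeSafePointer:
//   PmeSafePointer pmeSafe = pmeInitEmpty(&inputRec);
//   EXPECT_NE(pmeSafe.get(), nullptr);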
//! Make a GPU state-propagator manager
std::unique_ptr<StatePropagatorDataGpu> makeStatePropagatorDataGpu(const gmx_pme_t&     pme,
                                                                   const DeviceContext& deviceContext)
{
    // TODO: Pin the host buffer and use async memory copies
    // TODO: A special constructor for PME-only ranks / PME tests is used here. There should be
    //       a mechanism to prevent use of the other constructor here.
    return std::make_unique<StatePropagatorDataGpu>(pme_gpu_get_device_stream(&pme), deviceContext,
                                                    GpuApiCallBehavior::Sync,
                                                    pme_gpu_get_block_size(&pme), nullptr);
}
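// Design note: GpuApiCallBehavior::Sync makes every transfer through this
// manager blocking, which keeps the single-threaded tests deterministic at
// the cost of the overlap that async copies (see the TODO above) could offer.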
//! PME initialization with atom data
void pmeInitAtoms(gmx_pme_t*               pme,
                  StatePropagatorDataGpu*  stateGpu,
                  const CodePath           mode,
                  const CoordinatesVector& coordinates,
                  const ChargesVector&     charges)
{
    const index atomCount = coordinates.size();
    GMX_RELEASE_ASSERT(atomCount == charges.ssize(), "Mismatch in atom data");
    PmeAtomComm* atc = nullptr;

    switch (mode)
    {
        case CodePath::CPU:
            atc              = &(pme->atc[0]);
            atc->x           = coordinates;
            atc->coefficient = charges;
            gmx_pme_reinit_atoms(pme, atomCount, charges.data());
            /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
            break;

        case CodePath::GPU:
            // TODO: Avoid use of atc in the GPU code path
            atc = &(pme->atc[0]);
            // We need to set atc->n for passing the size in the tests
            atc->setNumAtoms(atomCount);
            gmx_pme_reinit_atoms(pme, atomCount, charges.data());

            stateGpu->reinit(atomCount, atomCount);
            stateGpu->copyCoordinatesToGpu(arrayRefFromArray(coordinates.data(), coordinates.size()),
                                           gmx::AtomLocality::All);
            pme_gpu_set_kernelparam_coordinates(pme->gpu, stateGpu->getCoordinates());
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
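// A minimal end-to-end initialization sketch (hypothetical test code; the
// variable names are illustrative, not part of this file):
//   PmeSafePointer pmeSafe = pmeInitWrapper(&inputRec, mode, deviceInfo, pmeGpuProgram,
//                                           box, ewaldCoeff_q, ewaldCoeff_lj);
//   std::unique_ptr<StatePropagatorDataGpu> stateGpu =
//           (mode == CodePath::GPU) ? makeStatePropagatorDataGpu(*pmeSafe, deviceContext) : nullptr;
//   pmeInitAtoms(pmeSafe.get(), stateGpu.get(), mode, coordinates, charges);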
//! Getting local PME real grid pointer for test I/O
static real* pmeGetRealGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->fftgrid[gridIndex];
}
//! Getting local PME real grid dimensions
static void pmeGetRealGridSizesInternal(const gmx_pme_t* pme,
                                        CodePath         mode,
                                        IVec& gridSize,       //NOLINT(google-runtime-references)
                                        IVec& paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused;
    switch (mode)
    {
        case CodePath::CPU:
            gmx_parallel_3dfft_real_limits(pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused,
                                           paddedGridSize);
            break;

        case CodePath::GPU:
            pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting local PME complex grid pointer for test I/O
static t_complex* pmeGetComplexGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->cfftgrid[gridIndex];
}
//! Getting local PME complex grid dimensions
static void pmeGetComplexGridSizesInternal(const gmx_pme_t* pme,
                                           IVec& gridSize,       //NOLINT(google-runtime-references)
                                           IVec& paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused, complexOrderUnused;
    gmx_parallel_3dfft_complex_limits(pme->pfft_setup[gridIndex], complexOrderUnused, gridSize,
                                      gridOffsetUnused, paddedGridSize); // TODO: what about YZX ordering?
}
//! Getting the PME grid memory buffer and its sizes - template definition
template<typename ValueType>
static void pmeGetGridAndSizesInternal(const gmx_pme_t* /*unused*/,
                                       CodePath         /*unused*/,
                                       ValueType*& /*unused*/, //NOLINT(google-runtime-references)
                                       IVec& /*unused*/,       //NOLINT(google-runtime-references)
                                       IVec& /*unused*/)       //NOLINT(google-runtime-references)
{
    GMX_THROW(InternalError("Deleted function call"));
    // explicitly deleting general template does not compile in clang/icc, see https://llvm.org/bugs/show_bug.cgi?id=17537
}
//! Getting the PME real grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<real>(const gmx_pme_t* pme, CodePath mode, real*& grid, IVec& gridSize, IVec& paddedGridSize)
{
    grid = pmeGetRealGridInternal(pme);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
}
//! Getting the PME complex grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t* pme,
                                           CodePath /*mode*/,
                                           t_complex*& grid,
                                           IVec&       gridSize,
                                           IVec&       paddedGridSize)
{
    grid = pmeGetComplexGridInternal(pme);
    pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
}
//! PME spline calculation and charge spreading
void pmePerformSplineAndSpread(gmx_pme_t* pme,
                               CodePath   mode, // TODO const qualifiers elsewhere
                               bool       computeSplines,
                               bool       spreadCharges)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    PmeAtomComm* atc                          = &(pme->atc[0]);
    const size_t gridIndex                    = 0;
    const bool   computeSplinesForZeroCharges = true;
    real*        fftgrid                      = spreadCharges ? pme->fftgrid[gridIndex] : nullptr;
    real*        pmegrid                      = pme->pmegrid[gridIndex].grid.grid;

    switch (mode)
    {
        case CodePath::CPU:
            spread_on_grid(pme, atc, &pme->pmegrid[gridIndex], computeSplines, spreadCharges,
                           fftgrid, computeSplinesForZeroCharges, gridIndex);
            if (spreadCharges && !pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, pmegrid);
                copy_pmegrid_to_fftgrid(pme, pmegrid, fftgrid, gridIndex);
            }
            break;

        case CodePath::GPU:
        {
            // no synchronization needed as x is transferred in the PME stream
            GpuEventSynchronizer* xReadyOnDevice = nullptr;
            pme_gpu_spread(pme->gpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
        }
        break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
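// A typical spread test (hypothetical snippet): compute splines and spread
// charges in one call, then read back the non-zero real-grid values:
//   pmePerformSplineAndSpread(pmeSafe.get(), mode, true, true);
//   pmeFinalizeTest(pmeSafe.get(), mode);
//   SparseRealGridValuesOutput gridValues = pmeGetRealGrid(pmeSafe.get(), mode);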
//! Getting the internal spline data buffer pointer
static real* pmeGetSplineDataInternal(const gmx_pme_t* pme, PmeSplineDataType type, int dimIndex)
{
    GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
    const PmeAtomComm* atc          = &(pme->atc[0]);
    const size_t       threadIndex  = 0;
    real*              splineBuffer = nullptr;
    switch (type)
    {
        case PmeSplineDataType::Values:
            splineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            break;

        case PmeSplineDataType::Derivatives:
            splineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }
    return splineBuffer;
}
//! PME solving
void pmePerformSolve(const gmx_pme_t*  pme,
                     CodePath          mode,
                     PmeSolveAlgorithm method,
                     real              cellVolume,
                     GridOrdering      gridOrdering,
                     bool              computeEnergyAndVirial)
{
    t_complex*   h_grid              = pmeGetComplexGridInternal(pme);
    const bool   useLorentzBerthelot = false;
    const size_t threadIndex         = 0;
    switch (mode)
    {
        case CodePath::CPU:
            if (gridOrdering != GridOrdering::YZX)
            {
                GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    solve_pme_yzx(pme, h_grid, cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    solve_pme_lj_yzx(pme, &h_grid, useLorentzBerthelot, cellVolume,
                                     computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_solve(pme->gpu, h_grid, gridOrdering, computeEnergyAndVirial);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
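// A typical solve test flow (hypothetical snippet): upload a sparse complex
// grid, run the solver, then fetch the energy and virial:
//   pmeSetComplexGrid(pmeSafe.get(), mode, gridOrdering, inputGridValues);
//   pmePerformSolve(pmeSafe.get(), mode, method, cellVolume, gridOrdering, true);
//   pmeFinalizeTest(pmeSafe.get(), mode);
//   PmeOutput output = pmeGetReciprocalEnergyAndVirial(pmeSafe.get(), mode, method);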
//! PME force gathering
void pmePerformGather(gmx_pme_t* pme, CodePath mode, ForcesVector& forces)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(forces.ssize() == atomCount, "Invalid force buffer size");
    const real   scale       = 1.0;
    const size_t threadIndex = 0;
    const size_t gridIndex   = 0;
    real*        pmegrid     = pme->pmegrid[gridIndex].grid.grid;
    real*        fftgrid     = pme->fftgrid[gridIndex];

    switch (mode)
    {
        case CodePath::CPU:
            atc->f = forces;
            if (atc->nthread == 1)
            {
                // something which is normally done in serial spline computation (make_thread_local_ind())
                atc->spline[threadIndex].n = atomCount;
            }
            copy_fftgrid_to_pmegrid(pme, fftgrid, pmegrid, gridIndex, pme->nthread, threadIndex);
            unwrap_periodic_pmegrid(pme, pmegrid);
            gather_f_bsplines(pme, pmegrid, true, atc, &atc->spline[threadIndex], scale);
            break;

        case CodePath::GPU:
        {
            // Variable initialization needs a non-switch scope
            const bool computeEnergyAndVirial = false;
            PmeOutput  output = pme_gpu_getOutput(*pme, computeEnergyAndVirial);
            GMX_ASSERT(forces.size() == output.forces_.size(),
                       "Size of force buffers did not match");
            pme_gpu_gather(pme->gpu, reinterpret_cast<float*>(fftgrid));
            std::copy(std::begin(output.forces_), std::end(output.forces_), std::begin(forces));
        }
        break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
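// Hypothetical gather test snippet: stage a sparse real grid, gather, and
// inspect the per-atom forces (forceBuffer is an illustrative
// std::vector<gmx::RVec> of size atomCount, not part of this file):
//   pmeSetRealGrid(pmeSafe.get(), mode, nonZeroGridValues);
//   ForcesVector forces = forceBuffer;
//   pmePerformGather(pmeSafe.get(), mode, forces);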
//! PME test finalization before fetching the outputs
void pmeFinalizeTest(const gmx_pme_t* pme, CodePath mode)
{
    switch (mode)
    {
        case CodePath::CPU: break;

        case CodePath::GPU: pme_gpu_synchronize(pme->gpu); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! A binary enum for spline data layout transformation
enum class PmeLayoutTransform
{
    GpuToHost,
    HostToGpu
};
/*! \brief Gets a unique index to an element in a spline parameter buffer.
 *
 * These theta/dtheta buffers are laid out for GPU spread/gather
 * kernels. The index is wrt the execution block, in range(0,
 * atomsPerBlock * order * DIM).
 *
 * This is a wrapper, only used in unit tests.
 * \param[in] order         PME order
 * \param[in] splineIndex   Spline contribution index (from 0 to \p order - 1)
 * \param[in] dimIndex      Dimension index (from 0 to 2)
 * \param[in] atomIndex     Atom index wrt the block.
 * \param[in] atomsPerWarp  Number of atoms processed by a warp.
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
static int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
{
    if (order != c_pmeGpuOrder)
    {
        throw order;
    }
    constexpr int fixedOrder = c_pmeGpuOrder;
    GMX_UNUSED_VALUE(fixedOrder);

    const int atomWarpIndex = atomIndex % atomsPerWarp;
    const int warpIndex     = atomIndex / atomsPerWarp;
    int       indexBase, result;
    switch (atomsPerWarp)
    {
        case 1:
            indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
            break;

        case 2:
            indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
            break;

        case 4:
            indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
            break;

        case 8:
            indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
            break;

        default:
            GMX_THROW(NotImplementedError(
                    formatString("Test function call not unrolled for atomsPerWarp = %d in "
                                 "getSplineParamFullIndex",
                                 atomsPerWarp)));
    }
    return result;
}
/*!\brief Return the number of atoms per warp */
static int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
{
    const int order = pmeGpu->common->pme_order;
    const int threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    return pmeGpu->programHandle_->warpSize() / threadsPerAtom;
}
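// For example, with a warp size of 32 and PME order 4: the ThreadsPerAtom::Order
// layout (4 threads per atom) gives 32 / 4 = 8 atoms per warp, while the
// order*order layout (16 threads per atom) gives 32 / 16 = 2 atoms per warp.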
/*! \brief Rearranges the atom spline data between the GPU and host layouts.
 * Only used for test purposes so far, likely to be horribly slow.
 *
 * \param[in]  pmeGpu     The PME GPU structure.
 * \param[out] atc        The PME CPU atom data structure (with a single-threaded layout).
 * \param[in]  type       The spline data type (values or derivatives).
 * \param[in]  dimIndex   Dimension index.
 * \param[in]  transform  Layout transform type
 */
static void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
                                               const PmeAtomComm* atc,
                                               PmeSplineDataType  type,
                                               int                dimIndex,
                                               PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
    // performance (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = atc->numAtoms();
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;
    GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");

    real*  cpuSplineBuffer;
    float* h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex =
                    getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
                       "Atom spline data index out of bounds (while transforming GPU data layout "
                       "for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default: GMX_THROW(InternalError("Unknown layout transform"));
            }
        }
    }
}
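// Note: for any valid (atom, order, dim) triple the two transforms copy
// between the same pair of indices, so a HostToGpu pass followed by GpuToHost
// should reproduce the original CPU-layout buffer; pmeSetSplineData() and
// pmeGetSplineData() below rely on this pairing.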
//! Setting atom spline values/derivatives to be used in spread/gather
void pmeSetSplineData(const gmx_pme_t*             pme,
                      CodePath                     mode,
                      const SplineParamsDimVector& splineValues,
                      PmeSplineDataType            type,
                      int                          dimIndex)
{
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const index        atomCount = atc->numAtoms();
    const index        pmeOrder  = pme->pme_order;
    const index        dimSize   = pmeOrder * atomCount;
    GMX_RELEASE_ASSERT(dimSize == splineValues.ssize(), "Mismatch in spline data");
    real* splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);

    switch (mode)
    {
        case CodePath::CPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            break;

        case CodePath::GPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Setting gridline indices to be used in spread/gather
void pmeSetGridLineIndices(gmx_pme_t* pme, CodePath mode, const GridLineIndicesVector& gridLineIndices)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(atomCount == gridLineIndices.ssize(), "Mismatch in gridline indices size");

    IVec paddedGridSizeUnused, gridSize(0, 0, 0);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);

    for (const auto& index : gridLineIndices)
    {
        for (int i = 0; i < DIM; i++)
        {
            GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]),
                               "Invalid gridline index");
        }
    }

    switch (mode)
    {
        case CodePath::GPU:
            memcpy(pme_gpu_staging(pme->gpu).h_gridlineIndices, gridLineIndices.data(),
                   atomCount * sizeof(gridLineIndices[0]));
            break;

        case CodePath::CPU:
            atc->idx.resize(gridLineIndices.size());
            std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx.begin());
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting plain index into the complex 3d grid
inline size_t pmeGetGridPlainIndexInternal(const IVec& index, const IVec& paddedGridSize, GridOrdering gridOrdering)
{
    size_t result;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
            break;

        case GridOrdering::XYZ:
            result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
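// Worked example: with a padded grid of 8x8x8, the XYZ branch maps the cell
// (1, 2, 3) to (1 * 8 + 2) * 8 + 3 = 83, while the YZX branch maps the same
// cell to (2 * 8 + 3) * 8 + 1 = 153.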
//! Setting real or complex grid
template<typename ValueType>
static void pmeSetGridInternal(const gmx_pme_t*                        pme,
                               CodePath                                mode,
                               GridOrdering                            gridOrdering,
                               const SparseGridValuesInput<ValueType>& gridValues)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);

    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break, the grid will be copied from the host buffer in testing mode
        case CodePath::CPU:
            memset(grid, 0,
                   paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
            for (const auto& gridValue : gridValues)
            {
                for (int i = 0; i < DIM; i++)
                {
                    GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]),
                                       "Invalid grid value index");
                }
                const size_t gridValueIndex =
                        pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
                grid[gridValueIndex] = gridValue.second;
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Setting real grid to be used in gather
void pmeSetRealGrid(const gmx_pme_t* pme, CodePath mode, const SparseRealGridValuesInput& gridValues)
{
    pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
}
//! Setting complex grid to be used in solve
void pmeSetComplexGrid(const gmx_pme_t*                    pme,
                       CodePath                            mode,
                       GridOrdering                        gridOrdering,
                       const SparseComplexGridValuesInput& gridValues)
{
    pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
}
//! Getting the single dimension's spline values or derivatives
SplineParamsDimVector pmeGetSplineData(const gmx_pme_t* pme, CodePath mode, PmeSplineDataType type, int dimIndex)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();
    const size_t       pmeOrder  = pme->pme_order;
    const size_t       dimSize   = pmeOrder * atomCount;

    real*                 sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
    SplineParamsDimVector result;
    switch (mode)
    {
        case CodePath::GPU:
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        case CodePath::CPU: result = arrayRefFromArray(sourceBuffer, dimSize); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
//! Getting the gridline indices
GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t* pme, CodePath mode)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();

    GridLineIndicesVector gridLineIndices;
    switch (mode)
    {
        case CodePath::GPU:
            gridLineIndices = arrayRefFromArray(
                    reinterpret_cast<IVec*>(pme_gpu_staging(pme->gpu).h_gridlineIndices), atomCount);
            break;

        case CodePath::CPU: gridLineIndices = atc->idx; break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridLineIndices;
}
//! Getting real or complex grid - only non zero values
template<typename ValueType>
static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t* pme,
                                                            CodePath         mode,
                                                            GridOrdering     gridOrdering)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
    SparseGridValuesOutput<ValueType> gridValues;
    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break
        case CodePath::CPU:
            gridValues.clear();
            for (int ix = 0; ix < gridSize[XX]; ix++)
            {
                for (int iy = 0; iy < gridSize[YY]; iy++)
                {
                    for (int iz = 0; iz < gridSize[ZZ]; iz++)
                    {
                        IVec         temp(ix, iy, iz);
                        const size_t gridValueIndex =
                                pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
                        const ValueType value = grid[gridValueIndex];
                        if (value != ValueType{})
                        {
                            auto key        = formatString("Cell %d %d %d", ix, iy, iz);
                            gridValues[key] = value;
                        }
                    }
                }
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridValues;
}
//! Getting the real grid (spreading output of pmePerformSplineAndSpread())
SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t* pme, CodePath mode)
{
    return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
}
//! Getting the complex grid output of pmePerformSolve()
SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t* pme, CodePath mode, GridOrdering gridOrdering)
{
    return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
}
//! Getting the reciprocal energy and virial
PmeOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t* pme, CodePath mode, PmeSolveAlgorithm method)
{
    PmeOutput output;
    switch (mode)
    {
        case CodePath::CPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb: pme_gpu_getEnergyAndVirial(*pme, &output); break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return output;
}

} // namespace test
} // namespace gmx