/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Implements common routines for PME tests.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
42 #include "gmxpre.h"
44 #include "pmetestcommon.h"
46 #include <cstring>
48 #include <algorithm>
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/ewald/pme_gather.h"
52 #include "gromacs/ewald/pme_gpu_calculate_splines.h"
53 #include "gromacs/ewald/pme_gpu_constants.h"
54 #include "gromacs/ewald/pme_gpu_internal.h"
55 #include "gromacs/ewald/pme_gpu_staging.h"
56 #include "gromacs/ewald/pme_grid.h"
57 #include "gromacs/ewald/pme_internal.h"
58 #include "gromacs/ewald/pme_redistribute.h"
59 #include "gromacs/ewald/pme_solve.h"
60 #include "gromacs/ewald/pme_spread.h"
61 #include "gromacs/fft/parallel_3dfft.h"
62 #include "gromacs/gpu_utils/gpu_utils.h"
63 #include "gromacs/math/invertmatrix.h"
64 #include "gromacs/mdtypes/commrec.h"
65 #include "gromacs/pbcutil/pbc.h"
66 #include "gromacs/topology/topology.h"
67 #include "gromacs/utility/exceptions.h"
68 #include "gromacs/utility/gmxassert.h"
69 #include "gromacs/utility/logger.h"
70 #include "gromacs/utility/stringutil.h"
72 #include "testutils/testasserts.h"
74 #include "testhardwarecontexts.h"

namespace gmx
{
namespace test
{

bool pmeSupportsInputForMode(const gmx_hw_info_t& hwinfo, const t_inputrec* inputRec, CodePath mode)
{
    bool       implemented;
    gmx_mtop_t mtop;
    switch (mode)
    {
        case CodePath::CPU: implemented = true; break;

        case CodePath::GPU:
            implemented = (pme_gpu_supports_build(nullptr) && pme_gpu_supports_hardware(hwinfo, nullptr)
                           && pme_gpu_supports_input(*inputRec, mtop, nullptr));
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return implemented;
}

uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
{
    /* Arbitrary ulp tolerance for sine/cosine implementation. It's
     * hard to know what to pick without testing lots of
     * implementations. */
    const uint64_t sineUlps = 10;
    return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
}
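
// As a concrete instance of the formula above: for the default PME order of 4,
// getSplineModuliDoublePrecisionUlps() evaluates to 4 * (4 - 2) + 2 * 10 * 4 = 88 ulps.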

//! PME initialization
PmeSafePointer pmeInitWrapper(const t_inputrec*        inputRec,
                              const CodePath           mode,
                              const DeviceInformation* deviceInfo,
                              const PmeGpuProgram*     pmeGpuProgram,
                              const Matrix3x3&         box,
                              const real               ewaldCoeff_q,
                              const real               ewaldCoeff_lj)
{
    const MDLogger dummyLogger;
    const auto     runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::Mixed;
    t_commrec      dummyCommrec  = { 0 };
    NumPmeDomains  numPmeDomains = { 1, 1 };
    gmx_pme_t*     pmeDataRaw =
            gmx_pme_init(&dummyCommrec, numPmeDomains, inputRec, false, false, true, ewaldCoeff_q,
                         ewaldCoeff_lj, 1, runMode, nullptr, deviceInfo, pmeGpuProgram, dummyLogger);
    PmeSafePointer pme(pmeDataRaw); // taking ownership

    // TODO get rid of this with proper matrix type
    matrix boxTemp;
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            boxTemp[i][j] = box[i * DIM + j];
        }
    }
    const char* boxError = check_box(PbcType::Unset, boxTemp);
    GMX_RELEASE_ASSERT(boxError == nullptr, boxError);

    switch (mode)
    {
        case CodePath::CPU: invertBoxMatrix(boxTemp, pme->recipbox); break;

        case CodePath::GPU:
            pme_gpu_set_testing(pme->gpu, true);
            pme_gpu_update_input_box(pme->gpu, boxTemp);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pme;
}

//! Simple PME initialization based on input, no atom data
PmeSafePointer pmeInitEmpty(const t_inputrec*        inputRec,
                            const CodePath           mode,
                            const DeviceInformation* deviceInfo,
                            const PmeGpuProgram*     pmeGpuProgram,
                            const Matrix3x3&         box,
                            const real               ewaldCoeff_q,
                            const real               ewaldCoeff_lj)
{
    return pmeInitWrapper(inputRec, mode, deviceInfo, pmeGpuProgram, box, ewaldCoeff_q, ewaldCoeff_lj);
    // hiding the fact that PME actually needs to know the number of atoms in advance
}

PmeSafePointer pmeInitEmpty(const t_inputrec* inputRec)
{
    const Matrix3x3 defaultBox = { { 1.0F, 0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 1.0F } };
    return pmeInitWrapper(inputRec, CodePath::CPU, nullptr, nullptr, defaultBox, 0.0F, 0.0F);
}
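
// Note: the defaultBox above is the identity matrix, i.e. a cubic box of 1 nm per side -
// an arbitrary but valid choice, since this overload is CPU-only and carries no atom data.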

//! Make a GPU state-propagator manager
std::unique_ptr<StatePropagatorDataGpu> makeStatePropagatorDataGpu(const gmx_pme_t&     pme,
                                                                   const DeviceContext& deviceContext)
{
    // TODO: Pin the host buffer and use async memory copies
    // TODO: Special constructor for PME-only rank / PME-tests is used here. There should be a mechanism to
    // restrict one from using other constructor here.
    return std::make_unique<StatePropagatorDataGpu>(pme_gpu_get_device_stream(&pme), deviceContext,
                                                    GpuApiCallBehavior::Sync,
                                                    pme_gpu_get_block_size(&pme), nullptr);
}

//! PME initialization with atom data
void pmeInitAtoms(gmx_pme_t*               pme,
                  StatePropagatorDataGpu*  stateGpu,
                  const CodePath           mode,
                  const CoordinatesVector& coordinates,
                  const ChargesVector&     charges)
{
    const index  atomCount = coordinates.size();
    GMX_RELEASE_ASSERT(atomCount == charges.ssize(), "Mismatch in atom data");
    PmeAtomComm* atc = nullptr;

    switch (mode)
    {
        case CodePath::CPU:
            atc              = &(pme->atc[0]);
            atc->x           = coordinates;
            atc->coefficient = charges;
            gmx_pme_reinit_atoms(pme, atomCount, charges.data());
            /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
            break;

        case CodePath::GPU:
            // TODO: Avoid use of atc in the GPU code path
            atc = &(pme->atc[0]);
            // We need to set atc->n for passing the size in the tests
            atc->setNumAtoms(atomCount);
            gmx_pme_reinit_atoms(pme, atomCount, charges.data());

            stateGpu->reinit(atomCount, atomCount);
            stateGpu->copyCoordinatesToGpu(arrayRefFromArray(coordinates.data(), coordinates.size()),
                                           gmx::AtomLocality::All);
            pme_gpu_set_kernelparam_coordinates(pme->gpu, stateGpu->getCoordinates());

            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting local PME real grid pointer for test I/O
static real* pmeGetRealGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->fftgrid[gridIndex];
}

//! Getting local PME real grid dimensions
static void pmeGetRealGridSizesInternal(const gmx_pme_t* pme,
                                        CodePath         mode,
                                        IVec&            gridSize,       //NOLINT(google-runtime-references)
                                        IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused;
    switch (mode)
    {
        case CodePath::CPU:
            gmx_parallel_3dfft_real_limits(pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused,
                                           paddedGridSize);
            break;

        case CodePath::GPU:
            pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting local PME complex grid pointer for test I/O
static t_complex* pmeGetComplexGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->cfftgrid[gridIndex];
}

//! Getting local PME complex grid dimensions
static void pmeGetComplexGridSizesInternal(const gmx_pme_t* pme,
                                           IVec&            gridSize,       //NOLINT(google-runtime-references)
                                           IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused, complexOrderUnused;
    gmx_parallel_3dfft_complex_limits(pme->pfft_setup[gridIndex], complexOrderUnused, gridSize,
                                      gridOffsetUnused, paddedGridSize); // TODO: what about YZX ordering?
}

//! Getting the PME grid memory buffer and its sizes - template definition
template<typename ValueType>
static void pmeGetGridAndSizesInternal(const gmx_pme_t* /*unused*/,
                                       CodePath /*unused*/,
                                       ValueType*& /*unused*/, //NOLINT(google-runtime-references)
                                       IVec& /*unused*/,       //NOLINT(google-runtime-references)
                                       IVec& /*unused*/)       //NOLINT(google-runtime-references)
{
    GMX_THROW(InternalError("Deleted function call"));
    // explicitly deleting general template does not compile in clang/icc, see https://llvm.org/bugs/show_bug.cgi?id=17537
}

//! Getting the PME real grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<real>(const gmx_pme_t* pme, CodePath mode, real*& grid, IVec& gridSize, IVec& paddedGridSize)
{
    grid = pmeGetRealGridInternal(pme);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
}

//! Getting the PME complex grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t* pme,
                                           CodePath /*unused*/,
                                           t_complex*& grid,
                                           IVec&       gridSize,
                                           IVec&       paddedGridSize)
{
    grid = pmeGetComplexGridInternal(pme);
    pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
}

//! PME spline calculation and charge spreading
void pmePerformSplineAndSpread(gmx_pme_t* pme,
                               CodePath   mode, // TODO const qualifiers elsewhere
                               bool       computeSplines,
                               bool       spreadCharges)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    PmeAtomComm* atc                          = &(pme->atc[0]);
    const size_t gridIndex                    = 0;
    const bool   computeSplinesForZeroCharges = true;
    real*        fftgrid                      = spreadCharges ? pme->fftgrid[gridIndex] : nullptr;
    real*        pmegrid                      = pme->pmegrid[gridIndex].grid.grid;

    switch (mode)
    {
        case CodePath::CPU:
            spread_on_grid(pme, atc, &pme->pmegrid[gridIndex], computeSplines, spreadCharges,
                           fftgrid, computeSplinesForZeroCharges, gridIndex);
            if (spreadCharges && !pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, pmegrid);
                copy_pmegrid_to_fftgrid(pme, pmegrid, fftgrid, gridIndex);
            }
            break;

        case CodePath::GPU:
        {
            // no synchronization needed as x is transferred in the PME stream
            GpuEventSynchronizer* xReadyOnDevice = nullptr;
            pme_gpu_spread(pme->gpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
        }
        break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting the internal spline data buffer pointer
static real* pmeGetSplineDataInternal(const gmx_pme_t* pme, PmeSplineDataType type, int dimIndex)
{
    GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
    const PmeAtomComm* atc          = &(pme->atc[0]);
    const size_t       threadIndex  = 0;
    real*              splineBuffer = nullptr;
    switch (type)
    {
        case PmeSplineDataType::Values:
            splineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            break;

        case PmeSplineDataType::Derivatives:
            splineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }
    return splineBuffer;
}

//! PME solving
void pmePerformSolve(const gmx_pme_t*  pme,
                     CodePath          mode,
                     PmeSolveAlgorithm method,
                     real              cellVolume,
                     GridOrdering      gridOrdering,
                     bool              computeEnergyAndVirial)
{
    t_complex*   h_grid              = pmeGetComplexGridInternal(pme);
    const bool   useLorentzBerthelot = false;
    const size_t threadIndex         = 0;
    switch (mode)
    {
        case CodePath::CPU:
            if (gridOrdering != GridOrdering::YZX)
            {
                GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    solve_pme_yzx(pme, h_grid, cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    solve_pme_lj_yzx(pme, &h_grid, useLorentzBerthelot, cellVolume,
                                     computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_solve(pme->gpu, h_grid, gridOrdering, computeEnergyAndVirial);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! PME force gathering
void pmePerformGather(gmx_pme_t* pme, CodePath mode, ForcesVector& forces)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(forces.ssize() == atomCount, "Invalid force buffer size");
    const real   scale       = 1.0;
    const size_t threadIndex = 0;
    const size_t gridIndex   = 0;
    real*        pmegrid     = pme->pmegrid[gridIndex].grid.grid;
    real*        fftgrid     = pme->fftgrid[gridIndex];

    switch (mode)
    {
        case CodePath::CPU:
            atc->f = forces;
            if (atc->nthread == 1)
            {
                // something which is normally done in serial spline computation (make_thread_local_ind())
                atc->spline[threadIndex].n = atomCount;
            }
            copy_fftgrid_to_pmegrid(pme, fftgrid, pmegrid, gridIndex, pme->nthread, threadIndex);
            unwrap_periodic_pmegrid(pme, pmegrid);
            gather_f_bsplines(pme, pmegrid, true, atc, &atc->spline[threadIndex], scale);
            break;

        case CodePath::GPU:
        {
            // Variable initialization needs a non-switch scope
            const bool computeEnergyAndVirial = false;
            PmeOutput  output                 = pme_gpu_getOutput(*pme, computeEnergyAndVirial);
            GMX_ASSERT(forces.size() == output.forces_.size(),
                       "Size of force buffers did not match");
            pme_gpu_gather(pme->gpu, reinterpret_cast<float*>(fftgrid));
            std::copy(std::begin(output.forces_), std::end(output.forces_), std::begin(forces));
        }
        break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! PME test finalization before fetching the outputs
void pmeFinalizeTest(const gmx_pme_t* pme, CodePath mode)
{
    switch (mode)
    {
        case CodePath::CPU: break;

        case CodePath::GPU: pme_gpu_synchronize(pme->gpu); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
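
/* A minimal sketch of how the helpers above are typically combined in a test,
 * assuming a CPU run with pre-filled inputRec, box, coordinates, charges,
 * cellVolume and forces. GPU runs additionally need a device context plus a
 * state-propagator manager from makeStatePropagatorDataGpu(), and real test
 * cases also inject grids and spline data explicitly between the stages:
 *
 *   PmeSafePointer pme = pmeInitWrapper(inputRec, CodePath::CPU, nullptr, nullptr,
 *                                       box, ewaldCoeff_q, ewaldCoeff_lj);
 *   pmeInitAtoms(pme.get(), nullptr, CodePath::CPU, coordinates, charges);
 *   pmePerformSplineAndSpread(pme.get(), CodePath::CPU, true, true);
 *   pmePerformSolve(pme.get(), CodePath::CPU, PmeSolveAlgorithm::Coulomb,
 *                   cellVolume, GridOrdering::YZX, true);
 *   pmePerformGather(pme.get(), CodePath::CPU, forces);
 *   pmeFinalizeTest(pme.get(), CodePath::CPU);
 */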

//! A binary enum for spline data layout transformation
enum class PmeLayoutTransform
{
    GpuToHost,
    HostToGpu
};

/*! \brief Gets a unique index to an element in a spline parameter buffer.
 *
 * These theta/dtheta buffers are laid out for GPU spread/gather
 * kernels. The index is wrt the execution block, in range(0,
 * atomsPerBlock * order * DIM).
 *
 * This is a wrapper, only used in unit tests.
 * \param[in] order        PME order
 * \param[in] splineIndex  Spline contribution index (from 0 to \p order - 1)
 * \param[in] dimIndex     Dimension index (from 0 to 2)
 * \param[in] atomIndex    Atom index wrt the block.
 * \param[in] atomsPerWarp Number of atoms processed by a warp.
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
static int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
{
    if (order != c_pmeGpuOrder)
    {
        throw order;
    }
    constexpr int fixedOrder = c_pmeGpuOrder;
    GMX_UNUSED_VALUE(fixedOrder);

    const int atomWarpIndex = atomIndex % atomsPerWarp;
    const int warpIndex     = atomIndex / atomsPerWarp;
    int       indexBase, result;
    switch (atomsPerWarp)
    {
        case 1:
            indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
            break;

        case 2:
            indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
            break;

        case 4:
            indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
            break;

        case 8:
            indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
            break;

        default:
            GMX_THROW(NotImplementedError(
                    formatString("Test function call not unrolled for atomsPerWarp = %d in "
                                 "getSplineParamFullIndex",
                                 atomsPerWarp)));
    }
    return result;
}

/*!\brief Return the number of atoms per warp */
static int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
{
    const int order = pmeGpu->common->pme_order;
    const int threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    return pmeGpu->programHandle_->warpSize() / threadsPerAtom;
}
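
// For example, with a warp size of 32 and the fixed PME order of 4, this yields
// 32 / 4 = 8 atoms per warp with ThreadsPerAtom::Order, and 32 / 16 = 2 atoms per
// warp when a full order * order tile of threads handles each atom.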

/*! \brief Rearranges the atom spline data between the GPU and host layouts.
 * Only used for test purposes so far, likely to be horribly slow.
 *
 * \param[in]  pmeGpu    The PME GPU structure.
 * \param[out] atc       The PME CPU atom data structure (with a single-threaded layout).
 * \param[in]  type      The spline data type (values or derivatives).
 * \param[in]  dimIndex  Dimension index.
 * \param[in]  transform Layout transform type
 */
static void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
                                               const PmeAtomComm* atc,
                                               PmeSplineDataType  type,
                                               int                dimIndex,
                                               PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
    // performance (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = atc->numAtoms();
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;
    GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");

    real*  cpuSplineBuffer;
    float* h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex =
                    getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
                       "Atom spline data index out of bounds (while transforming GPU data layout "
                       "for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default: GMX_THROW(InternalError("Unknown layout transform"));
            }
        }
    }
}
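
// The CPU layout addressed above is simply atom-major: within one dimension,
// element (atom, k) lives at atomIndex * pmeOrder + orderIndex. The GPU layout
// interleaves warps, atoms within a warp, dimensions, and spline contributions,
// which is why getSplineParamFullIndex() is needed to map between the two.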

//! Setting atom spline values/derivatives to be used in spread/gather
void pmeSetSplineData(const gmx_pme_t*             pme,
                      CodePath                     mode,
                      const SplineParamsDimVector& splineValues,
                      PmeSplineDataType            type,
                      int                          dimIndex)
{
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const index        atomCount = atc->numAtoms();
    const index        pmeOrder  = pme->pme_order;
    const index        dimSize   = pmeOrder * atomCount;
    GMX_RELEASE_ASSERT(dimSize == splineValues.ssize(), "Mismatch in spline data");
    real* splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);

    switch (mode)
    {
        case CodePath::CPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            break;

        case CodePath::GPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting gridline indices to be used in spread/gather
void pmeSetGridLineIndices(gmx_pme_t* pme, CodePath mode, const GridLineIndicesVector& gridLineIndices)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(atomCount == gridLineIndices.ssize(), "Mismatch in gridline indices size");

    IVec paddedGridSizeUnused, gridSize(0, 0, 0);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);

    for (const auto& index : gridLineIndices)
    {
        for (int i = 0; i < DIM; i++)
        {
            GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]),
                               "Invalid gridline index");
        }
    }

    switch (mode)
    {
        case CodePath::GPU:
            memcpy(pme_gpu_staging(pme->gpu).h_gridlineIndices, gridLineIndices.data(),
                   atomCount * sizeof(gridLineIndices[0]));
            break;

        case CodePath::CPU:
            atc->idx.resize(gridLineIndices.size());
            std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx.begin());
            break;
        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting plain index into the complex 3d grid
inline size_t pmeGetGridPlainIndexInternal(const IVec& index, const IVec& paddedGridSize, GridOrdering gridOrdering)
{
    size_t result;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
            break;

        case GridOrdering::XYZ:
            result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
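
// As a concrete check of the row-major flattening above: with XYZ ordering and a
// padded grid of, say, 4 x 5 x 6 points, cell (1, 2, 3) maps to
// (1 * 5 + 2) * 6 + 3 = 45; with YZX ordering the Y index varies slowest instead.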

//! Setting real or complex grid
template<typename ValueType>
static void pmeSetGridInternal(const gmx_pme_t*                        pme,
                               CodePath                                mode,
                               GridOrdering                            gridOrdering,
                               const SparseGridValuesInput<ValueType>& gridValues)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);

    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break, the grid will be copied from the host buffer in testing mode
        case CodePath::CPU:
            std::memset(grid, 0,
                        paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
            for (const auto& gridValue : gridValues)
            {
                for (int i = 0; i < DIM; i++)
                {
                    GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]),
                                       "Invalid grid value index");
                }
                const size_t gridValueIndex =
                        pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
                grid[gridValueIndex] = gridValue.second;
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting real grid to be used in gather
void pmeSetRealGrid(const gmx_pme_t* pme, CodePath mode, const SparseRealGridValuesInput& gridValues)
{
    pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
}

//! Setting complex grid to be used in solve
void pmeSetComplexGrid(const gmx_pme_t*                    pme,
                       CodePath                            mode,
                       GridOrdering                        gridOrdering,
                       const SparseComplexGridValuesInput& gridValues)
{
    pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
}

//! Getting the single dimension's spline values or derivatives
SplineParamsDimVector pmeGetSplineData(const gmx_pme_t* pme, CodePath mode, PmeSplineDataType type, int dimIndex)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();
    const size_t       pmeOrder  = pme->pme_order;
    const size_t       dimSize   = pmeOrder * atomCount;

    real*                 sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
    SplineParamsDimVector result;
    switch (mode)
    {
        case CodePath::GPU:
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        case CodePath::CPU: result = arrayRefFromArray(sourceBuffer, dimSize); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}

//! Getting the gridline indices
GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t* pme, CodePath mode)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();

    GridLineIndicesVector gridLineIndices;
    switch (mode)
    {
        case CodePath::GPU:
            gridLineIndices = arrayRefFromArray(
                    reinterpret_cast<IVec*>(pme_gpu_staging(pme->gpu).h_gridlineIndices), atomCount);
            break;

        case CodePath::CPU: gridLineIndices = atc->idx; break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridLineIndices;
}

//! Getting real or complex grid - only non zero values
template<typename ValueType>
static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t* pme,
                                                            CodePath         mode,
                                                            GridOrdering     gridOrdering)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
    SparseGridValuesOutput<ValueType> gridValues;
    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break
        case CodePath::CPU:
            gridValues.clear();
            for (int ix = 0; ix < gridSize[XX]; ix++)
            {
                for (int iy = 0; iy < gridSize[YY]; iy++)
                {
                    for (int iz = 0; iz < gridSize[ZZ]; iz++)
                    {
                        IVec         temp(ix, iy, iz);
                        const size_t gridValueIndex =
                                pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
                        const ValueType value = grid[gridValueIndex];
                        if (value != ValueType{})
                        {
                            auto key        = formatString("Cell %d %d %d", ix, iy, iz);
                            gridValues[key] = value;
                        }
                    }
                }
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridValues;
}

//! Getting the real grid (spreading output of pmePerformSplineAndSpread())
SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t* pme, CodePath mode)
{
    return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
}

//! Getting the complex grid output of pmePerformSolve()
SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t* pme, CodePath mode, GridOrdering gridOrdering)
{
    return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
}

//! Getting the reciprocal energy and virial
PmeOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t* pme, CodePath mode, PmeSolveAlgorithm method)
{
    PmeOutput output;
    switch (mode)
    {
        case CodePath::CPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;
        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb: pme_gpu_getEnergyAndVirial(*pme, &output); break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return output;
}

} // namespace test
} // namespace gmx