src/gromacs/ewald/tests/pmetestcommon.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Implements common routines for PME tests.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pmetestcommon.h"

#include <cstring>

#include <algorithm>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme_gather.h"
#include "gromacs/ewald/pme_gpu_calculate_splines.h"
#include "gromacs/ewald/pme_gpu_constants.h"
#include "gromacs/ewald/pme_gpu_internal.h"
#include "gromacs/ewald/pme_gpu_staging.h"
#include "gromacs/ewald/pme_grid.h"
#include "gromacs/ewald/pme_internal.h"
#include "gromacs/ewald/pme_redistribute.h"
#include "gromacs/ewald/pme_solve.h"
#include "gromacs/ewald/pme_spread.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/hardware/device_management.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#include "testutils/test_hardware_environment.h"
#include "testutils/testasserts.h"

class DeviceContext;

namespace gmx
{
namespace test
{
bool pmeSupportsInputForMode(const gmx_hw_info_t& hwinfo, const t_inputrec* inputRec, CodePath mode)
{
    bool implemented;
    switch (mode)
    {
        case CodePath::CPU: implemented = true; break;

        case CodePath::GPU:
            implemented = (pme_gpu_supports_build(nullptr) && pme_gpu_supports_hardware(hwinfo, nullptr)
                           && pme_gpu_supports_input(*inputRec, nullptr));
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return implemented;
}

uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
{
    /* Arbitrary ulp tolerance for sine/cosine implementation. It's
     * hard to know what to pick without testing lots of
     * implementations. */
    const uint64_t sineUlps = 10;
    return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
}
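/* Worked example (illustrative only, not part of the original file): for the
 * default PME interpolation order of 4, the tolerance above evaluates to
 * 4 * (4 - 2) + 2 * 10 * 4 = 88 ulps. */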
//! PME initialization
PmeSafePointer pmeInitWrapper(const t_inputrec*    inputRec,
                              const CodePath       mode,
                              const DeviceContext* deviceContext,
                              const DeviceStream*  deviceStream,
                              const PmeGpuProgram* pmeGpuProgram,
                              const Matrix3x3&     box,
                              const real           ewaldCoeff_q,
                              const real           ewaldCoeff_lj)
{
    const MDLogger dummyLogger;
    const auto     runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::Mixed;
    t_commrec      dummyCommrec  = { 0 };
    NumPmeDomains  numPmeDomains = { 1, 1 };
    gmx_pme_t*     pmeDataRaw    = gmx_pme_init(&dummyCommrec, numPmeDomains, inputRec, false, false, true,
                                             ewaldCoeff_q, ewaldCoeff_lj, 1, runMode, nullptr,
                                             deviceContext, deviceStream, pmeGpuProgram, dummyLogger);
    PmeSafePointer pme(pmeDataRaw); // taking ownership

    // TODO get rid of this with proper matrix type
    matrix boxTemp;
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            boxTemp[i][j] = box[i * DIM + j];
        }
    }
    const char* boxError = check_box(PbcType::Unset, boxTemp);
    GMX_RELEASE_ASSERT(boxError == nullptr, boxError);

    switch (mode)
    {
        case CodePath::CPU: invertBoxMatrix(boxTemp, pme->recipbox); break;

        case CodePath::GPU:
            pme_gpu_set_testing(pme->gpu, true);
            pme_gpu_update_input_box(pme->gpu, boxTemp);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pme;
}

PmeSafePointer pmeInitEmpty(const t_inputrec* inputRec)
{
    const Matrix3x3 defaultBox = { { 1.0F, 0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 1.0F } };
    return pmeInitWrapper(inputRec, CodePath::CPU, nullptr, nullptr, nullptr, defaultBox, 0.0F, 0.0F);
}

//! Make a GPU state-propagator manager
std::unique_ptr<StatePropagatorDataGpu> makeStatePropagatorDataGpu(const gmx_pme_t&     pme,
                                                                   const DeviceContext* deviceContext,
                                                                   const DeviceStream*  deviceStream)
{
    // TODO: Pin the host buffer and use async memory copies
    // TODO: Special constructor for PME-only rank / PME-tests is used here. There should be a mechanism to
    // restrict one from using other constructor here.
    return std::make_unique<StatePropagatorDataGpu>(deviceStream, *deviceContext, GpuApiCallBehavior::Sync,
                                                    pme_gpu_get_block_size(&pme), nullptr);
}
//! PME initialization with atom data
void pmeInitAtoms(gmx_pme_t*               pme,
                  StatePropagatorDataGpu*  stateGpu,
                  const CodePath           mode,
                  const CoordinatesVector& coordinates,
                  const ChargesVector&     charges)
{
    const index atomCount = coordinates.size();
    GMX_RELEASE_ASSERT(atomCount == charges.ssize(), "Mismatch in atom data");
    PmeAtomComm* atc = nullptr;

    switch (mode)
    {
        case CodePath::CPU:
            atc              = &(pme->atc[0]);
            atc->x           = coordinates;
            atc->coefficient = charges;
            gmx_pme_reinit_atoms(pme, atomCount, charges.data(), nullptr);
            /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
            break;

        case CodePath::GPU:
            // TODO: Avoid use of atc in the GPU code path
            atc = &(pme->atc[0]);
            // We need to set atc->n for passing the size in the tests
            atc->setNumAtoms(atomCount);
            gmx_pme_reinit_atoms(pme, atomCount, charges.data(), nullptr);

            stateGpu->reinit(atomCount, atomCount);
            stateGpu->copyCoordinatesToGpu(arrayRefFromArray(coordinates.data(), coordinates.size()),
                                           gmx::AtomLocality::All);
            pme_gpu_set_kernelparam_coordinates(pme->gpu, stateGpu->getCoordinates());

            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
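/* Illustrative sketch (not part of the original file): a typical CPU-path test
 * is expected to chain the helpers in this file roughly as follows; the
 * inputRec, box, coordinate and charge values are hypothetical placeholders.
 *
 *   PmeSafePointer pme = pmeInitWrapper(&inputRec, CodePath::CPU, nullptr, nullptr,
 *                                       nullptr, box, ewaldCoeff_q, ewaldCoeff_lj);
 *   pmeInitAtoms(pme.get(), nullptr, CodePath::CPU, coordinates, charges);
 *   pmePerformSplineAndSpread(pme.get(), CodePath::CPU, true, true);
 *   pmeFinalizeTest(pme.get(), CodePath::CPU);
 *   SparseRealGridValuesOutput grid = pmeGetRealGrid(pme.get(), CodePath::CPU);
 */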
//! Getting local PME real grid pointer for test I/O
static real* pmeGetRealGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->fftgrid[gridIndex];
}

//! Getting local PME real grid dimensions
static void pmeGetRealGridSizesInternal(const gmx_pme_t* pme,
                                        CodePath         mode,
                                        IVec&            gridSize,       //NOLINT(google-runtime-references)
                                        IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused;
    switch (mode)
    {
        case CodePath::CPU:
            gmx_parallel_3dfft_real_limits(pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused,
                                           paddedGridSize);
            break;

        case CodePath::GPU:
            pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Getting local PME complex grid pointer for test I/O
static t_complex* pmeGetComplexGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->cfftgrid[gridIndex];
}

//! Getting local PME complex grid dimensions
static void pmeGetComplexGridSizesInternal(const gmx_pme_t* pme,
                                           IVec&            gridSize,       //NOLINT(google-runtime-references)
                                           IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused, complexOrderUnused;
    gmx_parallel_3dfft_complex_limits(pme->pfft_setup[gridIndex], complexOrderUnused, gridSize,
                                      gridOffsetUnused, paddedGridSize); // TODO: what about YZX ordering?
}

//! Getting the PME grid memory buffer and its sizes - template definition
template<typename ValueType>
static void pmeGetGridAndSizesInternal(const gmx_pme_t* /*unused*/,
                                       CodePath /*unused*/,
                                       ValueType*& /*unused*/, //NOLINT(google-runtime-references)
                                       IVec& /*unused*/,       //NOLINT(google-runtime-references)
                                       IVec& /*unused*/)       //NOLINT(google-runtime-references)
{
    GMX_THROW(InternalError("Deleted function call"));
    // explicitly deleting general template does not compile in clang/icc, see https://llvm.org/bugs/show_bug.cgi?id=17537
}

//! Getting the PME real grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<real>(const gmx_pme_t* pme, CodePath mode, real*& grid, IVec& gridSize, IVec& paddedGridSize)
{
    grid = pmeGetRealGridInternal(pme);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
}

//! Getting the PME complex grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t* pme,
                                           CodePath /*unused*/,
                                           t_complex*& grid,
                                           IVec&       gridSize,
                                           IVec&       paddedGridSize)
{
    grid = pmeGetComplexGridInternal(pme);
    pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
}
//! PME spline calculation and charge spreading
void pmePerformSplineAndSpread(gmx_pme_t* pme,
                               CodePath   mode, // TODO const qualifiers elsewhere
                               bool       computeSplines,
                               bool       spreadCharges)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    PmeAtomComm* atc                          = &(pme->atc[0]);
    const size_t gridIndex                    = 0;
    const bool   computeSplinesForZeroCharges = true;
    real**       fftgrid                      = spreadCharges ? pme->fftgrid : nullptr;
    real*        pmegrid                      = pme->pmegrid[gridIndex].grid.grid;

    switch (mode)
    {
        case CodePath::CPU:
            spread_on_grid(pme, atc, &pme->pmegrid[gridIndex], computeSplines, spreadCharges,
                           fftgrid != nullptr ? fftgrid[gridIndex] : nullptr,
                           computeSplinesForZeroCharges, gridIndex);
            if (spreadCharges && !pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, pmegrid);
                copy_pmegrid_to_fftgrid(
                        pme, pmegrid, fftgrid != nullptr ? fftgrid[gridIndex] : nullptr, gridIndex);
            }
            break;

/* The compiler will complain about passing fftgrid (converting double ** to float **) if using
 * double precision. GPUs are not used with double precision anyhow. */
#if !GMX_DOUBLE
        case CodePath::GPU:
        {
            const real lambdaQ = 1.0;
            // no synchronization needed as x is transferred in the PME stream
            GpuEventSynchronizer* xReadyOnDevice = nullptr;
            pme_gpu_spread(pme->gpu, xReadyOnDevice, fftgrid, computeSplines, spreadCharges, lambdaQ);
        }
        break;
#endif

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting the internal spline data buffer pointer
static real* pmeGetSplineDataInternal(const gmx_pme_t* pme, PmeSplineDataType type, int dimIndex)
{
    GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
    const PmeAtomComm* atc          = &(pme->atc[0]);
    const size_t       threadIndex  = 0;
    real*              splineBuffer = nullptr;
    switch (type)
    {
        case PmeSplineDataType::Values:
            splineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            break;

        case PmeSplineDataType::Derivatives:
            splineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }
    return splineBuffer;
}
//! PME solving
void pmePerformSolve(const gmx_pme_t*  pme,
                     CodePath          mode,
                     PmeSolveAlgorithm method,
                     real              cellVolume,
                     GridOrdering      gridOrdering,
                     bool              computeEnergyAndVirial)
{
    t_complex*   h_grid              = pmeGetComplexGridInternal(pme);
    const bool   useLorentzBerthelot = false;
    const size_t threadIndex         = 0;
    const size_t gridIndex           = 0;
    switch (mode)
    {
        case CodePath::CPU:
            if (gridOrdering != GridOrdering::YZX)
            {
                GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    solve_pme_yzx(pme, h_grid, cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    solve_pme_lj_yzx(pme, &h_grid, useLorentzBerthelot, cellVolume,
                                     computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_solve(pme->gpu, gridIndex, h_grid, gridOrdering, computeEnergyAndVirial);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! PME force gathering
void pmePerformGather(gmx_pme_t* pme, CodePath mode, ForcesVector& forces)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(forces.ssize() == atomCount, "Invalid force buffer size");
    const real   scale       = 1.0;
    const size_t threadIndex = 0;
    const size_t gridIndex   = 0;
    real*        pmegrid     = pme->pmegrid[gridIndex].grid.grid;
    real**       fftgrid     = pme->fftgrid;

    switch (mode)
    {
        case CodePath::CPU:
            atc->f = forces;
            if (atc->nthread == 1)
            {
                // something which is normally done in serial spline computation (make_thread_local_ind())
                atc->spline[threadIndex].n = atomCount;
            }
            copy_fftgrid_to_pmegrid(pme, fftgrid[gridIndex], pmegrid, gridIndex, pme->nthread, threadIndex);
            unwrap_periodic_pmegrid(pme, pmegrid);
            gather_f_bsplines(pme, pmegrid, true, atc, &atc->spline[threadIndex], scale);
            break;

/* The compiler will complain about passing fftgrid (converting double ** to float **) if using
 * double precision. GPUs are not used with double precision anyhow. */
#if !GMX_DOUBLE
        case CodePath::GPU:
        {
            // Variable initialization needs a non-switch scope
            const bool computeEnergyAndVirial = false;
            const real lambdaQ                = 1.0;
            PmeOutput  output = pme_gpu_getOutput(*pme, computeEnergyAndVirial, lambdaQ);
            GMX_ASSERT(forces.size() == output.forces_.size(),
                       "Size of force buffers did not match");
            pme_gpu_gather(pme->gpu, fftgrid, lambdaQ);
            std::copy(std::begin(output.forces_), std::end(output.forces_), std::begin(forces));
        }
        break;
#endif

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! PME test finalization before fetching the outputs
void pmeFinalizeTest(const gmx_pme_t* pme, CodePath mode)
{
    switch (mode)
    {
        case CodePath::CPU: break;

        case CodePath::GPU: pme_gpu_synchronize(pme->gpu); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! A binary enum for spline data layout transformation
enum class PmeLayoutTransform
{
    GpuToHost,
    HostToGpu
};

/*! \brief Gets a unique index to an element in a spline parameter buffer.
 *
 * These theta/dtheta buffers are laid out for GPU spread/gather
 * kernels. The index is wrt the execution block, in range(0,
 * atomsPerBlock * order * DIM).
 *
 * This is a wrapper, only used in unit tests.
 * \param[in] order         PME order
 * \param[in] splineIndex   Spline contribution index (from 0 to \p order - 1)
 * \param[in] dimIndex      Dimension index (from 0 to 2)
 * \param[in] atomIndex     Atom index wrt the block.
 * \param[in] atomsPerWarp  Number of atoms processed by a warp.
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
static int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
{
    if (order != c_pmeGpuOrder)
    {
        throw order;
    }
    constexpr int fixedOrder = c_pmeGpuOrder;
    GMX_UNUSED_VALUE(fixedOrder);

    const int atomWarpIndex = atomIndex % atomsPerWarp;
    const int warpIndex     = atomIndex / atomsPerWarp;
    int       indexBase, result;
    switch (atomsPerWarp)
    {
        case 1:
            indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
            break;

        case 2:
            indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
            break;

        case 4:
            indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
            break;

        case 8:
            indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
            break;

        default:
            GMX_THROW(NotImplementedError(
                    formatString("Test function call not unrolled for atomsPerWarp = %d in "
                                 "getSplineParamFullIndex",
                                 atomsPerWarp)));
    }
    return result;
}
/*!\brief Return the number of atoms per warp */
static int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
{
    const int order = pmeGpu->common->pme_order;
    const int threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    return pmeGpu->programHandle_->warpSize() / threadsPerAtom;
}
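/* Worked example (illustrative only, not part of the original file): with a
 * warp size of 32 and PME order 4, the ThreadsPerAtom::Order setting uses 4
 * threads per atom, giving 32 / 4 = 8 atoms per warp; the order*order setting
 * uses 16 threads per atom, giving 32 / 16 = 2 atoms per warp. */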
/*! \brief Rearranges the atom spline data between the GPU and host layouts.
 * Only used for test purposes so far, likely to be horribly slow.
 *
 * \param[in]  pmeGpu     The PME GPU structure.
 * \param[out] atc        The PME CPU atom data structure (with a single-threaded layout).
 * \param[in]  type       The spline data type (values or derivatives).
 * \param[in]  dimIndex   Dimension index.
 * \param[in]  transform  Layout transform type
 */
static void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
                                               const PmeAtomComm* atc,
                                               PmeSplineDataType  type,
                                               int                dimIndex,
                                               PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
    // performance (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = atc->numAtoms();
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;
    GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");

    real*  cpuSplineBuffer;
    float* h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex =
                    getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
                       "Atom spline data index out of bounds (while transforming GPU data layout "
                       "for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default: GMX_THROW(InternalError("Unknown layout transform"));
            }
        }
    }
}
//! Setting atom spline values/derivatives to be used in spread/gather
void pmeSetSplineData(const gmx_pme_t*             pme,
                      CodePath                     mode,
                      const SplineParamsDimVector& splineValues,
                      PmeSplineDataType            type,
                      int                          dimIndex)
{
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const index        atomCount = atc->numAtoms();
    const index        pmeOrder  = pme->pme_order;
    const index        dimSize   = pmeOrder * atomCount;
    GMX_RELEASE_ASSERT(dimSize == splineValues.ssize(), "Mismatch in spline data");
    real* splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);

    switch (mode)
    {
        case CodePath::CPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            break;

        case CodePath::GPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}

//! Setting gridline indices to be used in spread/gather
void pmeSetGridLineIndices(gmx_pme_t* pme, CodePath mode, const GridLineIndicesVector& gridLineIndices)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(atomCount == gridLineIndices.ssize(), "Mismatch in gridline indices size");

    IVec paddedGridSizeUnused, gridSize(0, 0, 0);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);

    for (const auto& index : gridLineIndices)
    {
        for (int i = 0; i < DIM; i++)
        {
            GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]),
                               "Invalid gridline index");
        }
    }

    switch (mode)
    {
        case CodePath::GPU:
            memcpy(pme_gpu_staging(pme->gpu).h_gridlineIndices, gridLineIndices.data(),
                   atomCount * sizeof(gridLineIndices[0]));
            break;

        case CodePath::CPU:
            atc->idx.resize(gridLineIndices.size());
            std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx.begin());
            break;
        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting plain index into the complex 3d grid
inline size_t pmeGetGridPlainIndexInternal(const IVec& index, const IVec& paddedGridSize, GridOrdering gridOrdering)
{
    size_t result;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
            break;

        case GridOrdering::XYZ:
            result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
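/* Worked example (illustrative only, not part of the original file): with XYZ
 * ordering and a padded grid of 4 x 5 x 6, the cell (1, 2, 3) maps to the
 * plain index (1 * 5 + 2) * 6 + 3 = 45; with YZX ordering the same cell maps
 * to (2 * 6 + 3) * 4 + 1 = 61. */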
//! Setting real or complex grid
template<typename ValueType>
static void pmeSetGridInternal(const gmx_pme_t*                        pme,
                               CodePath                                mode,
                               GridOrdering                            gridOrdering,
                               const SparseGridValuesInput<ValueType>& gridValues)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);

    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break, the grid will be copied from the host buffer in testing mode
        case CodePath::CPU:
            std::memset(grid, 0,
                        paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
            for (const auto& gridValue : gridValues)
            {
                for (int i = 0; i < DIM; i++)
                {
                    GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]),
                                       "Invalid grid value index");
                }
                const size_t gridValueIndex =
                        pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
                grid[gridValueIndex] = gridValue.second;
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
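/* Note (illustrative only, not part of the original file): as used above, each
 * entry of SparseGridValuesInput pairs an integer grid-cell index (an IVec,
 * checked against gridSize) with the value to deposit at that cell, so a
 * sparse input touching two cells might look like
 * { { IVec(0, 0, 0), 1.0 }, { IVec(1, 2, 3), -0.5 } }. */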
//! Setting real grid to be used in gather
void pmeSetRealGrid(const gmx_pme_t* pme, CodePath mode, const SparseRealGridValuesInput& gridValues)
{
    pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
}

//! Setting complex grid to be used in solve
void pmeSetComplexGrid(const gmx_pme_t*                    pme,
                       CodePath                            mode,
                       GridOrdering                        gridOrdering,
                       const SparseComplexGridValuesInput& gridValues)
{
    pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
}
//! Getting the single dimension's spline values or derivatives
SplineParamsDimVector pmeGetSplineData(const gmx_pme_t* pme, CodePath mode, PmeSplineDataType type, int dimIndex)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();
    const size_t       pmeOrder  = pme->pme_order;
    const size_t       dimSize   = pmeOrder * atomCount;

    real*                 sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
    SplineParamsDimVector result;
    switch (mode)
    {
        case CodePath::GPU:
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        case CodePath::CPU: result = arrayRefFromArray(sourceBuffer, dimSize); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}

//! Getting the gridline indices
GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t* pme, CodePath mode)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();

    GridLineIndicesVector gridLineIndices;
    switch (mode)
    {
        case CodePath::GPU:
            gridLineIndices = arrayRefFromArray(
                    reinterpret_cast<IVec*>(pme_gpu_staging(pme->gpu).h_gridlineIndices), atomCount);
            break;

        case CodePath::CPU: gridLineIndices = atc->idx; break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridLineIndices;
}
//! Getting real or complex grid - only non zero values
template<typename ValueType>
static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t* pme,
                                                            CodePath         mode,
                                                            GridOrdering     gridOrdering)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
    SparseGridValuesOutput<ValueType> gridValues;
    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break
        case CodePath::CPU:
            gridValues.clear();
            for (int ix = 0; ix < gridSize[XX]; ix++)
            {
                for (int iy = 0; iy < gridSize[YY]; iy++)
                {
                    for (int iz = 0; iz < gridSize[ZZ]; iz++)
                    {
                        IVec         temp(ix, iy, iz);
                        const size_t gridValueIndex =
                                pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
                        const ValueType value = grid[gridValueIndex];
                        if (value != ValueType{})
                        {
                            auto key        = formatString("Cell %d %d %d", ix, iy, iz);
                            gridValues[key] = value;
                        }
                    }
                }
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridValues;
}

//! Getting the real grid (spreading output of pmePerformSplineAndSpread())
SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t* pme, CodePath mode)
{
    return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
}

//! Getting the complex grid output of pmePerformSolve()
SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t* pme, CodePath mode, GridOrdering gridOrdering)
{
    return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
}
//! Getting the reciprocal energy and virial
PmeOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t* pme, CodePath mode, PmeSolveAlgorithm method)
{
    PmeOutput  output;
    const real lambdaQ = 1.0;
    switch (mode)
    {
        case CodePath::CPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;
        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_getEnergyAndVirial(*pme, lambdaQ, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return output;
}
const char* codePathToString(CodePath codePath)
{
    switch (codePath)
    {
        case CodePath::CPU: return "CPU";
        case CodePath::GPU: return "GPU";
        default: GMX_THROW(NotImplementedError("This CodePath should support codePathToString"));
    }
}

PmeTestHardwareContext::PmeTestHardwareContext() : codePath_(CodePath::CPU) {}
PmeTestHardwareContext::PmeTestHardwareContext(TestDevice* testDevice) :
    codePath_(CodePath::GPU),
    testDevice_(testDevice)
{
    setActiveDevice(testDevice_->deviceInfo());
    pmeGpuProgram_ = buildPmeGpuProgram(testDevice_->deviceContext());
}
//! Returns a human-readable context description line
std::string PmeTestHardwareContext::description() const
{
    switch (codePath_)
    {
        case CodePath::CPU: return "CPU";
        case CodePath::GPU: return "GPU (" + testDevice_->description() + ")";
        default: return "Unknown code path.";
    }
}

void PmeTestHardwareContext::activate() const
{
    if (codePath_ == CodePath::GPU)
    {
        setActiveDevice(testDevice_->deviceInfo());
    }
}

std::vector<std::unique_ptr<PmeTestHardwareContext>> createPmeTestHardwareContextList()
{
    std::vector<std::unique_ptr<PmeTestHardwareContext>> pmeTestHardwareContextList;
    // Add CPU
    pmeTestHardwareContextList.emplace_back(std::make_unique<PmeTestHardwareContext>());
    // Add GPU devices
    const auto& testDeviceList = getTestHardwareEnvironment()->getTestDeviceList();
    for (const auto& testDevice : testDeviceList)
    {
        pmeTestHardwareContextList.emplace_back(std::make_unique<PmeTestHardwareContext>(testDevice.get()));
    }
    return pmeTestHardwareContextList;
}

} // namespace test
} // namespace gmx