Clean up PME domain count passing
[gromacs.git] / src / gromacs / ewald / tests / pmetestcommon.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief
 * Implements common routines for PME tests.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pmetestcommon.h"

#include <cstring>

#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme-gather.h"
#include "gromacs/ewald/pme-gpu-internal.h"
#include "gromacs/ewald/pme-grid.h"
#include "gromacs/ewald/pme-internal.h"
#include "gromacs/ewald/pme-solve.h"
#include "gromacs/ewald/pme-spread.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"

#include "testutils/testasserts.h"
namespace gmx
{
namespace test
{
bool pmeSupportsInputForMode(const t_inputrec *inputRec, CodePath mode)
{
    bool implemented;
    switch (mode)
    {
        case CodePath::CPU:
            implemented = true;
            break;

        case CodePath::CUDA:
            implemented = pme_gpu_supports_input(inputRec, nullptr);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return implemented;
}
gmx_uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
{
    /* Arbitrary ulp tolerance for sine/cosine implementation. It's
     * hard to know what to pick without testing lots of
     * implementations. */
    const gmx_uint64_t sineUlps = 10;
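    /* Illustrative arithmetic: for the common spline order of 4 the formula
     * below allows 4 * (4 - 2) + 2 * 10 * 4 = 88 ulps. This is a heuristic
     * bound, not a derived one. */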
    return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
}
//! PME initialization - internal
static PmeSafePointer pmeInitInternal(const t_inputrec  *inputRec,
                                      CodePath           mode,
                                      gmx_device_info_t *gpuInfo,
                                      size_t             atomCount,
                                      const Matrix3x3   &box,
                                      real               ewaldCoeff_q = 1.0f,
                                      real               ewaldCoeff_lj = 1.0f)
{
    const MDLogger dummyLogger;
    if (gpuInfo)
    {
        init_gpu(dummyLogger, gpuInfo);
    }
    const auto    runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::GPU;
    t_commrec     dummyCommrec  = {0};
    NumPmeDomains numPmeDomains = { 1, 1 };
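    // The tests run a single PME domain along each decomposition dimension;
    // gmx_pme_init() takes this NumPmeDomains count directly.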
    gmx_pme_t     *pmeDataRaw = gmx_pme_init(&dummyCommrec, numPmeDomains, inputRec, atomCount,
                                             false, false, true, ewaldCoeff_q, ewaldCoeff_lj,
                                             1, runMode, nullptr, gpuInfo, dummyLogger);
    PmeSafePointer pme(pmeDataRaw); // taking ownership

    // TODO get rid of this with proper matrix type
    matrix boxTemp;
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            boxTemp[i][j] = box[i * DIM + j];
        }
    }
    const char *boxError = check_box(-1, boxTemp);
    GMX_RELEASE_ASSERT(boxError == nullptr, boxError);

    switch (mode)
    {
        case CodePath::CPU:
            invertBoxMatrix(boxTemp, pme->recipbox);
            break;

        case CodePath::CUDA:
            pme_gpu_set_testing(pme->gpu, true);
            pme_gpu_update_input_box(pme->gpu, boxTemp);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pme;
}
//! Simple PME initialization based on input, no atom data
PmeSafePointer pmeInitEmpty(const t_inputrec  *inputRec,
                            CodePath           mode,
                            gmx_device_info_t *gpuInfo,
                            const Matrix3x3   &box,
                            real               ewaldCoeff_q,
                            real               ewaldCoeff_lj)
{
    return pmeInitInternal(inputRec, mode, gpuInfo, 0, box, ewaldCoeff_q, ewaldCoeff_lj);
    // hiding the fact that PME actually needs to know the number of atoms in advance
}
//! PME initialization with atom data
PmeSafePointer pmeInitAtoms(const t_inputrec        *inputRec,
                            CodePath                 mode,
                            gmx_device_info_t       *gpuInfo,
                            const CoordinatesVector &coordinates,
                            const ChargesVector     &charges,
                            const Matrix3x3         &box)
{
    const size_t    atomCount = coordinates.size();
    GMX_RELEASE_ASSERT(atomCount == charges.size(), "Mismatch in atom data");
    PmeSafePointer  pmeSafe = pmeInitInternal(inputRec, mode, gpuInfo, atomCount, box);
    pme_atomcomm_t *atc     = nullptr;

    switch (mode)
    {
        case CodePath::CPU:
            atc              = &(pmeSafe->atc[0]);
            atc->x           = const_cast<rvec *>(as_rvec_array(coordinates.data()));
            atc->coefficient = const_cast<real *>(charges.data());
            /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
            break;

        case CodePath::CUDA:
            gmx_pme_reinit_atoms(pmeSafe.get(), atomCount, charges.data());
            pme_gpu_copy_input_coordinates(pmeSafe->gpu, as_rvec_array(coordinates.data()));
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pmeSafe;
}
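/* A minimal usage sketch, with illustrative values only. The vector and
 * matrix aliases come from pmetestcommon.h; nkx/pme_order/coulombtype are
 * standard t_inputrec fields:
 *
 *   t_inputrec inputRec;
 *   inputRec.nkx = inputRec.nky = inputRec.nkz = 16; // reciprocal grid size
 *   inputRec.pme_order   = 4;                        // cubic B-splines
 *   inputRec.coulombtype = eelPME;
 *   CoordinatesVector coordinates = { { 1.0f, 2.0f, 3.0f } };
 *   ChargesVector     charges     = { 1.0f };
 *   Matrix3x3         box         = {{ 10.0f, 0.0f, 0.0f,
 *                                      0.0f, 10.0f, 0.0f,
 *                                      0.0f, 0.0f, 10.0f }};
 *   PmeSafePointer    pme         = pmeInitAtoms(&inputRec, CodePath::CPU, nullptr,
 *                                                coordinates, charges, box);
 */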
//! Getting local PME real grid pointer for test I/O
static real *pmeGetRealGridInternal(const gmx_pme_t *pme)
{
    const size_t gridIndex = 0;
    return pme->fftgrid[gridIndex];
}
//! Getting local PME real grid dimensions
static void pmeGetRealGridSizesInternal(const gmx_pme_t *pme,
                                        CodePath         mode,
                                        IVec            &gridSize,
                                        IVec            &paddedGridSize)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused;
    switch (mode)
    {
        case CodePath::CPU:
            gmx_parallel_3dfft_real_limits(pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused, paddedGridSize);
            break;

        case CodePath::CUDA:
            pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting local PME complex grid pointer for test I/O
static t_complex *pmeGetComplexGridInternal(const gmx_pme_t *pme)
{
    const size_t gridIndex = 0;
    return pme->cfftgrid[gridIndex];
}
//! Getting local PME complex grid dimensions
static void pmeGetComplexGridSizesInternal(const gmx_pme_t *pme,
                                           IVec            &gridSize,
                                           IVec            &paddedGridSize)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused, complexOrderUnused;
    gmx_parallel_3dfft_complex_limits(pme->pfft_setup[gridIndex], complexOrderUnused, gridSize, gridOffsetUnused, paddedGridSize); //TODO: what about YZX ordering?
}
//! Getting the PME grid memory buffer and its sizes - template definition
template<typename ValueType> static void pmeGetGridAndSizesInternal(const gmx_pme_t *, CodePath, ValueType * &, IVec &, IVec &)
{
    GMX_THROW(InternalError("Deleted function call"));
    // explicitly deleting general template does not compile in clang/icc, see https://llvm.org/bugs/show_bug.cgi?id=17537
}
//! Getting the PME real grid memory buffer and its sizes
template<> void pmeGetGridAndSizesInternal<real>(const gmx_pme_t *pme, CodePath mode, real * &grid, IVec &gridSize, IVec &paddedGridSize)
{
    grid = pmeGetRealGridInternal(pme);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
}
//! Getting the PME complex grid memory buffer and its sizes
template<> void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t *pme, CodePath, t_complex * &grid, IVec &gridSize, IVec &paddedGridSize)
{
    grid = pmeGetComplexGridInternal(pme);
    pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
}
//! PME spline calculation and charge spreading
void pmePerformSplineAndSpread(gmx_pme_t *pme, CodePath mode, // TODO const qualifiers elsewhere
                               bool computeSplines, bool spreadCharges)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    pme_atomcomm_t *atc                          = &(pme->atc[0]);
    const size_t    gridIndex                    = 0;
    const bool      computeSplinesForZeroCharges = true;
    real           *fftgrid                      = spreadCharges ? pme->fftgrid[gridIndex] : nullptr;
    real           *pmegrid                      = pme->pmegrid[gridIndex].grid.grid;

    switch (mode)
    {
        case CodePath::CPU:
            spread_on_grid(pme, atc, &pme->pmegrid[gridIndex], computeSplines, spreadCharges,
                           fftgrid, computeSplinesForZeroCharges, gridIndex);
            if (spreadCharges && !pme->bUseThreads)
            {
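                // Fold spline contributions that spilled over the periodic
                // boundary back into the cell, then copy the wrapped PME grid
                // into the FFT grid layout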
                wrap_periodic_pmegrid(pme, pmegrid);
                copy_pmegrid_to_fftgrid(pme, pmegrid, fftgrid, gridIndex);
            }
            break;

        case CodePath::CUDA:
            pme_gpu_spread(pme->gpu, gridIndex, fftgrid, computeSplines, spreadCharges);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting the internal spline data buffer pointer
static real *pmeGetSplineDataInternal(const gmx_pme_t *pme, PmeSplineDataType type, int dimIndex)
{
    GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
    const pme_atomcomm_t *atc          = &(pme->atc[0]);
    const size_t          threadIndex  = 0;
    real                 *splineBuffer = nullptr;
    switch (type)
    {
        case PmeSplineDataType::Values:
            splineBuffer = atc->spline[threadIndex].theta[dimIndex];
            break;

        case PmeSplineDataType::Derivatives:
            splineBuffer = atc->spline[threadIndex].dtheta[dimIndex];
            break;

        default:
            GMX_THROW(InternalError("Unknown spline data type"));
    }
    return splineBuffer;
}
//! PME solving
void pmePerformSolve(const gmx_pme_t *pme, CodePath mode,
                     PmeSolveAlgorithm method, real cellVolume,
                     GridOrdering gridOrdering, bool computeEnergyAndVirial)
{
    t_complex    *h_grid              = pmeGetComplexGridInternal(pme);
    const bool    useLorentzBerthelot = false;
    const size_t  threadIndex         = 0;
    switch (mode)
    {
        case CodePath::CPU:
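            /* The CPU solvers below work on the YZX-transposed complex grid
             * layout produced by the parallel 3D FFT, hence this check. */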
            if (gridOrdering != GridOrdering::YZX)
            {
                GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    solve_pme_yzx(pme, h_grid, cellVolume,
                                  computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    solve_pme_lj_yzx(pme, &h_grid, useLorentzBerthelot,
                                     cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                default:
                    GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::CUDA:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_solve(pme->gpu, h_grid, gridOrdering, computeEnergyAndVirial);
                    break;

                default:
                    GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! PME force gathering
void pmePerformGather(gmx_pme_t *pme, CodePath mode,
                      PmeForceOutputHandling inputTreatment, ForcesVector &forces)
{
    pme_atomcomm_t *atc                     = &(pme->atc[0]);
    const size_t    atomCount               = atc->n;
    GMX_RELEASE_ASSERT(forces.size() == atomCount, "Invalid force buffer size");
    const bool      forceReductionWithInput = (inputTreatment == PmeForceOutputHandling::ReduceWithInput);
    const real      scale                   = 1.0;
    const size_t    threadIndex             = 0;
    const size_t    gridIndex               = 0;
    real           *pmegrid                 = pme->pmegrid[gridIndex].grid.grid;
    real           *fftgrid                 = pme->fftgrid[gridIndex];

    switch (mode)
    {
        case CodePath::CPU:
            atc->f = as_rvec_array(forces.begin());
            if (atc->nthread == 1)
            {
                // something which is normally done in serial spline computation (make_thread_local_ind())
                atc->spline[threadIndex].n = atomCount;
            }
            copy_fftgrid_to_pmegrid(pme, fftgrid, pmegrid, gridIndex, pme->nthread, threadIndex);
            unwrap_periodic_pmegrid(pme, pmegrid);
            gather_f_bsplines(pme, pmegrid, !forceReductionWithInput, atc, &atc->spline[threadIndex], scale);
            break;

        case CodePath::CUDA:
        {
            // Variable initialization needs a non-switch scope
            auto stagingForces = pme_gpu_get_forces(pme->gpu);
            GMX_ASSERT(forces.size() == stagingForces.size(), "Size of force buffers did not match");
            if (forceReductionWithInput)
            {
                for (size_t i = 0; i != forces.size(); ++i)
                {
                    stagingForces[i] = forces[i];
                }
            }
            pme_gpu_gather(pme->gpu, inputTreatment, reinterpret_cast<float *>(fftgrid));
            for (size_t i = 0; i != forces.size(); ++i)
            {
                forces[i] = stagingForces[i];
            }
        }
        break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! PME test finalization before fetching the outputs
void pmeFinalizeTest(const gmx_pme_t *pme, CodePath mode)
{
    switch (mode)
    {
        case CodePath::CPU:
            break;

        case CodePath::CUDA:
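            // GPU work is launched asynchronously, so wait for it to finish
            // before the host reads back any outputs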
            pme_gpu_synchronize(pme->gpu);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
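/* A typical single-stage test flow, as a sketch: spread the charges,
 * synchronize, then fetch the resulting real grid with the getters below:
 *
 *   pmePerformSplineAndSpread(pme.get(), mode, true, true); // splines + spread
 *   pmeFinalizeTest(pme.get(), mode);
 *   SparseRealGridValuesOutput gridValues = pmeGetRealGrid(pme.get(), mode);
 */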
//! Setting atom spline values/derivatives to be used in spread/gather
void pmeSetSplineData(const gmx_pme_t *pme, CodePath mode,
                      const SplineParamsDimVector &splineValues, PmeSplineDataType type, int dimIndex)
{
    const pme_atomcomm_t *atc          = &(pme->atc[0]);
    const size_t          atomCount    = atc->n;
    const size_t          pmeOrder     = pme->pme_order;
    const size_t          dimSize      = pmeOrder * atomCount;
    GMX_RELEASE_ASSERT(dimSize == splineValues.size(), "Mismatch in spline data");
    real                 *splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);

    switch (mode)
    {
        case CodePath::CPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            break;

        case CodePath::CUDA:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Setting gridline indices to be used in spread/gather
void pmeSetGridLineIndices(const gmx_pme_t *pme, CodePath mode,
                           const GridLineIndicesVector &gridLineIndices)
{
    const pme_atomcomm_t *atc       = &(pme->atc[0]);
    const size_t          atomCount = atc->n;
    GMX_RELEASE_ASSERT(atomCount == gridLineIndices.size(), "Mismatch in gridline indices size");

    IVec paddedGridSizeUnused, gridSize(0, 0, 0);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);
    for (const auto &index : gridLineIndices)
    {
        for (int i = 0; i < DIM; i++)
        {
            GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]), "Invalid gridline index");
        }
    }

    switch (mode)
    {
        case CodePath::CUDA:
            memcpy(pme->gpu->staging.h_gridlineIndices, gridLineIndices.data(), atomCount * sizeof(gridLineIndices[0]));
            break;

        case CodePath::CPU:
            // incompatible IVec and ivec assignment?
            //std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx);
            memcpy(atc->idx, gridLineIndices.data(), atomCount * sizeof(gridLineIndices[0]));
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
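/* Flat-index layout, for reference: with padded grid sizes (Nx, Ny, Nz),
 * XYZ ordering maps (x, y, z) to (x * Ny + y) * Nz + z, while the transposed
 * YZX ordering used by the CPU FFT maps it to (y * Nz + z) * Nx + x. */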
//! Getting plain index into the complex 3d grid
inline size_t pmeGetGridPlainIndexInternal(const IVec &index, const IVec &paddedGridSize, GridOrdering gridOrdering)
{
    size_t result;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
            break;

        case GridOrdering::XYZ:
            result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
//! Setting real or complex grid
template<typename ValueType>
static void pmeSetGridInternal(const gmx_pme_t *pme, CodePath mode,
                               GridOrdering gridOrdering,
                               const SparseGridValuesInput<ValueType> &gridValues)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType *grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);

    switch (mode)
    {
        case CodePath::CUDA: // intentional absence of break, the grid will be copied from the host buffer in testing mode
        case CodePath::CPU:
            std::memset(grid, 0, paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
            for (const auto &gridValue : gridValues)
            {
                for (int i = 0; i < DIM; i++)
                {
                    GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]), "Invalid grid value index");
                }
                const size_t gridValueIndex = pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
                grid[gridValueIndex] = gridValue.second;
            }
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Setting real grid to be used in gather
void pmeSetRealGrid(const gmx_pme_t *pme, CodePath mode,
                    const SparseRealGridValuesInput &gridValues)
{
    pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
}
//! Setting complex grid to be used in solve
void pmeSetComplexGrid(const gmx_pme_t *pme, CodePath mode,
                       GridOrdering gridOrdering,
                       const SparseComplexGridValuesInput &gridValues)
{
    pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
}
//! Getting the single dimension's spline values or derivatives
SplineParamsDimVector pmeGetSplineData(const gmx_pme_t *pme, CodePath mode,
                                       PmeSplineDataType type, int dimIndex)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const pme_atomcomm_t *atc       = &(pme->atc[0]);
    const size_t          atomCount = atc->n;
    const size_t          pmeOrder  = pme->pme_order;
    const size_t          dimSize   = pmeOrder * atomCount;

    real                 *sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
    SplineParamsDimVector result;
    switch (mode)
    {
        case CodePath::CUDA:
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
            // fallthrough

        case CodePath::CPU:
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
//! Getting the gridline indices
GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t *pme, CodePath mode)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const pme_atomcomm_t *atc       = &(pme->atc[0]);
    const size_t          atomCount = atc->n;

    GridLineIndicesVector gridLineIndices;
    switch (mode)
    {
        case CodePath::CUDA:
            gridLineIndices = arrayRefFromArray(reinterpret_cast<IVec *>(pme->gpu->staging.h_gridlineIndices), atomCount);
            break;

        case CodePath::CPU:
            gridLineIndices = arrayRefFromArray(reinterpret_cast<IVec *>(atc->idx), atomCount);
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridLineIndices;
}
//! Getting real or complex grid - only non-zero values
template<typename ValueType>
static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t *pme, CodePath mode, GridOrdering gridOrdering)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType *grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
    SparseGridValuesOutput<ValueType> gridValues;
    switch (mode)
    {
        case CodePath::CUDA: // intentional absence of break
        case CodePath::CPU:
            gridValues.clear();
            for (int ix = 0; ix < gridSize[XX]; ix++)
            {
                for (int iy = 0; iy < gridSize[YY]; iy++)
                {
                    for (int iz = 0; iz < gridSize[ZZ]; iz++)
                    {
                        IVec            temp(ix, iy, iz);
                        const size_t    gridValueIndex = pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
                        const ValueType value          = grid[gridValueIndex];
                        if (value != ValueType {})
                        {
                            auto key        = formatString("Cell %d %d %d", ix, iy, iz);
                            gridValues[key] = value;
                        }
                    }
                }
            }
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridValues;
}
//! Getting the real grid (spreading output of pmePerformSplineAndSpread())
SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t *pme, CodePath mode)
{
    return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
}
//! Getting the complex grid output of pmePerformSolve()
SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t *pme, CodePath mode,
                                                GridOrdering gridOrdering)
{
    return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
}
//! Getting the reciprocal energy and virial
PmeSolveOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t *pme, CodePath mode,
                                               PmeSolveAlgorithm method)
{
    real      energy     = 0.0f;
    Matrix3x3 virial;
    matrix    virialTemp = {{0}}; //TODO get rid of
    switch (mode)
    {
        case CodePath::CPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    get_pme_ener_vir_q(pme->solve_work, pme->nthread, &energy, virialTemp);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &energy, virialTemp);
                    break;

                default:
                    GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::CUDA:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_get_energy_virial(pme->gpu, &energy, virialTemp);
                    break;

                default:
                    GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default:
            GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            virial[i * DIM + j] = virialTemp[i][j];
        }
    }
    return std::make_tuple(energy, virial);
}

} // namespace test
} // namespace gmx