/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief Implements common routines for PME tests.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pmetestcommon.h"

#include <cstring>

#include <algorithm>
#include "gromacs/domdec/domdec.h"
#include "gromacs/ewald/pme_gather.h"
#include "gromacs/ewald/pme_gpu_calculate_splines.h"
#include "gromacs/ewald/pme_gpu_constants.h"
#include "gromacs/ewald/pme_gpu_internal.h"
#include "gromacs/ewald/pme_gpu_staging.h"
#include "gromacs/ewald/pme_grid.h"
#include "gromacs/ewald/pme_internal.h"
#include "gromacs/ewald/pme_redistribute.h"
#include "gromacs/ewald/pme_solve.h"
#include "gromacs/ewald/pme_spread.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/gpu_utils/device_stream_manager.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/stringutil.h"
#include "testutils/testasserts.h"

#include "testhardwarecontexts.h"

namespace gmx
{
namespace test
{
bool pmeSupportsInputForMode(const gmx_hw_info_t& hwinfo, const t_inputrec* inputRec, CodePath mode)
{
    bool implemented;
    switch (mode)
    {
        case CodePath::CPU: implemented = true; break;

        case CodePath::GPU:
            implemented = (pme_gpu_supports_build(nullptr) && pme_gpu_supports_hardware(hwinfo, nullptr)
                           && pme_gpu_supports_input(*inputRec, nullptr));
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return implemented;
}
uint64_t getSplineModuliDoublePrecisionUlps(int splineOrder)
{
    /* Arbitrary ulp tolerance for sine/cosine implementation. It's
     * hard to know what to pick without testing lots of
     * implementations. */
    const uint64_t sineUlps = 10;
    return 4 * (splineOrder - 2) + 2 * sineUlps * splineOrder;
}
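/* For illustration only (not part of the test API): with the default PME
 * splineOrder = 4 and the arbitrary sine/cosine tolerance of 10 ulps above,
 * the returned tolerance is 4 * (4 - 2) + 2 * 10 * 4 = 88 ulps. */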
//! PME initialization
PmeSafePointer pmeInitWrapper(const t_inputrec*    inputRec,
                              const CodePath       mode,
                              const DeviceContext* deviceContext,
                              const DeviceStream*  deviceStream,
                              const PmeGpuProgram* pmeGpuProgram,
                              const Matrix3x3&     box,
                              const real           ewaldCoeff_q,
                              const real           ewaldCoeff_lj)
{
    const MDLogger dummyLogger;
    const auto     runMode       = (mode == CodePath::CPU) ? PmeRunMode::CPU : PmeRunMode::Mixed;
    t_commrec      dummyCommrec  = { 0 };
    NumPmeDomains  numPmeDomains = { 1, 1 };
    gmx_pme_t* pmeDataRaw = gmx_pme_init(&dummyCommrec, numPmeDomains, inputRec, false, false, true,
                                         ewaldCoeff_q, ewaldCoeff_lj, 1, runMode, nullptr,
                                         deviceContext, deviceStream, pmeGpuProgram, dummyLogger);
    PmeSafePointer pme(pmeDataRaw); // taking ownership
    // TODO get rid of this with proper matrix type
    matrix boxTemp;
    for (int i = 0; i < DIM; i++)
    {
        for (int j = 0; j < DIM; j++)
        {
            boxTemp[i][j] = box[i * DIM + j];
        }
    }
    const char* boxError = check_box(PbcType::Unset, boxTemp);
    GMX_RELEASE_ASSERT(boxError == nullptr, boxError);
    switch (mode)
    {
        case CodePath::CPU: invertBoxMatrix(boxTemp, pme->recipbox); break;

        case CodePath::GPU:
            pme_gpu_set_testing(pme->gpu, true);
            pme_gpu_update_input_box(pme->gpu, boxTemp);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }

    return pme;
}
//! Simple PME initialization based on input, no atom data
PmeSafePointer pmeInitEmpty(const t_inputrec*    inputRec,
                            const CodePath       mode,
                            const DeviceContext* deviceContext,
                            const DeviceStream*  deviceStream,
                            const PmeGpuProgram* pmeGpuProgram,
                            const Matrix3x3&     box,
                            const real           ewaldCoeff_q,
                            const real           ewaldCoeff_lj)
{
    return pmeInitWrapper(inputRec, mode, deviceContext, deviceStream, pmeGpuProgram, box,
                          ewaldCoeff_q, ewaldCoeff_lj);
    // hiding the fact that PME actually needs to know the number of atoms in advance
}
PmeSafePointer pmeInitEmpty(const t_inputrec* inputRec)
{
    const Matrix3x3 defaultBox = { { 1.0F, 0.0F, 0.0F, 0.0F, 1.0F, 0.0F, 0.0F, 0.0F, 1.0F } };
    return pmeInitWrapper(inputRec, CodePath::CPU, nullptr, nullptr, nullptr, defaultBox, 0.0F, 0.0F);
}
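/* A minimal usage sketch (illustration only, not part of the test API),
 * assuming a t_inputrec prepared elsewhere with valid PME settings
 * (grid dimensions, pme_order, an Ewald coulombtype):
 *
 *     PmeSafePointer pme = pmeInitEmpty(&inputRec); // CPU code path, unit box
 *
 * The returned safe pointer owns the gmx_pme_t and cleans it up on scope exit. */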
//! Make a GPU state-propagator manager
std::unique_ptr<StatePropagatorDataGpu> makeStatePropagatorDataGpu(const gmx_pme_t&     pme,
                                                                   const DeviceContext* deviceContext,
                                                                   const DeviceStream*  deviceStream)
{
    // TODO: Pin the host buffer and use async memory copies
    // TODO: Special constructor for PME-only rank / PME-tests is used here. There should be a
    //       mechanism to restrict one from using other constructor here.
    return std::make_unique<StatePropagatorDataGpu>(deviceStream, *deviceContext, GpuApiCallBehavior::Sync,
                                                    pme_gpu_get_block_size(&pme), nullptr);
}
//! PME initialization with atom data
void pmeInitAtoms(gmx_pme_t*               pme,
                  StatePropagatorDataGpu*  stateGpu,
                  const CodePath           mode,
                  const CoordinatesVector& coordinates,
                  const ChargesVector&     charges)
{
    const index atomCount = coordinates.size();
    GMX_RELEASE_ASSERT(atomCount == charges.ssize(), "Mismatch in atom data");
    PmeAtomComm* atc = nullptr;

    switch (mode)
    {
        case CodePath::CPU:
            atc              = &(pme->atc[0]);
            atc->x           = coordinates;
            atc->coefficient = charges;
            gmx_pme_reinit_atoms(pme, atomCount, charges.data(), nullptr);
            /* With decomposition there would be more boilerplate atc code here, e.g. do_redist_pos_coeffs */
            break;

        case CodePath::GPU:
            // TODO: Avoid use of atc in the GPU code path
            atc = &(pme->atc[0]);
            // We need to set atc->n for passing the size in the tests
            atc->setNumAtoms(atomCount);
            gmx_pme_reinit_atoms(pme, atomCount, charges.data(), nullptr);

            stateGpu->reinit(atomCount, atomCount);
            stateGpu->copyCoordinatesToGpu(arrayRefFromArray(coordinates.data(), coordinates.size()),
                                           gmx::AtomLocality::All);
            pme_gpu_set_kernelparam_coordinates(pme->gpu, stateGpu->getCoordinates());
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting local PME real grid pointer for test I/O
static real* pmeGetRealGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->fftgrid[gridIndex];
}
//! Getting local PME real grid dimensions
static void pmeGetRealGridSizesInternal(const gmx_pme_t* pme,
                                        CodePath         mode,
                                        IVec&            gridSize,       //NOLINT(google-runtime-references)
                                        IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused;
    switch (mode)
    {
        case CodePath::CPU:
            gmx_parallel_3dfft_real_limits(pme->pfft_setup[gridIndex], gridSize, gridOffsetUnused,
                                           paddedGridSize);
            break;

        case CodePath::GPU:
            pme_gpu_get_real_grid_sizes(pme->gpu, &gridSize, &paddedGridSize);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting local PME complex grid pointer for test I/O
static t_complex* pmeGetComplexGridInternal(const gmx_pme_t* pme)
{
    const size_t gridIndex = 0;
    return pme->cfftgrid[gridIndex];
}
//! Getting local PME complex grid dimensions
static void pmeGetComplexGridSizesInternal(const gmx_pme_t* pme,
                                           IVec&            gridSize,       //NOLINT(google-runtime-references)
                                           IVec&            paddedGridSize) //NOLINT(google-runtime-references)
{
    const size_t gridIndex = 0;
    IVec         gridOffsetUnused, complexOrderUnused;
    gmx_parallel_3dfft_complex_limits(pme->pfft_setup[gridIndex], complexOrderUnused, gridSize,
                                      gridOffsetUnused, paddedGridSize); // TODO: what about YZX ordering?
}
//! Getting the PME grid memory buffer and its sizes - template definition
template<typename ValueType>
static void pmeGetGridAndSizesInternal(const gmx_pme_t* /*unused*/,
                                       CodePath /*unused*/,
                                       ValueType*& /*unused*/, //NOLINT(google-runtime-references)
                                       IVec& /*unused*/,       //NOLINT(google-runtime-references)
                                       IVec& /*unused*/)       //NOLINT(google-runtime-references)
{
    GMX_THROW(InternalError("Deleted function call"));
    // explicitly deleting general template does not compile in clang/icc, see https://llvm.org/bugs/show_bug.cgi?id=17537
}
//! Getting the PME real grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<real>(const gmx_pme_t* pme, CodePath mode, real*& grid, IVec& gridSize, IVec& paddedGridSize)
{
    grid = pmeGetRealGridInternal(pme);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSize);
}
//! Getting the PME complex grid memory buffer and its sizes
template<>
void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t* pme,
                                           CodePath /*unused*/,
                                           t_complex&       grid,
                                           IVec&            gridSize,
                                           IVec&            paddedGridSize) = delete;
template<>
void pmeGetGridAndSizesInternal<t_complex>(const gmx_pme_t* pme,
                                           CodePath /*unused*/,
                                           t_complex*&      grid,
                                           IVec&            gridSize,
                                           IVec&            paddedGridSize)
{
    grid = pmeGetComplexGridInternal(pme);
    pmeGetComplexGridSizesInternal(pme, gridSize, paddedGridSize);
}
//! PME spline calculation and charge spreading
void pmePerformSplineAndSpread(gmx_pme_t* pme,
                               CodePath   mode, // TODO const qualifiers elsewhere
                               bool       computeSplines,
                               bool       spreadCharges)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    PmeAtomComm* atc                          = &(pme->atc[0]);
    const size_t gridIndex                    = 0;
    const bool   computeSplinesForZeroCharges = true;
    real**       fftgrid                      = spreadCharges ? pme->fftgrid : nullptr;
    real*        pmegrid                      = pme->pmegrid[gridIndex].grid.grid;

    switch (mode)
    {
        case CodePath::CPU:
            spread_on_grid(pme, atc, &pme->pmegrid[gridIndex], computeSplines, spreadCharges,
                           fftgrid != nullptr ? fftgrid[gridIndex] : nullptr,
                           computeSplinesForZeroCharges, gridIndex);
            if (spreadCharges && !pme->bUseThreads)
            {
                wrap_periodic_pmegrid(pme, pmegrid);
                copy_pmegrid_to_fftgrid(
                        pme, pmegrid, fftgrid != nullptr ? fftgrid[gridIndex] : nullptr, gridIndex);
            }
            break;

            /* The compiler will complain about passing fftgrid (converting double ** to float **) if using
             * double precision. GPUs are not used with double precision anyhow. */
#if !GMX_DOUBLE
        case CodePath::GPU:
        {
            const real lambdaQ = 1.0;
            // no synchronization needed as x is transferred in the PME stream
            GpuEventSynchronizer* xReadyOnDevice = nullptr;
            pme_gpu_spread(pme->gpu, xReadyOnDevice, fftgrid, computeSplines, spreadCharges, lambdaQ);
        }
        break;
#endif

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting the internal spline data buffer pointer
static real* pmeGetSplineDataInternal(const gmx_pme_t* pme, PmeSplineDataType type, int dimIndex)
{
    GMX_ASSERT((0 <= dimIndex) && (dimIndex < DIM), "Invalid dimension index");
    const PmeAtomComm* atc          = &(pme->atc[0]);
    const size_t       threadIndex  = 0;
    real*              splineBuffer = nullptr;
    switch (type)
    {
        case PmeSplineDataType::Values:
            splineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            break;

        case PmeSplineDataType::Derivatives:
            splineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }
    return splineBuffer;
}
//! PME solving
void pmePerformSolve(const gmx_pme_t*  pme,
                     CodePath          mode,
                     PmeSolveAlgorithm method,
                     real              cellVolume,
                     GridOrdering      gridOrdering,
                     bool              computeEnergyAndVirial)
{
    t_complex*   h_grid              = pmeGetComplexGridInternal(pme);
    const bool   useLorentzBerthelot = false;
    const size_t threadIndex         = 0;
    const size_t gridIndex           = 0;
    switch (mode)
    {
        case CodePath::CPU:
            if (gridOrdering != GridOrdering::YZX)
            {
                GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    solve_pme_yzx(pme, h_grid, cellVolume, computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    solve_pme_lj_yzx(pme, &h_grid, useLorentzBerthelot, cellVolume,
                                     computeEnergyAndVirial, pme->nthread, threadIndex);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_solve(pme->gpu, gridIndex, h_grid, gridOrdering, computeEnergyAndVirial);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! PME force gathering
void pmePerformGather(gmx_pme_t* pme, CodePath mode, ForcesVector& forces)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(forces.ssize() == atomCount, "Invalid force buffer size");
    const real   scale       = 1.0;
    const size_t threadIndex = 0;
    const size_t gridIndex   = 0;
    real*        pmegrid     = pme->pmegrid[gridIndex].grid.grid;
    real**       fftgrid     = pme->fftgrid;

    switch (mode)
    {
        case CodePath::CPU:
            atc->f = forces;
            if (atc->nthread == 1)
            {
                // something which is normally done in serial spline computation (make_thread_local_ind())
                atc->spline[threadIndex].n = atomCount;
            }
            copy_fftgrid_to_pmegrid(pme, fftgrid[gridIndex], pmegrid, gridIndex, pme->nthread, threadIndex);
            unwrap_periodic_pmegrid(pme, pmegrid);
            gather_f_bsplines(pme, pmegrid, true, atc, &atc->spline[threadIndex], scale);
            break;

            /* The compiler will complain about passing fftgrid (converting double ** to float **) if using
             * double precision. GPUs are not used with double precision anyhow. */
#if !GMX_DOUBLE
        case CodePath::GPU:
        {
            // Variable initialization needs a non-switch scope
            const bool computeEnergyAndVirial = false;
            const real lambdaQ                = 1.0;
            PmeOutput  output = pme_gpu_getOutput(*pme, computeEnergyAndVirial, lambdaQ);
            GMX_ASSERT(forces.size() == output.forces_.size(),
                       "Size of force buffers did not match");
            pme_gpu_gather(pme->gpu, fftgrid, lambdaQ);
            std::copy(std::begin(output.forces_), std::end(output.forces_), std::begin(forces));
        }
        break;
#endif

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
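/* For orientation: a typical gather test (see pmegathertest.cpp) drives the
 * helpers above and below roughly as sketched here. The input values are
 * hypothetical; pmeSetRealGrid, pmeSetGridLineIndices and pmeSetSplineData
 * are defined later in this file.
 *
 *     PmeSafePointer pme = pmeInitEmpty(&inputRec);            // CPU code path
 *     pmeInitAtoms(pme.get(), nullptr, CodePath::CPU, coordinates, charges);
 *     pmeSetRealGrid(pme.get(), CodePath::CPU, nonZeroGridValues);
 *     pmeSetGridLineIndices(pme.get(), CodePath::CPU, gridLineIndices);
 *     pmeSetSplineData(pme.get(), CodePath::CPU, splineValues,
 *                      PmeSplineDataType::Values, 0); // dimIndex = 0
 *     pmePerformGather(pme.get(), CodePath::CPU, forces);
 *     pmeFinalizeTest(pme.get(), CodePath::CPU);
 */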
//! PME test finalization before fetching the outputs
void pmeFinalizeTest(const gmx_pme_t* pme, CodePath mode)
{
    switch (mode)
    {
        case CodePath::CPU: break;

        case CodePath::GPU: pme_gpu_synchronize(pme->gpu); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! A binary enum for spline data layout transformation
enum class PmeLayoutTransform
{
    GpuToHost,
    HostToGpu
};
/*! \brief Gets a unique index to an element in a spline parameter buffer.
 *
 * These theta/dtheta buffers are laid out for GPU spread/gather
 * kernels. The index is wrt the execution block, in range(0,
 * atomsPerBlock * order * DIM).
 *
 * This is a wrapper, only used in unit tests.
 * \param[in] order        PME order
 * \param[in] splineIndex  Spline contribution index (from 0 to \p order - 1)
 * \param[in] dimIndex     Dimension index (from 0 to 2)
 * \param[in] atomIndex    Atom index wrt the block.
 * \param[in] atomsPerWarp Number of atoms processed by a warp.
 *
 * \returns Index into theta or dtheta array using GPU layout.
 */
static int getSplineParamFullIndex(int order, int splineIndex, int dimIndex, int atomIndex, int atomsPerWarp)
{
    if (order != c_pmeGpuOrder)
    {
        throw order;
    }
    constexpr int fixedOrder = c_pmeGpuOrder;
    GMX_UNUSED_VALUE(fixedOrder);

    const int atomWarpIndex = atomIndex % atomsPerWarp;
    const int warpIndex     = atomIndex / atomsPerWarp;
    int       indexBase, result;
    switch (atomsPerWarp)
    {
        case 1:
            indexBase = getSplineParamIndexBase<fixedOrder, 1>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 1>(indexBase, dimIndex, splineIndex);
            break;

        case 2:
            indexBase = getSplineParamIndexBase<fixedOrder, 2>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 2>(indexBase, dimIndex, splineIndex);
            break;

        case 4:
            indexBase = getSplineParamIndexBase<fixedOrder, 4>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 4>(indexBase, dimIndex, splineIndex);
            break;

        case 8:
            indexBase = getSplineParamIndexBase<fixedOrder, 8>(warpIndex, atomWarpIndex);
            result    = getSplineParamIndex<fixedOrder, 8>(indexBase, dimIndex, splineIndex);
            break;

        default:
            GMX_THROW(NotImplementedError(
                    formatString("Test function call not unrolled for atomsPerWarp = %d in "
                                 "getSplineParamFullIndex",
                                 atomsPerWarp)));
    }
    return result;
}
/*! \brief Return the number of atoms per warp */
static int pme_gpu_get_atoms_per_warp(const PmeGpu* pmeGpu)
{
    const int order = pmeGpu->common->pme_order;
    const int threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    return pmeGpu->programHandle_->warpSize() / threadsPerAtom;
}
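/* For illustration only: with the usual PME order of 4 and a warp size of 32,
 * the ThreadsPerAtom::Order setting gives 4 threads per atom, so a warp covers
 * 32 / 4 = 8 atoms; the order * order setting gives 16 threads per atom, i.e.
 * 32 / 16 = 2 atoms per warp. */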
/*! \brief Rearranges the atom spline data between the GPU and host layouts.
 * Only used for test purposes so far, likely to be horribly slow.
 *
 * \param[in]  pmeGpu    The PME GPU structure.
 * \param[out] atc       The PME CPU atom data structure (with a single-threaded layout).
 * \param[in]  type      The spline data type (values or derivatives).
 * \param[in]  dimIndex  Dimension index.
 * \param[in]  transform Layout transform type
 */
static void pme_gpu_transform_spline_atom_data(const PmeGpu*      pmeGpu,
                                               const PmeAtomComm* atc,
                                               PmeSplineDataType  type,
                                               int                dimIndex,
                                               PmeLayoutTransform transform)
{
    // The GPU atom spline data is laid out in a different way currently than the CPU one.
    // This function converts the data from GPU to CPU layout (in the host memory).
    // It is only intended for testing purposes so far.
    // Ideally we should use similar layouts on CPU and GPU if we care about mixed modes and their
    // performance (e.g. spreading on GPU, gathering on CPU).
    GMX_RELEASE_ASSERT(atc->nthread == 1, "Only the serial PME data layout is supported");
    const uintmax_t threadIndex  = 0;
    const auto      atomCount    = atc->numAtoms();
    const auto      atomsPerWarp = pme_gpu_get_atoms_per_warp(pmeGpu);
    const auto      pmeOrder     = pmeGpu->common->pme_order;
    GMX_ASSERT(pmeOrder == c_pmeGpuOrder, "Only PME order 4 is implemented");

    real*  cpuSplineBuffer;
    float* h_splineBuffer;
    switch (type)
    {
        case PmeSplineDataType::Values:
            cpuSplineBuffer = atc->spline[threadIndex].theta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_theta;
            break;

        case PmeSplineDataType::Derivatives:
            cpuSplineBuffer = atc->spline[threadIndex].dtheta.coefficients[dimIndex];
            h_splineBuffer  = pmeGpu->staging.h_dtheta;
            break;

        default: GMX_THROW(InternalError("Unknown spline data type"));
    }

    for (auto atomIndex = 0; atomIndex < atomCount; atomIndex++)
    {
        for (auto orderIndex = 0; orderIndex < pmeOrder; orderIndex++)
        {
            const auto gpuValueIndex =
                    getSplineParamFullIndex(pmeOrder, orderIndex, dimIndex, atomIndex, atomsPerWarp);
            const auto cpuValueIndex = atomIndex * pmeOrder + orderIndex;
            GMX_ASSERT(cpuValueIndex < atomCount * pmeOrder,
                       "Atom spline data index out of bounds (while transforming GPU data layout "
                       "for host)");
            switch (transform)
            {
                case PmeLayoutTransform::GpuToHost:
                    cpuSplineBuffer[cpuValueIndex] = h_splineBuffer[gpuValueIndex];
                    break;

                case PmeLayoutTransform::HostToGpu:
                    h_splineBuffer[gpuValueIndex] = cpuSplineBuffer[cpuValueIndex];
                    break;

                default: GMX_THROW(InternalError("Unknown layout transform"));
            }
        }
    }
}
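/* For illustration only: in the CPU layout the spline value for
 * (atomIndex, orderIndex) in a given dimension lives at the contiguous offset
 * atomIndex * pmeOrder + orderIndex, whereas the GPU-layout offset returned by
 * getSplineParamFullIndex() also depends on the warp the atom belongs to and
 * on atomsPerWarp, so the loop above touches the host staging buffer in a
 * scattered pattern. */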
//! Setting atom spline values/derivatives to be used in spread/gather
void pmeSetSplineData(const gmx_pme_t*             pme,
                      CodePath                     mode,
                      const SplineParamsDimVector& splineValues,
                      PmeSplineDataType            type,
                      int                          dimIndex)
{
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const index        atomCount = atc->numAtoms();
    const index        pmeOrder  = pme->pme_order;
    const index        dimSize   = pmeOrder * atomCount;
    GMX_RELEASE_ASSERT(dimSize == splineValues.ssize(), "Mismatch in spline data");
    real* splineBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);

    switch (mode)
    {
        case CodePath::CPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            break;

        case CodePath::GPU:
            std::copy(splineValues.begin(), splineValues.end(), splineBuffer);
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::HostToGpu);
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Setting gridline indices to be used in spread/gather
void pmeSetGridLineIndices(gmx_pme_t* pme, CodePath mode, const GridLineIndicesVector& gridLineIndices)
{
    PmeAtomComm* atc       = &(pme->atc[0]);
    const index  atomCount = atc->numAtoms();
    GMX_RELEASE_ASSERT(atomCount == gridLineIndices.ssize(), "Mismatch in gridline indices size");

    IVec paddedGridSizeUnused, gridSize(0, 0, 0);
    pmeGetRealGridSizesInternal(pme, mode, gridSize, paddedGridSizeUnused);

    for (const auto& index : gridLineIndices)
    {
        for (int i = 0; i < DIM; i++)
        {
            GMX_RELEASE_ASSERT((0 <= index[i]) && (index[i] < gridSize[i]),
                               "Invalid gridline index");
        }
    }

    switch (mode)
    {
        case CodePath::GPU:
            memcpy(pme_gpu_staging(pme->gpu).h_gridlineIndices, gridLineIndices.data(),
                   atomCount * sizeof(gridLineIndices[0]));
            break;

        case CodePath::CPU:
            atc->idx.resize(gridLineIndices.size());
            std::copy(gridLineIndices.begin(), gridLineIndices.end(), atc->idx.begin());
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Getting plain index into the complex 3d grid
inline size_t pmeGetGridPlainIndexInternal(const IVec& index, const IVec& paddedGridSize, GridOrdering gridOrdering)
{
    size_t result;
    switch (gridOrdering)
    {
        case GridOrdering::YZX:
            result = (index[YY] * paddedGridSize[ZZ] + index[ZZ]) * paddedGridSize[XX] + index[XX];
            break;

        case GridOrdering::XYZ:
            result = (index[XX] * paddedGridSize[YY] + index[YY]) * paddedGridSize[ZZ] + index[ZZ];
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
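/* For illustration only: for a padded grid of 4 x 4 x 4 and index (1, 2, 3),
 * XYZ ordering gives (1 * 4 + 2) * 4 + 3 = 27, while YZX ordering gives
 * (2 * 4 + 3) * 4 + 1 = 45, i.e. the same logical cell lands at a different
 * plain offset depending on the grid ordering. */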
//! Setting real or complex grid
template<typename ValueType>
static void pmeSetGridInternal(const gmx_pme_t*                        pme,
                               CodePath                                mode,
                               GridOrdering                            gridOrdering,
                               const SparseGridValuesInput<ValueType>& gridValues)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);

    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break, the grid will be copied from the host buffer in testing mode
        case CodePath::CPU:
            std::memset(grid, 0,
                        paddedGridSize[XX] * paddedGridSize[YY] * paddedGridSize[ZZ] * sizeof(ValueType));
            for (const auto& gridValue : gridValues)
            {
                for (int i = 0; i < DIM; i++)
                {
                    GMX_RELEASE_ASSERT((0 <= gridValue.first[i]) && (gridValue.first[i] < gridSize[i]),
                                       "Invalid grid value index");
                }
                const size_t gridValueIndex =
                        pmeGetGridPlainIndexInternal(gridValue.first, paddedGridSize, gridOrdering);
                grid[gridValueIndex] = gridValue.second;
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
}
//! Setting real grid to be used in gather
void pmeSetRealGrid(const gmx_pme_t* pme, CodePath mode, const SparseRealGridValuesInput& gridValues)
{
    pmeSetGridInternal<real>(pme, mode, GridOrdering::XYZ, gridValues);
}
//! Setting complex grid to be used in solve
void pmeSetComplexGrid(const gmx_pme_t*                    pme,
                       CodePath                            mode,
                       GridOrdering                        gridOrdering,
                       const SparseComplexGridValuesInput& gridValues)
{
    pmeSetGridInternal<t_complex>(pme, mode, gridOrdering, gridValues);
}
//! Getting the single dimension's spline values or derivatives
SplineParamsDimVector pmeGetSplineData(const gmx_pme_t* pme, CodePath mode, PmeSplineDataType type, int dimIndex)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();
    const size_t       pmeOrder  = pme->pme_order;
    const size_t       dimSize   = pmeOrder * atomCount;

    real*                 sourceBuffer = pmeGetSplineDataInternal(pme, type, dimIndex);
    SplineParamsDimVector result;
    switch (mode)
    {
        case CodePath::GPU:
            pme_gpu_transform_spline_atom_data(pme->gpu, atc, type, dimIndex, PmeLayoutTransform::GpuToHost);
            result = arrayRefFromArray(sourceBuffer, dimSize);
            break;

        case CodePath::CPU: result = arrayRefFromArray(sourceBuffer, dimSize); break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return result;
}
//! Getting the gridline indices
GridLineIndicesVector pmeGetGridlineIndices(const gmx_pme_t* pme, CodePath mode)
{
    GMX_RELEASE_ASSERT(pme != nullptr, "PME data is not initialized");
    const PmeAtomComm* atc       = &(pme->atc[0]);
    const size_t       atomCount = atc->numAtoms();

    GridLineIndicesVector gridLineIndices;
    switch (mode)
    {
        case CodePath::GPU:
            gridLineIndices = arrayRefFromArray(
                    reinterpret_cast<IVec*>(pme_gpu_staging(pme->gpu).h_gridlineIndices), atomCount);
            break;

        case CodePath::CPU: gridLineIndices = atc->idx; break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridLineIndices;
}
//! Getting real or complex grid - only non zero values
template<typename ValueType>
static SparseGridValuesOutput<ValueType> pmeGetGridInternal(const gmx_pme_t* pme,
                                                            CodePath         mode,
                                                            GridOrdering     gridOrdering)
{
    IVec       gridSize(0, 0, 0), paddedGridSize(0, 0, 0);
    ValueType* grid;
    pmeGetGridAndSizesInternal<ValueType>(pme, mode, grid, gridSize, paddedGridSize);
    SparseGridValuesOutput<ValueType> gridValues;
    switch (mode)
    {
        case CodePath::GPU: // intentional absence of break
        case CodePath::CPU:
            gridValues.clear();
            for (int ix = 0; ix < gridSize[XX]; ix++)
            {
                for (int iy = 0; iy < gridSize[YY]; iy++)
                {
                    for (int iz = 0; iz < gridSize[ZZ]; iz++)
                    {
                        IVec         temp(ix, iy, iz);
                        const size_t gridValueIndex =
                                pmeGetGridPlainIndexInternal(temp, paddedGridSize, gridOrdering);
                        const ValueType value = grid[gridValueIndex];
                        if (value != ValueType{})
                        {
                            auto key        = formatString("Cell %d %d %d", ix, iy, iz);
                            gridValues[key] = value;
                        }
                    }
                }
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return gridValues;
}
//! Getting the real grid (spreading output of pmePerformSplineAndSpread())
SparseRealGridValuesOutput pmeGetRealGrid(const gmx_pme_t* pme, CodePath mode)
{
    return pmeGetGridInternal<real>(pme, mode, GridOrdering::XYZ);
}
//! Getting the complex grid output of pmePerformSolve()
SparseComplexGridValuesOutput pmeGetComplexGrid(const gmx_pme_t* pme, CodePath mode, GridOrdering gridOrdering)
{
    return pmeGetGridInternal<t_complex>(pme, mode, gridOrdering);
}
//! Getting the reciprocal energy and virial
PmeOutput pmeGetReciprocalEnergyAndVirial(const gmx_pme_t* pme, CodePath mode, PmeSolveAlgorithm method)
{
    PmeOutput  output;
    const real lambdaQ = 1.0;
    switch (mode)
    {
        case CodePath::CPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    get_pme_ener_vir_q(pme->solve_work, pme->nthread, &output);
                    break;

                case PmeSolveAlgorithm::LennardJones:
                    get_pme_ener_vir_lj(pme->solve_work, pme->nthread, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        case CodePath::GPU:
            switch (method)
            {
                case PmeSolveAlgorithm::Coulomb:
                    pme_gpu_getEnergyAndVirial(*pme, lambdaQ, &output);
                    break;

                default: GMX_THROW(InternalError("Test not implemented for this mode"));
            }
            break;

        default: GMX_THROW(InternalError("Test not implemented for this mode"));
    }
    return output;
}

} // namespace test
} // namespace gmx