/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements Leap-Frog using CUDA
 *
 * This file contains the implementation of a basic Leap-Frog integrator
 * using CUDA, including class initialization, data-structure management
 * and the GPU kernel.
 *
 * \todo Reconsider naming towards using "gpu" suffix instead of "cuda".
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdlib
 */
#include "gmxpre.h"

#include "leapfrog_cuda.cuh"

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.cuh"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vec.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pbcutil/pbc_aiuc_cuda.cuh"

namespace gmx
{
//! Number of CUDA threads in a block
constexpr static int c_threadsPerBlock = 256;
//! Maximum number of threads in a block (for __launch_bounds__)
constexpr static int c_maxThreadsPerBlock = c_threadsPerBlock;
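
/* For reference, the update performed by the kernel below is the standard
 * leap-frog scheme, with velocities living at half-steps:
 *
 *     v(t + dt/2) = v(t - dt/2) + dt*f(t)/m
 *     x(t + dt)   = x(t)        + dt*v(t + dt/2)
 */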

/*! \brief Main kernel for Leap-Frog integrator.
 *
 * Each GPU thread works with a single particle. Empty declaration is needed to
 * avoid "no previous prototype for function" clang warning.
 *
 * \todo Check if the force should be set to zero here.
 * \todo This kernel can also accumulate incidental temperatures for each atom.
 *
 * \param[in]     numAtoms          Total number of atoms.
 * \param[in]     gm_x              Coordinates before the timestep.
 * \param[out]    gm_xp             Coordinates after the timestep.
 * \param[in,out] gm_v              Velocities to update.
 * \param[in]     gm_f              Atomic forces.
 * \param[in]     gm_inverseMasses  Reciprocal masses.
 * \param[in]     dt                Timestep.
 */
__launch_bounds__(c_maxThreadsPerBlock)
__global__ void leapfrog_kernel(const int                  numAtoms,
                                const float3* __restrict__ gm_x,
                                float3* __restrict__       gm_xp,
                                float3* __restrict__       gm_v,
                                const float3* __restrict__ gm_f,
                                const float* __restrict__  gm_inverseMasses,
                                const float                dt);

__launch_bounds__(c_maxThreadsPerBlock)
__global__ void leapfrog_kernel(const int                  numAtoms,
                                const float3* __restrict__ gm_x,
                                float3* __restrict__       gm_xp,
                                float3* __restrict__       gm_v,
                                const float3* __restrict__ gm_f,
                                const float* __restrict__  gm_inverseMasses,
                                const float                dt)
{
    int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;
    if (threadIndex < numAtoms)
    {
        float3 xi    = gm_x[threadIndex];
        float3 vi    = gm_v[threadIndex];
        float3 fi    = gm_f[threadIndex];
        float  imi   = gm_inverseMasses[threadIndex];
        float  imidt = imi*dt;
        // Leap-frog update: advance the velocity by a full step using the
        // current force, then advance the position with the updated velocity.
        vi += fi*imidt;
        xi += vi*dt;
        gm_v[threadIndex]  = vi;
        gm_xp[threadIndex] = xi;
    }
}

void LeapFrogCuda::integrate(const float3 *d_x,
                             float3       *d_xp,
                             float3       *d_v,
                             const float3 *d_f,
                             const float   dt)
{
    ensureNoPendingCudaError("In CUDA version of Leap-Frog integrator");

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
    config.gridSize[0]      = (numAtoms_ + c_threadsPerBlock - 1)/c_threadsPerBlock;
    config.sharedMemorySize = 0;
    config.stream           = stream_;
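    // The grid size above rounds up so that every atom gets a thread: e.g.,
    // 1000 atoms with 256 threads per block launches 4 blocks (1024 threads);
    // the 24 excess threads exit at the bounds check in the kernel.
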
    auto kernelPtr = leapfrog_kernel;

    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config,
                                                      &numAtoms_,
                                                      &d_x, &d_xp,
                                                      &d_v, &d_f,
                                                      &d_inverseMasses_, &dt);
    launchGpuKernel(kernelPtr, config, nullptr, "leapfrog_kernel", kernelArgs);
}

LeapFrogCuda::LeapFrogCuda()
{
    numAtoms_ = 0;

    // TODO Assign a non-default stream when this code is integrated into the schedule.
    stream_ = nullptr;
}

LeapFrogCuda::~LeapFrogCuda()
{
    freeDeviceBuffer(&d_inverseMasses_);
}

void LeapFrogCuda::setPbc(const t_pbc *pbc)
{
    setPbcAiuc(pbc->ndim_ePBC, pbc->box, &pbcAiuc_);
}

void LeapFrogCuda::set(const t_mdatoms &md)
{
    if (md.nr > numAtoms_)
    {
        // The device buffer is too small for the new atom count: reallocate.
        if (numAtoms_ > 0)
        {
            freeDeviceBuffer(&d_inverseMasses_);
        }
        numAtoms_ = md.nr;
        allocateDeviceBuffer(&d_inverseMasses_, numAtoms_, nullptr);
    }
    copyToDeviceBuffer(&d_inverseMasses_, (float*)md.invmass,
                       0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}

} // namespace gmx
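
/* A minimal usage sketch (hypothetical caller code; the host-side object
 * names are assumptions, not part of this file):
 *
 *     gmx::LeapFrogCuda integrator;
 *     integrator.set(mdatoms);      // upload inverse masses for md.nr atoms
 *     integrator.setPbc(&pbc);      // pack PBC data into a GPU-friendly form
 *     integrator.integrate(d_x, d_xp, d_v, d_f, dt);  // one leap-frog step
 *
 * Here d_x, d_xp, d_v and d_f are device buffers owned by the caller.
 */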