/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements Leap-Frog using CUDA
 *
 * This file contains the implementation of the basic Leap-Frog integrator
 * using CUDA, including class initialization, data structure management
 * and the GPU kernel.
 *
 * \todo Reconsider naming towards using "gpu" suffix instead of "cuda".
 *
 * \author Artem Zhmurov <zhmurov@gmail.com>
 *
 * \ingroup module_mdlib
 */
#include "gmxpre.h"

#include "leapfrog_cuda_impl.h"

#include <assert.h>
#include <stdio.h>

#include <cmath>

#include <algorithm>

#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/devicebuffer.cuh"
#include "gromacs/gpu_utils/gputraits.cuh"
#include "gromacs/gpu_utils/vectype_ops.cuh"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/leapfrog_cuda.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pbcutil/pbc_aiuc_cuda.cuh"
namespace gmx
{

//! Number of CUDA threads in a block
constexpr static int c_threadsPerBlock = 256;
//! Maximum number of threads in a block (for __launch_bounds__)
constexpr static int c_maxThreadsPerBlock = c_threadsPerBlock;
/*! \brief Main kernel for the Leap-Frog integrator.
 *
 *  Each GPU thread works with a single particle. An empty declaration is needed to
 *  avoid a "no previous prototype for function" clang warning.
 *
 *  \todo Check if the force should be set to zero here.
 *  \todo This kernel can also accumulate incidental temperatures for each atom.
 *
 * \param[in]     numAtoms                  Total number of atoms.
 * \param[in]     gm_x                      Coordinates before the timestep.
 * \param[out]    gm_xp                     Coordinates after the timestep.
 * \param[in,out] gm_v                      Velocities to update.
 * \param[in]     gm_f                      Atomic forces.
 * \param[in]     gm_inverseMasses          Reciprocal masses.
 * \param[in]     dt                        Timestep.
 */
__launch_bounds__(c_maxThreadsPerBlock)
__global__ void leapfrog_kernel(const int                  numAtoms,
                                const float3* __restrict__ gm_x,
                                float3* __restrict__       gm_xp,
                                float3* __restrict__       gm_v,
                                const float3* __restrict__ gm_f,
                                const float*  __restrict__ gm_inverseMasses,
                                const float                dt);
__launch_bounds__(c_maxThreadsPerBlock)
__global__ void leapfrog_kernel(const int                  numAtoms,
                                const float3* __restrict__ gm_x,
                                float3* __restrict__       gm_xp,
                                float3* __restrict__       gm_v,
                                const float3* __restrict__ gm_f,
                                const float*  __restrict__ gm_inverseMasses,
                                const float                dt)
{
    int threadIndex = blockIdx.x*blockDim.x + threadIdx.x;
    if (threadIndex < numAtoms)
    {
        float3 xi           = gm_x[threadIndex];
        float3 vi           = gm_v[threadIndex];
        float3 fi           = gm_f[threadIndex];
        float  imi          = gm_inverseMasses[threadIndex];
        float  imidt        = imi*dt;
        vi                 += fi*imidt;
        xi                 += vi*dt;
        gm_v[threadIndex]   = vi;
        gm_xp[threadIndex]  = xi;
    }
    return;
}
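
/* For reference, the per-particle update in the kernel above is the standard
 * leap-frog scheme with velocities stored at half-steps:
 *
 *   v(t + dt/2) = v(t - dt/2) + (dt/m) * f(t)
 *   x(t + dt)   = x(t) + dt * v(t + dt/2)
 *
 * which is exactly what the two multiply-add updates on vi and xi compute.
 */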

void LeapFrogCuda::Impl::integrate(const float3 *d_x,
                                   float3       *d_xp,
                                   float3       *d_v,
                                   const float3 *d_f,
                                   const real    dt)
{

    ensureNoPendingCudaError("In CUDA version of Leap-Frog integrator");

    KernelLaunchConfig config;
    config.blockSize[0]     = c_threadsPerBlock;
    config.blockSize[1]     = 1;
    config.blockSize[2]     = 1;
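    // Round the number of thread blocks up so that the grid covers all atoms (ceiling division).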
    config.gridSize[0]      = (numAtoms_ + c_threadsPerBlock - 1)/c_threadsPerBlock;
    config.sharedMemorySize = 0;
    config.stream           = stream_;

    auto          kernelPtr         = leapfrog_kernel;
    const float3 *gm_x              = d_x;
    float3       *gm_xp             = d_xp;
    float3       *gm_v              = d_v;
    const float3 *gm_f              = d_f;
    const float  *gm_inverseMasses  = d_inverseMasses_;

    const auto    kernelArgs = prepareGpuKernelArguments(kernelPtr, config,
                                                         &numAtoms_,
                                                         &gm_x, &gm_xp,
                                                         &gm_v,
                                                         &gm_f,
                                                         &gm_inverseMasses, &dt);
    launchGpuKernel(kernelPtr, config, nullptr, "leapfrog_kernel", kernelArgs);

    return;
}

void LeapFrogCuda::Impl::copyIntegrateCopy(const int   numAtoms,
                                           const rvec *h_x,
                                           rvec       *h_xp,
                                           rvec       *h_v,
                                           const rvec *h_f,
                                           const real  dt)
{
    float3 *d_x, *d_xp, *d_v, *d_f;

    allocateDeviceBuffer(&d_x,  numAtoms, nullptr);
    allocateDeviceBuffer(&d_xp, numAtoms, nullptr);
    allocateDeviceBuffer(&d_v,  numAtoms, nullptr);
    allocateDeviceBuffer(&d_f,  numAtoms, nullptr);

    copyToDeviceBuffer(&d_x,  (float3*)h_x,  0, numAtoms, stream_, GpuApiCallBehavior::Sync, nullptr);
    copyToDeviceBuffer(&d_xp, (float3*)h_xp, 0, numAtoms, stream_, GpuApiCallBehavior::Sync, nullptr);
    copyToDeviceBuffer(&d_v,  (float3*)h_v,  0, numAtoms, stream_, GpuApiCallBehavior::Sync, nullptr);
    copyToDeviceBuffer(&d_f,  (float3*)h_f,  0, numAtoms, stream_, GpuApiCallBehavior::Sync, nullptr);

    integrate(d_x, d_xp, d_v, d_f, dt);

    copyFromDeviceBuffer((float3*)h_xp, &d_xp, 0, numAtoms, stream_, GpuApiCallBehavior::Sync, nullptr);
    copyFromDeviceBuffer((float3*)h_v,  &d_v,  0, numAtoms, stream_, GpuApiCallBehavior::Sync, nullptr);

    freeDeviceBuffer(&d_x);
    freeDeviceBuffer(&d_xp);
    freeDeviceBuffer(&d_v);
    freeDeviceBuffer(&d_f);
}
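
/* A minimal host-side usage sketch (illustrative only: `md`, `pbc`, and the
 * host-side rvec arrays are assumed to be set up by the caller):
 *
 *   gmx::LeapFrogCuda integrator;
 *   integrator.set(md);      // upload inverse masses for md.nr atoms
 *   integrator.setPbc(&pbc); // set the periodic boundary conditions
 *   integrator.copyIntegrateCopy(md.nr, x, xp, v, f, dt);
 *
 * All transfers in copyIntegrateCopy(...) are synchronous, so h_xp and h_v
 * contain the updated coordinates and velocities when the call returns.
 */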

LeapFrogCuda::Impl::Impl()
{
    numAtoms_ = 0;

    // TODO When this code is integrated into the schedule, it will be assigned a non-default stream.
    stream_ = nullptr;
}

LeapFrogCuda::Impl::~Impl()
{
    freeDeviceBuffer(&d_inverseMasses_);
}

void LeapFrogCuda::Impl::setPbc(const t_pbc *pbc)
{
    setPbcAiuc(pbc->ndim_ePBC, pbc->box, &pbcAiuc_);
}

void LeapFrogCuda::Impl::set(const t_mdatoms &md)
{
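    // Reallocate the inverse-mass buffer only when the atom count exceeds its current size.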
    if (md.nr > numAtoms_)
    {
        if (numAtoms_ > 0)
        {
            freeDeviceBuffer(&d_inverseMasses_);
        }
        numAtoms_ = md.nr;
        allocateDeviceBuffer(&d_inverseMasses_,  numAtoms_, nullptr);
    }
    copyToDeviceBuffer(&d_inverseMasses_, (float*)md.invmass,
                       0, numAtoms_, stream_, GpuApiCallBehavior::Sync, nullptr);
}

LeapFrogCuda::LeapFrogCuda()
    : impl_(new Impl())
{
}

LeapFrogCuda::~LeapFrogCuda() = default;

void LeapFrogCuda::copyIntegrateCopy(const int   numAtoms,
                                     const rvec *h_x,
                                     rvec       *h_xp,
                                     rvec       *h_v,
                                     const rvec *h_f,
                                     const real  dt)
{
    impl_->copyIntegrateCopy(numAtoms, h_x, h_xp, h_v, h_f, dt);
}

void LeapFrogCuda::setPbc(const t_pbc *pbc)
{
    impl_->setPbc(pbc);
}

void LeapFrogCuda::set(const t_mdatoms &md)
{
    impl_->set(md);
}

} // namespace gmx