2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016 by the GROMACS development team.
5 * Copyright (c) 2017,2018,2019,2020, by the GROMACS development team, led by
6 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 * and including many others, as listed in the AUTHORS file in the
8 * top-level source directory and at http://www.gromacs.org.
10 * GROMACS is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public License
12 * as published by the Free Software Foundation; either version 2.1
13 * of the License, or (at your option) any later version.
15 * GROMACS is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with GROMACS; if not, see
22 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 * If you want to redistribute modifications to GROMACS, please
26 * consider that scientific software is very special. Version
27 * control is crucial - bugs must be traceable. We will be happy to
28 * consider code for inclusion in the official distribution, but
29 * derived work must not be called official GROMACS. Details are found
30 * in the README & COPYING files - if they are missing, get the
31 * official version at http://www.gromacs.org.
33 * To help us fund GROMACS development, we humbly ask that you cite
34 * the research papers on the package. Check out http://www.gromacs.org.
37 * \brief Define CUDA implementation of nbnxn_gpu_data_mgmt.h
39 * \author Szilard Pall <pall.szilard@gmail.com>
48 // TODO We would like to move this down, but the way gmx_nbnxn_gpu_t
49 // is currently declared means this has to be before gpu_types.h
50 #include "nbnxm_cuda_types.h"
52 // TODO Remove this comment when the above order issue is resolved
53 #include "gromacs/gpu_utils/cudautils.cuh"
54 #include "gromacs/gpu_utils/gpu_utils.h"
55 #include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
56 #include "gromacs/gpu_utils/pmalloc_cuda.h"
57 #include "gromacs/hardware/gpu_hw_info.h"
58 #include "gromacs/math/vectypes.h"
59 #include "gromacs/mdlib/force_flags.h"
60 #include "gromacs/mdtypes/interaction_const.h"
61 #include "gromacs/mdtypes/md_enums.h"
62 #include "gromacs/nbnxm/atomdata.h"
63 #include "gromacs/nbnxm/gpu_data_mgmt.h"
64 #include "gromacs/nbnxm/gridset.h"
65 #include "gromacs/nbnxm/nbnxm.h"
66 #include "gromacs/nbnxm/nbnxm_gpu.h"
67 #include "gromacs/nbnxm/pairlistsets.h"
68 #include "gromacs/pbcutil/ishift.h"
69 #include "gromacs/timing/gpu_timing.h"
70 #include "gromacs/utility/basedefinitions.h"
71 #include "gromacs/utility/cstringutil.h"
72 #include "gromacs/utility/fatalerror.h"
73 #include "gromacs/utility/real.h"
74 #include "gromacs/utility/smalloc.h"
76 #include "nbnxm_cuda.h"
81 /* This is a heuristically determined parameter for the Kepler
82 * and Maxwell architectures; the minimum size of the ci list is obtained by
83 * multiplying this constant with the # of multiprocessors on the current device.
84 * Since the maximum number of resident blocks per multiprocessor is 16, the
85 * ideal block count for small systems is 2-3 waves, i.e. 32 or 48 blocks per
86 * multiprocessor. Because there is a bit of fluctuation in the generated
87 * block counts, we use a target of 44 instead of the ideal value of 48.
89 static unsigned int gpu_min_ci_balanced_factor = 44;
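/* Worked example (illustrative; the multiprocessor count of 80 is a
 * hypothetical value, not something read from this file): on a device with
 * 80 multiprocessors, gpu_min_ci_balanced() below would return
 * 44 * 80 = 3520, i.e. the pair search is asked to generate at least ~3520
 * ci list entries so that all multiprocessors stay busy for small systems. */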
92 static void nbnxn_cuda_clear_e_fshift(gmx_nbnxn_cuda_t* nb);
95 static void nbnxn_cuda_free_nbparam_table(cu_nbparam_t* nbparam);
97 /*! \brief Return whether combination rules are used.
99 * \param[in] nbparam  Pointer to the nonbonded parameter struct
100 * \return true if combination rules are used in this run, false otherwise
102 static inline bool useLjCombRule(const cu_nbparam_t* nbparam)
104 return (nbparam->vdwtype == evdwCuCUTCOMBGEOM || nbparam->vdwtype == evdwCuCUTCOMBLB);
107 /*! \brief Initializes the Ewald Coulomb correction GPU table.
109 Tabulates the Ewald Coulomb force and initializes the size/scale
110 and the table GPU array. If called with an already allocated table,
111 it just re-uploads the table.
113 static void init_ewald_coulomb_force_table(const EwaldCorrectionTables& tables, cu_nbparam_t* nbp)
115 if (nbp->coulomb_tab != nullptr)
117 nbnxn_cuda_free_nbparam_table(nbp);
120 nbp->coulomb_tab_scale = tables.scale;
121 initParamLookupTable(nbp->coulomb_tab, nbp->coulomb_tab_texobj, tables.tableF.data(),
122 tables.tableF.size());
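/* Sketch of how the non-bonded kernels are assumed to consume this table
 * (illustrative names, not the actual kernel code): the scaled distance
 * indexes the force table and the value is linearly interpolated.
 *
 *   float rt   = r * nbp->coulomb_tab_scale;
 *   int   i    = static_cast<int>(rt);
 *   float frac = rt - i;
 *   float F    = (1.0f - frac) * coulomb_tab[i] + frac * coulomb_tab[i + 1];
 */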
126 /*! Initializes the atomdata structure the first time; it only gets filled at pair-search or domain-decomposition steps. */
128 static void init_atomdata_first(cu_atomdata_t* ad, int ntypes)
133 stat = cudaMalloc((void**)&ad->shift_vec, SHIFTS * sizeof(*ad->shift_vec));
134 CU_RET_ERR(stat, "cudaMalloc failed on ad->shift_vec");
135 ad->bShiftVecUploaded = false;
137 stat = cudaMalloc((void**)&ad->fshift, SHIFTS * sizeof(*ad->fshift));
138 CU_RET_ERR(stat, "cudaMalloc failed on ad->fshift");
140 stat = cudaMalloc((void**)&ad->e_lj, sizeof(*ad->e_lj));
141 CU_RET_ERR(stat, "cudaMalloc failed on ad->e_lj");
142 stat = cudaMalloc((void**)&ad->e_el, sizeof(*ad->e_el));
143 CU_RET_ERR(stat, "cudaMalloc failed on ad->e_el");
145 /* initialize to nullptr pointers to data that is not allocated here and will
146 need reallocation in nbnxn_cuda_init_atomdata */
150 /* size -1 indicates that the respective array hasn't been initialized yet */
155 /*! Selects the Ewald kernel type, analytical on SM 3.0 and later, tabulated on
156 earlier GPUs, single or twin cut-off. */
157 static int pick_ewald_kernel_type(const interaction_const_t& ic)
159 bool bTwinCut = (ic.rcoulomb != ic.rvdw);
160 bool bUseAnalyticalEwald, bForceAnalyticalEwald, bForceTabulatedEwald;
163 /* Benchmarking/development environment variables to force the use of
164 analytical or tabulated Ewald kernel. */
165 bForceAnalyticalEwald = (getenv("GMX_CUDA_NB_ANA_EWALD") != nullptr);
166 bForceTabulatedEwald = (getenv("GMX_CUDA_NB_TAB_EWALD") != nullptr);
168 if (bForceAnalyticalEwald && bForceTabulatedEwald)
171 "Both analytical and tabulated Ewald CUDA non-bonded kernels "
172 "requested through environment variables.");
175 /* By default use analytical Ewald. */
176 bUseAnalyticalEwald = true;
177 if (bForceAnalyticalEwald)
181 fprintf(debug, "Using analytical Ewald CUDA kernels\n");
184 else if (bForceTabulatedEwald)
186 bUseAnalyticalEwald = false;
190 fprintf(debug, "Using tabulated Ewald CUDA kernels\n");
194 /* Use twin cut-off kernels if required by the cut-offs (bTwinCut) or if the
195 env. var. forces it (use it for debugging/benchmarking only). */
196 if (!bTwinCut && (getenv("GMX_CUDA_NB_EWALD_TWINCUT") == nullptr))
198 kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA : eelCuEWALD_TAB;
202 kernel_type = bUseAnalyticalEwald ? eelCuEWALD_ANA_TWIN : eelCuEWALD_TAB_TWIN;
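/* Usage note (assumed typical invocation, debugging/benchmarking only):
 * exporting GMX_CUDA_NB_ANA_EWALD=1 or GMX_CUDA_NB_TAB_EWALD=1 before
 * running mdrun forces the analytical or tabulated Ewald kernels,
 * respectively, and GMX_CUDA_NB_EWALD_TWINCUT forces the twin cut-off
 * variants even when rcoulomb == rvdw. */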
208 /*! Copies all parameters related to the cut-off from ic to nbp */
209 static void set_cutoff_parameters(cu_nbparam_t* nbp, const interaction_const_t* ic, const PairlistParams& listParams)
211 nbp->ewald_beta = ic->ewaldcoeff_q;
212 nbp->sh_ewald = ic->sh_ewald;
213 nbp->epsfac = ic->epsfac;
214 nbp->two_k_rf = 2.0 * ic->k_rf;
215 nbp->c_rf = ic->c_rf;
216 nbp->rvdw_sq = ic->rvdw * ic->rvdw;
217 nbp->rcoulomb_sq = ic->rcoulomb * ic->rcoulomb;
218 nbp->rlistOuter_sq = listParams.rlistOuter * listParams.rlistOuter;
219 nbp->rlistInner_sq = listParams.rlistInner * listParams.rlistInner;
220 nbp->useDynamicPruning = listParams.useDynamicPruning;
222 nbp->sh_lj_ewald = ic->sh_lj_ewald;
223 nbp->ewaldcoeff_lj = ic->ewaldcoeff_lj;
225 nbp->rvdw_switch = ic->rvdw_switch;
226 nbp->dispersion_shift = ic->dispersion_shift;
227 nbp->repulsion_shift = ic->repulsion_shift;
228 nbp->vdw_switch = ic->vdw_switch;
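/* Note (descriptive addition): the cut-offs and list radii are stored
 * squared because the kernels compare squared pair distances directly and
 * thus avoid a sqrt in the inner loop, roughly:
 *
 *   if (r2 < nbp->rcoulomb_sq) { ...compute the Coulomb interaction... }
 */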
231 /*! Initializes the nonbonded parameter data structure. */
232 static void init_nbparam(cu_nbparam_t* nbp,
233 const interaction_const_t* ic,
234 const PairlistParams& listParams,
235 const nbnxn_atomdata_t::Params& nbatParams)
239 ntypes = nbatParams.numTypes;
241 set_cutoff_parameters(nbp, ic, listParams);
243 /* The kernel code supports LJ combination rules (geometric and LB) for
244 * all kernel types, but we only generate useful combination rule kernels.
245 * We currently only use LJ combination rule (geometric and LB) kernels
246 * for plain cut-off LJ. On Maxwell the force only kernels speed up 15%
247 * with PME and 20% with RF, the other kernels speed up about half as much.
248 * For LJ force-switch the geometric rule would give 7% speed-up, but this
249 * combination is rarely used. LJ force-switch with LB rule is more common,
250 * but gives only 1% speed-up.
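/* For reference (standard definitions, not code from this file), the two
 * combination rules mentioned above build pair LJ parameters from
 * per-atom-type values as:
 *   geometric:          c6_ij = sqrt(c6_i * c6_j),  c12_ij = sqrt(c12_i * c12_j)
 *   Lorentz-Berthelot:  sigma_ij = 0.5*(sigma_i + sigma_j),  eps_ij = sqrt(eps_i * eps_j)
 */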
252 if (ic->vdwtype == evdwCUT)
254 switch (ic->vdw_modifier)
257 case eintmodPOTSHIFT:
258 switch (nbatParams.comb_rule)
260 case ljcrNONE: nbp->vdwtype = evdwCuCUT; break;
261 case ljcrGEOM: nbp->vdwtype = evdwCuCUTCOMBGEOM; break;
262 case ljcrLB: nbp->vdwtype = evdwCuCUTCOMBLB; break;
265 "The requested LJ combination rule is not implemented in the CUDA "
266 "GPU accelerated kernels!");
269 case eintmodFORCESWITCH: nbp->vdwtype = evdwCuFSWITCH; break;
270 case eintmodPOTSWITCH: nbp->vdwtype = evdwCuPSWITCH; break;
273 "The requested VdW interaction modifier is not implemented in the CUDA GPU "
274 "accelerated kernels!");
277 else if (ic->vdwtype == evdwPME)
279 if (ic->ljpme_comb_rule == ljcrGEOM)
281 assert(nbatParams.comb_rule == ljcrGEOM);
282 nbp->vdwtype = evdwCuEWALDGEOM;
286 assert(nbatParams.comb_rule == ljcrLB);
287 nbp->vdwtype = evdwCuEWALDLB;
293 "The requested VdW type is not implemented in the CUDA GPU accelerated kernels!");
296 if (ic->eeltype == eelCUT)
298 nbp->eeltype = eelCuCUT;
300 else if (EEL_RF(ic->eeltype))
302 nbp->eeltype = eelCuRF;
304 else if ((EEL_PME(ic->eeltype) || ic->eeltype == eelEWALD))
306 nbp->eeltype = pick_ewald_kernel_type(*ic);
310 /* Shouldn't happen, as this is checked when choosing Verlet-scheme */
312 "The requested electrostatics type is not implemented in the CUDA GPU accelerated "
316 /* generate table for PME */
317 nbp->coulomb_tab = nullptr;
318 if (nbp->eeltype == eelCuEWALD_TAB || nbp->eeltype == eelCuEWALD_TAB_TWIN)
320 GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
321 init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp);
324 /* set up LJ parameter lookup table */
325 if (!useLjCombRule(nbp))
327 initParamLookupTable(nbp->nbfp, nbp->nbfp_texobj, nbatParams.nbfp.data(), 2 * ntypes * ntypes);
330 /* set up LJ-PME parameter lookup table */
331 if (ic->vdwtype == evdwPME)
333 initParamLookupTable(nbp->nbfp_comb, nbp->nbfp_comb_texobj, nbatParams.nbfp_comb.data(), 2 * ntypes);
337 /*! Re-generates the GPU Ewald force table, resets rlist, and updates the
338 * electrostatic type, switching to twin cut-off (or back) if needed. */
339 void gpu_pme_loadbal_update_param(const nonbonded_verlet_t* nbv, const interaction_const_t* ic)
341 if (!nbv || !nbv->useGpu())
345 cu_nbparam_t* nbp = nbv->gpu_nbv->nbparam;
347 set_cutoff_parameters(nbp, ic, nbv->pairlistSets().params());
349 nbp->eeltype = pick_ewald_kernel_type(*ic);
351 GMX_RELEASE_ASSERT(ic->coulombEwaldTables, "Need valid Coulomb Ewald correction tables");
352 init_ewald_coulomb_force_table(*ic->coulombEwaldTables, nbp);
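/* Context (descriptive addition): this is called during PME load balancing,
 * which rescales rcoulomb and the PME grid to shift work between the
 * short-ranged and long-ranged parts, so the GPU cut-off parameters, Ewald
 * kernel type and correction table have to be refreshed accordingly. */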
355 /*! Initializes the pair list data structure. */
356 static void init_plist(cu_plist_t* pl)
358 /* initialize to nullptr pointers to data that is not allocated here and will
359 need reallocation in nbnxn_gpu_init_pairlist */
365 /* size -1 indicates that the respective array hasn't been initialized yet */
372 pl->imask_nalloc = -1;
374 pl->excl_nalloc = -1;
375 pl->haveFreshList = false;
378 /*! Initializes the timings data structure. */
379 static void init_timings(gmx_wallclock_gpu_nbnxn_t* t)
388 for (i = 0; i < 2; i++)
390 for (j = 0; j < 2; j++)
392 t->ktime[i][j].t = 0.0;
393 t->ktime[i][j].c = 0;
397 t->pruneTime.t = 0.0;
398 t->dynamicPruneTime.c = 0;
399 t->dynamicPruneTime.t = 0.0;
402 /*! Initializes simulation constant data. */
403 static void cuda_init_const(gmx_nbnxn_cuda_t* nb,
404 const interaction_const_t* ic,
405 const PairlistParams& listParams,
406 const nbnxn_atomdata_t::Params& nbatParams)
408 init_atomdata_first(nb->atdat, nbatParams.numTypes);
409 init_nbparam(nb->nbparam, ic, listParams, nbatParams);
411 /* clear energy and shift force outputs */
412 nbnxn_cuda_clear_e_fshift(nb);
415 gmx_nbnxn_cuda_t* gpu_init(const gmx_device_info_t* deviceInfo,
416 const interaction_const_t* ic,
417 const PairlistParams& listParams,
418 const nbnxn_atomdata_t* nbat,
420 gmx_bool bLocalAndNonlocal)
424 gmx_nbnxn_cuda_t* nb;
427 snew(nb->nbparam, 1);
428 snew(nb->plist[InteractionLocality::Local], 1);
429 if (bLocalAndNonlocal)
431 snew(nb->plist[InteractionLocality::NonLocal], 1);
434 nb->bUseTwoStreams = bLocalAndNonlocal;
436 nb->timers = new cu_timers_t();
437 snew(nb->timings, 1);
440 pmalloc((void**)&nb->nbst.e_lj, sizeof(*nb->nbst.e_lj));
441 pmalloc((void**)&nb->nbst.e_el, sizeof(*nb->nbst.e_el));
442 pmalloc((void**)&nb->nbst.fshift, SHIFTS * sizeof(*nb->nbst.fshift));
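/* Note (descriptive addition): these staging buffers are allocated with
 * pmalloc() as page-locked (pinned) host memory so that the asynchronous
 * device-to-host copies of the energies and shift forces can overlap with
 * GPU execution. */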
444 init_plist(nb->plist[InteractionLocality::Local]);
446 /* set device info, just point it to the right GPU among the detected ones */
447 nb->dev_info = deviceInfo;
449 /* local/non-local GPU streams */
450 stat = cudaStreamCreate(&nb->stream[InteractionLocality::Local]);
451 CU_RET_ERR(stat, "cudaStreamCreate on stream[InteractionLocality::Local] failed");
452 if (nb->bUseTwoStreams)
454 init_plist(nb->plist[InteractionLocality::NonLocal]);
456 /* Note that the device we're running on does not have to support
457 * priorities, because we are querying the priority range which in this
458 * case will be a single value.
460 int highest_priority;
461 stat = cudaDeviceGetStreamPriorityRange(nullptr, &highest_priority);
462 CU_RET_ERR(stat, "cudaDeviceGetStreamPriorityRange failed");
464 stat = cudaStreamCreateWithPriority(&nb->stream[InteractionLocality::NonLocal],
465 cudaStreamDefault, highest_priority);
467 "cudaStreamCreateWithPriority on stream[InteractionLocality::NonLocal] failed");
470 /* init events for synchronization (timing disabled for performance reasons!) */
471 stat = cudaEventCreateWithFlags(&nb->nonlocal_done, cudaEventDisableTiming);
472 CU_RET_ERR(stat, "cudaEventCreate on nonlocal_done failed");
473 stat = cudaEventCreateWithFlags(&nb->misc_ops_and_local_H2D_done, cudaEventDisableTiming);
474 CU_RET_ERR(stat, "cudaEventCreate on misc_ops_and_local_H2D_done failed");
476 nb->xNonLocalCopyD2HDone = new GpuEventSynchronizer();
478 /* WARNING: CUDA timings are incorrect with multiple streams.
479 * This is the main reason why they are disabled by default.
481 // TODO: Consider turning on by default when we can detect nr of streams.
482 nb->bDoTime = (getenv("GMX_ENABLE_GPU_TIMING") != nullptr);
486 init_timings(nb->timings);
489 /* set the kernel type for the current GPU */
490 /* pick L1 cache configuration */
491 cuda_set_cacheconfig();
493 cuda_init_const(nb, ic, listParams, nbat->params());
495 nb->atomIndicesSize = 0;
496 nb->atomIndicesSize_alloc = 0;
498 nb->ncxy_na_alloc = 0;
500 nb->ncxy_ind_alloc = 0;
506 fprintf(debug, "Initialized CUDA data structures.\n");
512 void gpu_init_pairlist(gmx_nbnxn_cuda_t* nb, const NbnxnPairlistGpu* h_plist, const InteractionLocality iloc)
515 bool bDoTime = (nb->bDoTime && !h_plist->sci.empty());
516 cudaStream_t stream = nb->stream[iloc];
517 cu_plist_t* d_plist = nb->plist[iloc];
519 if (d_plist->na_c < 0)
521 d_plist->na_c = h_plist->na_ci;
525 if (d_plist->na_c != h_plist->na_ci)
527 sprintf(sbuf, "In cu_init_plist: the #atoms per cell has changed (from %d to %d)",
528 d_plist->na_c, h_plist->na_ci);
533 gpu_timers_t::Interaction& iTimers = nb->timers->interaction[iloc];
537 iTimers.pl_h2d.openTimingRegion(stream);
538 iTimers.didPairlistH2D = true;
541 DeviceContext context = nullptr;
543 reallocateDeviceBuffer(&d_plist->sci, h_plist->sci.size(), &d_plist->nsci, &d_plist->sci_nalloc, context);
544 copyToDeviceBuffer(&d_plist->sci, h_plist->sci.data(), 0, h_plist->sci.size(), stream,
545 GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);
547 reallocateDeviceBuffer(&d_plist->cj4, h_plist->cj4.size(), &d_plist->ncj4, &d_plist->cj4_nalloc, context);
548 copyToDeviceBuffer(&d_plist->cj4, h_plist->cj4.data(), 0, h_plist->cj4.size(), stream,
549 GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);
551 reallocateDeviceBuffer(&d_plist->imask, h_plist->cj4.size() * c_nbnxnGpuClusterpairSplit,
552 &d_plist->nimask, &d_plist->imask_nalloc, context);
554 reallocateDeviceBuffer(&d_plist->excl, h_plist->excl.size(), &d_plist->nexcl,
555 &d_plist->excl_nalloc, context);
556 copyToDeviceBuffer(&d_plist->excl, h_plist->excl.data(), 0, h_plist->excl.size(), stream,
557 GpuApiCallBehavior::Async, bDoTime ? iTimers.pl_h2d.fetchNextEvent() : nullptr);
561 iTimers.pl_h2d.closeTimingRegion(stream);
564 /* the next use of this list will be the first one, so we need to prune */
565 d_plist->haveFreshList = true;
568 void gpu_upload_shiftvec(gmx_nbnxn_cuda_t* nb, const nbnxn_atomdata_t* nbatom)
570 cu_atomdata_t* adat = nb->atdat;
571 cudaStream_t ls = nb->stream[InteractionLocality::Local];
573 /* only if we have a dynamic box */
574 if (nbatom->bDynamicBox || !adat->bShiftVecUploaded)
576 cu_copy_H2D_async(adat->shift_vec, nbatom->shift_vec.data(), SHIFTS * sizeof(*adat->shift_vec), ls);
577 adat->bShiftVecUploaded = true;
581 /*! Clears the first natoms_clear elements of the GPU nonbonded force output array. */
582 static void nbnxn_cuda_clear_f(gmx_nbnxn_cuda_t* nb, int natoms_clear)
585 cu_atomdata_t* adat = nb->atdat;
586 cudaStream_t ls = nb->stream[InteractionLocality::Local];
588 stat = cudaMemsetAsync(adat->f, 0, natoms_clear * sizeof(*adat->f), ls);
589 CU_RET_ERR(stat, "cudaMemsetAsync on f failed");
592 /*! Clears nonbonded shift force output array and energy outputs on the GPU. */
593 static void nbnxn_cuda_clear_e_fshift(gmx_nbnxn_cuda_t* nb)
596 cu_atomdata_t* adat = nb->atdat;
597 cudaStream_t ls = nb->stream[InteractionLocality::Local];
599 stat = cudaMemsetAsync(adat->fshift, 0, SHIFTS * sizeof(*adat->fshift), ls);
600 CU_RET_ERR(stat, "cudaMemsetAsync on fshift failed");
601 stat = cudaMemsetAsync(adat->e_lj, 0, sizeof(*adat->e_lj), ls);
602 CU_RET_ERR(stat, "cudaMemsetAsync on e_lj failed");
603 stat = cudaMemsetAsync(adat->e_el, 0, sizeof(*adat->e_el), ls);
604 CU_RET_ERR(stat, "cudaMemsetAsync on e_el failed");
607 void gpu_clear_outputs(gmx_nbnxn_cuda_t* nb, bool computeVirial)
609 nbnxn_cuda_clear_f(nb, nb->atdat->natoms);
610 /* clear shift force array and energies if the outputs were
611 used in the current step */
614 nbnxn_cuda_clear_e_fshift(nb);
618 void gpu_init_atomdata(gmx_nbnxn_cuda_t* nb, const nbnxn_atomdata_t* nbat)
623 bool bDoTime = nb->bDoTime;
624 cu_timers_t* timers = nb->timers;
625 cu_atomdata_t* d_atdat = nb->atdat;
626 cudaStream_t ls = nb->stream[InteractionLocality::Local];
628 natoms = nbat->numAtoms();
633 /* time async copy */
634 timers->atdat.openTimingRegion(ls);
637 /* need to reallocate if we have to copy more atoms than the amount of space
638 available and only allocate if we haven't initialized yet, i.e. d_atdat->nalloc == -1 */
639 if (natoms > d_atdat->nalloc)
641 nalloc = over_alloc_small(natoms);
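/* over_alloc_small() grows the requested size by a modest factor (plus a
 * small constant) so that a gradually growing atom count does not trigger
 * a device reallocation on every pair-search step. */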
643 /* free up first if the arrays have already been initialized */
644 if (d_atdat->nalloc != -1)
646 freeDeviceBuffer(&d_atdat->f);
647 freeDeviceBuffer(&d_atdat->xq);
648 freeDeviceBuffer(&d_atdat->atom_types);
649 freeDeviceBuffer(&d_atdat->lj_comb);
652 stat = cudaMalloc((void**)&d_atdat->f, nalloc * sizeof(*d_atdat->f));
653 CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->f");
654 stat = cudaMalloc((void**)&d_atdat->xq, nalloc * sizeof(*d_atdat->xq));
655 CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->xq");
656 if (useLjCombRule(nb->nbparam))
658 stat = cudaMalloc((void**)&d_atdat->lj_comb, nalloc * sizeof(*d_atdat->lj_comb));
659 CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->lj_comb");
663 stat = cudaMalloc((void**)&d_atdat->atom_types, nalloc * sizeof(*d_atdat->atom_types));
664 CU_RET_ERR(stat, "cudaMalloc failed on d_atdat->atom_types");
667 d_atdat->nalloc = nalloc;
671 d_atdat->natoms = natoms;
672 d_atdat->natoms_local = nbat->natoms_local;
674 /* need to clear GPU f output if realloc happened */
677 nbnxn_cuda_clear_f(nb, nalloc);
680 if (useLjCombRule(nb->nbparam))
682 cu_copy_H2D_async(d_atdat->lj_comb, nbat->params().lj_comb.data(),
683 natoms * sizeof(*d_atdat->lj_comb), ls);
687 cu_copy_H2D_async(d_atdat->atom_types, nbat->params().type.data(),
688 natoms * sizeof(*d_atdat->atom_types), ls);
693 timers->atdat.closeTimingRegion(ls);
697 static void nbnxn_cuda_free_nbparam_table(cu_nbparam_t* nbparam)
699 if (nbparam->eeltype == eelCuEWALD_TAB || nbparam->eeltype == eelCuEWALD_TAB_TWIN)
701 destroyParamLookupTable(nbparam->coulomb_tab, nbparam->coulomb_tab_texobj);
705 void gpu_free(gmx_nbnxn_cuda_t* nb)
708 cu_atomdata_t* atdat;
709 cu_nbparam_t* nbparam;
717 nbparam = nb->nbparam;
719 nbnxn_cuda_free_nbparam_table(nbparam);
721 stat = cudaEventDestroy(nb->nonlocal_done);
722 CU_RET_ERR(stat, "cudaEventDestroy failed on nb->nonlocal_done");
723 stat = cudaEventDestroy(nb->misc_ops_and_local_H2D_done);
724 CU_RET_ERR(stat, "cudaEventDestroy failed on nb->misc_ops_and_local_H2D_done");
729 /* The non-local counters/stream (second in the array) are needed only with DD. */
730 for (int i = 0; i <= (nb->bUseTwoStreams ? 1 : 0); i++)
732 stat = cudaStreamDestroy(nb->stream[i]);
733 CU_RET_ERR(stat, "cudaStreamDestroy failed on stream");
737 if (!useLjCombRule(nb->nbparam))
739 destroyParamLookupTable(nbparam->nbfp, nbparam->nbfp_texobj);
742 if (nbparam->vdwtype == evdwCuEWALDGEOM || nbparam->vdwtype == evdwCuEWALDLB)
744 destroyParamLookupTable(nbparam->nbfp_comb, nbparam->nbfp_comb_texobj);
747 stat = cudaFree(atdat->shift_vec);
748 CU_RET_ERR(stat, "cudaFree failed on atdat->shift_vec");
749 stat = cudaFree(atdat->fshift);
750 CU_RET_ERR(stat, "cudaFree failed on atdat->fshift");
752 stat = cudaFree(atdat->e_lj);
753 CU_RET_ERR(stat, "cudaFree failed on atdat->e_lj");
754 stat = cudaFree(atdat->e_el);
755 CU_RET_ERR(stat, "cudaFree failed on atdat->e_el");
757 freeDeviceBuffer(&atdat->f);
758 freeDeviceBuffer(&atdat->xq);
759 freeDeviceBuffer(&atdat->atom_types);
760 freeDeviceBuffer(&atdat->lj_comb);
763 auto* plist = nb->plist[InteractionLocality::Local];
764 freeDeviceBuffer(&plist->sci);
765 freeDeviceBuffer(&plist->cj4);
766 freeDeviceBuffer(&plist->imask);
767 freeDeviceBuffer(&plist->excl);
769 if (nb->bUseTwoStreams)
771 auto* plist_nl = nb->plist[InteractionLocality::NonLocal];
772 freeDeviceBuffer(&plist_nl->sci);
773 freeDeviceBuffer(&plist_nl->cj4);
774 freeDeviceBuffer(&plist_nl->imask);
775 freeDeviceBuffer(&plist_nl->excl);
780 pfree(nb->nbst.e_lj);
781 nb->nbst.e_lj = nullptr;
783 pfree(nb->nbst.e_el);
784 nb->nbst.e_el = nullptr;
786 pfree(nb->nbst.fshift);
787 nb->nbst.fshift = nullptr;
796 fprintf(debug, "Cleaned up CUDA data structures.\n");
800 //! This function is documented in the header file
801 gmx_wallclock_gpu_nbnxn_t* gpu_get_timings(gmx_nbnxn_cuda_t* nb)
803 return (nb != nullptr && nb->bDoTime) ? nb->timings : nullptr;
806 void gpu_reset_timings(nonbonded_verlet_t* nbv)
808 if (nbv->gpu_nbv && nbv->gpu_nbv->bDoTime)
810 init_timings(nbv->gpu_nbv->timings);
814 int gpu_min_ci_balanced(gmx_nbnxn_cuda_t* nb)
816 return nb != nullptr ? gpu_min_ci_balanced_factor * nb->dev_info->prop.multiProcessorCount : 0;
819 gmx_bool gpu_is_kernel_ewald_analytical(const gmx_nbnxn_cuda_t* nb)
821 return ((nb->nbparam->eeltype == eelCuEWALD_ANA) || (nb->nbparam->eeltype == eelCuEWALD_ANA_TWIN));
824 void* gpu_get_command_stream(gmx_nbnxn_gpu_t* nb, const InteractionLocality iloc)
828 return static_cast<void*>(&nb->stream[iloc]);
831 void* gpu_get_xq(gmx_nbnxn_gpu_t* nb)
835 return static_cast<void*>(nb->atdat->xq);
838 void* gpu_get_f(gmx_nbnxn_gpu_t* nb)
842 return static_cast<void*>(nb->atdat->f);
845 rvec* gpu_get_fshift(gmx_nbnxn_gpu_t* nb)
849 return reinterpret_cast<rvec*>(nb->atdat->fshift);
852 /* Initialization for X buffer operations on GPU. */
853 /* TODO Remove explicit pinning from host arrays from here and manage in a more natural way*/
854 void nbnxn_gpu_init_x_to_nbat_x(const Nbnxm::GridSet& gridSet, gmx_nbnxn_gpu_t* gpu_nbv)
856 cudaStream_t stream = gpu_nbv->stream[InteractionLocality::Local];
857 bool bDoTime = gpu_nbv->bDoTime;
858 const int maxNumColumns = gridSet.numColumnsMax();
860 reallocateDeviceBuffer(&gpu_nbv->cxy_na, maxNumColumns * gridSet.grids().size(),
861 &gpu_nbv->ncxy_na, &gpu_nbv->ncxy_na_alloc, nullptr);
862 reallocateDeviceBuffer(&gpu_nbv->cxy_ind, maxNumColumns * gridSet.grids().size(),
863 &gpu_nbv->ncxy_ind, &gpu_nbv->ncxy_ind_alloc, nullptr);
865 for (unsigned int g = 0; g < gridSet.grids().size(); g++)
868 const Nbnxm::Grid& grid = gridSet.grids()[g];
870 const int numColumns = grid.numColumns();
871 const int* atomIndices = gridSet.atomIndices().data();
872 const int atomIndicesSize = gridSet.atomIndices().size();
873 const int* cxy_na = grid.cxy_na().data();
874 const int* cxy_ind = grid.cxy_ind().data();
876 reallocateDeviceBuffer(&gpu_nbv->atomIndices, atomIndicesSize, &gpu_nbv->atomIndicesSize,
877 &gpu_nbv->atomIndicesSize_alloc, nullptr);
879 if (atomIndicesSize > 0)
884 gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(stream);
887 copyToDeviceBuffer(&gpu_nbv->atomIndices, atomIndices, 0, atomIndicesSize, stream,
888 GpuApiCallBehavior::Async, nullptr);
892 gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(stream);
900 gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(stream);
903 int* destPtr = &gpu_nbv->cxy_na[maxNumColumns * g];
904 copyToDeviceBuffer(&destPtr, cxy_na, 0, numColumns, stream, GpuApiCallBehavior::Async, nullptr);
908 gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(stream);
913 gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.openTimingRegion(stream);
916 destPtr = &gpu_nbv->cxy_ind[maxNumColumns * g];
917 copyToDeviceBuffer(&destPtr, cxy_ind, 0, numColumns, stream, GpuApiCallBehavior::Async, nullptr);
921 gpu_nbv->timers->xf[AtomLocality::Local].nb_h2d.closeTimingRegion(stream);
926 // The above data is transferred on the local stream but is a
927 // dependency of the nonlocal stream (specifically the nonlocal X
928 // buf ops kernel). We therefore set a dependency to ensure
929 // that the nonlocal stream waits on the local stream here.
930 // This call records an event in the local stream:
931 nbnxnInsertNonlocalGpuDependency(gpu_nbv, Nbnxm::InteractionLocality::Local);
932 // ...and this call instructs the nonlocal stream to wait on that event:
933 nbnxnInsertNonlocalGpuDependency(gpu_nbv, Nbnxm::InteractionLocality::NonLocal);
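/* Minimal sketch of the record/wait pattern the helper above is assumed to
 * implement (illustrative; depEvent stands for a cudaEvent_t owned by
 * gpu_nbv and is not a member declared in this file):
 *
 *   cudaEventRecord(depEvent, stream[InteractionLocality::Local]);            // Local call
 *   cudaStreamWaitEvent(stream[InteractionLocality::NonLocal], depEvent, 0);  // NonLocal call
 */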
938 /* Initialization for F buffer operations on GPU. */
939 void nbnxn_gpu_init_add_nbat_f_to_f(const int* cell,
940 gmx_nbnxn_gpu_t* gpu_nbv,
942 GpuEventSynchronizer* const localReductionDone)
945 cudaStream_t stream = gpu_nbv->stream[InteractionLocality::Local];
947 GMX_ASSERT(localReductionDone, "localReductionDone should be a valid pointer");
948 gpu_nbv->localFReductionDone = localReductionDone;
950 if (natoms_total > 0)
952 reallocateDeviceBuffer(&gpu_nbv->cell, natoms_total, &gpu_nbv->ncell, &gpu_nbv->ncell_alloc, nullptr);
953 copyToDeviceBuffer(&gpu_nbv->cell, cell, 0, natoms_total, stream, GpuApiCallBehavior::Async, nullptr);